From 53364c539970785f9317c2ddab26a4e350f12b23 Mon Sep 17 00:00:00 2001 From: Anurag Gupta <61045224+anurag-ris90@users.noreply.github.com> Date: Mon, 29 Sep 2025 23:00:18 +0530 Subject: [PATCH] Add enhancements from local-development/src/kafka~ --- .asf.yaml | 31 +- .github/actions/run-gradle/action.yml | 50 +- .github/actions/setup-gradle/action.yml | 4 +- .github/actions/setup-python/action.yml | 2 +- .github/configs/labeler.yml | 6 - .github/scripts/develocity_reports.py | 747 +- .github/scripts/junit.py | 112 +- .github/scripts/requirements.txt | 4 +- .github/scripts/thread-dump.sh | 6 +- .github/workflows/README.md | 87 +- .github/workflows/build.yml | 203 +- .github/workflows/ci-complete.yml | 26 +- .github/workflows/ci-requested.yml | 86 + .github/workflows/deflake.yml | 55 +- .github/workflows/docker_build_and_test.yml | 4 +- .../docker_official_image_build_and_test.yml | 4 +- .github/workflows/docker_rc_release.yml | 4 +- .github/workflows/docker_scan.yml | 2 +- .github/workflows/generate-reports.yml | 8 +- .github/workflows/pr-labeled.yml | 4 +- .github/workflows/pr-reviewed-trigger.yml | 42 + .github/workflows/pr-reviewed.yml | 44 +- .github/workflows/pr-update.yml | 4 +- .../prepare_docker_official_image_source.yml | 4 +- .github/workflows/stale.yml | 16 + Dockerfile | 41 + LICENSE-binary | 97 +- NOTICE | 4 + NOTICE-binary | 6 +- PULL_REQUEST_TEMPLATE.md | 14 + README.md | 58 +- bin/kafka-run-class.sh | 6 +- bin/windows/kafka-run-class.bat | 4 +- build.gradle | 473 +- checkstyle/checkstyle.xml | 1 - .../import-control-coordinator-common.xml | 2 - checkstyle/import-control-core.xml | 49 +- .../import-control-group-coordinator.xml | 4 - checkstyle/import-control-jmh-benchmarks.xml | 3 +- checkstyle/import-control-metadata.xml | 24 +- checkstyle/import-control-server-common.xml | 9 +- checkstyle/import-control-server.xml | 18 - checkstyle/import-control-storage.xml | 25 +- ...import-control-transaction-coordinator.xml | 3 - checkstyle/import-control.xml | 26 +- checkstyle/suppressions.xml | 47 +- .../org/apache/kafka/clients/ClientUtils.java | 6 +- .../kafka/clients/CommonClientConfigs.java | 23 +- .../kafka/clients/GroupRebalanceConfig.java | 7 - .../org/apache/kafka/clients/Metadata.java | 14 +- .../apache/kafka/clients/NetworkClient.java | 8 +- .../clients/admin/AbortTransactionResult.java | 2 +- .../kafka/clients/admin/AbstractOptions.java | 1 - .../org/apache/kafka/clients/admin/Admin.java | 286 +- .../clients/admin/AdminClientConfig.java | 16 +- .../admin/AlterClientQuotasResult.java | 2 +- .../kafka/clients/admin/AlterConfigOp.java | 14 - .../clients/admin/AlterConfigsResult.java | 2 +- .../AlterConsumerGroupOffsetsResult.java | 2 +- .../AlterPartitionReassignmentsOptions.java | 21 - .../AlterPartitionReassignmentsResult.java | 2 +- .../admin/AlterReplicaLogDirsResult.java | 2 +- .../AlterUserScramCredentialsResult.java | 2 +- .../admin/ClientMetricsResourceListing.java | 1 - .../clients/admin/ConsumerGroupListing.java | 2 - .../kafka/clients/admin/CreateAclsResult.java | 2 +- .../clients/admin/CreatePartitionsResult.java | 2 +- .../clients/admin/CreateTopicsResult.java | 2 +- .../kafka/clients/admin/DeleteAclsResult.java | 2 +- .../admin/DeleteConsumerGroupsResult.java | 8 +- .../clients/admin/DeleteRecordsResult.java | 2 +- .../clients/admin/DeleteTopicsResult.java | 4 +- .../admin/DescribeClassicGroupsResult.java | 2 +- .../clients/admin/DescribeClusterResult.java | 9 +- .../clients/admin/DescribeConfigsResult.java | 2 +- .../admin/DescribeConsumerGroupsResult.java | 2 +- .../clients/admin/DescribeLogDirsResult.java | 2 +- .../admin/DescribeProducersResult.java | 2 +- .../admin/DescribeReplicaLogDirsResult.java | 2 +- .../admin/DescribeShareGroupsResult.java | 2 +- .../clients/admin/DescribeTopicsOptions.java | 15 +- .../clients/admin/DescribeTopicsResult.java | 2 +- .../admin/DescribeTransactionsResult.java | 2 +- .../DescribeUserScramCredentialsResult.java | 2 +- .../clients/admin/FenceProducersResult.java | 2 +- .../kafka/clients/admin/ForwardingAdmin.java | 58 - .../kafka/clients/admin/KafkaAdminClient.java | 926 +- .../ListClientMetricsResourcesOptions.java | 2 - .../ListClientMetricsResourcesResult.java | 2 - .../admin/ListConsumerGroupOffsetsResult.java | 2 +- .../admin/ListConsumerGroupsOptions.java | 3 - .../admin/ListConsumerGroupsResult.java | 4 +- .../clients/admin/ListGroupsOptions.java | 53 +- .../clients/admin/ListOffsetsResult.java | 2 +- .../admin/ListTransactionsOptions.java | 30 +- .../clients/admin/LogDirDescription.java | 2 - .../apache/kafka/clients/admin/NewTopic.java | 1 - .../kafka/clients/admin/OffsetSpec.java | 10 - .../clients/admin/RaftVoterEndpoint.java | 27 +- .../kafka/clients/admin/RecordsToDelete.java | 6 +- .../kafka/clients/admin/ReplicaInfo.java | 1 - .../clients/admin/ShareGroupDescription.java | 32 +- .../clients/admin/ShareMemberDescription.java | 21 +- .../admin/UnregisterBrokerOptions.java | 2 +- .../clients/admin/UpdateFeaturesResult.java | 2 +- .../internals/AdminBootstrapAddresses.java | 2 +- .../admin/internals/AdminMetadataManager.java | 33 +- .../AlterConsumerGroupOffsetsHandler.java | 2 +- .../DeleteConsumerGroupsHandler.java | 116 +- .../internals/DescribeShareGroupsHandler.java | 9 +- .../ListConsumerGroupOffsetsHandler.java | 104 +- .../admin/internals/ListOffsetsHandler.java | 9 +- .../internals/ListTransactionsHandler.java | 3 - .../clients/consumer/AcknowledgeType.java | 4 - .../AcknowledgementCommitCallback.java | 8 +- .../kafka/clients/consumer/Consumer.java | 7 +- .../clients/consumer/ConsumerConfig.java | 78 +- .../clients/consumer/ConsumerInterceptor.java | 2 - .../consumer/ConsumerPartitionAssignor.java | 3 + .../consumer/ConsumerRebalanceListener.java | 58 +- .../clients/consumer/ConsumerRecord.java | 23 +- .../kafka/clients/consumer/KafkaConsumer.java | 113 +- .../clients/consumer/KafkaShareConsumer.java | 207 +- .../kafka/clients/consumer/MockConsumer.java | 45 +- .../clients/consumer/MockShareConsumer.java | 13 - .../clients/consumer/OffsetAndMetadata.java | 17 +- .../consumer/OffsetCommitCallback.java | 1 - .../kafka/clients/consumer/ShareConsumer.java | 20 +- .../internals/AbstractCoordinator.java | 116 +- .../consumer/internals/AbstractFetch.java | 45 +- .../AbstractHeartbeatRequestManager.java | 149 +- .../internals/AbstractMembershipManager.java | 112 +- .../internals/AbstractStickyAssignor.java | 4 +- .../AcknowledgementCommitCallbackHandler.java | 6 +- .../consumer/internals/Acknowledgements.java | 41 +- .../internals/AsyncKafkaConsumer.java | 254 +- .../internals/ClassicKafkaConsumer.java | 72 +- .../internals/CommitRequestManager.java | 143 +- .../consumer/internals/CompletedFetch.java | 9 +- .../internals/ConsumerCoordinator.java | 202 +- .../internals/ConsumerDelegateCreator.java | 3 +- .../ConsumerHeartbeatRequestManager.java | 55 +- .../internals/ConsumerInterceptors.java | 23 +- .../internals/ConsumerMembershipManager.java | 62 +- .../consumer/internals/ConsumerMetadata.java | 51 +- .../consumer/internals/ConsumerMetrics.java | 6 +- .../internals/ConsumerNetworkThread.java | 70 +- .../ConsumerRebalanceListenerInvoker.java | 35 +- .../consumer/internals/ConsumerUtils.java | 1 - .../internals/CoordinatorRequestManager.java | 2 +- .../consumer/internals/Deserializers.java | 52 +- .../consumer/internals/FetchBuffer.java | 32 +- .../consumer/internals/FetchCollector.java | 27 +- .../internals/FetchMetricsManager.java | 149 +- .../internals/FetchMetricsRegistry.java | 31 +- .../clients/consumer/internals/Fetcher.java | 2 +- .../consumer/internals/RequestManagers.java | 129 +- .../internals/ShareCompletedFetch.java | 31 +- .../internals/ShareConsumeRequestManager.java | 653 +- .../ShareConsumerDelegateCreator.java | 4 +- .../consumer/internals/ShareConsumerImpl.java | 301 +- .../internals/ShareConsumerMetrics.java | 6 +- .../consumer/internals/ShareFetch.java | 34 +- .../internals/ShareFetchCollector.java | 2 +- .../internals/ShareFetchMetricsManager.java | 17 +- .../ShareHeartbeatRequestManager.java | 53 +- .../internals/ShareInFlightBatch.java | 16 +- .../internals/ShareMembershipManager.java | 9 +- .../internals/ShareSessionHandler.java | 19 +- .../consumer/internals/SubscriptionState.java | 62 +- .../consumer/internals/TimedRequestState.java | 4 - .../TopicMetadataRequestManager.java | 32 +- .../internals/events/ApplicationEvent.java | 30 +- .../events/ApplicationEventProcessor.java | 103 +- .../internals/events/BackgroundEvent.java | 34 +- .../internals/events/CompletableEvent.java | 2 +- .../events/CompletableEventReaper.java | 119 +- .../events/LeaveGroupOnCloseEvent.java | 13 +- .../events/ShareAcknowledgeAsyncEvent.java | 20 +- .../events/ShareAcknowledgeOnCloseEvent.java | 8 +- .../events/ShareAcknowledgeSyncEvent.java | 7 +- .../internals/events/ShareFetchEvent.java | 16 +- .../metrics/AsyncConsumerMetrics.java | 42 +- .../ConsumerRebalanceMetricsManager.java | 23 +- .../metrics/KafkaConsumerMetrics.java | 6 +- .../metrics/KafkaShareConsumerMetrics.java | 6 +- .../kafka/clients/producer/Callback.java | 4 - .../kafka/clients/producer/KafkaProducer.java | 279 +- .../kafka/clients/producer/MockProducer.java | 40 +- .../kafka/clients/producer/Partitioner.java | 3 - .../kafka/clients/producer/Producer.java | 19 +- .../clients/producer/ProducerConfig.java | 87 +- .../clients/producer/ProducerInterceptor.java | 32 +- .../internals/KafkaProducerMetrics.java | 11 - .../internals/ProducerInterceptors.java | 46 +- .../producer/internals/RecordAccumulator.java | 15 +- .../clients/producer/internals/Sender.java | 78 +- .../internals/TransactionManager.java | 227 +- .../apache/kafka/common/ClusterResource.java | 5 +- .../kafka/common/ClusterResourceListener.java | 1 + .../org/apache/kafka/common/Endpoint.java | 25 +- .../org/apache/kafka/common/GroupState.java | 24 +- .../org/apache/kafka/common/GroupType.java | 3 +- .../kafka/common/InvalidRecordException.java | 4 +- .../org/apache/kafka/common/KafkaFuture.java | 2 +- .../org/apache/kafka/common/MetricName.java | 2 +- .../java/org/apache/kafka/common/Uuid.java | 8 +- .../apache/kafka/common/acl/AclOperation.java | 7 +- .../kafka/common/config/AbstractConfig.java | 17 +- .../apache/kafka/common/config/ConfigDef.java | 58 +- .../kafka/common/config/LogLevelConfig.java | 6 +- .../kafka/common/config/SaslConfigs.java | 192 +- .../kafka/common/config/SslConfigs.java | 15 +- .../kafka/common/config/TopicConfig.java | 62 +- .../internals/BrokerSecurityConfigs.java | 24 +- .../provider/DirectoryConfigProvider.java | 3 +- .../errors/AuthenticationException.java | 2 +- .../common/errors/AuthorizationException.java | 2 +- .../CoordinatorNotAvailableException.java | 2 +- .../errors/FencedInstanceIdException.java | 2 +- .../errors/GroupMaxSizeReachedException.java | 2 +- .../errors/IllegalGenerationException.java | 2 +- .../errors/InvalidConfigurationException.java | 12 +- .../errors/InvalidMetadataException.java | 2 +- .../errors/InvalidPidMappingException.java | 2 +- .../errors/InvalidProducerEpochException.java | 2 +- .../InvalidReplicationFactorException.java | 2 +- .../errors/InvalidRequiredAcksException.java | 2 +- .../common/errors/InvalidTopicException.java | 2 +- .../errors/NotCoordinatorException.java | 2 +- .../errors/ProducerFencedException.java | 2 +- .../errors/RecordBatchTooLargeException.java | 2 +- .../errors/TransactionAbortableException.java | 7 - .../errors/UnknownMemberIdException.java | 2 +- .../UnsupportedForMessageFormatException.java | 2 +- .../errors/UnsupportedVersionException.java | 2 +- .../apache/kafka/common/feature/Features.java | 2 +- .../apache/kafka/common/header/Header.java | 17 +- .../apache/kafka/common/header/Headers.java | 24 +- .../header/internals/RecordHeaders.java | 4 - .../common/internals/PartitionStates.java | 41 + .../apache/kafka/common/metrics/Metrics.java | 12 +- .../apache/kafka/common/metrics/Sensor.java | 2 +- .../kafka/common/network/KafkaChannel.java | 10 - .../network/PlaintextChannelBuilder.java | 2 +- .../apache/kafka/common/network/Selector.java | 1 - .../common/network/SslChannelBuilder.java | 2 +- .../apache/kafka/common/protocol/ApiKeys.java | 54 +- .../common/protocol/ByteBufferAccessor.java | 5 - .../apache/kafka/common/protocol/Errors.java | 19 +- .../kafka/common/protocol/MessageUtil.java | 39 +- .../kafka/common/protocol/Protocol.java | 2 +- .../kafka/common/protocol/Readable.java | 9 - .../kafka/common/record/ConvertedRecords.java | 36 + .../common/record/DefaultRecordBatch.java | 3 +- .../common/record/EndTransactionMarker.java | 60 +- .../kafka/common/record/FileRecords.java | 142 +- .../kafka/common/record/MemoryRecords.java | 40 +- .../common/record/MemoryRecordsBuilder.java | 10 +- .../kafka/common/record/RecordVersion.java | 12 +- .../apache/kafka/common/record/Records.java | 27 +- .../kafka/common/record/RecordsUtil.java | 141 + .../kafka/common/replica/ReplicaSelector.java | 3 - .../common/requests/AbstractRequest.java | 196 +- .../common/requests/AbstractResponse.java | 196 +- .../requests/AddOffsetsToTxnRequest.java | 8 +- .../requests/AddOffsetsToTxnResponse.java | 7 +- .../requests/AddPartitionsToTxnRequest.java | 7 +- .../requests/AddPartitionsToTxnResponse.java | 7 +- .../common/requests/AddRaftVoterRequest.java | 8 +- .../common/requests/AddRaftVoterResponse.java | 7 +- .../requests/AllocateProducerIdsRequest.java | 8 +- .../requests/AllocateProducerIdsResponse.java | 7 +- .../requests/AlterClientQuotasRequest.java | 7 +- .../requests/AlterClientQuotasResponse.java | 10 +- .../common/requests/AlterConfigsRequest.java | 7 +- .../common/requests/AlterConfigsResponse.java | 7 +- .../AlterPartitionReassignmentsRequest.java | 13 +- .../AlterPartitionReassignmentsResponse.java | 11 +- .../requests/AlterPartitionRequest.java | 7 +- .../requests/AlterPartitionResponse.java | 11 +- .../requests/AlterReplicaLogDirsRequest.java | 7 +- .../requests/AlterReplicaLogDirsResponse.java | 11 +- .../AlterUserScramCredentialsRequest.java | 23 +- .../AlterUserScramCredentialsResponse.java | 7 +- .../common/requests/ApiVersionsRequest.java | 7 +- .../common/requests/ApiVersionsResponse.java | 12 +- .../requests/AssignReplicasToDirsRequest.java | 8 +- .../AssignReplicasToDirsResponse.java | 7 +- .../requests/BeginQuorumEpochRequest.java | 7 +- .../requests/BeginQuorumEpochResponse.java | 11 +- .../requests/BrokerHeartbeatRequest.java | 8 +- .../requests/BrokerHeartbeatResponse.java | 11 +- .../requests/BrokerRegistrationRequest.java | 8 +- .../requests/BrokerRegistrationResponse.java | 11 +- .../ConsumerGroupDescribeRequest.java | 7 +- .../ConsumerGroupDescribeResponse.java | 12 +- .../ConsumerGroupHeartbeatRequest.java | 8 +- .../ConsumerGroupHeartbeatResponse.java | 26 +- .../ControllerRegistrationRequest.java | 8 +- .../ControllerRegistrationResponse.java | 7 +- .../common/requests/CreateAclsRequest.java | 7 +- .../common/requests/CreateAclsResponse.java | 7 +- .../CreateDelegationTokenRequest.java | 8 +- .../CreateDelegationTokenResponse.java | 6 +- .../requests/CreatePartitionsRequest.java | 8 +- .../requests/CreatePartitionsResponse.java | 11 +- .../common/requests/CreateTopicsRequest.java | 7 +- .../common/requests/CreateTopicsResponse.java | 11 +- .../common/requests/DeleteAclsRequest.java | 7 +- .../common/requests/DeleteAclsResponse.java | 7 +- .../common/requests/DeleteGroupsRequest.java | 7 +- .../common/requests/DeleteGroupsResponse.java | 10 +- .../common/requests/DeleteRecordsRequest.java | 8 +- .../requests/DeleteRecordsResponse.java | 11 +- .../DeleteShareGroupStateRequest.java | 31 +- .../DeleteShareGroupStateResponse.java | 65 +- .../common/requests/DeleteTopicsRequest.java | 7 +- .../common/requests/DeleteTopicsResponse.java | 11 +- .../common/requests/DescribeAclsRequest.java | 8 +- .../common/requests/DescribeAclsResponse.java | 7 +- .../requests/DescribeClientQuotasRequest.java | 7 +- .../DescribeClientQuotasResponse.java | 7 +- .../requests/DescribeClusterRequest.java | 8 +- .../requests/DescribeClusterResponse.java | 7 +- .../requests/DescribeConfigsRequest.java | 7 +- .../requests/DescribeConfigsResponse.java | 11 +- .../DescribeDelegationTokenRequest.java | 7 +- .../DescribeDelegationTokenResponse.java | 7 +- .../requests/DescribeGroupsRequest.java | 7 +- .../requests/DescribeGroupsResponse.java | 11 +- .../requests/DescribeLogDirsRequest.java | 8 +- .../requests/DescribeLogDirsResponse.java | 11 +- .../requests/DescribeProducersRequest.java | 8 +- .../requests/DescribeProducersResponse.java | 7 +- .../requests/DescribeQuorumRequest.java | 7 +- .../requests/DescribeQuorumResponse.java | 11 +- .../DescribeTopicPartitionsRequest.java | 7 +- .../DescribeTopicPartitionsResponse.java | 23 +- .../requests/DescribeTransactionsRequest.java | 8 +- .../DescribeTransactionsResponse.java | 11 +- .../DescribeUserScramCredentialsRequest.java | 17 +- .../DescribeUserScramCredentialsResponse.java | 7 +- .../common/requests/ElectLeadersRequest.java | 7 +- .../common/requests/ElectLeadersResponse.java | 10 +- .../requests/EndQuorumEpochRequest.java | 7 +- .../requests/EndQuorumEpochResponse.java | 11 +- .../kafka/common/requests/EndTxnRequest.java | 8 +- .../kafka/common/requests/EndTxnResponse.java | 7 +- .../common/requests/EnvelopeRequest.java | 6 +- .../common/requests/EnvelopeResponse.java | 6 +- .../ExpireDelegationTokenRequest.java | 6 +- .../ExpireDelegationTokenResponse.java | 7 +- .../kafka/common/requests/FetchRequest.java | 9 +- .../kafka/common/requests/FetchResponse.java | 46 +- .../common/requests/FetchSnapshotRequest.java | 7 +- .../requests/FetchSnapshotResponse.java | 11 +- .../requests/FindCoordinatorRequest.java | 7 +- .../requests/FindCoordinatorResponse.java | 11 +- .../GetTelemetrySubscriptionsRequest.java | 8 +- .../GetTelemetrySubscriptionsResponse.java | 11 +- .../common/requests/HeartbeatRequest.java | 8 +- .../common/requests/HeartbeatResponse.java | 7 +- .../IncrementalAlterConfigsRequest.java | 23 +- .../IncrementalAlterConfigsResponse.java | 10 +- .../requests/InitProducerIdRequest.java | 17 +- .../requests/InitProducerIdResponse.java | 7 +- .../InitializeShareGroupStateRequest.java | 31 +- .../InitializeShareGroupStateResponse.java | 79 +- .../common/requests/JoinGroupRequest.java | 13 +- .../common/requests/JoinGroupResponse.java | 7 +- .../common/requests/LeaderAndIsrRequest.java | 403 + .../common/requests/LeaderAndIsrResponse.java | 95 + .../common/requests/LeaveGroupRequest.java | 7 +- .../common/requests/LeaveGroupResponse.java | 11 +- .../ListClientMetricsResourcesRequest.java | 77 + .../ListClientMetricsResourcesResponse.java | 77 + .../common/requests/ListGroupsRequest.java | 25 +- .../common/requests/ListGroupsResponse.java | 7 +- .../common/requests/ListOffsetsRequest.java | 22 +- .../common/requests/ListOffsetsResponse.java | 11 +- .../ListPartitionReassignmentsRequest.java | 7 +- .../ListPartitionReassignmentsResponse.java | 7 +- .../requests/ListTransactionsRequest.java | 12 +- .../requests/ListTransactionsResponse.java | 11 +- .../common/requests/MetadataRequest.java | 52 +- .../common/requests/MetadataResponse.java | 10 +- .../common/requests/OffsetCommitRequest.java | 40 +- .../common/requests/OffsetCommitResponse.java | 135 +- .../common/requests/OffsetDeleteRequest.java | 8 +- .../common/requests/OffsetDeleteResponse.java | 11 +- .../common/requests/OffsetFetchRequest.java | 347 +- .../common/requests/OffsetFetchResponse.java | 413 +- .../OffsetsForLeaderEpochRequest.java | 14 +- .../OffsetsForLeaderEpochResponse.java | 11 +- .../kafka/common/requests/ProduceRequest.java | 30 +- .../common/requests/ProduceResponse.java | 40 +- .../common/requests/PushTelemetryRequest.java | 6 +- .../requests/PushTelemetryResponse.java | 11 +- .../requests/ReadShareGroupStateRequest.java | 33 +- .../requests/ReadShareGroupStateResponse.java | 86 +- .../ReadShareGroupStateSummaryRequest.java | 33 +- .../ReadShareGroupStateSummaryResponse.java | 86 +- .../requests/RemoveRaftVoterRequest.java | 8 +- .../requests/RemoveRaftVoterResponse.java | 7 +- .../requests/RenewDelegationTokenRequest.java | 8 +- .../RenewDelegationTokenResponse.java | 7 +- .../kafka/common/requests/RequestContext.java | 3 +- .../kafka/common/requests/RequestUtils.java | 17 - .../requests/SaslAuthenticateRequest.java | 8 +- .../requests/SaslAuthenticateResponse.java | 7 +- .../common/requests/SaslHandshakeRequest.java | 8 +- .../requests/SaslHandshakeResponse.java | 7 +- .../requests/ShareAcknowledgeRequest.java | 46 +- .../requests/ShareAcknowledgeResponse.java | 29 +- .../common/requests/ShareFetchRequest.java | 123 +- .../common/requests/ShareFetchResponse.java | 90 +- .../requests/ShareGroupDescribeRequest.java | 14 +- .../requests/ShareGroupDescribeResponse.java | 12 +- .../requests/ShareGroupHeartbeatRequest.java | 14 +- .../requests/ShareGroupHeartbeatResponse.java | 26 +- .../common/requests/SyncGroupRequest.java | 6 +- .../common/requests/SyncGroupResponse.java | 7 +- .../requests/TxnOffsetCommitRequest.java | 7 +- .../requests/TxnOffsetCommitResponse.java | 7 +- .../requests/UnregisterBrokerRequest.java | 11 +- .../requests/UnregisterBrokerResponse.java | 11 +- .../requests/UpdateFeaturesRequest.java | 7 +- .../requests/UpdateFeaturesResponse.java | 11 +- .../requests/UpdateRaftVoterRequest.java | 10 +- .../requests/UpdateRaftVoterResponse.java | 7 +- .../kafka/common/requests/VoteRequest.java | 7 +- .../kafka/common/requests/VoteResponse.java | 11 +- .../requests/WriteShareGroupStateRequest.java | 33 +- .../WriteShareGroupStateResponse.java | 64 +- .../requests/WriteTxnMarkersRequest.java | 7 +- .../requests/WriteTxnMarkersResponse.java | 10 +- .../kafka/common/security/JaasContext.java | 35 +- .../kafka/common/security/JaasUtils.java | 3 - .../security/auth/KafkaPrincipalBuilder.java | 6 +- .../DefaultKafkaPrincipalBuilder.java | 3 +- .../SaslClientAuthenticator.java | 2 +- .../SaslServerAuthenticator.java | 4 +- .../OAuthBearerLoginCallbackHandler.java | 90 +- .../OAuthBearerValidatorCallbackHandler.java | 170 +- .../internals/OAuthBearerSaslClient.java | 5 +- .../internals/OAuthBearerSaslServer.java | 5 +- .../secured/AccessTokenRetriever.java | 68 + .../secured/AccessTokenRetrieverFactory.java | 110 + .../secured/AccessTokenValidator.java | 64 + .../secured/AccessTokenValidatorFactory.java | 75 + .../secured/ClaimValidationUtils.java | 40 +- .../CloseableVerificationKeyResolver.java | 21 +- .../internals/secured/ConfigurationUtils.java | 223 +- .../internals/secured/FileTokenRetriever.java | 57 + .../secured/HttpAccessTokenRetriever.java | 400 + .../internals/secured/Initable.java | 35 + .../internals/secured/JaasOptionsUtils.java | 39 +- .../JwksFileVerificationKeyResolver.java | 63 +- .../secured/LoginAccessTokenValidator.java | 132 + .../secured/RefreshingHttpsJwks.java | 11 +- ...shingHttpsJwksVerificationKeyResolver.java | 18 +- .../internals/secured/SerializedJwt.java | 10 +- .../internals/secured/ValidateException.java | 47 + .../ValidatorAccessTokenValidator.java | 209 + .../VerificationKeyResolverFactory.java | 143 +- .../unsecured/OAuthBearerUnsecuredJws.java | 6 +- .../plain/internals/PlainSaslServer.java | 5 +- .../scram/internals/ScramSaslClient.java | 4 +- .../scram/internals/ScramSaslServer.java | 5 +- .../security/ssl/DefaultSslEngineFactory.java | 4 +- .../common/serialization/Deserializer.java | 69 +- .../common/serialization/Serializer.java | 49 +- .../internals/ClientTelemetryReporter.java | 23 +- .../internals/ClientTelemetryUtils.java | 25 +- .../kafka/common/utils/AppInfoParser.java | 56 +- .../common/utils/ByteBufferUnmapper.java | 4 +- .../apache/kafka/common/utils/Checksums.java | 35 +- .../common/utils/ChildFirstClassLoader.java | 2 +- .../kafka/common/utils/ConfigUtils.java | 3 +- .../org/apache/kafka/common/utils/Crc32C.java | 33 +- .../kafka/common/utils/FlattenedIterator.java | 45 + .../apache/kafka/common/utils/LogContext.java | 9 +- .../common/utils/LoggingSignalHandler.java | 4 +- .../kafka/common/utils/PureJavaCrc32C.java | 645 + .../kafka/common/utils/SecurityUtils.java | 2 +- .../org/apache/kafka/common/utils/Utils.java | 75 +- .../kafka/server/authorizer/Authorizer.java | 4 - .../server/quota/ClientQuotaCallback.java | 7 - .../common/message/AddRaftVoterRequest.json | 7 +- .../common/message/AddRaftVoterResponse.json | 3 +- .../AlterPartitionReassignmentsRequest.json | 5 +- .../AlterPartitionReassignmentsResponse.json | 5 +- .../message/BrokerHeartbeatRequest.json | 2 +- .../common/message/DeleteAclsRequest.json | 2 +- .../common/message/DeleteRecordsRequest.json | 2 +- .../message/DeleteShareGroupStateRequest.json | 5 +- .../message/DescribeClusterResponse.json | 2 +- .../message/DescribeLogDirsResponse.json | 6 +- .../common/message/FetchRequest.json | 11 +- .../common/message/FetchResponse.json | 4 +- .../common/message/InitProducerIdRequest.json | 12 +- .../message/InitProducerIdResponse.json | 10 +- .../InitializeShareGroupStateRequest.json | 3 +- .../common/message/JoinGroupRequest.json | 4 +- .../common/message/JoinGroupResponse.json | 4 +- .../ListClientMetricsResourcesRequest.json | 26 + .../ListClientMetricsResourcesResponse.json | 33 + .../common/message/ListOffsetsRequest.json | 4 +- .../common/message/ListOffsetsResponse.json | 4 +- .../message/ListTransactionsRequest.json | 7 +- .../message/ListTransactionsResponse.json | 4 +- .../common/message/OffsetCommitRequest.json | 9 +- .../common/message/OffsetCommitResponse.json | 9 +- .../common/message/OffsetFetchRequest.json | 9 +- .../common/message/OffsetFetchResponse.json | 9 +- .../common/message/ProduceRequest.json | 6 +- .../common/message/ProduceResponse.json | 6 +- .../main/resources/common/message/README.md | 4 +- .../message/ReadShareGroupStateRequest.json | 5 +- .../message/ReadShareGroupStateResponse.json | 7 +- .../ReadShareGroupStateSummaryRequest.json | 5 +- .../ReadShareGroupStateSummaryResponse.json | 2 - .../message/ShareAcknowledgeRequest.json | 19 +- .../message/ShareAcknowledgeResponse.json | 13 +- .../common/message/ShareFetchRequest.json | 21 +- .../common/message/ShareFetchResponse.json | 19 +- .../message/ShareGroupDescribeRequest.json | 9 +- .../message/ShareGroupDescribeResponse.json | 8 +- .../message/ShareGroupHeartbeatRequest.json | 13 +- .../message/ShareGroupHeartbeatResponse.json | 10 +- .../message/WriteShareGroupStateRequest.json | 11 +- .../message/WriteShareGroupStateResponse.json | 1 - .../clients/ClusterConnectionStatesTest.java | 4 +- .../clients/FetchSessionHandlerTest.java | 63 +- .../kafka/clients/InFlightRequestsTest.java | 2 +- .../apache/kafka/clients/MetadataTest.java | 96 +- .../kafka/clients/NetworkClientTest.java | 18 + .../clients/admin/AdminClientTestUtils.java | 51 +- .../kafka/clients/admin/ConfigTest.java | 2 +- .../DeleteConsumerGroupOffsetsResultTest.java | 12 +- .../clients/admin/KafkaAdminClientTest.java | 4811 ++----- .../admin/ListConsumerGroupsOptionsTest.java | 4 +- .../admin/ListTransactionsResultTest.java | 12 +- .../clients/admin/MemberDescriptionTest.java | 32 +- .../kafka/clients/admin/MockAdminClient.java | 115 +- ...oveMembersFromConsumerGroupResultTest.java | 12 +- .../admin/internals/AdminApiDriverTest.java | 3 +- .../AdminBootstrapAddressesTest.java | 18 +- .../internals/AdminMetadataManagerTest.java | 11 - .../AllBrokersStrategyIntegrationTest.java | 4 +- .../internals/CoordinatorStrategyTest.java | 20 +- ...DeleteConsumerGroupOffsetsHandlerTest.java | 4 +- .../DeleteConsumerGroupsHandlerTest.java | 112 +- .../DescribeConsumerGroupsHandlerTest.java | 5 +- .../ListConsumerGroupOffsetsHandlerTest.java | 479 +- .../ListTransactionsHandlerTest.java | 38 - ...artitionLeaderStrategyIntegrationTest.java | 2 +- .../clients/consumer/ConsumerConfigTest.java | 38 +- .../ConsumerPartitionAssignorTest.java | 1 - .../CooperativeStickyAssignorTest.java | 4 +- .../clients/consumer/KafkaConsumerTest.java | 809 +- .../KafkaShareConsumerMetricsTest.java | 141 +- .../clients/consumer/MockConsumerTest.java | 28 - .../consumer/OffsetAndMetadataTest.java | 15 - .../clients/consumer/StickyAssignorTest.java | 12 - .../internals/AbstractCoordinatorTest.java | 119 +- .../internals/AbstractStickyAssignorTest.java | 14 +- ...nowledgementCommitCallbackHandlerTest.java | 8 +- .../internals/AcknowledgementsTest.java | 6 +- .../ApplicationEventHandlerTest.java | 10 +- .../internals/AsyncKafkaConsumerTest.java | 320 +- .../internals/BackgroundEventHandlerTest.java | 15 +- .../internals/CommitRequestManagerTest.java | 204 +- .../internals/CompletedFetchTest.java | 8 +- .../internals/ConsumerCoordinatorTest.java | 237 +- .../ConsumerHeartbeatRequestManagerTest.java | 144 +- .../internals/ConsumerInterceptorsTest.java | 4 +- .../ConsumerMembershipManagerTest.java | 139 +- .../internals/ConsumerMetadataTest.java | 73 - .../internals/ConsumerNetworkClientTest.java | 36 +- .../internals/ConsumerNetworkThreadTest.java | 102 +- .../CoordinatorRequestManagerTest.java | 9 +- .../consumer/internals/FetchBufferTest.java | 9 +- .../internals/FetchCollectorTest.java | 12 +- .../internals/FetchMetricsManagerTest.java | 238 +- .../internals/FetchRequestManagerTest.java | 65 +- .../consumer/internals/FetcherTest.java | 81 +- .../consumer/internals/HeartbeatTest.java | 1 - .../internals/KafkaConsumerMetricsTest.java | 37 +- .../internals/NetworkClientDelegateTest.java | 22 +- .../consumer/internals/OffsetFetcherTest.java | 8 +- .../OffsetForLeaderEpochClientTest.java | 6 +- .../internals/OffsetsRequestManagerTest.java | 5 +- .../internals/RequestManagersTest.java | 57 +- .../internals/ShareCompletedFetchTest.java | 13 +- .../ShareConsumeRequestManagerTest.java | 1873 +-- .../internals/ShareConsumerImplTest.java | 272 +- .../internals/ShareFetchBufferTest.java | 5 +- .../internals/ShareFetchCollectorTest.java | 3 +- .../ShareFetchMetricsManagerTest.java | 39 - .../ShareHeartbeatRequestManagerTest.java | 75 +- .../internals/ShareMembershipManagerTest.java | 87 +- .../internals/ShareSessionHandlerTest.java | 172 +- .../internals/SubscriptionStateTest.java | 380 +- .../TopicMetadataRequestManagerTest.java | 1 - .../events/ApplicationEventProcessorTest.java | 114 +- .../metrics/AsyncConsumerMetricsTest.java | 190 +- .../clients/producer/KafkaProducerTest.java | 841 +- .../clients/producer/MockProducerTest.java | 18 +- .../clients/producer/ProducerConfigTest.java | 57 - .../producer/internals/BufferPoolTest.java | 4 +- .../internals/KafkaProducerMetricsTest.java | 4 +- .../producer/internals/ProducerBatchTest.java | 2 +- .../internals/ProducerInterceptorsTest.java | 108 +- .../internals/ProducerMetadataTest.java | 10 +- .../internals/RecordAccumulatorTest.java | 173 +- .../producer/internals/SenderTest.java | 378 +- .../internals/TransactionManagerTest.java | 374 +- .../apache/kafka/common/KafkaFutureTest.java | 2 +- .../org/apache/kafka/common/UuidTest.java | 10 +- .../kafka/common/acl/AclOperationTest.java | 3 +- .../common/config/AbstractConfigTest.java | 28 +- .../kafka/common/config/ConfigDefTest.java | 79 +- .../provider/EnvVarConfigProviderTest.java | 4 +- .../feature/SupportedVersionRangeTest.java | 2 +- .../header/internals/RecordHeadersTest.java | 103 +- .../kafka/common/message/MessageTest.java | 492 +- .../message/NullableStructMessageTest.java | 2 +- .../common/message/RecordsSerdeTest.java | 2 +- .../message/SimpleExampleMessageTest.java | 6 +- .../kafka/common/metrics/MetricsTest.java | 70 +- .../kafka/common/metrics/SensorTest.java | 12 +- .../common/network/ChannelBuildersTest.java | 28 +- .../kafka/common/network/NioEchoServer.java | 5 + .../kafka/common/network/SelectorTest.java | 4 +- .../common/network/SslTransportLayerTest.java | 17 +- .../common/network/Tls13SelectorTest.java | 8 - .../kafka/common/protocol/ErrorsTest.java | 2 +- .../kafka/common/protocol/ProtocolTest.java | 13 - .../types/ProtocolSerializationTest.java | 94 +- .../record/EndTransactionMarkerTest.java | 89 +- .../kafka/common/record/FileRecordsTest.java | 380 +- .../record/MemoryRecordsBuilderTest.java | 179 + .../common/record/MemoryRecordsTest.java | 174 +- .../common/replica/ReplicaSelectorTest.java | 6 +- .../AddPartitionsToTxnResponseTest.java | 5 +- .../requests/CreateAclsRequestTest.java | 9 +- .../requests/DeleteAclsRequestTest.java | 6 +- .../requests/DeleteAclsResponseTest.java | 7 +- .../requests/DeleteGroupsResponseTest.java | 3 +- .../requests/DescribeAclsRequestTest.java | 3 + .../requests/DescribeAclsResponseTest.java | 12 +- .../common/requests/JoinGroupRequestTest.java | 28 +- .../requests/LeaveGroupResponseTest.java | 12 +- .../requests/ListOffsetsRequestTest.java | 14 +- .../requests/OffsetCommitRequestTest.java | 11 +- .../requests/OffsetCommitResponseTest.java | 9 +- .../requests/OffsetFetchRequestTest.java | 406 +- .../requests/OffsetFetchResponseTest.java | 704 +- .../common/requests/ProduceRequestTest.java | 68 +- .../common/requests/ProduceResponseTest.java | 15 +- .../common/requests/RequestContextTest.java | 4 +- .../common/requests/RequestResponseTest.java | 761 +- .../requests/TxnOffsetCommitResponseTest.java | 2 +- .../requests/UpdateFeaturesRequestTest.java | 22 +- .../requests/WriteTxnMarkersResponseTest.java | 3 +- .../common/security/JaasContextTest.java | 37 +- .../ClientAuthenticationFailureTest.java | 4 +- .../authenticator/SaslAuthenticatorTest.java | 189 +- .../SaslServerAuthenticatorTest.java | 42 +- .../security/kerberos/KerberosRuleTest.java | 12 +- .../OAuthBearerLoginCallbackHandlerTest.java | 127 +- ...uthBearerValidatorCallbackHandlerTest.java | 123 +- .../internals/secured/AccessTokenBuilder.java | 8 +- .../AccessTokenRetrieverFactoryTest.java | 111 + .../AccessTokenValidatorFactoryTest.java | 73 + .../secured/AccessTokenValidatorTest.java | 92 + .../secured/ClaimValidationUtilsTest.java | 165 + .../secured/ConfigurationUtilsTest.java | 93 +- .../secured/HttpAccessTokenRetrieverTest.java | 238 + .../LoginAccessTokenValidatorTest.java | 27 + .../internals/secured/OAuthBearerTest.java | 149 +- .../ValidatorAccessTokenValidatorTest.java | 95 + .../VerificationKeyResolverFactoryTest.java | 36 +- ...arerUnsecuredLoginCallbackHandlerTest.java | 13 +- .../ssl/DefaultSslEngineFactoryTest.java | 12 +- .../common/security/ssl/SslFactoryTest.java | 13 +- .../ClientTelemetryReporterTest.java | 131 - .../internals/ClientTelemetryUtilsTest.java | 43 +- .../kafka/common/utils/AppInfoParserTest.java | 77 +- .../kafka/common/utils/ChecksumsTest.java | 13 +- .../apache/kafka/common/utils/Crc32CTest.java | 21 + .../common/utils/FlattenedIteratorTest.java | 116 + .../apache/kafka/common/utils/UtilsTest.java | 167 - .../annotation/ApiKeyVersionsProvider.java | 26 +- .../annotation/ApiKeyVersionsSource.java | 3 - .../server/policy/AlterConfigPolicyTest.java | 4 +- .../kafka/test/MockConsumerInterceptor.java | 1 - .../apache/kafka/test/MockDeserializer.java | 8 +- .../kafka/test/MockProducerInterceptor.java | 1 - .../org/apache/kafka/test/MockSerializer.java | 8 +- .../org/apache/kafka/test/TestSslUtils.java | 53 +- .../java/org/apache/kafka/test/TestUtils.java | 103 +- committer-tools/README.md | 8 +- committer-tools/kafka-merge-pr.py | 2 +- committer-tools/reviewers.py | 54 +- committer-tools/update-cache.sh | 19 +- committer-tools/verify_license.py | 17 +- config/broker.properties | 4 +- config/consumer.properties | 130 +- config/controller.properties | 4 +- config/log4j2.yaml | 14 +- config/producer.properties | 134 +- config/server.properties | 4 +- .../connect/connector/ConnectorContext.java | 24 - .../ConnectorClientConfigOverridePolicy.java | 5 - .../kafka/connect/data/ConnectSchema.java | 75 +- .../org/apache/kafka/connect/data/Schema.java | 17 +- .../kafka/connect/data/SchemaBuilder.java | 4 +- .../kafka/connect/data/SchemaProjector.java | 60 +- .../org/apache/kafka/connect/data/Values.java | 93 +- .../kafka/connect/header/ConnectHeader.java | 3 +- .../kafka/connect/header/ConnectHeaders.java | 6 +- .../connect/rest/ConnectRestExtension.java | 4 - .../kafka/connect/sink/SinkTaskContext.java | 23 - .../connect/source/SourceTaskContext.java | 23 - .../kafka/connect/storage/Converter.java | 13 +- .../kafka/connect/storage/ConverterType.java | 16 +- .../connect/storage/HeaderConverter.java | 4 - .../connect/transforms/Transformation.java | 4 - .../transforms/predicates/Predicate.java | 4 - .../ConnectorReconfigurationTest.java | 9 +- .../kafka/connect/data/ConnectSchemaTest.java | 81 +- .../kafka/connect/data/DecimalTest.java | 4 +- .../kafka/connect/data/SchemaBuilderTest.java | 26 +- .../connect/data/SchemaProjectorTest.java | 60 +- .../apache/kafka/connect/data/StructTest.java | 22 +- .../apache/kafka/connect/data/ValuesTest.java | 112 +- .../connect/header/ConnectHeadersTest.java | 7 +- .../kafka/connect/sink/SinkConnectorTest.java | 7 - .../connect/source/SourceConnectorTest.java | 7 - .../connect/source/SourceRecordTest.java | 5 +- .../connect/storage/StringConverterTest.java | 6 +- .../connect/util/ConnectorUtilsTest.java | 38 +- .../auth/extension/JaasBasicAuthFilter.java | 6 +- .../BasicAuthSecurityRestExtensionTest.java | 4 +- .../extension/JaasBasicAuthFilterTest.java | 5 +- .../connect/file/FileStreamSinkTask.java | 2 +- .../connect/file/FileStreamSourceTask.java | 2 +- .../connect/file/FileStreamSinkTaskTest.java | 13 +- .../file/FileStreamSourceConnectorTest.java | 40 +- .../file/FileStreamSourceTaskTest.java | 25 +- ...eStreamSourceConnectorIntegrationTest.java | 5 +- .../kafka/connect/json/JsonConverter.java | 50 +- .../connect/json/JsonConverterConfig.java | 25 - .../kafka/connect/json/JsonDeserializer.java | 3 +- .../kafka/connect/json/JsonSerializer.java | 3 +- .../connect/json/JsonConverterConfigTest.java | 2 +- .../kafka/connect/json/JsonConverterTest.java | 109 +- .../kafka/connect/mirror/MirrorClient.java | 3 +- .../connect/mirror/MirrorClientConfig.java | 15 +- .../connect/mirror/MirrorClientTest.java | 36 +- .../connect/mirror/ReplicationPolicyTest.java | 3 +- .../kafka/connect/mirror/CheckpointStore.java | 3 +- .../connect/mirror/DefaultGroupFilter.java | 2 - .../mirror/MirrorCheckpointConfig.java | 2 - .../mirror/MirrorCheckpointConnector.java | 16 +- .../mirror/MirrorCheckpointMetrics.java | 4 +- .../connect/mirror/MirrorCheckpointTask.java | 10 +- .../mirror/MirrorCheckpointTaskConfig.java | 11 +- .../connect/mirror/MirrorConnectorConfig.java | 7 - .../mirror/MirrorHeartbeatConnector.java | 5 +- .../connect/mirror/MirrorHeartbeatTask.java | 3 +- .../kafka/connect/mirror/MirrorMaker.java | 54 +- .../connect/mirror/MirrorMakerConfig.java | 19 +- .../connect/mirror/MirrorSourceConfig.java | 3 - .../connect/mirror/MirrorSourceConnector.java | 9 +- .../connect/mirror/MirrorSourceMetrics.java | 4 +- .../connect/mirror/MirrorSourceTask.java | 139 + .../mirror/MirrorSourceTaskConfig.java | 7 +- .../kafka/connect/mirror/MirrorUtils.java | 11 +- .../kafka/connect/mirror/OffsetSync.java | 24 +- .../connect/mirror/OffsetSyncWriter.java | 3 +- .../connect/mirror/rest/MirrorRestServer.java | 9 +- .../connect/mirror/CheckpointStoreTest.java | 10 +- .../mirror/MirrorCheckpointConfigTest.java | 6 +- .../mirror/MirrorCheckpointConnectorTest.java | 57 +- .../mirror/MirrorCheckpointTaskTest.java | 40 +- .../mirror/MirrorHeartBeatConnectorTest.java | 23 +- .../mirror/MirrorHeartbeatTaskTest.java | 4 +- .../connect/mirror/MirrorMakerConfigTest.java | 28 +- .../mirror/MirrorSourceConfigTest.java | 3 +- .../mirror/MirrorSourceConnectorTest.java | 76 +- .../connect/mirror/MirrorSourceTaskTest.java | 9 +- .../kafka/connect/mirror/MirrorUtilsTest.java | 15 +- .../connect/mirror/OffsetSyncWriterTest.java | 8 +- .../clients/admin/FakeLocalMetadataStore.java | 2 +- .../DedicatedMirrorIntegrationTest.java | 17 +- .../IdentityReplicationIntegrationTest.java | 10 +- .../MirrorConnectorsIntegrationBaseTest.java | 82 +- ...rConnectorsIntegrationExactlyOnceTest.java | 6 +- ...hCustomForwardingAdminIntegrationTest.java | 20 +- .../kafka/connect/cli/AbstractConnectCli.java | 6 +- .../kafka/connect/cli/ConnectDistributed.java | 6 +- .../kafka/connect/cli/ConnectStandalone.java | 3 +- .../kafka/connect/runtime/AbstractHerder.java | 510 +- .../kafka/connect/runtime/AbstractStatus.java | 22 +- .../runtime/AbstractWorkerSourceTask.java | 70 +- .../kafka/connect/runtime/ConnectMetrics.java | 82 +- .../runtime/ConnectMetricsRegistry.java | 94 +- .../connect/runtime/ConnectorConfig.java | 396 +- .../connect/runtime/ConnectorStatus.java | 8 +- .../runtime/ExactlyOnceWorkerSourceTask.java | 60 +- .../apache/kafka/connect/runtime/Herder.java | 42 +- .../runtime/HerderConnectorContext.java | 13 +- .../apache/kafka/connect/runtime/Loggers.java | 348 +- .../kafka/connect/runtime/RestartPlan.java | 13 +- .../kafka/connect/runtime/RestartRequest.java | 72 +- .../kafka/connect/runtime/SessionKey.java | 51 +- .../connect/runtime/SinkConnectorConfig.java | 35 +- .../runtime/SourceConnectorConfig.java | 27 +- .../connect/runtime/SubmittedRecords.java | 91 +- .../kafka/connect/runtime/TaskStatus.java | 4 +- .../connect/runtime/TopicCreationConfig.java | 7 +- .../kafka/connect/runtime/TopicStatus.java | 3 +- .../connect/runtime/TransformationChain.java | 5 - .../connect/runtime/TransformationStage.java | 95 +- .../apache/kafka/connect/runtime/Worker.java | 348 +- .../kafka/connect/runtime/WorkerConfig.java | 69 +- .../connect/runtime/WorkerConnector.java | 25 +- .../kafka/connect/runtime/WorkerSinkTask.java | 70 +- .../runtime/WorkerSinkTaskContext.java | 19 +- .../connect/runtime/WorkerSourceTask.java | 19 +- .../runtime/WorkerSourceTaskContext.java | 11 +- .../kafka/connect/runtime/WorkerTask.java | 77 +- .../runtime/distributed/ConnectAssignor.java | 4 +- .../runtime/distributed/ConnectProtocol.java | 4 +- .../ConnectProtocolCompatibility.java | 16 +- .../distributed/DistributedConfig.java | 19 +- .../distributed/DistributedHerder.java | 33 +- .../runtime/distributed/EagerAssignor.java | 10 +- .../distributed/ExtendedAssignment.java | 11 +- .../IncrementalCooperativeAssignor.java | 43 +- .../distributed/WorkerCoordinator.java | 34 +- .../distributed/WorkerGroupMember.java | 3 +- .../errors/DeadLetterQueueReporter.java | 9 +- .../errors/RetryWithToleranceOperator.java | 9 +- .../health/ConnectClusterDetailsImpl.java | 13 +- .../isolation/DelegatingClassLoader.java | 175 +- .../runtime/isolation/PluginClassLoader.java | 8 +- .../connect/runtime/isolation/PluginDesc.java | 9 +- .../runtime/isolation/PluginScanResult.java | 3 +- .../runtime/isolation/PluginScanner.java | 2 +- .../runtime/isolation/PluginSource.java | 33 +- .../runtime/isolation/PluginUtils.java | 46 +- .../connect/runtime/isolation/Plugins.java | 324 +- .../rest/ConnectRestExtensionContextImpl.java | 24 +- .../runtime/rest/ConnectRestServer.java | 9 +- .../runtime/rest/HerderRequestHandler.java | 2 +- .../connect/runtime/rest/RestClient.java | 23 +- .../connect/runtime/rest/RestServer.java | 29 +- .../runtime/rest/RestServerConfig.java | 59 +- .../runtime/rest/entities/ConfigInfo.java | 48 +- .../runtime/rest/entities/ConfigInfos.java | 84 +- .../runtime/rest/entities/ConfigKeyInfo.java | 158 +- .../rest/entities/ConfigValueInfo.java | 89 +- .../runtime/rest/entities/ConnectorInfo.java | 65 +- .../rest/entities/ConnectorOffset.java | 53 +- .../rest/entities/ConnectorOffsets.java | 36 +- .../rest/entities/ConnectorStateInfo.java | 67 +- .../rest/entities/CreateConnectorRequest.java | 72 +- .../runtime/rest/entities/ErrorMessage.java | 42 +- .../runtime/rest/entities/LoggerLevel.java | 52 +- .../runtime/rest/entities/Message.java | 33 +- .../runtime/rest/entities/PluginInfo.java | 74 +- .../runtime/rest/entities/TaskInfo.java | 41 +- .../rest/errors/ConnectExceptionMapper.java | 3 +- .../resources/ConnectorPluginsResource.java | 28 +- .../rest/resources/ConnectorsResource.java | 19 +- .../resources/InternalClusterResource.java | 2 +- .../rest/resources/LoggingResource.java | 4 +- .../connect/runtime/rest/util/SSLUtils.java | 10 +- .../runtime/standalone/StandaloneHerder.java | 49 +- .../connect/storage/ClusterConfigState.java | 25 +- .../storage/ConnectorOffsetBackingStore.java | 5 +- .../storage/FileOffsetBackingStore.java | 3 +- .../storage/KafkaConfigBackingStore.java | 18 +- .../storage/KafkaOffsetBackingStore.java | 7 +- .../storage/KafkaStatusBackingStore.java | 18 +- .../storage/MemoryConfigBackingStore.java | 15 +- .../storage/MemoryStatusBackingStore.java | 5 +- .../storage/OffsetStorageReaderImpl.java | 7 +- .../kafka/connect/storage/OffsetUtils.java | 3 +- .../storage/PrivilegedWriteException.java | 3 + .../kafka/connect/tools/PredicateDoc.java | 7 +- .../connect/tools/TransformationDoc.java | 14 +- .../apache/kafka/connect/util/Callback.java | 1 - .../kafka/connect/util/ConnectorTaskId.java | 30 +- .../kafka/connect/util/KafkaBasedLog.java | 5 +- .../kafka/connect/util/LoggingContext.java | 4 +- .../apache/kafka/connect/util/SinkUtils.java | 3 +- .../org/apache/kafka/connect/util/Table.java | 5 +- .../apache/kafka/connect/util/TopicAdmin.java | 25 +- .../kafka/connect/util/TopicCreation.java | 3 +- .../connect/util/TopicCreationGroup.java | 6 +- ...nnectorClientConfigOverridePolicyTest.java | 3 +- ...nnectorClientConfigOverridePolicyTest.java | 4 +- .../converters/BooleanConverterTest.java | 4 +- .../converters/ByteArrayConverterTest.java | 4 +- .../integration/BlockingConnectorTest.java | 7 +- .../ConnectWorkerIntegrationTest.java | 101 +- .../ConnectorClientPolicyIntegrationTest.java | 2 +- .../connect/integration/ConnectorHandle.java | 18 +- .../ConnectorRestartApiIntegrationTest.java | 11 +- .../ConnectorTopicsIntegrationTest.java | 49 +- .../ConnectorValidationIntegrationTest.java | 14 +- .../ErrantRecordSinkConnector.java | 4 +- .../ErrorHandlingIntegrationTest.java | 2 +- .../ExactlyOnceSourceIntegrationTest.java | 105 +- .../ExampleConnectIntegrationTest.java | 4 +- .../InternalTopicsIntegrationTest.java | 7 +- .../integration/MonitorableSinkConnector.java | 141 +- .../MonitorableSourceConnector.java | 289 +- .../OffsetsApiIntegrationTest.java | 61 +- ...alanceSourceConnectorsIntegrationTest.java | 16 +- .../RestExtensionIntegrationTest.java | 5 +- .../RestForwardingIntegrationTest.java | 9 +- .../SessionedProtocolIntegrationTest.java | 2 +- .../SinkConnectorsIntegrationTest.java | 10 +- .../SourceConnectorsIntegrationTest.java | 7 +- .../StandaloneWorkerIntegrationTest.java | 22 +- .../integration/StartAndStopLatchTest.java | 5 +- .../connect/integration/StartsAndStops.java | 18 +- .../TransformationIntegrationTest.java | 9 +- .../connect/runtime/AbstractHerderTest.java | 290 +- .../runtime/AbstractWorkerSourceTaskTest.java | 152 +- .../connect/runtime/ConnectMetricsTest.java | 250 +- .../connect/runtime/ConnectorConfigTest.java | 25 +- .../runtime/ErrorHandlingTaskTest.java | 68 +- .../ExactlyOnceWorkerSourceTaskTest.java | 46 +- .../runtime/InternalSinkRecordTest.java | 4 +- .../kafka/connect/runtime/LoggersTest.java | 42 +- .../connect/runtime/MockConnectMetrics.java | 1 - .../connect/runtime/MockLoggersTest.java | 29 +- .../connect/runtime/RestartPlanTest.java | 32 +- .../runtime/SampleSourceConnector.java | 2 +- .../SourceTaskOffsetCommitterTest.java | 7 +- .../connect/runtime/SubmittedRecordsTest.java | 41 +- .../runtime/TransformationConfigTest.java | 39 +- .../runtime/TransformationStageTest.java | 36 +- .../connect/runtime/WorkerConfigTest.java | 7 +- .../runtime/WorkerConfigTransformerTest.java | 19 +- .../connect/runtime/WorkerConnectorTest.java | 17 +- .../connect/runtime/WorkerSinkTaskTest.java | 163 +- .../runtime/WorkerSinkTaskThreadedTest.java | 61 +- .../connect/runtime/WorkerSourceTaskTest.java | 57 +- .../kafka/connect/runtime/WorkerTaskTest.java | 8 +- .../kafka/connect/runtime/WorkerTest.java | 490 +- .../connect/runtime/WorkerTestUtils.java | 34 +- .../ConnectProtocolCompatibilityTest.java | 68 +- .../distributed/DistributedConfigTest.java | 13 +- .../distributed/DistributedHerderTest.java | 730 +- .../IncrementalCooperativeAssignorTest.java | 102 +- .../WorkerCoordinatorIncrementalTest.java | 121 +- .../distributed/WorkerCoordinatorTest.java | 110 +- .../distributed/WorkerGroupMemberTest.java | 5 +- .../runtime/errors/ErrorReporterTest.java | 26 +- .../RetryWithToleranceOperatorTest.java | 42 +- .../WorkerErrantRecordReporterTest.java | 6 +- .../health/ConnectClusterStateImplTest.java | 7 +- .../isolation/DelegatingClassLoaderTest.java | 19 +- .../runtime/isolation/PluginScannerTest.java | 13 +- .../runtime/isolation/PluginUtilsTest.java | 74 +- .../runtime/isolation/PluginsTest.java | 62 +- .../runtime/isolation/SamplingTestPlugin.java | 5 +- .../isolation/SynchronizationTest.java | 13 +- .../runtime/isolation/TestPlugins.java | 45 +- .../runtime/rest/ConnectRestServerTest.java | 74 +- .../connect/runtime/rest/RestClientTest.java | 21 +- .../runtime/rest/RestServerConfigTest.java | 14 +- .../rest/entities/ConnectorOffsetsTest.java | 4 +- .../entities/CreateConnectorRequestTest.java | 4 +- .../ConnectorPluginsResourceTest.java | 56 +- .../resources/ConnectorsResourceTest.java | 76 +- .../InternalConnectResourceTest.java | 5 +- .../rest/resources/LoggingResourceTest.java | 12 +- .../runtime/rest/util/SSLUtilsTest.java | 15 +- .../standalone/StandaloneConfigTest.java | 1 - .../standalone/StandaloneHerderTest.java | 247 +- .../ConnectorOffsetBackingStoreTest.java | 4 +- .../storage/FileOffsetBackingStoreTest.java | 40 +- .../storage/KafkaConfigBackingStoreTest.java | 216 +- .../storage/KafkaOffsetBackingStoreTest.java | 46 +- .../KafkaStatusBackingStoreFormatTest.java | 12 +- .../storage/KafkaStatusBackingStoreTest.java | 8 +- .../storage/MemoryConfigBackingStoreTest.java | 24 +- .../storage/MemoryStatusBackingStoreTest.java | 5 +- .../storage/OffsetStorageWriterTest.java | 4 +- .../connect/storage/OffsetUtilsTest.java | 20 +- .../kafka/connect/util/ConnectUtilsTest.java | 5 +- .../kafka/connect/util/KafkaBasedLogTest.java | 14 +- .../connect/util/SharedTopicAdminTest.java | 3 +- .../kafka/connect/util/SinkUtilsTest.java | 3 +- .../apache/kafka/connect/util/TableTest.java | 1 + .../kafka/connect/util/TopicAdminTest.java | 84 +- .../kafka/connect/util/TopicCreationTest.java | 86 +- .../util/clusters/ConnectAssertions.java | 5 +- .../util/clusters/EmbeddedConnect.java | 27 +- .../util/clusters/EmbeddedConnectCluster.java | 2 +- .../clusters/EmbeddedConnectStandalone.java | 13 +- .../util/clusters/EmbeddedKafkaCluster.java | 38 +- .../connect/util/clusters/WorkerHandle.java | 3 +- ...rg.apache.kafka.connect.sink.SinkConnector | 1 - ...pache.kafka.connect.source.SourceConnector | 1 - .../test/plugins/ReadVersionFromResource.java | 6 +- .../test/plugins/ReadVersionFromResource.java | 6 +- .../kafka/connect/tools/MockConnector.java | 3 +- .../kafka/connect/tools/MockSourceTask.java | 3 +- .../kafka/connect/tools/SchemaSourceTask.java | 9 +- .../connect/tools/VerifiableSourceTask.java | 7 +- .../apache/kafka/connect/transforms/Cast.java | 86 +- .../kafka/connect/transforms/DropHeaders.java | 4 +- .../kafka/connect/transforms/HeaderFrom.java | 13 +- .../kafka/connect/transforms/MaskField.java | 54 +- .../connect/transforms/ReplaceField.java | 15 +- .../connect/transforms/SetSchemaMetadata.java | 3 +- .../transforms/TimestampConverter.java | 42 +- .../kafka/connect/transforms/ValueToKey.java | 3 +- .../transforms/field/FieldSyntaxVersion.java | 8 +- .../transforms/field/SingleFieldPath.java | 5 +- .../kafka/connect/transforms/CastTest.java | 83 +- .../connect/transforms/DropHeadersTest.java | 9 +- .../connect/transforms/ExtractFieldTest.java | 21 +- .../kafka/connect/transforms/FlattenTest.java | 47 +- .../connect/transforms/HeaderFromTest.java | 36 +- .../connect/transforms/HoistFieldTest.java | 9 +- .../connect/transforms/InsertFieldTest.java | 9 +- .../connect/transforms/InsertHeaderTest.java | 5 +- .../connect/transforms/MaskFieldTest.java | 140 +- .../transforms/SetSchemaMetadataTest.java | 5 +- .../transforms/TimestampConverterTest.java | 43 +- .../transforms/TimestampRouterTest.java | 4 +- .../connect/transforms/ValueToKeyTest.java | 7 +- .../field/FieldPathNotationTest.java | 6 +- .../field/FieldSyntaxVersionTest.java | 10 +- .../predicates/HasHeaderKeyTest.java | 23 +- .../predicates/TopicNameMatchesTest.java | 7 +- .../util/NonEmptyListValidatorTest.java | 6 +- .../runtime/CoordinatorExecutorImpl.java | 13 +- .../common/runtime/CoordinatorLoader.java | 46 +- .../runtime/CoordinatorMetricsShard.java | 2 +- .../CoordinatorOperationExceptionHelper.java | 39 +- .../common/runtime/CoordinatorPlayback.java | 4 +- .../common/runtime/CoordinatorRecord.java | 48 +- .../runtime/CoordinatorRecordSerde.java | 19 +- .../common/runtime/CoordinatorRuntime.java | 78 +- .../CoordinatorRuntimeMetricsImpl.java | 12 +- .../common/runtime/CoordinatorShard.java | 8 +- .../common/runtime/EventAccumulator.java | 8 +- .../common/runtime/HdrHistogram.java | 11 +- .../common/runtime/PartitionWriter.java | 2 +- .../runtime/SnapshottableCoordinator.java | 11 +- .../runtime/CoordinatorExecutorImplTest.java | 12 +- .../common/runtime/CoordinatorRecordTest.java | 16 +- .../common/runtime/CoordinatorResultTest.java | 10 +- .../CoordinatorRuntimeMetricsImplTest.java | 129 +- .../runtime/CoordinatorRuntimeTest.java | 1033 +- .../runtime/InMemoryPartitionWriter.java | 4 +- .../runtime/KafkaMetricHistogramTest.java | 12 +- .../runtime/MockCoordinatorExecutor.java | 23 +- .../common/runtime/MockCoordinatorTimer.java | 46 +- .../runtime/SnapshottableCoordinatorTest.java | 1 + .../coordinator/common/runtime/TestUtil.java | 132 - .../java/kafka/docker/Log4jConfiguration.java | 3 +- .../kafka/log/remote/RemoteLogManager.java | 2251 +++ .../log/remote/RemoteLogOffsetReader.java | 77 + .../kafka/log/remote/RemoteLogReader.java | 82 + .../server/ClientRequestQuotaManager.java | 25 +- .../main/java/kafka/server/NetworkUtils.java | 4 +- .../main/java/kafka/server/QuotaFactory.java | 108 +- .../java/kafka/server/TierStateMachine.java | 41 +- .../server/builders/KafkaApisBuilder.java | 42 +- .../server/builders/LogManagerBuilder.java | 17 +- .../builders/ReplicaManagerBuilder.java | 11 +- ...DescribeTopicPartitionsRequestHandler.java | 17 +- .../server/logger/RuntimeLoggerManager.java | 15 +- .../kafka/server/share/DelayedShareFetch.java | 786 +- .../kafka/server/share/ShareFetchUtils.java | 122 +- .../kafka/server/share/SharePartition.java | 1831 +-- .../server/share/SharePartitionManager.java | 424 +- .../main/scala/kafka/MetadataLogConfig.scala | 48 + .../scala/kafka/admin/ConfigCommand.scala | 202 +- .../src/main/scala/kafka/cluster/Broker.scala | 87 + .../main/scala/kafka/cluster/EndPoint.scala | 59 + .../main/scala/kafka/cluster/Partition.scala | 213 +- .../main/scala/kafka/cluster/Replica.scala | 213 + .../BrokerEndPointNotAvailableException.scala | 22 + .../common/LogCleaningAbortedException.scala | 24 + .../common/ThreadShutdownException.scala | 24 + .../kafka/controller/ControllerContext.scala | 52 + .../kafka/controller/StateChangeLogger.scala | 5 + .../group/CoordinatorLoaderImpl.scala | 253 + .../group/CoordinatorPartitionWriter.scala | 22 +- .../coordinator/group/DelayedHeartbeat.scala | 36 + .../kafka/coordinator/group/DelayedJoin.scala | 94 + .../coordinator/group/DelayedRebalance.scala | 33 + .../kafka/coordinator/group/DelayedSync.scala | 48 + .../coordinator/group/GroupCoordinator.scala | 1872 +++ .../group/GroupCoordinatorAdapter.scala | 664 + .../coordinator/group/GroupMetadata.scala | 857 ++ .../group/GroupMetadataManager.scala | 1282 ++ .../coordinator/group/MemberMetadata.scala | 153 + .../transaction/TransactionCoordinator.scala | 278 +- .../transaction/TransactionLog.scala | 122 +- .../TransactionMarkerChannelManager.scala | 19 +- ...actionMarkerRequestCompletionHandler.scala | 4 +- .../transaction/TransactionMetadata.scala | 641 + .../transaction/TransactionStateManager.scala | 177 +- .../src/main/scala/kafka/log/LogCleaner.scala | 1339 ++ .../scala/kafka/log/LogCleanerManager.scala | 687 + .../src/main/scala/kafka/log/LogManager.scala | 257 +- .../src/main/scala/kafka/log/UnifiedLog.scala | 2216 +++ .../kafka/metrics/KafkaMetricsConfig.scala | 4 +- .../kafka/metrics/KafkaMetricsReporter.scala | 5 +- .../scala/kafka/network/RequestChannel.scala | 13 +- .../scala/kafka/network/SocketServer.scala | 93 +- .../scala/kafka/raft/KafkaMetadataLog.scala | 134 +- .../main/scala/kafka/raft/RaftManager.scala | 301 + .../scala/kafka/raft/SegmentPosition.scala | 23 + .../raft/TimingWheelExpirationService.scala | 63 + .../kafka/server/AbstractFetcherManager.scala | 7 +- .../kafka/server/AbstractFetcherThread.scala | 268 +- .../src/main/scala/kafka/server/AclApis.scala | 35 +- .../server/AddPartitionsToTxnManager.scala | 314 + .../kafka/server/ApiVersionManager.scala | 175 + .../main/scala/kafka/server/AuthHelper.scala | 13 +- .../server/AutoTopicCreationManager.scala | 331 +- .../kafka/server/BrokerLifecycleManager.scala | 50 +- .../scala/kafka/server/BrokerServer.scala | 307 +- .../kafka/server/ClientQuotaManager.scala | 647 + .../kafka/server/ConfigAdminManager.scala | 19 +- .../scala/kafka/server/ConfigHandler.scala | 10 +- .../scala/kafka/server/ConfigHelper.scala | 90 +- .../scala/kafka/server/ControllerApis.scala | 38 +- .../ControllerMutationQuotaManager.scala | 282 + .../scala/kafka/server/ControllerServer.scala | 71 +- .../kafka/server/DelayedDeleteRecords.scala | 134 + .../kafka/server/DelayedElectLeader.scala | 85 + .../scala/kafka/server/DelayedFuture.scala | 107 + .../scala/kafka/server/DelayedProduce.scala | 39 +- .../kafka/server/DelayedRemoteFetch.scala | 32 +- .../server/DelayedRemoteListOffsets.scala | 167 + .../kafka/server/DelegationTokenManager.scala | 282 + .../kafka/server/DynamicBrokerConfig.scala | 292 +- .../scala/kafka/server/DynamicConfig.scala | 42 + .../scala/kafka/server/FetchSession.scala | 7 +- .../kafka/server/ForwardingManager.scala | 16 +- .../server/ForwardingManagerMetrics.scala | 100 + .../main/scala/kafka/server/KafkaApis.scala | 1370 +- .../main/scala/kafka/server/KafkaBroker.scala | 8 +- .../main/scala/kafka/server/KafkaConfig.scala | 226 +- .../scala/kafka/server/KafkaRaftServer.scala | 5 +- .../kafka/server/KafkaRequestHandler.scala | 7 +- .../scala/kafka/server/LeaderEndPoint.scala | 117 + .../server/ListOffsetsPartitionStatus.scala | 48 + .../kafka/server/LocalLeaderEndPoint.scala | 61 +- .../scala/kafka/server/MetadataCache.scala | 222 + .../NodeToControllerChannelManager.scala | 13 +- .../kafka/server/RemoteLeaderEndPoint.scala | 46 +- .../server/ReplicaAlterLogDirsManager.scala | 3 +- .../server/ReplicaAlterLogDirsThread.scala | 18 +- .../kafka/server/ReplicaFetcherManager.scala | 9 +- .../kafka/server/ReplicaFetcherThread.scala | 41 +- .../scala/kafka/server/ReplicaManager.scala | 1139 +- .../kafka/server/RequestHandlerHelper.scala | 4 +- .../scala/kafka/server/SharedServer.scala | 8 +- .../kafka/server/metadata/AclPublisher.scala | 102 + .../metadata/BrokerMetadataPublisher.scala | 106 +- .../metadata/ClientQuotaMetadataManager.scala | 34 +- .../server/metadata/ConfigRepository.scala | 62 + .../metadata/DelegationTokenPublisher.scala | 83 + .../metadata/DynamicConfigPublisher.scala | 2 +- .../DynamicTopicClusterQuotaPublisher.scala | 7 +- .../server/metadata/KRaftMetadataCache.scala | 222 +- .../server/metadata/ScramPublisher.scala | 71 + ...areCoordinatorMetadataCacheHelperImpl.java | 103 + .../scala/kafka/tools/DumpLogSegments.scala | 321 +- .../main/scala/kafka/tools/StorageTool.scala | 66 +- .../kafka/tools/TestRaftRequestHandler.scala | 9 +- .../scala/kafka/tools/TestRaftServer.scala | 37 +- .../main/scala/kafka/utils/CoreUtils.scala | 75 +- .../scala/kafka/utils/Log4jController.scala | 135 + core/src/main/scala/kafka/utils/Logging.scala | 19 +- .../main/scala/kafka/utils/Mx4jLoader.scala | 2 - core/src/main/scala/kafka/utils/Pool.scala | 99 + .../scala/kafka/utils/json/DecodeJson.scala | 109 + .../scala/kafka/utils/json/JsonArray.scala | 27 + .../scala/kafka/utils/json/JsonObject.scala | 42 + .../scala/kafka/utils/json/JsonValue.scala | 116 + .../kafka/admin/AdminFenceProducersTest.java | 148 + .../java/kafka/admin/ClientTelemetryTest.java | 195 + .../admin/ConfigCommandIntegrationTest.java | 631 + .../java/kafka/admin/ConfigCommandTest.java | 1476 ++ .../java/kafka/admin/DeleteTopicTest.java | 383 + .../DescribeAuthorizedOperationsTest.java | 252 + .../UserScramCredentialsCommandTest.java | 183 + .../consumer/ConsumerIntegrationTest.java | 184 + .../log/remote/RemoteLogManagerTest.java | 3760 +++++ .../log/remote/RemoteLogOffsetReaderTest.java | 198 + .../kafka/log/remote/RemoteLogReaderTest.java | 132 + .../test/java/kafka/security/JaasModule.java | 7 +- .../java/kafka/security/JaasTestUtils.java | 25 +- .../java/kafka/security/minikdc/MiniKdc.java | 27 +- .../kafka/security/minikdc/MiniKdcTest.java | 4 +- .../BootstrapControllersIntegrationTest.java | 345 + .../server/LogManagerIntegrationTest.java | 128 + .../ReconfigurableQuorumIntegrationTest.java | 127 +- ...ribeTopicPartitionsRequestHandlerTest.java | 79 +- ...EligibleLeaderReplicasIntegrationTest.java | 463 + .../logger/RuntimeLoggerManagerTest.java | 23 +- .../server/share/DelayedShareFetchTest.java | 1807 +-- .../server/share/ShareFetchUtilsTest.java | 432 +- .../share/SharePartitionManagerTest.java | 2148 ++- .../server/share/SharePartitionTest.java | 10732 +++++--------- .../test/api/CustomQuotaCallbackTest.java | 131 + .../kafka/test/api/ShareConsumerTest.java | 1905 +++ .../admin/ListOffsetsIntegrationTest.scala | 288 + .../kafka/admin/RemoteTopicCrudTest.scala | 520 +- .../AbstractAuthorizerIntegrationTest.scala | 7 - .../kafka/api/AbstractConsumerTest.scala | 10 +- .../api/AdminClientRebootstrapTest.scala | 51 + ...minClientWithPoliciesIntegrationTest.scala | 252 + .../kafka/api/AuthorizerIntegrationTest.scala | 2721 +--- .../kafka/api/BaseAdminIntegrationTest.scala | 47 +- .../kafka/api/BaseConsumerTest.scala | 68 +- .../kafka/api/BaseProducerSendTest.scala | 85 +- .../integration/kafka/api/BaseQuotaTest.scala | 47 +- .../kafka/api/ConsumerBounceTest.scala | 448 +- .../kafka/api/ConsumerRebootstrapTest.scala | 147 + .../kafka/api/ConsumerTopicCreationTest.scala | 108 + ...thLegacyMessageFormatIntegrationTest.scala | 159 + .../kafka/api/CustomQuotaCallbackTest.scala | 14 +- ...gationTokenEndToEndAuthorizationTest.scala | 12 +- ...enEndToEndAuthorizationWithOwnerTest.scala | 28 +- .../kafka/api/EndToEndAuthorizationTest.scala | 191 +- .../kafka/api/EndToEndClusterIdTest.scala | 218 + .../api/GroupAuthorizerIntegrationTest.scala | 238 + .../api/GroupCoordinatorIntegrationTest.scala | 93 +- .../kafka/api/IntegrationTestHarness.scala | 89 +- .../kafka/api/LogAppendTimeTest.scala | 80 + .../integration/kafka/api/MetricsTest.scala | 8 +- .../api/PlaintextAdminIntegrationTest.scala | 3396 ++--- .../api/PlaintextConsumerAssignTest.scala | 207 + .../api/PlaintextConsumerAssignorsTest.scala | 71 +- .../api/PlaintextConsumerCallbackTest.scala | 176 + .../api/PlaintextConsumerCommitTest.scala | 371 + .../api/PlaintextConsumerFetchTest.scala | 283 + .../kafka/api/PlaintextConsumerPollTest.scala | 307 + .../PlaintextConsumerSubscriptionTest.scala | 423 + .../kafka/api/PlaintextConsumerTest.scala | 819 +- .../PlaintextEndToEndAuthorizationTest.scala | 9 +- .../kafka/api/PlaintextProducerSendTest.scala | 133 +- .../kafka/api/ProducerCompressionTest.scala | 167 + .../api/ProducerFailureHandlingTest.scala | 261 + .../kafka/api/ProducerIdExpirationTest.scala | 254 + .../kafka/api/ProducerRebootstrapTest.scala | 57 + .../api/ProducerSendWhileDeletionTest.scala | 88 + .../api/RackAwareAutoTopicCreationTest.scala | 91 + .../kafka/api/RebootstrapTest.scala | 57 + ...aslClientsWithInvalidCredentialsTest.scala | 46 +- .../api/SaslEndToEndAuthorizationTest.scala | 12 +- .../api/SaslMultiMechanismConsumerTest.scala | 16 +- .../api/SaslPlainPlaintextConsumerTest.scala | 46 + ...aslScramSslEndToEndAuthorizationTest.scala | 13 +- .../integration/kafka/api/SaslSetup.scala | 6 +- .../api/SaslSslAdminIntegrationTest.scala | 218 +- .../kafka/api/SslAdminIntegrationTest.scala | 46 +- .../api/SslEndToEndAuthorizationTest.scala | 2 +- .../kafka/api/TransactionsBounceTest.scala | 6 +- .../api/TransactionsExpirationTest.scala | 247 + .../kafka/api/TransactionsTest.scala | 229 +- .../TransactionsWithMaxInFlightOneTest.scala | 136 + .../transaction/ProducerIntegrationTest.scala | 28 +- .../network/DynamicConnectionQuotaTest.scala | 37 +- .../DynamicNumNetworkThreadsTest.scala | 9 +- .../kafka/server/DelayedFetchTest.scala | 26 +- .../kafka/server/DelayedFutureTest.scala | 96 + .../kafka/server/DelayedRemoteFetchTest.scala | 362 +- .../server/DelayedRemoteListOffsetsTest.scala | 257 + .../DynamicBrokerReconfigurationTest.scala | 305 +- .../FetchFromFollowerIntegrationTest.scala | 42 +- .../server/GssapiAuthenticationTest.scala | 38 +- .../kafka/server/IntegrationTestUtils.scala | 106 + .../kafka/server/KRaftClusterTest.scala | 249 +- .../MetadataVersionIntegrationTest.scala | 89 + ...stenersWithAdditionalJaasContextTest.scala | 2 +- ...nersWithSameSecurityProtocolBaseTest.scala | 18 +- .../kafka/server/QuorumTestHarness.scala | 61 +- .../server/RaftClusterSnapshotTest.scala | 10 +- .../kafka/raft/KafkaMetadataLogTest.scala | 243 +- .../server/KafkaRequestHandlerTest.scala | 11 +- .../server/LocalLeaderEndPointTest.scala | 80 +- .../NodeToControllerRequestThreadTest.scala | 17 +- .../server/RemoteLeaderEndPointTest.scala | 36 +- .../ClientQuotaMetadataManagerTest.scala | 20 +- .../metadata/MockConfigRepository.scala | 62 + .../kafka/tools/LogCompactionTester.scala | 349 + .../test/scala/kafka/utils/LoggingTest.scala | 17 +- .../scala/kafka/utils/TestInfoUtils.scala | 18 +- .../scala/unit/kafka/KafkaConfigTest.scala | 43 +- .../unit/kafka/admin/AddPartitionsTest.scala | 98 +- .../unit/kafka/admin/AdminRackAwareTest.scala | 251 + .../unit/kafka/admin/RackAwareTest.scala | 94 + .../kafka/cluster/AbstractPartitionTest.scala | 41 +- .../kafka/cluster/AssignmentStateTest.scala | 127 +- .../kafka/cluster/PartitionLockTest.scala | 51 +- .../unit/kafka/cluster/PartitionTest.scala | 1462 +- .../unit/kafka/cluster/ReplicaTest.scala | 349 + .../AbstractCoordinatorConcurrencyTest.scala | 34 +- .../group/CoordinatorLoaderImplTest.scala | 705 + .../CoordinatorPartitionWriterTest.scala | 96 +- .../group/GroupCoordinatorAdapterTest.scala | 955 ++ .../GroupCoordinatorConcurrencyTest.scala | 406 + .../group/GroupCoordinatorTest.scala | 4249 ++++++ .../group/GroupMetadataManagerTest.scala | 3095 ++++ .../coordinator/group/GroupMetadataTest.scala | 866 ++ .../group/MemberMetadataTest.scala | 94 + ...ransactionCoordinatorConcurrencyTest.scala | 96 +- .../TransactionCoordinatorTest.scala | 867 +- .../transaction/TransactionLogTest.scala | 276 + .../TransactionMarkerChannelManagerTest.scala | 138 +- ...onMarkerRequestCompletionHandlerTest.scala | 12 +- .../transaction/TransactionMetadataTest.scala | 743 +- .../TransactionStateManagerTest.scala | 375 +- .../integration/KafkaServerTestHarness.scala | 12 +- ...tricsDuringTopicCreationDeletionTest.scala | 158 + .../kafka/integration/MinIsrConfigTest.scala | 39 + .../UncleanLeaderElectionTest.scala | 77 +- .../AbstractLogCleanerIntegrationTest.scala | 53 +- .../kafka/log/BrokerCompressionTest.scala | 106 + .../kafka/log/LogCleanerIntegrationTest.scala | 19 +- .../log/LogCleanerLagIntegrationTest.scala | 11 +- .../kafka/log/LogCleanerManagerTest.scala | 312 +- ...gCleanerParameterizedIntegrationTest.scala | 22 +- .../scala/unit/kafka/log/LogCleanerTest.scala | 839 +- .../unit/kafka/log/LogConcurrencyTest.scala | 191 + .../scala/unit/kafka/log/LogConfigTest.scala | 89 +- .../scala/unit/kafka/log/LogLoaderTest.scala | 198 +- .../scala/unit/kafka/log/LogManagerTest.scala | 319 +- .../scala/unit/kafka/log/LogTestUtils.scala | 59 +- .../scala/unit/kafka/log/UnifiedLogTest.scala | 1592 +- .../log/remote/RemoteIndexCacheTest.scala | 1092 ++ .../kafka/metrics/KafkaMetricsGroupTest.scala | 23 +- .../unit/kafka/metrics/MetricsTest.scala | 74 +- .../kafka/network/ConnectionQuotasTest.scala | 33 +- .../unit/kafka/network/ProcessorTest.scala | 35 +- .../kafka/network/RequestChannelTest.scala | 57 +- .../unit/kafka/network/SocketServerTest.scala | 35 +- .../unit/kafka/raft/RaftManagerTest.scala | 93 +- .../security/authorizer/AuthorizerTest.scala | 187 +- .../AbstractApiVersionsRequestTest.scala | 29 +- .../server/AbstractFetcherManagerTest.scala | 58 +- .../server/AbstractFetcherThreadTest.scala | 817 +- .../AbstractFetcherThreadWithIbp26Test.scala | 28 + .../AddPartitionsToTxnManagerTest.scala | 463 + .../AddPartitionsToTxnRequestServerTest.scala | 18 +- .../AllocateProducerIdsRequestTest.scala | 11 +- .../server/AlterPartitionManagerTest.scala | 12 +- .../AlterReplicaLogDirsRequestTest.scala | 38 +- ...mCredentialsRequestNotAuthorizedTest.scala | 14 +- ...AlterUserScramCredentialsRequestTest.scala | 56 +- .../kafka/server/ApiVersionManagerTest.scala | 133 + .../kafka/server/ApiVersionsRequestTest.scala | 19 +- .../unit/kafka/server/AuthHelperTest.scala | 31 +- .../server/BaseClientQuotaManagerTest.scala | 4 +- .../unit/kafka/server/BaseRequestTest.scala | 4 +- .../server/BrokerLifecycleManagerTest.scala | 19 +- .../kafka/server/BrokerMetricNamesTest.scala | 7 +- .../BrokerRegistrationRequestTest.scala | 172 + .../kafka/server/ClientQuotaManagerTest.scala | 336 +- .../server/ClientQuotasRequestTest.scala | 592 + .../ClientRequestQuotaManagerTest.scala | 16 +- .../kafka/server/ConfigAdminManagerTest.scala | 2 +- .../ConsumerGroupDescribeRequestTest.scala | 44 +- .../ConsumerGroupHeartbeatRequestTest.scala | 98 +- .../ConsumerProtocolMigrationTest.scala | 103 +- .../kafka/server/ControllerApisTest.scala | 63 +- ...ControllerConfigurationValidatorTest.scala | 12 +- .../ControllerMutationQuotaManagerTest.scala | 38 +- .../server/ControllerMutationQuotaTest.scala | 64 +- .../ControllerRegistrationManagerTest.scala | 2 +- .../server/CreateTopicsRequestTest.scala | 31 +- .../CreateTopicsRequestWithPolicyTest.scala | 179 + ...legationTokenRequestsOnPlainTextTest.scala | 9 +- .../server/DelegationTokenRequestsTest.scala | 9 +- ...nRequestsWithDisableTokenFeatureTest.scala | 9 +- .../server/DeleteGroupsRequestTest.scala | 52 +- .../server/DeleteRecordsRequestTest.scala | 18 +- .../server/DeleteTopicsRequestTest.scala | 26 +- ...opicsRequestWithDeletionDisabledTest.scala | 9 +- .../server/DescribeClusterRequestTest.scala | 14 +- .../server/DescribeGroupsRequestTest.scala | 36 +- .../server/DescribeLogDirsRequestTest.scala | 12 +- .../server/DescribeQuorumRequestTest.scala | 21 +- ...mCredentialsRequestNotAuthorizedTest.scala | 8 +- ...cribeUserScramCredentialsRequestTest.scala | 22 +- .../server/DynamicBrokerConfigTest.scala | 73 +- .../server/DynamicConfigChangeTest.scala | 121 +- .../kafka/server/EdgeCaseRequestTest.scala | 40 +- .../server/FetchRequestMaxBytesTest.scala | 9 +- .../unit/kafka/server/FetchRequestTest.scala | 68 +- .../server/ForwardingManagerMetricsTest.scala | 114 + .../kafka/server/ForwardingManagerTest.scala | 2 +- .../GroupCoordinatorBaseRequestTest.scala | 224 +- .../kafka/server/HeartbeatRequestTest.scala | 36 +- .../server/HighwatermarkPersistenceTest.scala | 27 +- .../unit/kafka/server/IsrExpirationTest.scala | 69 +- .../kafka/server/JoinGroupRequestTest.scala | 41 +- .../unit/kafka/server/KafkaApisTest.scala | 8492 ++++------- .../unit/kafka/server/KafkaConfigTest.scala | 231 +- ...aMetricReporterExceptionHandlingTest.scala | 9 +- .../server/KafkaMetricsReporterTest.scala | 12 +- .../kafka/server/KafkaRaftServerTest.scala | 10 +- .../kafka/server/LeaveGroupRequestTest.scala | 11 +- .../kafka/server/ListGroupsRequestTest.scala | 42 +- .../kafka/server/ListOffsetsRequestTest.scala | 78 +- .../unit/kafka/server/LogDirFailureTest.scala | 49 +- .../unit/kafka/server/LogOffsetTest.scala | 111 +- .../unit/kafka/server/LogRecoveryTest.scala | 40 +- .../unit/kafka/server/MetadataCacheTest.scala | 101 +- .../kafka/server/MetadataRequestTest.scala | 69 +- .../unit/kafka/server/MockFetcherThread.scala | 66 +- .../kafka/server/MockLeaderEndPoint.scala | 41 +- .../kafka/server/MockTierStateMachine.scala | 11 +- .../server/OffsetCommitRequestTest.scala | 70 +- .../server/OffsetDeleteRequestTest.scala | 42 +- .../kafka/server/OffsetFetchRequestTest.scala | 483 +- .../OffsetsForLeaderEpochRequestTest.scala | 13 +- .../kafka/server/ProduceRequestTest.scala | 63 +- .../server/RegistrationTestContext.scala | 2 +- .../ReplicaAlterLogDirsThreadTest.scala | 128 +- .../unit/kafka/server/ReplicaFetchTest.scala | 9 +- .../server/ReplicaFetcherThreadTest.scala | 146 +- .../ReplicaManagerConcurrencyTest.scala | 20 +- .../server/ReplicaManagerQuotasTest.scala | 28 +- .../kafka/server/ReplicaManagerTest.scala | 2507 ++-- .../kafka/server/ReplicationQuotasTest.scala | 19 +- .../unit/kafka/server/RequestQuotaTest.scala | 257 +- .../server/SaslApiVersionsRequestTest.scala | 11 +- .../kafka/server/ServerShutdownTest.scala | 38 +- .../ShareFetchAcknowledgeRequestTest.scala | 1961 ++- .../ShareGroupDescribeRequestTest.scala | 14 +- .../ShareGroupHeartbeatRequestTest.scala | 289 +- .../kafka/server/SyncGroupRequestTest.scala | 39 +- .../ThrottledChannelExpirationTest.scala | 2 +- .../kafka/server/TierStateMachineTest.scala | 9 +- .../server/TxnOffsetCommitRequestTest.scala | 154 +- .../epoch/LeaderEpochFileCacheTest.scala | 20 +- .../epoch/LeaderEpochIntegrationTest.scala | 23 +- .../epoch/OffsetsForLeaderEpochTest.scala | 18 +- .../epoch/util/MockBlockingSender.scala | 3 +- .../BrokerMetadataPublisherTest.scala | 148 +- .../metadata/MockConfigRepositoryTest.scala | 55 + .../kafka/tools/DumpLogSegmentsTest.scala | 425 +- .../unit/kafka/tools/StorageToolTest.scala | 198 +- .../unit/kafka/utils/CoreUtilsTest.scala | 46 + .../scala/unit/kafka/utils/PoolTest.scala | 40 + .../unit/kafka/utils/SchedulerTest.scala | 206 + .../scala/unit/kafka/utils/TestUtils.scala | 107 +- docker/README.md | 18 +- docker/common.py | 13 +- docker/docker_build_test.py | 21 +- docker/docker_official_image_build_test.py | 10 +- docker/examples/README.md | 27 +- .../combined/plaintext/docker-compose.yml | 7 +- .../cluster/combined/ssl/docker-compose.yml | 7 +- .../isolated/plaintext/docker-compose.yml | 13 +- .../cluster/isolated/ssl/docker-compose.yml | 13 +- .../single-node/file-input/docker-compose.yml | 1 + .../single-node/plaintext/docker-compose.yml | 3 +- .../single-node/ssl/docker-compose.yml | 3 +- .../fixtures/file-input/server.properties | 2 - docker/jvm/Dockerfile | 54 +- docker/native/Dockerfile | 21 +- docker/native/README.md | 2 +- .../native-image-configs/reflect-config.json | 18 - .../native-image-configs/resource-config.json | 4 +- .../prepare_docker_official_image_source.py | 11 +- docker/server.properties | 4 +- docker/test/docker_sanity_test.py | 16 +- .../fixtures/file-input/server.properties | 2 - .../fixtures/mode/combined/docker-compose.yml | 5 +- .../fixtures/mode/isolated/docker-compose.yml | 11 +- docs/api.html | 38 +- docs/configuration.html | 104 +- docs/connect.html | 24 +- docs/design.html | 65 +- docs/ecosystem.html | 2 +- docs/implementation.html | 4 - docs/introduction.html | 2 +- docs/js/templateData.js | 6 +- docs/ops.html | 393 +- docs/protocol.html | 6 +- docs/security.html | 400 +- docs/streams/architecture.html | 2 +- docs/streams/core-concepts.html | 6 +- .../developer-guide/app-reset-tool.html | 13 +- .../developer-guide/config-streams.html | 119 +- docs/streams/developer-guide/datatypes.html | 85 +- docs/streams/developer-guide/dsl-api.html | 34 +- .../developer-guide/dsl-topology-naming.html | 13 - docs/streams/developer-guide/running-app.html | 2 +- docs/streams/developer-guide/security.html | 65 +- docs/streams/upgrade-guide.html | 429 +- docs/toc.html | 46 +- docs/upgrade.html | 357 +- .../examples/ExactlyOnceMessageProcessor.java | 8 +- .../org/apache/kafka/message/CodeBuffer.java | 6 +- .../org/apache/kafka/message/FieldType.java | 2 +- .../kafka/message/MessageDataGenerator.java | 2 +- .../kafka/message/MessageGenerator.java | 12 +- .../org/apache/kafka/message/MessageSpec.java | 19 - .../apache/kafka/message/MessageSpecType.java | 8 +- .../kafka/message/checker/CheckerUtils.java | 36 +- .../message/checker/EvolutionVerifier.java | 2 +- .../checker/FieldSpecPairIterator.java | 1 - .../checker/MetadataSchemaCheckerTool.java | 50 +- .../apache/kafka/message/checker/Unifier.java | 2 +- .../MetadataSchemaCheckerToolTest.java | 19 +- gradle.properties | 4 +- gradle/dependencies.gradle | 75 +- gradle/spotbugs-exclude.xml | 205 +- gradle/wrapper/gradle-wrapper.properties | 4 +- gradlew | 2 +- .../group/api/assignor/GroupSpec.java | 16 - .../assignor/SubscribedTopicDescriber.java | 2 +- .../apache/kafka/coordinator/group/Group.java | 17 +- .../kafka/coordinator/group/GroupConfig.java | 129 +- .../coordinator/group/GroupConfigManager.java | 5 - .../coordinator/group/GroupCoordinator.java | 159 +- .../group/GroupCoordinatorConfig.java | 297 +- .../group/GroupCoordinatorRecordHelpers.java | 485 +- .../group/GroupCoordinatorRecordSerde.java | 118 +- .../group/GroupCoordinatorService.java | 1373 +- .../group/GroupCoordinatorShard.java | 568 +- .../group/GroupMetadataManager.java | 3303 +---- .../coordinator/group/OffsetAndMetadata.java | 29 +- .../kafka/coordinator/group/OffsetConfig.java | 73 + .../group/OffsetExpirationConditionImpl.java | 25 +- .../group/OffsetMetadataManager.java | 349 +- .../ShareGroupAutoOffsetResetStrategy.java | 33 +- .../apache/kafka/coordinator/group/Utils.java | 237 +- .../group/assignor/AssignorHelpers.java | 18 - .../group/assignor/RangeAssignor.java | 3 +- .../coordinator/group/assignor/RangeSet.java | 29 +- .../group/assignor/SimpleAssignor.java | 112 +- .../group/assignor/UniformAssignor.java | 4 +- ...UniformHeterogeneousAssignmentBuilder.java | 5 +- .../UniformHomogeneousAssignmentBuilder.java | 37 +- .../group/classic/ClassicGroup.java | 44 +- .../group/classic/ClassicGroupMember.java | 3 +- .../group/classic/ClassicGroupState.java | 4 +- .../metrics/GroupCoordinatorMetrics.java | 167 +- .../metrics/GroupCoordinatorMetricsShard.java | 189 +- .../coordinator/group/modern/Assignment.java | 2 +- .../group/modern/GroupSpecImpl.java | 49 +- .../group/modern/MemberAssignmentImpl.java | 27 +- .../coordinator/group/modern/ModernGroup.java | 76 +- .../modern/SubscribedTopicDescriberImpl.java | 46 +- .../group/modern/SubscriptionCount.java | 27 +- .../group/modern/TargetAssignmentBuilder.java | 79 +- .../coordinator/group/modern/TopicIds.java | 58 +- .../group/modern/TopicMetadata.java | 132 + .../coordinator/group/modern/UnionSet.java | 3 +- .../group/modern/consumer/ConsumerGroup.java | 69 +- .../modern/consumer/ConsumerGroupMember.java | 33 +- .../consumer/CurrentAssignmentBuilder.java | 13 +- .../consumer/ResolvedRegularExpression.java | 53 +- .../group/modern/share/ShareGroup.java | 54 +- .../group/modern/share/ShareGroupConfig.java | 70 +- .../group/modern/share/ShareGroupMember.java | 28 +- ...nsumerGroupCurrentMemberAssignmentKey.json | 9 +- ...umerGroupCurrentMemberAssignmentValue.json | 3 +- .../ConsumerGroupMemberMetadataKey.json | 9 +- .../ConsumerGroupMemberMetadataValue.json | 3 +- .../message/ConsumerGroupMetadataKey.json | 7 +- .../message/ConsumerGroupMetadataValue.json | 12 +- .../ConsumerGroupPartitionMetadataKey.json | 9 +- .../ConsumerGroupPartitionMetadataValue.json | 5 +- .../ConsumerGroupRegularExpressionKey.json | 9 +- .../ConsumerGroupRegularExpressionValue.json | 3 +- ...onsumerGroupTargetAssignmentMemberKey.json | 9 +- ...sumerGroupTargetAssignmentMemberValue.json | 3 +- ...sumerGroupTargetAssignmentMetadataKey.json | 7 +- ...merGroupTargetAssignmentMetadataValue.json | 3 +- .../common/message/GroupMetadataKey.json | 7 +- .../common/message/GroupMetadataValue.json | 3 +- .../common/message/OffsetCommitKey.json | 11 +- .../common/message/OffsetCommitValue.json | 7 +- .../ShareGroupCurrentMemberAssignmentKey.json | 9 +- ...hareGroupCurrentMemberAssignmentValue.json | 3 +- .../message/ShareGroupMemberMetadataKey.json | 9 +- .../ShareGroupMemberMetadataValue.json | 3 +- .../common/message/ShareGroupMetadataKey.json | 7 +- .../message/ShareGroupMetadataValue.json | 7 +- .../ShareGroupPartitionMetadataKey.json | 26 + .../ShareGroupPartitionMetadataValue.json | 40 + .../ShareGroupStatePartitionMetadataKey.json | 7 +- ...ShareGroupStatePartitionMetadataValue.json | 5 +- .../ShareGroupTargetAssignmentMemberKey.json | 9 +- ...ShareGroupTargetAssignmentMemberValue.json | 3 +- ...ShareGroupTargetAssignmentMetadataKey.json | 7 +- ...areGroupTargetAssignmentMetadataValue.json | 3 +- .../kafka/coordinator/group/Assertions.java | 49 +- .../coordinator/group/GroupConfigTest.java | 66 +- .../group/GroupCoordinatorConfigTest.java | 110 +- .../GroupCoordinatorRecordHelpersTest.java | 460 +- .../GroupCoordinatorRecordSerdeTest.java | 108 +- .../group/GroupCoordinatorServiceTest.java | 4862 +------ .../group/GroupCoordinatorShardTest.java | 1576 +- .../group/GroupMetadataManagerTest.java | 12004 ++++------------ .../GroupMetadataManagerTestContext.java | 331 +- .../group/MetadataImageBuilder.java | 78 + .../group/OffsetAndMetadataTest.java | 53 +- .../OffsetExpirationConditionImplTest.java | 8 +- .../group/OffsetMetadataManagerTest.java | 580 +- .../group/assignor/GroupSpecImplTest.java | 20 +- ...OptimizedUniformAssignmentBuilderTest.java | 288 +- .../group/assignor/RangeAssignorTest.java | 264 +- .../group/assignor/RangeSetTest.java | 57 +- .../group/assignor/SimpleAssignorTest.java | 817 +- ...ormHeterogeneousAssignmentBuilderTest.java | 268 +- .../group/classic/ClassicGroupMemberTest.java | 6 +- .../group/classic/ClassicGroupTest.java | 107 +- .../metrics/GroupCoordinatorMetricsTest.java | 150 +- .../modern/SubscribedTopicMetadataTest.java | 58 +- .../modern/TargetAssignmentBuilderTest.java | 46 +- .../group/modern/TopicIdsTest.java | 72 +- .../group/modern/TopicMetadataTest.java | 74 + .../group/modern/UnionSetTest.java | 13 +- .../modern/consumer/ConsumerGroupBuilder.java | 34 +- .../consumer/ConsumerGroupMemberTest.java | 29 +- .../modern/consumer/ConsumerGroupTest.java | 462 +- .../CurrentAssignmentBuilderTest.java | 6 +- .../ResolvedRegularExpressionTest.java | 6 +- .../group/modern/share/ShareGroupBuilder.java | 34 +- .../modern/share/ShareGroupConfigTest.java | 35 +- .../modern/share/ShareGroupMemberTest.java | 10 +- .../group/modern/share/ShareGroupTest.java | 287 +- .../kafka/jmh/acl/AuthorizerBenchmark.java | 22 +- .../StandardAuthorizerUpdateBenchmark.java | 13 +- .../jmh/assignor/AssignorBenchmarkUtils.java | 170 +- .../assignor/ClientSideAssignorBenchmark.java | 8 +- .../assignor/ServerSideAssignorBenchmark.java | 38 +- .../TargetAssignmentBuilderBenchmark.java | 24 +- .../jmh/common/FetchRequestBenchmark.java | 8 +- .../jmh/common/FetchResponseBenchmark.java | 7 +- ...ImplicitLinkedHashCollectionBenchmark.java | 3 +- .../jmh/common/MetadataResponseBenchmark.java | 10 +- .../jmh/connect/JsonConverterBenchmark.java | 4 +- .../kafka/jmh/connect/ValuesBenchmark.java | 9 +- .../coordinator/RegexResolutionBenchmark.java | 3 +- .../jmh/core/TestPurgatoryPerformance.java | 39 +- .../ReplicaFetcherThreadBenchmark.java | 82 +- .../fetchsession/FetchSessionBenchmark.java | 3 +- .../apache/kafka/jmh/log/StressTestLog.java | 12 +- .../kafka/jmh/log/TestLinearWriteSpeed.java | 31 +- .../KRaftMetadataRequestBenchmark.java | 37 +- ...opicsImageSingleRecordChangeBenchmark.java | 6 +- .../TopicsImageSnapshotLoadBenchmark.java | 6 +- .../TopicsImageZonalOutageBenchmark.java | 10 +- .../PartitionMakeFollowerBenchmark.java | 29 +- .../UpdateFollowerFetchStateBenchmark.java | 32 +- .../producer/ProducerRequestBenchmark.java | 11 +- .../producer/ProducerResponseBenchmark.java | 11 +- .../jmh/record/BaseRecordBatchBenchmark.java | 3 +- .../kafka/jmh/server/CheckpointBench.java | 13 +- .../jmh/server/PartitionCreationBench.java | 44 +- .../InvalidReplicaDirectoriesException.java | 5 +- .../kafka/controller/AclControlManager.java | 29 +- .../ActivationRecordsGenerator.java | 10 +- .../kafka/controller/BrokerControlStates.java | 37 +- .../controller/BrokerHeartbeatTracker.java | 2 +- .../kafka/controller/BrokerIdAndEpoch.java | 39 +- .../kafka/controller/BrokersToElrs.java | 3 +- .../kafka/controller/BrokersToIsrs.java | 7 +- .../controller/ClientQuotaControlManager.java | 35 +- .../controller/ClusterControlManager.java | 59 +- .../ConfigurationControlManager.java | 41 +- .../kafka/controller/ControllerResult.java | 5 +- .../controller/ControllerResultAndOffset.java | 2 +- .../DelegationTokenControlManager.java | 4 +- .../controller/EventPerformanceMonitor.java | 4 +- .../controller/FeatureControlManager.java | 127 +- .../kafka/controller/LogReplayTracker.java | 73 + .../controller/OffsetControlManager.java | 9 +- .../controller/PartitionChangeBuilder.java | 38 +- .../PartitionReassignmentReplicas.java | 12 +- .../PartitionReassignmentRevert.java | 3 +- .../apache/kafka/controller/PeriodicTask.java | 30 +- .../PeriodicTaskControlManager.java | 3 +- .../controller/ProducerIdControlManager.java | 4 +- .../kafka/controller/QuorumController.java | 77 +- .../kafka/controller/QuorumFeatures.java | 4 + .../controller/ReplicationControlManager.java | 124 +- .../kafka/controller/ResultOrError.java | 2 +- .../errors/ControllerExceptions.java | 15 + .../errors/EventHandlerExceptionInfo.java | 6 +- .../metrics/ControllerMetadataMetrics.java | 103 +- .../ControllerMetadataMetricsPublisher.java | 12 +- .../metrics/ControllerMetricsChanges.java | 47 +- .../metrics/QuorumControllerMetrics.java | 71 +- .../org/apache/kafka/image/AclsImage.java | 24 +- .../apache/kafka/image/ClientQuotaImage.java | 35 +- .../apache/kafka/image/ClientQuotasImage.java | 38 +- .../org/apache/kafka/image/ClusterDelta.java | 8 +- .../org/apache/kafka/image/ClusterImage.java | 39 +- .../kafka/image/ConfigurationImage.java | 30 +- .../kafka/image/ConfigurationsDelta.java | 3 +- .../kafka/image/ConfigurationsImage.java | 12 +- .../kafka/image/DelegationTokenImage.java | 31 +- .../org/apache/kafka/image/FeaturesImage.java | 5 +- .../kafka/image/LocalReplicaChanges.java | 22 +- .../org/apache/kafka/image/MetadataImage.java | 132 +- .../kafka/image/MetadataProvenance.java | 64 +- .../kafka/image/MetadataVersionChange.java | 45 +- .../apache/kafka/image/ProducerIdsImage.java | 43 +- .../org/apache/kafka/image/ScramImage.java | 53 +- .../org/apache/kafka/image/TopicDelta.java | 73 +- .../org/apache/kafka/image/TopicImage.java | 44 +- .../org/apache/kafka/image/TopicsDelta.java | 37 +- .../org/apache/kafka/image/TopicsImage.java | 43 +- .../kafka/image/loader/MetadataLoader.java | 35 +- .../kafka/image/loader/SnapshotManifest.java | 57 +- .../loader/metrics/MetadataLoaderMetrics.java | 110 +- .../kafka/image/node/AclsImageNode.java | 4 +- .../image/node/ClientQuotaImageNode.java | 4 +- .../image/node/ClientQuotasImageNode.java | 14 +- .../kafka/image/node/ClusterImageNode.java | 4 +- .../kafka/image/node/FeaturesImageNode.java | 6 + .../kafka/image/node/MetadataImageNode.java | 48 +- .../apache/kafka/image/node/MetadataNode.java | 4 +- .../image/node/ProducerIdsImageNode.java | 4 +- .../kafka/image/node/ScramImageNode.java | 3 +- .../kafka/image/node/TopicsImageNode.java | 4 +- .../ControllerRegistrationsPublisher.java | 3 +- .../image/publisher/SnapshotEmitter.java | 1 + .../image/publisher/SnapshotGenerator.java | 17 +- .../metrics/SnapshotEmitterMetrics.java | 4 +- .../kafka/metadata/BrokerHeartbeatReply.java | 80 +- .../kafka/metadata/BrokerRegistration.java | 13 +- .../metadata/BrokerRegistrationReply.java | 30 +- .../metadata/ControllerRegistration.java | 3 +- .../kafka/metadata/DelegationTokenData.java | 28 +- .../metadata/FinalizedControllerFeatures.java | 35 +- .../kafka/metadata/KafkaConfigSchema.java | 47 +- .../apache/kafka/metadata/LeaderAndIsr.java | 26 +- .../apache/kafka/metadata/ListenerInfo.java | 20 +- .../kafka/metadata/PartitionRegistration.java | 40 +- .../kafka/metadata/ScramCredentialData.java | 65 +- .../apache/kafka/metadata/VersionRange.java | 10 +- .../metadata/authorizer/StandardAcl.java | 98 +- .../authorizer/StandardAclWithId.java | 44 +- .../authorizer/StandardAuthorizer.java | 67 +- .../authorizer/StandardAuthorizerData.java | 42 +- .../metadata/bootstrap/BootstrapMetadata.java | 48 +- .../placement/PartitionAssignment.java | 11 +- .../metadata/placement/PlacementSpec.java | 58 +- .../placement/StripedReplicaPlacer.java | 3 +- .../metadata/placement/TopicAssignment.java | 39 +- .../metadata/placement/UsableBroker.java | 50 +- .../metadata/properties/MetaProperties.java | 16 +- .../properties/MetaPropertiesEnsemble.java | 8 +- .../properties/MetaPropertiesVersion.java | 10 +- .../metadata/publisher/FeaturesPublisher.java | 27 +- .../kafka/metadata/storage/Formatter.java | 55 +- .../kafka/metadata/util/BatchFileReader.java | 17 +- .../kafka/metadata/util/BatchFileWriter.java | 3 +- .../metadata/util/SnapshotFileReader.java | 14 +- .../BrokerRegistrationChangeRecord.json | 2 +- .../common/metadata/RegisterBrokerRecord.json | 2 +- .../controller/AclControlManagerTest.java | 96 +- .../ActivationRecordsGeneratorTest.java | 2 +- .../BrokerHeartbeatManagerTest.java | 5 +- .../kafka/controller/BrokerToElrsTest.java | 3 +- .../kafka/controller/BrokersToIsrsTest.java | 3 +- .../ClientQuotaControlManagerTest.java | 28 +- .../controller/ClusterControlManagerTest.java | 277 +- .../ConfigurationControlManagerTest.java | 111 +- .../controller/FeatureControlManagerTest.java | 248 +- .../controller/LogReplayTrackerTest.java | 38 + .../controller/OffsetControlManagerTest.java | 37 +- .../PartitionChangeBuilderTest.java | 255 +- .../PartitionReassignmentReplicasTest.java | 79 +- .../PartitionReassignmentRevertTest.java | 22 +- .../PeriodicTaskControlManagerTest.java | 18 +- .../ProducerIdControlManagerTest.java | 7 +- .../QuorumControllerIntegrationTestUtils.java | 15 +- ...uorumControllerMetricsIntegrationTest.java | 23 +- .../controller/QuorumControllerTest.java | 415 +- .../controller/QuorumControllerTestEnv.java | 26 +- .../kafka/controller/QuorumFeaturesTest.java | 21 +- .../ReplicationControlManagerTest.java | 734 +- .../ControllerMetadataMetricsTest.java | 97 +- .../metrics/ControllerMetricsChangesTest.java | 89 +- .../metrics/ControllerMetricsTestUtils.java | 22 +- .../metrics/QuorumControllerMetricsTest.java | 53 +- .../org/apache/kafka/image/AclsDeltaTest.java | 5 +- .../kafka/image/ClientQuotasImageTest.java | 18 +- .../apache/kafka/image/ClusterImageTest.java | 54 +- .../kafka/image/ConfigurationsImageTest.java | 9 +- .../kafka/image/FakeSnapshotWriter.java | 9 +- .../apache/kafka/image/FeaturesDeltaTest.java | 9 +- .../apache/kafka/image/FeaturesImageTest.java | 7 +- .../kafka/image/ImageDowngradeTest.java | 24 +- .../apache/kafka/image/MetadataImageTest.java | 4 +- .../image/MetadataVersionChangeTest.java | 19 - .../kafka/image/ProducerIdsImageTest.java | 4 +- .../apache/kafka/image/TopicsImageTest.java | 371 +- .../image/loader/MetadataBatchLoaderTest.java | 17 +- .../image/loader/MetadataLoaderTest.java | 182 +- .../metrics/MetadataLoaderMetricsTest.java | 129 +- .../image/node/ClientQuotasImageNodeTest.java | 22 +- .../node/ClusterImageBrokersNodeTest.java | 16 +- .../node/ClusterImageControllersNodeTest.java | 13 +- .../image/node/ClusterImageNodeTest.java | 4 +- .../node/ConfigurationImageNodeTest.java | 5 +- .../node/ConfigurationsImageNodeTest.java | 13 +- .../kafka/image/node/TopicImageNodeTest.java | 4 +- .../MetadataNodeRedactionCriteriaTest.java | 3 +- .../BrokerRegistrationTrackerTest.java | 9 +- .../ControllerRegistrationsPublisherTest.java | 9 +- .../image/publisher/SnapshotEmitterTest.java | 14 +- .../publisher/SnapshotGeneratorTest.java | 49 +- .../metrics/SnapshotEmitterMetricsTest.java | 10 +- .../kafka/image/writer/ImageReWriterTest.java | 6 +- .../image/writer/ImageWriterOptionsTest.java | 10 +- .../image/writer/RaftSnapshotWriterTest.java | 11 +- .../image/writer/RecordListWriterTest.java | 7 +- .../metadata/BrokerRegistrationTest.java | 33 +- .../metadata/ControllerRegistrationTest.java | 16 +- .../metadata/DelegationTokenDataTest.java | 10 +- .../kafka/metadata/FakeKafkaConfigSchema.java | 5 +- .../kafka/metadata/KafkaConfigSchemaTest.java | 47 +- .../kafka/metadata/LeaderAndIsrTest.java | 25 +- .../kafka/metadata/ListenerInfoTest.java | 41 +- .../metadata/PartitionRegistrationTest.java | 99 +- .../kafka/metadata/RecordTestUtils.java | 118 +- .../apache/kafka/metadata/ReplicasTest.java | 37 +- .../metadata/ScramCredentialDataTest.java | 3 +- .../ClusterMetadataAuthorizerTest.java | 11 +- .../StandardAclRecordIteratorTest.java | 9 +- .../StandardAuthorizerPropertyTest.java | 5 +- .../authorizer/StandardAuthorizerTest.java | 260 +- .../bootstrap/BootstrapDirectoryTest.java | 6 +- .../bootstrap/BootstrapMetadataTest.java | 40 +- .../placement/PartitionAssignmentTest.java | 13 +- .../placement/StripedReplicaPlacerTest.java | 108 +- .../placement/TopicAssignmentTest.java | 28 +- .../MetaPropertiesEnsembleTest.java | 40 +- .../properties/MetaPropertiesTest.java | 12 +- .../kafka/metadata/storage/FormatterTest.java | 220 +- .../metadata/util/RecordRedactorTest.java | 3 +- .../apache/kafka/metalog/LocalLogManager.java | 852 ++ .../kafka/metalog/LocalLogManagerTest.java | 172 + .../kafka/metalog/LocalLogManagerTestEnv.java | 252 + .../metalog/MockMetaLogManagerListener.java | 109 + .../java/org/apache/kafka/raft/Batch.java | 5 +- .../org/apache/kafka/raft/CandidateState.java | 43 +- .../org/apache/kafka/raft/ControlRecord.java | 86 +- .../org/apache/kafka/raft/DynamicVoter.java | 89 +- .../org/apache/kafka/raft/ElectionState.java | 2 +- .../java/org/apache/kafka/raft/Endpoints.java | 13 +- .../org/apache/kafka/raft/EpochState.java | 7 - .../kafka/raft/FileQuorumStateStore.java | 3 +- .../org/apache/kafka/raft/FollowerState.java | 60 +- .../kafka/raft/KafkaNetworkChannel.java | 25 +- .../apache/kafka/raft/KafkaRaftClient.java | 621 +- .../org/apache/kafka/raft/LeaderAndEpoch.java | 39 +- .../org/apache/kafka/raft/LeaderState.java | 339 +- .../org/apache/kafka/raft/LogAppendInfo.java | 11 +- .../org/apache/kafka/raft/LogFetchInfo.java | 2 +- .../apache/kafka/raft/LogOffsetMetadata.java | 3 +- .../org/apache/kafka/raft/OffsetAndEpoch.java | 68 + .../apache/kafka/raft/ProspectiveState.java | 10 +- .../org/apache/kafka/raft/QuorumConfig.java | 36 +- .../org/apache/kafka/raft/QuorumState.java | 28 +- .../org/apache/kafka/raft/RaftClient.java | 29 +- .../java/org/apache/kafka/raft/RaftUtil.java | 106 +- .../org/apache/kafka/raft/ReplicatedLog.java | 10 +- .../org/apache/kafka/raft/RequestManager.java | 2 +- .../org/apache/kafka/raft/ResignedState.java | 2 +- .../kafka/raft/ValidOffsetAndEpoch.java | 2 - .../java/org/apache/kafka/raft/VoterSet.java | 50 +- .../kafka/raft/internals/AddVoterHandler.java | 18 +- .../raft/internals/AddVoterHandlerState.java | 7 - .../raft/internals/BatchAccumulator.java | 22 +- .../kafka/raft/internals/EpochElection.java | 22 +- .../KRaftControlRecordStateMachine.java | 10 +- .../raft/internals/KafkaRaftMetrics.java | 8 +- .../kafka/raft/internals/LogHistory.java | 42 +- .../raft/internals/RecordsBatchReader.java | 6 +- .../kafka/raft/internals/RecordsIterator.java | 44 +- .../raft/internals/RemoveVoterHandler.java | 6 +- .../raft/internals/ThresholdPurgatory.java | 9 +- .../raft/internals/TreeMapLogHistory.java | 2 +- .../raft/internals/UpdateVoterHandler.java | 183 +- .../internals/UpdateVoterHandlerState.java | 72 + .../kafka/raft/internals/VoterSetHistory.java | 11 +- .../kafka/snapshot/FileRawSnapshotReader.java | 2 +- .../kafka/snapshot/FileRawSnapshotWriter.java | 2 +- .../snapshot/NotifyingRawSnapshotWriter.java | 2 +- .../kafka/snapshot/RawSnapshotReader.java | 6 +- .../kafka/snapshot/RawSnapshotWriter.java | 2 +- .../kafka/snapshot/RecordsSnapshotReader.java | 8 +- .../kafka/snapshot/RecordsSnapshotWriter.java | 24 +- .../apache/kafka/snapshot/SnapshotPath.java | 20 +- .../apache/kafka/snapshot/SnapshotReader.java | 6 +- .../apache/kafka/snapshot/SnapshotWriter.java | 4 +- .../org/apache/kafka/snapshot/Snapshots.java | 27 +- .../apache/kafka/raft/CandidateStateTest.java | 18 +- .../apache/kafka/raft/ControlRecordTest.java | 20 +- .../apache/kafka/raft/DynamicVoterTest.java | 4 +- .../apache/kafka/raft/DynamicVotersTest.java | 6 +- .../apache/kafka/raft/ElectionStateTest.java | 22 +- .../org/apache/kafka/raft/EndpointsTest.java | 3 +- .../kafka/raft/FileQuorumStateStoreTest.java | 9 +- .../apache/kafka/raft/FollowerStateTest.java | 13 +- .../kafka/raft/KafkaNetworkChannelTest.java | 99 +- .../raft/KafkaRaftClientPreVoteTest.java | 14 +- .../raft/KafkaRaftClientReconfigTest.java | 769 +- .../raft/KafkaRaftClientSnapshotTest.java | 209 +- .../kafka/raft/KafkaRaftClientTest.java | 596 +- .../apache/kafka/raft/LeaderStateTest.java | 404 +- .../kafka/raft/MockExpirationServiceTest.java | 8 +- .../java/org/apache/kafka/raft/MockLog.java | 144 +- .../org/apache/kafka/raft/MockLogTest.java | 170 +- .../kafka/raft/ProspectiveStateTest.java | 24 +- .../apache/kafka/raft/QuorumStateTest.java | 37 +- .../kafka/raft/RaftClientTestContext.java | 242 +- .../kafka/raft/RaftEventSimulationTest.java | 69 +- .../org/apache/kafka/raft/RaftUtilTest.java | 154 +- .../apache/kafka/raft/ReplicatedCounter.java | 8 +- .../apache/kafka/raft/RequestManagerTest.java | 37 +- .../apache/kafka/raft/ResignedStateTest.java | 11 +- .../kafka/raft/ValidOffsetAndEpochTest.java | 2 - .../org/apache/kafka/raft/VoterSetTest.java | 63 +- .../raft/internals/BatchAccumulatorTest.java | 29 +- .../raft/internals/BatchBuilderTest.java | 8 +- .../KRaftControlRecordStateMachineTest.java | 2 +- .../raft/internals/KafkaRaftMetricsTest.java | 5 +- .../raft/internals/MemoryBatchReaderTest.java | 10 +- .../internals/RecordsBatchReaderTest.java | 13 +- .../raft/internals/RecordsIteratorTest.java | 79 +- .../internals/ThresholdPurgatoryTest.java | 16 +- .../kafka/snapshot/FileRawSnapshotTest.java | 2 +- .../kafka/snapshot/MockRawSnapshotReader.java | 2 +- .../kafka/snapshot/MockRawSnapshotWriter.java | 2 +- .../NotifyingRawSnapshotWriterTest.java | 2 +- .../snapshot/RecordsSnapshotWriterTest.java | 34 +- .../snapshot/SnapshotWriterReaderTest.java | 24 +- .../apache/kafka/snapshot/SnapshotsTest.java | 20 +- release/README.md | 2 +- release/git.py | 7 +- release/gpg.py | 10 +- release/notes.py | 8 +- release/preferences.py | 2 + release/release.py | 17 +- release/runtime.py | 4 +- release/svn.py | 4 +- release/templates.py | 25 +- release/textfiles.py | 2 + server-common/bin/test/log4j2.yaml | 35 + .../org/apache/kafka/admin/AdminUtils.java | 254 + .../apache/kafka/admin/BrokerMetadata.java | 52 + .../kafka/deferred/DeferredEventQueue.java | 2 +- .../org/apache/kafka/queue/EventQueue.java | 67 +- .../apache/kafka/queue/KafkaEventQueue.java | 7 +- .../kafka/security/PasswordEncoder.java | 42 + .../common/AdminCommandFailedException.java | 28 + .../common/AdminOperationException.java | 28 + .../kafka/server/common/CheckpointFile.java | 7 +- .../common/EligibleLeaderReplicasVersion.java | 3 +- .../apache/kafka/server/common/Feature.java | 11 +- .../server/common/FinalizedFeatures.java | 70 +- .../kafka/server/common/GroupVersion.java | 5 +- .../kafka/server/common/KRaftVersion.java | 27 +- .../kafka/server/common/MetadataVersion.java | 72 +- .../common/MetadataVersionValidator.java | 41 + .../kafka/server/common/OffsetAndEpoch.java | 41 +- .../server/common/TestFeatureVersion.java | 7 +- .../kafka/server/common/TopicIdPartition.java | 43 +- .../common/TopicOptionalIdPartition.java | 101 + .../server/common/TransactionVersion.java | 7 +- .../server/common/UnitTestFeatureVersion.java | 33 +- .../kafka/server/config/ConfigType.java | 28 +- .../kafka/server/config/QuotaConfig.java | 12 +- .../kafka/server/config/ServerLogConfigs.java | 9 +- .../config/ServerTopicConfigSynonyms.java | 23 +- .../server/metrics/KafkaMetricsGroup.java | 15 +- .../kafka/server/network/BrokerEndPoint.java | 44 +- .../server/network/EndpointReadyFutures.java | 20 +- .../server/purgatory/DelayedOperation.java | 51 +- .../purgatory/DelayedOperationPurgatory.java | 16 +- .../kafka/server/purgatory/GroupJoinKey.java | 49 + .../kafka/server/purgatory/GroupSyncKey.java | 49 + .../kafka/server/purgatory/MemberKey.java | 51 + .../server/record/BrokerCompressionType.java | 4 +- .../persister/DefaultStatePersister.java | 342 +- .../DeleteShareGroupStateParameters.java | 20 - .../DeleteShareGroupStateResult.java | 16 +- .../InitializeShareGroupStateParameters.java | 22 +- .../InitializeShareGroupStateResult.java | 25 - .../persister/NoOpShareStatePersister.java | 106 + .../server/share/persister/PartitionData.java | 2 +- .../share/persister/PartitionFactory.java | 4 +- .../share/persister/PersisterStateBatch.java | 15 +- .../persister/PersisterStateManager.java | 668 +- .../ReadShareGroupStateSummaryParameters.java | 13 - .../ReadShareGroupStateSummaryResult.java | 17 +- .../kafka/server/storage/log/FetchParams.java | 25 +- .../kafka/server/util/CommandLineUtils.java | 30 - .../org/apache/kafka/server/util/Csv.java | 6 +- .../apache/kafka/server/util/FileLock.java | 8 - .../apache/kafka/server/util/FutureUtils.java | 5 +- .../kafka/server/util/KafkaScheduler.java | 4 - .../kafka/server/util/ShutdownableThread.java | 2 +- .../server/util/TranslatedValueMapView.java | 3 +- .../kafka/server/util/json/DecodeJson.java | 6 +- .../kafka/server/util/json/JsonObject.java | 19 + .../kafka/server/util/timer/TimingWheel.java | 10 - .../apache/kafka/timeline/BaseHashTable.java | 12 +- .../org/apache/kafka/timeline/Snapshot.java | 4 - .../kafka/timeline/TimelineHashMap.java | 3 +- .../kafka/timeline/TimelineHashSet.java | 3 +- .../kafka/timeline/TimelineInteger.java | 3 +- .../apache/kafka/timeline/TimelineLong.java | 3 +- .../apache/kafka/timeline/TimelineObject.java | 3 +- .../apache/kafka/common/DirectoryIdTest.java | 15 +- .../kafka/metadata/AssignmentsHelperTest.java | 19 +- .../kafka/queue/KafkaEventQueueTest.java | 3 +- .../kafka/server/common/FeatureTest.java | 7 +- .../server/common/FinalizedFeaturesTest.java | 19 +- .../kafka/server/common/KRaftVersionTest.java | 18 - .../server/common/MetadataVersionTest.java | 86 +- .../common/MetadataVersionValidatorTest.java | 33 + .../PCollectionsImmutableMapTest.java | 13 +- ...PCollectionsImmutableNavigableSetTest.java | 25 +- .../PCollectionsImmutableSetTest.java | 18 +- .../server/metrics/KafkaMetricsGroupTest.java | 4 +- .../kafka/server/mutable/BoundedListTest.java | 23 +- .../network/EndpointReadyFuturesTest.java | 20 +- .../purgatory/DelayedOperationTest.java | 26 +- .../kafka/server/quota/QuotaUtilsTest.java | 4 +- .../persister/DefaultStatePersisterTest.java | 1156 +- .../persister/PersisterStateManagerTest.java | 2660 +--- .../server/util/CommandLineUtilsTest.java | 136 +- .../org/apache/kafka/server/util/CsvTest.java | 10 +- .../util/InterBrokerSendThreadTest.java | 12 +- .../apache/kafka/server/util/JsonTest.java | 16 +- .../kafka/server/util/timer/MockTimer.java | 3 - .../util/timer/SystemTimerReaperTest.java | 6 +- .../kafka/timeline/SnapshotRegistryTest.java | 3 +- .../timeline/SnapshottableHashTableTest.java | 6 +- .../kafka/timeline/TimelineHashMapTest.java | 10 +- .../kafka/timeline/TimelineHashSetTest.java | 32 +- .../kafka/timeline/TimelineIntegerTest.java | 4 +- .../kafka/timeline/TimelineLongTest.java | 4 +- .../kafka/timeline/TimelineObjectTest.java | 4 +- .../kafka/network/ConnectionQuotaEntity.java | 7 +- .../kafka/network/RequestConvertToJson.java | 720 +- .../kafka/network/SocketServerConfigs.java | 31 +- .../metrics/RequestChannelMetrics.java | 9 +- .../kafka/network/metrics/RequestMetrics.java | 34 +- .../kafka/security/CredentialProvider.java | 54 + .../kafka/security/authorizer/AclEntry.java | 50 +- .../security/authorizer/AuthorizerUtils.java | 10 +- .../org/apache/kafka/server/Assignment.java | 99 +- .../kafka/server/AssignmentsManager.java | 11 +- .../AssignmentsManagerDeadlineFunction.java | 4 +- .../kafka/server/ClientMetricsManager.java | 25 +- .../server/config/AbstractKafkaConfig.java | 145 +- .../config/ClientQuotaManagerConfig.java | 26 +- .../config/DelegationTokenManagerConfigs.java | 52 + .../kafka/server/config/KRaftConfigs.java | 63 +- .../server/config/ReplicationConfigs.java | 6 +- .../kafka/server/config/ServerConfigs.java | 155 + .../server/metrics/BrokerServerMetrics.java | 6 +- .../server/metrics/ClientMetricsConfigs.java | 83 +- .../kafka/server/metrics/MetricConfigs.java | 8 +- .../server/share/CachedSharePartition.java | 22 +- .../share/ErroneousAndValidPartitionData.java | 21 +- .../ShareAcknowledgementBatch.java | 34 +- .../server/share/context/FinalContext.java | 5 +- .../share/context/ShareFetchContext.java | 6 +- .../share/context/ShareSessionContext.java | 33 +- .../fetch/DelayedShareFetchGroupKey.java | 5 - .../fetch/DelayedShareFetchPartitionKey.java | 5 - .../share/fetch/ShareAcquiredRecords.java | 12 +- .../kafka/server/share/fetch/ShareFetch.java | 56 +- .../server/share/session/LastUsedKey.java | 66 + .../server/share/session/ShareSession.java | 56 +- .../share/session/ShareSessionCache.java | 195 +- .../server/share/session/ShareSessionKey.java | 39 +- .../network/RequestConvertToJsonTest.java | 19 +- .../network/SocketServerConfigsTest.java | 60 +- .../apache/kafka/server/AssignmentTest.java | 40 +- .../kafka/server/AssignmentsManagerTest.java | 84 +- .../kafka/server/BrokerFeaturesTest.java | 89 +- .../server/ClientMetricsManagerTest.java | 172 +- .../metrics/BrokerServerMetricsTest.java | 21 +- .../ClientMetricsInstanceMetadataTest.java | 42 +- .../metrics/ClientMetricsInstanceTest.java | 8 +- .../metrics/ClientMetricsTestUtils.java | 18 +- .../metrics/LinuxIoMetricsCollectorTest.java | 3 +- .../share/fetch/DelayedShareFetchKeyTest.java | 20 +- .../server/share/fetch/ShareFetchTest.java | 167 +- .../share/session/ShareSessionCacheTest.java | 234 +- .../share/session/ShareSessionTest.java | 8 +- settings.gradle | 13 - .../share/PersisterStateBatchCombiner.java | 3 +- .../coordinator/share/ShareCoordinator.java | 61 +- .../share/ShareCoordinatorConfig.java | 16 +- .../share/ShareCoordinatorOffsetsManager.java | 9 +- .../share/ShareCoordinatorRecordHelpers.java | 35 +- .../share/ShareCoordinatorRecordSerde.java | 32 +- .../share/ShareCoordinatorService.java | 531 +- .../share/ShareCoordinatorShard.java | 665 +- .../coordinator/share/ShareGroupOffset.java | 134 +- .../metrics/ShareCoordinatorMetrics.java | 35 +- .../common/message/ShareSnapshotKey.json | 5 +- .../common/message/ShareSnapshotValue.json | 23 +- .../common/message/ShareUpdateKey.json | 13 +- .../common/message/ShareUpdateValue.json | 19 +- .../PersisterStateBatchCombinerTest.java | 13 +- .../ShareCoordinatorOffsetsManagerTest.java | 96 +- .../ShareCoordinatorRecordHelpersTest.java | 66 +- .../ShareCoordinatorRecordSerdeTest.java | 82 +- .../share/ShareCoordinatorServiceTest.java | 1369 +- .../share/ShareCoordinatorShardTest.java | 1336 +- .../share/ShareCoordinatorTestConfig.java | 3 +- .../metrics/ShareCoordinatorMetricsTest.java | 57 +- .../org/apache/kafka/shell/MetadataShell.java | 65 +- .../shell/command/CatCommandHandler.java | 3 +- .../kafka/shell/command/CdCommandHandler.java | 3 +- .../apache/kafka/shell/command/Commands.java | 3 +- .../command/ErroneousCommandHandler.java | 3 +- .../shell/command/FindCommandHandler.java | 3 +- .../shell/command/HistoryCommandHandler.java | 5 +- .../kafka/shell/command/LsCommandHandler.java | 8 +- .../shell/command/ManCommandHandler.java | 3 +- .../shell/command/TreeCommandHandler.java | 3 +- .../kafka/shell/glob/GlobComponent.java | 35 +- .../apache/kafka/shell/glob/GlobVisitor.java | 3 +- .../kafka/shell/node/LocalShellNode.java | 5 +- .../kafka/shell/node/RootShellNode.java | 4 +- .../shell/node/printer/ShellNodePrinter.java | 8 +- .../shell/MetadataShellIntegrationTest.java | 22 +- .../kafka/shell/command/CommandTest.java | 35 +- .../kafka/shell/command/CommandUtilsTest.java | 14 +- .../shell/command/LsCommandHandlerTest.java | 23 +- .../kafka/shell/glob/GlobVisitorTest.java | 16 +- .../storage/RemoteLogMetadataManager.java | 4 - .../remote/storage/RemoteStorageManager.java | 4 - .../remote/storage/RemoteStorageMetrics.java | 5 +- .../metadata/storage/ConsumerManager.java | 18 +- .../remote/metadata/storage/ConsumerTask.java | 31 +- .../metadata/storage/ProducerManager.java | 2 +- .../storage/RemoteLogMetadataCache.java | 23 +- .../RemoteLogSegmentMetadataSnapshot.java | 34 +- .../RemotePartitionMetadataEventHandler.java | 6 + .../storage/RemotePartitionMetadataStore.java | 37 +- .../TopicBasedRemoteLogMetadataManager.java | 404 +- ...icBasedRemoteLogMetadataManagerConfig.java | 8 +- ...teLogSegmentMetadataSnapshotTransform.java | 3 +- .../RemoteLogSegmentMetadataTransform.java | 3 +- .../log/remote/quota/RLMQuotaManager.java | 3 +- .../remote/quota/RLMQuotaManagerConfig.java | 47 +- .../log/remote/quota/RLMQuotaMetrics.java | 32 +- .../storage/RemoteLogManagerConfig.java | 88 +- .../checkpoint/CleanShutdownFileHandler.java | 2 +- .../checkpoint/LeaderEpochCheckpointFile.java | 2 +- .../checkpoint/OffsetCheckpointFile.java | 10 +- .../checkpoint/PartitionMetadata.java | 17 +- .../internals/epoch/LeaderEpochFileCache.java | 43 +- .../storage/internals/log/AbortedTxn.java | 2 +- .../storage/internals/log/AbstractIndex.java | 172 +- .../storage/internals/log/BatchMetadata.java | 40 +- .../storage/internals/log/CleanerConfig.java | 23 +- .../storage/internals/log/CompletedTxn.java | 59 +- .../storage/internals/log/EpochEntry.java | 24 +- .../storage/internals/log/LastRecord.java | 25 +- .../storage/internals/log/LazyIndex.java | 15 +- .../internals/log/LoadedLogOffsets.java | 29 +- .../kafka/storage/internals/log/LocalLog.java | 58 +- .../storage/internals/log/LogAppendInfo.java | 90 +- .../storage/internals/log/LogConfig.java | 108 +- .../storage/internals/log/LogLoader.java | 16 +- .../internals/log/LogOffsetMetadata.java | 8 +- .../internals/log/LogOffsetSnapshot.java | 43 +- .../storage/internals/log/LogReadInfo.java | 24 +- .../storage/internals/log/LogSegment.java | 121 +- .../storage/internals/log/LogSegments.java | 19 +- .../storage/internals/log/LogValidator.java | 69 +- .../storage/internals/log/OffsetIndex.java | 58 +- .../storage/internals/log/OffsetPosition.java | 29 +- .../internals/log/OffsetResultHolder.java | 39 +- .../internals/log/ProducerAppendInfo.java | 9 +- .../internals/log/ProducerStateEntry.java | 8 +- .../internals/log/ProducerStateManager.java | 27 +- .../internals/log/RemoteIndexCache.java | 186 +- .../internals/log/RemoteLogReadResult.java | 9 +- .../internals/log/RemoteStorageFetchInfo.java | 26 +- .../log/RemoteStorageThreadPool.java | 12 +- .../storage/internals/log/RollParams.java | 25 +- .../internals/log/SegmentDeletionReason.java | 3 +- .../internals/log/SkimpyOffsetMap.java | 21 +- .../log/ThrottledReplicaListValidator.java | 7 +- .../storage/internals/log/TimeIndex.java | 67 +- .../internals/log/TimestampOffset.java | 39 +- .../internals/log/TransactionIndex.java | 15 +- .../internals/log/TxnIndexSearchResult.java | 5 +- .../storage/internals/log/UnifiedLog.java | 2563 +--- .../internals/log/VerificationStateEntry.java | 2 +- .../log/metrics/BrokerTopicMetrics.java | 29 +- .../storage/log/metrics/BrokerTopicStats.java | 16 +- .../metadata/storage/ConsumerTaskTest.java | 43 +- .../RemoteLogLeaderEpochStateTest.java | 9 +- .../storage/RemoteLogMetadataCacheTest.java | 27 +- .../RemoteLogMetadataManagerTestUtils.java | 12 +- .../RemoteLogMetadataTransformTest.java | 4 +- .../RemoteLogSegmentLifecycleTest.java | 36 +- ...sedRemoteLogMetadataManagerConfigTest.java | 60 - ...adataManagerMultipleSubscriptionsTest.java | 28 +- ...edRemoteLogMetadataManagerRestartTest.java | 34 +- ...opicBasedRemoteLogMetadataManagerTest.java | 112 +- .../log/remote/quota/RLMQuotaManagerTest.java | 4 +- ...ssLoaderAwareRemoteStorageManagerTest.java | 4 +- .../remote/storage/LocalTieredStorage.java | 42 +- .../storage/LocalTieredStorageEvent.java | 2 +- .../storage/LocalTieredStorageHistory.java | 3 +- .../storage/LocalTieredStorageSnapshot.java | 9 +- .../storage/LocalTieredStorageTest.java | 32 +- .../storage/RemoteLogManagerConfigTest.java | 30 +- .../storage/RemoteLogMetadataManagerTest.java | 11 +- .../storage/RemoteLogSegmentFileset.java | 8 +- .../RemoteTopicPartitionDirectory.java | 8 +- .../RemoteTopicPartitionDirectoryTest.java | 3 +- .../CleanShutdownFileHandlerTest.java | 8 +- ...hCheckpointFileWithFailureHandlerTest.java | 7 +- ...tCheckpointFileWithFailureHandlerTest.java | 16 +- .../storage/internals/log/LocalLogTest.java | 13 +- .../storage/internals/log/LogSegmentTest.java | 211 +- .../internals/log/LogSegmentsTest.java | 65 +- .../internals/log/LogValidatorTest.java | 180 +- .../internals/log/OffsetIndexTest.java | 15 +- .../storage/internals/log/OffsetMapTest.java | 18 +- .../log/ProducerStateManagerTest.java | 186 +- .../storage/internals/log/TimeIndexTest.java | 6 +- .../internals/log/TransactionIndexTest.java | 42 +- .../storage/TieredStorageTestBuilder.java | 152 +- .../storage/TieredStorageTestContext.java | 49 +- .../storage/TieredStorageTestHarness.java | 20 +- .../storage/actions/AlterLogDirAction.java | 9 +- .../tiered/storage/actions/ConsumeAction.java | 107 +- .../storage/actions/CreateTopicAction.java | 8 +- .../storage/actions/DeleteRecordsAction.java | 16 +- .../storage/actions/DeleteTopicAction.java | 13 +- .../storage/actions/ExpectLeaderAction.java | 6 +- .../ExpectLeaderEpochCheckpointAction.java | 7 +- .../actions/ExpectListOffsetsAction.java | 10 +- ...ctTopicIdToMatchInRemoteStorageAction.java | 3 +- ...TopicMappedToMetadataPartitionsAction.java | 3 +- .../tiered/storage/actions/ProduceAction.java | 15 +- .../actions/ReassignReplicaAction.java | 6 +- .../storage/actions/ShrinkReplicaAction.java | 3 +- .../storage/integration/AlterLogDirTest.java | 7 +- .../integration/BaseDeleteSegmentsTest.java | 18 +- .../integration/BaseReassignReplicaTest.java | 11 +- .../DeleteSegmentsByRetentionSizeTest.java | 3 +- .../DeleteSegmentsByRetentionTimeTest.java | 3 +- ...SegmentsDueToLogStartOffsetBreachTest.java | 15 +- .../storage/integration/DeleteTopicTest.java | 22 +- .../DisableRemoteLogOnTopicTest.java | 40 +- .../EnableRemoteLogOnTopicTest.java | 24 +- ...FromLeaderWithCorruptedCheckpointTest.java | 21 +- .../storage/integration/ListOffsetsTest.java | 17 +- .../OffloadAndConsumeFromLeaderTest.java | 12 +- .../OffloadAndTxnConsumeFromLeaderTest.java | 12 +- .../integration/PartitionsExpandTest.java | 27 +- .../ReassignReplicaExpandTest.java | 3 +- .../integration/ReassignReplicaMoveTest.java | 3 +- .../ReassignReplicaShrinkTest.java | 24 +- .../RollAndOffloadActiveSegmentTest.java | 13 +- .../TransactionsWithTieredStoreTest.java | 6 +- .../tiered/storage/specs/ConsumableSpec.java | 42 +- .../tiered/storage/specs/DeletableSpec.java | 42 +- .../specs/ExpandPartitionCountSpec.java | 41 +- .../tiered/storage/specs/FetchableSpec.java | 33 +- .../tiered/storage/specs/OffloadableSpec.java | 42 +- .../storage/specs/OffloadedSegmentSpec.java | 66 +- .../tiered/storage/specs/ProducableSpec.java | 14 +- .../specs/RemoteDeleteSegmentSpec.java | 73 +- .../storage/specs/RemoteFetchCount.java | 59 +- .../tiered/storage/specs/RemoteFetchSpec.java | 58 +- .../kafka/tiered/storage/specs/TopicSpec.java | 89 +- .../storage/utils/BrokerLocalStorage.java | 22 +- .../utils/LocalTieredStorageOutput.java | 34 +- .../storage/utils/RecordsKeyValueMatcher.java | 37 +- .../storage/utils/TieredStorageTestUtils.java | 11 +- storage/src/test/resources/log4j2.yaml | 4 +- .../AdjustStreamThreadCountTest.java | 36 +- .../integration/EosIntegrationTest.java | 15 +- ...ingSourceTopicDeletionIntegrationTest.java | 66 +- ...ailabilityTaskAssignorIntegrationTest.java | 6 +- .../integration/IQv2IntegrationTest.java | 75 +- .../integration/IQv2StoreIntegrationTest.java | 18 +- .../IQv2VersionedStoreIntegrationTest.java | 67 +- .../InternalTopicIntegrationTest.java | 39 +- ...WithIncompleteMetadataIntegrationTest.java | 19 +- .../KStreamAggregationIntegrationTest.java | 32 +- .../KStreamRepartitionIntegrationTest.java | 102 +- .../KTableEfficientRangeQueryTest.java | 4 +- ...rJoinCustomPartitionerIntegrationTest.java | 2 +- ...leKTableForeignKeyJoinIntegrationTest.java | 172 +- .../KafkaStreamsTelemetryIntegrationTest.java | 305 +- .../integration/LagFetchIntegrationTest.java | 2 +- .../integration/MetricsIntegrationTest.java | 14 +- .../PauseResumeIntegrationTest.java | 63 +- ...essingExceptionHandlerIntegrationTest.java | 165 +- .../PurgeRepartitionTopicIntegrationTest.java | 5 +- .../QueryableStateIntegrationTest.java | 4 +- .../RegexSourceIntegrationTest.java | 2 +- .../integration/RestoreIntegrationTest.java | 255 +- .../SelfJoinUpgradeIntegrationTest.java | 3 +- ...SlidingWindowedKStreamIntegrationTest.java | 9 +- .../SmokeTestDriverIntegrationTest.java | 65 +- .../StandbyTaskCreationIntegrationTest.java | 31 +- ...caughtExceptionHandlerIntegrationTest.java | 85 +- ...allowUnknownTopicErrorIntegrationTest.java | 10 +- ...TestingMetricsInterceptingAdminClient.java | 81 - .../TimeWindowedKStreamIntegrationTest.java | 9 +- .../utils/CompositeStateListener.java | 4 +- .../utils/EmbeddedKafkaCluster.java | 44 - .../src/test/resources/log4j2.yaml | 3 - streams/quickstart/java/pom.xml | 2 +- .../resources/archetype-resources/pom.xml | 2 +- streams/quickstart/pom.xml | 2 +- .../apache/kafka/streams/KafkaStreams.java | 105 +- .../apache/kafka/streams/StreamsBuilder.java | 15 +- .../apache/kafka/streams/StreamsConfig.java | 178 +- .../org/apache/kafka/streams/Topology.java | 331 +- .../apache/kafka/streams/TopologyConfig.java | 36 +- .../errors/BrokerNotFoundException.java | 2 - .../DefaultProductionExceptionHandler.java | 36 +- .../DeserializationExceptionHandler.java | 156 - .../streams/errors/ErrorHandlerContext.java | 34 - .../LogAndContinueExceptionHandler.java | 39 +- ...AndContinueProcessingExceptionHandler.java | 14 +- .../errors/LogAndFailExceptionHandler.java | 39 +- .../LogAndFailProcessingExceptionHandler.java | 13 +- .../errors/ProcessingExceptionHandler.java | 162 +- .../errors/ProductionExceptionHandler.java | 217 +- .../internals/DefaultErrorHandlerContext.java | 16 +- .../kafka/streams/internals/ApiUtils.java | 9 +- .../streams/internals/UpgradeFromValues.java | 4 +- .../internals/metrics/ClientMetrics.java | 3 +- ...treamsThreadMetricsDelegatingReporter.java | 17 +- .../kafka/streams/kstream/Aggregator.java | 1 - .../streams/kstream/BranchedKStream.java | 128 +- .../streams/kstream/CogroupedKStream.java | 47 +- .../kafka/streams/kstream/ForeachAction.java | 1 - .../kafka/streams/kstream/GlobalKTable.java | 66 +- .../kafka/streams/kstream/Initializer.java | 1 - .../apache/kafka/streams/kstream/Joined.java | 2 +- .../kafka/streams/kstream/KGroupedStream.java | 59 +- .../kafka/streams/kstream/KGroupedTable.java | 25 +- .../apache/kafka/streams/kstream/KStream.java | 3886 +++-- .../apache/kafka/streams/kstream/KTable.java | 147 +- .../kafka/streams/kstream/KeyValueMapper.java | 1 - .../apache/kafka/streams/kstream/Merger.java | 1 - .../apache/kafka/streams/kstream/Named.java | 5 +- .../kafka/streams/kstream/Predicate.java | 1 - .../apache/kafka/streams/kstream/Reducer.java | 1 - .../SessionWindowedCogroupedKStream.java | 28 +- .../kstream/SessionWindowedDeserializer.java | 49 +- .../kstream/SessionWindowedKStream.java | 474 +- .../kstream/SessionWindowedSerializer.java | 48 +- .../kafka/streams/kstream/SessionWindows.java | 17 +- .../kstream/TimeWindowedCogroupedKStream.java | 25 +- .../kstream/TimeWindowedDeserializer.java | 115 +- .../streams/kstream/TimeWindowedKStream.java | 422 +- .../kstream/TimeWindowedSerializer.java | 48 +- .../streams/kstream/TransformerSupplier.java | 1 - .../kafka/streams/kstream/ValueJoiner.java | 1 - .../streams/kstream/ValueJoinerWithKey.java | 1 - .../kafka/streams/kstream/ValueMapper.java | 1 - .../streams/kstream/ValueMapperWithKey.java | 1 - .../kstream/ValueTransformerSupplier.java | 1 - .../ValueTransformerWithKeySupplier.java | 1 - .../kafka/streams/kstream/WindowedSerdes.java | 4 +- .../kstream/internals/AbstractStream.java | 16 +- .../CogroupedStreamAggregateBuilder.java | 53 +- .../kstream/internals/ForeachProcessor.java | 4 +- .../GroupedStreamAggregateBuilder.java | 86 +- .../internals/InternalStreamsBuilder.java | 19 +- .../kstream/internals/KGroupedStreamImpl.java | 26 +- .../kstream/internals/KGroupedTableImpl.java | 13 +- .../kstream/internals/KStreamFilter.java | 4 +- .../kstream/internals/KStreamImpl.java | 887 +- .../kstream/internals/KStreamImplJoin.java | 17 - .../kstream/internals/KStreamKStreamJoin.java | 2 +- .../internals/KStreamKStreamSelfJoin.java | 2 +- .../kstream/internals/KStreamKTableJoin.java | 23 +- .../internals/KStreamKTableJoinProcessor.java | 48 +- .../kstream/internals/KStreamMapValues.java | 4 +- .../kstream/internals/KStreamPeek.java | 4 +- .../kstream/internals/KTableFilter.java | 2 +- .../streams/kstream/internals/KTableImpl.java | 220 +- .../kstream/internals/KTablePassThrough.java | 1 - .../internals/KTableRepartitionMap.java | 22 +- .../internals/KTableTransformValues.java | 6 +- .../internals/MaterializedInternal.java | 8 - .../internals/SessionWindowedKStreamImpl.java | 79 +- .../internals/SlidingWindowedKStreamImpl.java | 112 +- .../internals/TimeWindowedKStreamImpl.java | 116 +- .../internals/WrappingNullableUtils.java | 34 +- .../internals/foreignkeyjoin/CombinedKey.java | 13 +- .../foreignkeyjoin/CombinedKeySchema.java | 34 +- .../foreignkeyjoin/ForeignKeyExtractor.java | 14 +- .../ForeignTableJoinProcessorSupplier.java | 26 +- .../ResponseJoinProcessorSupplier.java | 45 +- .../SubscriptionJoinProcessorSupplier.java | 59 +- .../SubscriptionReceiveProcessorSupplier.java | 29 +- .../SubscriptionResponseWrapper.java | 10 +- .../SubscriptionResponseWrapperSerde.java | 16 +- .../SubscriptionSendProcessorSupplier.java | 84 +- .../foreignkeyjoin/SubscriptionWrapper.java | 11 +- .../SubscriptionWrapperSerde.java | 48 +- .../internals/graph/BaseRepartitionNode.java | 33 +- .../internals/graph/GraphGraceSearchUtil.java | 38 +- .../kstream/internals/graph/GraphNode.java | 4 +- .../GroupedTableOperationRepartitionNode.java | 7 +- .../graph/OptimizableRepartitionNode.java | 6 +- .../internals/graph/StreamTableJoinNode.java | 6 +- .../internals/graph/TableSourceNode.java | 5 +- .../graph/UnoptimizableRepartitionNode.java | 3 +- .../BatchingStateRestoreCallback.java | 2 +- .../kafka/streams/processor/Cancellable.java | 1 - .../streams/processor/ProcessorContext.java | 13 +- .../kafka/streams/processor/Punctuator.java | 12 +- .../streams/processor/RecordContext.java | 27 - .../processor/StateRestoreCallback.java | 1 - .../streams/processor/StreamPartitioner.java | 1 - .../streams/processor/TimestampExtractor.java | 1 - .../streams/processor/TopicNameExtractor.java | 1 - .../processor/api/FixedKeyProcessor.java | 1 - .../processor/api/ProcessingContext.java | 6 - .../streams/processor/api/Processor.java | 1 - .../api/WrappedFixedKeyProcessorSupplier.java | 1 - .../api/WrappedProcessorSupplier.java | 1 - .../assignment/TaskAssignmentUtils.java | 10 +- .../assignors/StickyTaskAssignor.java | 4 +- .../internals/AbstractReadOnlyDecorator.java | 3 + .../internals/ActiveTaskCreator.java | 24 +- .../internals/DefaultStateUpdater.java | 71 +- .../internals/GlobalProcessorContextImpl.java | 2 +- .../internals/GlobalStateManagerImpl.java | 4 +- .../internals/GlobalStateUpdateTask.java | 16 +- .../internals/GlobalStreamThread.java | 4 - .../internals/InternalTopicManager.java | 2 +- .../internals/InternalTopologyBuilder.java | 318 +- .../internals/ProcessorContextImpl.java | 13 +- .../internals/ProcessorContextUtils.java | 20 +- .../processor/internals/ProcessorNode.java | 36 +- .../internals/ProcessorRecordContext.java | 40 +- .../internals/ProcessorStateManager.java | 43 +- .../internals/PunctuationSchedule.java | 8 +- .../processor/internals/ReadOnlyTask.java | 2 +- .../processor/internals/RecordCollector.java | 7 - .../internals/RecordCollectorImpl.java | 96 +- .../internals/RecordDeserializer.java | 32 +- .../processor/internals/RecordQueue.java | 2 +- .../internals/RepartitionTopics.java | 2 +- .../streams/processor/internals/SinkNode.java | 14 +- .../processor/internals/SourceNode.java | 4 +- .../streams/processor/internals/Stamped.java | 6 +- .../processor/internals/StampedRecord.java | 32 - .../processor/internals/StandbyTask.java | 10 +- .../internals/StandbyTaskCreator.java | 6 +- .../processor/internals/StateDirectory.java | 133 +- .../processor/internals/StateManagerUtil.java | 2 +- .../internals/StaticTopicNameExtractor.java | 5 +- .../processor/internals/StreamTask.java | 45 +- .../processor/internals/StreamThread.java | 394 +- .../internals/StreamsMetadataState.java | 2 +- .../processor/internals/StreamsProducer.java | 16 - .../streams/processor/internals/Task.java | 2 +- .../processor/internals/TaskExecutor.java | 2 +- .../processor/internals/TaskManager.java | 111 +- .../assignment/DefaultKafkaStreamsState.java | 6 +- .../processor/internals/assignment/Graph.java | 2 +- .../assignment/RackAwareTaskAssignor.java | 2 +- .../assignment/SubscriptionInfo.java | 2 +- .../internals/metrics/StreamsMetricsImpl.java | 20 +- .../internals/metrics/ThreadMetrics.java | 3 +- .../KafkaStreamsNamedTopologyWrapper.java | 4 +- .../apache/kafka/streams/query/Position.java | 8 +- .../streams/query/StateQueryRequest.java | 8 +- .../streams/state/QueryableStoreTypes.java | 17 +- .../kafka/streams/state/StateSerdes.java | 3 +- ...tDualSchemaRocksDBSegmentedBytesStore.java | 2 +- ...bstractMergedSortedCacheStoreIterator.java | 136 +- .../AbstractRocksDBSegmentedBytesStore.java | 5 +- .../state/internals/CachingKeyValueStore.java | 14 +- .../state/internals/CachingSessionStore.java | 13 +- .../state/internals/CachingWindowStore.java | 13 +- .../ChangeLoggingKeyValueBytesStore.java | 12 +- .../ChangeLoggingListValueBytesStore.java | 4 +- .../ChangeLoggingSessionBytesStore.java | 4 +- ...eLoggingTimestampedKeyValueBytesStore.java | 6 +- ...ngeLoggingTimestampedWindowBytesStore.java | 2 +- .../ChangeLoggingWindowBytesStore.java | 2 +- .../internals/GlobalStateStoreProvider.java | 4 +- .../state/internals/InMemorySessionStore.java | 4 +- ...MemoryTimeOrderedKeyValueChangeBuffer.java | 2 +- .../state/internals/InMemoryWindowStore.java | 5 +- .../state/internals/KeyValueIterators.java | 4 +- .../state/internals/LRUCacheEntry.java | 16 +- .../internals/LogicalSegmentIterator.java | 4 +- .../state/internals/MeteredKeyValueStore.java | 27 +- ...MeteredMultiVersionedKeyQueryIterator.java | 12 +- .../state/internals/MeteredSessionStore.java | 11 +- .../MeteredTimestampedKeyValueStore.java | 2 + .../MeteredVersionedKeyValueStore.java | 1 + .../state/internals/MeteredWindowStore.java | 11 +- ...ToDbOptionsColumnFamilyOptionsAdapter.java | 27 +- .../internals/RocksDBSegmentedBytesStore.java | 2 +- .../streams/state/internals/RocksDBStore.java | 2 +- ...RocksDBTimestampedSegmentedBytesStore.java | 2 +- .../internals/RocksDBTimestampedStore.java | 4 +- .../state/internals/StoreQueryUtils.java | 6 +- .../StreamThreadStateStoreProvider.java | 2 +- .../TimeOrderedCachingWindowStore.java | 27 +- .../internals/WrappingStoreProvider.java | 2 - .../internals/metrics/StateStoreMetrics.java | 13 +- .../kafka/streams/AutoOffsetResetTest.java | 2 +- .../kafka/streams/KafkaStreamsTest.java | 169 +- .../kafka/streams/StreamsBuilderTest.java | 559 +- .../kafka/streams/StreamsConfigTest.java | 210 +- .../apache/kafka/streams/TopologyTest.java | 172 +- .../internals/metrics/ClientMetricsTest.java | 2 +- ...msThreadMetricsDelegatingReporterTest.java | 3 +- .../streams/kstream/MaterializedTest.java | 10 +- .../kafka/streams/kstream/PrintedTest.java | 14 +- .../kstream/RepartitionTopicNamingTest.java | 57 +- .../SessionWindowedDeserializerTest.java | 55 +- .../SessionWindowedSerializerTest.java | 55 +- .../streams/kstream/SessionWindowsTest.java | 6 +- .../kstream/TimeWindowedDeserializerTest.java | 102 +- .../kstream/TimeWindowedSerializerTest.java | 54 +- .../internals/InternalStreamsBuilderTest.java | 7 +- .../internals/KGroupedStreamImplTest.java | 35 - .../KStreamGlobalKTableJoinTest.java | 112 +- .../KStreamGlobalKTableLeftJoinTest.java | 140 +- .../kstream/internals/KStreamImplTest.java | 268 +- .../internals/KStreamKStreamJoinTest.java | 12 +- .../internals/KStreamKStreamLeftJoinTest.java | 10 +- .../KStreamKStreamOuterJoinTest.java | 10 +- .../internals/KStreamKTableJoinTest.java | 75 +- .../internals/KStreamKTableLeftJoinTest.java | 66 +- .../kstream/internals/KTableImplTest.java | 158 +- .../internals/KTableKTableInnerJoinTest.java | 4 +- .../internals/KTableKTableLeftJoinTest.java | 52 +- .../internals/KTableKTableOuterJoinTest.java | 4 +- .../internals/KTableTransformValuesTest.java | 7 - .../internals/MaterializedInternalTest.java | 6 +- .../internals/SuppressScenarioTest.java | 31 +- ...SubscriptionSendProcessorSupplierTest.java | 82 +- .../graph/GraphGraceSearchUtilTest.java | 44 +- .../internals/graph/StreamsGraphTest.java | 180 +- .../streams/processor/ReadOnlyStoreTest.java | 2 +- .../AbstractProcessorContextTest.java | 2 +- .../internals/ActiveTaskCreatorTest.java | 19 +- .../CopartitionedTopicsEnforcerTest.java | 6 +- .../internals/DefaultStateUpdaterTest.java | 94 +- .../internals/GlobalStateManagerImplTest.java | 85 +- .../internals/GlobalStateTaskTest.java | 33 - .../internals/GlobalStreamThreadTest.java | 14 +- .../internals/InternalTopicManagerTest.java | 103 +- .../InternalTopologyBuilderTest.java | 12 +- .../internals/NamedTopologyTest.java | 6 +- .../internals/ProcessorContextImplTest.java | 172 +- .../internals/ProcessorMetadataTest.java | 4 +- .../internals/ProcessorNodeTest.java | 149 +- .../internals/ProcessorRecordContextTest.java | 1 + .../processor/internals/ReadOnlyTaskTest.java | 49 +- .../internals/RecordCollectorTest.java | 312 +- .../internals/RecordDeserializerTest.java | 168 +- .../internals/RepartitionOptimizingTest.java | 39 +- .../RepartitionWithMergeOptimizingTest.java | 4 + .../processor/internals/SinkNodeTest.java | 10 +- .../processor/internals/SourceNodeTest.java | 6 +- .../processor/internals/StandbyTaskTest.java | 16 +- .../internals/StateDirectoryTest.java | 169 +- .../internals/StateManagerUtilTest.java | 2 +- .../internals/StoreChangelogReaderTest.java | 10 +- .../processor/internals/StreamTaskTest.java | 108 +- .../processor/internals/StreamThreadTest.java | 716 +- .../StreamsPartitionAssignorTest.java | 24 +- .../internals/StreamsProducerTest.java | 10 +- .../internals/TaskExecutionMetadataTest.java | 3 +- .../processor/internals/TaskManagerTest.java | 291 +- .../assignment/AssignmentTestUtils.java | 2 +- .../LegacyStickyTaskAssignorTest.java | 117 +- .../assignment/RackAwareTaskAssignorTest.java | 14 +- .../assignment/SubscriptionInfoTest.java | 10 +- .../assignment/TaskAssignmentUtilsTest.java | 12 +- .../TaskAssignorConvergenceTest.java | 8 +- .../metrics/StreamsMetricsImplTest.java | 60 +- .../internals/metrics/ThreadMetricsTest.java | 2 +- .../tasks/DefaultTaskExecutorTest.java | 2 +- .../tasks/DefaultTaskManagerTest.java | 6 - .../kafka/streams/query/PositionTest.java | 127 - .../AbstractRocksDBWindowStoreTest.java | 110 +- .../AbstractSessionBytesStoreTest.java | 32 +- .../AbstractWindowBytesStoreTest.java | 136 +- .../CachingPersistentSessionStoreTest.java | 10 +- .../ChangeLoggingSessionBytesStoreTest.java | 5 - ...oggingTimestampedWindowBytesStoreTest.java | 13 +- .../ChangeLoggingWindowBytesStoreTest.java | 13 +- .../internals/FilteredCacheIteratorTest.java | 16 +- .../GlobalStateStoreProviderTest.java | 4 +- .../internals/InMemorySessionStoreTest.java | 5 +- .../LogicalKeyValueSegmentsTest.java | 6 +- .../internals/MeteredKeyValueStoreTest.java | 12 +- .../MeteredTimestampedKeyValueStoreTest.java | 12 +- .../MeteredVersionedKeyValueStoreTest.java | 12 +- .../internals/MeteredWindowStoreTest.java | 4 +- .../MonotonicProcessorRecordContext.java | 6 + .../streams/state/internals/Murmur3Test.java | 10 +- .../state/internals/NamedCacheTest.java | 20 +- .../internals/ReadOnlyWindowStoreStub.java | 12 +- ...OptionsColumnFamilyOptionsAdapterTest.java | 70 +- .../state/internals/RocksDBStoreTest.java | 2 +- .../RocksDBTimeOrderedKeyValueBufferTest.java | 6 +- .../internals/SegmentedCacheFunctionTest.java | 131 +- .../state/internals/ThreadCacheTest.java | 10 +- ...deredCachingPersistentWindowStoreTest.java | 11 +- .../TimeOrderedKeyValueBufferTest.java | 2 +- .../internals/TimeOrderedWindowStoreTest.java | 11 +- .../state/internals/WindowStoreFetchTest.java | 3 + .../internals/WrappingStoreProviderTest.java | 6 +- .../kafka/streams/tests/EosTestClient.java | 52 +- .../kafka/streams/tests/EosTestDriver.java | 53 +- .../streams/tests/RelationalSmokeTest.java | 22 +- .../tests/RelationalSmokeTestTest.java | 1 - .../streams/tests/ShutdownDeadlockTest.java | 8 +- .../kafka/streams/tests/SmokeTestClient.java | 15 +- .../kafka/streams/tests/SmokeTestDriver.java | 91 +- .../kafka/streams/tests/SmokeTestUtil.java | 2 +- .../StreamsBrokerDownResilienceTest.java | 2 +- .../kafka/streams/tests/StreamsEosTest.java | 4 +- .../tests/StreamsNamedRepartitionTest.java | 2 +- .../kafka/streams/tests/StreamsSmokeTest.java | 7 +- .../tests/StreamsStandByReplicaTest.java | 1 + .../apache/kafka/streams/utils/TestUtils.java | 2 +- .../test/InternalMockProcessorContext.java | 5 +- .../kafka/test/MockApiFixedKeyProcessor.java | 1 + .../apache/kafka/test/MockApiProcessor.java | 1 + .../kafka/test/MockInternalTopicManager.java | 2 +- .../kafka/test/MockRecordCollector.java | 17 - .../apache/kafka/test/MockValueJoiner.java | 7 +- .../kafka/test/NoOpProcessorContext.java | 6 +- .../NoOpValueTransformerWithKeySupplier.java | 2 +- .../kafka/test/ReadOnlySessionStoreStub.java | 4 +- .../kafka/test/StateStoreProviderStub.java | 2 +- .../src/test/resources/log4j2.yaml | 16 +- .../kafka/streams/TopologyTestDriver.java | 11 +- .../processor/MockProcessorContext.java | 2 +- .../processor/api/MockProcessorContext.java | 4 +- .../test/MockProcessorContextAPITest.java | 4 +- .../MockProcessorContextStateStoreTest.java | 2 +- .../streams/tests/StreamsUpgradeTest.java | 2 +- .../streams/tests/StreamsUpgradeTest.java | 2 +- .../kafka/common/test/api/ClusterConfig.java | 94 +- .../test/api/ClusterConfigProperty.java | 6 +- .../common/test/api/ClusterGenerator.java | 25 + .../common/test/api/ClusterTemplate.java | 16 +- .../kafka/common/test/api/ClusterTest.java | 2 +- .../common/test/api/ClusterTestDefaults.java | 2 +- .../apache/kafka/common/test/api/README.md | 175 +- .../common/test/api/ClusterConfigTest.java | 16 +- .../kafka/common/test/ClusterInstance.java | 293 +- .../apache/kafka/common/test/JaasUtils.java | 2 +- .../common/test/KafkaClusterTestKit.java | 196 +- .../kafka/common/test/MockController.java | 3 +- .../test/PreboundSocketFactoryManager.java | 18 +- .../kafka/common/test/TestKitNodes.java | 22 +- .../apache/kafka/common/test/TestUtils.java | 78 +- .../ClusterInstanceParameterResolver.java | 3 +- .../test/junit/ClusterTestExtensions.java | 49 +- .../junit/RaftClusterInvocationContext.java | 77 +- .../common/test/KafkaClusterTestKitTest.java | 60 +- .../kafka/common/test/TestKitNodeTest.java | 18 +- .../test/junit/ClusterTestExtensionsTest.java | 134 +- .../junit/ClusterTestExtensionsUnitTest.java | 27 +- .../apache/kafka/common/test/api/Flaky.java | 2 +- .../test/junit/AutoQuarantinedTestFilter.java | 172 + .../junit/QuarantinedPostDiscoveryFilter.java | 87 + ...unit.platform.launcher.PostDiscoveryFilter | 2 +- .../junit/AutoQuarantinedTestFilterTest.java | 82 + .../QuarantinedPostDiscoveryFilterTest.java | 175 + tests/README.md | 9 +- tests/docker/Dockerfile | 28 +- tests/docker/ducker-ak | 32 +- tests/kafkatest/__init__.py | 2 +- .../benchmarks/core/benchmark_test.py | 127 +- .../test_performance_services.py | 32 +- .../sanity_checks/test_verifiable_producer.py | 10 +- tests/kafkatest/services/console_consumer.py | 19 +- .../services/kafka/config_property.py | 12 +- tests/kafkatest/services/kafka/kafka.py | 176 +- .../services/kafka/templates/kafka.properties | 5 - .../services/kafka/templates/log4j2.yaml | 16 +- .../services/log_compaction_tester.py | 12 +- .../services/performance/__init__.py | 1 - .../performance/consumer_performance.py | 15 +- .../performance/producer_performance.py | 2 +- .../kafkatest/services/security/kafka_acls.py | 12 +- tests/kafkatest/services/streams.py | 32 +- tests/kafkatest/services/streams_property.py | 1 - tests/kafkatest/services/verifiable_client.py | 16 +- .../kafkatest/services/verifiable_consumer.py | 9 +- .../kafkatest/services/verifiable_producer.py | 14 +- .../client_compatibility_features_test.py | 5 +- ...ient_compatibility_produce_consume_test.py | 5 +- .../consumer_protocol_migration_test.py | 8 +- .../client/consumer_rolling_upgrade_test.py | 5 +- tests/kafkatest/tests/client/consumer_test.py | 88 +- tests/kafkatest/tests/client/quota_test.py | 2 +- .../kafkatest/tests/client/truncation_test.py | 4 +- .../tests/connect/connect_distributed_test.py | 85 +- .../tests/connect/connect_rest_test.py | 6 +- tests/kafkatest/tests/core/authorizer_test.py | 5 +- .../compatibility_test_new_broker_test.py | 4 +- .../tests/core/consume_bench_test.py | 46 +- .../tests/core/consumer_group_command_test.py | 16 +- .../tests/core/fetch_from_follower_test.py | 7 +- .../tests/core/reassign_partitions_test.py | 15 +- .../tests/core/replica_scale_test.py | 15 +- .../core/replication_replica_failure_test.py | 7 +- .../kafkatest/tests/core/replication_test.py | 9 +- tests/kafkatest/tests/core/security_test.py | 23 +- tests/kafkatest/tests/core/snapshot_test.py | 14 +- .../core/transactions_mixed_versions_test.py | 8 +- .../kafkatest/tests/core/transactions_test.py | 12 +- .../tests/core/transactions_upgrade_test.py | 8 +- tests/kafkatest/tests/core/upgrade_test.py | 45 +- .../tests/streams/base_streams_test.py | 49 +- .../streams_application_upgrade_test.py | 47 +- .../streams/streams_broker_bounce_test.py | 45 +- .../streams_broker_compatibility_test.py | 7 +- .../streams_broker_down_resilience_test.py | 61 +- .../tests/streams/streams_eos_test.py | 82 +- .../streams/streams_relational_smoke_test.py | 25 +- .../tests/streams/streams_smoke_test.py | 22 +- .../streams/streams_standby_replica_test.py | 12 +- .../streams/streams_static_membership_test.py | 4 +- .../tests/streams/streams_upgrade_test.py | 21 +- .../tests/tools/log_compaction_test.py | 21 +- tests/kafkatest/utils/util.py | 2 +- tests/kafkatest/version.py | 55 +- tests/setup.py | 3 +- .../org/apache/kafka/tools/AclCommand.java | 200 +- .../kafka/tools/BrokerApiVersionsCommand.java | 2 +- .../kafka/tools/ClientCompatibilityTest.java | 8 +- .../kafka/tools/ClientMetricsCommand.java | 81 +- .../org/apache/kafka/tools/ClusterTool.java | 31 +- .../apache/kafka/tools/ConnectPluginPath.java | 64 +- .../apache/kafka/tools/ConsoleProducer.java | 43 +- .../kafka/tools/ConsumerPerformance.java | 241 +- .../kafka/tools/DelegationTokenCommand.java | 20 +- .../kafka/tools/DeleteRecordsCommand.java | 2 + .../apache/kafka/tools/EndToEndLatency.java | 326 +- .../apache/kafka/tools/FeatureCommand.java | 27 +- .../apache/kafka/tools/GetOffsetShell.java | 15 +- .../org/apache/kafka/tools/GroupsCommand.java | 42 +- .../java/org/apache/kafka/tools/JmxTool.java | 21 +- .../kafka/tools/LeaderElectionCommand.java | 36 +- .../apache/kafka/tools/ManifestWorkspace.java | 27 +- .../kafka/tools/MetadataQuorumCommand.java | 91 +- .../kafka/tools/OAuthCompatibilityTool.java | 73 +- .../kafka/tools/ProducerPerformance.java | 253 +- .../kafka/tools/PushHttpMetricsReporter.java | 87 +- .../kafka/tools/ReplicaVerificationTool.java | 27 +- .../apache/kafka/tools/StreamsResetter.java | 55 +- .../org/apache/kafka/tools/TopicCommand.java | 162 +- .../tools/TransactionalMessageCopier.java | 11 +- .../kafka/tools/TransactionsCommand.java | 133 +- .../kafka/tools/VerifiableConsumer.java | 194 +- .../kafka/tools/VerifiableProducer.java | 119 +- .../tools/consumer/ApiMessageFormatter.java | 83 + .../kafka/tools/consumer/ConsoleConsumer.java | 9 +- .../consumer/ConsoleConsumerOptions.java | 92 +- .../tools/consumer/ConsoleShareConsumer.java | 20 +- .../consumer/ConsoleShareConsumerOptions.java | 85 +- .../GroupMetadataMessageFormatter.java | 44 +- .../consumer/OffsetsMessageFormatter.java | 45 +- .../TransactionLogMessageFormatter.java | 37 +- .../consumer/group/ConsumerGroupCommand.java | 608 +- .../group/ConsumerGroupCommandOptions.java | 18 +- .../kafka/tools/consumer/group/CsvUtils.java | 4 +- .../consumer/group/GroupInformation.java | 28 +- .../consumer/group/MemberAssignmentState.java | 42 +- .../group/PartitionAssignmentState.java | 42 +- .../consumer/group/ShareGroupCommand.java | 505 +- .../group/ShareGroupCommandOptions.java | 147 +- .../ShareGroupStateMessageFormatter.java | 148 +- .../kafka/tools/reassign/ActiveMoveState.java | 37 +- .../tools/reassign/CancelledMoveState.java | 32 +- .../tools/reassign/CompletedMoveState.java | 27 +- .../reassign/MissingLogDirMoveState.java | 27 +- .../reassign/MissingReplicaMoveState.java | 27 +- .../reassign/PartitionReassignmentState.java | 41 +- .../reassign/ReassignPartitionsCommand.java | 373 +- .../ReassignPartitionsCommandOptions.java | 2 - .../reassign/VerifyAssignmentResult.java | 51 +- .../tools/AbstractResetIntegrationTest.java | 14 +- .../apache/kafka/tools/AclCommandTest.java | 25 +- .../tools/BrokerApiVersionsCommandTest.java | 4 +- .../kafka/tools/ClientMetricsCommandTest.java | 96 +- .../apache/kafka/tools/ClusterToolTest.java | 85 +- .../kafka/tools/ConnectPluginPathTest.java | 93 +- .../kafka/tools/ConsoleProducerTest.java | 119 +- .../kafka/tools/ConsumerPerformanceTest.java | 190 +- .../tools/DelegationTokenCommandTest.java | 159 +- .../kafka/tools/DeleteRecordsCommandTest.java | 11 +- .../kafka/tools/EndToEndLatencyTest.java | 261 +- .../kafka/tools/FeatureCommandTest.java | 94 +- .../tools/GetOffsetShellParsingTest.java | 2 +- .../kafka/tools/GetOffsetShellTest.java | 70 +- .../apache/kafka/tools/GroupsCommandTest.java | 154 +- .../org/apache/kafka/tools/JmxToolTest.java | 14 +- .../tools/LeaderElectionCommandErrorTest.java | 1 + .../tools/LeaderElectionCommandTest.java | 106 +- .../kafka/tools/LineMessageReaderTest.java | 43 +- .../kafka/tools/LogDirsCommandTest.java | 42 +- .../tools/MetadataQuorumCommandTest.java | 5 +- .../tools/MetadataQuorumCommandUnitTest.java | 15 +- .../kafka/tools/ProducerPerformanceTest.java | 362 +- .../tools/PushHttpMetricsReporterTest.java | 41 +- .../tools/ReplicaVerificationToolTest.java | 9 +- .../kafka/tools/ResetIntegrationTest.java | 81 - .../kafka/tools/StreamsResetterTest.java | 22 +- .../apache/kafka/tools/ToolsTestUtils.java | 3 +- .../apache/kafka/tools/TopicCommandTest.java | 322 +- .../kafka/tools/TransactionsCommandTest.java | 196 +- .../consumer/ConsoleConsumerOptionsTest.java | 432 +- .../tools/consumer/ConsoleConsumerTest.java | 23 +- .../ConsoleShareConsumerOptionsTest.java | 416 +- .../consumer/ConsoleShareConsumerTest.java | 45 - .../GroupMetadataMessageFormatterTest.java | 274 +- .../consumer/OffsetMessageFormatterTest.java | 230 +- .../TransactionLogMessageFormatterTest.java | 151 +- .../group/AuthorizerIntegrationTest.java | 15 +- .../group/ConsumerGroupCommandTestUtils.java | 52 +- .../group/ConsumerGroupServiceTest.java | 88 +- .../group/DeleteConsumerGroupsTest.java | 45 +- ...tsConsumerGroupCommandIntegrationTest.java | 76 +- .../group/DescribeConsumerGroupTest.java | 334 +- .../consumer/group/ListConsumerGroupTest.java | 314 +- .../group/ResetConsumerGroupOffsetTest.java | 224 +- ...SaslClientsWithInvalidCredentialsTest.java | 35 +- .../consumer/group/ShareGroupCommandTest.java | 1438 +- .../ShareGroupStateMessageFormatterTest.java | 94 +- .../tools/other/ReplicationQuotasTestRig.java | 18 +- .../ReassignPartitionsCommandTest.java | 308 +- .../reassign/ReassignPartitionsUnitTest.java | 340 +- .../kafka/tools/api/RecordReaderTest.java | 4 +- .../transaction/TransactionLogConfig.java | 10 +- .../TransactionStateManagerConfig.java | 15 +- .../common/message/TransactionLogKey.json | 3 +- .../common/message/TransactionLogValue.json | 5 +- .../transaction/ProducerIdManagerTest.java | 2 +- .../transaction/TransactionLogConfigTest.java | 39 +- .../TransactionStateManagerConfigTest.java | 8 +- trogdor/README.md | 4 +- .../kafka/trogdor/agent/AgentClient.java | 5 +- .../apache/kafka/trogdor/basic/BasicNode.java | 9 +- .../kafka/trogdor/common/StringFormatter.java | 4 +- .../coordinator/CoordinatorClient.java | 17 +- .../trogdor/coordinator/TaskManager.java | 17 +- .../fault/DegradedNetworkFaultSpec.java | 3 +- .../apache/kafka/trogdor/fault/Kibosh.java | 3 +- .../fault/NetworkPartitionFaultWorker.java | 4 +- .../kafka/trogdor/rest/ErrorResponse.java | 23 +- .../kafka/trogdor/rest/TaskRequest.java | 9 +- .../kafka/trogdor/rest/TaskStateType.java | 5 +- .../kafka/trogdor/rest/TasksRequest.java | 5 +- .../kafka/trogdor/rest/TasksResponse.java | 3 +- .../apache/kafka/trogdor/task/TaskSpec.java | 3 +- .../workload/ConfigurableProducerSpec.java | 4 +- .../workload/ConfigurableProducerWorker.java | 2 + .../workload/ConnectionStressSpec.java | 5 +- .../workload/ConnectionStressWorker.java | 11 +- .../trogdor/workload/ConsumeBenchSpec.java | 3 +- .../trogdor/workload/ConsumeBenchWorker.java | 7 +- .../trogdor/workload/ExternalCommandSpec.java | 7 +- .../workload/ExternalCommandWorker.java | 2 +- .../kafka/trogdor/workload/Histogram.java | 3 +- .../trogdor/workload/PartitionsSpec.java | 9 +- .../trogdor/workload/PayloadKeyType.java | 39 + .../trogdor/workload/ProduceBenchSpec.java | 4 +- .../trogdor/workload/RandomComponent.java | 18 +- .../workload/RoundTripWorkloadSpec.java | 4 +- .../workload/ShareConsumeBenchSpec.java | 5 +- .../workload/SustainedConnectionSpec.java | 4 +- .../workload/SustainedConnectionWorker.java | 6 +- .../kafka/trogdor/workload/TopicsSpec.java | 5 +- .../apache/kafka/trogdor/agent/AgentTest.java | 27 +- .../trogdor/common/JsonSerializationTest.java | 5 +- .../kafka/trogdor/common/JsonUtilTest.java | 8 +- .../trogdor/common/MiniTrogdorCluster.java | 5 +- .../trogdor/common/StringExpanderTest.java | 30 +- .../trogdor/common/StringFormatterTest.java | 8 +- .../kafka/trogdor/common/WorkerUtilsTest.java | 84 +- .../trogdor/coordinator/CoordinatorTest.java | 5 +- .../kafka/trogdor/task/SampleTaskSpec.java | 5 +- .../workload/ConsumeBenchSpecTest.java | 21 +- .../workload/ExternalCommandWorkerTest.java | 4 +- .../workload/PayloadGeneratorTest.java | 17 +- .../workload/ShareConsumeBenchSpecTest.java | 19 +- .../trogdor/workload/TopicsSpecTest.java | 15 +- vagrant/base.sh | 18 +- vagrant/system-test-Vagrantfile.local | 2 +- wrapper.gradle | 1 + 2938 files changed, 135333 insertions(+), 142517 deletions(-) create mode 100644 .github/workflows/ci-requested.yml create mode 100644 .github/workflows/pr-reviewed-trigger.yml create mode 100644 Dockerfile create mode 100644 PULL_REQUEST_TEMPLATE.md create mode 100644 clients/src/main/java/org/apache/kafka/common/record/ConvertedRecords.java create mode 100644 clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java create mode 100644 clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/Initable.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java create mode 100644 clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java create mode 100644 clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java create mode 100644 clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json create mode 100644 clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java create mode 100644 clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java create mode 100644 core/src/main/java/kafka/log/remote/RemoteLogManager.java create mode 100644 core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java create mode 100644 core/src/main/java/kafka/log/remote/RemoteLogReader.java create mode 100644 core/src/main/scala/kafka/MetadataLogConfig.scala create mode 100644 core/src/main/scala/kafka/cluster/Broker.scala create mode 100644 core/src/main/scala/kafka/cluster/EndPoint.scala create mode 100644 core/src/main/scala/kafka/cluster/Replica.scala create mode 100644 core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala create mode 100644 core/src/main/scala/kafka/common/LogCleaningAbortedException.scala create mode 100644 core/src/main/scala/kafka/common/ThreadShutdownException.scala create mode 100644 core/src/main/scala/kafka/controller/ControllerContext.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/DelayedHeartbeat.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/DelayedJoin.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/DelayedRebalance.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/DelayedSync.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala create mode 100644 core/src/main/scala/kafka/coordinator/group/MemberMetadata.scala create mode 100644 core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala create mode 100644 core/src/main/scala/kafka/log/LogCleaner.scala create mode 100644 core/src/main/scala/kafka/log/LogCleanerManager.scala create mode 100644 core/src/main/scala/kafka/log/UnifiedLog.scala create mode 100644 core/src/main/scala/kafka/raft/RaftManager.scala create mode 100644 core/src/main/scala/kafka/raft/SegmentPosition.scala create mode 100644 core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala create mode 100644 core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala create mode 100644 core/src/main/scala/kafka/server/ApiVersionManager.scala create mode 100644 core/src/main/scala/kafka/server/ClientQuotaManager.scala create mode 100644 core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala create mode 100644 core/src/main/scala/kafka/server/DelayedDeleteRecords.scala create mode 100644 core/src/main/scala/kafka/server/DelayedElectLeader.scala create mode 100644 core/src/main/scala/kafka/server/DelayedFuture.scala create mode 100644 core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala create mode 100644 core/src/main/scala/kafka/server/DelegationTokenManager.scala create mode 100644 core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala create mode 100644 core/src/main/scala/kafka/server/LeaderEndPoint.scala create mode 100644 core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala create mode 100644 core/src/main/scala/kafka/server/MetadataCache.scala create mode 100644 core/src/main/scala/kafka/server/metadata/AclPublisher.scala create mode 100644 core/src/main/scala/kafka/server/metadata/ConfigRepository.scala create mode 100644 core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala create mode 100644 core/src/main/scala/kafka/server/metadata/ScramPublisher.scala create mode 100644 core/src/main/scala/kafka/server/metadata/ShareCoordinatorMetadataCacheHelperImpl.java create mode 100644 core/src/main/scala/kafka/utils/Log4jController.scala create mode 100644 core/src/main/scala/kafka/utils/Pool.scala create mode 100644 core/src/main/scala/kafka/utils/json/DecodeJson.scala create mode 100644 core/src/main/scala/kafka/utils/json/JsonArray.scala create mode 100644 core/src/main/scala/kafka/utils/json/JsonObject.scala create mode 100644 core/src/main/scala/kafka/utils/json/JsonValue.scala create mode 100644 core/src/test/java/kafka/admin/AdminFenceProducersTest.java create mode 100644 core/src/test/java/kafka/admin/ClientTelemetryTest.java create mode 100644 core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java create mode 100644 core/src/test/java/kafka/admin/ConfigCommandTest.java create mode 100644 core/src/test/java/kafka/admin/DeleteTopicTest.java create mode 100644 core/src/test/java/kafka/admin/DescribeAuthorizedOperationsTest.java create mode 100644 core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java create mode 100644 core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java create mode 100644 core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java create mode 100644 core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java create mode 100644 core/src/test/java/kafka/log/remote/RemoteLogReaderTest.java create mode 100644 core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java create mode 100644 core/src/test/java/kafka/server/LogManagerIntegrationTest.java create mode 100644 core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java create mode 100644 core/src/test/java/kafka/test/api/CustomQuotaCallbackTest.java create mode 100644 core/src/test/java/kafka/test/api/ShareConsumerTest.java create mode 100644 core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/LogAppendTimeTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/RebootstrapTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala create mode 100644 core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala create mode 100644 core/src/test/scala/integration/kafka/server/DelayedFutureTest.scala create mode 100644 core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala create mode 100644 core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala create mode 100644 core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala create mode 100644 core/src/test/scala/kafka/server/metadata/MockConfigRepository.scala create mode 100644 core/src/test/scala/kafka/tools/LogCompactionTester.scala create mode 100644 core/src/test/scala/unit/kafka/admin/AdminRackAwareTest.scala create mode 100644 core/src/test/scala/unit/kafka/admin/RackAwareTest.scala create mode 100644 core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/group/MemberMetadataTest.scala create mode 100644 core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala create mode 100644 core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala create mode 100644 core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala create mode 100644 core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala create mode 100644 core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala create mode 100644 core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/AbstractFetcherThreadWithIbp26Test.scala create mode 100644 core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala create mode 100644 core/src/test/scala/unit/kafka/server/metadata/MockConfigRepositoryTest.scala create mode 100644 core/src/test/scala/unit/kafka/utils/PoolTest.scala create mode 100644 core/src/test/scala/unit/kafka/utils/SchedulerTest.scala create mode 100644 group-coordinator/src/main/java/org/apache/kafka/coordinator/group/OffsetConfig.java create mode 100644 group-coordinator/src/main/java/org/apache/kafka/coordinator/group/modern/TopicMetadata.java create mode 100644 group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataKey.json create mode 100644 group-coordinator/src/main/resources/common/message/ShareGroupPartitionMetadataValue.json create mode 100644 group-coordinator/src/test/java/org/apache/kafka/coordinator/group/MetadataImageBuilder.java create mode 100644 group-coordinator/src/test/java/org/apache/kafka/coordinator/group/modern/TopicMetadataTest.java create mode 100644 metadata/src/main/java/org/apache/kafka/controller/LogReplayTracker.java create mode 100644 metadata/src/test/java/org/apache/kafka/controller/LogReplayTrackerTest.java create mode 100644 metadata/src/test/java/org/apache/kafka/metalog/LocalLogManager.java create mode 100644 metadata/src/test/java/org/apache/kafka/metalog/LocalLogManagerTest.java create mode 100644 metadata/src/test/java/org/apache/kafka/metalog/LocalLogManagerTestEnv.java create mode 100644 metadata/src/test/java/org/apache/kafka/metalog/MockMetaLogManagerListener.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/OffsetAndEpoch.java create mode 100644 raft/src/main/java/org/apache/kafka/raft/internals/UpdateVoterHandlerState.java create mode 100644 server-common/bin/test/log4j2.yaml create mode 100644 server-common/src/main/java/org/apache/kafka/admin/AdminUtils.java create mode 100644 server-common/src/main/java/org/apache/kafka/admin/BrokerMetadata.java create mode 100644 server-common/src/main/java/org/apache/kafka/security/PasswordEncoder.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/common/AdminCommandFailedException.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/common/AdminOperationException.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/common/MetadataVersionValidator.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/common/TopicOptionalIdPartition.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/purgatory/GroupJoinKey.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/purgatory/GroupSyncKey.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/purgatory/MemberKey.java create mode 100644 server-common/src/main/java/org/apache/kafka/server/share/persister/NoOpShareStatePersister.java create mode 100644 server-common/src/test/java/org/apache/kafka/server/common/MetadataVersionValidatorTest.java create mode 100644 server/src/main/java/org/apache/kafka/security/CredentialProvider.java create mode 100644 server/src/main/java/org/apache/kafka/server/config/DelegationTokenManagerConfigs.java create mode 100644 server/src/main/java/org/apache/kafka/server/config/ServerConfigs.java create mode 100644 server/src/main/java/org/apache/kafka/server/share/session/LastUsedKey.java create mode 100644 test-common/test-common-internal-api/src/main/java/org/apache/kafka/common/test/api/ClusterGenerator.java create mode 100644 test-common/test-common-util/src/main/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilter.java create mode 100644 test-common/test-common-util/src/main/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilter.java create mode 100644 test-common/test-common-util/src/test/java/org/apache/kafka/common/test/junit/AutoQuarantinedTestFilterTest.java create mode 100644 test-common/test-common-util/src/test/java/org/apache/kafka/common/test/junit/QuarantinedPostDiscoveryFilterTest.java create mode 100644 tools/src/main/java/org/apache/kafka/tools/consumer/ApiMessageFormatter.java create mode 100644 trogdor/src/main/java/org/apache/kafka/trogdor/workload/PayloadKeyType.java diff --git a/.asf.yaml b/.asf.yaml index 5d88fe28742fb..7be98003c67f0 100644 --- a/.asf.yaml +++ b/.asf.yaml @@ -15,10 +15,6 @@ # specific language governing permissions and limitations # under the License. -# Enable the next-gen .asf.yaml parser -meta: - nextgen: true - notifications: commits: commits@kafka.apache.org issues: jira@kafka.apache.org @@ -29,22 +25,13 @@ notifications: # Read more here: https://github.com/apache/infrastructure-asfyaml github: collaborators: - - m1a2st - - smjn - - TaiJuWu + - FrankYang0529 + - kirktrue - brandboat - - Yunyung - - xijiu - - chirag-wadhwa5 - - mingyen066 - - ShivsundarR - - Rancho-7 - enabled_merge_buttons: - squash: true - squash_commit_message: PR_TITLE_AND_DESC - merge: false - rebase: false - - # Disable legacy branch protections. We have manual rulesets which protect trunk - # and our release branches. See INFRA-26603 - protected_branches: ~ + - AndrewJSchofield + - OmniaGM + - nizhikov + - dongnuo123 + - gaurav-narula + - apourchet + - apoorvmittal10 diff --git a/.github/actions/run-gradle/action.yml b/.github/actions/run-gradle/action.yml index 9c8e0945184bb..18d6bdeb1f558 100644 --- a/.github/actions/run-gradle/action.yml +++ b/.github/actions/run-gradle/action.yml @@ -22,10 +22,7 @@ inputs: # Composite actions do not support typed parameters. Everything is treated as a string # See: https://github.com/actions/runner/issues/2238 test-task: - description: "The Gradle task name to run." - required: true - test-xml-output: - description: "Output directory for JUnit XML results" + description: "The test suite to run. Either 'test' or 'quarantinedTest'." required: true timeout-minutes: description: "The timeout for the tests, in minutes." @@ -36,32 +33,10 @@ inputs: build-scan-artifact-name: description: "The name to use for archiving the build scan." required: true - test-retries: - description: "The number of retries for a given test should we allow" - required: true - default: "0" - test-repeat: - description: "The number of times to repeat the integration tests" - required: true - default: "1" - test-verbose: - description: "Enable additional logging by the JUnit infrastructure" - required: true - default: "false" - run-new-tests: - description: "Run tests not present in the given test catalog" - required: true - default: "false" - run-flaky-tests: - description: "Run tests marked as flaky" - required: true - default: "false" - outputs: gradle-exitcode: description: "The result of the Gradle test task." value: ${{ steps.run-tests.outputs.exitcode }} - runs: using: "composite" steps: @@ -77,31 +52,16 @@ runs: TIMEOUT_MINUTES: ${{ inputs.timeout-minutes}} TEST_CATALOG: ${{ inputs.test-catalog-path }} TEST_TASK: ${{ inputs.test-task }} - TEST_RETRIES: ${{ inputs.test-retries }} - TEST_REPEAT: ${{ inputs.test-repeat }} - RUN_NEW_TESTS: ${{ inputs.run-new-tests }} - RUN_FLAKY_TESTS: ${{ inputs.run-flaky-tests }} - TEST_XML_OUTPUT_DIR: ${{ inputs.test-xml-output }} - TEST_VERBOSE: ${{ inputs.test-verbose }} - # This build step is invoked by build.yml to run junit tests only, - # Spotbugs is being run by that workflow via the "check" task and does not need to also be run here, - # since that is redundant. run: | set +e ./.github/scripts/thread-dump.sh & timeout ${TIMEOUT_MINUTES}m ./gradlew --build-cache --continue --no-scan \ -PtestLoggingEvents=started,passed,skipped,failed \ - -PmaxParallelForks=4 \ - -PmaxTestRetries=$TEST_RETRIES -PmaxTestRetryFailures=10 \ + -PmaxParallelForks=2 \ + -PmaxTestRetries=1 -PmaxTestRetryFailures=3 \ + -PmaxQuarantineTestRetries=3 -PmaxQuarantineTestRetryFailures=0 \ -Pkafka.test.catalog.file=$TEST_CATALOG \ - -Pkafka.test.run.new=$RUN_NEW_TESTS \ - -Pkafka.test.run.flaky=$RUN_FLAKY_TESTS \ - -Pkafka.test.xml.output.dir=$TEST_XML_OUTPUT_DIR \ - -Pkafka.cluster.test.repeat=$TEST_REPEAT \ - -Pkafka.test.verbose=$TEST_VERBOSE \ -PcommitId=xxxxxxxxxxxxxxxx \ - -x spotbugsMain \ - -x spotbugsTest \ $TEST_TASK exitcode="$?" echo "exitcode=$exitcode" >> $GITHUB_OUTPUT @@ -112,4 +72,4 @@ runs: name: ${{ inputs.build-scan-artifact-name }} path: ~/.gradle/build-scan-data compression-level: 9 - if-no-files-found: ignore + if-no-files-found: ignore \ No newline at end of file diff --git a/.github/actions/setup-gradle/action.yml b/.github/actions/setup-gradle/action.yml index fe456568066e6..3b1e1f71993cd 100644 --- a/.github/actions/setup-gradle/action.yml +++ b/.github/actions/setup-gradle/action.yml @@ -37,12 +37,12 @@ runs: using: "composite" steps: - name: Setup Java - uses: actions/setup-java@v5 + uses: actions/setup-java@v4 with: distribution: temurin java-version: ${{ inputs.java-version }} - name: Setup Gradle - uses: gradle/actions/setup-gradle@94baf225fe0a508e581a564467443d0e2379123b # v4.3.0 + uses: gradle/actions/setup-gradle@d156388eb19639ec20ade50009f3d199ce1e2808 # v4.1.0 env: GRADLE_BUILD_ACTION_CACHE_DEBUG_ENABLED: true with: diff --git a/.github/actions/setup-python/action.yml b/.github/actions/setup-python/action.yml index 10c55f6e083b5..d7e326314c1be 100644 --- a/.github/actions/setup-python/action.yml +++ b/.github/actions/setup-python/action.yml @@ -22,7 +22,7 @@ runs: using: "composite" steps: - name: Setup Python - uses: actions/setup-python@v6 + uses: actions/setup-python@v5 with: python-version: 3.12 - name: Pip install diff --git a/.github/configs/labeler.yml b/.github/configs/labeler.yml index b988967f33788..24a7a643c9042 100644 --- a/.github/configs/labeler.yml +++ b/.github/configs/labeler.yml @@ -92,12 +92,6 @@ transactions: - any-glob-to-any-file: - 'transaction-coordinator/**' -group-coordinator: - - changed-files: - - any-glob-to-any-file: - - 'group-coordinator/**' - - 'coordinator-common/**' - kip-932: - changed-files: - any-glob-to-any-file: diff --git a/.github/scripts/develocity_reports.py b/.github/scripts/develocity_reports.py index 2d30cbffa5550..a99d41df6b23e 100644 --- a/.github/scripts/develocity_reports.py +++ b/.github/scripts/develocity_reports.py @@ -191,13 +191,7 @@ def _save_cache(self): provider.save_cache(self.build_cache) logger.info(f"Saved cache to {provider.__class__.__name__}") - def build_query( - self, - project: str, - chunk_start: datetime, - chunk_end: datetime, - test_tags: List[str] - ) -> str: + def build_query(self, project: str, chunk_start: datetime, chunk_end: datetime, test_type: str) -> str: """ Constructs the query string to be used in both build info and test containers API calls. @@ -205,44 +199,33 @@ def build_query( project: The project name. chunk_start: The start datetime for the chunk. chunk_end: The end datetime for the chunk. - test_tags: A list of tags to include. + test_type: The type of tests to query. Returns: A formatted query string. """ - test_tags.append("+github") - tags = [] - for tag in test_tags: - if tag.startswith("+"): - tags.append(f"tag:{tag[1:]}") - elif tag.startswith("-"): - tags.append(f"-tag:{tag[1:]}") - else: - raise ValueError("Tag should include + or - to indicate inclusion or exclusion.") - - tags = " ".join(tags) - return f"project:{project} buildStartTime:[{chunk_start.isoformat()} TO {chunk_end.isoformat()}] gradle.requestedTasks:test {tags}" + return f'project:{project} buildStartTime:[{chunk_start.isoformat()} TO {chunk_end.isoformat()}] gradle.requestedTasks:{test_type}' def process_chunk( self, chunk_start: datetime, chunk_end: datetime, project: str, - test_tags: List[str], - remaining_build_ids: set | None, + test_type: str, + remaining_build_ids: set, max_builds_per_request: int ) -> Dict[str, BuildInfo]: """Helper method to process a single chunk of build information""" chunk_builds = {} # Use the helper method to build the query - query = self.build_query(project, chunk_start, chunk_end, test_tags) + query = self.build_query(project, chunk_start, chunk_end, test_type) # Initialize pagination for this chunk from_build = None continue_chunk = True - while continue_chunk and (remaining_build_ids is None or remaining_build_ids): + while continue_chunk and remaining_build_ids: query_params = { 'query': query, 'models': ['gradle-attributes'], @@ -290,7 +273,7 @@ def process_chunk( continue_chunk = False break - if remaining_build_ids is None or build_id in remaining_build_ids: + if build_id in remaining_build_ids: if 'problem' not in gradle_attrs: chunk_builds[build_id] = BuildInfo( id=build_id, @@ -298,8 +281,6 @@ def process_chunk( duration=attrs.get('buildDuration'), has_failed=attrs.get('hasFailed', False) ) - if remaining_build_ids is not None: - remaining_build_ids.remove(build_id) if continue_chunk and response_json: from_build = response_json[-1]['id'] @@ -310,55 +291,37 @@ def process_chunk( return chunk_builds - def get_build_info( - self, - build_ids: List[str] = None, - project: str = None, - test_tags: List[str] = None, - query_days: int = None, - bypass_cache: bool = False, - fetch_all: bool = False - ) -> Dict[str, BuildInfo]: + def get_build_info(self, build_ids: List[str], project: str, test_type: str, query_days: int) -> Dict[str, BuildInfo]: builds = {} max_builds_per_request = 100 cutoff_date = datetime.now(pytz.UTC) - timedelta(days=query_days) - current_time = datetime.now(pytz.UTC) - if not fetch_all and not build_ids: - raise ValueError(f"Either build_ids must be provided or fetch_all must be True: {build_ids} {fetch_all}") - - # Get builds from cache if available and bypass_cache is False - if not bypass_cache and self.build_cache: + # Get builds from cache if available + if self.build_cache: cached_builds = self.build_cache.builds cached_cutoff = self.build_cache.last_update - timedelta(days=query_days) - if fetch_all: - # Use all cached builds within the time period - for build_id, build in cached_builds.items(): + # Use cached data for builds within the cache period + for build_id in build_ids: + if build_id in cached_builds: + build = cached_builds[build_id] if build.timestamp >= cached_cutoff: builds[build_id] = build - else: - # Use cached data for specific builds within the cache period - for build_id in build_ids: - if build_id in cached_builds: - build = cached_builds[build_id] - if build.timestamp >= cached_cutoff: - builds[build_id] = build # Update cutoff date to only fetch new data cutoff_date = self.build_cache.last_update logger.info(f"Using cached data up to {cutoff_date.isoformat()}") - if not fetch_all: - # Remove already found builds from the search list - build_ids = [bid for bid in build_ids if bid not in builds] - - if not build_ids: - logger.info("All builds found in cache") - return builds + # Remove already found builds from the search list + build_ids = [bid for bid in build_ids if bid not in builds] + + if not build_ids: + logger.info("All builds found in cache") + return builds # Fetch remaining builds from API - remaining_build_ids = set(build_ids) if not fetch_all else None + remaining_build_ids = set(build_ids) + current_time = datetime.now(pytz.UTC) chunk_size = self.default_chunk_size # Create time chunks @@ -379,8 +342,8 @@ def get_build_info( chunk[0], chunk[1], project, - test_tags, - remaining_build_ids.copy() if remaining_build_ids else None, + test_type, + remaining_build_ids.copy(), max_builds_per_request ): chunk for chunk in chunks } @@ -389,8 +352,7 @@ def get_build_info( try: chunk_builds = future.result() builds.update(chunk_builds) - if remaining_build_ids: - remaining_build_ids -= set(chunk_builds.keys()) + remaining_build_ids -= set(chunk_builds.keys()) except Exception as e: logger.error(f"Chunk processing generated an exception: {str(e)}") @@ -399,11 +361,11 @@ def get_build_info( f"\nBuild Info Performance:" f"\n Total Duration: {total_duration:.2f}s" f"\n Builds Retrieved: {len(builds)}" - f"\n Builds Not Found: {len(remaining_build_ids) if remaining_build_ids else 0}" + f"\n Builds Not Found: {len(remaining_build_ids)}" ) - # Update cache with new data if not bypassing cache - if builds and not bypass_cache: + # Update cache with new data + if builds: if not self.build_cache: self.build_cache = BuildCache(current_time, {}) self.build_cache.builds.update(builds) @@ -412,14 +374,8 @@ def get_build_info( return builds - def get_test_results( - self, - project: str, - threshold_days: int, - test_tags: List[str], - outcomes: List[str] = None - ) -> List[TestResult]: - + def get_test_results(self, project: str, threshold_days: int, test_type: str = "quarantinedTest", + outcomes: List[str] = None) -> List[TestResult]: """Fetch test results with timeline information""" if outcomes is None: outcomes = ["failed", "flaky"] @@ -441,7 +397,7 @@ def get_test_results( logger.debug(f"Processing chunk: {chunk_start} to {chunk_end}") # Use the helper method to build the query - query = self.build_query(project, chunk_start, chunk_end, test_tags) + query = self.build_query(project, chunk_start, chunk_end, test_type) query_params = { 'query': query, @@ -485,10 +441,7 @@ def get_test_results( logger.debug(f"Total unique build IDs collected: {len(build_ids)}") # Fetch build information using the updated get_build_info method - print(build_ids) - print(list(build_ids)) - - builds = self.get_build_info(list(build_ids), project, test_tags, threshold_days) + builds = self.get_build_info(list(build_ids), project, test_type, threshold_days) logger.debug(f"Retrieved {len(builds)} builds from API") logger.debug(f"Retrieved build IDs: {sorted(builds.keys())}") @@ -511,11 +464,6 @@ def get_test_results( # Sort timeline by timestamp result.timeline = sorted(timeline, key=lambda x: x.timestamp) logger.debug(f"Final timeline entries for {test_name}: {len(result.timeline)}") - - # Print build details for debugging - logger.debug("Timeline entries:") - for entry in timeline: - logger.debug(f"Build ID: {entry.build_id}, Timestamp: {entry.timestamp}, Outcome: {entry.outcome}") # Calculate recent failure rate recent_cutoff = datetime.now(pytz.UTC) - timedelta(days=30) @@ -585,7 +533,7 @@ def get_problematic_quarantined_tests( "kafka", chunk_start, current_time, - test_tags=["+trunk", "+flaky"] + test_type="quarantinedTest" ) problematic_tests[result.name] = { @@ -606,7 +554,7 @@ def get_test_case_details( project: str, chunk_start: datetime, chunk_end: datetime, - test_tags: List[str] + test_type: str = "quarantinedTest" ) -> List[TestCaseResult]: """ Fetch detailed test case results for a specific container. @@ -616,10 +564,10 @@ def get_test_case_details( project: The project name chunk_start: Start time for the query chunk_end: End time for the query - test_tags: List of tags to query + test_type: Type of tests to query (default: "quarantinedTest") """ # Use the helper method to build the query, similar to get_test_results - query = self.build_query(project, chunk_start, chunk_end, test_tags) + query = self.build_query(project, chunk_start, chunk_end, test_type) query_params = { 'query': query, @@ -648,7 +596,7 @@ def get_test_case_details( build_ids.update(ids) # Get build info for all build IDs - builds = self.get_build_info(list(build_ids), project, test_tags, 7) # 7 days for test cases + builds = self.get_build_info(list(build_ids), project, test_type, 7) # 7 days for test cases for test in content: outcome_data = test['outcomeDistribution'] @@ -750,7 +698,6 @@ def get_cleared_tests(self, project: str, results: List[TestResult], """ cleared_tests = {} current_time = datetime.now(pytz.UTC) - chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases for result in results: # Only consider tests with sufficient recent executions @@ -758,454 +705,86 @@ def get_cleared_tests(self, project: str, results: List[TestResult], if len(recent_executions) < min_executions: continue - # Calculate success rate at class level + # Calculate success rate successful_runs = sum(1 for t in recent_executions if t.outcome == 'passed') success_rate = successful_runs / len(recent_executions) - # Check if the test meets clearing criteria at class level + # Check if the test meets clearing criteria if success_rate >= success_threshold: # Verify no recent failures or flaky behavior has_recent_issues = any(t.outcome in ['failed', 'flaky'] for t in recent_executions[-min_executions:]) if not has_recent_issues: - try: - # Get test case details - test_cases = self.get_test_case_details( - result.name, - project, - chunk_start, - current_time, - test_tags=["+trunk", "+flaky"] - ) - - # Only include if all test cases are also passing consistently - all_cases_passing = True - passing_test_cases = [] - - for test_case in test_cases: - case_total = test_case.outcome_distribution.total - if case_total >= min_executions: - case_success_rate = test_case.outcome_distribution.passed / case_total - - # Check recent executions for the test case - recent_case_issues = any(t.outcome in ['failed', 'flaky'] - for t in test_case.timeline[-min_executions:]) - - if case_success_rate >= success_threshold and not recent_case_issues: - passing_test_cases.append({ - 'name': test_case.name, - 'success_rate': case_success_rate, - 'total_executions': case_total, - 'recent_executions': sorted(test_case.timeline, - key=lambda x: x.timestamp)[-min_executions:] - }) - else: - all_cases_passing = False - break - - if all_cases_passing and passing_test_cases: - cleared_tests[result.name] = { - 'result': result, - 'success_rate': success_rate, - 'total_executions': len(recent_executions), - 'successful_runs': successful_runs, - 'recent_executions': recent_executions[-min_executions:], - 'test_cases': passing_test_cases - } - - except Exception as e: - logger.error(f"Error getting test case details for {result.name}: {str(e)}") + cleared_tests[result.name] = { + 'result': result, + 'success_rate': success_rate, + 'total_executions': len(recent_executions), + 'successful_runs': successful_runs, + 'recent_executions': recent_executions[-min_executions:] + } return cleared_tests - def update_cache(self, builds: Dict[str, BuildInfo]): - """ - Update the build cache with new build information. - - Args: - builds: Dictionary of build IDs to BuildInfo objects - """ - current_time = datetime.now(pytz.UTC) - - # Initialize cache if it doesn't exist - if not self.build_cache: - self.build_cache = BuildCache(current_time, {}) - - # Update builds and last update time - self.build_cache.builds.update(builds) - self.build_cache.last_update = current_time - - # Save to all cache providers - self._save_cache() - - logger.info(f"Updated cache with {len(builds)} builds") - - def get_persistent_failing_tests(self, results: List[TestResult], - min_failure_rate: float = 0.2, - min_executions: int = 5) -> Dict[str, Dict]: - """ - Identify tests that have been consistently failing/flaky over time. - Groups by test class and includes individual test cases. - """ - persistent_failures = {} - current_time = datetime.now(pytz.UTC) - chunk_start = current_time - timedelta(days=7) # Last 7 days for test cases - - # Group results by class - class_groups = {} - for result in results: - class_name = result.name.split('#')[0] # Get class name - if class_name not in class_groups: - class_groups[class_name] = [] - class_groups[class_name].append(result) - - # Analyze each class and its test cases - for class_name, class_results in class_groups.items(): - class_total = sum(r.outcome_distribution.total for r in class_results) - class_problems = sum(r.outcome_distribution.failed + r.outcome_distribution.flaky - for r in class_results) - - if class_total < min_executions: - continue - - class_failure_rate = class_problems / class_total if class_total > 0 else 0 - - # Only include if class has significant failures - if class_failure_rate >= min_failure_rate: - try: - # Get detailed test case information using the same method as other reports - test_cases = self.get_test_case_details( - class_name, - "kafka", - chunk_start, - current_time, - test_tags=["+trunk", "-flaky"] - ) - - failing_test_cases = {} - for test_case in test_cases: - total_runs = test_case.outcome_distribution.total - if total_runs >= min_executions: - problem_runs = (test_case.outcome_distribution.failed + - test_case.outcome_distribution.flaky) - failure_rate = problem_runs / total_runs if total_runs > 0 else 0 - - if failure_rate >= min_failure_rate: - # Extract just the method name - method_name = test_case.name.split('.')[-1] - failing_test_cases[method_name] = { - 'result': test_case, - 'failure_rate': failure_rate, - 'total_executions': total_runs, - 'failed_executions': problem_runs, - 'timeline': sorted(test_case.timeline, key=lambda x: x.timestamp) - } - - if failing_test_cases: # Only include classes that have problematic test cases - persistent_failures[class_name] = { - 'failure_rate': class_failure_rate, - 'total_executions': class_total, - 'failed_executions': class_problems, - 'test_cases': failing_test_cases - } - - except Exception as e: - logger.error(f"Error getting test case details for {class_name}: {str(e)}") - - return persistent_failures +def print_summary(problematic_tests: Dict[str, Dict], flaky_regressions: Dict[str, Dict]): + """Print a summary of the most problematic tests at the top of the report""" + print("\n## Summary of Most Problematic Tests") -def get_develocity_class_link(class_name: str, threshold_days: int) -> str: - """ - Generate Develocity link for a test class - - Args: - class_name: Name of the test class - threshold_days: Number of days to look back in search - """ - base_url = "https://develocity.apache.org/scans/tests" - params = { - "search.rootProjectNames": "kafka", - "search.tags": "github,trunk", - "search.timeZoneId": "UTC", - "search.relativeStartTime": f"P{threshold_days}D", - "tests.container": class_name, - "search.tasks": "test" - } - - return f"{base_url}?{'&'.join(f'{k}={requests.utils.quote(str(v))}' for k, v in params.items())}" - -def get_develocity_method_link(class_name: str, method_name: str, threshold_days: int) -> str: - """ - Generate Develocity link for a test method - - Args: - class_name: Name of the test class - method_name: Name of the test method - threshold_days: Number of days to look back in search - """ - base_url = "https://develocity.apache.org/scans/tests" - - # Extract just the method name without the class prefix - if '.' in method_name: - method_name = method_name.split('.')[-1] - - params = { - "search.rootProjectNames": "kafka", - "search.tags": "github,trunk", - "search.timeZoneId": "UTC", - "search.relativeStartTime": f"P{threshold_days}D", - "tests.container": class_name, - "tests.test": method_name, - "search.tasks": "test" - } - - return f"{base_url}?{'&'.join(f'{k}={requests.utils.quote(str(v))}' for k, v in params.items())}" + # Combine and sort all test cases by failure rate + all_problem_cases = [] -def print_most_problematic_tests(problematic_tests: Dict[str, Dict], threshold_days: int): - """Print a summary of the most problematic tests""" - print("\n## Most Problematic Tests") - if not problematic_tests: - print("No high-priority problematic tests found.") - return - - print(f"Found {len(problematic_tests)} tests that have been quarantined for {threshold_days} days and are still failing frequently.") - - # Print table with class and method information - print("\n") - print("") - - for test_name, details in sorted(problematic_tests.items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - class_link = get_develocity_class_link(test_name, threshold_days) - print(f"") - - for test_case in sorted(details['test_cases'], - key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total - if x.outcome_distribution.total > 0 else 0, - reverse=True): + # Process problematic quarantined tests + if len(problematic_tests) > 0: + print(f"Found {len(problematic_tests)} tests that have been quarantined for a while and are still flaky.") + for full_class_name, details in problematic_tests.items(): + for test_case in details['test_cases']: + total_runs = test_case.outcome_distribution.total method_name = test_case.name.split('.')[-1] - if method_name != 'N/A': - method_link = get_develocity_method_link(test_name, test_case.name, threshold_days) - total_runs = test_case.outcome_distribution.total - failure_rate = (test_case.outcome_distribution.failed + test_case.outcome_distribution.flaky) / total_runs if total_runs > 0 else 0 - print(f"" - f"" - f"") - print("
ClassTest CaseFailure RateBuild ScansLink
{test_name}↗️
{method_name}{failure_rate:.2%}{total_runs}↗️
") - - # Print detailed execution history - print("\n
") - print("Detailed Execution History\n") - - for test_name, details in sorted(problematic_tests.items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - print(f"\n### {test_name}") - print(f"* Days Quarantined: {details['days_quarantined']}") - print(f"* Recent Failure Rate: {details['recent_failure_rate']:.2%}") - print(f"* Total Runs: {details['container_result'].outcome_distribution.total}") - print(f"* Build Outcomes: Passed: {details['container_result'].outcome_distribution.passed} | " - f"Failed: {details['container_result'].outcome_distribution.failed} | " - f"Flaky: {details['container_result'].outcome_distribution.flaky}") - - for test_method in sorted(details['test_cases'], - key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total - if x.outcome_distribution.total > 0 else 0, - reverse=True): - if test_method.timeline: - print(f"\n#### {method_name}") - print("Recent Executions:") - print("```") - print("Date/Time (UTC) Outcome Build ID") - print("-" * 44) - for entry in sorted(test_method.timeline, key=lambda x: x.timestamp, reverse=True)[:5]: - date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') - print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") - print("```") - - print("
") - -def print_flaky_regressions(flaky_regressions: Dict[str, Dict], threshold_days: int): - """Print tests that have recently started showing flaky behavior""" - print("\n## Flaky Test Regressions") - if not flaky_regressions: - print("No flaky test regressions found.") - return - - print(f"Found {len(flaky_regressions)} tests that have started showing increased flaky behavior recently.") - - # Print table with test details - print("\n") - print("") - + if total_runs > 0: + failure_rate = (test_case.outcome_distribution.failed + + test_case.outcome_distribution.flaky) / total_runs + all_problem_cases.append({ + 'class': full_class_name, + 'method': method_name, + 'failure_rate': failure_rate, + 'total_runs': total_runs + }) + + # Process flaky regressions + if len(flaky_regressions) > 0: + print(f"Found {len(flaky_regressions)} tests that have started recently failing.") for test_name, details in flaky_regressions.items(): - class_link = get_develocity_class_link(test_name, threshold_days) - print(f"") - print(f"" - f"" - f"") - - # Add recent execution details in sub-rows - print("") - for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: - date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') - print(f"") - print("
Test ClassRecent Flaky RateHistorical RateRecent ExecutionsLink
{test_name}↗️
{details['recent_flaky_rate']:.2%}{details['historical_flaky_rate']:.2%}{len(details['recent_executions'])}
Recent Executions:
{date_str} - {entry.outcome}
") - - # Print detailed history - print("\n
") - print("Detailed Execution History\n") - - for test_name, details in sorted(flaky_regressions.items(), - key=lambda x: x[1]['recent_flaky_rate'], - reverse=True): - print(f"\n### {test_name}") - print(f"* Recent Flaky Rate: {details['recent_flaky_rate']:.2%}") - print(f"* Historical Flaky Rate: {details['historical_flaky_rate']:.2%}") - print("\nRecent Executions:") - print("```") - print("Date/Time (UTC) Outcome Build ID") - print("-" * 44) - for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: - date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') - print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") - print("```") - - print("
") - -def print_persistent_failing_tests(persistent_failures: Dict[str, Dict], threshold_days: int): - """Print tests that have been consistently failing over time""" - print("\n## Persistently Failing/Flaky Tests") - if not persistent_failures: - print("No persistently failing tests found.") - return - - print(f"Found {len(persistent_failures)} tests that have been consistently failing or flaky.") - - # Print table with test details - print("\n") - print("") - - for class_name, class_details in sorted(persistent_failures.items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - class_link = get_develocity_class_link(class_name, threshold_days) - - # Print class row - print(f"" - f"") - - # Print test case rows - for test_name, test_details in sorted(class_details['test_cases'].items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - test_link = get_develocity_method_link(class_name, test_name, threshold_days) - print(f"" - f"" - f"" - f"" - f"" - f"") - print("
Test ClassTest CaseFailure RateTotal RunsFailed/FlakyLink
{class_name}↗️
{test_name}{test_details['failure_rate']:.2%}{test_details['total_executions']}{test_details['failed_executions']}↗️
") - - # Print detailed history - print("\n
") - print("Detailed Execution History\n") - - for class_name, class_details in sorted(persistent_failures.items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - print(f"\n### {class_name}") - print(f"* Overall Failure Rate: {class_details['failure_rate']:.2%}") - print(f"* Total Executions: {class_details['total_executions']}") - print(f"* Failed/Flaky Executions: {class_details['failed_executions']}") - - for test_name, test_details in sorted(class_details['test_cases'].items(), - key=lambda x: x[1]['failure_rate'], - reverse=True): - print("\nRecent Executions:") - print("```") - print("Date/Time (UTC) Outcome Build ID") - print("-" * 44) - for entry in sorted(test_details['timeline'], key=lambda x: x.timestamp, reverse=True)[:5]: - date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') - print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") - print("```") - - print("
") - -def print_cleared_tests(cleared_tests: Dict[str, Dict], threshold_days: int): - """Print tests that are ready to be unquarantined""" - print("\n## Cleared Tests (Ready for Unquarantine)") - if not cleared_tests: - print("No tests ready to be cleared from quarantine.") - return - - # Calculate total number of test methods - total_methods = sum(len(details['test_cases']) for details in cleared_tests.values()) - - print(f"Found {len(cleared_tests)} test classes with {total_methods} test methods that have been consistently passing. " - f"These tests could be candidates for removing quarantine annotations at either class or method level.") - - # Print table with class and method information - print("\n") - print("") - - for test_name, details in sorted(cleared_tests.items(), - key=lambda x: x[1]['success_rate'], - reverse=True): - class_link = get_develocity_class_link(test_name, threshold_days) - print(f"") - print(f"" - f"" - f"" - f"") - - for test_case in details['test_cases']: - method_name = test_case['name'].split('.')[-1] - method_link = get_develocity_method_link(test_name, test_case['name'], threshold_days) - recent_status = "N/A" - if test_case['recent_executions']: - recent_status = test_case['recent_executions'][-1].outcome - - print(f"" - f"" - f"" - f"" - f"") - print("") + all_problem_cases.append({ + 'class': test_name, + 'method': 'N/A', # Flaky regressions are at class level + 'failure_rate': details['recent_flaky_rate'], + 'total_runs': len(details['recent_executions']) + }) + + # Sort by failure rate descending + sorted_cases = sorted(all_problem_cases, + key=lambda x: x['failure_rate'], + reverse=True) + + # Group by class + by_class = {} + for case in sorted_cases: + if case['class'] not in by_class: + by_class[case['class']] = [] + by_class[case['class']].append(case) + + # Print summary + print("
Test ClassTest MethodSuccess RateTotal RunsRecent StatusLink
{test_name}↗️
Class Overall{details['success_rate']:.2%}{details['total_executions']}{details['successful_runs']} passed
{method_name}{test_case['success_rate']:.2%}{test_case['total_executions']}{recent_status}↗️
 
") + for full_class_name, cases in by_class.items(): + print(f"") + for case in cases: + method = case['method'] + if method != 'N/A': + print(f"") + else: + print(f"") print("
ClassTest CaseFailure RateBuild Scans
{full_class_name}
{method:<60}{case['failure_rate']:.2%}{case['total_runs']}
{case['failure_rate']:.2%}{case['total_runs']}
") - - # Print detailed history - print("\n
") - print("Detailed Test Method History\n") - - for test_name, details in sorted(cleared_tests.items(), - key=lambda x: x[1]['success_rate'], - reverse=True): - print(f"\n### {test_name}") - print(f"* Overall Success Rate: {details['success_rate']:.2%}") - print(f"* Total Executions: {details['total_executions']}") - print(f"* Consecutive Successful Runs: {details['successful_runs']}") - - for test_case in details['test_cases']: - method_name = test_case['name'].split('.')[-1] - print(f"\n#### {method_name}") - print(f"* Success Rate: {test_case['success_rate']:.2%}") - print(f"* Total Runs: {test_case['total_executions']}") - print("\nRecent Executions:") - print("```") - print("Date/Time (UTC) Outcome Build ID") - print("-" * 44) - for entry in sorted(test_case['recent_executions'], key=lambda x: x.timestamp, reverse=True)[:5]: - date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') - print(f"{date_str:<17} {entry.outcome:<10} {entry.build_id}") - print("```") - - print("
") def main(): token = None @@ -1227,23 +806,18 @@ def main(): analyzer = TestAnalyzer(BASE_URL, token) try: - quarantined_builds = analyzer.get_build_info([], PROJECT, "quarantinedTest", 7, bypass_cache=True, fetch_all=True) - regular_builds = analyzer.get_build_info([], PROJECT, "test", 7, bypass_cache=True, fetch_all=True) - - analyzer.update_cache(quarantined_builds) - analyzer.update_cache(regular_builds) - - # Get test results + # Get quarantined test results quarantined_results = analyzer.get_test_results( PROJECT, threshold_days=QUARANTINE_THRESHOLD_DAYS, - test_tags=["+trunk", "+flaky", "-new"] + test_type="quarantinedTest" ) + # Get regular test results for flaky regression analysis regular_results = analyzer.get_test_results( PROJECT, threshold_days=7, # Last 7 days for regular tests - test_tags=["+trunk", "-flaky", "-new"] + test_type="test" ) # Generate reports @@ -1267,34 +841,111 @@ def main(): success_threshold=SUCCESS_THRESHOLD ) - # Get persistent failing tests (add after getting regular_results) - persistent_failures = analyzer.get_persistent_failing_tests( - regular_results, - min_failure_rate=0.2, # 20% failure rate threshold - min_executions=5 - ) - - # Print report header + # Print summary first print(f"\n# Flaky Test Report for {datetime.now(pytz.UTC).strftime('%Y-%m-%d')}") print(f"This report was run on {datetime.now(pytz.UTC).strftime('%Y-%m-%d %H:%M:%S')} UTC") - - # Print each section - print_most_problematic_tests(problematic_tests, QUARANTINE_THRESHOLD_DAYS) - print_flaky_regressions(flaky_regressions, QUARANTINE_THRESHOLD_DAYS) - print_persistent_failing_tests(persistent_failures, QUARANTINE_THRESHOLD_DAYS) - print_cleared_tests(cleared_tests, QUARANTINE_THRESHOLD_DAYS) + + print_summary(problematic_tests, flaky_regressions) + + # Print Flaky Test Regressions + print("\n## Flaky Test Regressions") + if not flaky_regressions: + print("No flaky test regressions found.") + else: + for test_name, details in flaky_regressions.items(): + print(f"\n{test_name}") + print(f"Recent Flaky Rate: {details['recent_flaky_rate']:.2%}") + print(f"Historical Flaky Rate: {details['historical_flaky_rate']:.2%}") + print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):") + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp)[-5:]: + print(f" {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}") + + # Print Cleared Tests + print("\n## Cleared Tests (Ready for Unquarantine)") + if not cleared_tests: + print("No tests ready to be cleared from quarantine.") + else: + # Print summary + print("") + for test_name, details in cleared_tests.items(): + print(f"") + print("
ClassTest CaseSuccess RateBuild Scans
{test_name}{details['success_rate']:.2%}{details['total_executions']}
") + + for test_name, details in cleared_tests.items(): + print(f"\n{test_name}") + print(f"Success Rate: {details['success_rate']:.2%}") + print(f"Total Executions: {details['total_executions']}") + print(f"\nRecent Executions (last {len(details['recent_executions'])} runs):") + for entry in sorted(details['recent_executions'], key=lambda x: x.timestamp): + print(f" {entry.timestamp.strftime('%Y-%m-%d %H:%M')} - {entry.outcome}") + + # Print Defective Tests + print("\n## High-Priority Quarantined Tests") + if not problematic_tests: + print("No high-priority quarantined tests found.") + else: + print("These are tests which have been quarantined for several days and need attention.") + sorted_tests = sorted( + problematic_tests.items(), + key=lambda x: (x[1]['failure_rate'], x[1]['days_quarantined']), + reverse=True + ) + + print(f"\nFound {len(sorted_tests)} high-priority quarantined test classes:") + for full_class_name, details in sorted_tests: + class_result = details['container_result'] + class_name = full_class_name.split(".")[-1] + print(f"### {class_name}") + print(f"{full_class_name} has been quarantined for {details['days_quarantined']} days") + print(f"Overall class failure: {details['failure_rate']:.2%}") + print(f"Recent class failure: {details['recent_failure_rate']:.2%}") + print("\nOverall Build Outcomes:") + print(f" Total Runs: {class_result.outcome_distribution.total}") + print(f" Failed: {class_result.outcome_distribution.failed}") + print(f" Flaky: {class_result.outcome_distribution.flaky}") + print(f" Passed: {class_result.outcome_distribution.passed}") + + print("\nQuarantined Methods (Last 7 Days):") + + # Sort test methods by failure rate + sorted_methods = sorted( + details['test_cases'], + key=lambda x: (x.outcome_distribution.failed + x.outcome_distribution.flaky) / x.outcome_distribution.total if x.outcome_distribution.total > 0 else 0, + reverse=True + ) + + for test_method in sorted_methods: + total_runs = test_method.outcome_distribution.total + if total_runs > 0: + failure_rate = (test_method.outcome_distribution.failed + test_method.outcome_distribution.flaky) / total_runs + + # Extract the method name from the full test name + method_name = test_method.name.split('.')[-1] + + print(f"\n → {method_name}") + print(f" Failure Rate: {failure_rate:.2%}") + print(f" Runs: {total_runs:3d} | Failed: {test_method.outcome_distribution.failed:3d} | " + f"Flaky: {test_method.outcome_distribution.flaky:3d} | " + f"Passed: {test_method.outcome_distribution.passed:3d}") + + # Show test method timeline + if test_method.timeline: + print(f"\n Recent Executions (last {min(3, len(test_method.timeline))} of {len(test_method.timeline)} runs):") + print(" Date/Time (UTC) Outcome Build ID") + print(" " + "-" * 44) + for entry in sorted(test_method.timeline, key=lambda x: x.timestamp)[-3:]: + date_str = entry.timestamp.strftime('%Y-%m-%d %H:%M') + print(f" {date_str:<17} {entry.outcome:<10} {entry.build_id}") except Exception as e: logger.exception("Error occurred during report generation") print(f"Error occurred: {str(e)}") + if __name__ == "__main__": # Configure logging logging.basicConfig( level=logging.INFO, - format='%(asctime)s - %(levelname)s - %(message)s', - handlers=[ - logging.FileHandler("flaky_test_report.log") - ] + format='%(asctime)s - %(levelname)s - %(message)s' ) main() diff --git a/.github/scripts/junit.py b/.github/scripts/junit.py index 550ea4935116d..5a2088f8ea196 100644 --- a/.github/scripts/junit.py +++ b/.github/scripts/junit.py @@ -208,12 +208,12 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: """ Parse a report XML and extract the module path. Test report paths look like: - build/junit-xml/module[/sub-module]/[test-job]/TEST-class.method.xml + build/junit-xml/module[/sub-module]/[task]/TEST-class.method.xml - This method strips off a base path and assumes all path segments leading up to the job name + This method strips off a base path and assumes all path segments leading up to the suite name are part of the module path. - Returns a tuple of (module, job) + Returns a tuple of (module, task) """ rel_report_path = os.path.relpath(report_path, base_path) path_segments = pathlib.Path(rel_report_path).parts @@ -238,7 +238,7 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: parser.add_argument("--export-test-catalog", required=False, default="", - help="Optional path to dump all tests.") + help="Optional path to dump all tests") if not os.getenv("GITHUB_WORKSPACE"): print("This script is intended to by run by GitHub Actions.") @@ -249,7 +249,6 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: glob_path = os.path.join(args.path, "**/*.xml") reports = glob(pathname=glob_path, recursive=True) logger.info(f"Found {len(reports)} JUnit results") - workspace_path = get_env("GITHUB_WORKSPACE") # e.g., /home/runner/work/apache/kafka total_file_count = 0 @@ -266,15 +265,14 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: flaky_table = [] skipped_table = [] quarantined_table = [] - new_table = [] exporter = TestCatalogExporter() logger.debug(f"::group::Parsing {len(reports)} JUnit Report Files") for report in reports: with open(report, "r") as fp: - module_path, test_job = split_report_path(args.path, report) - logger.debug(f"Parsing file: {report}, module: {module_path}, job: {test_job}") + module_path, task = split_report_path(args.path, report) + logger.debug(f"Parsing file: {report}, module: {module_path}, task: {task}") for suite in parse_report(workspace_path, report, fp): total_skipped += suite.skipped total_errors += suite.errors @@ -311,8 +309,8 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: logger.debug(f"Found skipped test: {skipped_test}") skipped_table.append((simple_class_name, skipped_test.test_name)) - # Only collect quarantined tests from the "flaky" test jobs - if re.match(r".*\bflaky\b.*", test_job) is not None: + # Only collect quarantined tests from the "quarantinedTest" task + if task == "quarantinedTest": for test in all_suite_passed.values(): simple_class_name = test.class_name.split(".")[-1] quarantined_table.append((simple_class_name, test.test_name)) @@ -320,14 +318,6 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: simple_class_name = test.class_name.split(".")[-1] quarantined_table.append((simple_class_name, test.test_name)) - if re.match(r".*\bnew\b.*", test_job) is not None: - for test in all_suite_passed.values(): - simple_class_name = test.class_name.split(".")[-1] - new_table.append((simple_class_name, test.test_name)) - for test in all_suite_failed.values(): - simple_class_name = test.class_name.split(".")[-1] - new_table.append((simple_class_name, test.test_name)) - if args.export_test_catalog: exporter.handle_suite(module_path, suite) @@ -341,36 +331,14 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: duration = pretty_time_duration(total_time) logger.info(f"Finished processing {len(reports)} reports") - # Determine exit status. If we add anything to failure_messages, we will exit(1) - failure_messages = [] - - exit_code = get_env("GRADLE_TEST_EXIT_CODE", int) - junit_report_url = get_env("JUNIT_REPORT_URL") - thread_dump_url = get_env("THREAD_DUMP_URL") - - if exit_code is None: - failure_messages.append("Missing required GRADLE_TEST_EXIT_CODE environment variable. Failing this script.") - elif exit_code == 124: - # Special handling for timeouts. The exit code 124 is emitted by 'timeout' command used in build.yml. - # A watchdog script "thread-dump.sh" will use jstack to force a thread dump for any Gradle process - # still running after the timeout. We capture the exit codes of the two test tasks and pass them to - # this script. If any task fails due to timeout, we want to fail the overall build since it will not - # include all the test results - failure_messages.append(f"Gradle task had a timeout. Failing this script. These are partial results!") - elif exit_code > 0: - failure_messages.append(f"Gradle task had a failure exit code. Failing this script.") - - if thread_dump_url: - failure_messages.append(f"Thread dump available at {thread_dump_url} and the script will now fail.") - - if junit_report_url: - report_md = f"Download [JUnit HTML report]({junit_report_url})" - else: - report_md = "No reports available. Environment variable JUNIT_REPORT_URL was not found." - - # Print summary of the tests + # Print summary of the tests. # The stdout (print) goes to the workflow step console output. # The stderr (logger) is redirected to GITHUB_STEP_SUMMARY which becomes part of the HTML job summary. + report_url = get_env("JUNIT_REPORT_URL") + if report_url: + report_md = f"Download [HTML report]({report_url})." + else: + report_md = "No report available. JUNIT_REPORT_URL was missing." summary = (f"{total_run} tests cases run in {duration}.\n\n" f"{total_success} {PASSED}, {total_failures} {FAILED}, " f"{total_flaky} {FLAKY}, {total_skipped} {SKIPPED}, {len(quarantined_table)} {QUARANTINED}, and {total_errors} errors.") @@ -434,28 +402,40 @@ def split_report_path(base_path: str, report_path: str) -> Tuple[str, str]: print("\n") logger.debug("::endgroup::") - if len(new_table) > 0: - print("
") - print(f"New Tests ({len(new_table)})\n") - print(f"| Module | Test |") - print(f"| ------ | ---- |") - logger.debug(f"::group::Found {len(new_table)} new tests") - for row in new_table: - row_joined = " | ".join(row) - print(f"| {row_joined} |") - logger.debug(f"{row[0]} > {row[1]}") - print("\n
") - logger.debug("::endgroup::") - print("
") + # Print special message if there was a timeout + test_exit_code = get_env("GRADLE_TEST_EXIT_CODE", int) + quarantined_test_exit_code = get_env("GRADLE_QUARANTINED_TEST_EXIT_CODE", int) - # Print errors and exit - for message in failure_messages: - logger.debug(message) - logger.debug(summary) - - if len(failure_messages) > 0: + if test_exit_code == 124 or quarantined_test_exit_code == 124: + # Special handling for timeouts. The exit code 124 is emitted by 'timeout' command used in build.yml. + # A watchdog script "thread-dump.sh" will use jstack to force a thread dump for any Gradle process + # still running after the timeout. We capture the exit codes of the two test tasks and pass them to + # this script. If either "test" or "quarantinedTest" fails due to timeout, we want to fail the overall build. + thread_dump_url = get_env("THREAD_DUMP_URL") + if test_exit_code == 124: + logger.debug(f"Gradle task for 'test' timed out. These are partial results!") + else: + logger.debug(f"Gradle task for 'quarantinedTest' timed out. These are partial results!") + logger.debug(summary) + if thread_dump_url: + print(f"\nThe JUnit tests were cancelled due to a timeout. Thread dumps were generated before the job was cancelled. " + f"Download [thread dumps]({thread_dump_url}).\n") + logger.debug(f"Failing this step because the tests timed out. Thread dumps were taken and archived here: {thread_dump_url}") + else: + logger.debug(f"Failing this step because the tests timed out. Thread dumps were not archived, check logs in JUnit step.") exit(1) + elif test_exit_code in (0, 1): + logger.debug(summary) + if total_failures > 0: + logger.debug(f"Failing this step due to {total_failures} test failures") + exit(1) + elif total_errors > 0: + logger.debug(f"Failing this step due to {total_errors} test errors") + exit(1) + else: + exit(0) else: - exit(0) + logger.debug(f"Gradle had unexpected exit code {test_exit_code}. Failing this step") + exit(1) diff --git a/.github/scripts/requirements.txt b/.github/scripts/requirements.txt index d3fcf50bb7400..d59455f79dac6 100644 --- a/.github/scripts/requirements.txt +++ b/.github/scripts/requirements.txt @@ -12,8 +12,6 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. - -# Note: Ensure the 'requests' version here matches the version in tests/setup.py PyYAML~=6.0 pytz==2024.2 -requests==2.32.4 +requests==2.32.3 diff --git a/.github/scripts/thread-dump.sh b/.github/scripts/thread-dump.sh index 89eb90f1355c2..8f387a3974cad 100755 --- a/.github/scripts/thread-dump.sh +++ b/.github/scripts/thread-dump.sh @@ -20,14 +20,16 @@ sleep $(($SLEEP_MINUTES*60)); echo "Timed out after $SLEEP_MINUTES minutes. Dumping threads now..." mkdir thread-dumps -touch thread-dumps/pids.txt sleep 5; for GRADLE_WORKER_PID in `jps | grep GradleWorkerMain | awk -F" " '{print $1}'`; do - echo $GRADLE_WORKER_PID >> thread-dumps/pids.txt echo "Dumping threads for GradleWorkerMain pid $GRADLE_WORKER_PID into $FILENAME"; FILENAME="thread-dumps/GradleWorkerMain-$GRADLE_WORKER_PID.txt" jstack $GRADLE_WORKER_PID > $FILENAME + if ! grep -q "kafka" $FILENAME; then + echo "No match for 'kafka' in thread dump file $FILENAME, discarding it." + rm $FILENAME; + fi; sleep 5; done; diff --git a/.github/workflows/README.md b/.github/workflows/README.md index 229b72596ac97..24116f32dd630 100644 --- a/.github/workflows/README.md +++ b/.github/workflows/README.md @@ -67,13 +67,13 @@ Unlike trunk, the PR builds _will_ utilize the Gradle cache. ### PR Triage In order to get the attention of committers, we have a triage workflow for Pull Requests -opened by non-committers. This workflow consists of two files: +opened by non-committers. This workflow consists of three files: -* [pr-update.yml](pr-update.yml) When a PR is created, add the `triage` label if - the PR was opened by a non-committer. -* [pr-labels-cron.yml](pr-labels-cron.yml) Cron job to add `needs-attention` label to community - PRs that have not been reviewed after 7 days. Also includes a cron job to - remove the `triage` and `needs-attention` labels from PRs which have been reviewed. +* [pr-update.yml](pr-update.yml) When a PR is created add the `triage` label if the PR + was opened by a non-committer. +* [pr-reviewed-trigger.yml](pr-reviewed-trigger.yml) Runs when any PR is reviewed. + Used as a trigger for the next workflow +* [pr-reviewed.yml](pr-reviewed.yml) Remove the `triage` label after a PR has been reviewed _The pr-update.yml workflow includes pull_request_target!_ @@ -84,8 +84,7 @@ organization must be public. Here are the steps to take: * Find yourself * Change "Organization Visibility" to Public -Full documentation for this process can be found in GitHub's docs: -https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-your-membership-in-organizations/publicizing-or-hiding-organization-membership +Full documentation for this process can be found in GitHub's docs: https://docs.github.com/en/account-and-profile/setting-up-and-managing-your-personal-account-on-github/managing-your-membership-in-organizations/publicizing-or-hiding-organization-membership If you are a committer and do not want your membership in the ASF org listed as public, you will need to remove the `triage` label manually. @@ -101,36 +100,11 @@ There are two files related to this workflow: * [pr-labeled.yml](pr-labeled.yml) approves a pending approval for PRs that have been labeled with `ci-approved` -* [workflow-requested.yml](workflow-requested.yml) approves future workflow requests automatically +* [ci-requested.yml](ci-requested.yml) approves future CI requests automatically if the PR has the `ci-approved` label _The pr-labeled.yml workflow includes pull_request_target!_ -### PR Linter - -To help ensure good commit messages, we have added a "Pull Request Linter" job -that checks the title and body of the PR. - -There are two files related to this workflow: - -* [pr-reviewed.yml](pr-reviewed.yml) runs when a PR is reviewed or has its title -or body edited. This workflow simply captures the PR number into a text file -* [pr-linter.yml](pr-linter.yml) runs after pr-reviewed.yml and loads the PR -using the saved text file. This workflow runs the linter script that checks the -structure of the PR - -Note that the pr-reviewed.yml workflow uses the `ci-approved` mechanism described -above. - -The following checks are performed on our PRs: -* Title is not too short or too long -* Title starts with "KAFKA-", "MINOR", or "HOTFIX" -* Body is not empty -* Body includes "Reviewers:" if the PR is approved - -With the merge queue, our PR title and body will become the commit subject and message. -This linting step will help to ensure that we have nice looking commits. - ### Stale PRs This one is straightforward. Using the "actions/stale" GitHub Action, we automatically @@ -145,47 +119,4 @@ Composite actions are a convenient way to reuse build logic, but they have some limitations. - Cannot run more than one step in a composite action (see `workflow_call` instead) -- Inputs can only be strings, no support for typed parameters. See: https://github.com/actions/runner/issues/2238 - -## Troubleshooting - -### Gradle Cache Misses - -If your PR is running for longer than you would expect due to cache misses, there are a -few things to check. - -First, find the cache that was loaded into your PR build. This is found in the Setup Gradle -output. Look for a line starting with "Restored Gradle User Home from cache key". -For example, - -``` -Restored Gradle User Home from cache key: gradle-home-v1|Linux-X64|test[188616818c9a3165053ef8704c27b28e]-5c20aa187aa8f51af4270d7d1b0db4963b0cd10b -``` - -The last part of the cache key is the SHA of the commit on trunk where the cache -was created. If that commit is not on your branch, it means your build loaded a -cache that includes changes your PR does not yet have. This is a common way to -have cache misses. To resolve this, update your PR with the latest cached trunk commit: - -```commandline -git fetch origin -./committer-tools/update-cache.sh -git merge trunk-cached -``` - -then push your branch. - -If your build seems to be using the correct cache, the next thing to check is for -changes to task inputs. You can find this by locating the trunk Build Scan from -the cache commit on trunk and comparing it with the build scan of your PR build. -This is done in the Develocity UI using the two overlapping circles like `(A()B)`. -This will show you differences in the task inputs for the two builds. - -Finally, you can run your PR with extra cache debugging. Add this to the gradle invocation in -[run-gradle/action.yml](../actions/run-gradle/action.yml). - -``` --Dorg.gradle.caching.debug=true -``` - -This will dump out a lot of output, so you may also reduce the test target to one module. +- Inputs can only be strings, no support for typed parameters. See: https://github.com/actions/runner/issues/2238 \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 47bb2cbc31d5d..e1992a4059281 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -31,47 +31,17 @@ on: type: boolean jobs: - configure: - runs-on: ubuntu-latest - name: Configure Workflow - outputs: - is-draft: ${{ steps.check-draft-pr.outputs.is-draft }} - test-catalog-days: ${{ steps.configure-outputs.outputs.days }} - sha: ${{ steps.configure-outputs.outputs.sha }} - steps: - - name: Env - run: printenv - env: - GITHUB_CONTEXT: ${{ toJson(github) }} - - name: Check for Draft PR - id: check-draft-pr - if: | - github.event_name == 'pull_request' && - github.event.pull_request.draft - run: echo "is-draft=true" >> "$GITHUB_OUTPUT" - - name: Configure Outputs - id: configure-outputs - run: | - if [ "${{ github.event_name }}" = "pull_request" ]; then - echo "days=0" >> "$GITHUB_OUTPUT" - echo "sha=${{ github.event.pull_request.head.sha }}" >> "$GITHUB_OUTPUT" - else - echo "days=7" >> "$GITHUB_OUTPUT" - echo "sha=${{ github.sha }}" >> "$GITHUB_OUTPUT" - fi - load-catalog: - needs: [configure] runs-on: ubuntu-latest name: Load Test Catalog steps: - name: Checkout main - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false - name: Checkout test-catalog - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: ref: 'test-catalog' persist-credentials: false @@ -79,12 +49,9 @@ jobs: path: test-catalog - name: Checkout catalog at earlier date - if: ${{ needs.configure.outputs.test-catalog-days != '0' }} - env: - DAYS: ${{ needs.configure.outputs.test-catalog-days }} run: | cd test-catalog - SHA=$(git rev-list -1 --before $DAYS.days.ago origin/test-catalog) + SHA=$(git rev-list -1 --before 7.days.ago origin/test-catalog) echo $SHA git switch --detach $SHA git show --no-patch @@ -109,25 +76,31 @@ jobs: compression-level: 9 validate: - needs: [configure] runs-on: ubuntu-latest - name: Compile and Check (Merge Ref) + name: Compile and Check Java + outputs: + is-draft: ${{ steps.check-draft-pr.outputs.is-draft }} steps: - name: Env run: printenv env: GITHUB_CONTEXT: ${{ toJson(github) }} + - name: Check for Draft PR + id: check-draft-pr + if: | + github.event_name == 'pull_request' && + github.event.pull_request.draft + run: echo "is-draft=true" >> "$GITHUB_OUTPUT" - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false - ref: ${{ github.sha }} # this is the default, just being explicit. - name: Setup Python uses: ./.github/actions/setup-python - name: Setup Gradle uses: ./.github/actions/setup-gradle with: - java-version: 24 + java-version: 23 gradle-cache-read-only: ${{ !inputs.is-trunk }} gradle-cache-write-only: ${{ inputs.is-trunk }} develocity-access-key: ${{ secrets.DEVELOCITY_ACCESS_KEY }} @@ -140,7 +113,7 @@ jobs: # --scan: Publish the build scan. This will only work on PRs from apache/kafka and trunk # --no-scan: For public fork PRs, we won't attempt to publish the scan run: | - ./gradlew --build-cache --info $SCAN_ARG check releaseTarGz -x test + ./gradlew --build-cache --info $SCAN_ARG check siteDocTar -x test - name: Archive check reports if: always() uses: actions/upload-artifact@v4 @@ -170,32 +143,23 @@ jobs: find ./site-docs/generated -type f -exec grep -L "." {} \; >&2 exit 1 fi - - name: Verify license file - run: python committer-tools/verify_license.py --skip-build test: - needs: [configure, validate, load-catalog] - if: ${{ ! needs.configure.outputs.is-draft }} + needs: [validate, load-catalog] + if: ${{ ! needs.validate.outputs.is-draft }} runs-on: ubuntu-latest strategy: fail-fast: false matrix: - # If we change these, make sure to adjust ci-complete.yml - java: [ 24, 17 ] - run-flaky: [ true, false ] - run-new: [ true, false ] - exclude: - - run-flaky: true - run-new: true - env: - job-variation: ${{ matrix.java }}-${{ matrix.run-flaky == true && 'flaky' || 'noflaky' }}-${{ matrix.run-new == true && 'new' || 'nonew' }} - name: JUnit tests Java ${{ matrix.java }}${{ matrix.run-flaky == true && ' (flaky)' || '' }}${{ matrix.run-new == true && ' (new)' || '' }} + java: [ 23, 17 ] # If we change these, make sure to adjust ci-complete.yml + outputs: + timed-out: ${{ (steps.junit-test.outputs.gradle-exitcode == '124' || steps.junit-quarantined-test.outputs.gradle-exitcode == '124') }} + name: JUnit tests Java ${{ matrix.java }} steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false - ref: ${{ needs.configure.outputs.sha }} - name: Setup Python uses: ./.github/actions/setup-python - name: Setup Gradle @@ -210,11 +174,20 @@ jobs: # the overall workflow, so we'll continue here without a test catalog. - name: Load Test Catalog id: load-test-catalog - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 continue-on-error: true with: name: combined-test-catalog + - name: JUnit Quarantined Tests + id: junit-quarantined-test + uses: ./.github/actions/run-gradle + with: + test-task: quarantinedTest + timeout-minutes: 180 + test-catalog-path: ${{ steps.load-test-catalog.outputs.download-path }}/combined-test-catalog.txt + build-scan-artifact-name: build-scan-quarantined-test-${{ matrix.java }} + - name: JUnit Tests id: junit-test uses: ./.github/actions/run-gradle @@ -222,19 +195,13 @@ jobs: test-task: test timeout-minutes: 180 # 3 hours test-catalog-path: ${{ steps.load-test-catalog.outputs.download-path }}/combined-test-catalog.txt - build-scan-artifact-name: build-scan-${{ env.job-variation }} - run-new-tests: ${{ matrix.run-new }} - run-flaky-tests: ${{ matrix.run-flaky }} - test-retries: ${{ matrix.run-flaky == true && '3' || '1' }} - test-xml-output: ${{ env.job-variation }} - test-repeat: ${{ !inputs.is-trunk && matrix.run-new && '3' || '1' }} - test-verbose: ${{ runner.debug == '1' }} + build-scan-artifact-name: build-scan-test-${{ matrix.java }} - name: Archive JUnit HTML reports uses: actions/upload-artifact@v4 - id: archive-junit-html + id: junit-upload-artifact with: - name: junit-reports-${{ env.job-variation }} + name: junit-reports-${{ matrix.java }} path: | **/build/reports/tests/* compression-level: 9 @@ -243,98 +210,51 @@ jobs: - name: Archive JUnit XML uses: actions/upload-artifact@v4 with: - name: junit-xml-${{ env.job-variation }} + name: junit-xml-${{ matrix.java }} path: | build/junit-xml/**/*.xml compression-level: 9 if-no-files-found: ignore - name: Archive Thread Dumps - id: archive-thread-dump - if: steps.junit-test.outputs.gradle-exitcode == '124' + id: thread-dump-upload-artifact + if: always() && (steps.junit-test.outputs.gradle-exitcode == '124' || steps.junit-quarantined-test.outputs.gradle-exitcode == '124') uses: actions/upload-artifact@v4 with: - name: junit-thread-dumps-${{ env.job-variation }} + name: junit-thread-dumps-${{ matrix.java }} path: | thread-dumps/* compression-level: 9 if-no-files-found: ignore - name: Parse JUnit tests + run: python .github/scripts/junit.py --export-test-catalog ./test-catalog >> $GITHUB_STEP_SUMMARY env: GITHUB_WORKSPACE: ${{ github.workspace }} - JUNIT_REPORT_URL: ${{ steps.archive-junit-html.outputs.artifact-url }} - THREAD_DUMP_URL: ${{ steps.archive-thread-dump.outputs.artifact-url }} + JUNIT_REPORT_URL: ${{ steps.junit-upload-artifact.outputs.artifact-url }} + THREAD_DUMP_URL: ${{ steps.thread-dump-upload-artifact.outputs.artifact-url }} GRADLE_TEST_EXIT_CODE: ${{ steps.junit-test.outputs.gradle-exitcode }} - run: | - python .github/scripts/junit.py \ - --path build/junit-xml >> $GITHUB_STEP_SUMMARY + GRADLE_QUARANTINED_TEST_EXIT_CODE: ${{ steps.junit-quarantined-test.outputs.gradle-exitcode }} - # This job downloads all the JUnit XML files and thread dumps from the JDK 24 test runs. - # If any test job fails, we will not run this job. Also, if any thread dump artifacts - # are present, this means there was a timeout in the tests and so we will not proceed - # with catalog creation. - collate-test-catalog: - name: Collate Test Catalog - needs: test - runs-on: ubuntu-latest - outputs: - uploaded-test-catalog: ${{ steps.archive-test-catalog.outcome == 'success' }} - steps: - - name: Checkout code - uses: actions/checkout@v5 - with: - persist-credentials: false - - name: Download Thread Dumps - uses: actions/download-artifact@v5 - with: - pattern: junit-thread-dumps-24-* - path: thread-dumps - merge-multiple: true - - name: Check For Thread Dump - id: check-for-thread-dump - run: | - find . - if [ -d thread-dumps ]; then - echo "Found 'thread-dumps' directory. Will not proceed with test catalog collation."; - exit 1; - fi - - name: Download JUnit XMLs - uses: actions/download-artifact@v5 - with: - pattern: junit-xml-24-* # Only look at JDK 24 tests for the test catalog - path: junit-xml - merge-multiple: true - - name: Collate Test Catalog - continue-on-error: true - env: - GITHUB_WORKSPACE: ${{ github.workspace }} - GRADLE_TEST_EXIT_CODE: 0 - run: | - python .github/scripts/junit.py \ - --path junit-xml \ - --export-test-catalog ./test-catalog >> $GITHUB_STEP_SUMMARY - name: Archive Test Catalog - id: archive-test-catalog + if: ${{ always() && matrix.java == '23' }} uses: actions/upload-artifact@v4 with: name: test-catalog path: test-catalog compression-level: 9 - if-no-files-found: error + if-no-files-found: ignore - # This job downloads the test catalog from the previous job and overlays it on the test-catalog branch. - # This will only run on trunk and only if the collate job did not detect a timeout. update-test-catalog: name: Update Test Catalog - needs: collate-test-catalog - if: ${{ inputs.is-trunk && needs.collate-test-catalog.outputs.uploaded-test-catalog == 'true' }} + needs: test + if: ${{ always() && inputs.is-trunk && needs.test.outputs.timed-out == 'false' }} runs-on: ubuntu-latest permissions: contents: write steps: - name: Checkout Test Catalog - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: true # Needed to commit and push later ref: test-catalog @@ -342,7 +262,7 @@ jobs: run: | rm -rf test-catalog - name: Download Test Catalog - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 with: name: test-catalog path: test-catalog @@ -362,28 +282,3 @@ jobs: git add test-catalog git diff --quiet && git diff --staged --quiet || git commit -m "$COMMIT_MSG" git push - - checks-complete: - name: "CI checks completed" - needs: [configure, validate, test] - if: always() - runs-on: ubuntu-latest - steps: - - name: Env - run: printenv - env: - GITHUB_CONTEXT: ${{ toJson(github) }} - - name: Fail if Draft - if: ${{ needs.configure.outputs.is-draft }} - run: | - echo "Cannot merge a draft PR" - exit 1 - - name: Check Dependency Outcomes - run: | - if [[ "${{ needs.validate.result }}" == "success" && "${{ needs.test.result }}" == "success" ]]; then - echo "Required jobs completed successfully!" - exit 0 - else - echo "Required jobs did not complete successfully" - exit 1 - fi diff --git a/.github/workflows/ci-complete.yml b/.github/workflows/ci-complete.yml index 6b8492fb7c0ab..b2d26351545f4 100644 --- a/.github/workflows/ci-complete.yml +++ b/.github/workflows/ci-complete.yml @@ -43,25 +43,15 @@ jobs: strategy: fail-fast: false matrix: - # Make sure these match build.yml - java: [ 24, 17 ] - run-flaky: [ true, false ] - run-new: [ true, false ] - exclude: - - run-flaky: true - run-new: true - - env: - job-variation: ${{ matrix.java }}-${{ matrix.run-flaky == true && 'flaky' || 'noflaky' }}-${{ matrix.run-new == true && 'new' || 'nonew' }} - status-context: Java ${{ matrix.java }}${{ matrix.run-flaky == true && ' / Flaky' || '' }}${{ matrix.run-new == true && ' / New' || '' }} - + java: [ 23, 17 ] + artifact-prefix: [ "build-scan-test-", "build-scan-quarantined-test-"] steps: - name: Env run: printenv env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false @@ -72,12 +62,12 @@ jobs: develocity-access-key: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - name: Download build scan archive id: download-build-scan - uses: actions/download-artifact@v5 + uses: actions/download-artifact@v4 continue-on-error: true # Don't want this step to fail the overall workflow with: github-token: ${{ github.token }} run-id: ${{ github.event.workflow_run.id }} - name: build-scan-${{ env.job-variation }} + name: ${{ matrix.artifact-prefix }}${{ matrix.java }} path: ~/.gradle/build-scan-data # This is where Gradle buffers unpublished build scan data when --no-scan is given - name: Handle missing scan if: ${{ steps.download-build-scan.outcome == 'failure' }} @@ -88,7 +78,7 @@ jobs: commit_sha: ${{ github.event.workflow_run.head_sha }} url: '${{ github.event.workflow_run.html_url }}' description: 'Could not find build scan' - context: Gradle Build Scan / ${{ env.status-context }} + context: 'Gradle Build Scan / Java ${{ matrix.java }}' state: 'error' - name: Publish Scan id: publish-build-scan @@ -116,7 +106,7 @@ jobs: commit_sha: ${{ github.event.workflow_run.head_sha }} url: '${{ github.event.repository.html_url }}/actions/runs/${{ github.run_id }}' description: 'The build scan failed to be published' - context: Gradle Build Scan / ${{ env.status-context }} + context: 'Gradle Build Scan / Java ${{ matrix.java }}' state: 'error' - name: Update Status Check if: ${{ steps.publish-build-scan.outcome == 'success' }} @@ -127,5 +117,5 @@ jobs: commit_sha: ${{ github.event.workflow_run.head_sha }} url: ${{ steps.publish-build-scan.outputs.build-scan-url }} description: 'The build scan was successfully published' - context: Gradle Build Scan / ${{ env.status-context }} + context: 'Gradle Build Scan / Java ${{ matrix.java }}' state: 'success' diff --git a/.github/workflows/ci-requested.yml b/.github/workflows/ci-requested.yml new file mode 100644 index 0000000000000..a1da6fa34a1f8 --- /dev/null +++ b/.github/workflows/ci-requested.yml @@ -0,0 +1,86 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: CI Requested + +on: + workflow_run: + workflows: [CI] + types: + - requested + +run-name: CI Requested for ${{ github.event.workflow_run.display_title}} + +jobs: + check-pr-labels: + # Even though job conditionals are difficult to debug, this will reduce the number of unnecessary runs + if: | + github.event_name == 'workflow_run' && + github.event.workflow_run.event == 'pull_request' && + github.event.workflow_run.status == 'completed' && + github.event.workflow_run.conclusion == 'action_required' + runs-on: ubuntu-latest + steps: + - name: Env + run: printenv + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + - name: Checkout code + uses: actions/checkout@v4 + with: + persist-credentials: + false + - name: Check PR Labels + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + RUN_ID: ${{ github.event.workflow_run.id }} + HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }} + HEAD_REPO: ${{ github.event.workflow_run.head_repository.owner.login }} + # Caution! This is a bit hacky. The GH documentation shows that the workflow_run event should include a list + # of referencing pull_requests. I think this might only be the case for pull requests originating from the + # base repository. To deal with fork PRs, we need to query the API for PRs for the owner's branch. This + # code assumes that the fork repo owner is the same as the organization for the "org:branch" syntax used + # in the query. Also, only the first matching PR from that org will be considered. + run: | + set +e + PR_NUMBER=$(gh api \ + -H "Accept: application/vnd.github+json" \ + -H "X-GitHub-Api-Version: 2022-11-28" \ + /repos/$REPO/pulls?head=$HEAD_REPO:$HEAD_BRANCH \ + --jq '.[0].number') + if [ -z "$PR_NUMBER" ]; then + echo "Could not find the PR that triggered this workflow request"; + exit 1; + fi + gh pr view $PR_NUMBER --json labels -q '.labels[].name' | grep -q 'ci-approved' + exitcode="$?" + if [ $exitcode -ne 0 ]; then + echo "No ci-approved label set on PR #$PR_NUMBER. Will not auto-approve."; + exit 0; + else + echo "Found 'ci-approved' label on PR #$PR_NUMBER. Auto-approving workflow run $RUN_ID."; + fi + echo "PR_NUMBER=$PR_NUMBER" >> "$GITHUB_ENV" + echo "RUN_ID=$RUN_ID" >> "$GITHUB_ENV" + - name: Approve Workflow Run + if: env.RUN_ID != '' + uses: ./.github/actions/gh-api-approve-run + with: + gh-token: ${{ secrets.GITHUB_TOKEN }} + repository: ${{ github.repository }} + run_id: ${{ env.RUN_ID }} + pr_number: ${{ env.PR_NUMBER }} + commit_sha: ${{ github.event.workflow_run.head_sha }} diff --git a/.github/workflows/deflake.yml b/.github/workflows/deflake.yml index 3a2fbb56345b2..fa55ab5fb120b 100644 --- a/.github/workflows/deflake.yml +++ b/.github/workflows/deflake.yml @@ -42,20 +42,15 @@ jobs: name: Deflake JUnit tests steps: - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false - - - name: Setup Python - uses: ./.github/actions/setup-python - - name: Setup Gradle uses: ./.github/actions/setup-gradle with: java-version: ${{ inputs.java-version }} gradle-cache-read-only: true develocity-access-key: ${{ secrets.DEVELOCITY_ACCESS_KEY }} - - name: Test timeout-minutes: 60 id: junit-test @@ -65,52 +60,26 @@ jobs: TEST_PATTERN: ${{ inputs.test-pattern }} run: | set +e - ./.github/scripts/thread-dump.sh & - timeout 60m ./gradlew --info --build-cache --scan --continue \ + ./gradlew --info --build-cache --scan --continue \ -PtestLoggingEvents=started,passed,skipped,failed \ - -PmaxParallelForks=2 \ + -PignoreFailures=true -PmaxParallelForks=2 \ -Pkafka.cluster.test.repeat=$TEST_REPEAT \ -PmaxTestRetries=$TEST_REPEAT -PmaxTestRetryFailures=0 \ - ${TEST_MODULE}:test --tests $TEST_PATTERN + -PmaxQuarantineTestRetries=$TEST_REPEAT -PmaxQuarantineTestRetryFailures=0 \ + ${TEST_MODULE}:test ${TEST_MODULE}:quarantinedTest --tests $TEST_PATTERN exitcode="$?" echo "exitcode=$exitcode" >> $GITHUB_OUTPUT - - - name: Archive JUnit HTML reports - uses: actions/upload-artifact@v4 - id: archive-junit-html - with: - name: junit-html-reports - path: | - **/build/reports/tests/* - compression-level: 9 - if-no-files-found: ignore - - - name: Archive JUnit XML - uses: actions/upload-artifact@v4 - with: - name: junit-xml - path: | - build/junit-xml/**/*.xml - compression-level: 9 - if-no-files-found: ignore - - - name: Archive Thread Dumps - id: archive-thread-dump - if: steps.junit-test.outputs.gradle-exitcode == '124' + - name: Archive JUnit reports uses: actions/upload-artifact@v4 + id: junit-upload-artifact with: - name: junit-thread-dumps-${{ env.job-variation }} + name: junit-reports-${{ inputs.java-version }} path: | - thread-dumps/* - compression-level: 9 + **/build/reports/tests/test/* if-no-files-found: ignore - - name: Parse JUnit tests + run: python .github/scripts/junit.py >> $GITHUB_STEP_SUMMARY env: GITHUB_WORKSPACE: ${{ github.workspace }} - JUNIT_REPORT_URL: ${{ steps.archive-junit-html.outputs.artifact-url }} - THREAD_DUMP_URL: ${{ steps.archive-thread-dump.outputs.artifact-url }} - GRADLE_TEST_EXIT_CODE: ${{ steps.junit-test.outputs.gradle-exitcode }} - run: | - python .github/scripts/junit.py \ - --path build/junit-xml >> $GITHUB_STEP_SUMMARY + REPORT_URL: ${{ steps.junit-upload-artifact.outputs.artifact-url }} + GRADLE_EXIT_CODE: ${{ steps.junit-test.outputs.exitcode }} diff --git a/.github/workflows/docker_build_and_test.yml b/.github/workflows/docker_build_and_test.yml index 6a1b2f7de25f1..67acdf9fb7424 100644 --- a/.github/workflows/docker_build_and_test.yml +++ b/.github/workflows/docker_build_and_test.yml @@ -32,9 +32,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - name: Set up Python 3.10 - uses: actions/setup-python@v6 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Setup Docker Compose diff --git a/.github/workflows/docker_official_image_build_and_test.yml b/.github/workflows/docker_official_image_build_and_test.yml index 1580ea1f744ba..58866a19d6cab 100644 --- a/.github/workflows/docker_official_image_build_and_test.yml +++ b/.github/workflows/docker_official_image_build_and_test.yml @@ -31,9 +31,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - name: Set up Python 3.10 - uses: actions/setup-python@v6 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Setup Docker Compose diff --git a/.github/workflows/docker_rc_release.yml b/.github/workflows/docker_rc_release.yml index da851f4a43028..1f824b39b977a 100644 --- a/.github/workflows/docker_rc_release.yml +++ b/.github/workflows/docker_rc_release.yml @@ -37,9 +37,9 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout - uses: actions/checkout@v5 + uses: actions/checkout@v4 - name: Set up Python 3.10 - uses: actions/setup-python@v6 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install dependencies diff --git a/.github/workflows/docker_scan.yml b/.github/workflows/docker_scan.yml index 55df9f65e4c08..a76916fffa916 100644 --- a/.github/workflows/docker_scan.yml +++ b/.github/workflows/docker_scan.yml @@ -26,7 +26,7 @@ jobs: strategy: matrix: # This is an array of supported tags. Make sure this array only contains the supported tags - supported_image_tag: ['latest', '3.9.1', '4.0.0', '4.1.0'] + supported_image_tag: ['latest', '3.7.2', '3.8.1', '3.9.0'] steps: - name: Run CVE scan uses: aquasecurity/trivy-action@6e7b7d1fd3e4fef0c5fa8cce1229c54b2c9bd0d8 # v0.24.0 diff --git a/.github/workflows/generate-reports.yml b/.github/workflows/generate-reports.yml index dee7094c27c15..d3ad9cfa7737b 100644 --- a/.github/workflows/generate-reports.yml +++ b/.github/workflows/generate-reports.yml @@ -20,6 +20,7 @@ on: schedule: - cron: '0 6 * * *' # Run daily at 6am UTC + jobs: flaky-test-report: name: Flaky Test Report @@ -32,12 +33,11 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 - name: Setup Python uses: ./.github/actions/setup-python - name: Run Report - if : ${{ ! github.event.repository.fork }} env: - DEVELOCITY_ACCESS_TOKEN: ${{ secrets.DV_API_ACCESS }} + GE_ACCESS_TOKEN: ${{ secrets.GE_ACCESS_TOKEN }} run: | - python ./.github/scripts/develocity_reports.py >> $GITHUB_STEP_SUMMARY + python ./.github/scripts/develocity_reports.py >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/pr-labeled.yml b/.github/workflows/pr-labeled.yml index b5695825861ff..49a677ff7f850 100644 --- a/.github/workflows/pr-labeled.yml +++ b/.github/workflows/pr-labeled.yml @@ -35,7 +35,7 @@ jobs: env: GITHUB_CONTEXT: ${{ toJson(github) }} - name: Checkout code - uses: actions/checkout@v5 + uses: actions/checkout@v4 with: persist-credentials: false @@ -65,4 +65,4 @@ jobs: repository: ${{ github.repository }} run_id: ${{ env.RUN_ID }} pr_number: ${{ env.PR_NUMBER }} - commit_sha: ${{ github.event.pull_request.head.sha }} + commit_sha: ${{ github.event.workflow_run.head_sha }} diff --git a/.github/workflows/pr-reviewed-trigger.yml b/.github/workflows/pr-reviewed-trigger.yml new file mode 100644 index 0000000000000..f089176ff4b23 --- /dev/null +++ b/.github/workflows/pr-reviewed-trigger.yml @@ -0,0 +1,42 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +name: Pull Request Reviewed + +on: + pull_request_review: + types: + - submitted + +jobs: + # This job is a workaround for the fact that pull_request_review lacks necessary permissions to modify PRs. + # Also, there is no pull_request_target analog to pull_request_review. The approach taken here is taken from + # https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/. + pr-review-trigger: + name: Reviewed + runs-on: ubuntu-latest + steps: + - name: Env + run: printenv + env: + GITHUB_CONTEXT: ${{ toJson(github) }} + - name: Capture PR Number + run: + echo ${{ github.event.pull_request.number }} >> pr-number.txt + - name: Archive Event + uses: actions/upload-artifact@v4 + with: + name: pr-number.txt + path: pr-number.txt diff --git a/.github/workflows/pr-reviewed.yml b/.github/workflows/pr-reviewed.yml index 64636a6c2920f..ea2a35f58e8c4 100644 --- a/.github/workflows/pr-reviewed.yml +++ b/.github/workflows/pr-reviewed.yml @@ -13,30 +13,42 @@ # See the License for the specific language governing permissions and # limitations under the License. -name: Pull Request Reviewed +name: Remove Triage Label on: - pull_request_review: - types: [submitted, edited, dismissed] - branches: - - trunk - pull_request: - types: [opened, reopened, edited] - branches: - - trunk + workflow_run: + workflows: [Pull Request Reviewed] + types: + - completed jobs: - save-pr-number: - name: Save PR Number + # This job runs with elevated permissions and the ability to modify pull requests. The steps taken here + # should be limited to updating labels and adding comments to PRs. This approach is taken from + # https://securitylab.github.com/resources/github-actions-preventing-pwn-requests/. + remove-triage: + if: ${{ github.event.workflow_run.conclusion == 'success' }} runs-on: ubuntu-latest steps: - name: Env run: printenv env: GITHUB_CONTEXT: ${{ toJson(github) }} - - name: Save PR Number - run: echo ${{ github.event.pull_request.number }} > PR_NUMBER.txt - - uses: actions/upload-artifact@v4 + - uses: actions/download-artifact@v4 with: - name: PR_NUMBER.txt - path: PR_NUMBER.txt + github-token: ${{ github.token }} + run-id: ${{ github.event.workflow_run.id }} + name: pr-number.txt + - name: Remove label + uses: actions/github-script@v7 + continue-on-error: true + with: + github-token: ${{ secrets.GITHUB_TOKEN }} + script: | + var fs = require('fs'); + var pr_number = Number(fs.readFileSync('./pr-number.txt')); + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: pr_number, + name: 'triage' + }); diff --git a/.github/workflows/pr-update.yml b/.github/workflows/pr-update.yml index 7b45a15d19126..e1cd7214d6c36 100644 --- a/.github/workflows/pr-update.yml +++ b/.github/workflows/pr-update.yml @@ -37,8 +37,8 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v5 - - uses: actions/labeler@v6 + uses: actions/checkout@v4 + - uses: actions/labeler@v5 with: configuration-path: .github/configs/labeler.yml - name: check small label diff --git a/.github/workflows/prepare_docker_official_image_source.yml b/.github/workflows/prepare_docker_official_image_source.yml index 82204b9b93597..32f21a0afd0bf 100644 --- a/.github/workflows/prepare_docker_official_image_source.yml +++ b/.github/workflows/prepare_docker_official_image_source.yml @@ -31,9 +31,9 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v4 - name: Set up Python 3.10 - uses: actions/setup-python@v6 + uses: actions/setup-python@v5 with: python-version: "3.10" - name: Install dependencies diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml index 9382d4173e94c..6ceb074f62c10 100644 --- a/.github/workflows/stale.yml +++ b/.github/workflows/stale.yml @@ -35,6 +35,22 @@ permissions: pull-requests: write jobs: + needs-attention: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@v9 + with: + debug-only: ${{ inputs.dryRun || false }} + operations-per-run: ${{ inputs.operationsPerRun || 500 }} + days-before-stale: 7 + days-before-close: -1 + ignore-pr-updates: true + only-pr-labels: 'triage' + stale-pr-label: 'needs-attention' + stale-pr-message: | + A label of 'needs-attention' was automatically added to this PR in order to raise the + attention of the committers. Once this issue has been triaged, the `triage` label + should be removed to prevent this automation from happening again. stale: runs-on: ubuntu-latest steps: diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000000000..a3774a8b1f9c9 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,41 @@ +FROM openjdk:11-jdk-slim + +# Install necessary tools +RUN apt-get update && apt-get install -y \ + wget \ + netcat \ + && rm -rf /var/lib/apt/lists/* + +# Set up working directory +WORKDIR /opt/kafka + +# Copy the entire Kafka source code +COPY . . + +# Build Kafka +RUN ./gradlew jar -PscalaVersion=2.13 + +# Create kafka bin directory structure +RUN mkdir -p /opt/kafka/bin /opt/kafka/config /opt/kafka/libs + +# Copy built JARs to libs +RUN find . -name "*.jar" -path "*/build/libs/*" -exec cp {} /opt/kafka/libs/ \; + +# Copy startup scripts +RUN cp bin/* /opt/kafka/bin/ || true + +# Copy default configurations +RUN cp config/* /opt/kafka/config/ || true + +# Make scripts executable +RUN chmod +x /opt/kafka/bin/*.sh || true + +# Set environment variables +ENV KAFKA_HOME=/opt/kafka +ENV PATH=$PATH:$KAFKA_HOME/bin + +# Expose ports +EXPOSE 9092 + +# Default command +CMD ["bash"] \ No newline at end of file diff --git a/LICENSE-binary b/LICENSE-binary index c8fa1e8207a95..030f62b96755f 100644 --- a/LICENSE-binary +++ b/LICENSE-binary @@ -205,54 +205,53 @@ This project bundles some components that are also licensed under the Apache License Version 2.0: -- caffeine-3.2.0 -- commons-beanutils-1.11.0 +- caffeine-3.1.1 +- commons-beanutils-1.9.4 - commons-collections-3.2.2 - commons-digester-2.1 -- commons-lang3-3.18.0 -- commons-logging-1.3.5 +- commons-lang3-3.12.0 +- commons-logging-1.3.2 - commons-validator-1.9.0 -- hash4j-0.22.0 -- jackson-annotations-2.19.0 -- jackson-core-2.19.0 -- jackson-databind-2.19.0 -- jackson-dataformat-csv-2.19.0 -- jackson-dataformat-yaml-2.19.0 -- jackson-datatype-jdk8-2.19.0 -- jackson-jakarta-rs-base-2.19.0 -- jackson-jakarta-rs-json-provider-2.19.0 -- jackson-module-blackbird-2.19.0 -- jackson-module-jakarta-xmlbind-annotations-2.19.0 +- error_prone_annotations-2.14.0 +- jackson-annotations-2.16.2 +- jackson-core-2.16.2 +- jackson-databind-2.16.2 +- jackson-dataformat-csv-2.16.2 +- jackson-dataformat-yaml-2.16.2 +- jackson-datatype-jdk8-2.16.2 +- jackson-jakarta-rs-base-2.16.2 +- jackson-jakarta-rs-json-provider-2.16.2 +- jackson-module-blackbird-2.16.2 +- jackson-module-jakarta-xmlbind-annotations-2.16.2 - jakarta.inject-api-2.0.1 - jakarta.validation-api-3.0.2 -- javassist-3.30.2-GA -- jetty-alpn-client-12.0.22 -- jetty-client-12.0.22 -- jetty-ee10-servlet-12.0.22 -- jetty-ee10-servlets-12.0.22 -- jetty-http-12.0.22 -- jetty-io-12.0.22 -- jetty-security-12.0.22 -- jetty-server-12.0.22 -- jetty-session-12.0.22 -- jetty-util-12.0.22 -- jose4j-0.9.6 -- jspecify-1.0.0 -- log4j-api-2.25.1 -- log4j-core-2.25.1 -- log4j-slf4j-impl-2.25.1 -- log4j-1.2-api-2.25.1 +- javassist-3.29.2-GA +- jetty-alpn-client-12.0.15 +- jetty-client-12.0.15 +- jetty-ee10-servlet-12.0.15 +- jetty-ee10-servlets-12.0.15 +- jetty-http-12.0.15 +- jetty-io-12.0.15 +- jetty-security-12.0.15 +- jetty-server-12.0.15 +- jetty-session-12.0.15 +- jetty-util-12.0.15 +- jose4j-0.9.4 +- log4j-api-2.24.3 +- log4j-core-2.24.3 +- log4j-slf4j-impl-2.24.3 +- log4j-1.2-api-2.24.3 - lz4-java-1.8.0 - maven-artifact-3.9.6 - metrics-core-2.2.0 -- opentelemetry-proto-1.3.2-alpha +- opentelemetry-proto-1.0.0-alpha - plexus-utils-3.5.1 -- rocksdbjni-10.1.3 -- scala-library-2.13.16 +- rocksdbjni-9.7.3 +- scala-library-2.13.15 - scala-logging_2.13-3.9.5 -- scala-reflect-2.13.16 -- snappy-java-1.1.10.7 -- snakeyaml-2.4 +- scala-reflect-2.13.15 +- snappy-java-1.1.10.5 +- snakeyaml-2.2 - swagger-annotations-2.2.25 =============================================================================== @@ -278,12 +277,12 @@ see: licenses/eclipse-public-license-2.0 - hk2-utils-3.0.6 - osgi-resource-locator-1.0.3 - aopalliance-repackaged-3.0.6 -- jersey-client-3.1.10 -- jersey-common-3.1.10 -- jersey-container-servlet-3.1.10 -- jersey-container-servlet-core-3.1.10 -- jersey-hk2-3.1.10 -- jersey-server-3.1.10 +- jersey-client-3.1.9 +- jersey-common-3.1.9 +- jersey-container-servlet-3.1.9 +- jersey-container-servlet-core-3.1.9 +- jersey-hk2-3.1.9 +- jersey-server-3.1.9 --------------------------------------- CDDL 1.1 + GPLv2 with classpath exception @@ -299,25 +298,25 @@ see: licenses/CDDL+GPL-1.1 MIT License - argparse4j-0.7.0, see: licenses/argparse-MIT -- classgraph-4.8.179, see: licenses/classgraph-MIT +- classgraph-4.8.173, see: licenses/classgraph-MIT - jopt-simple-5.0.4, see: licenses/jopt-simple-MIT - slf4j-api-1.7.36, see: licenses/slf4j-MIT -- pcollections-4.0.2, see: licenses/pcollections-MIT +- pcollections-4.0.1, see: licenses/pcollections-MIT --------------------------------------- BSD 2-Clause -- zstd-jni-1.5.6-10, see: licenses/zstd-jni-BSD-2-clause +- zstd-jni-1.5.6-6, see: licenses/zstd-jni-BSD-2-clause - HdrHistogram-2.2.2, see: licenses/hdrHistogram-BSD-2-clause --------------------------------------- BSD 3-Clause -- jline-3.30.4, see: licenses/jline-BSD-3-clause +- jline-3.25.1, see: licenses/jline-BSD-3-clause - protobuf-java-3.25.5, see: licenses/protobuf-java-BSD-3-clause - jakarta.activation-2.0.1, see: licenses/jakarta-BSD-3-clause --------------------------------------- Go License -- re2j-1.8, see: licenses/re2j-GO +- re2j-1.7, see: licenses/re2j-GO diff --git a/NOTICE b/NOTICE index 36f87db19a965..b59b5575a3454 100644 --- a/NOTICE +++ b/NOTICE @@ -15,5 +15,9 @@ The streams-scala (streams/streams-scala) module was donated by Lightbend and th Copyright (C) 2018 Lightbend Inc. Copyright (C) 2017-2018 Alexis Seigneurin. +This project contains the following code copied from Apache Hadoop: +clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. + This project contains the following code copied from Apache Hive: streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java diff --git a/NOTICE-binary b/NOTICE-binary index b625e142293ad..49564569dfb05 100644 --- a/NOTICE-binary +++ b/NOTICE-binary @@ -15,6 +15,10 @@ The streams-scala (streams/streams-scala) module was donated by Lightbend and th Copyright (C) 2018 Lightbend Inc. Copyright (C) 2017-2018 Alexis Seigneurin. +This project contains the following code copied from Apache Hadoop: +clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java +Some portions of this file Copyright (c) 2004-2006 Intel Corporation and licensed under the BSD license. + This project contains the following code copied from Apache Hive: streams/src/main/java/org/apache/kafka/streams/state/internals/Murmur3.java @@ -349,7 +353,7 @@ The project maintains the following source code repositories: Angular JS, v1.6.6 * License MIT (http://www.opensource.org/licenses/mit-license.php) * Project: http://angularjs.org -* Copyright: (c) 2010-2017 Google, Inc. +* Coyright: (c) 2010-2017 Google, Inc. aopalliance Version 1 * License: all the source code provided by AOP Alliance is Public Domain. diff --git a/PULL_REQUEST_TEMPLATE.md b/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000000000..552a4d03efe90 --- /dev/null +++ b/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,14 @@ +*More detailed description of your change, +if necessary. The PR title and PR message become +the squashed commit message, so use a separate +comment to ping reviewers.* + +*Summary of testing strategy (including rationale) +for the feature or bug fix. Unit and/or integration +tests are expected for any behaviour change and +system tests should be considered for larger changes.* + +### Committer Checklist (excluded from commit message) +- [ ] Verify design and implementation +- [ ] Verify test coverage and CI build status +- [ ] Verify documentation (including upgrade notes) diff --git a/README.md b/README.md index 06c0e3921ebc1..364719583b711 100644 --- a/README.md +++ b/README.md @@ -1,22 +1,15 @@ -

- - - - Kafka Logo - -

+Apache Kafka +================= [![CI](https://github.com/apache/kafka/actions/workflows/ci.yml/badge.svg?branch=trunk&event=push)](https://github.com/apache/kafka/actions/workflows/ci.yml?query=event%3Apush+branch%3Atrunk) [![Flaky Test Report](https://github.com/apache/kafka/actions/workflows/generate-reports.yml/badge.svg?branch=trunk&event=schedule)](https://github.com/apache/kafka/actions/workflows/generate-reports.yml?query=event%3Aschedule+branch%3Atrunk) -[**Apache Kafka**](https://kafka.apache.org) is an open-source distributed event streaming platform used by thousands of companies for high-performance data pipelines, streaming analytics, data integration, and mission-critical applications. +See our [web site](https://kafka.apache.org) for details on the project. You need to have [Java](http://www.oracle.com/technetwork/java/javase/downloads/index.html) installed. -We build and test Apache Kafka with 17 and 24. The `release` parameter in javac is set to `11` for the clients -and streams modules, and `17` for the rest, ensuring compatibility with their respective -minimum Java versions. Similarly, the `release` parameter in scalac is set to `11` for the streams modules and `17` -for the rest. +We build and test Apache Kafka with 17 and 23. The `release` parameter in javac and scalac is set to `11` for the clients +and streams modules, and `17` for the broker and tools, ensuring compatibility with their respective minimum Java versions. Scala 2.13 is the only supported version in Apache Kafka. @@ -42,7 +35,7 @@ Follow instructions in https://kafka.apache.org/quickstart ./gradlew test # runs both unit and integration tests ./gradlew unitTest ./gradlew integrationTest - ./gradlew test -Pkafka.test.run.flaky=true # runs tests that are marked as flaky + ./gradlew quarantinedTest # runs the quarantined tests ### Force re-running tests without code change ### @@ -52,7 +45,6 @@ Follow instructions in https://kafka.apache.org/quickstart ### Running a particular unit/integration test ### ./gradlew clients:test --tests RequestResponseTest - ./gradlew streams:integration-tests:test --tests RestoreIntegrationTest ### Repeatedly running a particular unit/integration test with specific times by setting N ### N=500; I=0; while [ $I -lt $N ] && ./gradlew clients:test --tests RequestResponseTest --rerun --fail-fast; do (( I=$I+1 )); echo "Completed run: $I"; sleep 1; done @@ -60,7 +52,6 @@ Follow instructions in https://kafka.apache.org/quickstart ### Running a particular test method within a unit/integration test ### ./gradlew core:test --tests kafka.api.ProducerFailureHandlingTest.testCannotSendToInternalTopic ./gradlew clients:test --tests org.apache.kafka.clients.MetadataTest.testTimeToNextUpdate - ./gradlew streams:integration-tests:test --tests org.apache.kafka.streams.integration.RestoreIntegrationTest.shouldRestoreNullRecord ### Running a particular unit/integration test with log4j output ### By default, there will be only small number of logs output while testing. You can adjust it by changing the `log4j2.yaml` file in the module's `src/test/resources` directory. @@ -79,6 +70,10 @@ The following example declares -PmaxTestRetries=1 and -PmaxTestRetryFailures=3 t ./gradlew test -PmaxTestRetries=1 -PmaxTestRetryFailures=3 +The quarantinedTest task also has no retries by default, but you can set maxQuarantineTestRetries and maxQuarantineTestRetryFailures to enable retries, similar to the test task. + + ./gradlew quarantinedTest -PmaxQuarantineTestRetries=3 -PmaxQuarantineTestRetryFailures=20 + See [Test Retry Gradle Plugin](https://github.com/gradle/test-retry-gradle-plugin) for and [build.yml](.github/workflows/build.yml) more details. ### Generating test coverage reports ### @@ -101,9 +96,7 @@ fail due to code changes. You can just run: ./gradlew processMessages processTestMessages -See [Apache Kafka Message Definitions](clients/src/main/resources/common/message/README.md) for details on Apache Kafka message protocol. - -### Running a Kafka broker +### Running a Kafka broker in KRaft mode Using compiled files: @@ -113,9 +106,7 @@ Using compiled files: Using docker image: - docker run -p 9092:9092 apache/kafka:latest - -See [docker/README.md](docker/README.md) for detailed information. + docker run -p 9092:9092 apache/kafka:3.7.0 ### Cleaning the build ### ./gradlew clean @@ -136,17 +127,20 @@ Streams has multiple sub-projects, but you can run all the tests: ### Building IDE project #### *Note Please ensure that JDK17 is used when developing Kafka.* -IntelliJ supports Gradle natively and it will automatically check Java syntax and compatibility for each module, even if -the Java version shown in the `Structure > Project Settings > Modules` may not be the correct one. - -When it comes to Eclipse, run: +*Note that this is not strictly necessary (IntelliJ IDEA has good built-in support for Gradle projects, for example).* ./gradlew eclipse + ./gradlew idea The `eclipse` task has been configured to use `${project_dir}/build_eclipse` as Eclipse's build directory. Eclipse's default build directory (`${project_dir}/bin`) clashes with Kafka's scripts directory and we don't use Gradle's build directory to avoid known issues with this configuration. +IntelliJ Language Level awareness: + +IntelliJ will automatically check Java syntax and compatibility for each module, even if the Java version is not +explicitly set in the Structure > Project Settings > Modules. + ### Publishing the streams quickstart archetype artifact to maven ### For the Streams archetype project, one cannot use gradle to upload to maven; instead the `mvn deploy` command needs to be called at the quickstart folder: @@ -176,10 +170,6 @@ Please note for this to work you should create/update user maven settings (typic ... -### Installing all projects to the local Maven repository ### - - ./gradlew -PskipSigning=true publishToMavenLocal - ### Installing specific projects to the local Maven repository ### ./gradlew -PskipSigning=true :streams:publishToMavenLocal @@ -269,20 +259,10 @@ default. See https://www.lightbend.com/blog/scala-inliner-optimizer for more det See [tests/README.md](tests/README.md). -### Using Trogdor for testing ### - -We use Trogdor as a test framework for Apache Kafka. You can use it to run benchmarks and other workloads. - -See [trogdor/README.md](trogdor/README.md). - ### Running in Vagrant ### See [vagrant/README.md](vagrant/README.md). -### Kafka client examples ### - -See [examples/README.md](examples/README.md). - ### Contribution ### Apache Kafka is interested in building the community; we would welcome any thoughts or [patches](https://issues.apache.org/jira/browse/KAFKA). You can reach us [on the Apache mailing lists](http://kafka.apache.org/contact.html). diff --git a/bin/kafka-run-class.sh b/bin/kafka-run-class.sh index 0a5ecfae04e20..8bd1b17623b12 100755 --- a/bin/kafka-run-class.sh +++ b/bin/kafka-run-class.sh @@ -49,7 +49,7 @@ should_include_file() { base_dir=$(dirname $0)/.. if [ -z "$SCALA_VERSION" ]; then - SCALA_VERSION=2.13.16 + SCALA_VERSION=2.13.15 if [[ -f "$base_dir/gradle.properties" ]]; then SCALA_VERSION=`grep "^scalaVersion=" "$base_dir/gradle.properties" | cut -d= -f 2` fi @@ -225,7 +225,7 @@ if [ -z "$KAFKA_LOG4J_OPTS" ]; then (( WINDOWS_OS_FORMAT )) && LOG4J_DIR=$(cygpath --path --mixed "${LOG4J_DIR}") KAFKA_LOG4J_OPTS="-Dlog4j2.configurationFile=${LOG4J_DIR}" else - if echo "$KAFKA_LOG4J_OPTS" | grep -E "log4j\.[^[:space:]]+(\.properties|\.xml)$" >/dev/null; then + if echo "$KAFKA_LOG4J_OPTS" | grep -E "log4j\.[^[:space:]]+(\.properties|\.xml)$"; then # Enable Log4j 1.x configuration compatibility mode for Log4j 2 export LOG4J_COMPATIBILITY=true echo DEPRECATED: A Log4j 1.x configuration file has been detected, which is no longer recommended. >&2 @@ -282,7 +282,7 @@ fi # JVM performance options # MaxInlineLevel=15 is the default since JDK 14 and can be removed once older JDKs are no longer supported if [ -z "$KAFKA_JVM_PERFORMANCE_OPTS" ]; then - KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15" + KAFKA_JVM_PERFORMANCE_OPTS="-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -XX:MaxInlineLevel=15 -Djava.awt.headless=true" fi while [ $# -gt 0 ]; do diff --git a/bin/windows/kafka-run-class.bat b/bin/windows/kafka-run-class.bat index a73ae2b26f2d7..ca151e5df96ed 100755 --- a/bin/windows/kafka-run-class.bat +++ b/bin/windows/kafka-run-class.bat @@ -27,7 +27,7 @@ set BASE_DIR=%CD% popd IF ["%SCALA_VERSION%"] EQU [""] ( - set SCALA_VERSION=2.13.16 + set SCALA_VERSION=2.13.15 ) IF ["%SCALA_BINARY_VERSION%"] EQU [""] ( @@ -177,7 +177,7 @@ IF ["%KAFKA_HEAP_OPTS%"] EQU [""] ( rem JVM performance options IF ["%KAFKA_JVM_PERFORMANCE_OPTS%"] EQU [""] ( - set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent + set KAFKA_JVM_PERFORMANCE_OPTS=-server -XX:+UseG1GC -XX:MaxGCPauseMillis=20 -XX:InitiatingHeapOccupancyPercent=35 -XX:+ExplicitGCInvokesConcurrent -Djava.awt.headless=true ) IF not defined CLASSPATH ( diff --git a/build.gradle b/build.gradle index 29537dd77dca7..388a85aa851fa 100644 --- a/build.gradle +++ b/build.gradle @@ -14,6 +14,8 @@ // limitations under the License. import org.ajoberstar.grgit.Grgit +import org.gradle.api.JavaVersion + import java.nio.charset.StandardCharsets buildscript { @@ -37,9 +39,11 @@ plugins { id 'org.nosphere.apache.rat' version "0.8.1" id "io.swagger.core.v3.swagger-gradle-plugin" version "${swaggerVersion}" - id "com.github.spotbugs" version '6.2.3' apply false + id "com.github.spotbugs" version '6.0.25' apply false id 'org.scoverage' version '8.0.3' apply false - id 'com.gradleup.shadow' version '8.3.6' apply false + // Updating the shadow plugin version to 8.1.1 causes issue with signing and publishing the shadowed + // artifacts - see https://github.com/johnrengelman/shadow/issues/901 + id 'com.github.johnrengelman.shadow' version '8.1.0' apply false id 'com.diffplug.spotless' version "6.25.0" } @@ -47,7 +51,7 @@ ext { gradleVersion = versions.gradle minClientJavaVersion = 11 minNonClientJavaVersion = 17 - modulesNeedingJava11 = [":clients", ":generator", ":streams", ":streams:test-utils", ":streams:examples", ":streams-scala", ":test-common:test-common-util"] + modulesNeedingJava11 = [":clients", ":examples", ":generator", ":streams", ":streams:examples", ":streams:test-utils", ":streams-scala", ":test-common:test-common-util"] buildVersionFileName = "kafka-version.properties" @@ -80,6 +84,9 @@ ext { userMaxTestRetries = project.hasProperty('maxTestRetries') ? maxTestRetries.toInteger() : 0 userMaxTestRetryFailures = project.hasProperty('maxTestRetryFailures') ? maxTestRetryFailures.toInteger() : 0 + userMaxQuarantineTestRetries = project.hasProperty('maxQuarantineTestRetries') ? maxQuarantineTestRetries.toInteger() : 0 + userMaxQuarantineTestRetryFailures = project.hasProperty('maxQuarantineTestRetryFailures') ? maxQuarantineTestRetryFailures.toInteger() : 0 + skipSigning = project.hasProperty('skipSigning') && skipSigning.toBoolean() shouldSign = !skipSigning && !version.endsWith("SNAPSHOT") @@ -100,9 +107,6 @@ ext { throw new GradleException("Unexpected value for keepAliveMode property. Expected one of $keepAliveValues, but received: $userKeepAliveModeString") } - // Used by :test task - isGithubActions = System.getenv('GITHUB_ACTIONS') != null - // See README.md for details on this option and the reasoning for the default userScalaOptimizerMode = project.hasProperty("scalaOptimizerMode") ? scalaOptimizerMode : "inline-kafka" def scalaOptimizerValues = ["none", "method", "inline-kafka", "inline-scala"] @@ -126,6 +130,7 @@ ext { if (name in ["compileTestJava", "compileTestScala"]) { options.compilerArgs << "-parameters" } else if (name in ["compileJava", "compileScala"]) { + options.compilerArgs << "-Xlint:-rawtypes" options.compilerArgs << "-Xlint:all" options.compilerArgs << "-Xlint:-serial" options.compilerArgs << "-Xlint:-try" @@ -157,7 +162,6 @@ ext { libs.log4j2Api, libs.log4j2Core ] - } allprojects { @@ -193,10 +197,7 @@ allprojects { // ensure we have a single version in the classpath despite transitive dependencies libs.scalaLibrary, libs.scalaReflect, - // Workaround before `commons-validator` has new release. See KAFKA-19359. - libs.commonsBeanutils, - libs.jacksonAnnotations, - libs.commonsLang + libs.jacksonAnnotations ) } } @@ -248,7 +249,7 @@ static def projectToJUnitXmlPath(project) { projectNames.push(p.name) p = p.parent if (p.name == "kafka") { - break + break; } } return projectNames.join("/") @@ -276,8 +277,8 @@ if (repo != null) { excludes.addAll([ '**/.git/**', '**/build/**', - '.github/pull_request_template.md', 'CONTRIBUTING.md', + 'PULL_REQUEST_TEMPLATE.md', 'gradlew', 'gradlew.bat', 'gradle/wrapper/gradle-wrapper.properties', @@ -365,13 +366,17 @@ subprojects { if (!shouldPublishWithShadow) { from components.java } else { - apply plugin: 'com.gradleup.shadow' - from components.shadow + apply plugin: 'com.github.johnrengelman.shadow' + project.shadow.component(mavenJava) // Fix for avoiding inclusion of runtime dependencies marked as 'shadow' in MANIFEST Class-Path. - // https://github.com/GradleUp/shadow/issues/324 + // https://github.com/johnrengelman/shadow/issues/324 + afterEvaluate { pom.withXml { xml -> - def dependenciesNode = xml.asNode().get('dependencies') ?: xml.asNode().appendNode('dependencies') + if (xml.asNode().get('dependencies') == null) { + xml.asNode().appendNode('dependencies') + } + def dependenciesNode = xml.asNode().get('dependencies').get(0) project.configurations.shadowed.allDependencies.each { def dependencyNode = dependenciesNode.appendNode('dependency') dependencyNode.appendNode('groupId', it.group) @@ -380,6 +385,7 @@ subprojects { dependencyNode.appendNode('scope', 'runtime') } } + } } afterEvaluate { @@ -484,85 +490,99 @@ subprojects { } } - // Workaround for Mockito Java Agent restrictions in Java 21+ - // Starting with Java 21, the JDK restricts libraries from attaching a Java agent - // to their own JVM. As a result, Mockito’s inline mock maker (mockito-core) - // fails without explicit instrumentation, and the JVM consistently emits warnings. - // See also: https://javadoc.io/doc/org.mockito/mockito-core/latest/org.mockito/org/mockito/Mockito.html#mockito-instrumentation - afterEvaluate { subproject -> - def hasMockitoCore = subproject.configurations.findAll { - it.canBeResolved - }.any { config -> - config.incoming.dependencies.any { dependency -> - "$dependency" == libs.mockitoCore - } - } - - if (hasMockitoCore) { - subproject.configurations { - mockitoAgent { - transitive = false - } - } - subproject.dependencies { - mockitoAgent libs.mockitoCore - } - } - } - // The suites are for running sets of tests in IDEs. // Gradle will run each test class, so we exclude the suites to avoid redundantly running the tests twice. def testsToExclude = ['**/*Suite.class'] - // This task will copy JUnit XML files out of the sub-project's build directory and into - // a top-level build/junit-xml directory. This is necessary to avoid reporting on tests which - // were not run, but instead were restored via FROM-CACHE. See KAFKA-17479 for more details. - def copyTestXml = tasks.register('copyTestXml') { - onlyIf("Environment GITHUB_ACTIONS is set") { isGithubActions } - onlyIf("Project '${project.name}:test' has sources") { ! test.state.noSource } - onlyIf("Task '${project.name}:test' did work") { test.state.didWork } - + test { ext { - output = project.findProperty("kafka.test.xml.output.dir") + isGithubActions = System.getenv('GITHUB_ACTIONS') != null + hadFailure = false // Used to track if any tests failed, see afterSuite below } - // Never cache this task - outputs.cacheIf { false } - outputs.upToDateWhen { false } + maxParallelForks = maxTestForks + ignoreFailures = userIgnoreFailures || ext.isGithubActions - doLast { - def moduleDirPath = projectToJUnitXmlPath(project) - def dest = rootProject.layout.buildDirectory.dir("junit-xml/${moduleDirPath}/${output}").get().asFile - println "Copy JUnit XML for ${project.name} to $dest" - ant.copy(todir: "$dest") { - ant.fileset(dir: "${test.reports.junitXml.entryPoint}") { - ant.include(name: "**/*.xml") - } + maxHeapSize = defaultMaxHeapSize + jvmArgs = defaultJvmArgs + + // KAFKA-17433 Used by deflake.yml github action to repeat individual tests + systemProperty("kafka.cluster.test.repeat", project.findProperty("kafka.cluster.test.repeat")) + systemProperty("kafka.test.catalog.file", project.findProperty("kafka.test.catalog.file")) + systemProperty("kafka.test.run.quarantined", "false") + + testLogging { + events = userTestLoggingEvents ?: testLoggingEvents + showStandardStreams = userShowStandardStreams ?: testShowStandardStreams + exceptionFormat = testExceptionFormat + displayGranularity = 0 + } + logTestStdout.rehydrate(delegate, owner, this)() + + exclude testsToExclude + + useJUnitPlatform { + includeEngines 'junit-jupiter' + excludeTags 'flaky' + } + + develocity { + testRetry { + maxRetries = userMaxTestRetries + maxFailures = userMaxTestRetryFailures + } + } + + // As we process results, check if there were any test failures. + afterSuite { desc, result -> + if (result.resultType == TestResult.ResultType.FAILURE) { + ext.hadFailure = true } } - } - test { + // This closure will copy JUnit XML files out of the sub-project's build directory and into + // a top-level build/junit-xml directory. This is necessary to avoid reporting on tests which + // were not run, but instead were restored via FROM-CACHE. See KAFKA-17479 for more details. + doLast { + if (ext.isGithubActions) { + def moduleDirPath = projectToJUnitXmlPath(project) + def dest = rootProject.layout.buildDirectory.dir("junit-xml/${moduleDirPath}/test").get().asFile + println "Copy JUnit XML for ${project.name} to $dest" + ant.copy(todir: "$dest") { + ant.fileset(dir: "${test.reports.junitXml.entryPoint}") + } - doFirst { - def mockitoAgentConfig = configurations.findByName('mockitoAgent') - if (mockitoAgentConfig) { - jvmArgs("-javaagent:${mockitoAgentConfig.asPath}") + // If there were any test failures, we want to fail the task to prevent the failures + // from being cached. + if (ext.hadFailure) { + throw new GradleException("Failing this task since '${project.name}:${name}' had test failures.") + } } } + } + + task quarantinedTest(type: Test, dependsOn: compileJava) { + ext { + isGithubActions = System.getenv('GITHUB_ACTIONS') != null + hadFailure = false // Used to track if any tests failed, see afterSuite below + } + + // Disable caching and up-to-date for this task. We always want quarantined tests + // to run and never want to cache their results. Since we do this, we can avoid + // explicitly failing the build like we do in "test" with ext.hadFailure. + outputs.upToDateWhen { false } + outputs.cacheIf { false } maxParallelForks = maxTestForks - ignoreFailures = userIgnoreFailures + ignoreFailures = userIgnoreFailures || ext.isGithubActions - maxHeapSize = "3g" + maxHeapSize = defaultMaxHeapSize jvmArgs = defaultJvmArgs // KAFKA-17433 Used by deflake.yml github action to repeat individual tests systemProperty("kafka.cluster.test.repeat", project.findProperty("kafka.cluster.test.repeat")) systemProperty("kafka.test.catalog.file", project.findProperty("kafka.test.catalog.file")) - systemProperty("kafka.test.run.new", project.findProperty("kafka.test.run.new")) - systemProperty("kafka.test.run.flaky", project.findProperty("kafka.test.run.flaky")) - systemProperty("kafka.test.verbose", project.findProperty("kafka.test.verbose")) + systemProperty("kafka.test.run.quarantined", "true") testLogging { events = userTestLoggingEvents ?: testLoggingEvents @@ -572,20 +592,44 @@ subprojects { } logTestStdout.rehydrate(delegate, owner, this)() - exclude testsToExclude - useJUnitPlatform { includeEngines 'junit-jupiter' } develocity { testRetry { - maxRetries = userMaxTestRetries - maxFailures = userMaxTestRetryFailures + maxRetries = userMaxQuarantineTestRetries + maxFailures = userMaxQuarantineTestRetryFailures + } + } + + // As we process results, check if there were any test failures. + afterSuite { desc, result -> + if (result.resultType == TestResult.ResultType.FAILURE) { + ext.hadFailure = true } } - finalizedBy("copyTestXml") + // This closure will copy JUnit XML files out of the sub-project's build directory and into + // a top-level build/junit-xml directory. This is necessary to avoid reporting on tests which + // were not run, but instead were restored via FROM-CACHE. See KAFKA-17479 for more details. + doLast { + if (ext.isGithubActions) { + def moduleDirPath = projectToJUnitXmlPath(project) + def dest = rootProject.layout.buildDirectory.dir("junit-xml/${moduleDirPath}/quarantinedTest").get().asFile + println "Copy JUnit XML for ${project.name} to $dest" + ant.copy(todir: "$dest", failonerror: "false") { + ant.fileset(dir: "${quarantinedTest.reports.junitXml.entryPoint}") { + ant.include(name: "**/*.xml") + } + } + // If there were any test failures, we want to fail the task to prevent the failures + // from being cached. + if (ext.hadFailure) { + throw new GradleException("Failing this task since '${project.name}:${name}' had test failures.") + } + } + } } task integrationTest(type: Test, dependsOn: compileJava) { @@ -679,7 +723,7 @@ subprojects { task docsJar(dependsOn: javadocJar) - check.dependsOn('javadoc') + test.dependsOn('javadoc') task systemTestLibs(dependsOn: jar) @@ -926,9 +970,6 @@ project(':server') { } dependencies { - compileOnly libs.bndlib - compileOnly libs.spotbugs - implementation project(':clients') implementation project(':metadata') implementation project(':server-common') @@ -937,27 +978,21 @@ project(':server') { implementation project(':transaction-coordinator') implementation project(':raft') implementation project(':share-coordinator') - implementation project(':storage:storage-api') implementation libs.jacksonDatabind implementation libs.metrics implementation libs.slf4jApi - implementation log4j2Libs testImplementation project(':clients').sourceSets.test.output testImplementation libs.mockitoCore testImplementation libs.junitJupiter testImplementation testLog4j2Libs - testImplementation project(':test-common:test-common-internal-api') - testImplementation project(':test-common:test-common-runtime') - testImplementation project(':storage:storage-api').sourceSets.test.output - testImplementation project(':server-common').sourceSets.test.output testRuntimeOnly runtimeTestLibs } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -977,13 +1012,13 @@ project(':server') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } checkstyle { @@ -1061,7 +1096,6 @@ project(':core') { implementation libs.scalaReflect implementation libs.scalaLogging implementation libs.slf4jApi - implementation libs.re2j testImplementation project(':clients').sourceSets.test.output testImplementation project(':group-coordinator').sourceSets.test.output @@ -1076,7 +1110,6 @@ project(':core') { testImplementation project(':test-common:test-common-util') testImplementation libs.bcpkix testImplementation libs.mockitoCore - testImplementation libs.jqwik testImplementation(libs.apacheda) { exclude group: 'xml-apis', module: 'xml-apis' // `mina-core` is a transitive dependency for `apacheds` and `apacheda`. @@ -1094,7 +1127,6 @@ project(':core') { testImplementation libs.junitJupiter testImplementation libs.caffeine testImplementation testLog4j2Libs - testImplementation libs.mockOAuth2Server testRuntimeOnly runtimeTestLibs } @@ -1105,7 +1137,7 @@ project(':core') { if (versions.baseScala == '2.13') { scoverageScalaVersion = '2.13.9' // there's no newer 2.13 artifact, org.scoverage:scalac-scoverage-plugin_2.13.9:2.0.11 is the latest as of now } - reportDir = file("${layout.buildDirectory.get().asFile.path}/scoverage") + reportDir = file("${rootProject.buildDir}/scoverage") highlighting = false minimumRate = 0.0 } @@ -1115,7 +1147,8 @@ project(':core') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + from (configurations.releaseOnly) + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -1182,13 +1215,6 @@ project(':core') { standardOutput = new File(generatedDocsDir, "topic_config.html").newOutputStream() } - task genGroupConfigDocs(type: JavaExec) { - classpath = sourceSets.main.runtimeClasspath - mainClass = 'org.apache.kafka.coordinator.group.GroupConfig' - if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() } - standardOutput = new File(generatedDocsDir, "group_config.html").newOutputStream() - } - task genConsumerMetricsDocs(type: JavaExec) { classpath = sourceSets.test.runtimeClasspath mainClass = 'org.apache.kafka.clients.consumer.internals.ConsumerMetrics' @@ -1205,7 +1231,7 @@ project(':core') { task siteDocsTar(dependsOn: ['genProtocolErrorDocs', 'genProtocolTypesDocs', 'genProtocolApiKeyDocs', 'genProtocolMessageDocs', 'genAdminClientConfigDocs', 'genProducerConfigDocs', 'genConsumerConfigDocs', - 'genKafkaConfigDocs', 'genTopicConfigDocs', 'genGroupConfigDocs', + 'genKafkaConfigDocs', 'genTopicConfigDocs', ':connect:runtime:genConnectConfigDocs', ':connect:runtime:genConnectTransformationDocs', ':connect:runtime:genConnectPredicateDocs', ':connect:runtime:genSinkConnectorConfigDocs', ':connect:runtime:genSourceConnectorConfigDocs', @@ -1278,21 +1304,14 @@ project(':core') { ) } - test { - useJUnitPlatform { - includeEngines 'jqwik', 'junit-jupiter' - } - } - tasks.create(name: "copyDependantTestLibs", type: Copy) { from (configurations.testRuntimeClasspath) { include('*.jar') } - from (configurations.releaseOnly) - into "${layout.buildDirectory.get().asFile.path}/dependant-testlibs" + into "$buildDir/dependant-testlibs" //By default gradle does not handle test dependencies between the sub-projects //This line is to include clients project test jar to dependant-testlibs - from (project(':clients').testJar ) { "${layout.buildDirectory.get().asFile.path}/dependant-testlibs" } + from (project(':clients').testJar ) { "$buildDir/dependant-testlibs" } duplicatesStrategy 'exclude' } @@ -1348,7 +1367,6 @@ project(':metadata') { testImplementation project(':clients').sourceSets.test.output testImplementation project(':raft').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output - testImplementation project(':test-common:test-common-util') testRuntimeOnly runtimeTestLibs @@ -1406,7 +1424,7 @@ project(':group-coordinator:group-coordinator-api') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -1426,13 +1444,13 @@ project(':group-coordinator:group-coordinator-api') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } javadoc { @@ -1466,7 +1484,6 @@ project(':group-coordinator') { implementation libs.hdrHistogram implementation libs.re2j implementation libs.slf4jApi - implementation libs.hash4j testImplementation project(':clients').sourceSets.test.output testImplementation project(':server-common').sourceSets.test.output @@ -1508,8 +1525,7 @@ project(':group-coordinator') { args = [ "-p", "org.apache.kafka.coordinator.group.generated", "-o", "${projectDir}/build/generated/main/java/org/apache/kafka/coordinator/group/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator", "JsonConverterGenerator", - "-t", "CoordinatorRecordTypeGenerator", "CoordinatorRecordJsonConvertersGenerator" + "-m", "MessageDataGenerator", "JsonConverterGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1580,15 +1596,14 @@ project(':test-common:test-common-runtime') { } dependencies { - api project(':core') - api project(':clients') - - implementation project(':server') - implementation project(':server-common') - implementation project(':group-coordinator') implementation project(':test-common:test-common-internal-api') + implementation project(':clients') + implementation project(':core') + implementation project(':group-coordinator') implementation project(':metadata') implementation project(':raft') + implementation project(':server') + implementation project(':server-common') implementation project(':storage') implementation libs.junitPlatformLanucher @@ -1625,7 +1640,6 @@ project(':transaction-coordinator') { implementation libs.jacksonDatabind implementation project(':clients') implementation project(':server-common') - implementation project(':coordinator-common') implementation libs.slf4jApi testImplementation testLog4j2Libs @@ -1663,8 +1677,7 @@ project(':transaction-coordinator') { args = [ "-p", "org.apache.kafka.coordinator.transaction.generated", "-o", "${projectDir}/build/generated/main/java/org/apache/kafka/coordinator/transaction/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator", "JsonConverterGenerator", - "-t", "CoordinatorRecordTypeGenerator", "CoordinatorRecordJsonConvertersGenerator" + "-m", "MessageDataGenerator", "JsonConverterGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1766,8 +1779,7 @@ project(':share-coordinator') { args = [ "-p", "org.apache.kafka.coordinator.share.generated", "-o", "${projectDir}/build/generated/main/java/org/apache/kafka/coordinator/share/generated", "-i", "src/main/resources/common/message", - "-m", "MessageDataGenerator", "JsonConverterGenerator", - "-t", "CoordinatorRecordTypeGenerator", "CoordinatorRecordJsonConvertersGenerator" + "-m", "MessageDataGenerator", "JsonConverterGenerator" ] inputs.dir("src/main/resources/common/message") .withPropertyName("messages") @@ -1798,7 +1810,7 @@ project(':examples') { } checkstyle { - configProperties = checkstyleConfigProperties("import-control-examples.xml") + configProperties = checkstyleConfigProperties("import-control-core.xml") } } @@ -1809,11 +1821,11 @@ project(':generator') { implementation libs.jacksonJDK8Datatypes implementation libs.jacksonJakartarsJsonProvider - implementation 'org.eclipse.jgit:org.eclipse.jgit:7.2.0.202503040940-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit:6.4.0.202211300538-r' // SSH support for JGit based on Apache MINA sshd - implementation 'org.eclipse.jgit:org.eclipse.jgit.ssh.apache:7.2.0.202503040940-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit.ssh.apache:6.4.0.202211300538-r' // GPG support for JGit based on BouncyCastle (commit signing) - implementation 'org.eclipse.jgit:org.eclipse.jgit.gpg.bc:7.2.0.202503040940-r' + implementation 'org.eclipse.jgit:org.eclipse.jgit.gpg.bc:6.4.0.202211300538-r' testImplementation libs.junitJupiter @@ -1859,7 +1871,6 @@ project(':clients') { testImplementation libs.jacksonJakartarsJsonProvider testImplementation libs.jose4j testImplementation libs.junitJupiter - testImplementation libs.jqwik testImplementation libs.spotbugs testImplementation libs.mockitoCore testImplementation libs.mockitoJunitJupiter // supports MockitoExtension @@ -1874,12 +1885,8 @@ project(':clients') { generator project(':generator') } - tasks.withType(GenerateModuleMetadata) { - enabled = false - } - task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -1912,14 +1919,14 @@ project(':clients') { // dependencies excluded from the final jar, since they are declared as runtime dependencies dependencies { project.configurations.shadowed.allDependencies.each { - exclude(dependency(it)) + exclude(dependency(it.group + ':' + it.name)) } // exclude proto files from the jar exclude "**/opentelemetry/proto/**/*.proto" exclude "**/google/protobuf/*.proto" } - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } @@ -1933,7 +1940,7 @@ project(':clients') { } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } task processMessages(type:JavaExec) { @@ -2014,41 +2021,6 @@ project(':clients') { } } -project(':clients:clients-integration-tests') { - base { - archivesName = "kafka-clients-integration-tests" - } - - dependencies { - testImplementation libs.metrics - testImplementation libs.slf4jApi - testImplementation project(':test-common:test-common-internal-api') - testImplementation project(':test-common:test-common-runtime') - testImplementation project(':metadata') - testImplementation project(':server') - testImplementation project(':storage') - testImplementation project(':core').sourceSets.test.output - testImplementation project(':clients').sourceSets.test.output - implementation project(':server-common') - testImplementation project(':server-common').sourceSets.test.output - testImplementation project(':metadata') - implementation project(':group-coordinator') - implementation project(':group-coordinator:group-coordinator-api') - implementation project(':transaction-coordinator') - testImplementation project(':test-common:test-common-util') - - testImplementation libs.junitJupiter - testImplementation libs.junitPlatformSuiteEngine - - testRuntimeOnly runtimeTestLibs - } - - checkstyle { - configProperties = checkstyleConfigProperties("import-control-clients-integration-tests.xml") - } -} - - project(':raft') { base { archivesName = "kafka-raft" @@ -2080,7 +2052,7 @@ project(':raft') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2130,7 +2102,7 @@ project(':raft') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } @@ -2142,7 +2114,7 @@ project(':raft') { } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } javadoc { @@ -2174,7 +2146,7 @@ project(':server-common') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2194,13 +2166,13 @@ project(':server-common') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } checkstyle { @@ -2233,7 +2205,7 @@ project(':storage:storage-api') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2253,13 +2225,13 @@ project(':storage:storage-api') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } javadoc { @@ -2281,13 +2253,12 @@ project(':storage') { } dependencies { - implementation project(':metadata') implementation project(':storage:storage-api') implementation project(':server-common') implementation project(':clients') + implementation project(':transaction-coordinator') implementation(libs.caffeine) { exclude group: 'org.checkerframework', module: 'checker-qual' - exclude group: 'com.google.errorprone', module: 'error_prone_annotations' } implementation libs.slf4jApi implementation libs.jacksonDatabind @@ -2297,14 +2268,11 @@ project(':storage') { testImplementation project(':clients').sourceSets.test.output testImplementation project(':core') testImplementation project(':core').sourceSets.test.output - testImplementation project(':storage:storage-api').sourceSets.test.output testImplementation project(':test-common:test-common-internal-api') testImplementation project(':test-common:test-common-runtime') - testImplementation project(':test-common:test-common-util') testImplementation project(':server') testImplementation project(':server-common') testImplementation project(':server-common').sourceSets.test.output - testImplementation project(':transaction-coordinator') testImplementation libs.hamcrest testImplementation libs.jacksonDatabindYaml testImplementation libs.junitJupiter @@ -2318,7 +2286,7 @@ project(':storage') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2383,13 +2351,13 @@ project(':storage') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } javadoc { @@ -2414,7 +2382,7 @@ project(':tools:tools-api') { } task createVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2434,13 +2402,13 @@ project(':tools:tools-api') { jar { dependsOn createVersionFile - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildVersionFileName" } } clean.doFirst { - delete "${layout.buildDirectory.get().asFile.path}/kafka/" + delete "$buildDir/kafka/" } javadoc { @@ -2471,7 +2439,6 @@ project(':tools') { implementation project(':group-coordinator') implementation project(':coordinator-common') implementation project(':share-coordinator') - implementation project(':raft') implementation libs.argparse4j implementation libs.jacksonDatabind implementation libs.jacksonDataformatCsv @@ -2531,7 +2498,7 @@ project(':tools') { exclude('kafka-clients*') } from (configurations.releaseOnly) - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2589,7 +2556,7 @@ project(':trogdor') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2638,7 +2605,7 @@ project(':shell') { from (configurations.runtimeClasspath) { include('jline-*jar') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2727,12 +2694,12 @@ project(':streams') { from (configurations.runtimeClasspath) { exclude('kafka-clients*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } task createStreamsVersionFile() { - def receiptFile = file("${layout.buildDirectory.get().asFile.path}/kafka/$buildStreamsVersionFileName") + def receiptFile = file("$buildDir/kafka/$buildStreamsVersionFileName") inputs.property "commitId", commitId inputs.property "version", version outputs.file receiptFile @@ -2751,7 +2718,7 @@ project(':streams') { jar { dependsOn 'createStreamsVersionFile' - from("${layout.buildDirectory.get().asFile.path}") { + from("$buildDir") { include "kafka/$buildStreamsVersionFileName" } dependsOn 'copyDependantLibs' @@ -2796,7 +2763,6 @@ project(':streams') { ':streams:upgrade-system-tests-37:test', ':streams:upgrade-system-tests-38:test', ':streams:upgrade-system-tests-39:test', - ':streams:upgrade-system-tests-40:test', ':streams:examples:test' ] ) @@ -2837,7 +2803,7 @@ project(':streams:streams-scala') { from (configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2864,7 +2830,6 @@ project(':streams:integration-tests') { dependencies { implementation libs.slf4jApi - implementation libs.scalaLibrary testImplementation project(':clients').sourceSets.test.output testImplementation project(':group-coordinator') @@ -2934,7 +2899,7 @@ project(':streams:test-utils') { from (configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2952,8 +2917,6 @@ project(':streams:examples') { dependencies { implementation project(':streams') implementation libs.slf4jApi - implementation libs.jacksonDatabind - implementation libs.jacksonAnnotations testImplementation project(':streams:test-utils') testImplementation project(':clients').sourceSets.test.output // for org.apache.kafka.test.IntegrationTest @@ -2972,7 +2935,7 @@ project(':streams:examples') { from (configurations.runtimeClasspath) { exclude('kafka-streams*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs-${versions.scala}" + into "$buildDir/dependant-libs-${versions.scala}" duplicatesStrategy 'exclude' } @@ -2989,7 +2952,6 @@ project(':streams:upgrade-system-tests-0110') { dependencies { testImplementation libs.kafkaStreams_0110 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3005,7 +2967,6 @@ project(':streams:upgrade-system-tests-10') { dependencies { testImplementation libs.kafkaStreams_10 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3021,7 +2982,6 @@ project(':streams:upgrade-system-tests-11') { dependencies { testImplementation libs.kafkaStreams_11 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3037,7 +2997,6 @@ project(':streams:upgrade-system-tests-20') { dependencies { testImplementation libs.kafkaStreams_20 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3053,7 +3012,6 @@ project(':streams:upgrade-system-tests-21') { dependencies { testImplementation libs.kafkaStreams_21 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3069,7 +3027,6 @@ project(':streams:upgrade-system-tests-22') { dependencies { testImplementation libs.kafkaStreams_22 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3085,7 +3042,6 @@ project(':streams:upgrade-system-tests-23') { dependencies { testImplementation libs.kafkaStreams_23 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3101,7 +3057,6 @@ project(':streams:upgrade-system-tests-24') { dependencies { testImplementation libs.kafkaStreams_24 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3117,7 +3072,6 @@ project(':streams:upgrade-system-tests-25') { dependencies { testImplementation libs.kafkaStreams_25 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3133,7 +3087,6 @@ project(':streams:upgrade-system-tests-26') { dependencies { testImplementation libs.kafkaStreams_26 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3149,7 +3102,6 @@ project(':streams:upgrade-system-tests-27') { dependencies { testImplementation libs.kafkaStreams_27 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3165,7 +3117,6 @@ project(':streams:upgrade-system-tests-28') { dependencies { testImplementation libs.kafkaStreams_28 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3181,7 +3132,6 @@ project(':streams:upgrade-system-tests-30') { dependencies { testImplementation libs.kafkaStreams_30 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3197,7 +3147,6 @@ project(':streams:upgrade-system-tests-31') { dependencies { testImplementation libs.kafkaStreams_31 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3213,7 +3162,6 @@ project(':streams:upgrade-system-tests-32') { dependencies { testImplementation libs.kafkaStreams_32 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3229,7 +3177,6 @@ project(':streams:upgrade-system-tests-33') { dependencies { testImplementation libs.kafkaStreams_33 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3245,7 +3192,6 @@ project(':streams:upgrade-system-tests-34') { dependencies { testImplementation libs.kafkaStreams_34 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3261,7 +3207,6 @@ project(':streams:upgrade-system-tests-35') { dependencies { testImplementation libs.kafkaStreams_35 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3277,7 +3222,6 @@ project(':streams:upgrade-system-tests-36') { dependencies { testImplementation libs.kafkaStreams_36 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3293,7 +3237,6 @@ project(':streams:upgrade-system-tests-37') { dependencies { testImplementation libs.kafkaStreams_37 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3309,7 +3252,6 @@ project(':streams:upgrade-system-tests-38') { dependencies { testImplementation libs.kafkaStreams_38 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3325,39 +3267,6 @@ project(':streams:upgrade-system-tests-39') { dependencies { testImplementation libs.kafkaStreams_39 testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-40') { - base { - archivesName = "kafka-streams-upgrade-system-tests-40" - } - - dependencies { - testImplementation libs.kafkaStreams_40 - testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs - } - - systemTestLibs { - dependsOn testJar - } -} - -project(':streams:upgrade-system-tests-41') { - base { - archivesName = "kafka-streams-upgrade-system-tests-41" - } - - dependencies { - testImplementation libs.kafkaStreams_41 - testRuntimeOnly libs.junitJupiter - testRuntimeOnly runtimeTestLibs } systemTestLibs { @@ -3367,7 +3276,7 @@ project(':streams:upgrade-system-tests-41') { project(':jmh-benchmarks') { - apply plugin: 'com.gradleup.shadow' + apply plugin: 'com.github.johnrengelman.shadow' shadowJar { archiveBaseName = 'kafka-jmh-benchmarks' @@ -3383,7 +3292,6 @@ project(':jmh-benchmarks') { implementation project(':raft') implementation project(':clients') implementation project(':coordinator-common') - implementation project(':coordinator-common').sourceSets.test.output implementation project(':group-coordinator') implementation project(':group-coordinator:group-coordinator-api') implementation project(':metadata') @@ -3397,7 +3305,6 @@ project(':jmh-benchmarks') { implementation project(':clients').sourceSets.test.output implementation project(':core').sourceSets.test.output implementation project(':server-common').sourceSets.test.output - implementation project(':metadata').sourceSets.test.output implementation libs.jmhCore annotationProcessor libs.jmhGeneratorAnnProcess @@ -3432,7 +3339,7 @@ project(':jmh-benchmarks') { if (System.getProperty("jmhArgs")) { args System.getProperty("jmhArgs").split(' ') } - args = [shadowJar.archiveFile.get().asFile, *args] + args = [shadowJar.archivePath, *args] } } @@ -3467,7 +3374,7 @@ project(':connect:api') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3502,7 +3409,7 @@ project(':connect:transforms') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3541,7 +3448,7 @@ project(':connect:json') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3638,7 +3545,7 @@ project(':connect:runtime') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3690,7 +3597,7 @@ project(':connect:runtime') { task setVersionInOpenAPISpec(type: Copy) { from "$rootDir/gradle/openapi.template" - into "${layout.buildDirectory.get().asFile.path}/resources/docs" + into "$buildDir/resources/docs" rename ('openapi.template', 'openapi.yaml') expand(kafkaVersion: "$rootProject.version") } @@ -3703,7 +3610,7 @@ project(':connect:runtime') { outputFormat = 'YAML' prettyPrint = 'TRUE' sortOutput = 'TRUE' - openApiFile = file("${layout.buildDirectory.get().asFile.path}/resources/docs/openapi.yaml") + openApiFile = file("$buildDir/resources/docs/openapi.yaml") resourcePackages = ['org.apache.kafka.connect.runtime.rest.resources'] if( !generatedDocsDir.exists() ) { generatedDocsDir.mkdirs() } outputDir = file(generatedDocsDir) @@ -3743,7 +3650,7 @@ project(':connect:file') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3783,7 +3690,7 @@ project(':connect:basic-auth-extension') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3834,7 +3741,7 @@ project(':connect:mirror') { testImplementation project(':core') testImplementation project(':test-common:test-common-runtime') testImplementation project(':server') - testImplementation project(':server-common') + testImplementation project(':server-common').sourceSets.test.output testRuntimeOnly project(':connect:runtime') @@ -3851,7 +3758,7 @@ project(':connect:mirror') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } @@ -3913,7 +3820,7 @@ project(':connect:mirror-client') { exclude('kafka-clients*') exclude('connect-*') } - into "${layout.buildDirectory.get().asFile.path}/dependant-libs" + into "$buildDir/dependant-libs" duplicatesStrategy 'exclude' } diff --git a/checkstyle/checkstyle.xml b/checkstyle/checkstyle.xml index 51cfe3dcb7e65..fa1d5873a2cbc 100644 --- a/checkstyle/checkstyle.xml +++ b/checkstyle/checkstyle.xml @@ -43,7 +43,6 @@ - diff --git a/checkstyle/import-control-coordinator-common.xml b/checkstyle/import-control-coordinator-common.xml index 7841697cf892a..bafffe8069746 100644 --- a/checkstyle/import-control-coordinator-common.xml +++ b/checkstyle/import-control-coordinator-common.xml @@ -58,11 +58,9 @@ - - diff --git a/checkstyle/import-control-core.xml b/checkstyle/import-control-core.xml index ea93b8d96ad31..8de4f5ac5361d 100644 --- a/checkstyle/import-control-core.xml +++ b/checkstyle/import-control-core.xml @@ -1,6 +1,6 @@ +"-//Puppy Crawl//DTD Import Control 1.1//EN" +"http://www.puppycrawl.com/dtds/import_control_1_1.dtd"> - + @@ -50,9 +49,6 @@ - - - @@ -64,7 +60,6 @@ - @@ -102,7 +97,6 @@ - @@ -135,7 +129,6 @@ - diff --git a/checkstyle/import-control-server.xml b/checkstyle/import-control-server.xml index b3d1b928cc6db..6c3332b1cfd4f 100644 --- a/checkstyle/import-control-server.xml +++ b/checkstyle/import-control-server.xml @@ -59,7 +59,6 @@ - @@ -82,19 +81,10 @@ - - - - - - - - - @@ -102,19 +92,11 @@ - - - - - - - - diff --git a/checkstyle/import-control-storage.xml b/checkstyle/import-control-storage.xml index 2a0f74126859a..d0b6524d9e128 100644 --- a/checkstyle/import-control-storage.xml +++ b/checkstyle/import-control-storage.xml @@ -29,7 +29,6 @@ - @@ -50,11 +49,7 @@ - - - - @@ -75,12 +70,8 @@ - - - - - + @@ -89,16 +80,11 @@ - - - - - @@ -158,13 +144,4 @@ - - - - - - - - - diff --git a/checkstyle/import-control-transaction-coordinator.xml b/checkstyle/import-control-transaction-coordinator.xml index 810c127c95c32..a6a4025079ce5 100644 --- a/checkstyle/import-control-transaction-coordinator.xml +++ b/checkstyle/import-control-transaction-coordinator.xml @@ -34,13 +34,10 @@ - - - diff --git a/checkstyle/import-control.xml b/checkstyle/import-control.xml index b1ef62ca3a26b..dc674ab997a6d 100644 --- a/checkstyle/import-control.xml +++ b/checkstyle/import-control.xml @@ -1,6 +1,6 @@ +"-//Puppy Crawl//DTD Import Control 1.1//EN" +"http://www.puppycrawl.com/dtds/import_control_1_1.dtd"> - - @@ -235,7 +227,6 @@ - @@ -299,7 +290,6 @@ - @@ -339,11 +329,9 @@ - - @@ -432,7 +420,6 @@ - @@ -495,7 +482,6 @@ - @@ -588,12 +574,8 @@ - - - - - + @@ -605,10 +587,12 @@ + + diff --git a/checkstyle/suppressions.xml b/checkstyle/suppressions.xml index dc9ac09a9b25c..e48b71c45076b 100644 --- a/checkstyle/suppressions.xml +++ b/checkstyle/suppressions.xml @@ -37,11 +37,15 @@ + + + + + files="(RemoteLogManagerTest|SharePartitionTest).java"/> @@ -49,7 +53,6 @@ - + files="(AbstractRequest|AbstractResponse|KerberosLogin|WorkerSinkTaskTest|TransactionManagerTest|SenderTest|KafkaAdminClient|ConsumerCoordinatorTest|KafkaAdminClientTest).java"/> + files="(AbstractMembershipManager|ConsumerCoordinator|BufferPool|MetricName|Node|ConfigDef|RecordBatch|SslFactory|SslTransportLayer|MetadataResponse|KerberosLogin|Selector|Sender|Serdes|TokenInformation|Agent|PluginUtils|MiniTrogdorCluster|TasksRequest|KafkaProducer|AbstractStickyAssignor|Authorizer|FetchSessionHandler|RecordAccumulator|Shell).java"/> @@ -126,7 +129,7 @@ files="(OffsetFetcher|RequestResponse)Test.java"/> + files="RequestResponseTest.java|FetcherTest.java|FetchRequestManagerTest.java|KafkaAdminClientTest.java"/> @@ -165,9 +168,6 @@ - - @@ -183,9 +183,6 @@ - - @@ -195,7 +192,7 @@ + files="(KafkaStreams|KStreamImpl|KTableImpl|InternalTopologyBuilder|StreamsPartitionAssignor|StreamThread|IQv2StoreIntegrationTest|KStreamImplTest|RocksDBStore|StreamTask).java"/> @@ -244,7 +241,7 @@ files=".*[/\\]streams[/\\].*test[/\\].*.java"/> + files="(EosV2UpgradeIntegrationTest|KStreamKStreamJoinTest|KTableKTableForeignKeyJoinIntegrationTest|RocksDBGenericOptionsToDbOptionsColumnFamilyOptionsAdapterTest|RelationalSmokeTest|MockProcessorContextStateStoreTest|IQv2StoreIntegrationTest).java"/> @@ -294,8 +291,6 @@ files="VerifiableConsumer.java"/> - + files="(ConsumerGroupMember|GroupMetadataManager|GeneralUniformAssignmentBuilder|GroupCoordinatorRecordSerde).java"/> + files="(GroupMetadataManager|ConsumerGroupTest|ShareGroupTest|GroupMetadataManagerTest|GroupMetadataManagerTestContext|GeneralUniformAssignmentBuilder).java"/> + files="(RecordHelpersTest|GroupCoordinatorRecordHelpers|GroupMetadataManager|GroupMetadataManagerTest|OffsetMetadataManagerTest|GroupCoordinatorServiceTest|GroupCoordinatorShardTest|GroupCoordinatorRecordSerde).java"/> @@ -343,27 +338,19 @@ - + - - + files="(LogLoader|LogValidator|RemoteLogManagerConfig|RemoteLogManager).java"/> + files="(LocalLog|LogLoader|LogValidator|RemoteLogManager|RemoteIndexCache).java"/> - - - + files="(LogAppendInfo|LogLoader|RemoteLogManagerConfig).java"/> List configuredInterceptors(AbstractConfig config, - String interceptorClassesConfigName, - Class clazz) { + public static List configuredInterceptors(AbstractConfig config, + String interceptorClassesConfigName, + Class clazz) { String clientId = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); return config.getConfiguredInstances( interceptorClassesConfigName, diff --git a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java index 08b861673e3d7..aa3b5c9d628c9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java +++ b/clients/src/main/java/org/apache/kafka/clients/CommonClientConfigs.java @@ -88,9 +88,8 @@ public class CommonClientConfigs { "If provided, the backoff per host will increase exponentially for each consecutive connection failure, up to this maximum. After calculating the backoff increase, 20% random jitter is added to avoid connection storms."; public static final String RETRIES_CONFIG = "retries"; - public static final String RETRIES_DOC = "It is recommended to set the value to either MAX_VALUE or zero, and use corresponding timeout parameters to control how long a client should retry a request." + - " Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." + - " Setting a value of zero will lead to transient errors not being retried, and they will be propagated to the application to be handled."; + public static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any request that fails with a potentially transient error." + + " It is recommended to set the value to either zero or `MAX_VALUE` and use corresponding timeout parameters to control how long a client should retry a request."; public static final String RETRY_BACKOFF_MS_CONFIG = "retry.backoff.ms"; public static final String RETRY_BACKOFF_MS_DOC = "The amount of time to wait before attempting to retry a failed request to a given topic partition. " + @@ -126,9 +125,7 @@ public class CommonClientConfigs { "\n" + "TRACE level records all possible metrics, capturing every detail about the system's performance and operation. It's best for controlled environments where in-depth analysis is required, though it can introduce significant overhead."; public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; - public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. " + - "Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation. " + - "When custom reporters are set and org.apache.kafka.common.metrics.JmxReporter is needed, it has to be explicitly added to the list."; + public static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters. Implementing the org.apache.kafka.common.metrics.MetricsReporter interface allows plugging in classes that will be notified of new metric creation."; public static final String METRICS_CONTEXT_PREFIX = "metrics.context."; @@ -195,8 +192,7 @@ public class CommonClientConfigs { + "is considered failed and the group will rebalance in order to reassign the partitions to another member. " + "For consumers using a non-null group.instance.id which reach this timeout, partitions will not be immediately reassigned. " + "Instead, the consumer will stop sending heartbeats and partitions will be reassigned " - + "after expiration of the session timeout (defined by the client config session.timeout.ms if using the Classic rebalance protocol, or by the broker config group.consumer.session.timeout.ms if using the Consumer protocol). " - + "This mirrors the behavior of a static consumer which has shutdown."; + + "after expiration of session.timeout.ms. This mirrors the behavior of a static consumer which has shutdown."; public static final String REBALANCE_TIMEOUT_MS_CONFIG = "rebalance.timeout.ms"; public static final String REBALANCE_TIMEOUT_MS_DOC = "The maximum allowed time for each worker to join the group " @@ -210,18 +206,15 @@ public class CommonClientConfigs { + "to the broker. If no heartbeats are received by the broker before the expiration of this session timeout, " + "then the broker will remove this client from the group and initiate a rebalance. Note that the value " + "must be in the allowable range as configured in the broker configuration by group.min.session.timeout.ms " - + "and group.max.session.timeout.ms. Note that this client configuration is not supported when group.protocol " - + "is set to \"consumer\". In that case, session timeout is controlled by the broker config group.consumer.session.timeout.ms."; + + "and group.max.session.timeout.ms. Note that this configuration is not supported when group.protocol " + + "is set to \"consumer\"."; public static final String HEARTBEAT_INTERVAL_MS_CONFIG = "heartbeat.interval.ms"; public static final String HEARTBEAT_INTERVAL_MS_DOC = "The expected time between heartbeats to the consumer " + "coordinator when using Kafka's group management facilities. Heartbeats are used to ensure that the " + "consumer's session stays active and to facilitate rebalancing when new consumers join or leave the group. " - + "This config is only supported if group.protocol is set to \"classic\". In that case, " - + "the value must be set lower than session.timeout.ms, but typically should be set no higher " - + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances." - + "If group.protocol is set to \"consumer\", this config is not supported, as " - + "the heartbeat interval is controlled by the broker with group.consumer.heartbeat.interval.ms."; + + "The value must be set lower than session.timeout.ms, but typically should be set no higher " + + "than 1/3 of that value. It can be adjusted even lower to control the expected time for normal rebalances."; public static final String DEFAULT_API_TIMEOUT_MS_CONFIG = "default.api.timeout.ms"; public static final String DEFAULT_API_TIMEOUT_MS_DOC = "Specifies the timeout (in milliseconds) for client APIs. " + diff --git a/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java index fce243ebc6461..4aff7c8c0a88f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/GroupRebalanceConfig.java @@ -43,7 +43,6 @@ public String toString() { public final int heartbeatIntervalMs; public final String groupId; public final Optional groupInstanceId; - public final Optional rackId; public final long retryBackoffMs; public final long retryBackoffMaxMs; public final boolean leaveGroupOnClose; @@ -54,12 +53,8 @@ public GroupRebalanceConfig(AbstractConfig config, ProtocolType protocolType) { // Consumer and Connect use different config names for defining rebalance timeout if ((protocolType == ProtocolType.CONSUMER) || (protocolType == ProtocolType.SHARE)) { this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.MAX_POLL_INTERVAL_MS_CONFIG); - - String rackId = config.getString(CommonClientConfigs.CLIENT_RACK_CONFIG); - this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId); } else { this.rebalanceTimeoutMs = config.getInt(CommonClientConfigs.REBALANCE_TIMEOUT_MS_CONFIG); - this.rackId = Optional.empty(); } this.heartbeatIntervalMs = config.getInt(CommonClientConfigs.HEARTBEAT_INTERVAL_MS_CONFIG); @@ -95,7 +90,6 @@ public GroupRebalanceConfig(final int sessionTimeoutMs, final int heartbeatIntervalMs, String groupId, Optional groupInstanceId, - String rackId, long retryBackoffMs, long retryBackoffMaxMs, boolean leaveGroupOnClose) { @@ -104,7 +98,6 @@ public GroupRebalanceConfig(final int sessionTimeoutMs, this.heartbeatIntervalMs = heartbeatIntervalMs; this.groupId = groupId; this.groupInstanceId = groupInstanceId; - this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId); this.retryBackoffMs = retryBackoffMs; this.retryBackoffMaxMs = retryBackoffMaxMs; this.leaveGroupOnClose = leaveGroupOnClose; diff --git a/clients/src/main/java/org/apache/kafka/clients/Metadata.java b/clients/src/main/java/org/apache/kafka/clients/Metadata.java index 0986d8a67bc36..b60156aae0066 100644 --- a/clients/src/main/java/org/apache/kafka/clients/Metadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/Metadata.java @@ -381,7 +381,7 @@ public synchronized void update(int requestVersion, MetadataResponse response, b public synchronized Set updatePartitionLeadership(Map partitionLeaders, List leaderNodes) { Map newNodes = leaderNodes.stream().collect(Collectors.toMap(Node::id, node -> node)); // Insert non-overlapping nodes from existing-nodes into new-nodes. - this.metadataSnapshot.cluster().nodes().forEach(node -> newNodes.putIfAbsent(node.id(), node)); + this.metadataSnapshot.cluster().nodes().stream().forEach(node -> newNodes.putIfAbsent(node.id(), node)); // Create partition-metadata for all updated partitions. Exclude updates for partitions - // 1. for which the corresponding partition has newer leader in existing metadata. @@ -508,7 +508,7 @@ private MetadataSnapshot handleMetadataResponse(MetadataResponse metadataRespons topicId = null; } - if (!retainTopic(topicName, topicId, metadata.isInternal(), nowMs)) + if (!retainTopic(topicName, metadata.isInternal(), nowMs)) continue; if (metadata.isInternal()) @@ -758,20 +758,10 @@ public Map topicNames() { return metadataSnapshot.topicNames(); } - /** - * Based on the topic name, check if the topic metadata should be kept when received in a metadata response. - */ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { return true; } - /** - * Based on the topic name and topic ID, check if the topic metadata should be kept when received in a metadata response. - */ - protected boolean retainTopic(String topicName, Uuid topicId, boolean isInternal, long nowMs) { - return retainTopic(topicName, isInternal, nowMs); - } - public static class MetadataRequestAndVersion { public final MetadataRequest.Builder requestBuilder; public final int requestVersion; diff --git a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java index 692847a8b1553..f8c3034ecfdee 100644 --- a/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/NetworkClient.java @@ -1051,9 +1051,9 @@ private void handleApiVersionsResponse(List responses, apiVersionsResponse.data().finalizedFeaturesEpoch()); apiVersions.update(node, nodeVersionInfo); this.connectionStates.ready(node); - log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, API versions: {}.", + log.debug("Node {} has finalized features epoch: {}, finalized features: {}, supported features: {}, ZK migration ready: {}, API versions: {}.", node, apiVersionsResponse.data().finalizedFeaturesEpoch(), apiVersionsResponse.data().finalizedFeatures(), - apiVersionsResponse.data().supportedFeatures(), nodeVersionInfo); + apiVersionsResponse.data().supportedFeatures(), apiVersionsResponse.data().zkMigrationReady(), nodeVersionInfo); } /** @@ -1218,7 +1218,7 @@ public long maybeUpdate(long now) { return metadataTimeout; } - if (metadataAttemptStartMs.isEmpty()) + if (!metadataAttemptStartMs.isPresent()) metadataAttemptStartMs = Optional.of(now); // Beware that the behavior of this method and the computation of timeouts for poll() are @@ -1411,7 +1411,7 @@ private long maybeUpdate(long now, Node node) { if (canSendRequest(nodeConnectionId, now)) { Optional> requestOpt = clientTelemetrySender.createRequest(); - if (requestOpt.isEmpty()) + if (!requestOpt.isPresent()) return Long.MAX_VALUE; AbstractRequest.Builder request = requestOpt.get(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AbortTransactionResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AbortTransactionResult.java index 602c4f96443ca..3d4abe85485ed 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AbortTransactionResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AbortTransactionResult.java @@ -39,7 +39,7 @@ public class AbortTransactionResult { * @return the future */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AbstractOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/AbstractOptions.java index 12effaf4e6372..2312fe4b81dd2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AbstractOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AbstractOptions.java @@ -21,7 +21,6 @@ /* * This class implements the common APIs that are shared by Options classes for various AdminClient commands */ -@SuppressWarnings("rawtypes") public abstract class AbstractOptions { protected Integer timeoutMs = null; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java index e596754f62ff1..f742cd22675b9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/Admin.java @@ -250,8 +250,9 @@ default DeleteTopicsResult deleteTopics(TopicCollection topics) { * During this time, {@link #listTopics()} and {@link #describeTopics(Collection)} * may continue to return information about the deleted topics. *

- * If delete.topic.enable is set to false on the brokers, an exception will be returned to the client indicating - * that topic deletion is disabled. + * If delete.topic.enable is false on the brokers, deleteTopics will mark + * the topics for deletion, but not actually delete them. The futures will + * return successfully in this case. *

* When using topic IDs, this operation is supported by brokers with inter-broker protocol 2.8 or higher. * When using topic names, this operation is supported by brokers with version 0.10.1.0 or higher. @@ -344,9 +345,6 @@ default DescribeClusterResult describeCluster() { /** * Get information about the nodes in the cluster. - *

- * To obtain broker cluster information, you must configure {@link AdminClientConfig#BOOTSTRAP_SERVERS_CONFIG}. - * To obtain controller cluster information, you must configure {@link AdminClientConfig#BOOTSTRAP_CONTROLLERS_CONFIG}. * * @param options The options to use when getting information about the cluster. * @return The DescribeClusterResult. @@ -475,9 +473,6 @@ default DescribeConfigsResult describeConfigs(Collection resourc *

  • {@link ConfigResource.Type#CLIENT_METRICS}: will return empty configs
  • * *

    - * Note that you cannot describe broker configs or broker logger using {@link AdminClientConfig#BOOTSTRAP_CONTROLLERS_CONFIG}, - * and you cannot describe controller configs or controller logger using {@link AdminClientConfig#BOOTSTRAP_SERVERS_CONFIG}. - *

    * This operation is supported by brokers with version 0.11.0.0 or higher. * * @param resources See relevant type {@link ConfigResource.Type} @@ -878,12 +873,10 @@ default DescribeConsumerGroupsResult describeConsumerGroups(Collection g /** * List the consumer groups available in the cluster. - * @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} instead. * * @param options The options to use when listing the consumer groups. * @return The ListConsumerGroupsResult. */ - @Deprecated(since = "4.1", forRemoval = true) ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options); /** @@ -891,11 +884,9 @@ default DescribeConsumerGroupsResult describeConsumerGroups(Collection g *

    * This is a convenience method for {@link #listConsumerGroups(ListConsumerGroupsOptions)} with default options. * See the overload for more details. - * @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} instead. * * @return The ListConsumerGroupsResult. */ - @Deprecated(since = "4.1", forRemoval = true) default ListConsumerGroupsResult listConsumerGroups() { return listConsumerGroups(new ListConsumerGroupsOptions()); } @@ -949,31 +940,6 @@ default ListConsumerGroupOffsetsResult listConsumerGroupOffsets(MapNote: this method effectively does the same as the corresponding consumer group method {@link Admin#listConsumerGroupOffsets} does. - * - * @param groupSpecs Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for. - * - * @param options The options to use when listing the streams group offsets. - * @return The ListStreamsGroupOffsetsResult - */ - ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs, ListStreamsGroupOffsetsOptions options); - - /** - * List the streams group offsets available in the cluster for the specified groups with the default options. - *

    - * This is a convenience method for - * {@link #listStreamsGroupOffsets(Map, ListStreamsGroupOffsetsOptions)} with default options. - * - * @param groupSpecs Map of streams group ids to a spec that specifies the topic partitions of the group to list offsets for. - * @return The ListStreamsGroupOffsetsResult. - */ - default ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs) { - return listStreamsGroupOffsets(groupSpecs, new ListStreamsGroupOffsetsOptions()); - } - /** * Delete consumer groups from the cluster. * @@ -991,25 +957,6 @@ default DeleteConsumerGroupsResult deleteConsumerGroups(Collection group return deleteConsumerGroups(groupIds, new DeleteConsumerGroupsOptions()); } - /** - * Delete streams groups from the cluster. - * - * Note: this method effectively does the same as the corresponding consumer group method {@link Admin#deleteConsumerGroups} does. - * - * @param options The options to use when deleting a streams group. - * @return The DeleteStreamsGroupsResult. - */ - DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options); - - /** - * Delete streams groups from the cluster with the default options. - * - * @return The DeleteStreamsGroupResult. - */ - default DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds) { - return deleteStreamsGroups(groupIds, new DeleteStreamsGroupsOptions()); - } - /** * Delete committed offsets for a set of partitions in a consumer group. This will * succeed at the partition level only if the group is not actively subscribed @@ -1033,31 +980,6 @@ default DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String group return deleteConsumerGroupOffsets(groupId, partitions, new DeleteConsumerGroupOffsetsOptions()); } - /** - * Delete committed offsets for a set of partitions in a streams group. This will - * succeed at the partition level only if the group is not actively subscribed - * to the corresponding topic. - * - * Note: this method effectively does the same as the corresponding consumer group method {@link Admin#deleteConsumerGroupOffsets} does. - * - * @param options The options to use when deleting offsets in a streams group. - * @return The DeleteStreamsGroupOffsetsResult. - */ - DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, - Set partitions, - DeleteStreamsGroupOffsetsOptions options); - - /** - * Delete committed offsets for a set of partitions in a streams group with the default - * options. This will succeed at the partition level only if the group is not actively - * subscribed to the corresponding topic. - * - * @return The DeleteStreamsGroupOffsetsResult. - */ - default DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, Set partitions) { - return deleteStreamsGroupOffsets(groupId, partitions, new DeleteStreamsGroupOffsetsOptions()); - } - /** * List the groups available in the cluster with the default options. * @@ -1161,13 +1083,6 @@ default AlterPartitionReassignmentsResult alterPartitionReassignments( * if the request timed out before the controller could record the new assignments. *

  • {@link org.apache.kafka.common.errors.InvalidReplicaAssignmentException} * If the specified assignment was not valid.
  • - *
  • {@link org.apache.kafka.common.errors.InvalidReplicationFactorException} - * If the replication factor was changed in an invalid way. - * Only thrown when {@link AlterPartitionReassignmentsOptions#allowReplicationFactorChange()} is set to false and - * the request is attempting to alter reassignments (not cancel)
  • - *
  • {@link org.apache.kafka.common.errors.UnsupportedVersionException} - * If {@link AlterPartitionReassignmentsOptions#allowReplicationFactorChange()} was changed outside the default - * and the server does not support the option (e.g due to an old Kafka version).
  • *
  • {@link org.apache.kafka.common.errors.NoReassignmentInProgressException} * If there was an attempt to cancel a reassignment for a partition which was not being reassigned.
  • * @@ -1291,34 +1206,6 @@ default AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId */ AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, Map offsets, AlterConsumerGroupOffsetsOptions options); - /** - *

    Alters offsets for the specified group. In order to succeed, the group must be empty. - * - *

    This is a convenience method for {@link #alterStreamsGroupOffsets(String, Map, AlterStreamsGroupOffsetsOptions)} with default options. - * See the overload for more details. - * - * @param groupId The group for which to alter offsets. - * @param offsets A map of offsets by partition with associated metadata. - * @return The AlterOffsetsResult. - */ - default AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map offsets) { - return alterStreamsGroupOffsets(groupId, offsets, new AlterStreamsGroupOffsetsOptions()); - } - - /** - *

    Alters offsets for the specified group. In order to succeed, the group must be empty. - * - *

    This operation is not transactional so it may succeed for some partitions while fail for others. - * - * Note: this method effectively does the same as the corresponding consumer group method {@link Admin#alterConsumerGroupOffsets} does. - * - * @param groupId The group for which to alter offsets. - * @param offsets A map of offsets by partition with associated metadata. Partitions not specified in the map are ignored. - * @param options The options to use when altering the offsets. - * @return The AlterOffsetsResult. - */ - AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map offsets, AlterStreamsGroupOffsetsOptions options); - /** *

    List offset for the specified partitions and OffsetSpec. This operation enables to find * the beginning offset, end offset as well as the offset matching a timestamp in partitions. @@ -1775,36 +1662,12 @@ default FenceProducersResult fenceProducers(Collection transactionalIds) FenceProducersResult fenceProducers(Collection transactionalIds, FenceProducersOptions options); - /** - * List the configuration resources available in the cluster which matches config resource type. - * If no config resource types are specified, all configuration resources will be listed. - * - * @param configResourceTypes The set of configuration resource types to list. - * @param options The options to use when listing the configuration resources. - * @return The ListConfigurationResourcesResult. - */ - ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options); - - /** - * List all configuration resources available in the cluster with the default options. - *

    - * This is a convenience method for {@link #listConfigResources(Set, ListConfigResourcesOptions)} - * with default options. See the overload for more details. - * - * @return The ListConfigurationResourcesResult. - */ - default ListConfigResourcesResult listConfigResources() { - return listConfigResources(Set.of(), new ListConfigResourcesOptions()); - } - /** * List the client metrics configuration resources available in the cluster. * * @param options The options to use when listing the client metrics resources. * @return The ListClientMetricsResourcesResult. - * @deprecated Since 4.1. Use {@link #listConfigResources(Set, ListConfigResourcesOptions)} instead. */ - @Deprecated(since = "4.1", forRemoval = true) ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options); /** @@ -1814,9 +1677,7 @@ default ListConfigResourcesResult listConfigResources() { * with default options. See the overload for more details. * * @return The ListClientMetricsResourcesResult. - * @deprecated Since 4.1. Use {@link #listConfigResources()} instead. */ - @Deprecated(since = "4.1", forRemoval = true) default ListClientMetricsResourcesResult listClientMetricsResources() { return listClientMetricsResources(new ListClientMetricsResourcesOptions()); } @@ -1927,121 +1788,6 @@ default DescribeShareGroupsResult describeShareGroups(Collection groupId return describeShareGroups(groupIds, new DescribeShareGroupsOptions()); } - /** - * Alters offsets for the specified group. In order to succeed, the group must be empty. - * - *

    This operation is not transactional, so it may succeed for some partitions while fail for others. - * - * @param groupId The group for which to alter offsets. - * @param offsets A map of offsets by partition. Partitions not specified in the map are ignored. - * @param options The options to use when altering the offsets. - * @return The AlterShareGroupOffsetsResult. - */ - AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map offsets, AlterShareGroupOffsetsOptions options); - - /** - * Alters offsets for the specified group. In order to succeed, the group must be empty. - * - *

    This is a convenience method for {@link #alterShareGroupOffsets(String, Map, AlterShareGroupOffsetsOptions)} with default options. - * See the overload for more details. - * - * @param groupId The group for which to alter offsets. - * @param offsets A map of offsets by partition. - * @return The AlterShareGroupOffsetsResult. - */ - default AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map offsets) { - return alterShareGroupOffsets(groupId, offsets, new AlterShareGroupOffsetsOptions()); - } - - /** - * List the share group offsets available in the cluster for the specified share groups. - * - * @param groupSpecs Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for. - * @param options The options to use when listing the share group offsets. - * @return The ListShareGroupOffsetsResult - */ - ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs, ListShareGroupOffsetsOptions options); - - /** - * List the share group offsets available in the cluster for the specified share groups with the default options. - * - *

    This is a convenience method for {@link #listShareGroupOffsets(Map, ListShareGroupOffsetsOptions)} - * to list offsets of all partitions for the specified share groups with default options. - * - * @param groupSpecs Map of share group ids to a spec that specifies the topic partitions of the group to list offsets for. - * @return The ListShareGroupOffsetsResult - */ - default ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs) { - return listShareGroupOffsets(groupSpecs, new ListShareGroupOffsetsOptions()); - } - - /** - * Delete offsets for a set of topics in a share group. - * - * @param groupId The group for which to delete offsets. - * @param topics The topics for which to delete offsets. - * @param options The options to use when deleting offsets in a share group. - * @return The DeleteShareGroupOffsetsResult. - */ - DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options); - - /** - * Delete offsets for a set of topics in a share group with the default options. - * - *

    - * This is a convenience method for {@link #deleteShareGroupOffsets(String, Set, DeleteShareGroupOffsetsOptions)} with default options. - * See the overload for more details. - * - * @param groupId The group for which to delete offsets. - * @param topics The topics for which to delete offsets. - * @return The DeleteShareGroupOffsetsResult. - */ - default DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics) { - return deleteShareGroupOffsets(groupId, topics, new DeleteShareGroupOffsetsOptions()); - } - - /** - * Delete share groups from the cluster. - * - * @param groupIds Collection of share group ids which are to be deleted. - * @param options The options to use when deleting a share group. - * @return The DeleteShareGroupsResult. - */ - DeleteShareGroupsResult deleteShareGroups(Collection groupIds, DeleteShareGroupsOptions options); - - /** - * Delete share groups from the cluster with the default options. - * - * @param groupIds Collection of share group ids which are to be deleted. - * @return The DeleteShareGroupsResult. - */ - default DeleteShareGroupsResult deleteShareGroups(Collection groupIds) { - return deleteShareGroups(groupIds, new DeleteShareGroupsOptions()); - } - - /** - * Describe streams groups in the cluster. - * - * @param groupIds The IDs of the groups to describe. - * @param options The options to use when describing the groups. - * @return The DescribeStreamsGroupsResult. - */ - DescribeStreamsGroupsResult describeStreamsGroups(Collection groupIds, - DescribeStreamsGroupsOptions options); - - /** - * Describe streams groups in the cluster, with the default options. - *

    - * This is a convenience method for {@link #describeStreamsGroups(Collection, DescribeStreamsGroupsOptions)} - * with default options. See the overload for more details. - * - * @param groupIds The IDs of the groups to describe. - * @return The DescribeStreamsGroupsResult. - */ - default DescribeStreamsGroupsResult describeStreamsGroups(Collection groupIds) { - return describeStreamsGroups(groupIds, new DescribeStreamsGroupsOptions()); - } - /** * Describe some classic groups in the cluster. * @@ -2103,30 +1849,4 @@ default DescribeClassicGroupsResult describeClassicGroups(Collection gro * Get the metrics kept by the adminClient */ Map metrics(); - - /** - * Force terminate a transaction for the given transactional ID with the default options. - *

    - * This is a convenience method for {@link #forceTerminateTransaction(String, TerminateTransactionOptions)} - * with default options. - * - * @param transactionalId The ID of the transaction to terminate. - * @return The TerminateTransactionResult. - */ - default TerminateTransactionResult forceTerminateTransaction(String transactionalId) { - return forceTerminateTransaction(transactionalId, new TerminateTransactionOptions()); - } - - /** - * Force terminate a transaction for the given transactional ID. - * This operation aborts any ongoing transaction associated with the transactional ID. - * It's similar to fenceProducers but only targets a single transactional ID to handle - * long-running transactions when 2PC is enabled. - * - * @param transactionalId The ID of the transaction to terminate. - * @param options The options to use when terminating the transaction. - * @return The TerminateTransactionResult. - */ - TerminateTransactionResult forceTerminateTransaction(String transactionalId, - TerminateTransactionOptions options); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java index 471d3916cfb55..a87af6be154a5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AdminClientConfig.java @@ -30,7 +30,6 @@ import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.utils.Utils; -import java.util.List; import java.util.Map; import java.util.Set; @@ -155,14 +154,12 @@ public class AdminClientConfig extends AbstractConfig { static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + "", Importance.HIGH, BOOTSTRAP_SERVERS_DOC). define(BOOTSTRAP_CONTROLLERS_CONFIG, Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + "", Importance.HIGH, BOOTSTRAP_CONTROLLERS_DOC) .define(CLIENT_ID_CONFIG, Type.STRING, "", Importance.MEDIUM, CLIENT_ID_DOC) @@ -241,7 +238,6 @@ public class AdminClientConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, METRIC_REPORTER_CLASSES_DOC) .define(METRICS_RECORDING_LEVEL_CONFIG, @@ -284,13 +280,7 @@ public class AdminClientConfig extends AbstractConfig { DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, atLeast(0), Importance.LOW, - METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) - .define(CONFIG_PROVIDERS_CONFIG, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.LOW, - CONFIG_PROVIDERS_DOC); + METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterClientQuotasResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterClientQuotasResult.java index 4906184b3c925..f232c107f9407 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterClientQuotasResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterClientQuotasResult.java @@ -50,6 +50,6 @@ public Map> values() { * Returns a future which succeeds only if all quota alterations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java index 789c9f64a93aa..48d5646764d42 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigOp.java @@ -26,20 +26,6 @@ /** * A class representing an alter configuration entry containing name, value and operation type. - *

    - * Note for Broker Logger Configuration:
    - * When altering broker logger levels (using {@link org.apache.kafka.common.config.ConfigResource.Type#BROKER_LOGGER}), - * it is strongly recommended to use log level constants from {@link org.apache.kafka.common.config.LogLevelConfig} instead of string literals. - * This ensures compatibility with Kafka's log level validation and avoids potential configuration errors. - *

    - * Example: - *

    - * Recommended approach:
    - * new AlterConfigOp(new ConfigEntry(loggerName, LogLevelConfig.DEBUG_LOG_LEVEL), OpType.SET)
    - *
    - * Avoid this:
    - * new AlterConfigOp(new ConfigEntry(loggerName, "DEBUG"), OpType.SET)
    - * 
    */ public class AlterConfigOp { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java index cd9279300de84..22851e39592de 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConfigsResult.java @@ -44,7 +44,7 @@ public Map> values() { * Return a future which succeeds only if all the alter configs operations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.java index 8d78a16b57458..0dc1bd14e1849 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterConsumerGroupOffsetsResult.java @@ -74,7 +74,7 @@ public KafkaFuture all() { for (Errors error : topicPartitionErrorsMap.values()) { if (error != Errors.NONE) { throw error.exception( - "Failed altering group offsets for the following partitions: " + partitionsFailed); + "Failed altering consumer group offsets for the following partitions: " + partitionsFailed); } } return null; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.java index 74c9f3dcdec8a..166e90404c336 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsOptions.java @@ -23,25 +23,4 @@ * Options for {@link AdminClient#alterPartitionReassignments(Map, AlterPartitionReassignmentsOptions)} */ public class AlterPartitionReassignmentsOptions extends AbstractOptions { - - private boolean allowReplicationFactorChange = true; - - /** - * Set the option indicating if the alter partition reassignments call should be - * allowed to alter the replication factor of a partition. - * In cases where it is not allowed, any replication factor change will result in an exception thrown by the API. - */ - public AlterPartitionReassignmentsOptions allowReplicationFactorChange(boolean allow) { - this.allowReplicationFactorChange = allow; - return this; - } - - /** - * A boolean indicating if the alter partition reassignments should be - * allowed to alter the replication factor of a partition. - * In cases where it is not allowed, any replication factor change will result in an exception thrown by the API. - */ - public boolean allowReplicationFactorChange() { - return this.allowReplicationFactorChange; - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.java index f918074af77f6..f65cdda0dea29 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterPartitionReassignmentsResult.java @@ -50,6 +50,6 @@ public Map> values() { * Return a future which succeeds only if all the reassignments were successfully initiated. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.java index 6b6a98e20f65d..c44e7ea1baf31 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterReplicaLogDirsResult.java @@ -73,6 +73,6 @@ public Map> values() { * if not, it throws an {@link Exception} described in {@link #values()} method. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.java index d61d3958863b6..bca42c4d9d155 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/AlterUserScramCredentialsResult.java @@ -51,6 +51,6 @@ public Map> values() { * Return a future which succeeds only if all the user SCRAM credential alterations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java index d5af97080b080..b5c85b5873204 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ClientMetricsResourceListing.java @@ -18,7 +18,6 @@ import java.util.Objects; -@Deprecated(since = "4.1") public class ClientMetricsResourceListing { private final String name; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java index d594b50241ca5..34b9f08f10e06 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ConsumerGroupListing.java @@ -26,9 +26,7 @@ /** * A listing of a consumer group in the cluster. - * @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} and {@link GroupListing} instead. */ -@Deprecated(since = "4.1") public class ConsumerGroupListing { private final String groupId; private final boolean isSimpleConsumerGroup; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/CreateAclsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/CreateAclsResult.java index de57fbb8911a9..46a7ebef59a24 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/CreateAclsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/CreateAclsResult.java @@ -45,6 +45,6 @@ public Map> values() { * Return a future which succeeds only if all the ACL creations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/CreatePartitionsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/CreatePartitionsResult.java index 21d26a6246764..574c88af00fe0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/CreatePartitionsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/CreatePartitionsResult.java @@ -44,6 +44,6 @@ public Map> values() { * Return a future which succeeds if all the partition creations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(values.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(values.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java index 6cf794c65ee0a..1f6fd67f0bdec 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/CreateTopicsResult.java @@ -49,7 +49,7 @@ public Map> values() { * Return a future which succeeds if all the topic creations succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteAclsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteAclsResult.java index db43ac3f047f0..b1f6619ec10bf 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteAclsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteAclsResult.java @@ -97,7 +97,7 @@ public Map> values() { * Note that it if the filters don't match any ACLs, this is not considered an error. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(v -> getAclBindings(futures)); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply(v -> getAclBindings(futures)); } private List getAclBindings(Map> futures) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java index 970f9adee7fd4..5c1e60b1a6176 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteConsumerGroupsResult.java @@ -17,14 +17,18 @@ package org.apache.kafka.clients.admin; import org.apache.kafka.common.KafkaFuture; +import org.apache.kafka.common.annotation.InterfaceStability; import java.util.Collection; import java.util.HashMap; import java.util.Map; /** - * The result of the {@link Admin#deleteConsumerGroups(Collection , DeleteConsumerGroupsOptions)} call. + * The result of the {@link Admin#deleteConsumerGroups(Collection)} call. + * + * The API of this class is evolving, see {@link Admin} for details. */ +@InterfaceStability.Evolving public class DeleteConsumerGroupsResult { private final Map> futures; @@ -46,6 +50,6 @@ public Map> deletedGroups() { * Return a future which succeeds only if all the consumer group deletions succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteRecordsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteRecordsResult.java index 061403eeb165e..9ff360b7ad50e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteRecordsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteRecordsResult.java @@ -45,6 +45,6 @@ public Map> lowWatermarks() { * Return a future which succeeds only if all the records deletions succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java index eefdbdf9da10f..348c07de2d446 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DeleteTopicsResult.java @@ -70,7 +70,7 @@ public Map> topicNameValues() { * @return a future which succeeds only if all the topic deletions succeed. */ public KafkaFuture all() { - return (topicIdFutures == null) ? KafkaFuture.allOf(nameFutures.values().toArray(new KafkaFuture[0])) : - KafkaFuture.allOf(topicIdFutures.values().toArray(new KafkaFuture[0])); + return (topicIdFutures == null) ? KafkaFuture.allOf(nameFutures.values().toArray(new KafkaFuture[0])) : + KafkaFuture.allOf(topicIdFutures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.java index 7b80f560ebfa6..8ee38e565469c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClassicGroupsResult.java @@ -51,7 +51,7 @@ public Map> describedGroups() { * Return a future which yields all ClassicGroupDescription objects, if all the describes succeed. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( nil -> { Map descriptions = new HashMap<>(futures.size()); futures.forEach((key, future) -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterResult.java index 69782d5c9c333..3113608a00728 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeClusterResult.java @@ -51,17 +51,16 @@ public KafkaFuture> nodes() { } /** - * Returns a future which yields the current controller node. - *

    - * When using {@link AdminClientConfig#BOOTSTRAP_SERVERS_CONFIG}, the controller refer to a random broker. - * When using {@link AdminClientConfig#BOOTSTRAP_CONTROLLERS_CONFIG}, it refers to the current voter leader. + * Returns a future which yields the current controller id. + * Note that this may yield null, if the controller ID is not yet known. */ public KafkaFuture controller() { return controller; } /** - * Returns a future which yields the current cluster id. + * Returns a future which yields the current cluster id. The future value will be non-null if the + * broker version is 0.10.1.0 or higher and null otherwise. */ public KafkaFuture clusterId() { return clusterId; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java index 72cdaf098a368..41170e6f08b05 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConfigsResult.java @@ -48,7 +48,7 @@ public Map> values() { * Return a future which succeeds only if all the config descriptions succeed. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(v -> { Map configs = new HashMap<>(futures.size()); for (Map.Entry> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java index f01a6c7b81491..3dafba966e4a1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeConsumerGroupsResult.java @@ -46,7 +46,7 @@ public Map> describedGroups() { * Return a future which yields all ConsumerGroupDescription objects, if all the describes succeed. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( nil -> { Map descriptions = new HashMap<>(futures.size()); futures.forEach((key, future) -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java index cd2e37d7c8671..c7ad85332b804 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeLogDirsResult.java @@ -48,7 +48,7 @@ public Map>> descriptions() * to a description of that log directory. */ public KafkaFuture>> allDescriptions() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])). thenApply(v -> { Map> descriptions = new HashMap<>(futures.size()); for (Map.Entry>> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeProducersResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeProducersResult.java index 597c59b8fff49..bcaab90b30da0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeProducersResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeProducersResult.java @@ -43,7 +43,7 @@ public KafkaFuture partitionResult(final TopicPartition } public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) .thenApply(nil -> { Map results = new HashMap<>(futures.size()); for (Map.Entry> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java index 8e01bf80a91bf..e561468c5a0da 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeReplicaLogDirsResult.java @@ -46,7 +46,7 @@ public Map> values() { * Return a future which succeeds if log directory information of all replicas are available */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) .thenApply(v -> { Map replicaLogDirInfos = new HashMap<>(); for (Map.Entry> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeShareGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeShareGroupsResult.java index 59641e49ba39a..0536b9e3f9dd2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeShareGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeShareGroupsResult.java @@ -50,7 +50,7 @@ public Map> describedGroups() { * Return a future which yields all ShareGroupDescription objects, if all the describes succeed. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( nil -> { Map descriptions = new HashMap<>(futures.size()); futures.forEach((key, future) -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java index 189c8c17531ae..831e4297fef21 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsOptions.java @@ -43,19 +43,8 @@ public DescribeTopicsOptions includeAuthorizedOperations(boolean includeAuthoriz return this; } - /** - * Sets the maximum number of partitions to be returned in a single response. - *

    - * This option: - *

      - *
    • Is only effective when using topic names (not topic IDs).
    • - *
    • Will not be effective if it is larger than the server-side configuration - * {@code max.request.partition.size.limit}. - *
    • - *
    - * - * @param partitionSizeLimitPerResponse the maximum number of partitions per response - */ + // Note that, partitionSizeLimitPerResponse will not be effective if it is larger than the config + // max.request.partition.size.limit on the server side. public DescribeTopicsOptions partitionSizeLimitPerResponse(int partitionSizeLimitPerResponse) { this.partitionSizeLimitPerResponse = partitionSizeLimitPerResponse; return this; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java index 300b67f90829f..d744f3cc8b876 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTopicsResult.java @@ -96,7 +96,7 @@ public KafkaFuture> allTopicIds() { */ private static KafkaFuture> all(Map> futures) { if (futures == null) return null; - KafkaFuture future = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + KafkaFuture future = KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); return future. thenApply(v -> { Map descriptions = new HashMap<>(futures.size()); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTransactionsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTransactionsResult.java index 316c5e1c6d441..d5db958b1d06b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTransactionsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeTransactionsResult.java @@ -60,7 +60,7 @@ public KafkaFuture description(String transactionalId) { * if any of the descriptions cannot be obtained */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) .thenApply(nil -> { Map results = new HashMap<>(futures.size()); for (Map.Entry> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java index fad56892f4596..5a2f55c544fe1 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/DescribeUserScramCredentialsResult.java @@ -69,7 +69,7 @@ public KafkaFuture> all() { retval.completeExceptionally(Errors.forCode(optionalFirstFailedDescribe.get().errorCode()).exception(optionalFirstFailedDescribe.get().errorMessage())); } else { Map retvalMap = new HashMap<>(); - data.results().forEach(userResult -> + data.results().stream().forEach(userResult -> retvalMap.put(userResult.user(), new UserScramCredentialsDescription(userResult.user(), getScramCredentialInfosFor(userResult)))); retval.complete(retvalMap); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/FenceProducersResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/FenceProducersResult.java index c1954f308accd..db34dd5e445c5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/FenceProducersResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/FenceProducersResult.java @@ -65,7 +65,7 @@ public KafkaFuture epochId(String transactionalId) { * Return a future which succeeds only if all the producer fencings succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } private KafkaFuture findAndApply(String transactionalId, KafkaFuture.BaseFunction followup) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java index b99e4f6587bd7..88a693934e1c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ForwardingAdmin.java @@ -159,8 +159,6 @@ public DescribeConsumerGroupsResult describeConsumerGroups(Collection gr } @Override - @Deprecated - @SuppressWarnings("removal") public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) { return delegate.listConsumerGroups(options); } @@ -170,31 +168,16 @@ public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, ListStreamsGroupOffsetsOptions options) { - return delegate.listStreamsGroupOffsets(groupSpecs, options); - } - @Override public DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options) { return delegate.deleteConsumerGroups(groupIds, options); } - @Override - public DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options) { - return delegate.deleteStreamsGroups(groupIds, options); - } - @Override public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set partitions, DeleteConsumerGroupOffsetsOptions options) { return delegate.deleteConsumerGroupOffsets(groupId, partitions, options); } - @Override - public DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, Set partitions, DeleteStreamsGroupOffsetsOptions options) { - return delegate.deleteStreamsGroupOffsets(groupId, partitions, options); - } - @Override public ElectLeadersResult electLeaders(ElectionType electionType, Set partitions, ElectLeadersOptions options) { return delegate.electLeaders(electionType, partitions, options); @@ -220,11 +203,6 @@ public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(String groupId, return delegate.alterConsumerGroupOffsets(groupId, offsets, options); } - @Override - public AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map offsets, AlterStreamsGroupOffsetsOptions options) { - return delegate.alterStreamsGroupOffsets(groupId, offsets, options); - } - @Override public ListOffsetsResult listOffsets(Map topicPartitionOffsets, ListOffsetsOptions options) { return delegate.listOffsets(topicPartitionOffsets, options); @@ -285,11 +263,6 @@ public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortT return delegate.abortTransaction(spec, options); } - @Override - public TerminateTransactionResult forceTerminateTransaction(String transactionalId, TerminateTransactionOptions options) { - return delegate.forceTerminateTransaction(transactionalId, options); - } - @Override public ListTransactionsResult listTransactions(ListTransactionsOptions options) { return delegate.listTransactions(options); @@ -300,12 +273,6 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, return delegate.fenceProducers(transactionalIds, options); } - @Override - public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { - return delegate.listConfigResources(configResourceTypes, options); - } - - @SuppressWarnings({"deprecation", "removal"}) @Override public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { return delegate.listClientMetricsResources(options); @@ -331,31 +298,6 @@ public DescribeShareGroupsResult describeShareGroups(Collection groupIds return delegate.describeShareGroups(groupIds, options); } - @Override - public AlterShareGroupOffsetsResult alterShareGroupOffsets(String groupId, Map offsets, AlterShareGroupOffsetsOptions options) { - return delegate.alterShareGroupOffsets(groupId, offsets, options); - } - - @Override - public ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs, ListShareGroupOffsetsOptions options) { - return delegate.listShareGroupOffsets(groupSpecs, options); - } - - @Override - public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options) { - return delegate.deleteShareGroupOffsets(groupId, topics, options); - } - - @Override - public DeleteShareGroupsResult deleteShareGroups(Collection groupIds, DeleteShareGroupsOptions options) { - return delegate.deleteShareGroups(groupIds, options); - } - - @Override - public DescribeStreamsGroupsResult describeStreamsGroups(Collection groupIds, DescribeStreamsGroupsOptions options) { - return delegate.describeStreamsGroups(groupIds, options); - } - @Override public ListGroupsResult listGroups(ListGroupsOptions options) { return delegate.listGroups(options); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java index 78a7f905319c8..dc3164993b84e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/KafkaAdminClient.java @@ -45,23 +45,18 @@ import org.apache.kafka.clients.admin.internals.AdminMetadataManager; import org.apache.kafka.clients.admin.internals.AllBrokersStrategy; import org.apache.kafka.clients.admin.internals.AlterConsumerGroupOffsetsHandler; -import org.apache.kafka.clients.admin.internals.AlterShareGroupOffsetsHandler; import org.apache.kafka.clients.admin.internals.CoordinatorKey; import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupOffsetsHandler; import org.apache.kafka.clients.admin.internals.DeleteConsumerGroupsHandler; import org.apache.kafka.clients.admin.internals.DeleteRecordsHandler; -import org.apache.kafka.clients.admin.internals.DeleteShareGroupOffsetsHandler; -import org.apache.kafka.clients.admin.internals.DeleteShareGroupsHandler; import org.apache.kafka.clients.admin.internals.DescribeClassicGroupsHandler; import org.apache.kafka.clients.admin.internals.DescribeConsumerGroupsHandler; import org.apache.kafka.clients.admin.internals.DescribeProducersHandler; import org.apache.kafka.clients.admin.internals.DescribeShareGroupsHandler; -import org.apache.kafka.clients.admin.internals.DescribeStreamsGroupsHandler; import org.apache.kafka.clients.admin.internals.DescribeTransactionsHandler; import org.apache.kafka.clients.admin.internals.FenceProducersHandler; import org.apache.kafka.clients.admin.internals.ListConsumerGroupOffsetsHandler; import org.apache.kafka.clients.admin.internals.ListOffsetsHandler; -import org.apache.kafka.clients.admin.internals.ListShareGroupOffsetsHandler; import org.apache.kafka.clients.admin.internals.ListTransactionsHandler; import org.apache.kafka.clients.admin.internals.PartitionLeaderStrategy; import org.apache.kafka.clients.admin.internals.RemoveMembersFromConsumerGroupHandler; @@ -159,7 +154,7 @@ import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; import org.apache.kafka.common.message.ExpireDelegationTokenRequestData; import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; -import org.apache.kafka.common.message.ListConfigResourcesRequestData; +import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListPartitionReassignmentsRequestData; @@ -233,8 +228,8 @@ import org.apache.kafka.common.requests.IncrementalAlterConfigsRequest; import org.apache.kafka.common.requests.IncrementalAlterConfigsResponse; import org.apache.kafka.common.requests.JoinGroupRequest; -import org.apache.kafka.common.requests.ListConfigResourcesRequest; -import org.apache.kafka.common.requests.ListConfigResourcesResponse; +import org.apache.kafka.common.requests.ListClientMetricsResourcesRequest; +import org.apache.kafka.common.requests.ListClientMetricsResourcesResponse; import org.apache.kafka.common.requests.ListGroupsRequest; import org.apache.kafka.common.requests.ListGroupsResponse; import org.apache.kafka.common.requests.ListOffsetsRequest; @@ -419,11 +414,11 @@ public class KafkaAdminClient extends AdminClient { /** * Get or create a list value from a map. * - * @param map The map to get or create the element from. - * @param key The key. - * @param The key type. - * @param The value type. - * @return The list value. + * @param map The map to get or create the element from. + * @param key The key. + * @param The key type. + * @param The value type. + * @return The list value. */ static List getOrCreateListValue(Map> map, K key) { return map.computeIfAbsent(key, k -> new LinkedList<>()); @@ -432,9 +427,9 @@ static List getOrCreateListValue(Map> map, K key) { /** * Send an exception to every element in a collection of KafkaFutureImpls. * - * @param futures The collection of KafkaFutureImpl objects. - * @param exc The exception - * @param The KafkaFutureImpl result type. + * @param futures The collection of KafkaFutureImpl objects. + * @param exc The exception + * @param The KafkaFutureImpl result type. */ private static void completeAllExceptionally(Collection> futures, Throwable exc) { completeAllExceptionally(futures.stream(), exc); @@ -443,9 +438,9 @@ private static void completeAllExceptionally(Collection> /** * Send an exception to all futures in the provided stream * - * @param futures The stream of KafkaFutureImpl objects. - * @param exc The exception - * @param The KafkaFutureImpl result type. + * @param futures The stream of KafkaFutureImpl objects. + * @param exc The exception + * @param The KafkaFutureImpl result type. */ private static void completeAllExceptionally(Stream> futures, Throwable exc) { futures.forEach(future -> future.completeExceptionally(exc)); @@ -454,9 +449,9 @@ private static void completeAllExceptionally(Stream> futu /** * Get the current time remaining before a deadline as an integer. * - * @param now The current time in milliseconds. - * @param deadlineMs The deadline time in milliseconds. - * @return The time delta in milliseconds. + * @param now The current time in milliseconds. + * @param deadlineMs The deadline time in milliseconds. + * @return The time delta in milliseconds. */ static int calcTimeoutMsRemainingAsInt(long now, long deadlineMs) { long deltaMs = deadlineMs - now; @@ -470,8 +465,9 @@ else if (deltaMs < Integer.MIN_VALUE) /** * Generate the client id based on the configuration. * - * @param config The configuration - * @return The client id + * @param config The configuration + * + * @return The client id */ static String generateClientId(AdminClientConfig config) { String clientId = config.getString(AdminClientConfig.CLIENT_ID_CONFIG); @@ -487,9 +483,10 @@ String getClientId() { /** * Get the deadline for a particular call. * - * @param now The current time in milliseconds. - * @param optionTimeoutMs The timeout option given by the user. - * @return The deadline in milliseconds. + * @param now The current time in milliseconds. + * @param optionTimeoutMs The timeout option given by the user. + * + * @return The deadline in milliseconds. */ private long calcDeadlineMs(long now, Integer optionTimeoutMs) { if (optionTimeoutMs != null) @@ -500,8 +497,9 @@ private long calcDeadlineMs(long now, Integer optionTimeoutMs) { /** * Pretty-print an exception. * - * @param throwable The exception. - * @return A compact human-readable string. + * @param throwable The exception. + * + * @return A compact human-readable string. */ static String prettyPrintException(Throwable throwable) { if (throwable == null) @@ -547,7 +545,7 @@ static KafkaAdminClient createInternal( .recordLevel(Sensor.RecordingLevel.forName(config.getString(AdminClientConfig.METRICS_RECORDING_LEVEL_CONFIG))) .tags(metricTags); MetricsContext metricsContext = new KafkaMetricsContext(JMX_PREFIX, - config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); + config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); metrics = new Metrics(metricConfig, reporters, time, metricsContext); networkClient = ClientUtils.createNetworkClient(config, clientId, @@ -579,12 +577,10 @@ static KafkaAdminClient createInternal(AdminClientConfig config, Time time) { Metrics metrics = null; String clientId = generateClientId(config); - List reporters = CommonClientConfigs.metricsReporters(clientId, config); Optional clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); - clientTelemetryReporter.ifPresent(reporters::add); try { - metrics = new Metrics(new MetricConfig(), reporters, time); + metrics = new Metrics(new MetricConfig(), new LinkedList<>(), time); LogContext logContext = createLogContext(clientId); return new KafkaAdminClient(config, clientId, time, metadataManager, metrics, client, null, logContext, clientTelemetryReporter); @@ -629,7 +625,9 @@ private KafkaAdminClient(AdminClientConfig config, CommonClientConfigs.RETRY_BACKOFF_EXP_BASE, retryBackoffMaxMs, CommonClientConfigs.RETRY_BACKOFF_JITTER); + List reporters = CommonClientConfigs.metricsReporters(this.clientId, config); this.clientTelemetryReporter = clientTelemetryReporter; + this.clientTelemetryReporter.ifPresent(reporters::add); this.metadataRecoveryStrategy = MetadataRecoveryStrategy.forName(config.getString(AdminClientConfig.METADATA_RECOVERY_STRATEGY_CONFIG)); this.partitionLeaderCache = new HashMap<>(); this.adminFetchMetricsManager = new AdminFetchMetricsManager(metrics); @@ -653,11 +651,11 @@ private int configureDefaultApiTimeoutMs(AdminClientConfig config) { if (defaultApiTimeoutMs < requestTimeoutMs) { if (config.originals().containsKey(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)) { throw new ConfigException("The specified value of " + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG + - " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + "."); + " must be no smaller than the value of " + AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG + "."); } else { log.warn("Overriding the default value for {} ({}) with the explicitly configured request timeout {}", - AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, this.defaultApiTimeoutMs, - requestTimeoutMs); + AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, this.defaultApiTimeoutMs, + requestTimeoutMs); return requestTimeoutMs; } } @@ -715,7 +713,6 @@ public void close(Duration timeout) { */ private interface NodeProvider { Node provide(); - boolean supportsUseControllers(); } @@ -725,7 +722,7 @@ public Node provide() { long now = time.milliseconds(); LeastLoadedNode leastLoadedNode = client.leastLoadedNode(now); if (metadataRecoveryStrategy == MetadataRecoveryStrategy.REBOOTSTRAP - && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { + && !leastLoadedNode.hasNodeAvailableOrConnectionReady()) { metadataManager.rebootstrap(now); } @@ -755,7 +752,7 @@ private class ConstantNodeIdProvider implements NodeProvider { @Override public Node provide() { if (metadataManager.isReady() && - (metadataManager.nodeById(nodeId) != null)) { + (metadataManager.nodeById(nodeId) != null)) { return metadataManager.nodeById(nodeId); } // If we can't find the node with the given constant ID, we schedule a @@ -789,7 +786,7 @@ private class ControllerNodeProvider implements NodeProvider { @Override public Node provide() { if (metadataManager.isReady() && - (metadataManager.controller() != null)) { + (metadataManager.controller() != null)) { return metadataManager.controller(); } metadataManager.requestUpdate(); @@ -823,6 +820,36 @@ public boolean supportsUseControllers() { } } + /** + * Provides the least loaded broker, or the active kcontroller if we're using + * bootstrap.controllers. + */ + private class ConstantBrokerOrActiveKController implements NodeProvider { + private final int nodeId; + + ConstantBrokerOrActiveKController(int nodeId) { + this.nodeId = nodeId; + } + + @Override + public Node provide() { + if (metadataManager.isReady()) { + if (metadataManager.usingBootstrapControllers()) { + return metadataManager.controller(); + } else if (metadataManager.nodeById(nodeId) != null) { + return metadataManager.nodeById(nodeId); + } + } + metadataManager.requestUpdate(); + return null; + } + + @Override + public boolean supportsUseControllers() { + return true; + } + } + /** * Provides the least loaded broker, or the active kcontroller if we're using * bootstrap.controllers. @@ -891,13 +918,13 @@ protected Node curNode() { /** * Handle a failure. - *

    + * * Depending on what the exception is and how many times we have already tried, we may choose to * fail the Call, or retry it. It is important to print the stack traces here in some cases, * since they are not necessarily preserved in ApiVersionException objects. * - * @param now The current time in milliseconds. - * @param throwable The failure exception. + * @param now The current time in milliseconds. + * @param throwable The failure exception. */ final void fail(long now, Throwable throwable) { if (curNode != null) { @@ -913,7 +940,7 @@ final void fail(long now, Throwable throwable) { // protocol downgrade will not count against the total number of retries we get for // this RPC. That is why 'tries' is not incremented. if ((throwable instanceof UnsupportedVersionException) && - handleUnsupportedVersionException((UnsupportedVersionException) throwable)) { + handleUnsupportedVersionException((UnsupportedVersionException) throwable)) { log.debug("{} attempting protocol downgrade and then retry.", this); runnable.pendingCalls.add(this); return; @@ -967,14 +994,16 @@ private void handleTimeoutFailure(long now, Throwable cause) { * Create an AbstractRequest.Builder for this Call. * * @param timeoutMs The timeout in milliseconds. - * @return The AbstractRequest builder. + * + * @return The AbstractRequest builder. */ abstract AbstractRequest.Builder createRequest(int timeoutMs); /** * Process the call response. * - * @param abstractResponse The AbstractResponse. + * @param abstractResponse The AbstractResponse. + * */ abstract void handleResponse(AbstractResponse abstractResponse); @@ -982,15 +1011,16 @@ private void handleTimeoutFailure(long now, Throwable cause) { * Handle a failure. This will only be called if the failure exception was not * retriable, or if we hit a timeout. * - * @param throwable The exception. + * @param throwable The exception. */ abstract void handleFailure(Throwable throwable); /** * Handle an UnsupportedVersionException. * - * @param exception The exception. - * @return True if the exception can be handled; false otherwise. + * @param exception The exception. + * + * @return True if the exception can be handled; false otherwise. */ boolean handleUnsupportedVersionException(UnsupportedVersionException exception) { return false; @@ -1027,7 +1057,7 @@ static class TimeoutProcessor { /** * Create a new timeout processor. * - * @param now The current time in milliseconds since the epoch. + * @param now The current time in milliseconds since the epoch. */ TimeoutProcessor(long now) { this.now = now; @@ -1039,8 +1069,9 @@ static class TimeoutProcessor { * Timed out calls will be removed and failed. * The remaining milliseconds until the next timeout will be updated. * - * @param calls The collection of calls. - * @return The number of calls which were timed out. + * @param calls The collection of calls. + * + * @return The number of calls which were timed out. */ int handleTimeouts(Collection calls, String msg) { int numTimedOut = 0; @@ -1062,8 +1093,9 @@ int handleTimeouts(Collection calls, String msg) { * Check whether a call should be timed out. * The remaining milliseconds until the next timeout will be updated. * - * @param call The call. - * @return True if the call should be timed out. + * @param call The call. + * + * @return True if the call should be timed out. */ boolean callHasExpired(Call call) { int remainingMs = calcTimeoutMsRemainingAsInt(now, call.deadlineMs); @@ -1123,7 +1155,7 @@ private final class AdminClientRunnable implements Runnable { /** * Time out the elements in the pendingCalls list which are expired. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private void timeoutPendingCalls(TimeoutProcessor processor) { int numTimedOut = processor.handleTimeouts(pendingCalls, "Timed out waiting for a node assignment."); @@ -1134,7 +1166,7 @@ private void timeoutPendingCalls(TimeoutProcessor processor) { /** * Time out calls which have been assigned to nodes. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private int timeoutCallsToSend(TimeoutProcessor processor) { int numTimedOut = 0; @@ -1149,7 +1181,7 @@ private int timeoutCallsToSend(TimeoutProcessor processor) { /** * Drain all the calls from newCalls into pendingCalls. - *

    + * * This function holds the lock for the minimum amount of time, to avoid blocking * users of AdminClient who will also take the lock to add new calls. */ @@ -1161,7 +1193,7 @@ private synchronized void drainNewCalls() { * Add some calls to pendingCalls, and then clear the input list. * Also clears Call#curNode. * - * @param calls The calls to add. + * @param calls The calls to add. */ private void transitionToPendingAndClearList(List calls) { for (Call call : calls) { @@ -1174,9 +1206,9 @@ private void transitionToPendingAndClearList(List calls) { /** * Choose nodes for the calls in the pendingCalls list. * - * @param now The current time in milliseconds. - * @return The minimum time until a call is ready to be retried if any of the pending - * calls are backing off after a failure + * @param now The current time in milliseconds. + * @return The minimum time until a call is ready to be retried if any of the pending + * calls are backing off after a failure */ private long maybeDrainPendingCalls(long now) { long pollTimeout = Long.MAX_VALUE; @@ -1234,8 +1266,8 @@ private boolean maybeDrainPendingCall(Call call, long now) { /** * Send the calls which are ready. * - * @param now The current time in milliseconds. - * @return The minimum timeout we need for poll(). + * @param now The current time in milliseconds. + * @return The minimum timeout we need for poll(). */ private long sendEligibleCalls(long now) { long pollTimeout = Long.MAX_VALUE; @@ -1257,7 +1289,7 @@ private long sendEligibleCalls(long now) { if (deadline != null) { if (now >= deadline) { log.info("Disconnecting from {} and revoking {} node assignment(s) " + - "because the node is taking too long to become ready.", + "because the node is taking too long to become ready.", node.idString(), calls.size()); transitionToPendingAndClearList(calls); client.disconnect(node.idString()); @@ -1310,12 +1342,12 @@ private long sendEligibleCalls(long now) { /** * Time out expired calls that are in flight. - *

    + * * Calls that are in flight may have been partially or completely sent over the wire. They may * even be in the process of being processed by the remote server. At the moment, our only option * to time them out is to close the entire connection. * - * @param processor The timeout processor. + * @param processor The timeout processor. */ private void timeoutCallsInFlight(TimeoutProcessor processor) { int numTimedOut = 0; @@ -1338,8 +1370,8 @@ private void timeoutCallsInFlight(TimeoutProcessor processor) { /** * Handle responses from the server. * - * @param now The current time in milliseconds. - * @param responses The latest responses from KafkaClient. + * @param now The current time in milliseconds. + * @param responses The latest responses from KafkaClient. */ private void handleResponses(long now, List responses) { for (ClientResponse response : responses) { @@ -1350,7 +1382,7 @@ private void handleResponses(long now, List responses) { // If the server returns information about a correlation ID we didn't use yet, // an internal server error has occurred. Close the connection and log an error message. log.error("Internal server error on {}: server returned information about unknown " + - "correlation ID {}, requestHeader = {}", response.destination(), correlationId, + "correlation ID {}, requestHeader = {}", response.destination(), correlationId, response.requestHeader()); client.disconnect(response.destination()); continue; @@ -1469,7 +1501,7 @@ public void run() { numTimedOut += timeoutProcessor.handleTimeouts(pendingCalls, "The AdminClient thread has exited."); numTimedOut += timeoutCallsToSend(timeoutProcessor); numTimedOut += timeoutProcessor.handleTimeouts(correlationIdToCalls.values(), - "The AdminClient thread has exited."); + "The AdminClient thread has exited."); if (numTimedOut > 0) { log.info("Timed out {} remaining operation(s) during close.", numTimedOut); } @@ -1539,13 +1571,13 @@ private void processRequests() { /** * Queue a call for sending. - *

    + * * If the AdminClient thread has exited, this will fail. Otherwise, it will succeed (even * if the AdminClient is shutting down). This function should called when retrying an * existing call. * - * @param call The new call object. - * @param now The current time in milliseconds. + * @param call The new call object. + * @param now The current time in milliseconds. */ void enqueue(Call call, long now) { if (call.tries > maxRetries) { @@ -1576,18 +1608,18 @@ void enqueue(Call call, long now) { /** * Initiate a new call. - *

    + * * This will fail if the AdminClient is scheduled to shut down. * - * @param call The new call object. - * @param now The current time in milliseconds. + * @param call The new call object. + * @param now The current time in milliseconds. */ void call(Call call, long now) { if (hardShutdownTimeMs.get() != INVALID_SHUTDOWN_TIME) { log.debug("Cannot accept new call {} when AdminClient is closing.", call); call.handleFailure(new IllegalStateException("Cannot accept new calls when AdminClient is closing.")); } else if (metadataManager.usingBootstrapControllers() && - (!call.nodeProvider.supportsUseControllers())) { + (!call.nodeProvider.supportsUseControllers())) { call.fail(now, new UnsupportedEndpointTypeException("This Admin API is not " + "yet supported when communicating directly with the controller quorum.")); } else { @@ -1609,7 +1641,7 @@ private Call makeMetadataCall(long now) { private Call makeControllerMetadataCall(long now) { // Use DescribeCluster here, as specified by KIP-919. return new Call(true, "describeCluster", calcDeadlineMs(now, requestTimeoutMs), - new MetadataUpdateNodeIdProvider()) { + new MetadataUpdateNodeIdProvider()) { @Override public DescribeClusterRequest.Builder createRequest(int timeoutMs) { return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() @@ -1652,7 +1684,7 @@ private Call makeBrokerMetadataCall(long now) { // We use MetadataRequest here so that we can continue to support brokers that are too // old to handle DescribeCluster. return new Call(true, "fetchMetadata", calcDeadlineMs(now, requestTimeoutMs), - new MetadataUpdateNodeIdProvider()) { + new MetadataUpdateNodeIdProvider()) { @Override public MetadataRequest.Builder createRequest(int timeoutMs) { // Since this only requests node information, it's safe to pass true @@ -1741,10 +1773,10 @@ int numPendingCalls() { * Used when a response handler expected a result for some entity but no result was present. */ private static void completeUnrealizedFutures( - Stream>> futures, - Function messageFormatter) { + Stream>> futures, + Function messageFormatter) { futures.filter(entry -> !entry.getValue().isDone()).forEach(entry -> - entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey())))); + entry.getValue().completeExceptionally(new ApiException(messageFormatter.apply(entry.getKey())))); } /** @@ -1752,11 +1784,11 @@ private static void completeUnrealizedFutures( * the initial error back to the caller if the request timed out. */ private static void maybeCompleteQuotaExceededException( - boolean shouldRetryOnQuotaViolation, - Throwable throwable, - Map> futures, - Map quotaExceededExceptions, - int throttleTimeDelta) { + boolean shouldRetryOnQuotaViolation, + Throwable throwable, + Map> futures, + Map quotaExceededExceptions, + int throttleTimeDelta) { if (shouldRetryOnQuotaViolation && throwable instanceof TimeoutException) { quotaExceededExceptions.forEach((key, value) -> futures.get(key).completeExceptionally( new ThrottlingQuotaExceededException( @@ -2035,10 +2067,10 @@ private Call getDeleteTopicsWithIdsCall(final DeleteTopicsOptions options, @Override DeleteTopicsRequest.Builder createRequest(int timeoutMs) { return new DeleteTopicsRequest.Builder( - new DeleteTopicsRequestData() - .setTopics(topicIds.stream().map( - topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList())) - .setTimeoutMs(timeoutMs)); + new DeleteTopicsRequestData() + .setTopics(topicIds.stream().map( + topic -> new DeleteTopicState().setTopicId(topic)).collect(Collectors.toList())) + .setTimeoutMs(timeoutMs)); } @Override @@ -2058,7 +2090,7 @@ void handleResponse(AbstractResponse abstractResponse) { if (error.isFailure()) { if (error.is(Errors.THROTTLING_QUOTA_EXCEEDED)) { ThrottlingQuotaExceededException quotaExceededException = new ThrottlingQuotaExceededException( - response.throttleTimeMs(), error.messageWithFallback()); + response.throttleTimeMs(), error.messageWithFallback()); if (options.shouldRetryOnQuotaViolation()) { retryTopics.add(result.topicId()); retryTopicQuotaExceededExceptions.put(result.topicId(), quotaExceededException); @@ -2081,7 +2113,7 @@ void handleResponse(AbstractResponse abstractResponse) { } else { final long now = time.milliseconds(); final Call call = getDeleteTopicsWithIdsCall(options, futures, retryTopics, - retryTopicQuotaExceededExceptions, now, deadline); + retryTopicQuotaExceededExceptions, now, deadline); runnable.call(call, now); } } @@ -2091,7 +2123,7 @@ void handleFailure(Throwable throwable) { // If there were any topics retries due to a quota exceeded exception, we propagate // the initial error back to the caller if the request timed out. maybeCompleteQuotaExceededException(options.shouldRetryOnQuotaViolation(), - throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now)); + throwable, futures, quotaExceededExceptions, (int) (time.milliseconds() - now)); // Fail all the other remaining futures completeAllExceptionally(futures.values(), throwable); } @@ -2278,7 +2310,7 @@ void handleResponse(AbstractResponse abstractResponse) { } if (partiallyFinishedTopicDescription != null && - (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) { + (responseCursor == null || !responseCursor.topicName().equals(partiallyFinishedTopicDescription.name()))) { // We can't simply check nextTopicDescription != null here to close the partiallyFinishedTopicDescription. // Because the responseCursor topic may not show in the response. String topicName = partiallyFinishedTopicDescription.name(); @@ -2334,7 +2366,7 @@ private Map> handleDescribeTopicsByNamesWi } // First, we need to retrieve the node info. - DescribeClusterResult clusterResult = describeCluster(new DescribeClusterOptions().timeoutMs(options.timeoutMs())); + DescribeClusterResult clusterResult = describeCluster(); clusterResult.nodes().whenComplete( (nodes, exception) -> { if (exception != null) { @@ -2361,7 +2393,7 @@ private Map> handleDescribeTopicsByIds(Colle if (topicIdIsUnrepresentable(topicId)) { KafkaFutureImpl future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidTopicException("The given topic id '" + - topicId + "' cannot be represented in a request.")); + topicId + "' cannot be represented in a request.")); topicFutures.put(topicId, future); } else if (!topicFutures.containsKey(topicId)) { topicFutures.put(topicId, new KafkaFutureImpl<>()); @@ -2370,14 +2402,14 @@ private Map> handleDescribeTopicsByIds(Colle } final long now = time.milliseconds(); Call call = new Call("describeTopicsWithIds", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { @Override MetadataRequest.Builder createRequest(int timeoutMs) { return new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList)) - .setAllowAutoTopicCreation(false) - .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations())); + .setTopics(convertTopicIdsToMetadataRequestTopic(topicIdsList)) + .setAllowAutoTopicCreation(false) + .setIncludeTopicAuthorizedOperations(options.includeAuthorizedOperations())); } @Override @@ -2439,8 +2471,8 @@ private TopicDescription getTopicDescriptionFromCluster(Cluster cluster, String List partitions = new ArrayList<>(partitionInfos.size()); for (PartitionInfo partitionInfo : partitionInfos) { TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo( - partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()), - Arrays.asList(partitionInfo.inSyncReplicas())); + partitionInfo.partition(), leader(partitionInfo), Arrays.asList(partitionInfo.replicas()), + Arrays.asList(partitionInfo.inSyncReplicas())); partitions.add(topicPartitionInfo); } partitions.sort(Comparator.comparingInt(TopicPartitionInfo::partition)); @@ -2467,7 +2499,7 @@ public DescribeClusterResult describeCluster(DescribeClusterOptions options) { private boolean useMetadataRequest = false; @Override - AbstractRequest.Builder createRequest(int timeoutMs) { + AbstractRequest.Builder createRequest(int timeoutMs) { if (!useMetadataRequest) { if (metadataManager.usingBootstrapControllers() && options.includeFencedBrokers()) { throw new IllegalArgumentException("Cannot request fenced brokers from controller endpoint"); @@ -2475,7 +2507,7 @@ AbstractRequest.Builder createRequest(int timeoutMs) { return new DescribeClusterRequest.Builder(new DescribeClusterRequestData() .setIncludeClusterAuthorizedOperations(options.includeAuthorizedOperations()) .setEndpointType(metadataManager.usingBootstrapControllers() ? - EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()) + EndpointType.CONTROLLER.id() : EndpointType.BROKER.id()) .setIncludeFencedBrokers(options.includeFencedBrokers())); } else { // Since this only requests node information, it's safe to pass true for allowAutoTopicCreation (and it @@ -2494,7 +2526,8 @@ void handleResponse(AbstractResponse abstractResponse) { DescribeClusterResponse response = (DescribeClusterResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); if (error != Errors.NONE) { - handleFailure(error.exception(response.data().errorMessage())); + ApiError apiError = new ApiError(error, response.data().errorMessage()); + handleFailure(apiError.exception()); return; } @@ -2558,7 +2591,7 @@ public DescribeAclsResult describeAcls(final AclBindingFilter filter, DescribeAc if (filter.isUnknown()) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.completeExceptionally(new InvalidRequestException("The AclBindingFilter " + - "must not contain UNKNOWN elements.")); + "must not contain UNKNOWN elements.")); return new DescribeAclsResult(future); } final long now = time.milliseconds(); @@ -2690,9 +2723,10 @@ void handleResponse(AbstractResponse abstractResponse) { } else { List filterResults = new ArrayList<>(); for (DeleteAclsMatchingAcl matchingAcl : filterResult.matchingAcls()) { - Errors aclError = Errors.forCode(matchingAcl.errorCode()); + ApiError aclError = new ApiError(Errors.forCode(matchingAcl.errorCode()), + matchingAcl.errorMessage()); AclBinding aclBinding = DeleteAclsResponse.aclBinding(matchingAcl); - filterResults.add(new FilterResult(aclBinding, aclError.exception(matchingAcl.errorMessage()))); + filterResults.add(new FilterResult(aclBinding, aclError.exception())); } future.complete(new FilterResults(filterResults)); } @@ -2757,15 +2791,15 @@ void handleResponse(AbstractResponse abstractResponse) { if (future == null) { if (node != null) { log.warn("The config {} in the response from node {} is not in the request", - configResource, node); + configResource, node); } else { log.warn("The config {} in the response from the least loaded broker is not in the request", - configResource); + configResource); } } else { if (describeConfigsResult.errorCode() != Errors.NONE.code()) { future.completeExceptionally(Errors.forCode(describeConfigsResult.errorCode()) - .exception(describeConfigsResult.errorMessage())); + .exception(describeConfigsResult.errorMessage())); } else { future.complete(describeConfigResult(describeConfigsResult)); } @@ -2801,15 +2835,15 @@ void handleFailure(Throwable throwable) { private Config describeConfigResult(DescribeConfigsResponseData.DescribeConfigsResult describeConfigsResult) { return new Config(describeConfigsResult.configs().stream().map(config -> new ConfigEntry( - config.name(), - config.value(), - DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(), - config.isSensitive(), - config.readOnly(), - (config.synonyms().stream().map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(), - DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))).collect(Collectors.toList()), - DescribeConfigsResponse.ConfigType.forId(config.configType()).type(), - config.documentation() + config.name(), + config.value(), + DescribeConfigsResponse.ConfigSource.forId(config.configSource()).source(), + config.isSensitive(), + config.readOnly(), + (config.synonyms().stream().map(synonym -> new ConfigEntry.ConfigSynonym(synonym.name(), synonym.value(), + DescribeConfigsResponse.ConfigSource.forId(synonym.source()).source()))).collect(Collectors.toList()), + DescribeConfigsResponse.ConfigType.forId(config.configType()).type(), + config.documentation() )).collect(Collectors.toList())); } @@ -2921,7 +2955,7 @@ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map()); Map replicaAssignmentByBroker = new HashMap<>(); - for (Map.Entry entry : replicaAssignment.entrySet()) { + for (Map.Entry entry: replicaAssignment.entrySet()) { TopicPartitionReplica replica = entry.getKey(); String logDir = entry.getValue(); int brokerId = replica.brokerId(); @@ -2942,7 +2976,7 @@ public AlterReplicaLogDirsResult alterReplicaLogDirs(Map entry : replicaAssignmentByBroker.entrySet()) { + for (Map.Entry entry: replicaAssignmentByBroker.entrySet()) { final int brokerId = entry.getKey(); final AlterReplicaLogDirsRequestData assignment = entry.getValue(); @@ -2957,15 +2991,15 @@ public AlterReplicaLogDirsRequest.Builder createRequest(int timeoutMs) { @Override public void handleResponse(AbstractResponse abstractResponse) { AlterReplicaLogDirsResponse response = (AlterReplicaLogDirsResponse) abstractResponse; - for (AlterReplicaLogDirTopicResult topicResult : response.data().results()) { - for (AlterReplicaLogDirPartitionResult partitionResult : topicResult.partitions()) { + for (AlterReplicaLogDirTopicResult topicResult: response.data().results()) { + for (AlterReplicaLogDirPartitionResult partitionResult: topicResult.partitions()) { TopicPartitionReplica replica = new TopicPartitionReplica( - topicResult.topicName(), partitionResult.partitionIndex(), brokerId); + topicResult.topicName(), partitionResult.partitionIndex(), brokerId); KafkaFutureImpl future = futures.get(replica); if (future == null) { log.warn("The partition {} in the response from broker {} is not in the request", - new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()), - brokerId); + new TopicPartition(topicResult.topicName(), partitionResult.partitionIndex()), + brokerId); } else if (partitionResult.errorCode() == Errors.NONE.code()) { future.complete(null); } else { @@ -2977,9 +3011,8 @@ public void handleResponse(AbstractResponse abstractResponse) { completeUnrealizedFutures( futures.entrySet().stream().filter(entry -> entry.getKey().brokerId() == brokerId), replica -> "The response from broker " + brokerId + - " did not contain a result for replica " + replica); + " did not contain a result for replica " + replica); } - @Override void handleFailure(Throwable throwable) { // Only completes the futures of brokerId @@ -3022,12 +3055,11 @@ public void handleResponse(AbstractResponse abstractResponse) { } else { // Up to v3 DescribeLogDirsResponse did not have an error code field, hence it defaults to None Errors error = response.data().errorCode() == Errors.NONE.code() - ? Errors.CLUSTER_AUTHORIZATION_FAILED - : Errors.forCode(response.data().errorCode()); + ? Errors.CLUSTER_AUTHORIZATION_FAILED + : Errors.forCode(response.data().errorCode()); future.completeExceptionally(error.exception()); } } - @Override void handleFailure(Throwable throwable) { future.completeExceptionally(throwable); @@ -3045,15 +3077,15 @@ private static Map logDirDescriptions(DescribeLogDirs for (DescribeLogDirsResponseData.DescribeLogDirsTopic t : logDirResult.topics()) { for (DescribeLogDirsResponseData.DescribeLogDirsPartition p : t.partitions()) { replicaInfoMap.put( - new TopicPartition(t.name(), p.partitionIndex()), - new ReplicaInfo(p.partitionSize(), p.offsetLag(), p.isFutureKey())); + new TopicPartition(t.name(), p.partitionIndex()), + new ReplicaInfo(p.partitionSize(), p.offsetLag(), p.isFutureKey())); } } result.put(logDirResult.logDir(), new LogDirDescription( - Errors.forCode(logDirResult.errorCode()).exception(), - replicaInfoMap, - logDirResult.totalBytes(), - logDirResult.usableBytes())); + Errors.forCode(logDirResult.errorCode()).exception(), + replicaInfoMap, + logDirResult.totalBytes(), + logDirResult.usableBytes())); } return result; } @@ -3068,7 +3100,7 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection partitionsByBroker = new HashMap<>(); - for (TopicPartitionReplica replica : replicas) { + for (TopicPartitionReplica replica: replicas) { DescribeLogDirsRequestData requestData = partitionsByBroker.computeIfAbsent(replica.brokerId(), brokerId -> new DescribeLogDirsRequestData()); DescribableLogDirTopic describableLogDirTopic = requestData.topics().find(replica.topic()); @@ -3076,7 +3108,7 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection partitions = new ArrayList<>(); partitions.add(replica.partition()); describableLogDirTopic = new DescribableLogDirTopic().setTopic(replica.topic()) - .setPartitions(partitions); + .setPartitions(partitions); requestData.topics().add(describableLogDirTopic); } else { describableLogDirTopic.partitions().add(replica.partition()); @@ -3084,11 +3116,11 @@ public DescribeReplicaLogDirsResult describeReplicaLogDirs(Collection entry : partitionsByBroker.entrySet()) { + for (Map.Entry entry: partitionsByBroker.entrySet()) { final int brokerId = entry.getKey(); final DescribeLogDirsRequestData topicPartitions = entry.getValue(); final Map replicaDirInfoByPartition = new HashMap<>(); - for (DescribableLogDirTopic topicPartition : topicPartitions.topics()) { + for (DescribableLogDirTopic topicPartition: topicPartitions.topics()) { for (Integer partitionId : topicPartition.partitions()) { replicaDirInfoByPartition.put(new TopicPartition(topicPartition.topic(), partitionId), new ReplicaLogDirInfo()); } @@ -3106,7 +3138,7 @@ public DescribeLogDirsRequest.Builder createRequest(int timeoutMs) { @Override public void handleResponse(AbstractResponse abstractResponse) { DescribeLogDirsResponse response = (DescribeLogDirsResponse) abstractResponse; - for (Map.Entry responseEntry : logDirDescriptions(response).entrySet()) { + for (Map.Entry responseEntry: logDirDescriptions(response).entrySet()) { String logDir = responseEntry.getKey(); LogDirDescription logDirInfo = responseEntry.getValue(); @@ -3117,7 +3149,7 @@ public void handleResponse(AbstractResponse abstractResponse) { handleFailure(new IllegalStateException( "The error " + logDirInfo.error().getClass().getName() + " for log directory " + logDir + " in the response from broker " + brokerId + " is illegal")); - for (Map.Entry replicaInfoEntry : logDirInfo.replicaInfos().entrySet()) { + for (Map.Entry replicaInfoEntry: logDirInfo.replicaInfos().entrySet()) { TopicPartition tp = replicaInfoEntry.getKey(); ReplicaInfo replicaInfo = replicaInfoEntry.getValue(); ReplicaLogDirInfo replicaLogDirInfo = replicaDirInfoByPartition.get(tp); @@ -3125,25 +3157,24 @@ public void handleResponse(AbstractResponse abstractResponse) { log.warn("Server response from broker {} mentioned unknown partition {}", brokerId, tp); } else if (replicaInfo.isFuture()) { replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(replicaLogDirInfo.getCurrentReplicaLogDir(), - replicaLogDirInfo.getCurrentReplicaOffsetLag(), - logDir, - replicaInfo.offsetLag())); + replicaLogDirInfo.getCurrentReplicaOffsetLag(), + logDir, + replicaInfo.offsetLag())); } else { replicaDirInfoByPartition.put(tp, new ReplicaLogDirInfo(logDir, - replicaInfo.offsetLag(), - replicaLogDirInfo.getFutureReplicaLogDir(), - replicaLogDirInfo.getFutureReplicaOffsetLag())); + replicaInfo.offsetLag(), + replicaLogDirInfo.getFutureReplicaLogDir(), + replicaLogDirInfo.getFutureReplicaOffsetLag())); } } } - for (Map.Entry entry : replicaDirInfoByPartition.entrySet()) { + for (Map.Entry entry: replicaDirInfoByPartition.entrySet()) { TopicPartition tp = entry.getKey(); KafkaFutureImpl future = futures.get(new TopicPartitionReplica(tp.topic(), tp.partition(), brokerId)); future.complete(entry.getValue()); } } - @Override void handleFailure(Throwable throwable) { completeAllExceptionally(futures.values(), throwable); @@ -3279,8 +3310,8 @@ public CreateDelegationTokenResult createDelegationToken(final CreateDelegationT List renewers = new ArrayList<>(); for (KafkaPrincipal principal : options.renewers()) { renewers.add(new CreatableRenewers() - .setPrincipalName(principal.getName()) - .setPrincipalType(principal.getPrincipalType())); + .setPrincipalName(principal.getName()) + .setPrincipalType(principal.getPrincipalType())); } runnable.call(new Call("createDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3304,7 +3335,7 @@ void handleResponse(AbstractResponse abstractResponse) { delegationTokenFuture.completeExceptionally(response.error().exception()); } else { CreateDelegationTokenResponseData data = response.data(); - TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()), + TokenInformation tokenInfo = new TokenInformation(data.tokenId(), new KafkaPrincipal(data.principalType(), data.principalName()), new KafkaPrincipal(data.tokenRequesterPrincipalType(), data.tokenRequesterPrincipalName()), options.renewers(), data.issueTimestampMs(), data.maxTimestampMs(), data.expiryTimestampMs()); DelegationToken token = new DelegationToken(tokenInfo, data.hmac()); @@ -3323,7 +3354,7 @@ void handleFailure(Throwable throwable) { @Override public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final RenewDelegationTokenOptions options) { - final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("renewDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3331,7 +3362,7 @@ public RenewDelegationTokenResult renewDelegationToken(final byte[] hmac, final @Override RenewDelegationTokenRequest.Builder createRequest(int timeoutMs) { return new RenewDelegationTokenRequest.Builder( - new RenewDelegationTokenRequestData() + new RenewDelegationTokenRequestData() .setHmac(hmac) .setRenewPeriodMs(options.renewTimePeriodMs())); } @@ -3357,7 +3388,7 @@ void handleFailure(Throwable throwable) { @Override public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, final ExpireDelegationTokenOptions options) { - final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl expiryTimeFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("expireDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3365,9 +3396,9 @@ public ExpireDelegationTokenResult expireDelegationToken(final byte[] hmac, fina @Override ExpireDelegationTokenRequest.Builder createRequest(int timeoutMs) { return new ExpireDelegationTokenRequest.Builder( - new ExpireDelegationTokenRequestData() - .setHmac(hmac) - .setExpiryTimePeriodMs(options.expiryTimePeriodMs())); + new ExpireDelegationTokenRequestData() + .setHmac(hmac) + .setExpiryTimePeriodMs(options.expiryTimePeriodMs())); } @Override @@ -3391,7 +3422,7 @@ void handleFailure(Throwable throwable) { @Override public DescribeDelegationTokenResult describeDelegationToken(final DescribeDelegationTokenOptions options) { - final KafkaFutureImpl> tokensFuture = new KafkaFutureImpl<>(); + final KafkaFutureImpl> tokensFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("describeDelegationToken", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { @@ -3505,29 +3536,27 @@ ListGroupsRequest.Builder createRequest(int timeoutMs) { } private void maybeAddGroup(ListGroupsResponseData.ListedGroup group) { - String protocolType = group.protocolType(); - if (options.protocolTypes().isEmpty() || options.protocolTypes().contains(protocolType)) { - final String groupId = group.groupId(); - final Optional type; - if (group.groupType() == null || group.groupType().isEmpty()) { - type = Optional.empty(); - } else { - type = Optional.of(GroupType.parse(group.groupType())); - } - final Optional groupState; - if (group.groupState() == null || group.groupState().isEmpty()) { - groupState = Optional.empty(); - } else { - groupState = Optional.of(GroupState.parse(group.groupState())); - } - final GroupListing groupListing = new GroupListing( - groupId, - type, - protocolType, - groupState - ); - results.addListing(groupListing); + final String groupId = group.groupId(); + final Optional type; + if (group.groupType() == null || group.groupType().isEmpty()) { + type = Optional.empty(); + } else { + type = Optional.of(GroupType.parse(group.groupType())); + } + final String protocolType = group.protocolType(); + final Optional groupState; + if (group.groupState() == null || group.groupState().isEmpty()) { + groupState = Optional.empty(); + } else { + groupState = Optional.of(GroupState.parse(group.groupState())); } + final GroupListing groupListing = new GroupListing( + groupId, + type, + protocolType, + groupState + ); + results.addListing(groupListing); } @Override @@ -3573,14 +3602,13 @@ void handleFailure(Throwable throwable) { public DescribeConsumerGroupsResult describeConsumerGroups(final Collection groupIds, final DescribeConsumerGroupsOptions options) { SimpleAdminApiFuture future = - DescribeConsumerGroupsHandler.newFuture(groupIds); + DescribeConsumerGroupsHandler.newFuture(groupIds); DescribeConsumerGroupsHandler handler = new DescribeConsumerGroupsHandler(options.includeAuthorizedOperations(), logContext); invokeDriver(handler, future, options.timeoutMs); return new DescribeConsumerGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } - @Deprecated private static final class ListConsumerGroupsResults { private final List errors; private final HashMap listings; @@ -3624,8 +3652,6 @@ private synchronized void tryComplete() { } @Override - @SuppressWarnings("removal") - @Deprecated(since = "4.1", forRemoval = true) public ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) { final KafkaFutureImpl> all = new KafkaFutureImpl<>(); final long nowMetadata = time.milliseconds(); @@ -3654,13 +3680,13 @@ void handleResponse(AbstractResponse abstractResponse) { @Override ListGroupsRequest.Builder createRequest(int timeoutMs) { List states = options.groupStates() - .stream() - .map(GroupState::toString) - .collect(Collectors.toList()); + .stream() + .map(GroupState::toString) + .collect(Collectors.toList()); List groupTypes = options.types() - .stream() - .map(GroupType::toString) - .collect(Collectors.toList()); + .stream() + .map(GroupType::toString) + .collect(Collectors.toList()); return new ListGroupsRequest.Builder(new ListGroupsRequestData() .setStatesFilter(states) .setTypesFilter(groupTypes) @@ -3672,17 +3698,17 @@ private void maybeAddConsumerGroup(ListGroupsResponseData.ListedGroup group) { if (protocolType.equals(ConsumerProtocol.PROTOCOL_TYPE) || protocolType.isEmpty()) { final String groupId = group.groupId(); final Optional groupState = group.groupState().isEmpty() - ? Optional.empty() - : Optional.of(GroupState.parse(group.groupState())); + ? Optional.empty() + : Optional.of(GroupState.parse(group.groupState())); final Optional type = group.groupType().isEmpty() - ? Optional.empty() - : Optional.of(GroupType.parse(group.groupType())); + ? Optional.empty() + : Optional.of(GroupType.parse(group.groupType())); final ConsumerGroupListing groupListing = new ConsumerGroupListing( - groupId, - groupState, - type, - protocolType.isEmpty() - ); + groupId, + groupState, + type, + protocolType.isEmpty() + ); results.addListing(groupListing); } } @@ -3730,115 +3756,44 @@ void handleFailure(Throwable throwable) { public ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map groupSpecs, ListConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture> future = - ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet()); + ListConsumerGroupOffsetsHandler.newFuture(groupSpecs.keySet()); ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(groupSpecs, options.requireStable(), logContext); invokeDriver(handler, future, options.timeoutMs); return new ListConsumerGroupOffsetsResult(future.all()); } - @Override - public ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs, - ListStreamsGroupOffsetsOptions options) { - Map consumerGroupSpecs = groupSpecs.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - entry -> new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()) - )); - ListConsumerGroupOffsetsOptions consumerGroupOptions = new ListConsumerGroupOffsetsOptions() - .requireStable(options.requireStable()) - .timeoutMs(options.timeoutMs()); - return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, consumerGroupOptions)); - } - @Override public DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options) { SimpleAdminApiFuture future = - DeleteConsumerGroupsHandler.newFuture(groupIds); + DeleteConsumerGroupsHandler.newFuture(groupIds); DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(logContext); invokeDriver(handler, future, options.timeoutMs); return new DeleteConsumerGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); - } - - @Override - public DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options) { - DeleteConsumerGroupsOptions consumerGroupOptions = new DeleteConsumerGroupsOptions() - .timeoutMs(options.timeoutMs()); - return new DeleteStreamsGroupsResult(deleteConsumerGroups(groupIds, consumerGroupOptions)); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } @Override public DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets( - String groupId, - Set partitions, - DeleteConsumerGroupOffsetsOptions options) { + String groupId, + Set partitions, + DeleteConsumerGroupOffsetsOptions options) { SimpleAdminApiFuture> future = - DeleteConsumerGroupOffsetsHandler.newFuture(groupId); + DeleteConsumerGroupOffsetsHandler.newFuture(groupId); DeleteConsumerGroupOffsetsHandler handler = new DeleteConsumerGroupOffsetsHandler(groupId, partitions, logContext); invokeDriver(handler, future, options.timeoutMs); return new DeleteConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), partitions); } - @Override - public DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets( - String groupId, - Set partitions, - DeleteStreamsGroupOffsetsOptions options) { - DeleteConsumerGroupOffsetsOptions consumerGroupOptions = new DeleteConsumerGroupOffsetsOptions() - .timeoutMs(options.timeoutMs()); - return new DeleteStreamsGroupOffsetsResult(deleteConsumerGroupOffsets(groupId, partitions, consumerGroupOptions)); - } - @Override public DescribeShareGroupsResult describeShareGroups(final Collection groupIds, final DescribeShareGroupsOptions options) { SimpleAdminApiFuture future = - DescribeShareGroupsHandler.newFuture(groupIds); + DescribeShareGroupsHandler.newFuture(groupIds); DescribeShareGroupsHandler handler = new DescribeShareGroupsHandler(options.includeAuthorizedOperations(), logContext); invokeDriver(handler, future, options.timeoutMs); return new DescribeShareGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); - } - - @Override - public AlterShareGroupOffsetsResult alterShareGroupOffsets(final String groupId, - final Map offsets, - final AlterShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = AlterShareGroupOffsetsHandler.newFuture(groupId); - AlterShareGroupOffsetsHandler handler = new AlterShareGroupOffsetsHandler(groupId, offsets, logContext); - invokeDriver(handler, future, options.timeoutMs); - return new AlterShareGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId))); - } - - @Override - public ListShareGroupOffsetsResult listShareGroupOffsets(final Map groupSpecs, - final ListShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = ListShareGroupOffsetsHandler.newFuture(groupSpecs.keySet()); - ListShareGroupOffsetsHandler handler = new ListShareGroupOffsetsHandler(groupSpecs, logContext); - invokeDriver(handler, future, options.timeoutMs); - return new ListShareGroupOffsetsResult(future.all()); - } - - @Override - public DeleteShareGroupOffsetsResult deleteShareGroupOffsets(final String groupId, - final Set topics, - final DeleteShareGroupOffsetsOptions options) { - SimpleAdminApiFuture> future = DeleteShareGroupOffsetsHandler.newFuture(groupId); - DeleteShareGroupOffsetsHandler handler = new DeleteShareGroupOffsetsHandler(groupId, topics, logContext); - invokeDriver(handler, future, options.timeoutMs); - return new DeleteShareGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId)), topics); - } - - @Override - public DescribeStreamsGroupsResult describeStreamsGroups(final Collection groupIds, - final DescribeStreamsGroupsOptions options) { - SimpleAdminApiFuture future = - DescribeStreamsGroupsHandler.newFuture(groupIds); - DescribeStreamsGroupsHandler handler = new DescribeStreamsGroupsHandler(options.includeAuthorizedOperations(), logContext); - invokeDriver(handler, future, options.timeoutMs); - return new DescribeStreamsGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); + .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); } @Override @@ -3852,16 +3807,6 @@ public DescribeClassicGroupsResult describeClassicGroups(final Collection entry.getKey().idValue, Map.Entry::getValue))); } - @Override - public DeleteShareGroupsResult deleteShareGroups(Collection groupIds, DeleteShareGroupsOptions options) { - SimpleAdminApiFuture future = - DeleteShareGroupsHandler.newFuture(groupIds); - DeleteShareGroupsHandler handler = new DeleteShareGroupsHandler(logContext); - invokeDriver(handler, future, options.timeoutMs); - return new DeleteShareGroupsResult(future.all().entrySet().stream() - .collect(Collectors.toMap(entry -> entry.getKey().idValue, Map.Entry::getValue))); - } - @Override public Map metrics() { return Collections.unmodifiableMap(this.metrics.metrics()); @@ -3869,13 +3814,13 @@ public DeleteShareGroupsResult deleteShareGroups(Collection groupIds, De @Override public ElectLeadersResult electLeaders( - final ElectionType electionType, - final Set topicPartitions, - ElectLeadersOptions options) { + final ElectionType electionType, + final Set topicPartitions, + ElectLeadersOptions options) { final KafkaFutureImpl>> electionFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); runnable.call(new Call("electLeaders", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider()) { + new ControllerNodeProvider()) { @Override public ElectLeadersRequest.Builder createRequest(int timeoutMs) { @@ -3908,8 +3853,8 @@ void handleFailure(Throwable throwable) { @Override public AlterPartitionReassignmentsResult alterPartitionReassignments( - Map> reassignments, - AlterPartitionReassignmentsOptions options) { + Map> reassignments, + AlterPartitionReassignmentsOptions options) { final Map> futures = new HashMap<>(); final Map>> topicsToReassignments = new TreeMap<>(); for (Map.Entry> entry : reassignments.entrySet()) { @@ -3922,13 +3867,13 @@ public AlterPartitionReassignmentsResult alterPartitionReassignments( if (topicNameIsUnrepresentable(topic)) { future.completeExceptionally(new InvalidTopicException("The given topic name '" + - topic + "' cannot be represented in a request.")); + topic + "' cannot be represented in a request.")); } else if (topicPartition.partition() < 0) { future.completeExceptionally(new InvalidTopicException("The given partition index " + - topicPartition.partition() + " is not valid.")); + topicPartition.partition() + " is not valid.")); } else { Map> partitionReassignments = - topicsToReassignments.get(topicPartition.topic()); + topicsToReassignments.get(topicPartition.topic()); if (partitionReassignments == null) { partitionReassignments = new TreeMap<>(); topicsToReassignments.put(topic, partitionReassignments); @@ -3940,36 +3885,35 @@ public AlterPartitionReassignmentsResult alterPartitionReassignments( final long now = time.milliseconds(); Call call = new Call("alterPartitionReassignments", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider(true)) { + new ControllerNodeProvider(true)) { @Override public AlterPartitionReassignmentsRequest.Builder createRequest(int timeoutMs) { AlterPartitionReassignmentsRequestData data = - new AlterPartitionReassignmentsRequestData(); + new AlterPartitionReassignmentsRequestData(); for (Map.Entry>> entry : - topicsToReassignments.entrySet()) { + topicsToReassignments.entrySet()) { String topicName = entry.getKey(); Map> partitionsToReassignments = entry.getValue(); List reassignablePartitions = new ArrayList<>(); for (Map.Entry> partitionEntry : - partitionsToReassignments.entrySet()) { + partitionsToReassignments.entrySet()) { int partitionIndex = partitionEntry.getKey(); Optional reassignment = partitionEntry.getValue(); ReassignablePartition reassignablePartition = new ReassignablePartition() - .setPartitionIndex(partitionIndex) - .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null)); + .setPartitionIndex(partitionIndex) + .setReplicas(reassignment.map(NewPartitionReassignment::targetReplicas).orElse(null)); reassignablePartitions.add(reassignablePartition); } ReassignableTopic reassignableTopic = new ReassignableTopic() - .setName(topicName) - .setPartitions(reassignablePartitions); + .setName(topicName) + .setPartitions(reassignablePartitions); data.topics().add(reassignableTopic); } data.setTimeoutMs(timeoutMs); - data.setAllowReplicationFactorChange(options.allowReplicationFactorChange()); return new AlterPartitionReassignmentsRequest.Builder(data); } @@ -3992,8 +3936,8 @@ public void handleResponse(AbstractResponse abstractResponse) { String topicName = topicResponse.name(); for (ReassignablePartitionResponse partition : topicResponse.partitions()) { errors.put( - new TopicPartition(topicName, partition.partitionIndex()), - topLevelError.exception(response.data().errorMessage()) + new TopicPartition(topicName, partition.partitionIndex()), + new ApiError(topLevelError, response.data().errorMessage()).exception() ); receivedResponsesCount += 1; } @@ -4033,7 +3977,7 @@ private int validateTopicResponses(List topicResponse if (partitionError == Errors.NONE) { errors.put(tp, null); } else { - errors.put(tp, partitionError.exception(partResponse.errorMessage())); + errors.put(tp, new ApiError(partitionError, partResponse.errorMessage()).exception()); } receivedResponsesCount += 1; } @@ -4065,10 +4009,10 @@ public ListPartitionReassignmentsResult listPartitionReassignments(Optional reassignmentMap = new HashMap<>(); @@ -4157,7 +4101,7 @@ private void handleNotControllerError(Errors error) throws ApiException { */ private Integer nodeFor(ConfigResource resource) { if ((resource.type() == ConfigResource.Type.BROKER && !resource.isDefault()) - || resource.type() == ConfigResource.Type.BROKER_LOGGER) { + || resource.type() == ConfigResource.Type.BROKER_LOGGER) { return Integer.valueOf(resource.name()); } else { return null; @@ -4173,8 +4117,8 @@ private KafkaFutureImpl> getMembersFromGroup(String groupId } else { List membersToRemove = res.members().stream().map(member -> member.groupInstanceId().map(id -> new MemberIdentity().setGroupInstanceId(id)) - .orElseGet(() -> new MemberIdentity().setMemberId(member.consumerId())) - .setReason(reason) + .orElseGet(() -> new MemberIdentity().setMemberId(member.consumerId())) + .setReason(reason) ).collect(Collectors.toList()); future.complete(membersToRemove); @@ -4207,7 +4151,7 @@ public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(Strin DEFAULT_LEAVE_GROUP_REASON : JoinGroupRequest.maybeTruncateReason(options.reason()); final SimpleAdminApiFuture> adminFuture = - RemoveMembersFromConsumerGroupHandler.newFuture(groupId); + RemoveMembersFromConsumerGroupHandler.newFuture(groupId); KafkaFutureImpl> memFuture; if (options.removeAll()) { @@ -4215,8 +4159,8 @@ public RemoveMembersFromConsumerGroupResult removeMembersFromConsumerGroup(Strin } else { memFuture = new KafkaFutureImpl<>(); memFuture.complete(options.members().stream() - .map(m -> m.toMemberIdentity().setReason(reason)) - .collect(Collectors.toList())); + .map(m -> m.toMemberIdentity().setReason(reason)) + .collect(Collectors.toList())); } memFuture.whenComplete((members, ex) -> { @@ -4238,23 +4182,12 @@ public AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets( AlterConsumerGroupOffsetsOptions options ) { SimpleAdminApiFuture> future = - AlterConsumerGroupOffsetsHandler.newFuture(groupId); + AlterConsumerGroupOffsetsHandler.newFuture(groupId); AlterConsumerGroupOffsetsHandler handler = new AlterConsumerGroupOffsetsHandler(groupId, offsets, logContext); invokeDriver(handler, future, options.timeoutMs); return new AlterConsumerGroupOffsetsResult(future.get(CoordinatorKey.byGroupId(groupId))); } - @Override - public AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets( - String groupId, - Map offsets, - AlterStreamsGroupOffsetsOptions options - ) { - AlterConsumerGroupOffsetsOptions consumerGroupOptions = new AlterConsumerGroupOffsetsOptions() - .timeoutMs(options.timeoutMs()); - return new AlterStreamsGroupOffsetsResult(alterConsumerGroupOffsets(groupId, offsets, consumerGroupOptions)); - } - @Override public ListOffsetsResult listOffsets(Map topicPartitionOffsets, ListOffsetsOptions options) { @@ -4273,24 +4206,24 @@ public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, final long now = time.milliseconds(); runnable.call(new Call("describeClientQuotas", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { - @Override - DescribeClientQuotasRequest.Builder createRequest(int timeoutMs) { - return new DescribeClientQuotasRequest.Builder(filter); - } + @Override + DescribeClientQuotasRequest.Builder createRequest(int timeoutMs) { + return new DescribeClientQuotasRequest.Builder(filter); + } - @Override - void handleResponse(AbstractResponse abstractResponse) { - DescribeClientQuotasResponse response = (DescribeClientQuotasResponse) abstractResponse; - response.complete(future); - } + @Override + void handleResponse(AbstractResponse abstractResponse) { + DescribeClientQuotasResponse response = (DescribeClientQuotasResponse) abstractResponse; + response.complete(future); + } - @Override - void handleFailure(Throwable throwable) { - future.completeExceptionally(throwable); - } - }, now); + @Override + void handleFailure(Throwable throwable) { + future.completeExceptionally(throwable); + } + }, now); return new DescribeClientQuotasResult(future); } @@ -4304,24 +4237,24 @@ public AlterClientQuotasResult alterClientQuotas(Collection dataFuture = new KafkaFutureImpl<>(); final long now = time.milliseconds(); Call call = new Call("describeUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedNodeProvider()) { + new LeastLoadedNodeProvider()) { @Override public DescribeUserScramCredentialsRequest.Builder createRequest(final int timeoutMs) { final DescribeUserScramCredentialsRequestData requestData = new DescribeUserScramCredentialsRequestData(); @@ -4377,7 +4310,7 @@ public AlterUserScramCredentialsResult alterUserScramCredentials(List> futures = new HashMap<>(); - for (UserScramCredentialAlteration alteration : alterations) { + for (UserScramCredentialAlteration alteration: alterations) { futures.put(alteration.user(), new KafkaFutureImpl<>()); } final Map userIllegalAlterationExceptions = new HashMap<>(); @@ -4401,55 +4334,55 @@ public AlterUserScramCredentialsResult alterUserScramCredentials(List> userInsertions = new HashMap<>(); alterations.stream().filter(a -> a instanceof UserScramCredentialUpsertion) - .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user())) - .forEach(alteration -> { - final String user = alteration.user(); - if (user == null || user.isEmpty()) { - userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg)); - } else { - UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration; - try { - byte[] password = upsertion.password(); - if (password == null || password.length == 0) { - userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg)); - } else { - ScramMechanism mechanism = upsertion.credentialInfo().mechanism(); - if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) { - userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); + .filter(alteration -> !userIllegalAlterationExceptions.containsKey(alteration.user())) + .forEach(alteration -> { + final String user = alteration.user(); + if (user == null || user.isEmpty()) { + userIllegalAlterationExceptions.put(alteration.user(), new UnacceptableCredentialException(usernameMustNotBeEmptyMsg)); + } else { + UserScramCredentialUpsertion upsertion = (UserScramCredentialUpsertion) alteration; + try { + byte[] password = upsertion.password(); + if (password == null || password.length == 0) { + userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(passwordMustNotBeEmptyMsg)); } else { - userInsertions.putIfAbsent(user, new HashMap<>()); - userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion)); + ScramMechanism mechanism = upsertion.credentialInfo().mechanism(); + if (mechanism == null || mechanism == ScramMechanism.UNKNOWN) { + userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); + } else { + userInsertions.putIfAbsent(user, new HashMap<>()); + userInsertions.get(user).put(mechanism, getScramCredentialUpsertion(upsertion)); + } } + } catch (NoSuchAlgorithmException e) { + // we might overwrite an exception from a previous alteration, but we don't really care + // since we just need to mark this user as having at least one illegal alteration + // and make an exception instance available for completing the corresponding future exceptionally + userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); + } catch (InvalidKeyException e) { + // generally shouldn't happen since we deal with the empty password case above, + // but we still need to catch/handle it + userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e)); } - } catch (NoSuchAlgorithmException e) { - // we might overwrite an exception from a previous alteration, but we don't really care - // since we just need to mark this user as having at least one illegal alteration - // and make an exception instance available for completing the corresponding future exceptionally - userIllegalAlterationExceptions.put(user, new UnsupportedSaslMechanismException(unknownScramMechanismMsg)); - } catch (InvalidKeyException e) { - // generally shouldn't happen since we deal with the empty password case above, - // but we still need to catch/handle it - userIllegalAlterationExceptions.put(user, new UnacceptableCredentialException(e.getMessage(), e)); } - } - }); + }); // submit alterations only for users that do not have an illegal alteration as identified above Call call = new Call("alterUserScramCredentials", calcDeadlineMs(now, options.timeoutMs()), - new ControllerNodeProvider()) { + new ControllerNodeProvider()) { @Override public AlterUserScramCredentialsRequest.Builder createRequest(int timeoutMs) { return new AlterUserScramCredentialsRequest.Builder( - new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream() - .filter(a -> a instanceof UserScramCredentialUpsertion) - .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) - .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism())) - .collect(Collectors.toList())) + new AlterUserScramCredentialsRequestData().setUpsertions(alterations.stream() + .filter(a -> a instanceof UserScramCredentialUpsertion) + .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) + .map(a -> userInsertions.get(a.user()).get(((UserScramCredentialUpsertion) a).credentialInfo().mechanism())) + .collect(Collectors.toList())) .setDeletions(alterations.stream() - .filter(a -> a instanceof UserScramCredentialDeletion) - .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) - .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d)) - .collect(Collectors.toList()))); + .filter(a -> a instanceof UserScramCredentialDeletion) + .filter(a -> !userIllegalAlterationExceptions.containsKey(a.user())) + .map(d -> getScramCredentialDeletion((UserScramCredentialDeletion) d)) + .collect(Collectors.toList()))); } @Override @@ -4466,8 +4399,8 @@ public void handleResponse(AbstractResponse abstractResponse) { * Be sure to do this after the NOT_CONTROLLER error check above * so that all errors are consistent in that case. */ - userIllegalAlterationExceptions.forEach((key, value) -> - futures.get(key).completeExceptionally(value) + userIllegalAlterationExceptions.entrySet().stream().forEach(entry -> + futures.get(entry.getKey()).completeExceptionally(entry.getValue()) ); response.data().results().forEach(result -> { KafkaFutureImpl future = futures.get(result.user()); @@ -4499,10 +4432,10 @@ void handleFailure(Throwable throwable) { private static AlterUserScramCredentialsRequestData.ScramCredentialUpsertion getScramCredentialUpsertion(UserScramCredentialUpsertion u) throws InvalidKeyException, NoSuchAlgorithmException { AlterUserScramCredentialsRequestData.ScramCredentialUpsertion retval = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion(); return retval.setName(u.user()) - .setMechanism(u.credentialInfo().mechanism().type()) - .setIterations(u.credentialInfo().iterations()) - .setSalt(u.salt()) - .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations())); + .setMechanism(u.credentialInfo().mechanism().type()) + .setIterations(u.credentialInfo().iterations()) + .setSalt(u.salt()) + .setSaltedPassword(getSaltedPassword(u.credentialInfo().mechanism(), u.password(), u.salt(), u.credentialInfo().iterations())); } private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getScramCredentialDeletion(UserScramCredentialDeletion d) { @@ -4511,7 +4444,7 @@ private static AlterUserScramCredentialsRequestData.ScramCredentialDeletion getS private static byte[] getSaltedPassword(ScramMechanism publicScramMechanism, byte[] password, byte[] salt, int iterations) throws NoSuchAlgorithmException, InvalidKeyException { return new ScramFormatter(org.apache.kafka.common.security.scram.internals.ScramMechanism.forMechanismName(publicScramMechanism.mechanismName())) - .hi(password, salt, iterations); + .hi(password, salt, iterations); } @Override @@ -4637,7 +4570,7 @@ void handleResponse(AbstractResponse abstractResponse) { } // The server should send back a response for every feature, but we do a sanity check anyway. completeUnrealizedFutures(updateFutures.entrySet().stream(), - feature -> "The controller response did not contain a result for feature " + feature); + feature -> "The controller response did not contain a result for feature " + feature); } break; case NOT_CONTROLLER: @@ -4668,15 +4601,15 @@ public DescribeMetadataQuorumResult describeMetadataQuorum(DescribeMetadataQuoru final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) { + "describeMetadataQuorum", calcDeadlineMs(now, options.timeoutMs()), provider) { private QuorumInfo.ReplicaState translateReplicaState(DescribeQuorumResponseData.ReplicaState replica) { return new QuorumInfo.ReplicaState( - replica.replicaId(), - replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(), - replica.logEndOffset(), - replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()), - replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp())); + replica.replicaId(), + replica.replicaDirectoryId() == null ? Uuid.ZERO_UUID : replica.replicaDirectoryId(), + replica.logEndOffset(), + replica.lastFetchTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastFetchTimestamp()), + replica.lastCaughtUpTimestamp() == -1 ? OptionalLong.empty() : OptionalLong.of(replica.lastCaughtUpTimestamp())); } private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.PartitionData partition, DescribeQuorumResponseData.NodeCollection nodeCollection) { @@ -4709,7 +4642,7 @@ private QuorumInfo createQuorumResult(final DescribeQuorumResponseData.Partition @Override DescribeQuorumRequest.Builder createRequest(int timeoutMs) { return new Builder(DescribeQuorumRequest.singletonRequest( - new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition()))); + new TopicPartition(CLUSTER_METADATA_TOPIC_NAME, CLUSTER_METADATA_TOPIC_PARTITION.partition()))); } @Override @@ -4721,27 +4654,27 @@ void handleResponse(AbstractResponse response) { } if (quorumResponse.data().topics().size() != 1) { String msg = String.format("DescribeMetadataQuorum received %d topics when 1 was expected", - quorumResponse.data().topics().size()); + quorumResponse.data().topics().size()); log.debug(msg); throw new UnknownServerException(msg); } DescribeQuorumResponseData.TopicData topic = quorumResponse.data().topics().get(0); if (!topic.topicName().equals(CLUSTER_METADATA_TOPIC_NAME)) { String msg = String.format("DescribeMetadataQuorum received a topic with name %s when %s was expected", - topic.topicName(), CLUSTER_METADATA_TOPIC_NAME); + topic.topicName(), CLUSTER_METADATA_TOPIC_NAME); log.debug(msg); throw new UnknownServerException(msg); } if (topic.partitions().size() != 1) { String msg = String.format("DescribeMetadataQuorum received a topic %s with %d partitions when 1 was expected", - topic.topicName(), topic.partitions().size()); + topic.topicName(), topic.partitions().size()); log.debug(msg); throw new UnknownServerException(msg); } DescribeQuorumResponseData.PartitionData partition = topic.partitions().get(0); if (partition.partitionIndex() != CLUSTER_METADATA_TOPIC_PARTITION.partition()) { String msg = String.format("DescribeMetadataQuorum received a single partition with index %d when %d was expected", - partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition()); + partition.partitionIndex(), CLUSTER_METADATA_TOPIC_PARTITION.partition()); log.debug(msg); throw new UnknownServerException(msg); } @@ -4766,30 +4699,30 @@ public UnregisterBrokerResult unregisterBroker(int brokerId, UnregisterBrokerOpt final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call("unregisterBroker", calcDeadlineMs(now, options.timeoutMs()), - new LeastLoadedBrokerOrActiveKController()) { + new LeastLoadedNodeProvider()) { @Override UnregisterBrokerRequest.Builder createRequest(int timeoutMs) { UnregisterBrokerRequestData data = - new UnregisterBrokerRequestData().setBrokerId(brokerId); + new UnregisterBrokerRequestData().setBrokerId(brokerId); return new UnregisterBrokerRequest.Builder(data); } @Override void handleResponse(AbstractResponse abstractResponse) { final UnregisterBrokerResponse response = - (UnregisterBrokerResponse) abstractResponse; + (UnregisterBrokerResponse) abstractResponse; Errors error = Errors.forCode(response.data().errorCode()); switch (error) { case NONE: future.complete(null); break; case REQUEST_TIMED_OUT: - throw error.exception(response.data().errorMessage()); + throw error.exception(); default: log.error("Unregister broker request for broker ID {} failed: {}", - brokerId, response.data().errorMessage()); - future.completeExceptionally(error.exception(response.data().errorMessage())); + brokerId, error.message()); + future.completeExceptionally(error.exception()); break; } } @@ -4830,35 +4763,6 @@ public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortT return new AbortTransactionResult(future.all()); } - /** - * Forcefully terminates an ongoing transaction for a given transactional ID. - *

    - * This API is intended for well-formed but long-running transactions that are known to the - * transaction coordinator. It is primarily designed for supporting 2PC (two-phase commit) workflows, - * where a coordinator may need to unilaterally terminate a participant transaction that hasn't completed. - *

    - * - * @param transactionalId The transactional ID whose active transaction should be forcefully terminated. - * @return a {@link TerminateTransactionResult} that can be used to await the operation result. - */ - @Override - public TerminateTransactionResult forceTerminateTransaction(String transactionalId, TerminateTransactionOptions options) { - // Simply leverage the existing fenceProducers implementation with a single transactional ID - FenceProducersOptions fenceOptions = new FenceProducersOptions(); - if (options.timeoutMs() != null) { - fenceOptions.timeoutMs(options.timeoutMs()); - } - - FenceProducersResult fenceResult = fenceProducers( - Collections.singleton(transactionalId), - fenceOptions - ); - - // Convert the result to a TerminateTransactionResult - KafkaFuture future = fenceResult.fencedProducers().get(transactionalId); - return new TerminateTransactionResult(future); - } - @Override public ListTransactionsResult listTransactions(ListTransactionsOptions options) { AllBrokersStrategy.AllBrokersFuture> future = @@ -4877,45 +4781,6 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, return new FenceProducersResult(future.all()); } - @Override - public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { - final long now = time.milliseconds(); - final KafkaFutureImpl> future = new KafkaFutureImpl<>(); - final Call call = new Call("listConfigResources", calcDeadlineMs(now, options.timeoutMs()), new LeastLoadedNodeProvider()) { - - @Override - ListConfigResourcesRequest.Builder createRequest(int timeoutMs) { - return new ListConfigResourcesRequest.Builder( - new ListConfigResourcesRequestData() - .setResourceTypes( - configResourceTypes - .stream() - .map(ConfigResource.Type::id) - .collect(Collectors.toList()) - ) - ); - } - - @Override - void handleResponse(AbstractResponse abstractResponse) { - ListConfigResourcesResponse response = (ListConfigResourcesResponse) abstractResponse; - if (response.error().isFailure()) { - future.completeExceptionally(response.error().exception()); - } else { - future.complete(response.configResources()); - } - } - - @Override - void handleFailure(Throwable throwable) { - future.completeExceptionally(throwable); - } - }; - runnable.call(call, now); - return new ListConfigResourcesResult(future); - } - - @SuppressWarnings({"deprecation", "removal"}) @Override public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { final long now = time.milliseconds(); @@ -4924,26 +4789,17 @@ public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMet new LeastLoadedNodeProvider()) { @Override - ListConfigResourcesRequest.Builder createRequest(int timeoutMs) { - return new ListConfigResourcesRequest.Builder( - new ListConfigResourcesRequestData() - .setResourceTypes(List.of(ConfigResource.Type.CLIENT_METRICS.id())) - ); + ListClientMetricsResourcesRequest.Builder createRequest(int timeoutMs) { + return new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()); } @Override void handleResponse(AbstractResponse abstractResponse) { - ListConfigResourcesResponse response = (ListConfigResourcesResponse) abstractResponse; + ListClientMetricsResourcesResponse response = (ListClientMetricsResourcesResponse) abstractResponse; if (response.error().isFailure()) { future.completeExceptionally(response.error().exception()); } else { - future.complete(response - .data() - .configResources() - .stream() - .filter(entry -> entry.resourceType() == ConfigResource.Type.CLIENT_METRICS.id()) - .map(entry -> new ClientMetricsResourceListing(entry.resourceName())) - .collect(Collectors.toList())); + future.complete(response.clientMetricsResources()); } } @@ -4967,7 +4823,7 @@ public AddRaftVoterResult addRaftVoter( final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "addRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { + "addRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { @Override AddRaftVoterRequest.Builder createRequest(int timeoutMs) { @@ -4975,27 +4831,30 @@ AddRaftVoterRequest.Builder createRequest(int timeoutMs) { new AddRaftVoterRequestData.ListenerCollection(); endpoints.forEach(endpoint -> listeners.add(new AddRaftVoterRequestData.Listener(). - setName(endpoint.listener()). + setName(endpoint.name()). setHost(endpoint.host()). setPort(endpoint.port()))); return new AddRaftVoterRequest.Builder( - new AddRaftVoterRequestData(). - setClusterId(options.clusterId().orElse(null)). - setTimeoutMs(timeoutMs). - setVoterId(voterId). - setVoterDirectoryId(voterDirectoryId). - setListeners(listeners)); + new AddRaftVoterRequestData(). + setClusterId(options.clusterId().orElse(null)). + setTimeoutMs(timeoutMs). + setVoterId(voterId) . + setVoterDirectoryId(voterDirectoryId). + setListeners(listeners)); } @Override void handleResponse(AbstractResponse response) { handleNotControllerError(response); AddRaftVoterResponse addResponse = (AddRaftVoterResponse) response; - Errors error = Errors.forCode(addResponse.data().errorCode()); - if (error != Errors.NONE) - future.completeExceptionally(error.exception(addResponse.data().errorMessage())); - else + if (addResponse.data().errorCode() != Errors.NONE.code()) { + ApiError error = new ApiError( + addResponse.data().errorCode(), + addResponse.data().errorMessage()); + future.completeExceptionally(error.exception()); + } else { future.complete(null); + } } @Override @@ -5018,14 +4877,14 @@ public RemoveRaftVoterResult removeRaftVoter( final KafkaFutureImpl future = new KafkaFutureImpl<>(); final long now = time.milliseconds(); final Call call = new Call( - "removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { + "removeRaftVoter", calcDeadlineMs(now, options.timeoutMs()), provider) { @Override RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) { return new RemoveRaftVoterRequest.Builder( new RemoveRaftVoterRequestData(). setClusterId(options.clusterId().orElse(null)). - setVoterId(voterId). + setVoterId(voterId) . setVoterDirectoryId(voterDirectoryId)); } @@ -5033,11 +4892,14 @@ RemoveRaftVoterRequest.Builder createRequest(int timeoutMs) { void handleResponse(AbstractResponse response) { handleNotControllerError(response); RemoveRaftVoterResponse addResponse = (RemoveRaftVoterResponse) response; - Errors error = Errors.forCode(addResponse.data().errorCode()); - if (error != Errors.NONE) - future.completeExceptionally(error.exception(addResponse.data().errorMessage())); - else + if (addResponse.data().errorCode() != Errors.NONE.code()) { + ApiError error = new ApiError( + addResponse.data().errorCode(), + addResponse.data().errorMessage()); + future.completeExceptionally(error.exception()); + } else { future.complete(null); + } } @Override @@ -5146,8 +5008,6 @@ private static long getOffsetFromSpec(OffsetSpec offsetSpec) { return ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP; } else if (offsetSpec instanceof OffsetSpec.LatestTieredSpec) { return ListOffsetsRequest.LATEST_TIERED_TIMESTAMP; - } else if (offsetSpec instanceof OffsetSpec.EarliestPendingUploadSpec) { - return ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP; } return ListOffsetsRequest.LATEST_TIMESTAMP; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java index f90778db12ce6..7b6dbf302c65e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesOptions.java @@ -19,8 +19,6 @@ /** * Options for {@link Admin#listClientMetricsResources()}. - * @deprecated Since 4.1. Use {@link ListConfigResourcesOptions} instead. */ -@Deprecated(since = "4.1") public class ListClientMetricsResourcesOptions extends AbstractOptions { } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java index a4d0ed3cecb31..4a63e31c2381e 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListClientMetricsResourcesResult.java @@ -25,9 +25,7 @@ /** * The result of the {@link Admin#listClientMetricsResources()} call. *

    - * @deprecated Since 4.1. Use {@link ListConfigResourcesResult} instead. */ -@Deprecated(since = "4.1") public class ListClientMetricsResourcesResult { private final KafkaFuture> future; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java index 13797d1e9bb86..3da6c7385d316 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupOffsetsResult.java @@ -70,7 +70,7 @@ public KafkaFuture> partitionsToOffsetAnd * if requests for all the groups succeed. */ public KafkaFuture>> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])).thenApply( nil -> { Map> listedConsumerGroupOffsets = new HashMap<>(futures.size()); futures.forEach((key, future) -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java index d4cefd5856ead..52828cb16e28d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptions.java @@ -28,10 +28,7 @@ /** * Options for {@link Admin#listConsumerGroups()}. - * @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} instead. */ -@Deprecated(since = "4.1") -@SuppressWarnings("removal") public class ListConsumerGroupsOptions extends AbstractOptions { private Set groupStates = Collections.emptySet(); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java index f9bc452005f63..e4394e13a86d8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListConsumerGroupsResult.java @@ -25,10 +25,8 @@ /** * The result of the {@link Admin#listConsumerGroups()} call. - * @deprecated Since 4.1. Use {@link Admin#listGroups(ListGroupsOptions)} instead. + *

    */ -@Deprecated(since = "4.1") -@SuppressWarnings("removal") public class ListConsumerGroupsResult { private final KafkaFutureImpl> all; private final KafkaFutureImpl> valid; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java index 7d7083f46c5c7..d1fa2c7b288c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListGroupsOptions.java @@ -17,11 +17,11 @@ package org.apache.kafka.clients.admin; -import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; import org.apache.kafka.common.GroupState; import org.apache.kafka.common.GroupType; import org.apache.kafka.common.annotation.InterfaceStability; +import java.util.Collections; import java.util.Set; /** @@ -32,37 +32,8 @@ @InterfaceStability.Evolving public class ListGroupsOptions extends AbstractOptions { - private Set groupStates = Set.of(); - private Set types = Set.of(); - private Set protocolTypes = Set.of(); - - /** - * Only consumer groups will be returned by listGroups(). - * This operation sets filters on group type and protocol type which select consumer groups. - */ - public static ListGroupsOptions forConsumerGroups() { - return new ListGroupsOptions() - .withTypes(Set.of(GroupType.CLASSIC, GroupType.CONSUMER)) - .withProtocolTypes(Set.of("", ConsumerProtocol.PROTOCOL_TYPE)); - } - - /** - * Only share groups will be returned by listGroups(). - * This operation sets a filter on group type which select share groups. - */ - public static ListGroupsOptions forShareGroups() { - return new ListGroupsOptions() - .withTypes(Set.of(GroupType.SHARE)); - } - - /** - * Only streams groups will be returned by listGroups(). - * This operation sets a filter on group type which select streams groups. - */ - public static ListGroupsOptions forStreamsGroups() { - return new ListGroupsOptions() - .withTypes(Set.of(GroupType.STREAMS)); - } + private Set groupStates = Collections.emptySet(); + private Set types = Collections.emptySet(); /** * If groupStates is set, only groups in these states will be returned by listGroups(). @@ -70,16 +41,7 @@ public static ListGroupsOptions forStreamsGroups() { * This operation is supported by brokers with version 2.6.0 or later. */ public ListGroupsOptions inGroupStates(Set groupStates) { - this.groupStates = (groupStates == null || groupStates.isEmpty()) ? Set.of() : Set.copyOf(groupStates); - return this; - } - - /** - * If protocol types is set, only groups of these protocol types will be returned by listGroups(). - * Otherwise, all groups are returned. - */ - public ListGroupsOptions withProtocolTypes(Set protocolTypes) { - this.protocolTypes = (protocolTypes == null || protocolTypes.isEmpty()) ? Set.of() : Set.copyOf(protocolTypes); + this.groupStates = (groupStates == null || groupStates.isEmpty()) ? Collections.emptySet() : Set.copyOf(groupStates); return this; } @@ -99,13 +61,6 @@ public Set groupStates() { return groupStates; } - /** - * Returns the list of protocol types that are requested or empty if no protocol types have been specified. - */ - public Set protocolTypes() { - return protocolTypes; - } - /** * Returns the list of group types that are requested or empty if no types have been specified. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java index 3d72409a72aca..f8d694c6b24df 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListOffsetsResult.java @@ -52,7 +52,7 @@ public KafkaFuture partitionResult(final TopicPartition p * retrieved. */ public KafkaFuture> all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])) .thenApply(v -> { Map offsets = new HashMap<>(futures.size()); for (Map.Entry> entry : futures.entrySet()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java index 72a796308d45b..0768b1a75a71c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ListTransactionsOptions.java @@ -31,7 +31,6 @@ public class ListTransactionsOptions extends AbstractOptions filteredProducerIds = Collections.emptySet(); private long filteredDuration = -1L; - private String filteredTransactionalIdPattern; /** * Filter only the transactions that are in a specific set of states. If no filter * is specified or if the passed set of states is empty, then transactions in all @@ -71,19 +70,6 @@ public ListTransactionsOptions filterOnDuration(long durationMs) { return this; } - /** - * Filter only the transactions that match with the given transactional ID pattern. - * If the filter is null or if the passed string is empty, - * then all the transactions will be returned. - * - * @param pattern the transactional ID regular expression pattern to filter by - * @return this object - */ - public ListTransactionsOptions filterOnTransactionalIdPattern(String pattern) { - this.filteredTransactionalIdPattern = pattern; - return this; - } - /** * Returns the set of states to be filtered or empty if no states have been specified. * @@ -113,23 +99,12 @@ public long filteredDuration() { return filteredDuration; } - /** - * Returns transactional ID being filtered. - * - * @return the current transactional ID pattern filter (empty means no transactional IDs are filtered and all - * transactions will be returned) - */ - public String filteredTransactionalIdPattern() { - return filteredTransactionalIdPattern; - } - @Override public String toString() { return "ListTransactionsOptions(" + "filteredStates=" + filteredStates + ", filteredProducerIds=" + filteredProducerIds + ", filteredDuration=" + filteredDuration + - ", filteredTransactionalIdPattern=" + filteredTransactionalIdPattern + ", timeoutMs=" + timeoutMs + ')'; } @@ -141,12 +116,11 @@ public boolean equals(Object o) { ListTransactionsOptions that = (ListTransactionsOptions) o; return Objects.equals(filteredStates, that.filteredStates) && Objects.equals(filteredProducerIds, that.filteredProducerIds) && - Objects.equals(filteredDuration, that.filteredDuration) && - Objects.equals(filteredTransactionalIdPattern, that.filteredTransactionalIdPattern); + Objects.equals(filteredDuration, that.filteredDuration); } @Override public int hashCode() { - return Objects.hash(filteredStates, filteredProducerIds, filteredDuration, filteredTransactionalIdPattern); + return Objects.hash(filteredStates, filteredProducerIds, filteredDuration); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java index 340e88db16010..665c86649ba37 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/LogDirDescription.java @@ -67,7 +67,6 @@ public Map replicaInfos() { /** * The total size of the volume this log directory is on or empty if the broker did not return a value. * For volumes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned. - * This value does not include the size of data stored in remote storage. */ public OptionalLong totalBytes() { return totalBytes; @@ -76,7 +75,6 @@ public OptionalLong totalBytes() { /** * The usable size on the volume this log directory is on or empty if the broker did not return a value. * For usable sizes larger than Long.MAX_VALUE, Long.MAX_VALUE is returned. - * This value does not include the size of data stored in remote storage. */ public OptionalLong usableBytes() { return usableBytes; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java index 0f1107c91c9be..aabb535c94901 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/NewTopic.java @@ -66,7 +66,6 @@ public NewTopic(String name, Optional numPartitions, Optional re * @param name the topic name. * @param replicasAssignments a map from partition id to replica ids (i.e. broker ids). Although not enforced, it is * generally a good idea for all partitions to have the same number of replicas. - * The first replica will be treated as the preferred leader. */ public NewTopic(String name, Map> replicasAssignments) { this.name = name; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java b/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java index ad73c8d51f086..68f94cc493e5a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/OffsetSpec.java @@ -28,7 +28,6 @@ public static class LatestSpec extends OffsetSpec { } public static class MaxTimestampSpec extends OffsetSpec { } public static class EarliestLocalSpec extends OffsetSpec { } public static class LatestTieredSpec extends OffsetSpec { } - public static class EarliestPendingUploadSpec extends OffsetSpec { } public static class TimestampSpec extends OffsetSpec { private final long timestamp; @@ -92,13 +91,4 @@ public static OffsetSpec earliestLocal() { public static OffsetSpec latestTiered() { return new LatestTieredSpec(); } - - /** - * Used to retrieve the earliest offset of records that are pending upload to remote storage. - *
    - * Note: When tiered storage is not enabled, we will return unknown offset. - */ - public static OffsetSpec earliestPendingUpload() { - return new EarliestPendingUploadSpec(); - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java b/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java index ba5b39284ebea..984ac9993933d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/RaftVoterEndpoint.java @@ -26,7 +26,7 @@ */ @InterfaceStability.Stable public class RaftVoterEndpoint { - private final String listener; + private final String name; private final String host; private final int port; @@ -49,33 +49,22 @@ static String requireNonNullAllCapsNonEmpty(String input) { /** * Create an endpoint for a metadata quorum voter. * - * @param listener The human-readable name for this endpoint. For example, CONTROLLER. + * @param name The human-readable name for this endpoint. For example, CONTROLLER. * @param host The DNS hostname for this endpoint. * @param port The network port for this endpoint. */ public RaftVoterEndpoint( - String listener, + String name, String host, int port ) { - this.listener = requireNonNullAllCapsNonEmpty(listener); + this.name = requireNonNullAllCapsNonEmpty(name); this.host = Objects.requireNonNull(host); this.port = port; } - /** - * The listener name for this endpoint. - */ - public String listener() { - return listener; - } - - /** - * @deprecated Since 4.1. Use {@link #listener()} instead. This function will be removed in 5.0. - */ - @Deprecated(since = "4.1", forRemoval = true) public String name() { - return listener; + return name; } public String host() { @@ -90,20 +79,20 @@ public int port() { public boolean equals(Object o) { if (o == null || (!o.getClass().equals(getClass()))) return false; RaftVoterEndpoint other = (RaftVoterEndpoint) o; - return listener.equals(other.listener) && + return name.equals(other.name) && host.equals(other.host) && port == other.port; } @Override public int hashCode() { - return Objects.hash(listener, host, port); + return Objects.hash(name, host, port); } @Override public String toString() { // enclose IPv6 hosts in square brackets for readability String hostString = host.contains(":") ? "[" + host + "]" : host; - return listener + "://" + hostString + ":" + port; + return name + "://" + hostString + ":" + port; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java b/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java index 57421e3568b4f..d3da26b03bbb0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/RecordsToDelete.java @@ -33,16 +33,14 @@ private RecordsToDelete(long offset) { /** * Delete all the records before the given {@code offset} * - * @param offset The offset before which all records will be deleted. - * Use {@code -1} to truncate to the high watermark. + * @param offset the offset before which all records will be deleted */ public static RecordsToDelete beforeOffset(long offset) { return new RecordsToDelete(offset); } /** - * The offset before which all records will be deleted. - * Use {@code -1} to truncate to the high watermark. + * The offset before which all records will be deleted */ public long beforeOffset() { return offset; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java b/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java index efe645b704d16..b77375d59605d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ReplicaInfo.java @@ -33,7 +33,6 @@ public ReplicaInfo(long size, long offsetLag, boolean isFuture) { /** * The total size of the log segments in this replica in bytes. - * This value does not include the size of data stored in remote storage. */ public long size() { return size; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java index 469c23428eb9b..913667bcf4779 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ShareGroupDescription.java @@ -38,32 +38,24 @@ public class ShareGroupDescription { private final Collection members; private final GroupState groupState; private final Node coordinator; - private final int groupEpoch; - private final int targetAssignmentEpoch; private final Set authorizedOperations; public ShareGroupDescription(String groupId, Collection members, GroupState groupState, - Node coordinator, - int groupEpoch, - int targetAssignmentEpoch) { - this(groupId, members, groupState, coordinator, groupEpoch, targetAssignmentEpoch, Collections.emptySet()); + Node coordinator) { + this(groupId, members, groupState, coordinator, Collections.emptySet()); } public ShareGroupDescription(String groupId, Collection members, GroupState groupState, Node coordinator, - int groupEpoch, - int targetAssignmentEpoch, Set authorizedOperations) { this.groupId = groupId == null ? "" : groupId; this.members = members == null ? Collections.emptyList() : List.copyOf(members); this.groupState = groupState; this.coordinator = coordinator; - this.groupEpoch = groupEpoch; - this.targetAssignmentEpoch = targetAssignmentEpoch; this.authorizedOperations = authorizedOperations; } @@ -76,14 +68,12 @@ public boolean equals(final Object o) { Objects.equals(members, that.members) && groupState == that.groupState && Objects.equals(coordinator, that.coordinator) && - groupEpoch == that.groupEpoch && - targetAssignmentEpoch == that.targetAssignmentEpoch && Objects.equals(authorizedOperations, that.authorizedOperations); } @Override public int hashCode() { - return Objects.hash(groupId, members, groupState, coordinator, groupEpoch, targetAssignmentEpoch, authorizedOperations); + return Objects.hash(groupId, members, groupState, coordinator, authorizedOperations); } /** @@ -121,28 +111,12 @@ public Set authorizedOperations() { return authorizedOperations; } - /** - * The epoch of the share group. - */ - public int groupEpoch() { - return groupEpoch; - } - - /** - * The epoch of the target assignment. - */ - public int targetAssignmentEpoch() { - return targetAssignmentEpoch; - } - @Override public String toString() { return "(groupId=" + groupId + ", members=" + members.stream().map(ShareMemberDescription::toString).collect(Collectors.joining(",")) + ", groupState=" + groupState + ", coordinator=" + coordinator + - ", groupEpoch=" + groupEpoch + - ", targetAssignmentEpoch=" + targetAssignmentEpoch + ", authorizedOperations=" + authorizedOperations + ")"; } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java index 5fb74d8b24276..57f2d90ae86f6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/ShareMemberDescription.java @@ -30,21 +30,18 @@ public class ShareMemberDescription { private final String clientId; private final String host; private final ShareMemberAssignment assignment; - private final int memberEpoch; public ShareMemberDescription( String memberId, String clientId, String host, - ShareMemberAssignment assignment, - int memberEpoch + ShareMemberAssignment assignment ) { this.memberId = memberId == null ? "" : memberId; this.clientId = clientId == null ? "" : clientId; this.host = host == null ? "" : host; this.assignment = assignment == null ? new ShareMemberAssignment(Collections.emptySet()) : assignment; - this.memberEpoch = memberEpoch; } @Override @@ -55,13 +52,12 @@ public boolean equals(Object o) { return memberId.equals(that.memberId) && clientId.equals(that.clientId) && host.equals(that.host) && - assignment.equals(that.assignment) && - memberEpoch == that.memberEpoch; + assignment.equals(that.assignment); } @Override public int hashCode() { - return Objects.hash(memberId, clientId, host, assignment, memberEpoch); + return Objects.hash(memberId, clientId, host, assignment); } /** @@ -92,20 +88,11 @@ public ShareMemberAssignment assignment() { return assignment; } - /** - * The epoch of the group member. - */ - public int memberEpoch() { - return memberEpoch; - } - @Override public String toString() { return "(memberId=" + memberId + ", clientId=" + clientId + ", host=" + host + - ", assignment=" + assignment + - ", memberEpoch=" + memberEpoch + - ")"; + ", assignment=" + assignment + ")"; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java b/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java index 63e0a06ea2486..bb814d51e74fd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/UnregisterBrokerOptions.java @@ -20,5 +20,5 @@ /** * Options for {@link Admin#unregisterBroker(int, UnregisterBrokerOptions)}. */ -public class UnregisterBrokerOptions extends AbstractOptions { +public class UnregisterBrokerOptions extends AbstractOptions { } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesResult.java b/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesResult.java index 36418ecb83f5b..9939b48fa975f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesResult.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/UpdateFeaturesResult.java @@ -44,6 +44,6 @@ public Map> values() { * Return a future which succeeds if all the feature updates succeed. */ public KafkaFuture all() { - return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); + return KafkaFuture.allOf(futures.values().toArray(new KafkaFuture[0])); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java index 77a491a320180..8e115c2944ded 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddresses.java @@ -95,7 +95,7 @@ public boolean equals(Object o) { public String toString() { StringBuilder bld = new StringBuilder(); bld.append("AdminBootstrapAddresses"); - bld.append("(usingBootstrapControllers=").append(usingBootstrapControllers); + bld.append("(usingBoostrapControllers=").append(usingBootstrapControllers); bld.append(", addresses=["); String prefix = ""; for (InetSocketAddress address : addresses) { diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java index 0ac5419991e1b..9dc2e190d134c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AdminMetadataManager.java @@ -23,10 +23,11 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.AuthenticationException; +import org.apache.kafka.common.errors.MismatchedEndpointTypeException; +import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestHeader; -import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; @@ -276,21 +277,23 @@ public void updateFailed(Throwable exception) { // We depend on pending calls to request another metadata update this.state = State.QUIESCENT; - if (RequestUtils.isFatalException(exception)) { - log.warn("Fatal error during metadata update", exception); - // avoid unchecked/unconfirmed cast to ApiException - if (exception instanceof ApiException) { - this.fatalException = (ApiException) exception; - } - - if (exception instanceof UnsupportedVersionException) { - if (usingBootstrapControllers) { - log.warn("The remote node is not a CONTROLLER that supports the KIP-919 " + - "DESCRIBE_CLUSTER api.", exception); - } else { - log.warn("The remote node is not a BROKER that supports the METADATA api.", exception); - } + if (exception instanceof AuthenticationException) { + log.warn("Metadata update failed due to authentication error", exception); + this.fatalException = (ApiException) exception; + } else if (exception instanceof MismatchedEndpointTypeException) { + log.warn("Metadata update failed due to mismatched endpoint type error", exception); + this.fatalException = (ApiException) exception; + } else if (exception instanceof UnsupportedEndpointTypeException) { + log.warn("Metadata update failed due to unsupported endpoint type error", exception); + this.fatalException = (ApiException) exception; + } else if (exception instanceof UnsupportedVersionException) { + if (usingBootstrapControllers) { + log.warn("The remote node is not a CONTROLLER that supports the KIP-919 " + + "DESCRIBE_CLUSTER api.", exception); + } else { + log.warn("The remote node is not a BROKER that supports the METADATA api.", exception); } + this.fatalException = (ApiException) exception; } else { log.info("Metadata update failed", exception); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java index 99111a70d4bae..5ef72f327d637 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/AlterConsumerGroupOffsetsHandler.java @@ -108,7 +108,7 @@ public OffsetCommitRequest.Builder buildBatchedRequest( .setGroupId(groupId.idValue) .setTopics(new ArrayList<>(offsetData.values())); - return OffsetCommitRequest.Builder.forTopicNames(data); + return new OffsetCommitRequest.Builder(data); } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandler.java index c3b248838f3d8..0d581243ddc7b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandler.java @@ -16,14 +16,37 @@ */ package org.apache.kafka.clients.admin.internals; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.message.DeleteGroupsRequestData; +import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.AbstractResponse; +import org.apache.kafka.common.requests.DeleteGroupsRequest; +import org.apache.kafka.common.requests.DeleteGroupsResponse; +import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.utils.LogContext; -public class DeleteConsumerGroupsHandler extends DeleteGroupsHandler { +import org.slf4j.Logger; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.Collectors; + +public class DeleteConsumerGroupsHandler extends AdminApiHandler.Batched { + + private final Logger log; + private final AdminApiLookupStrategy lookupStrategy; public DeleteConsumerGroupsHandler( LogContext logContext ) { - super(logContext, DeleteConsumerGroupsHandler.class); + this.log = logContext.logger(DeleteConsumerGroupsHandler.class); + this.lookupStrategy = new CoordinatorStrategy(CoordinatorType.GROUP, logContext); } @Override @@ -32,7 +55,92 @@ public String apiName() { } @Override - public String displayName() { - return "DeleteConsumerGroups"; + public AdminApiLookupStrategy lookupStrategy() { + return lookupStrategy; + } + + public static AdminApiFuture.SimpleAdminApiFuture newFuture( + Collection groupIds + ) { + return AdminApiFuture.forKeys(buildKeySet(groupIds)); + } + + private static Set buildKeySet(Collection groupIds) { + return groupIds.stream() + .map(CoordinatorKey::byGroupId) + .collect(Collectors.toSet()); + } + + @Override + public DeleteGroupsRequest.Builder buildBatchedRequest( + int coordinatorId, + Set keys + ) { + List groupIds = keys.stream().map(key -> key.idValue).collect(Collectors.toList()); + DeleteGroupsRequestData data = new DeleteGroupsRequestData() + .setGroupsNames(groupIds); + return new DeleteGroupsRequest.Builder(data); + } + + @Override + public ApiResult handleResponse( + Node coordinator, + Set groupIds, + AbstractResponse abstractResponse + ) { + final DeleteGroupsResponse response = (DeleteGroupsResponse) abstractResponse; + final Map completed = new HashMap<>(); + final Map failed = new HashMap<>(); + final Set groupsToUnmap = new HashSet<>(); + + for (DeletableGroupResult deletedGroup : response.data().results()) { + CoordinatorKey groupIdKey = CoordinatorKey.byGroupId(deletedGroup.groupId()); + Errors error = Errors.forCode(deletedGroup.errorCode()); + if (error != Errors.NONE) { + handleError(groupIdKey, error, failed, groupsToUnmap); + continue; + } + + completed.put(groupIdKey, null); + } + + return new ApiResult<>(completed, failed, new ArrayList<>(groupsToUnmap)); } + + private void handleError( + CoordinatorKey groupId, + Errors error, + Map failed, + Set groupsToUnmap + ) { + switch (error) { + case GROUP_AUTHORIZATION_FAILED: + case INVALID_GROUP_ID: + case NON_EMPTY_GROUP: + case GROUP_ID_NOT_FOUND: + log.debug("`DeleteConsumerGroups` request for group id {} failed due to error {}", groupId.idValue, error); + failed.put(groupId, error.exception()); + break; + + case COORDINATOR_LOAD_IN_PROGRESS: + // If the coordinator is in the middle of loading, then we just need to retry + log.debug("`DeleteConsumerGroups` request for group id {} failed because the coordinator " + + "is still in the process of loading state. Will retry", groupId.idValue); + break; + + case COORDINATOR_NOT_AVAILABLE: + case NOT_COORDINATOR: + // If the coordinator is unavailable or there was a coordinator change, then we unmap + // the key so that we retry the `FindCoordinator` request + log.debug("`DeleteConsumerGroups` request for group id {} returned error {}. " + + "Will attempt to find the coordinator again and retry", groupId.idValue, error); + groupsToUnmap.add(groupId); + break; + + default: + log.error("`DeleteConsumerGroups` request for group id {} failed due to unexpected error {}", groupId.idValue, error); + failed.put(groupId, error.exception()); + } + } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java index 1f49a0d60580d..a763a4255e661 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/DescribeShareGroupsHandler.java @@ -92,7 +92,7 @@ public ShareGroupDescribeRequest.Builder buildBatchedRequest(int coordinatorId, ShareGroupDescribeRequestData data = new ShareGroupDescribeRequestData() .setGroupIds(groupIds) .setIncludeAuthorizedOperations(includeAuthorizedOperations); - return new ShareGroupDescribeRequest.Builder(data); + return new ShareGroupDescribeRequest.Builder(data, true); } @Override @@ -121,8 +121,7 @@ public ApiResult handleResponse( groupMember.memberId(), groupMember.clientId(), groupMember.clientHost(), - new ShareMemberAssignment(convertAssignment(groupMember.assignment())), - groupMember.memberEpoch() + new ShareMemberAssignment(convertAssignment(groupMember.assignment())) )) ); @@ -131,8 +130,6 @@ public ApiResult handleResponse( memberDescriptions, GroupState.parse(describedGroup.groupState()), coordinator, - describedGroup.groupEpoch(), - describedGroup.assignmentEpoch(), authorizedOperations); completed.put(groupIdKey, shareGroupDescription); } @@ -159,9 +156,7 @@ private void handleError( Set groupsToUnmap) { switch (error) { case GROUP_AUTHORIZATION_FAILED: - case TOPIC_AUTHORIZATION_FAILED: log.debug("`DescribeShareGroups` request for group id {} failed due to error {}", groupId.idValue, error); - // The topic auth response received on DescribeShareGroup is a generic one not including topic names, so we just pass it on unchanged here. failed.put(groupId, error.exception(errorMsg)); break; diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java index febc4033223b5..4c0e3db925404 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandler.java @@ -20,13 +20,11 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.slf4j.Logger; @@ -37,6 +35,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -87,32 +86,15 @@ private static Set coordinatorKeys(Collection groupIds) } public OffsetFetchRequest.Builder buildBatchedRequest(Set groupIds) { - // Create a request that only contains the consumer groups owned by the coordinator. - return OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(groupIds.stream().map(groupId -> { - ListConsumerGroupOffsetsSpec spec = groupSpecs.get(groupId.idValue); - - List topics = null; - if (spec.topicPartitions() != null) { - topics = spec.topicPartitions().stream() - .collect(Collectors.groupingBy(TopicPartition::topic)) - .entrySet() - .stream() - .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(entry.getKey()) - .setPartitionIndexes(entry.getValue().stream() - .map(TopicPartition::partition) - .collect(Collectors.toList()))) - .collect(Collectors.toList()); - } - return new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId.idValue) - .setTopics(topics); - }).collect(Collectors.toList())), - false - ); + // Create a map that only contains the consumer groups owned by the coordinator. + Map> coordinatorGroupIdToTopicPartitions = new HashMap<>(groupIds.size()); + groupIds.forEach(g -> { + ListConsumerGroupOffsetsSpec spec = groupSpecs.get(g.idValue); + List partitions = spec.topicPartitions() != null ? new ArrayList<>(spec.topicPartitions()) : null; + coordinatorGroupIdToTopicPartitions.put(g.idValue, partitions); + }); + + return new OffsetFetchRequest.Builder(coordinatorGroupIdToTopicPartitions, requireStable, false); } @Override @@ -139,52 +121,40 @@ public ApiResult> handleR ) { validateKeys(groupIds); - var response = (OffsetFetchResponse) abstractResponse; - var completed = new HashMap>(); - var failed = new HashMap(); - var unmapped = new ArrayList(); + final OffsetFetchResponse response = (OffsetFetchResponse) abstractResponse; + Map> completed = new HashMap<>(); + Map failed = new HashMap<>(); + List unmapped = new ArrayList<>(); for (CoordinatorKey coordinatorKey : groupIds) { - var groupId = coordinatorKey.idValue; - var group = response.group(groupId); - var error = Errors.forCode(group.errorCode()); - - if (error != Errors.NONE) { - handleGroupError( - coordinatorKey, - error, - failed, - unmapped - ); + String group = coordinatorKey.idValue; + if (response.groupHasError(group)) { + handleGroupError(CoordinatorKey.byGroupId(group), response.groupLevelError(group), failed, unmapped); } else { - var offsets = new HashMap(); - - group.topics().forEach(topic -> - topic.partitions().forEach(partition -> { - var tp = new TopicPartition(topic.name(), partition.partitionIndex()); - var partitionError = Errors.forCode(partition.errorCode()); - - if (partitionError == Errors.NONE) { - // Negative offset indicates that the group has no committed offset for this partition. - if (partition.committedOffset() < 0) { - offsets.put(tp, null); - } else { - offsets.put(tp, new OffsetAndMetadata( - partition.committedOffset(), - RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), - partition.metadata() - )); - } + final Map groupOffsetsListing = new HashMap<>(); + Map responseData = response.partitionDataMap(group); + for (Map.Entry partitionEntry : responseData.entrySet()) { + final TopicPartition topicPartition = partitionEntry.getKey(); + OffsetFetchResponse.PartitionData partitionData = partitionEntry.getValue(); + final Errors error = partitionData.error; + + if (error == Errors.NONE) { + final long offset = partitionData.offset; + final String metadata = partitionData.metadata; + final Optional leaderEpoch = partitionData.leaderEpoch; + // Negative offset indicates that the group has no committed offset for this partition + if (offset < 0) { + groupOffsetsListing.put(topicPartition, null); } else { - log.warn("Skipping return offset for {} due to error {}.", tp, partitionError); + groupOffsetsListing.put(topicPartition, new OffsetAndMetadata(offset, leaderEpoch, metadata)); } - }) - ); - - completed.put(coordinatorKey, offsets); + } else { + log.warn("Skipping return offset for {} due to error {}.", topicPartition, error); + } + } + completed.put(CoordinatorKey.byGroupId(group), groupOffsetsListing); } } - return new ApiResult<>(completed, failed, unmapped); } diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java index 330a9efaf9b6c..f7c495d7fd8aa 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListOffsetsHandler.java @@ -103,17 +103,12 @@ ListOffsetsRequest.Builder buildBatchedRequest(int brokerId, Set .stream() .anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.LATEST_TIERED_TIMESTAMP); - boolean requireEarliestPendingUploadTimestamp = keys - .stream() - .anyMatch(key -> offsetTimestampsByPartition.get(key) == ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP); - int timeoutMs = options.timeoutMs() != null ? options.timeoutMs() : defaultApiTimeoutMs; return ListOffsetsRequest.Builder.forConsumer(true, options.isolationLevel(), supportsMaxTimestamp, requireEarliestLocalTimestamp, - requireTieredStorageTimestamp, - requireEarliestPendingUploadTimestamp) + requireTieredStorageTimestamp) .setTargetTimes(new ArrayList<>(topicsByName.values())) .setTimeoutMs(timeoutMs); } @@ -202,7 +197,7 @@ private void handlePartitionError( public Map handleUnsupportedVersionException( int brokerId, UnsupportedVersionException exception, Set keys ) { - log.warn("Broker {} does not support MAX_TIMESTAMP offset specs", brokerId); + log.warn("Broker " + brokerId + " does not support MAX_TIMESTAMP offset specs"); Map maxTimestampPartitions = new HashMap<>(); for (TopicPartition topicPartition : keys) { Long offsetTimestamp = offsetTimestampsByPartition.get(topicPartition); diff --git a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java index f47d9f90189a5..71b8e1a7c5607 100644 --- a/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandler.java @@ -75,9 +75,6 @@ public ListTransactionsRequest.Builder buildBatchedRequest( .map(TransactionState::toString) .collect(Collectors.toList())); request.setDurationFilter(options.filteredDuration()); - if (options.filteredTransactionalIdPattern() != null && !options.filteredTransactionalIdPattern().isEmpty()) { - request.setTransactionalIdPattern(options.filteredTransactionalIdPattern()); - } return new ListTransactionsRequest.Builder(request); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java index b42bc13536304..14b5415c2a407 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgeType.java @@ -20,10 +20,6 @@ import java.util.Locale; -/** - * The acknowledge type is used with {@link KafkaShareConsumer#acknowledge(ConsumerRecord, AcknowledgeType)} to indicate - * whether the record was consumed successfully. - */ @InterfaceStability.Evolving public enum AcknowledgeType { /** The record was consumed successfully. */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java index c84f9c78d0d1b..f37fbe0575079 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/AcknowledgementCommitCallback.java @@ -20,10 +20,8 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.errors.AuthorizationException; -import org.apache.kafka.common.errors.DisconnectException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidRecordStateException; -import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.WakeupException; import java.util.Map; @@ -44,16 +42,12 @@ public interface AcknowledgementCommitCallback { * * @param exception The exception thrown during processing of the request, or null if the acknowledgement completed successfully. *

      - *
    • {@link AuthorizationException} if not authorized to the topic or group *
    • {@link InvalidRecordStateException} if the record state is invalid - *
    • {@link NotLeaderOrFollowerException} if the leader had changed by the time the acknowledgements were sent - *
    • {@link DisconnectException} if the broker disconnected before the request could be completed + *
    • {@link AuthorizationException} if not authorized to the topic of group *
    • {@link WakeupException} if {@link KafkaShareConsumer#wakeup()} is called before or while this function is called *
    • {@link InterruptException} if the calling thread is interrupted before or while this function is called *
    • {@link KafkaException} for any other unrecoverable errors *
    - *

    Note that even if the exception is a retriable exception, the acknowledgement could not be completed and the - * records need to be fetched again. The callback is called after any retries have been performed. */ void onComplete(Map> offsets, Exception exception); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java index 365d19d41349a..2c8376e5ccd8a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/Consumer.java @@ -279,16 +279,11 @@ public interface Consumer extends Closeable { /** * @see KafkaConsumer#close(Duration) */ - @Deprecated void close(Duration timeout); - /** - * @see KafkaConsumer#close(CloseOptions) - */ - void close(final CloseOptions option); - /** * @see KafkaConsumer#wakeup() */ void wakeup(); + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java index ee62dc9561b41..d9cee3ec1796a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerConfig.java @@ -20,7 +20,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; -import org.apache.kafka.clients.consumer.internals.ShareAcknowledgementMode; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; @@ -37,6 +36,7 @@ import org.apache.kafka.common.utils.Utils; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -62,9 +62,9 @@ public class ConsumerConfig extends AbstractConfig { // a list contains all the assignor names that only assign subscribed topics to consumer. Should be updated when new assignor added. // This is to help optimize ConsumerCoordinator#performAssignment method public static final List ASSIGN_FROM_SUBSCRIBED_ASSIGNORS = List.of( - RANGE_ASSIGNOR_NAME, - ROUNDROBIN_ASSIGNOR_NAME, - STICKY_ASSIGNOR_NAME, + RANGE_ASSIGNOR_NAME, + ROUNDROBIN_ASSIGNOR_NAME, + STICKY_ASSIGNOR_NAME, COOPERATIVE_STICKY_ASSIGNOR_NAME ); @@ -185,7 +185,7 @@ public class ConsumerConfig extends AbstractConfig { */ public static final String FETCH_MIN_BYTES_CONFIG = "fetch.min.bytes"; public static final int DEFAULT_FETCH_MIN_BYTES = 1; - private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of " + DEFAULT_FETCH_MIN_BYTES + " byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency. Even if the total data available in the broker exceeds fetch.min.bytes, the actual returned size may still be less than this value due to per-partition limits max.partition.fetch.bytes and max returned limits fetch.max.bytes."; + private static final String FETCH_MIN_BYTES_DOC = "The minimum amount of data the server should return for a fetch request. If insufficient data is available the request will wait for that much data to accumulate before answering the request. The default setting of " + DEFAULT_FETCH_MIN_BYTES + " byte means that fetch requests are answered as soon as that many byte(s) of data is available or the fetch request times out waiting for data to arrive. Setting this to a larger value will cause the server to wait for larger amounts of data to accumulate which can improve server throughput a bit at the cost of some additional latency."; /** * fetch.max.bytes @@ -195,8 +195,7 @@ public class ConsumerConfig extends AbstractConfig { "Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than " + "this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum. " + "The maximum record batch size accepted by the broker is defined via message.max.bytes (broker config) or " + - "max.message.bytes (topic config). A fetch request consists of many partitions, and there is another setting that controls how much " + - "data is returned for each partition in a fetch request - see max.partition.fetch.bytes. Note that the consumer performs multiple fetches in parallel."; + "max.message.bytes (topic config). Note that the consumer performs multiple fetches in parallel."; public static final int DEFAULT_FETCH_MAX_BYTES = 50 * 1024 * 1024; /** @@ -371,7 +370,8 @@ public class ConsumerConfig extends AbstractConfig { public static final String ALLOW_AUTO_CREATE_TOPICS_CONFIG = "allow.auto.create.topics"; private static final String ALLOW_AUTO_CREATE_TOPICS_DOC = "Allow automatic topic creation on the broker when" + " subscribing to or assigning a topic. A topic being subscribed to will be automatically created only if the" + - " broker allows for it using auto.create.topics.enable broker configuration."; + " broker allows for it using `auto.create.topics.enable` broker configuration. This configuration must" + + " be set to `true` when using brokers older than 0.11.0"; public static final boolean DEFAULT_ALLOW_AUTO_CREATE_TOPICS = true; /** @@ -380,42 +380,29 @@ public class ConsumerConfig extends AbstractConfig { public static final String SECURITY_PROVIDERS_CONFIG = SecurityConfig.SECURITY_PROVIDERS_CONFIG; private static final String SECURITY_PROVIDERS_DOC = SecurityConfig.SECURITY_PROVIDERS_DOC; - /** - * share.acknowledgement.mode - */ - public static final String SHARE_ACKNOWLEDGEMENT_MODE_CONFIG = "share.acknowledgement.mode"; - private static final String SHARE_ACKNOWLEDGEMENT_MODE_DOC = "Controls the acknowledgement mode for a share consumer." + - " If set to implicit, the acknowledgement mode of the consumer is implicit and it must not" + - " use org.apache.kafka.clients.consumer.ShareConsumer.acknowledge() to acknowledge delivery of records. Instead," + - " delivery is acknowledged implicitly on the next call to poll or commit." + - " If set to explicit, the acknowledgement mode of the consumer is explicit and it must use" + - " org.apache.kafka.clients.consumer.ShareConsumer.acknowledge() to acknowledge delivery of records."; - private static final AtomicInteger CONSUMER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); /** * A list of configuration keys not supported for CLASSIC protocol. */ - private static final List CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS = List.of( - GROUP_REMOTE_ASSIGNOR_CONFIG, - SHARE_ACKNOWLEDGEMENT_MODE_CONFIG + private static final List CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS = Collections.singletonList( + GROUP_REMOTE_ASSIGNOR_CONFIG ); /** * A list of configuration keys not supported for CONSUMER protocol. */ private static final List CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS = List.of( - PARTITION_ASSIGNMENT_STRATEGY_CONFIG, - HEARTBEAT_INTERVAL_MS_CONFIG, - SESSION_TIMEOUT_MS_CONFIG, - SHARE_ACKNOWLEDGEMENT_MODE_CONFIG + PARTITION_ASSIGNMENT_STRATEGY_CONFIG, + HEARTBEAT_INTERVAL_MS_CONFIG, + SESSION_TIMEOUT_MS_CONFIG ); - + static { CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - ConfigDef.NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + Collections.emptyList(), + new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, @@ -445,7 +432,7 @@ public class ConsumerConfig extends AbstractConfig { .define(PARTITION_ASSIGNMENT_STRATEGY_CONFIG, Type.LIST, List.of(RangeAssignor.class, CooperativeStickyAssignor.class), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + new ConfigDef.NonNullValidator(), Importance.MEDIUM, PARTITION_ASSIGNMENT_STRATEGY_DOC) .define(METADATA_MAX_AGE_CONFIG, @@ -572,7 +559,7 @@ public class ConsumerConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + new ConfigDef.NonNullValidator(), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(KEY_DESERIALIZER_CLASS_CONFIG, @@ -613,8 +600,8 @@ public class ConsumerConfig extends AbstractConfig { CommonClientConfigs.CONNECTIONS_MAX_IDLE_MS_DOC) .define(INTERCEPTOR_CLASSES_CONFIG, Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + Collections.emptyList(), + new ConfigDef.NonNullValidator(), Importance.LOW, INTERCEPTOR_CLASSES_DOC) .define(MAX_POLL_RECORDS_CONFIG, @@ -691,19 +678,8 @@ public class ConsumerConfig extends AbstractConfig { CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, atLeast(0), Importance.LOW, - CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) - .define(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG, - Type.STRING, - ShareAcknowledgementMode.IMPLICIT.name(), - new ShareAcknowledgementMode.Validator(), - Importance.MEDIUM, - ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_DOC) - .define(CONFIG_PROVIDERS_CONFIG, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.LOW, - CONFIG_PROVIDERS_DOC); + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); + } @Override @@ -713,7 +689,7 @@ protected Map postProcessParsedConfig(final Map Map refinedConfigs = CommonClientConfigs.postProcessReconnectBackoffConfigs(this, parsedValues); maybeOverrideClientId(refinedConfigs); maybeOverrideEnableAutoCommit(refinedConfigs); - checkUnsupportedConfigsPostProcess(); + checkUnsupportedConfigs(); return refinedConfigs; } @@ -760,16 +736,16 @@ private void maybeOverrideEnableAutoCommit(Map configs) { } } - protected void checkUnsupportedConfigsPostProcess() { + private void checkUnsupportedConfigs() { String groupProtocol = getString(GROUP_PROTOCOL_CONFIG); if (GroupProtocol.CLASSIC.name().equalsIgnoreCase(groupProtocol)) { - checkUnsupportedConfigsPostProcess(GroupProtocol.CLASSIC, CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS); + checkUnsupportedConfigs(GroupProtocol.CLASSIC, CLASSIC_PROTOCOL_UNSUPPORTED_CONFIGS); } else if (GroupProtocol.CONSUMER.name().equalsIgnoreCase(groupProtocol)) { - checkUnsupportedConfigsPostProcess(GroupProtocol.CONSUMER, CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS); + checkUnsupportedConfigs(GroupProtocol.CONSUMER, CONSUMER_PROTOCOL_UNSUPPORTED_CONFIGS); } } - private void checkUnsupportedConfigsPostProcess(GroupProtocol groupProtocol, List unsupportedConfigs) { + private void checkUnsupportedConfigs(GroupProtocol groupProtocol, List unsupportedConfigs) { if (getString(GROUP_PROTOCOL_CONFIG).equalsIgnoreCase(groupProtocol.name())) { List invalidConfigs = new ArrayList<>(); unsupportedConfigs.forEach(configName -> { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java index 206e6d04a2c2b..c04afccd8aaf9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerInterceptor.java @@ -39,8 +39,6 @@ * {@link org.apache.kafka.clients.consumer.KafkaConsumer#poll(java.time.Duration)}. *

    * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the interceptor to register metrics. The following tags are automatically added to - * all metrics registered: config set to interceptor.classes, and class set to the ConsumerInterceptor class name. */ public interface ConsumerInterceptor extends Configurable, AutoCloseable { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java index 45cb505c744e5..20f2551ba6bc2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignor.java @@ -291,6 +291,9 @@ static List getAssignorInstances(List assigno // a map to store assignor name -> assignor class name Map assignorNameMap = new HashMap<>(); + if (assignorClasses == null) + return assignors; + for (Object klass : assignorClasses) { // first try to get the class if passed in as a string if (klass instanceof String) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java index 23e045b760005..914c0ab979f54 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRebalanceListener.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.TopicPartition; +import java.time.Duration; import java.util.Collection; /** @@ -50,7 +51,7 @@ * Under normal conditions, if a partition is reassigned from one consumer to another, then the old consumer will * always invoke {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} for that partition prior to the new consumer * invoking {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} for the same partition. So if offsets or other state is saved in the - * {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} call by one consumer member, it will always be accessible by the time the + * {@link #onPartitionsRevoked(Collection) onPartitionsRevoked} call by one consumer member, it will be always accessible by the time the * other consumer member taking over that partition and triggering its {@link #onPartitionsAssigned(Collection) onPartitionsAssigned} callback to load the state. *

    * You can think of revocation as a graceful way to give up ownership of a partition. In some cases, the consumer may not have an opportunity to do so. @@ -120,31 +121,13 @@ public interface ConsumerRebalanceListener { /** * A callback method the user can implement to provide handling of offset commits to a customized store. * This method will be called during a rebalance operation when the consumer has to give up some partitions. - * The consumer may need to give up some partitions (thus this callback executed) under the following scenarios: - *

      - *
    • If the consumer assignment changes
    • - *
    • If the consumer is being closed ({@link KafkaConsumer#close(CloseOptions option)})
    • - *
    • If the consumer is unsubscribing ({@link KafkaConsumer#unsubscribe()})
    • - *
    + * It can also be called when consumer is being closed ({@link KafkaConsumer#close(Duration)}) + * or is unsubscribing ({@link KafkaConsumer#unsubscribe()}). * It is recommended that offsets should be committed in this callback to either Kafka or a * custom offset store to prevent duplicate data. *

    - * This callback is always called before re-assigning the partitions. - * If the consumer is using the {@link GroupProtocol#CLASSIC} rebalance protocol: - *

      - *
    • - * In eager rebalancing, onPartitionsRevoked will be called with the full set of assigned partitions as a parameter (all partitions are revoked). - * It will be called even if there are no partitions to revoke. - *
    • - *
    • - * In cooperative rebalancing, onPartitionsRevoked will be called with the set of partitions to revoke, - * iff the set is non-empty. - *
    • - *
    - * If the consumer is using the {@link GroupProtocol#CONSUMER} rebalance protocol, this callback will be called - * with the set of partitions to revoke iff the set is non-empty - * (same behavior as the {@link GroupProtocol#CLASSIC} rebalance protocol with Cooperative mode). - *

    + * In eager rebalancing, it will always be called at the start of a rebalance and after the consumer stops fetching data. + * In cooperative rebalancing, it will be called at the end of a rebalance on the set of partitions being revoked iff the set is non-empty. * For examples on usage of this API, see Usage Examples section of {@link KafkaConsumer KafkaConsumer}. *

    * It is common for the revocation callback to use the consumer instance in order to commit offsets. It is possible @@ -153,9 +136,8 @@ public interface ConsumerRebalanceListener { * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * - * @param partitions The list of partitions that were assigned to the consumer and now need to be revoked. This will - * include the full assignment under the Classic/Eager protocol, given that it revokes all partitions. - * It will only include the subset to revoke under the Classic/Cooperative and Consumer protocols. + * @param partitions The list of partitions that were assigned to the consumer and now need to be revoked (may not + * include all currently assigned partitions, i.e. there may still be some partitions left) * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ @@ -163,13 +145,12 @@ public interface ConsumerRebalanceListener { /** * A callback method the user can implement to provide handling of customized offsets on completion of a successful - * partition re-assignment. This method will be called after the partition re-assignment completes (even if no new - * partitions were assigned to the consumer), and before the consumer starts fetching data, - * and only as the result of a {@link Consumer#poll(java.time.Duration) poll(long)} call. + * partition re-assignment. This method will be called after the partition re-assignment completes and before the + * consumer starts fetching data, and only as the result of a {@link Consumer#poll(java.time.Duration) poll(long)} call. *

    * It is guaranteed that under normal conditions all the processes in a consumer group will execute their - * {@link #onPartitionsRevoked(Collection)} callback before any instance executes this onPartitionsAssigned callback. - * During exceptional scenarios, partitions may be migrated + * {@link #onPartitionsRevoked(Collection)} callback before any instance executes its + * {@link #onPartitionsAssigned(Collection)} callback. During exceptional scenarios, partitions may be migrated * without the old owner being notified (i.e. their {@link #onPartitionsRevoked(Collection)} callback not triggered), * and later when the old owner consumer realized this event, the {@link #onPartitionsLost(Collection)} callback * will be triggered by the consumer then. @@ -180,11 +161,9 @@ public interface ConsumerRebalanceListener { * invocation of {@link KafkaConsumer#poll(java.time.Duration)} in which this callback is being executed. This means it is not * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * - * @param partitions Partitions that have been added to the assignment as a result of the rebalance. - * Note that partitions that were already owned by this consumer and remain assigned are not - * included in this list under the Classic/Cooperative or Consumer protocols. THe full assignment - * will be received under the Classic/Eager protocol. - * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} + * @param partitions The list of partitions that are now assigned to the consumer (previously owned partitions will + * NOT be included, i.e. this list will only include newly added partitions) + * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ void onPartitionsAssigned(Collection partitions); @@ -209,9 +188,10 @@ public interface ConsumerRebalanceListener { * necessary to catch these exceptions and re-attempt to wakeup or interrupt the consumer thread. * * @param partitions The list of partitions that were assigned to the consumer and now have been reassigned - * to other consumers. With both, the Classic and Consumer protocols, this will always include - * all partitions that were previously assigned to the consumer. - * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} + * to other consumers. With the current protocol this will always include all of the consumer's + * previously assigned partitions, but this may change in future protocols (ie there would still + * be some partitions left) + * @throws org.apache.kafka.common.errors.WakeupException If raised from a nested call to {@link KafkaConsumer} * @throws org.apache.kafka.common.errors.InterruptException If raised from a nested call to {@link KafkaConsumer} */ default void onPartitionsLost(Collection partitions) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java index 11360b0dac0c9..453503e6dac84 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ConsumerRecord.java @@ -21,31 +21,12 @@ import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; -import java.util.ConcurrentModificationException; import java.util.Optional; /** - * A key/value pair to be received from Kafka. This also consists of a topic name and - * a partition number from which the record is being received, an offset that points + * A key/value pair to be received from Kafka. This also consists of a topic name and + * a partition number from which the record is being received, an offset that points * to the record in a Kafka partition, and a timestamp as marked by the corresponding ProducerRecord. - *

    - * - *

    Thread Safety

    - * This consumer record is not thread-safe. Concurrent access to a {@code ConsumerRecord} instance by - * multiple threads may result in undefined behavior, including but not limited to the following: - *
      - *
    • Throwing {@link ConcurrentModificationException} (e.g., when concurrently modifying {@link #headers()}).
    • - *
    • Data corruption or logical errors (e.g., inconsistent state of {@code headers} or {@code value}).
    • - *
    • Visibility issues (e.g., modifications by one thread not being visible to another thread).
    • - *
    - * - *

    - * In particular, the {@link #headers()} method returns a mutable collection of headers. If multiple - * threads access or modify these headers concurrently, it may lead to race conditions or inconsistent - * states. It is the responsibility of the user to ensure that multi-threaded access is properly synchronized. - * - *

    - * Refer to the {@link KafkaConsumer} documentation for more details on multi-threaded consumption and processing strategies. */ public class ConsumerRecord { public static final long NO_TIMESTAMP = RecordBatch.NO_TIMESTAMP; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java index 9f1992d65688a..70c0f7cadd5ba 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaConsumer.java @@ -30,7 +30,6 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidRegularExpression; -import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; @@ -661,7 +660,7 @@ public Set subscription() { * If the given list of topics is empty, it is treated the same as {@link #unsubscribe()}. * *

    - * As part of group management, the group coordinator will keep track of the list of consumers that belong to a particular + * As part of group management, the consumer will keep track of the list of consumers that belong to a particular * group and will trigger a rebalance operation if any one of the following events are triggered: *

      *
    • Number of partitions change for any of the subscribed topics @@ -670,11 +669,8 @@ public Set subscription() { *
    • A new member is added to the consumer group *
    *

    - * When any of these events are triggered, the provided listener will be invoked in this way: - *

      - *
    • {@link ConsumerRebalanceListener#onPartitionsRevoked(Collection)} will be invoked with the partitions to revoke, before re-assigning those partitions to another consumer.
    • - *
    • {@link ConsumerRebalanceListener#onPartitionsAssigned(Collection)} will be invoked when the rebalance completes (even if no new partitions are assigned to the consumer)
    • - *
    + * When any of these events are triggered, the provided listener will be invoked first to indicate that + * the consumer's assignment has been revoked, and then again when the new assignment has been received. * Note that rebalances will only occur during an active call to {@link #poll(Duration)}, so callbacks will * also only be invoked during that time. * @@ -912,8 +908,7 @@ public ConsumerRecords poll(final Duration timeout) { * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). *

    * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, - * but only when the consumer is using the consumer group protocol. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. * * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This fatal error can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -957,8 +952,7 @@ public void commitSync() { * encountered (in which case it is thrown to the caller), or the passed timeout expires. *

    * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, - * but only when the consumer is using the consumer group protocol. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. * * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -1007,11 +1001,9 @@ public void commitSync(Duration timeout) { * (in which case a {@link org.apache.kafka.common.errors.TimeoutException} is thrown to the caller). *

    * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, - * but only when the consumer is using the consumer group protocol. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. * - * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it - * is safe to mutate the map after returning. + * @param offsets A map of offsets by partition with associated metadata * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, * or if there is an active group with the same group.id which is using group management. In such cases, @@ -1060,11 +1052,9 @@ public void commitSync(final Map offsets) { * encountered (in which case it is thrown to the caller), or the timeout expires. *

    * Note that asynchronous offset commits sent previously with the {@link #commitAsync(OffsetCommitCallback)} - * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method, - * but only when the consumer is using the consumer group protocol. + * (or similar) are guaranteed to have their callbacks invoked prior to completion of this method. * - * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it - * is safe to mutate the map after returning. + * @param offsets A map of offsets by partition with associated metadata * @param timeout The maximum amount of time to await completion of the offset commit * @throws org.apache.kafka.clients.consumer.CommitFailedException if the commit failed and cannot be retried. * This can only occur if you are using automatic group management with {@link #subscribe(Collection)}, @@ -1153,7 +1143,7 @@ public void commitAsync(OffsetCommitCallback callback) { * offsets committed through this API are guaranteed to complete before a subsequent call to {@link #commitSync()} * (and variants) returns. * - * @param offsets A map of offsets by partition with associated metadata. This map will be copied internally, so it + * @param offsets A map of offsets by partition with associate metadata. This map will be copied internally, so it * is safe to mutate the map after returning. * @param callback Callback to invoke when the commit completes * @throws org.apache.kafka.common.errors.FencedInstanceIdException if this consumer is using the classic group protocol @@ -1573,8 +1563,8 @@ public Set paused() { * @param timestampsToSearch the mapping from partition to the timestamp to look up. * * @return a mapping from partition to the timestamp and offset of the first message with timestamp greater - * than or equal to the target timestamp. If the timestamp and offset for a specific partition cannot be found within - * the default timeout, and no corresponding message exists, the entry in the returned map will be {@code null} + * than or equal to the target timestamp. {@code null} will be returned for the partition if there is no + * such message. * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws IllegalArgumentException if the target timestamp is negative @@ -1600,8 +1590,8 @@ public Map offsetsForTimes(Map beginningOffsets(Collection par * @param partitions the partitions to get the earliest offsets * @param timeout The maximum amount of time to await retrieval of the beginning offsets * - * @return The earliest available offsets for the given partitions, and it will return empty map if zero timeout is provided + * @return The earliest available offsets for the given partitions * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws org.apache.kafka.common.errors.TimeoutException if the offset metadata could not be fetched before @@ -1694,7 +1684,7 @@ public Map endOffsets(Collection partition * @param partitions the partitions to get the end offsets. * @param timeout The maximum amount of time to await retrieval of the end offsets * - * @return The end offsets for the given partitions, and it will return empty map if zero timeout is provided + * @return The end offsets for the given partitions. * @throws org.apache.kafka.common.errors.AuthenticationException if authentication fails. See the exception for more details * @throws org.apache.kafka.common.errors.AuthorizationException if not authorized to the topic(s). See the exception for more details * @throws org.apache.kafka.common.errors.TimeoutException if the offsets could not be fetched before @@ -1771,19 +1761,14 @@ public void enforceRebalance() { } /** - * Close the consumer with {@link CloseOptions.GroupMembershipOperation#DEFAULT default leave group behavior}, - * waiting for up to the default timeout of 30 seconds for any needed cleanup. + * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. * If auto-commit is enabled, this will commit the current offsets if possible within the default - * timeout. See {@link #close(CloseOptions)} for details. Note that {@link #wakeup()} + * timeout. See {@link #close(Duration)} for details. Note that {@link #wakeup()} * cannot be used to interrupt close. - *

    - * This close operation will attempt all shutdown steps even if one of them fails. - * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * - * @throws WakeupException if {@link #wakeup()} is called before or while this function is called - * @throws InterruptException if the calling thread is interrupted before or while this function is called - * @throws KafkaException for any other error during close - * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) + * @throws org.apache.kafka.common.errors.InterruptException if the calling thread is interrupted + * before or while this function is called + * @throws org.apache.kafka.common.KafkaException for any other error during close */ @Override public void close() { @@ -1791,13 +1776,10 @@ public void close() { } /** - * This method has been deprecated since Kafka 4.1 and should use {@link KafkaConsumer#close(CloseOptions)} instead. - *

    - * Close the consumer with {@link CloseOptions.GroupMembershipOperation#DEFAULT default leave group behavior} - * cleanly within the specified timeout. This method waits up to - * {@code timeout} for the consumer to complete pending commits and maybe leave the group (if the member is dynamic). + * Tries to close the consumer cleanly within the specified timeout. This method waits up to + * {@code timeout} for the consumer to complete pending commits and leave the group. * If auto-commit is enabled, this will commit the current offsets if possible within the - * timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) + * timeout. If the consumer is unable to complete offset commits and gracefully leave the group * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be * used to interrupt close. *

    @@ -1807,60 +1789,19 @@ public void close() { * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. * Note that the execution time of callbacks (such as {@link OffsetCommitCallback} and * {@link ConsumerRebalanceListener}) does not consume time from the close timeout. - *

    - * This close operation will attempt all shutdown steps even if one of them fails. - * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * * @param timeout The maximum time to wait for consumer to close gracefully. The value must be * non-negative. Specifying a timeout of zero means do not wait for pending requests to complete. + * * @throws IllegalArgumentException If the {@code timeout} is negative. - * @throws WakeupException if {@link #wakeup()} is called before or while this function is called - * @throws InterruptException if the calling thread is interrupted before or while this function is called - * @throws KafkaException for any other error during close - * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) + * @throws InterruptException If the thread is interrupted before or while this function is called + * @throws org.apache.kafka.common.KafkaException for any other error during close */ - @Deprecated(since = "4.1") @Override public void close(Duration timeout) { delegate.close(timeout); } - /** - * Close the consumer cleanly. {@link CloseOptions} allows to specify a timeout and a - * {@link CloseOptions.GroupMembershipOperation leave group behavior}. - * If no timeout is specified, the default timeout of 30 seconds is used. - * If no leave group behavior is specified, the {@link CloseOptions.GroupMembershipOperation#DEFAULT default - * leave group behavior} is used. - *

    - * This method waits up to the timeout for the consumer to complete pending commits and maybe leave the group, - * depending on the specified leave group behavior. - * If auto-commit is enabled, this will commit the current offsets if possible within the - * timeout. If the consumer is unable to complete offset commits and to gracefully leave the group (if applicable) - * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be - * used to interrupt close. - *

    - * The actual maximum wait time is bounded by the {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} setting, which - * only applies to operations performed with the broker (coordinator-related requests and - * fetch sessions). Even if a larger timeout is specified, the consumer will not wait longer than - * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. - * Note that the execution time of callbacks (such as {@link OffsetCommitCallback} and - * {@link ConsumerRebalanceListener}) does not consume time from the close timeout. - *

    - * This close operation will attempt all shutdown steps even if one of them fails. - * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. - * - * @param option see {@link CloseOptions}; cannot be {@code null} - * @throws IllegalArgumentException If the {@code option} timeout is negative - * @throws WakeupException if {@link #wakeup()} is called before or while this function is called - * @throws InterruptException if the calling thread is interrupted before or while this function is called - * @throws KafkaException for any other error during close - * (e.g., errors thrown from rebalance callbacks or commit callbacks from previous asynchronous commits) - */ - @Override - public void close(CloseOptions option) { - delegate.close(option); - } - /** * Wakeup the consumer. This method is thread-safe and is useful in particular to abort a long poll. * The thread which is blocking in an operation will throw {@link org.apache.kafka.common.errors.WakeupException}. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java index 7f3bad2e318f2..51d6718a76c53 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/KafkaShareConsumer.java @@ -33,7 +33,6 @@ import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.WakeupException; -import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.LogContext; @@ -51,7 +50,8 @@ /** * A client that consumes records from a Kafka cluster using a share group. *

    - * This is a preview feature introduced by KIP-932. It is not yet recommended for production use. + * This is an early access feature under development which is introduced by KIP-932. + * It is not suitable for production use until it is fully implemented and released. * *

    Cross-Version Compatibility

    * This client can communicate with brokers that are a version that supports share groups. You will receive an @@ -99,7 +99,7 @@ * of the topic-partitions that match its subscriptions. Records are acquired for delivery to this consumer with a * time-limited acquisition lock. While a record is acquired, it is not available for another consumer. By default, * the lock duration is 30 seconds, but it can also be controlled using the group {@code group.share.record.lock.duration.ms} - * configuration property. The idea is that the lock is automatically released once the lock duration has elapsed, and + * configuration parameter. The idea is that the lock is automatically released once the lock duration has elapsed, and * then the record is available to be given to another consumer. The consumer which holds the lock can deal with it in * the following ways: *
      @@ -115,33 +115,31 @@ * {@code group.share.record.lock.partition.limit}. By limiting the duration of the acquisition lock and automatically * releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. *

      - * The consumer can choose to use implicit or explicit acknowledgement of the records it processes by using the - * consumer {@code share.acknowledgement.mode} configuration property. - *

      - * If the application sets the property to "implicit" or does not set it at all, then the consumer is using - * implicit acknowledgement. In this mode, the application acknowledges delivery by: + * The consumer can choose to use implicit or explicit acknowledgement of the records it processes. + *

      If the application calls {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for any record in the batch, + * it is using explicit acknowledgement. In this case: *

        - *
      • Calling {@link #poll(Duration)} without committing, which also implicitly acknowledges all - * the delivered records and commits the acknowledgements to Kafka asynchronously. In this case, no exception is - * thrown by a failure to commit the acknowledgements.
      • - *
      • Calling {@link #commitSync()} or {@link #commitAsync()} which implicitly acknowledges all - * the delivered records as processed successfully and commits the acknowledgements to Kafka.
      • - *
      • Calling {@link #close()} which releases any acquired records without acknowledgement.
      • + *
      • The application calls {@link #commitSync()} or {@link #commitAsync()} which commits the acknowledgements to Kafka. + * If any records in the batch were not acknowledged, they remain acquired and will be presented to the application + * in response to a future poll.
      • + *
      • The application calls {@link #poll(Duration)} without committing first, which commits the acknowledgements to + * Kafka asynchronously. In this case, no exception is thrown by a failure to commit the acknowledgement. + * If any records in the batch were not acknowledged, they remain acquired and will be presented to the application + * in response to a future poll.
      • + *
      • The application calls {@link #close()} which attempts to commit any pending acknowledgements and + * releases any remaining acquired records.
      • *
      - * If the application sets the property to "explicit", then the consumer is using explicit acknowledgement. - * The application must acknowledge all records returned from {@link #poll(Duration)} using - * {@link #acknowledge(ConsumerRecord, AcknowledgeType)} before its next call to {@link #poll(Duration)}. - * If the application calls {@link #poll(Duration)} without having acknowledged all records, an - * {@link IllegalStateException} is thrown. The remaining unacknowledged records can still be acknowledged. - * In this mode, the application acknowledges delivery by: + * If the application does not call {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for any record in the batch, + * it is using implicit acknowledgement. In this case: *
        - *
      • Calling {@link #poll(Duration)} after it has acknowledged all records, which commits the acknowledgements - * to Kafka asynchronously. In this case, no exception is thrown by a failure to commit the acknowledgements.
      • - *
      • Calling {@link #commitSync()} or {@link #commitAsync()} which commits any pending - * acknowledgements to Kafka.
      • - *
      • Calling {@link #close()} which attempts to commit any pending acknowledgements and releases - * any remaining acquired records.
      • + *
      • The application calls {@link #commitSync()} or {@link #commitAsync()} which implicitly acknowledges all of + * the delivered records as processed successfully and commits the acknowledgements to Kafka.
      • + *
      • The application calls {@link #poll(Duration)} without committing, which also implicitly acknowledges all of + * the delivered records and commits the acknowledgements to Kafka asynchronously. In this case, no exception is + * thrown by a failure to commit the acknowledgements.
      • + *
      • The application calls {@link #close()} which releases any acquired records without acknowledgement.
      • *
      + *

      * The consumer guarantees that the records returned in the {@code ConsumerRecords} object for a specific topic-partition * are in order of increasing offset. For each topic-partition, Kafka guarantees that acknowledgements for the records * in a batch are performed atomically. This makes error handling significantly more straightforward because there can be @@ -161,7 +159,6 @@ * props.setProperty("group.id", "test"); * props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - * * KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props); * consumer.subscribe(Arrays.asList("foo")); * while (true) { @@ -181,7 +178,6 @@ * props.setProperty("group.id", "test"); * props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); * props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer"); - * * KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props); * consumer.subscribe(Arrays.asList("foo")); * while (true) { @@ -195,16 +191,13 @@ * * *

      Per-record acknowledgement (explicit acknowledgement)

      - * This example demonstrates using different acknowledge types depending on the outcome of processing the records. - * Here the {@code share.acknowledgement.mode} property is set to "explicit" so the consumer must explicitly acknowledge each record. + * This example demonstrates using different acknowledgement types depending on the outcome of processing the records. *
        *     Properties props = new Properties();
        *     props.setProperty("bootstrap.servers", "localhost:9092");
        *     props.setProperty("group.id", "test");
        *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
      - *     props.setProperty("share.acknowledgement.mode", "explicit");
      - *
        *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
        *     consumer.subscribe(Arrays.asList("foo"));
        *     while (true) {
      @@ -231,6 +224,42 @@
        * It is only once {@link #commitSync()} is called that the acknowledgements are committed by sending the new state
        * information to Kafka.
        *
      + * 

      Per-record acknowledgement, ending processing of the batch on an error (explicit acknowledgement)

      + * This example demonstrates ending processing of a batch of records on the first error. + *
      + *     Properties props = new Properties();
      + *     props.setProperty("bootstrap.servers", "localhost:9092");
      + *     props.setProperty("group.id", "test");
      + *     props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
      + *     props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
      + *     KafkaShareConsumer<String, String> consumer = new KafkaShareConsumer<>(props);
      + *     consumer.subscribe(Arrays.asList("foo"));
      + *     while (true) {
      + *         ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
      + *         for (ConsumerRecord<String, String> record : records) {
      + *             try {
      + *                 doProcessing(record);
      + *                 consumer.acknowledge(record, AcknowledgeType.ACCEPT);
      + *             } catch (Exception e) {
      + *                 consumer.acknowledge(record, AcknowledgeType.REJECT);
      + *                 break;
      + *             }
      + *         }
      + *         consumer.commitSync();
      + *     }
      + * 
      + * There are the following cases in this example: + *
        + *
      1. The batch contains no records, in which case the application just polls again. The call to {@link #commitSync()} + * just does nothing because the batch was empty.
      2. + *
      3. All of the records in the batch are processed successfully. The calls to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} + * specifying {@code AcknowledgeType.ACCEPT} mark all records in the batch as successfully processed.
      4. + *
      5. One of the records encounters an exception. The call to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} specifying + * {@code AcknowledgeType.REJECT} rejects that record. Earlier records in the batch have already been marked as successfully + * processed. The call to {@link #commitSync()} commits the acknowledgements, but the records after the failed record + * remain acquired as part of the same delivery attempt and will be presented to the application in response to another poll.
      6. + *
      + * *

      Reading Transactional Records

      * The way that share groups handle transactional records is controlled by the {@code group.share.isolation.level} * configuration property. In a share group, the isolation level applies to the entire share group, not just individual @@ -239,8 +268,8 @@ * In read_uncommitted isolation level, the share group consumes all non-transactional and transactional * records. The consumption is bounded by the high-water mark. *

      - * In read_committed isolation level, the share group only consumes non-transactional records and - * committed transactional records. The set of records which are eligible to become in-flight records are + * In read_committed isolation level (not yet supported), the share group only consumes non-transactional + * records and committed transactional records. The set of records which are eligible to become in-flight records are * non-transactional records and committed transactional records only. The consumption is bounded by the last stable * offset, so an open transaction blocks the progress of the share group with read_committed isolation level. * @@ -373,11 +402,11 @@ public KafkaShareConsumer(Properties properties, public KafkaShareConsumer(Map configs, Deserializer keyDeserializer, Deserializer valueDeserializer) { - this(new ShareConsumerConfig(ShareConsumerConfig.appendDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), + this(new ConsumerConfig(ConsumerConfig.appendDeserializerToConfig(configs, keyDeserializer, valueDeserializer)), keyDeserializer, valueDeserializer); } - KafkaShareConsumer(ShareConsumerConfig config, + KafkaShareConsumer(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer) { delegate = CREATOR.create(config, keyDeserializer, valueDeserializer); @@ -386,7 +415,7 @@ public KafkaShareConsumer(Map configs, KafkaShareConsumer(final LogContext logContext, final String clientId, final String groupId, - final ShareConsumerConfig config, + final ConsumerConfig config, final Deserializer keyDeserializer, final Deserializer valueDeserializer, final Time time, @@ -445,7 +474,7 @@ public void unsubscribe() { } /** - * Deliver records for the topics specified using {@link #subscribe(Collection)}. It is an error to not have + * Fetch data for the topics specified using {@link #subscribe(Collection)}. It is an error to not have * subscribed to any topics before polling for data. * *

      @@ -454,14 +483,13 @@ public void unsubscribe() { * * @param timeout The maximum time to block (must not be greater than {@link Long#MAX_VALUE} milliseconds) * - * @return map of topic to records + * @return map of topic to records since the last fetch for the subscribed list of topics * * @throws AuthenticationException if authentication fails. See the exception for more details * @throws AuthorizationException if caller lacks Read access to any of the subscribed * topics or to the share group. See the exception for more details * @throws IllegalArgumentException if the timeout value is negative - * @throws IllegalStateException if the consumer is not subscribed to any topics, or it is using - * explicit acknowledgement and has not acknowledged all records previously delivered + * @throws IllegalStateException if the consumer is not subscribed to any topics * @throws ArithmeticException if the timeout is greater than {@link Long#MAX_VALUE} milliseconds. * @throws InvalidTopicException if the current subscription contains any invalid * topic (per {@link org.apache.kafka.common.internals.Topic#validate(String)}) @@ -478,12 +506,11 @@ public ConsumerRecords poll(Duration timeout) { * Acknowledge successful delivery of a record returned on the last {@link #poll(Duration)} call. * The acknowledgement is committed on the next {@link #commitSync()}, {@link #commitAsync()} or * {@link #poll(Duration)} call. - *

      This method can only be used if the consumer is using explicit acknowledgement. * * @param record The record to acknowledge * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using - * explicit acknowledgement + * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already + * used implicit acknowledgement */ @Override public void acknowledge(ConsumerRecord record) { @@ -493,42 +520,20 @@ public void acknowledge(ConsumerRecord record) { /** * Acknowledge delivery of a record returned on the last {@link #poll(Duration)} call indicating whether * it was processed successfully. The acknowledgement is committed on the next {@link #commitSync()}, - * {@link #commitAsync()} or {@link #poll(Duration)} call. - *

      This method can only be used if the consumer is using explicit acknowledgement. + * {@link #commitAsync()} or {@link #poll(Duration)} call. By using this method, the consumer is using + * explicit acknowledgement. * * @param record The record to acknowledge - * @param type The acknowledge type which indicates whether it was processed successfully + * @param type The acknowledgement type which indicates whether it was processed successfully * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using - * explicit acknowledgement + * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer has already + * used implicit acknowledgement */ @Override public void acknowledge(ConsumerRecord record, AcknowledgeType type) { delegate.acknowledge(record, type); } - /** - * Acknowledge delivery of a record returned on the last {@link #poll(Duration)} call indicating whether - * it was processed successfully. The acknowledgement is committed on the next {@link #commitSync()}, - * {@link #commitAsync()} or {@link #poll(Duration)} call. - *

      This method can only be used if the consumer is using explicit acknowledgement. - *

      It provides an alternative to {@link #acknowledge(ConsumerRecord, AcknowledgeType)} for - * situations where the {@link ConsumerRecord} is not available, such as when the record could not be deserialized. - * - * @param topic The topic of the record to acknowledge - * @param partition The partition of the record to acknowledge - * @param offset The offset of the record to acknowledge - * @param type The acknowledge type which indicates whether it was processed successfully - * - * @throws IllegalStateException if the record is not waiting to be acknowledged, or the consumer is not using - * explicit acknowledgement - */ - - @Override - public void acknowledge(String topic, int partition, long offset, AcknowledgeType type) { - delegate.acknowledge(topic, partition, offset, type); - } - /** * Commit the acknowledgements for the records returned. If the consumer is using explicit acknowledgement, * the acknowledgements to commit have been indicated using {@link #acknowledge(ConsumerRecord)} or @@ -611,7 +616,7 @@ public void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callb * client to complete the request. *

      * Client telemetry is controlled by the {@link ConsumerConfig#ENABLE_METRICS_PUSH_CONFIG} - * configuration property. + * configuration option. * * @param timeout The maximum time to wait for consumer client to determine its client instance ID. * The value must be non-negative. Specifying a timeout of zero means do not @@ -640,52 +645,14 @@ public Uuid clientInstanceId(Duration timeout) { return delegate.metrics(); } - /** - * Add the provided application metric for subscription. This metric will be added to this client's metrics - * that are available for subscription and sent as telemetry data to the broker. - * The provided metric must map to an OTLP metric data point type in the OpenTelemetry v1 metrics protobuf message types. - * Specifically, the metric should be one of the following: - *

        - *
      • - * Sum: Monotonic total count meter (Counter). Suitable for metrics like total number of X, e.g., total bytes sent. - *
      • - *
      • - * Gauge: Non-monotonic current value meter (UpDownCounter). Suitable for metrics like current value of Y, e.g., current queue count. - *
      • - *
      - * Metrics not matching these types are silently ignored. Executing this method for a previously registered metric - * is a benign operation and results in updating that metric's entry. - * - * @param metric The application metric to register - */ - @Override - public void registerMetricForSubscription(KafkaMetric metric) { - delegate.registerMetricForSubscription(metric); - } - - /** - * Remove the provided application metric for subscription. This metric is removed from this client's metrics - * and will not be available for subscription any longer. Executing this method with a metric that has not been registered is a - * benign operation and does not result in any action taken (no-op). - * - * @param metric The application metric to remove - */ - @Override - public void unregisterMetricFromSubscription(KafkaMetric metric) { - delegate.unregisterMetricFromSubscription(metric); - } - /** * Close the consumer, waiting for up to the default timeout of 30 seconds for any needed cleanup. * This will commit acknowledgements if possible within the default timeout. * See {@link #close(Duration)} for details. Note that {@link #wakeup()} cannot be used to interrupt close. - *

      - * This close operation will attempt all shutdown steps even if one of them fails. - * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called + * @throws WakeupException if {@link #wakeup()} is called before or while this method is called * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close + * @throws KafkaException for any other error during close */ @Override public void close() { @@ -698,22 +665,14 @@ public void close() { * If the consumer is unable to complete acknowledgements and gracefully leave the group * before the timeout expires, the consumer is force closed. Note that {@link #wakeup()} cannot be * used to interrupt close. - *

      - * The actual maximum wait time is bounded by the {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} setting, which - * only applies to operations performed with the broker (coordinator-related requests). - * Even if a larger timeout is specified, the consumer will not wait longer than - * {@link ConsumerConfig#REQUEST_TIMEOUT_MS_CONFIG} for these requests to complete during the close operation. - * Note that the execution time of callbacks (such as {@link AcknowledgementCommitCallback}) do not consume time from the close timeout. - *

      - * This close operation will attempt all shutdown steps even if one of them fails. - * It logs all encountered errors, continues to execute the next steps, and finally throws the first error found. * * @param timeout The maximum time to wait for consumer to close gracefully. The value must be * non-negative. Specifying a timeout of zero means do not wait for pending requests to complete. + * * @throws IllegalArgumentException if the {@code timeout} is negative - * @throws WakeupException if {@link #wakeup()} is called before or while this method is called - * @throws InterruptException if the thread is interrupted before or while this method is called - * @throws KafkaException for any other error during close + * @throws WakeupException if {@link #wakeup()} is called before or while this method is called + * @throws InterruptException if the thread is interrupted before or while this method is called + * @throws KafkaException for any other error during close */ @Override public void close(Duration timeout) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java index 303f8e5f1ddc0..b9e69806694dd 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockConsumer.java @@ -36,7 +36,6 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; @@ -80,8 +79,6 @@ public class MockConsumer implements Consumer { private Uuid clientInstanceId; private int injectTimeoutExceptionCounter; - private long maxPollRecords = Long.MAX_VALUE; - private final List addedMetrics = new ArrayList<>(); /** @@ -278,22 +275,14 @@ public synchronized ConsumerRecords poll(final Duration timeout) { // update the consumed offset final Map>> results = new HashMap<>(); final Map nextOffsetAndMetadata = new HashMap<>(); - long numPollRecords = 0L; - - final Iterator>>> partitionsIter = this.records.entrySet().iterator(); - while (partitionsIter.hasNext() && numPollRecords < this.maxPollRecords) { - Map.Entry>> entry = partitionsIter.next(); + final List toClear = new ArrayList<>(); + for (Map.Entry>> entry : this.records.entrySet()) { if (!subscriptions.isPaused(entry.getKey())) { - final Iterator> recIterator = entry.getValue().iterator(); - while (recIterator.hasNext()) { - if (numPollRecords >= this.maxPollRecords) { - break; - } + final List> recs = entry.getValue(); + for (final ConsumerRecord rec : recs) { long position = subscriptions.position(entry.getKey()).offset; - final ConsumerRecord rec = recIterator.next(); - if (beginningOffsets.get(entry.getKey()) != null && beginningOffsets.get(entry.getKey()) > position) { throw new OffsetOutOfRangeException(Collections.singletonMap(entry.getKey(), position)); } @@ -305,17 +294,13 @@ public synchronized ConsumerRecords poll(final Duration timeout) { rec.offset() + 1, rec.leaderEpoch(), leaderAndEpoch); subscriptions.position(entry.getKey(), newPosition); nextOffsetAndMetadata.put(entry.getKey(), new OffsetAndMetadata(rec.offset() + 1, rec.leaderEpoch(), "")); - numPollRecords++; - recIterator.remove(); } } - - if (entry.getValue().isEmpty()) { - partitionsIter.remove(); - } + toClear.add(entry.getKey()); } } + toClear.forEach(records::remove); return new ConsumerRecords<>(results, nextOffsetAndMetadata); } @@ -329,18 +314,6 @@ public synchronized void addRecord(ConsumerRecord record) { recs.add(record); } - /** - * Sets the maximum number of records returned in a single call to {@link #poll(Duration)}. - * - * @param maxPollRecords the max.poll.records. - */ - public synchronized void setMaxPollRecords(long maxPollRecords) { - if (maxPollRecords < 1) { - throw new IllegalArgumentException("MaxPollRecords must be strictly superior to 0"); - } - this.maxPollRecords = maxPollRecords; - } - public synchronized void setPollException(KafkaException exception) { this.pollException = exception; } @@ -575,7 +548,6 @@ public void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); } - @Deprecated @Override public synchronized void close(Duration timeout) { this.closed = true; @@ -590,11 +562,6 @@ public synchronized void wakeup() { wakeup.set(true); } - @Override - public void close(CloseOptions option) { - this.closed = true; - } - /** * Schedule a task to be executed during a poll(). One enqueued task will be executed per {@link #poll(Duration)} * invocation. You can use this repeatedly to mock out multiple responses to poll invocations. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java index f1dad522d5ab0..7c04cc30abdf8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/MockShareConsumer.java @@ -24,7 +24,6 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.utils.LogContext; import java.time.Duration; @@ -104,10 +103,6 @@ public synchronized void acknowledge(ConsumerRecord record) { public synchronized void acknowledge(ConsumerRecord record, AcknowledgeType type) { } - @Override - public synchronized void acknowledge(String topic, int partition, long offset, AcknowledgeType type) { - } - @Override public synchronized Map> commitSync() { return new HashMap<>(); @@ -145,14 +140,6 @@ public synchronized Uuid clientInstanceId(Duration timeout) { return Collections.emptyMap(); } - @Override - public void registerMetricForSubscription(KafkaMetric metric) { - } - - @Override - public void unregisterMetricFromSubscription(KafkaMetric metric) { - } - @Override public synchronized void close() { close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java index f459dd5ba5507..d6b3b947c209d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetAndMetadata.java @@ -54,7 +54,10 @@ public OffsetAndMetadata(long offset, Optional leaderEpoch, String meta // The server converts null metadata to an empty string. So we store it as an empty string as well on the client // to be consistent. - this.metadata = Objects.requireNonNullElse(metadata, OffsetFetchResponse.NO_METADATA); + if (metadata == null) + this.metadata = OffsetFetchResponse.NO_METADATA; + else + this.metadata = metadata; } /** @@ -79,11 +82,6 @@ public long offset() { return offset; } - /** - * Get the metadata of the previously consumed record. - * - * @return the metadata or empty string if no metadata - */ public String metadata() { return metadata; } @@ -108,20 +106,21 @@ public boolean equals(Object o) { OffsetAndMetadata that = (OffsetAndMetadata) o; return offset == that.offset && Objects.equals(metadata, that.metadata) && - Objects.equals(leaderEpoch(), that.leaderEpoch()); + Objects.equals(leaderEpoch, that.leaderEpoch); } @Override public int hashCode() { - return Objects.hash(offset, metadata, leaderEpoch()); + return Objects.hash(offset, metadata, leaderEpoch); } @Override public String toString() { return "OffsetAndMetadata{" + "offset=" + offset + - ", leaderEpoch=" + leaderEpoch().orElse(null) + + ", leaderEpoch=" + leaderEpoch + ", metadata='" + metadata + '\'' + '}'; } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetCommitCallback.java b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetCommitCallback.java index 1f0f1f3076478..53e8ae7b906a0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetCommitCallback.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/OffsetCommitCallback.java @@ -26,7 +26,6 @@ * A callback interface that the user can implement to trigger custom actions when a commit request completes. The callback * may be executed in any thread calling {@link Consumer#poll(java.time.Duration) poll()}. */ -@FunctionalInterface public interface OffsetCommitCallback { /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java index 58f5fc4d38ea9..8ac4198c70df3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/ShareConsumer.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.annotation.InterfaceStability; -import org.apache.kafka.common.metrics.KafkaMetric; import java.io.Closeable; import java.time.Duration; @@ -32,8 +31,6 @@ import java.util.Set; /** - * A client that consumes records from a Kafka cluster using a share group. - * * @see KafkaShareConsumer * @see MockShareConsumer */ @@ -70,11 +67,6 @@ public interface ShareConsumer extends Closeable { */ void acknowledge(ConsumerRecord record, AcknowledgeType type); - /** - * @see KafkaShareConsumer#acknowledge(String, int, long, AcknowledgeType) - */ - void acknowledge(String topic, int partition, long offset, AcknowledgeType type); - /** * @see KafkaShareConsumer#commitSync() */ @@ -96,7 +88,7 @@ public interface ShareConsumer extends Closeable { void setAcknowledgementCommitCallback(AcknowledgementCommitCallback callback); /** - * @see KafkaShareConsumer#clientInstanceId(Duration) + * See {@link KafkaShareConsumer#clientInstanceId(Duration)}} */ Uuid clientInstanceId(Duration timeout); @@ -105,16 +97,6 @@ public interface ShareConsumer extends Closeable { */ Map metrics(); - /** - * @see KafkaShareConsumer#registerMetricForSubscription(KafkaMetric) - */ - void registerMetricForSubscription(KafkaMetric metric); - - /** - * @see KafkaShareConsumer#unregisterMetricFromSubscription(KafkaMetric) - */ - void unregisterMetricFromSubscription(KafkaMetric metric); - /** * @see KafkaShareConsumer#close() */ diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java index a07e12a518abb..9860d2f58901f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinator.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; @@ -68,6 +67,7 @@ import org.apache.kafka.common.telemetry.internals.ClientTelemetryProvider; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.utils.ExponentialBackoff; +import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; @@ -84,10 +84,7 @@ import java.util.Objects; import java.util.Optional; import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; - -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.DEFAULT; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.LEAVE_GROUP; +import java.util.concurrent.atomic.AtomicReference; /** * AbstractCoordinator implements group management for a single group member by interacting with @@ -138,7 +135,6 @@ public boolean hasNotJoinedGroup() { private final GroupCoordinatorMetrics sensors; private final GroupRebalanceConfig rebalanceConfig; private final Optional clientTelemetryReporter; - private final Optional> heartbeatThreadSupplier; protected final Time time; protected final ConsumerNetworkClient client; @@ -148,7 +144,7 @@ public boolean hasNotJoinedGroup() { private String rejoinReason = ""; private boolean rejoinNeeded = true; private boolean needsJoinPrepare = true; - private BaseHeartbeatThread heartbeatThread = null; + private HeartbeatThread heartbeatThread = null; private RequestFuture joinFuture = null; private RequestFuture findCoordinatorFuture = null; private volatile RuntimeException fatalFindCoordinatorException = null; @@ -169,7 +165,7 @@ public AbstractCoordinator(GroupRebalanceConfig rebalanceConfig, Metrics metrics, String metricGrpPrefix, Time time) { - this(rebalanceConfig, logContext, client, metrics, metricGrpPrefix, time, Optional.empty(), Optional.empty()); + this(rebalanceConfig, logContext, client, metrics, metricGrpPrefix, time, Optional.empty()); } public AbstractCoordinator(GroupRebalanceConfig rebalanceConfig, @@ -178,8 +174,7 @@ public AbstractCoordinator(GroupRebalanceConfig rebalanceConfig, Metrics metrics, String metricGrpPrefix, Time time, - Optional clientTelemetryReporter, - Optional> heartbeatThreadSupplier) { + Optional clientTelemetryReporter) { Objects.requireNonNull(rebalanceConfig.groupId, "Expected a non-null group id for coordinator construction"); this.rebalanceConfig = rebalanceConfig; @@ -194,7 +189,6 @@ public AbstractCoordinator(GroupRebalanceConfig rebalanceConfig, this.heartbeat = new Heartbeat(rebalanceConfig, time); this.sensors = new GroupCoordinatorMetrics(metrics, metricGrpPrefix); this.clientTelemetryReporter = clientTelemetryReporter; - this.heartbeatThreadSupplier = heartbeatThreadSupplier; } /** @@ -367,7 +361,7 @@ protected synchronized boolean rejoinNeededOrPending() { */ protected synchronized void pollHeartbeat(long now) { if (heartbeatThread != null) { - if (heartbeatThread.isFailed()) { + if (heartbeatThread.hasFailed()) { // set the heartbeat thread to null and raise an exception. If the user catches it, // the next call to ensureActiveGroup() will spawn a new heartbeat thread. RuntimeException cause = heartbeatThread.failureCause(); @@ -387,7 +381,7 @@ protected synchronized long timeToNextHeartbeat(long now) { // we don't need to send heartbeats if (state.hasNotJoinedGroup()) return Long.MAX_VALUE; - if (heartbeatThread != null && heartbeatThread.isFailed()) { + if (heartbeatThread != null && heartbeatThread.hasFailed()) { // if an exception occurs in the heartbeat thread, raise it. throw heartbeatThread.failureCause(); } @@ -423,13 +417,13 @@ boolean ensureActiveGroup(final Timer timer) { private synchronized void startHeartbeatThreadIfNeeded() { if (heartbeatThread == null) { - heartbeatThread = heartbeatThreadSupplier.orElse(HeartbeatThread::new).get(); + heartbeatThread = new HeartbeatThread(); heartbeatThread.start(); } } private void closeHeartbeatThread() { - BaseHeartbeatThread thread; + HeartbeatThread thread; synchronized (this) { if (heartbeatThread == null) return; @@ -1120,21 +1114,23 @@ private boolean isProtocolTypeInconsistent(String protocolType) { */ @Override public final void close() { - close(time.timer(0), DEFAULT); + close(time.timer(0)); } /** * @throws KafkaException if the rebalance callback throws exception */ - protected void close(Timer timer, CloseOptions.GroupMembershipOperation membershipOperation) { + protected void close(Timer timer) { try { closeHeartbeatThread(); } finally { // Synchronize after closing the heartbeat thread since heartbeat thread // needs this lock to complete and terminate after close flag is set. synchronized (this) { - onLeavePrepare(); - maybeLeaveGroup(membershipOperation, "the consumer is being closed"); + if (rebalanceConfig.leaveGroupOnClose) { + onLeavePrepare(); + maybeLeaveGroup("the consumer is being closed"); + } // At this point, there may be pending commits (async commits or sync commits that were // interrupted using wakeup) and the leave group request which have been queued, but not @@ -1155,22 +1151,26 @@ protected void handlePollTimeoutExpiry() { "either by increasing max.poll.interval.ms or by reducing the maximum size of batches " + "returned in poll() with max.poll.records."); - maybeLeaveGroup(DEFAULT, "consumer poll timeout has expired."); + maybeLeaveGroup("consumer poll timeout has expired."); } /** - * Sends LeaveGroupRequest and logs the {@code leaveReason}, unless this member is using static membership - * with the default consumer group membership operation, or is already not part of the group (i.e., does not have a - * valid member ID, is in the UNJOINED state, or the coordinator is unknown). + * Sends LeaveGroupRequest and logs the {@code leaveReason}, unless this member is using static membership or is already + * not part of the group (ie does not have a valid member id, is in the UNJOINED state, or the coordinator is unknown). * - * @param membershipOperation the operation on consumer group membership that the consumer will perform when closing * @param leaveReason the reason to leave the group for logging * @throws KafkaException if the rebalance callback throws exception */ - public synchronized RequestFuture maybeLeaveGroup(CloseOptions.GroupMembershipOperation membershipOperation, String leaveReason) { + public synchronized RequestFuture maybeLeaveGroup(String leaveReason) { RequestFuture future = null; - if (rebalanceConfig.leaveGroupOnClose && shouldSendLeaveGroupRequest(membershipOperation)) { + // Starting from 2.3, only dynamic members will send LeaveGroupRequest to the broker, + // consumer with valid group.instance.id is viewed as static member that never sends LeaveGroup, + // and the membership expiration is only controlled by session timeout. + if (isDynamicMember() && !coordinatorUnknown() && + state != MemberState.UNJOINED && generation.hasMemberId()) { + // this is a minimal effort attempt to leave the group. we do not + // attempt any resending if the request fails or times out. log.info("Member {} sending LeaveGroup request to coordinator {} due to {}", generation.memberId, coordinator, leaveReason); LeaveGroupRequest.Builder request = new LeaveGroupRequest.Builder( @@ -1187,14 +1187,6 @@ public synchronized RequestFuture maybeLeaveGroup(CloseOptions.GroupMember return future; } - private boolean shouldSendLeaveGroupRequest(CloseOptions.GroupMembershipOperation membershipOperation) { - if (!coordinatorUnknown() && state != MemberState.UNJOINED && generation.hasMemberId()) { - return membershipOperation == LEAVE_GROUP || (isDynamicMember() && membershipOperation == DEFAULT); - } else { - return false; - } - } - protected boolean isDynamicMember() { return rebalanceConfig.groupInstanceId.isEmpty(); } @@ -1338,13 +1330,6 @@ protected final Meter createMeter(Metrics metrics, String groupName, String base String.format("The total number of %s", descriptiveName))); } - /** - * Visible for testing. - */ - protected BaseHeartbeatThread heartbeatThread() { - return heartbeatThread; - } - private class GroupCoordinatorMetrics { public final String metricGrpName; @@ -1451,40 +1436,56 @@ public GroupCoordinatorMetrics(Metrics metrics, String metricGrpPrefix) { } } - private class HeartbeatThread extends BaseHeartbeatThread { + private class HeartbeatThread extends KafkaThread implements AutoCloseable { + private boolean enabled = false; + private boolean closed = false; + private final AtomicReference failed = new AtomicReference<>(null); private HeartbeatThread() { super(HEARTBEAT_THREAD_PREFIX + (rebalanceConfig.groupId.isEmpty() ? "" : " | " + rebalanceConfig.groupId), true); } - @Override public void enable() { synchronized (AbstractCoordinator.this) { log.debug("Enabling heartbeat thread"); - super.enable(); + this.enabled = true; heartbeat.resetTimeouts(); AbstractCoordinator.this.notify(); } } - @Override + public void disable() { + synchronized (AbstractCoordinator.this) { + log.debug("Disabling heartbeat thread"); + this.enabled = false; + } + } + public void close() { synchronized (AbstractCoordinator.this) { - super.close(); + this.closed = true; AbstractCoordinator.this.notify(); } } + private boolean hasFailed() { + return failed.get() != null; + } + + private RuntimeException failureCause() { + return failed.get(); + } + @Override public void run() { try { log.debug("Heartbeat thread started"); while (true) { synchronized (AbstractCoordinator.this) { - if (isClosed()) + if (closed) return; - if (!isEnabled()) { + if (!enabled) { AbstractCoordinator.this.wait(); continue; } @@ -1492,7 +1493,7 @@ public void run() { // we do not need to heartbeat we are not part of a group yet; // also if we already have fatal error, the client will be // crashed soon, hence we do not need to continue heartbeating either - if (state.hasNotJoinedGroup() || isFailed()) { + if (state.hasNotJoinedGroup() || hasFailed()) { disable(); continue; } @@ -1546,7 +1547,7 @@ public void onFailure(RuntimeException e) { heartbeat.receiveHeartbeat(); } else if (e instanceof FencedInstanceIdException) { log.error("Caught fenced group.instance.id {} error in heartbeat thread", rebalanceConfig.groupInstanceId); - setFailureCause(e); + heartbeatThread.failed.set(e); } else { heartbeat.failHeartbeat(); // wake up the thread if it's sleeping to reschedule the heartbeat @@ -1560,27 +1561,26 @@ public void onFailure(RuntimeException e) { } } catch (AuthenticationException e) { log.error("An authentication error occurred in the heartbeat thread", e); - setFailureCause(e); + this.failed.set(e); } catch (GroupAuthorizationException e) { log.error("A group authorization error occurred in the heartbeat thread", e); - setFailureCause(e); + this.failed.set(e); } catch (InterruptedException | InterruptException e) { Thread.interrupted(); log.error("Unexpected interrupt received in heartbeat thread", e); - setFailureCause(new RuntimeException(e)); + this.failed.set(new RuntimeException(e)); } catch (Throwable e) { log.error("Heartbeat thread failed due to unexpected error", e); if (e instanceof RuntimeException) - setFailureCause((RuntimeException) e); + this.failed.set((RuntimeException) e); else - setFailureCause(new RuntimeException(e)); + this.failed.set(new RuntimeException(e)); } finally { log.debug("Heartbeat thread has closed"); - synchronized (AbstractCoordinator.this) { - super.close(); - } + this.closed = true; } } + } protected static class Generation { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java index e3e52f7525dd9..5083f7733b81b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractFetch.java @@ -45,7 +45,6 @@ import java.io.Closeable; import java.time.Duration; -import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -55,6 +54,7 @@ import java.util.Optional; import java.util.Set; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.FetchUtils.requestMetadataUpdate; @@ -147,7 +147,6 @@ public boolean hasAvailableFetches() { * @param data {@link FetchSessionHandler.FetchRequestData} that represents the session data * @param resp {@link ClientResponse} from which the {@link FetchResponse} will be retrieved */ - @SuppressWarnings("NPathComplexity") protected void handleFetchSuccess(final Node fetchTarget, final FetchSessionHandler.FetchRequestData data, final ClientResponse resp) { @@ -175,8 +174,6 @@ protected void handleFetchSuccess(final Node fetchTarget, final Set partitions = new HashSet<>(responseData.keySet()); final FetchMetricsAggregator metricAggregator = new FetchMetricsAggregator(metricsManager, partitions); - boolean needsWakeup = true; - Map partitionsWithUpdatedLeaderInfo = new HashMap<>(); for (Map.Entry entry : responseData.entrySet()) { TopicPartition partition = entry.getKey(); @@ -221,26 +218,16 @@ protected void handleFetchSuccess(final Node fetchTarget, partition, partitionData, metricAggregator, - fetchOffset); + fetchOffset, + requestVersion); fetchBuffer.add(completedFetch); - needsWakeup = false; } - // "Wake" the fetch buffer on any response, even if it's empty, to allow the consumer to not block - // indefinitely waiting on the fetch buffer to get data. - if (needsWakeup) - fetchBuffer.wakeup(); - if (!partitionsWithUpdatedLeaderInfo.isEmpty()) { - List leaderNodes = new ArrayList<>(); - - for (FetchResponseData.NodeEndpoint e : response.data().nodeEndpoints()) { - Node node = new Node(e.nodeId(), e.host(), e.port(), e.rack()); - - if (!node.equals(Node.noNode())) - leaderNodes.add(node); - } - + List leaderNodes = response.data().nodeEndpoints().stream() + .map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack())) + .filter(e -> !e.equals(Node.noNode())) + .collect(Collectors.toList()); Set updatedPartitions = metadata.updatePartitionLeadership(partitionsWithUpdatedLeaderInfo, leaderNodes); updatedPartitions.forEach( tp -> { @@ -411,7 +398,7 @@ protected Map prepareCloseFetchSessi fetchable.put(fetchTarget, sessionHandler.newBuilder()); }); - return convert(fetchable); + return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build())); } /** @@ -484,21 +471,7 @@ protected Map prepareFetchRequests() } } - return convert(fetchable); - } - - /** - * This method converts {@link FetchSessionHandler.Builder} instances to - * {@link FetchSessionHandler.FetchRequestData} instances. It intentionally forgoes use of the Java Collections - * Streams API to reduce overhead in the critical network path. - */ - private Map convert(Map fetchable) { - Map map = new HashMap<>(fetchable.size()); - - for (Map.Entry entry : fetchable.entrySet()) - map.put(entry.getKey(), entry.getValue().build()); - - return map; + return fetchable.entrySet().stream().collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().build())); } /** diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java index 3998d672006a3..608434e524c15 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractHeartbeatRequestManager.java @@ -25,6 +25,8 @@ import org.apache.kafka.clients.consumer.internals.metrics.HeartbeatMetricsManager; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.AbstractResponse; import org.apache.kafka.common.utils.LogContext; @@ -63,9 +65,8 @@ public abstract class AbstractHeartbeatRequestManager membershipManager(); - /** - * @return the member should send leave heartbeat immediately or not - */ - protected abstract boolean shouldSendLeaveHeartbeatNow(); - /** * Generate a heartbeat request to leave the group if the state is still LEAVING when this is * called to close the consumer. @@ -229,7 +224,7 @@ public NetworkClientDelegate.PollResult poll(long currentTimeMs) { public PollResult pollOnClose(long currentTimeMs) { if (membershipManager().isLeavingGroup()) { NetworkClientDelegate.UnsentRequest request = makeHeartbeatRequest(currentTimeMs, true); - return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs(), Collections.singletonList(request)); + return new NetworkClientDelegate.PollResult(heartbeatRequestState.heartbeatIntervalMs, Collections.singletonList(request)); } return EMPTY; } @@ -328,14 +323,31 @@ private void onFailure(final Throwable exception, final long responseTimeMs) { heartbeatRequestState.remainingBackoffMs(responseTimeMs), exception.getMessage()); logger.debug(message); - } else if (!handleSpecificFailure(exception)) { + } else { logger.error("{} failed due to fatal error: {}", heartbeatRequestName(), exception.getMessage()); - handleFatalFailure(exception); + if (isHBApiUnsupportedErrorMsg(exception)) { + // This is expected to be the case where building the request fails because the node does not support + // the API. Propagate custom message. + handleFatalFailure(new UnsupportedVersionException(CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG, exception)); + } else { + // This is the case where building the request fails even though the node supports the API (ex. + // required version 1 not available when regex in use). + handleFatalFailure(exception); + } } // Notify the group manager about the failure after all errors have been handled and propagated. membershipManager().onHeartbeatFailure(exception instanceof RetriableException); } + /*** + * @return True if the exception is the UnsupportedVersion generated on the client, before sending the request, + * when checking if the API is available on the broker. + */ + private boolean isHBApiUnsupportedErrorMsg(Throwable exception) { + return exception instanceof UnsupportedVersionException && + exception.getMessage().equals("The node does not support " + ApiKeys.CONSUMER_GROUP_HEARTBEAT); + } + private void onResponse(final R response, final long currentTimeMs) { if (errorForResponse(response) == Errors.NONE) { heartbeatRequestState.updateHeartbeatIntervalMs(heartbeatIntervalForResponse(response)); @@ -406,6 +418,14 @@ private void onErrorResponse(final R response, final long currentTimeMs) { handleFatalFailure(error.exception(errorMessage)); break; + case UNSUPPORTED_VERSION: + // Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate + // custom message for it. Note that the case where the protocol is not supported at all should fail + // on the client side when building the request and checking supporting APIs (handled on onFailure). + logger.error("{} failed due to {}: {}", heartbeatRequestName(), error, errorMessage); + handleFatalFailure(error.exception(CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG)); + break; + case FENCED_MEMBER_EPOCH: message = String.format("%s failed for member %s because epoch %s is fenced.", heartbeatRequestName(), membershipManager().memberId(), membershipManager().memberEpoch()); @@ -431,7 +451,7 @@ private void onErrorResponse(final R response, final long currentTimeMs) { break; default: - if (!handleSpecificExceptionInResponse(response, currentTimeMs)) { + if (!handleSpecificError(response, currentTimeMs)) { // If the manager receives an unknown error - there could be a bug in the code or a new error code logger.error("{} failed due to unexpected error {}: {}", heartbeatRequestName(), error, errorMessage); handleFatalFailure(error.exception(errorMessage)); @@ -455,25 +475,15 @@ protected void handleFatalFailure(Throwable error) { membershipManager().transitionToFatal(); } - /** - * Error handling specific failure to a group type when sending the request - * and no response has been received. - * - * @param exception The exception thrown building the request - * @return true if the error was handled, else false - */ - public boolean handleSpecificFailure(Throwable exception) { - return false; - } /** - * Error handling specific response exception to a group type. + * Error handling specific to a group type. * * @param response The heartbeat response * @param currentTimeMs Current time * @return true if the error was handled, else false */ - public boolean handleSpecificExceptionInResponse(final R response, final long currentTimeMs) { + public boolean handleSpecificError(final R response, final long currentTimeMs) { return false; } @@ -519,4 +529,85 @@ public boolean handleSpecificExceptionInResponse(final R response, final long cu * @return The heartbeat interval */ public abstract long heartbeatIntervalForResponse(R response); + + /** + * Represents the state of a heartbeat request, including logic for timing, retries, and exponential backoff. The + * object extends {@link RequestState} to enable exponential backoff and duplicated request handling. The two fields + * that it holds are: + */ + static class HeartbeatRequestState extends RequestState { + /** + * heartbeatTimer tracks the time since the last heartbeat was sent + */ + private final Timer heartbeatTimer; + + /** + * The heartbeat interval which is acquired/updated through the heartbeat request + */ + private long heartbeatIntervalMs; + + HeartbeatRequestState( + final LogContext logContext, + final Time time, + final long heartbeatIntervalMs, + final long retryBackoffMs, + final long retryBackoffMaxMs, + final double jitter) { + super(logContext, HeartbeatRequestState.class.getName(), retryBackoffMs, 2, retryBackoffMaxMs, jitter); + this.heartbeatIntervalMs = heartbeatIntervalMs; + this.heartbeatTimer = time.timer(heartbeatIntervalMs); + } + + private void update(final long currentTimeMs) { + this.heartbeatTimer.update(currentTimeMs); + } + + void resetTimer() { + this.heartbeatTimer.reset(heartbeatIntervalMs); + } + + @Override + public String toStringBase() { + return super.toStringBase() + + ", remainingMs=" + heartbeatTimer.remainingMs() + + ", heartbeatIntervalMs=" + heartbeatIntervalMs; + } + + /** + * Check if a heartbeat request should be sent on the current time. A heartbeat should be + * sent if the heartbeat timer has expired, backoff has expired, and there is no request + * in-flight. + */ + @Override + public boolean canSendRequest(final long currentTimeMs) { + update(currentTimeMs); + return heartbeatTimer.isExpired() && super.canSendRequest(currentTimeMs); + } + + long timeToNextHeartbeatMs(final long currentTimeMs) { + if (heartbeatTimer.isExpired()) { + return this.remainingBackoffMs(currentTimeMs); + } + return heartbeatTimer.remainingMs(); + } + + @Override + public void onFailedAttempt(final long currentTimeMs) { + // Reset timer to allow sending HB after a failure without waiting for the interval. + // After a failure, a next HB may be needed with backoff (ex. errors that lead to + // retries, like coordinator load error), or immediately (ex. errors that lead to + // rejoining, like fencing errors). + heartbeatTimer.reset(0); + super.onFailedAttempt(currentTimeMs); + } + + private void updateHeartbeatIntervalMs(final long heartbeatIntervalMs) { + if (this.heartbeatIntervalMs == heartbeatIntervalMs) { + // no need to update the timer if the interval hasn't changed + return; + } + this.heartbeatIntervalMs = heartbeatIntervalMs; + this.heartbeatTimer.updateAndReset(heartbeatIntervalMs); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java index ffe01c089e7bf..71b61a26d3fc2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractMembershipManager.java @@ -16,11 +16,11 @@ */ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceMetricsManager; import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.requests.AbstractResponse; @@ -43,6 +43,7 @@ import java.util.TreeSet; import java.util.concurrent.CompletableFuture; import java.util.concurrent.atomic.AtomicBoolean; +import java.util.stream.Collectors; import static java.util.Collections.unmodifiableList; @@ -64,6 +65,12 @@ public abstract class AbstractMembershipManager impl */ static final Utils.TopicPartitionComparator TOPIC_PARTITION_COMPARATOR = new Utils.TopicPartitionComparator(); + /** + * TopicIdPartition comparator based on topic name and partition (ignoring topic ID while sorting, + * as this is sorted mainly for logging purposes). + */ + static final Utils.TopicIdPartitionComparator TOPIC_ID_PARTITION_COMPARATOR = new Utils.TopicIdPartitionComparator(); + /** * Group ID of the consumer group the member will be part of, provided when creating the current * membership manager. @@ -123,7 +130,7 @@ public abstract class AbstractMembershipManager impl * partition assigned, or revoked), but it is not present the Metadata cache at that moment. * The cache is cleared when the subscription changes ({@link #transitionToJoining()}, the * member fails ({@link #transitionToFatal()} or leaves the group - * ({@link #leaveGroup()}/{@link #leaveGroupOnClose(CloseOptions.GroupMembershipOperation)}). + * ({@link #leaveGroup()}/{@link #leaveGroupOnClose()}). */ private final Map assignedTopicNamesCache; @@ -151,8 +158,8 @@ public abstract class AbstractMembershipManager impl /** * If the member is currently leaving the group after a call to {@link #leaveGroup()} or - * {@link #leaveGroupOnClose(CloseOptions.GroupMembershipOperation)}, this will have a future that will complete when the ongoing leave operation - * completes (callbacks executed and heartbeat request to leave is sent out). This will be empty if the + * {@link #leaveGroupOnClose()}, this will have a future that will complete when the ongoing leave operation + * completes (callbacks executed and heartbeat request to leave is sent out). This will be empty is the * member is not leaving. */ private Optional> leaveGroupInProgress = Optional.empty(); @@ -194,14 +201,6 @@ public abstract class AbstractMembershipManager impl private final boolean autoCommitEnabled; - /** - * Indicate the operation on consumer group membership that the consumer will perform when leaving the group. - * The property should remain {@code GroupMembershipOperation.DEFAULT} until the consumer is closing. - * - * @see CloseOptions.GroupMembershipOperation - */ - protected CloseOptions.GroupMembershipOperation leaveGroupOperation = CloseOptions.GroupMembershipOperation.DEFAULT; - AbstractMembershipManager(String groupId, SubscriptionState subscriptions, ConsumerMetadata metadata, @@ -276,15 +275,6 @@ public int memberEpoch() { return memberEpoch; } - /** - * @return the operation the consumer will perform on leaving the group. - * - * @see CloseOptions.GroupMembershipOperation - */ - public CloseOptions.GroupMembershipOperation leaveGroupOperation() { - return leaveGroupOperation; - } - /** * Update member info and transition member state based on a successful heartbeat response. * @@ -368,12 +358,9 @@ protected void processAssignmentReceived(Map> assignmen */ private void replaceTargetAssignmentWithNewAssignment(Map> assignment) { currentTargetAssignment.updateWith(assignment).ifPresent(updatedAssignment -> { - log.debug("Member {} updated its target assignment from {} to {}. Member will reconcile it on the next poll.", - memberId, currentTargetAssignment, updatedAssignment); + log.debug("Target assignment updated from {} to {}. Member will reconcile it on the next poll.", + currentTargetAssignment, updatedAssignment); currentTargetAssignment = updatedAssignment; - // Register the assigned topic IDs on the subscription state. - // This will be used to ensure they are included in metadata requests (even though they may not be reconciled yet). - subscriptions.setAssignedTopicIds(currentTargetAssignment.partitions.keySet()); }); } @@ -444,7 +431,7 @@ public void transitionToFatal() { log.error("Member {} with epoch {} transitioned to fatal state", memberId, memberEpoch); notifyEpochChange(Optional.empty()); - if (previousState == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { + if (previousState == MemberState.UNSUBSCRIBED) { log.debug("Member {} with epoch {} got fatal error from the broker but it already " + "left the group, so onPartitionsLost callback won't be triggered.", memberId, memberEpoch); return; @@ -512,10 +499,11 @@ private void clearAssignment() { * @param assignedPartitions Full assignment, to update in the subscription state * @param addedPartitions Newly added partitions */ - private void updateSubscriptionAwaitingCallback(TopicIdPartitionSet assignedPartitions, + private void updateSubscriptionAwaitingCallback(SortedSet assignedPartitions, SortedSet addedPartitions) { - subscriptions.assignFromSubscribedAwaitingCallback(assignedPartitions.topicPartitions(), addedPartitions); - notifyAssignmentChange(assignedPartitions.topicPartitions()); + Set assignedTopicPartitions = toTopicPartitionSet(assignedPartitions); + subscriptions.assignFromSubscribedAwaitingCallback(assignedTopicPartitions, addedPartitions); + notifyAssignmentChange(assignedTopicPartitions); } /** @@ -535,21 +523,17 @@ public void transitionToJoining() { } resetEpoch(); transitionTo(MemberState.JOINING); - log.debug("Member {} will join the group on the next call to poll.", memberId); clearPendingAssignmentsAndLocalNamesCache(); } /** * Transition to {@link MemberState#PREPARE_LEAVING} to release the assignment. Once completed, * transition to {@link MemberState#LEAVING} to send the heartbeat request and leave the group. - * It also sets the membership operation to be performed on close. * This is expected to be invoked when the user calls the {@link Consumer#close()} API. * - * @param membershipOperation the membership operation to be performed on close * @return Future that will complete when the heartbeat to leave the group has been sent out. */ - public CompletableFuture leaveGroupOnClose(CloseOptions.GroupMembershipOperation membershipOperation) { - this.leaveGroupOperation = membershipOperation; + public CompletableFuture leaveGroupOnClose() { return leaveGroup(false); } @@ -613,8 +597,6 @@ protected CompletableFuture leaveGroup(boolean runCallbacks) { clearAssignmentAndLeaveGroup(); }); } else { - log.debug("Member {} attempting to leave has no rebalance callbacks, " + - "so it will clear assignments and transition to send heartbeat to leave group.", memberId); clearAssignmentAndLeaveGroup(); } @@ -705,10 +687,8 @@ public void onHeartbeatRequestGenerated() { transitionTo(MemberState.STABLE); } else { log.debug("Member {} with epoch {} transitioned to {} after a heartbeat was sent " + - "to ack a previous reconciliation. \n" + - "\t\tCurrent assignment: {} \n" + - "\t\tTarget assignment: {}\n", - memberId, memberEpoch, MemberState.RECONCILING, currentAssignment, currentTargetAssignment); + "to ack a previous reconciliation. New assignments are ready to " + + "be reconciled.", memberId, memberEpoch, MemberState.RECONCILING); transitionTo(MemberState.RECONCILING); } } else if (state == MemberState.LEAVING) { @@ -831,14 +811,14 @@ public void maybeReconcile(boolean canCommit) { return; } if (reconciliationInProgress) { - log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. " + - "Assignment {} will be handled in the next reconciliation loop.", currentTargetAssignment); + log.trace("Ignoring reconciliation attempt. Another reconciliation is already in progress. Assignment " + + currentTargetAssignment + " will be handled in the next reconciliation loop."); return; } // Find the subset of the target assignment that can be resolved to topic names, and trigger a metadata update // if some topic IDs are not resolvable. - TopicIdPartitionSet assignedTopicIdPartitions = findResolvableAssignmentAndTriggerMetadataUpdate(); + SortedSet assignedTopicIdPartitions = findResolvableAssignmentAndTriggerMetadataUpdate(); final LocalAssignment resolvedAssignment = new LocalAssignment(currentTargetAssignment.localEpoch, assignedTopicIdPartitions); if (!currentAssignment.isNone() && resolvedAssignment.partitions.equals(currentAssignment.partitions)) { @@ -856,7 +836,7 @@ public void maybeReconcile(boolean canCommit) { // Keep copy of assigned TopicPartitions created from the TopicIdPartitions that are // being reconciled. Needed for interactions with the centralized subscription state that // does not support topic IDs yet, and for the callbacks. - SortedSet assignedTopicPartitions = assignedTopicIdPartitions.toTopicNamePartitionSet(); + SortedSet assignedTopicPartitions = toTopicPartitionSet(assignedTopicIdPartitions); SortedSet ownedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); ownedPartitions.addAll(subscriptions.assignedPartitions()); @@ -933,7 +913,7 @@ long getDeadlineMsForTimeout(final long timeoutMs) { * transition. Note that if any of the 2 callbacks fails, the reconciliation should fail. */ private void revokeAndAssign(LocalAssignment resolvedAssignment, - TopicIdPartitionSet assignedTopicIdPartitions, + SortedSet assignedTopicIdPartitions, SortedSet revokedPartitions, SortedSet addedPartitions) { CompletableFuture revocationResult; @@ -988,7 +968,7 @@ boolean maybeAbortReconciliation() { String reason = rejoinedWhileReconciliationInProgress ? "the member has re-joined the group" : "the member already transitioned out of the reconciling state into " + state; - log.info("Interrupting reconciliation that is not relevant anymore because {}", reason); + log.info("Interrupting reconciliation that is not relevant anymore because " + reason); markReconciliationCompleted(); } return shouldAbort; @@ -1030,6 +1010,15 @@ protected CompletableFuture signalPartitionsLost(Set parti return CompletableFuture.completedFuture(null); } + /** + * Build set of {@link TopicPartition} from the given set of {@link TopicIdPartition}. + */ + protected SortedSet toTopicPartitionSet(SortedSet topicIdPartitions) { + SortedSet result = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + topicIdPartitions.forEach(topicIdPartition -> result.add(topicIdPartition.topicPartition())); + return result; + } + /** * Visible for testing. */ @@ -1063,8 +1052,8 @@ void markReconciliationCompleted() { * * */ - private TopicIdPartitionSet findResolvableAssignmentAndTriggerMetadataUpdate() { - final TopicIdPartitionSet assignmentReadyToReconcile = new TopicIdPartitionSet(); + private SortedSet findResolvableAssignmentAndTriggerMetadataUpdate() { + final SortedSet assignmentReadyToReconcile = new TreeSet<>(TOPIC_ID_PARTITION_COMPARATOR); final HashMap> unresolved = new HashMap<>(currentTargetAssignment.partitions); // Try to resolve topic names from metadata cache or subscription cache, and move @@ -1078,7 +1067,9 @@ private TopicIdPartitionSet findResolvableAssignmentAndTriggerMetadataUpdate() { Optional nameFromMetadata = findTopicNameInGlobalOrLocalCache(topicId); nameFromMetadata.ifPresent(resolvedTopicName -> { // Name resolved, so assignment is ready for reconciliation. - assignmentReadyToReconcile.addAll(topicId, resolvedTopicName, topicPartitions); + topicPartitions.forEach(tp -> + assignmentReadyToReconcile.add(new TopicIdPartition(topicId, tp, resolvedTopicName)) + ); it.remove(); }); } @@ -1134,7 +1125,7 @@ CompletableFuture revokePartitions(Set partitionsToRevoke) // Ensure the set of partitions to revoke are still assigned Set revokedPartitions = new HashSet<>(partitionsToRevoke); revokedPartitions.retainAll(subscriptions.assignedPartitions()); - log.info("Revoking previously assigned partitions {}", revokedPartitions); + log.info("Revoking previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); signalPartitionsBeingRevoked(revokedPartitions); @@ -1188,7 +1179,7 @@ CompletableFuture revokePartitions(Set partitionsToRevoke) * @return Future that will complete when the callback execution completes. */ private CompletableFuture assignPartitions( - TopicIdPartitionSet assignedPartitions, + SortedSet assignedPartitions, SortedSet addedPartitions) { // Update assignment in the subscription state, and ensure that no fetching or positions @@ -1206,7 +1197,7 @@ private CompletableFuture assignPartitions( // returning no records, as no topic partitions are marked as fetchable. In contrast, with the classic consumer, // if the first callback fails but the next one succeeds, polling can still retrieve data. To align with // this behavior, we rely on assignedPartitions to avoid such scenarios. - subscriptions.enablePartitionsAwaitingCallback(assignedPartitions.topicPartitions()); + subscriptions.enablePartitionsAwaitingCallback(toTopicPartitionSet(assignedPartitions)); } else { // Keeping newly added partitions as non-fetchable after the callback failure. // They will be retried on the next reconciliation loop, until it succeeds or the @@ -1220,7 +1211,7 @@ private CompletableFuture assignPartitions( }); // Clear topic names cache, removing topics that are not assigned to the member anymore. - Set assignedTopics = assignedPartitions.topicNames(); + Set assignedTopics = assignedPartitions.stream().map(TopicIdPartition::topic).collect(Collectors.toSet()); assignedTopicNamesCache.values().retainAll(assignedTopics); return result; @@ -1283,14 +1274,14 @@ protected void resetEpoch() { } /** - * Returns the epoch a member uses to join the group. This is group-type-specific. + * Returns the epoch a member uses to join the group. This is group-type specific. * * @return the epoch to join the group */ abstract int joinGroupEpoch(); /** - * Returns the epoch a member uses to leave the group. This is group-type-specific. + * Returns the epoch a member uses to leave the group. This is group-type specific. * * @return the epoch to leave the group */ @@ -1438,13 +1429,16 @@ public LocalAssignment(long localEpoch, Map> partitions } } - public LocalAssignment(long localEpoch, TopicIdPartitionSet topicIdPartitions) { - Objects.requireNonNull(topicIdPartitions); + public LocalAssignment(long localEpoch, SortedSet topicIdPartitions) { this.localEpoch = localEpoch; + this.partitions = new HashMap<>(); if (localEpoch == NONE_EPOCH && !topicIdPartitions.isEmpty()) { throw new IllegalArgumentException("Local epoch must be set if there are partitions"); } - this.partitions = topicIdPartitions.toTopicIdPartitionMap(); + topicIdPartitions.forEach(topicIdPartition -> { + Uuid topicId = topicIdPartition.topicId(); + partitions.computeIfAbsent(topicId, k -> new TreeSet<>()).add(topicIdPartition.partition()); + }); } public String toString() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java index c38b5859f5f59..4ac1513ede52d 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignor.java @@ -965,8 +965,8 @@ private class GeneralAssignmentBuilder extends AbstractAssignmentBuilder { super(partitionsPerTopic, rackInfo, currentAssignment); this.subscriptions = subscriptions; - topic2AllPotentialConsumers = new HashMap<>(partitionsPerTopic.size()); - consumer2AllPotentialTopics = new HashMap<>(subscriptions.size()); + topic2AllPotentialConsumers = new HashMap<>(partitionsPerTopic.keySet().size()); + consumer2AllPotentialTopics = new HashMap<>(subscriptions.keySet().size()); // initialize topic2AllPotentialConsumers and consumer2AllPotentialTopics partitionsPerTopic.keySet().forEach( diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandler.java index b746e1a213591..794be0a67b3b0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandler.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.AcknowledgementCommitCallback; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicIdPartition; import org.slf4j.Logger; @@ -46,7 +45,10 @@ public boolean hasEnteredCallback() { void onComplete(List> acknowledgementsMapList) { final ArrayList exceptions = new ArrayList<>(); acknowledgementsMapList.forEach(acknowledgementsMap -> acknowledgementsMap.forEach((partition, acknowledgements) -> { - KafkaException exception = acknowledgements.getAcknowledgeException(); + Exception exception = null; + if (acknowledgements.getAcknowledgeErrorCode() != null) { + exception = acknowledgements.getAcknowledgeErrorCode().exception(); + } Set offsets = acknowledgements.getAcknowledgementsTypeMap().keySet(); Set offsetsCopy = Collections.unmodifiableSet(offsets); enteredCallback = true; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java index 5bce77651b9c9..410f8478d4c29 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Acknowledgements.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.AcknowledgeType; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.protocol.Errors; import java.util.ArrayList; @@ -36,11 +35,8 @@ public class Acknowledgements { // The acknowledgements keyed by offset. If the record is a gap, the AcknowledgeType will be null. private final Map acknowledgements; - // When the broker responds to the acknowledgements, this is the exception thrown. - private KafkaException acknowledgeException; - - // Set when the broker has responded to the acknowledgements. - private boolean completed; + // When the broker responds to the acknowledgements, this is the error code returned. + private Errors acknowledgeErrorCode; public static Acknowledgements empty() { return new Acknowledgements(new TreeMap<>()); @@ -48,8 +44,6 @@ public static Acknowledgements empty() { private Acknowledgements(Map acknowledgements) { this.acknowledgements = acknowledgements; - this.acknowledgeException = null; - this.completed = false; } /** @@ -121,26 +115,25 @@ public int size() { * @return Whether the acknowledgements were sent to the broker and a response received */ public boolean isCompleted() { - return completed; + return acknowledgeErrorCode != null; } /** - * Completes the acknowledgements when the response has been received from the broker. + * Set the acknowledgement error code when the response has been received from the broker. * - * @param acknowledgeException the exception (will be null if successful) + * @param acknowledgeErrorCode the error code */ - public void complete(KafkaException acknowledgeException) { - this.acknowledgeException = acknowledgeException; - completed = true; + public void setAcknowledgeErrorCode(Errors acknowledgeErrorCode) { + this.acknowledgeErrorCode = acknowledgeErrorCode; } /** - * Get the acknowledgement exception when the response has been received from the broker. + * Get the acknowledgement error code when the response has been received from the broker. * * @return the error code */ - public KafkaException getAcknowledgeException() { - return acknowledgeException; + public Errors getAcknowledgeErrorCode() { + return acknowledgeErrorCode; } /** @@ -185,7 +178,7 @@ public List getAcknowledgementBatches() { currentBatch.acknowledgeTypes().add(ACKNOWLEDGE_TYPE_GAP); } } - List optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch); + List optimalBatches = maybeOptimiseAcknowledgementTypes(currentBatch); optimalBatches.forEach(batch -> { if (canOptimiseForSingleAcknowledgeType(batch)) { @@ -204,7 +197,7 @@ public List getAcknowledgementBatches() { */ private AcknowledgementBatch maybeCreateNewBatch(AcknowledgementBatch currentBatch, Long nextOffset, List batches) { if (nextOffset != currentBatch.lastOffset() + 1) { - List optimalBatches = maybeOptimiseAcknowledgeTypes(currentBatch); + List optimalBatches = maybeOptimiseAcknowledgementTypes(currentBatch); optimalBatches.forEach(batch -> { if (canOptimiseForSingleAcknowledgeType(batch)) { @@ -228,7 +221,7 @@ private AcknowledgementBatch maybeCreateNewBatch(AcknowledgementBatch currentBat * whose count exceeds the default value. In this case, the batch is split into 2 such that the * batch with the continuous records has only 1 acknowledge type in its array. */ - private List maybeOptimiseAcknowledgeTypes(AcknowledgementBatch currentAcknowledgeBatch) { + private List maybeOptimiseAcknowledgementTypes(AcknowledgementBatch currentAcknowledgeBatch) { List batches = new ArrayList<>(); if (currentAcknowledgeBatch == null) return batches; @@ -308,10 +301,10 @@ private boolean canOptimiseForSingleAcknowledgeType(AcknowledgementBatch acknowl public String toString() { StringBuilder sb = new StringBuilder("Acknowledgements("); sb.append(acknowledgements); - sb.append(", acknowledgeException="); - sb.append(acknowledgeException != null ? Errors.forException(acknowledgeException) : "null"); - sb.append(", completed="); - sb.append(completed); + if (acknowledgeErrorCode != null) { + sb.append(", errorCode="); + sb.append(acknowledgeErrorCode.code()); + } sb.append(")"); return sb.toString(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java index 938ae909027d0..38a4bd2d0cc20 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumer.java @@ -21,7 +21,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.KafkaClient; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; @@ -64,12 +63,6 @@ import org.apache.kafka.clients.consumer.internals.events.ResumePartitionsEvent; import org.apache.kafka.clients.consumer.internals.events.SeekUnvalidatedEvent; import org.apache.kafka.clients.consumer.internals.events.StopFindCoordinatorOnCloseEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnAllTasksLostCallbackCompletedEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnAllTasksLostCallbackNeededEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksAssignedCallbackCompletedEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksAssignedCallbackNeededEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksRevokedCallbackCompletedEvent; -import org.apache.kafka.clients.consumer.internals.events.StreamsOnTasksRevokedCallbackNeededEvent; import org.apache.kafka.clients.consumer.internals.events.SyncCommitEvent; import org.apache.kafka.clients.consumer.internals.events.TopicMetadataEvent; import org.apache.kafka.clients.consumer.internals.events.TopicPatternSubscriptionChangeEvent; @@ -78,7 +71,6 @@ import org.apache.kafka.clients.consumer.internals.events.UnsubscribeEvent; import org.apache.kafka.clients.consumer.internals.events.UpdatePatternSubscriptionEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; -import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.RebalanceCallbackMetricsManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.IsolationLevel; @@ -144,7 +136,7 @@ import static java.util.Objects.requireNonNull; import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.configuredConsumerInterceptors; @@ -198,18 +190,6 @@ public void process(final BackgroundEvent event) { process((ConsumerRebalanceListenerCallbackNeededEvent) event); break; - case STREAMS_ON_TASKS_REVOKED_CALLBACK_NEEDED: - processStreamsOnTasksRevokedCallbackNeededEvent((StreamsOnTasksRevokedCallbackNeededEvent) event); - break; - - case STREAMS_ON_TASKS_ASSIGNED_CALLBACK_NEEDED: - processStreamsOnTasksAssignedCallbackNeededEvent((StreamsOnTasksAssignedCallbackNeededEvent) event); - break; - - case STREAMS_ON_ALL_TASKS_LOST_CALLBACK_NEEDED: - processStreamsOnAllTasksLostCallbackNeededEvent((StreamsOnAllTasksLostCallbackNeededEvent) event); - break; - default: throw new IllegalArgumentException("Background event type " + event.type() + " was not expected"); @@ -232,63 +212,12 @@ private void process(final ConsumerRebalanceListenerCallbackNeededEvent event) { throw invokedEvent.error().get(); } } - - private void processStreamsOnTasksRevokedCallbackNeededEvent(final StreamsOnTasksRevokedCallbackNeededEvent event) { - StreamsOnTasksRevokedCallbackCompletedEvent invokedEvent = invokeOnTasksRevokedCallback(event.activeTasksToRevoke(), event.future()); - applicationEventHandler.add(invokedEvent); - if (invokedEvent.error().isPresent()) { - throw invokedEvent.error().get(); - } - } - - private void processStreamsOnTasksAssignedCallbackNeededEvent(final StreamsOnTasksAssignedCallbackNeededEvent event) { - StreamsOnTasksAssignedCallbackCompletedEvent invokedEvent = invokeOnTasksAssignedCallback(event.assignment(), event.future()); - applicationEventHandler.add(invokedEvent); - if (invokedEvent.error().isPresent()) { - throw invokedEvent.error().get(); - } - } - - private void processStreamsOnAllTasksLostCallbackNeededEvent(final StreamsOnAllTasksLostCallbackNeededEvent event) { - StreamsOnAllTasksLostCallbackCompletedEvent invokedEvent = invokeOnAllTasksLostCallback(event.future()); - applicationEventHandler.add(invokedEvent); - if (invokedEvent.error().isPresent()) { - throw invokedEvent.error().get(); - } - } - - private StreamsOnTasksRevokedCallbackCompletedEvent invokeOnTasksRevokedCallback(final Set activeTasksToRevoke, - final CompletableFuture future) { - final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksRevoked(activeTasksToRevoke)); - final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task revocation callback throws an error")); - return new StreamsOnTasksRevokedCallbackCompletedEvent(future, error); - } - - private StreamsOnTasksAssignedCallbackCompletedEvent invokeOnTasksAssignedCallback(final StreamsRebalanceData.Assignment assignment, - final CompletableFuture future) { - final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeTasksAssigned(assignment)); - final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "Task assignment callback throws an error")); - return new StreamsOnTasksAssignedCallbackCompletedEvent(future, error); - } - - private StreamsOnAllTasksLostCallbackCompletedEvent invokeOnAllTasksLostCallback(final CompletableFuture future) { - final Optional exceptionFromCallback = Optional.ofNullable(streamsRebalanceListenerInvoker().invokeAllTasksLost()); - final Optional error = exceptionFromCallback.map(e -> ConsumerUtils.maybeWrapAsKafkaException(e, "All tasks lost callback throws an error")); - return new StreamsOnAllTasksLostCallbackCompletedEvent(future, error); - } - - private StreamsRebalanceListenerInvoker streamsRebalanceListenerInvoker() { - return streamsRebalanceListenerInvoker.orElseThrow( - () -> new IllegalStateException("Background event processor was not created to be used with Streams " + - "rebalance protocol events")); - } } private final ApplicationEventHandler applicationEventHandler; private final Time time; private final AtomicReference> groupMetadata = new AtomicReference<>(Optional.empty()); - private final AsyncConsumerMetrics asyncConsumerMetrics; - private final KafkaConsumerMetrics kafkaConsumerMetrics; + private final AsyncConsumerMetrics kafkaConsumerMetrics; private Logger log; private final String clientId; private final BlockingQueue backgroundEventQueue; @@ -330,7 +259,6 @@ private StreamsRebalanceListenerInvoker streamsRebalanceListenerInvoker() { private final WakeupTrigger wakeupTrigger = new WakeupTrigger(); private final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker; private final ConsumerRebalanceListenerInvoker rebalanceListenerInvoker; - private final Optional streamsRebalanceListenerInvoker; // Last triggered async commit future. Used to wait until all previous async commits are completed. // We only need to keep track of the last one, since they are guaranteed to complete in order. private CompletableFuture> lastPendingAsyncCommit = null; @@ -352,10 +280,9 @@ public void onGroupAssignmentUpdated(Set partitions) { } }; - public AsyncKafkaConsumer(final ConsumerConfig config, - final Deserializer keyDeserializer, - final Deserializer valueDeserializer, - final Optional streamsRebalanceData) { + AsyncKafkaConsumer(final ConsumerConfig config, + final Deserializer keyDeserializer, + final Deserializer valueDeserializer) { this( config, keyDeserializer, @@ -365,13 +292,11 @@ public AsyncKafkaConsumer(final ConsumerConfig config, CompletableEventReaper::new, FetchCollector::new, ConsumerMetadata::new, - new LinkedBlockingQueue<>(), - streamsRebalanceData + new LinkedBlockingQueue<>() ); } // Visible for testing - @SuppressWarnings({"this-escape"}) AsyncKafkaConsumer(final ConsumerConfig config, final Deserializer keyDeserializer, final Deserializer valueDeserializer, @@ -380,8 +305,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, final CompletableEventReaperFactory backgroundEventReaperFactory, final FetchCollectorFactory fetchCollectorFactory, final ConsumerMetadataFactory metadataFactory, - final LinkedBlockingQueue backgroundEventQueue, - final Optional streamsRebalanceData) { + final LinkedBlockingQueue backgroundEventQueue) { try { GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, @@ -400,18 +324,17 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); List> interceptorList = configuredConsumerInterceptors(config); - this.interceptors = new ConsumerInterceptors<>(interceptorList, metrics); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); + this.interceptors = new ConsumerInterceptors<>(interceptorList); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners(metrics.reporters(), interceptorList, - Arrays.asList(deserializers.keyDeserializer(), deserializers.valueDeserializer())); + Arrays.asList(deserializers.keyDeserializer, deserializers.valueDeserializer)); this.metadata = metadataFactory.build(config, subscriptions, logContext, clusterResourceListeners); final List addresses = ClientUtils.parseAndValidateAddresses(config); metadata.bootstrap(addresses); @@ -425,7 +348,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - asyncConsumerMetrics + kafkaConsumerMetrics ); // This FetchBuffer is shared between the application and network threads. @@ -440,7 +363,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, clientTelemetryReporter.map(ClientTelemetryReporter::telemetrySender).orElse(null), backgroundEventHandler, false, - asyncConsumerMetrics + kafkaConsumerMetrics ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.groupMetadata.set(initializeGroupMetadata(config, groupRebalanceConfig)); @@ -458,8 +381,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, clientTelemetryReporter, metrics, offsetCommitCallbackInvoker, - memberStateListener, - streamsRebalanceData + memberStateListener ); final Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier(logContext, metadata, @@ -473,16 +395,15 @@ public AsyncKafkaConsumer(final ConsumerConfig config, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, - asyncConsumerMetrics + kafkaConsumerMetrics ); + this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, subscriptions, time, new RebalanceCallbackMetricsManager(metrics) ); - this.streamsRebalanceListenerInvoker = streamsRebalanceData.map(s -> - new StreamsRebalanceListenerInvoker(logContext, s)); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaperFactory.build(logContext); @@ -506,7 +427,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, // call close methods if internal objects are already constructed; this is to prevent resource leak. see KAFKA-2121 // we do not need to call `close` at all when `log` is null, which means no internal objects were initialized. if (this.log != null) { - close(Duration.ZERO, CloseOptions.GroupMembershipOperation.LEAVE_GROUP, true); + close(Duration.ZERO, true); } // now propagate the exception throw new KafkaException("Failed to construct kafka consumer", t); @@ -543,7 +464,6 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.time = time; this.backgroundEventQueue = backgroundEventQueue; this.rebalanceListenerInvoker = rebalanceListenerInvoker; - this.streamsRebalanceListenerInvoker = Optional.empty(); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = backgroundEventReaper; this.metrics = metrics; @@ -554,15 +474,14 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.defaultApiTimeoutMs = Duration.ofMillis(defaultApiTimeoutMs); this.deserializers = deserializers; this.applicationEventHandler = applicationEventHandler; - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); this.clientTelemetryReporter = Optional.empty(); this.autoCommitEnabled = autoCommitEnabled; this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - asyncConsumerMetrics + kafkaConsumerMetrics ); } @@ -580,17 +499,17 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.autoCommitEnabled = config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG); this.fetchBuffer = new FetchBuffer(logContext); this.isolationLevel = IsolationLevel.READ_UNCOMMITTED; + this.interceptors = new ConsumerInterceptors<>(Collections.emptyList()); this.time = time; this.metrics = new Metrics(time); - this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics); this.metadata = metadata; this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.defaultApiTimeoutMs = Duration.ofMillis(config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG)); - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); this.clientTelemetryReporter = Optional.empty(); - ConsumerMetrics metricsRegistry = new ConsumerMetrics(); + ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); FetchMetricsManager fetchMetricsManager = new FetchMetricsManager(metrics, metricsRegistry.fetcherMetrics); this.fetchCollector = new FetchCollector<>(logContext, metadata, @@ -599,8 +518,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, deserializers, fetchMetricsManager, time); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_METRIC_GROUP); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); + this.kafkaConsumerMetrics = new AsyncConsumerMetrics(metrics); GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( config, @@ -614,7 +532,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, this.backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, - asyncConsumerMetrics + kafkaConsumerMetrics ); this.rebalanceListenerInvoker = new ConsumerRebalanceListenerInvoker( logContext, @@ -631,7 +549,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, metadata, backgroundEventHandler, false, - asyncConsumerMetrics + kafkaConsumerMetrics ); this.offsetCommitCallbackInvoker = new OffsetCommitCallbackInvoker(interceptors); Supplier requestManagersSupplier = RequestManagers.supplier( @@ -649,8 +567,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, clientTelemetryReporter, metrics, offsetCommitCallbackInvoker, - memberStateListener, - Optional.empty() + memberStateListener ); Supplier applicationEventProcessorSupplier = ApplicationEventProcessor.supplier( logContext, @@ -665,8 +582,7 @@ public AsyncKafkaConsumer(final ConsumerConfig config, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, - asyncConsumerMetrics); - this.streamsRebalanceListenerInvoker = Optional.empty(); + kafkaConsumerMetrics); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = new CompletableEventReaper(logContext); } @@ -900,7 +816,7 @@ public void commitAsync(OffsetCommitCallback callback) { @Override public void commitAsync(Map offsets, OffsetCommitCallback callback) { - commitAsync(Optional.of(new HashMap<>(offsets)), callback); + commitAsync(Optional.of(offsets), callback); } private void commitAsync(Optional> offsets, OffsetCommitCallback callback) { @@ -927,7 +843,7 @@ private void commitAsync(Optional> offset } private CompletableFuture> commit(final CommitEvent commitEvent) { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); offsetCommitCallbackInvoker.executeCallbacks(); if (commitEvent.offsets().isPresent() && commitEvent.offsets().get().isEmpty()) { @@ -1056,7 +972,7 @@ public Map committed(final Set committed(final Set beginningOrEndOffset(Collection(); + return listOffsetsEvent.emptyResults(); } Map offsetAndTimestampMap; @@ -1319,7 +1232,7 @@ public OptionalLong currentLag(TopicPartition topicPartition) { public ConsumerGroupMetadata groupMetadata() { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); return groupMetadata.get().get(); } finally { release(); @@ -1338,19 +1251,11 @@ public void enforceRebalance(String reason) { @Override public void close() { - close(CloseOptions.timeout(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS))); + close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); } - @Deprecated @Override public void close(Duration timeout) { - close(CloseOptions.timeout(timeout)); - } - - @Override - public void close(CloseOptions option) { - Duration timeout = option.timeout().orElseGet(() -> Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); - if (timeout.toMillis() < 0) throw new IllegalArgumentException("The timeout cannot be negative."); acquire(); @@ -1358,7 +1263,7 @@ public void close(CloseOptions option) { if (!closed) { // need to close before setting the flag since the close function // itself may trigger rebalance callback that needs the consumer to be open still - close(timeout, option.groupMembershipOperation(), false); + close(timeout, false); } } finally { closed = true; @@ -1428,7 +1333,7 @@ public void close(CloseOptions option) { * * */ - private void close(Duration timeout, CloseOptions.GroupMembershipOperation membershipOperation, boolean swallowException) { + private void close(Duration timeout, boolean swallowException) { log.trace("Closing the Kafka consumer"); AtomicReference firstException = new AtomicReference<>(); @@ -1445,10 +1350,10 @@ private void close(Duration timeout, CloseOptions.GroupMembershipOperation membe () -> autoCommitOnClose(closeTimer), firstException); swallow(log, Level.ERROR, "Failed to stop finding coordinator", this::stopFindCoordinatorOnClose, firstException); - swallow(log, Level.ERROR, "Failed to run rebalance callbacks", + swallow(log, Level.ERROR, "Failed to release group assignment", this::runRebalanceCallbacksOnClose, firstException); swallow(log, Level.ERROR, "Failed to leave group while closing consumer", - () -> leaveGroupOnClose(closeTimer, membershipOperation), firstException); + () -> leaveGroupOnClose(closeTimer), firstException); swallow(log, Level.ERROR, "Failed invoking asynchronous commit callbacks while closing consumer", () -> awaitPendingAsyncCommitsAndExecuteCommitCallbacks(closeTimer, false), firstException); if (applicationEventHandler != null) @@ -1462,7 +1367,6 @@ private void close(Duration timeout, CloseOptions.GroupMembershipOperation membe closeQuietly(interceptors, "consumer interceptors", firstException); closeQuietly(kafkaConsumerMetrics, "kafka consumer metrics", firstException); - closeQuietly(asyncConsumerMetrics, "async consumer metrics", firstException); closeQuietly(metrics, "consumer metrics", firstException); closeQuietly(deserializers, "consumer deserializers", firstException); clientTelemetryReporter.ifPresent(reporter -> closeQuietly(reporter, "async consumer telemetry reporter", firstException)); @@ -1485,7 +1389,7 @@ private Timer createTimerForCloseRequests(Duration timeout) { } private void autoCommitOnClose(final Timer timer) { - if (groupMetadata.get().isEmpty() || applicationEventHandler == null) + if (groupMetadata.get().isEmpty()) return; if (autoCommitEnabled) @@ -1500,46 +1404,33 @@ private void runRebalanceCallbacksOnClose() { int memberEpoch = groupMetadata.get().get().generationId(); - Exception error = null; - - if (streamsRebalanceListenerInvoker != null && streamsRebalanceListenerInvoker.isPresent()) { - - if (memberEpoch > 0) { - error = streamsRebalanceListenerInvoker.get().invokeAllTasksRevoked(); - } else { - error = streamsRebalanceListenerInvoker.get().invokeAllTasksLost(); - } - - } else if (rebalanceListenerInvoker != null) { + Set assignedPartitions = groupAssignmentSnapshot.get(); - Set assignedPartitions = groupAssignmentSnapshot.get(); - - if (assignedPartitions.isEmpty()) - // Nothing to revoke. - return; + if (assignedPartitions.isEmpty()) + // Nothing to revoke. + return; - SortedSet droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); - droppedPartitions.addAll(assignedPartitions); + SortedSet droppedPartitions = new TreeSet<>(TOPIC_PARTITION_COMPARATOR); + droppedPartitions.addAll(assignedPartitions); - if (memberEpoch > 0) { - error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions); - } else { - error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions); - } + final Exception error; - } + if (memberEpoch > 0) + error = rebalanceListenerInvoker.invokePartitionsRevoked(droppedPartitions); + else + error = rebalanceListenerInvoker.invokePartitionsLost(droppedPartitions); if (error != null) throw ConsumerUtils.maybeWrapAsKafkaException(error); } - private void leaveGroupOnClose(final Timer timer, final CloseOptions.GroupMembershipOperation membershipOperation) { - if (groupMetadata.get().isEmpty() || applicationEventHandler == null) + private void leaveGroupOnClose(final Timer timer) { + if (groupMetadata.get().isEmpty()) return; log.debug("Leaving the consumer group during consumer close"); try { - applicationEventHandler.addAndGet(new LeaveGroupOnCloseEvent(calculateDeadlineMs(timer), membershipOperation)); + applicationEventHandler.addAndGet(new LeaveGroupOnCloseEvent(calculateDeadlineMs(timer))); log.info("Completed leaving the group"); } catch (TimeoutException e) { log.warn("Consumer attempted to leave the group but couldn't " + @@ -1550,7 +1441,7 @@ private void leaveGroupOnClose(final Timer timer, final CloseOptions.GroupMember } private void stopFindCoordinatorOnClose() { - if (groupMetadata.get().isEmpty() || applicationEventHandler == null) + if (groupMetadata.get().isEmpty()) return; log.debug("Stop finding coordinator during consumer close"); applicationEventHandler.add(new StopFindCoordinatorOnCloseEvent()); @@ -1586,12 +1477,12 @@ public void commitSync(final Duration timeout) { @Override public void commitSync(Map offsets) { - commitSync(Optional.of(new HashMap<>(offsets)), defaultApiTimeoutMs); + commitSync(Optional.of(offsets), defaultApiTimeoutMs); } @Override public void commitSync(Map offsets, Duration timeout) { - commitSync(Optional.of(new HashMap<>(offsets)), timeout); + commitSync(Optional.of(offsets), timeout); } private void commitSync(Optional> offsets, Duration timeout) { @@ -1615,7 +1506,7 @@ private void commitSync(Optional> offsets } private void awaitPendingAsyncCommitsAndExecuteCommitCallbacks(Timer timer, boolean enableWakeup) { - if (lastPendingAsyncCommit == null || offsetCommitCallbackInvoker == null) { + if (lastPendingAsyncCommit == null) { return; } @@ -1792,7 +1683,7 @@ private Fetch pollForFetches(Timer timer) { // use of a shorter, dedicated "pollTimer" here which updates "timer" so that calling method (poll) will // correctly handle the overall timeout. try { - fetchBuffer.awaitWakeup(pollTimer); + fetchBuffer.awaitNotEmpty(pollTimer); } catch (InterruptException e) { log.trace("Interrupt during fetch", e); throw e; @@ -1886,7 +1777,7 @@ private boolean isCommittedOffsetsManagementEnabled() { private void sendFetches(Timer timer) { try { applicationEventHandler.addAndGet(new CreateFetchRequestsEvent(calculateDeadlineMs(timer))); - } catch (TimeoutException swallow) { + } catch (TimeoutException e) { // Can be ignored, per above comments. } } @@ -1943,15 +1834,6 @@ public void subscribe(Collection topics, ConsumerRebalanceListener liste subscribeInternal(topics, Optional.of(listener)); } - public void subscribe(Collection topics, StreamsRebalanceListener streamsRebalanceListener) { - - streamsRebalanceListenerInvoker - .orElseThrow(() -> new IllegalStateException("Consumer was not created to be used with Streams rebalance protocol events")) - .setRebalanceListener(streamsRebalanceListener); - - subscribeInternal(topics, Optional.empty()); - } - @Override public void subscribe(Pattern pattern) { subscribeInternal(pattern, Optional.empty()); @@ -2019,7 +1901,7 @@ private void release() { private void subscribeInternal(Pattern pattern, Optional listener) { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? "null" : "empty")); @@ -2043,7 +1925,7 @@ private void subscribeToRegex(SubscriptionPattern pattern, Optional listener) { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); throwIfSubscriptionPatternIsInvalid(pattern); log.info("Subscribing to regular expression {}", pattern); applicationEventHandler.addAndGet(new TopicRe2JPatternSubscriptionChangeEvent( @@ -2067,7 +1949,7 @@ private void throwIfSubscriptionPatternIsInvalid(SubscriptionPattern subscriptio private void subscribeInternal(Collection topics, Optional listener) { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { @@ -2115,7 +1997,7 @@ boolean processBackgroundEvents() { if (!events.isEmpty()) { long startMs = time.milliseconds(); for (BackgroundEvent event : events) { - asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); + kafkaConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); try { if (event instanceof CompletableEvent) backgroundEventReaper.add((CompletableEvent) event); @@ -2128,7 +2010,7 @@ boolean processBackgroundEvents() { log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } } - asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); + kafkaConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); @@ -2203,7 +2085,7 @@ T processBackgroundEvents(Future future, Timer timer, Predicate implements ConsumerDelegate { this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); List> interceptorList = configuredConsumerInterceptors(config); - this.interceptors = new ConsumerInterceptors<>(interceptorList, metrics); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); + this.interceptors = new ConsumerInterceptors<>(interceptorList); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( metrics.reporters(), interceptorList, - Arrays.asList(this.deserializers.keyDeserializer(), this.deserializers.valueDeserializer())); + Arrays.asList(this.deserializers.keyDeserializer, this.deserializers.valueDeserializer)); this.metadata = new ConsumerMetadata(config, subscriptions, logContext, clusterResourceListeners); List addresses = ClientUtils.parseAndValidateAddresses(config); this.metadata.bootstrap(addresses); @@ -230,6 +229,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { config.getInt(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG), this.interceptors, config.getBoolean(THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED), + config.getString(ConsumerConfig.CLIENT_RACK_CONFIG), clientTelemetryReporter); } this.fetcher = new Fetcher<>( @@ -256,7 +256,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { retryBackoffMs, retryBackoffMaxMs); - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); config.logUnused(); AppInfoParser.registerAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -265,10 +265,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { // call close methods if internal objects are already constructed; this is to prevent resource leak. see KAFKA-2121 // we do not need to call `close` at all when `log` is null, which means no internal objects were initialized. if (this.log != null) { - // If a consumer fails during initialization, it means it hasn't joined the group yet. - // Since it's not a group member, we use REMAIN_IN_GROUP option when closing - // to prevent sending an unnecessary leave request to the coordinator. - close(Duration.ZERO, CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP, true); + close(Duration.ZERO, true); } // now propagate the exception throw new KafkaException("Failed to construct kafka consumer", t); @@ -292,12 +289,12 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { this.metrics = new Metrics(time); this.clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); this.groupId = Optional.ofNullable(config.getString(ConsumerConfig.GROUP_ID_CONFIG)); - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); this.isolationLevel = ConsumerUtils.configuredIsolationLevel(config); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.assignors = assignors; - this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics); - this.interceptors = new ConsumerInterceptors<>(Collections.emptyList(), metrics); + this.kafkaConsumerMetrics = new KafkaConsumerMetrics(metrics, CONSUMER_METRIC_GROUP_PREFIX); + this.interceptors = new ConsumerInterceptors<>(Collections.emptyList()); this.retryBackoffMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG); this.retryBackoffMaxMs = config.getLong(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); @@ -329,7 +326,6 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { heartbeatIntervalMs, groupId.get(), groupInstanceId, - rackId, retryBackoffMs, retryBackoffMaxMs, true @@ -348,6 +344,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { autoCommitIntervalMs, interceptors, throwOnStableOffsetNotSupported, + rackId, clientTelemetryReporter ); } else { @@ -361,7 +358,7 @@ public class ClassicKafkaConsumer implements ConsumerDelegate { int maxPollRecords = config.getInt(ConsumerConfig.MAX_POLL_RECORDS_CONFIG); boolean checkCrcs = config.getBoolean(ConsumerConfig.CHECK_CRCS_CONFIG); - ConsumerMetrics metricsRegistry = new ConsumerMetrics(); + ConsumerMetrics metricsRegistry = new ConsumerMetrics(CONSUMER_METRIC_GROUP_PREFIX); FetchMetricsManager metricsManager = new FetchMetricsManager(metrics, metricsRegistry.fetcherMetrics); ApiVersions apiVersions = new ApiVersions(); FetchConfig fetchConfig = new FetchConfig( @@ -477,7 +474,7 @@ public void subscribe(Collection topics) { private void subscribeInternal(Collection topics, Optional listener) { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); if (topics == null) throw new IllegalArgumentException("Topic collection to subscribe to cannot be null"); if (topics.isEmpty()) { @@ -558,7 +555,7 @@ public void subscribe(SubscriptionPattern pattern) { * configured at-least one partition assignment strategy */ private void subscribeInternal(Pattern pattern, Optional listener) { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); if (pattern == null || pattern.toString().isEmpty()) throw new IllegalArgumentException("Topic pattern to subscribe to cannot be " + (pattern == null ? "null" : "empty")); @@ -581,7 +578,7 @@ public void unsubscribe() { fetcher.clearBufferedDataForUnassignedPartitions(Collections.emptySet()); if (this.coordinator != null) { this.coordinator.onLeavePrepare(); - this.coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "the consumer unsubscribed from all topics"); + this.coordinator.maybeLeaveGroup("the consumer unsubscribed from all topics"); } this.subscriptions.unsubscribe(); log.info("Unsubscribed all topics or patterns and assigned partitions"); @@ -742,7 +739,7 @@ public void commitSync(final Map offsets, fin acquireAndEnsureOpen(); long commitStart = time.nanoseconds(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); offsets.forEach(this::updateLastSeenEpochIfNewer); if (!coordinator.commitOffsetsSync(new HashMap<>(offsets), time.timer(timeout))) { throw new TimeoutException("Timeout of " + timeout.toMillis() + "ms expired before successfully " + @@ -768,7 +765,7 @@ public void commitAsync(OffsetCommitCallback callback) { public void commitAsync(final Map offsets, OffsetCommitCallback callback) { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); log.debug("Committing offsets: {}", offsets); offsets.forEach(this::updateLastSeenEpochIfNewer); coordinator.commitOffsetsAsync(new HashMap<>(offsets), callback); @@ -889,7 +886,7 @@ public Map committed(final Set offsets; offsets = coordinator.fetchCommittedOffsets(partitions, time.timer(timeout)); if (offsets == null) { @@ -1078,7 +1075,7 @@ public OptionalLong currentLag(TopicPartition topicPartition) { public ConsumerGroupMetadata groupMetadata() { acquireAndEnsureOpen(); try { - throwIfGroupIdNotDefined(); + maybeThrowInvalidGroupIdException(); return coordinator.groupMetadata(); } finally { release(); @@ -1105,23 +1102,11 @@ public void enforceRebalance() { @Override public void close() { - close(CloseOptions.timeout(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS))); + close(Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); } - @Deprecated @Override public void close(Duration timeout) { - close(CloseOptions.timeout(timeout)); - } - - @Override - public void wakeup() { - this.client.wakeup(); - } - - @Override - public void close(CloseOptions option) { - Duration timeout = option.timeout().orElseGet(() -> Duration.ofMillis(DEFAULT_CLOSE_TIMEOUT_MS)); if (timeout.toMillis() < 0) throw new IllegalArgumentException("The timeout cannot be negative."); acquire(); @@ -1129,7 +1114,7 @@ public void close(CloseOptions option) { if (!closed) { // need to close before setting the flag since the close function // itself may trigger rebalance callback that needs the consumer to be open still - close(timeout, option.groupMembershipOperation(), false); + close(timeout, false); } } finally { closed = true; @@ -1137,13 +1122,18 @@ public void close(CloseOptions option) { } } + @Override + public void wakeup() { + this.client.wakeup(); + } + private Timer createTimerForRequest(final Duration timeout) { // this.time could be null if an exception occurs in constructor prior to setting the this.time field final Time localTime = (time == null) ? Time.SYSTEM : time; return localTime.timer(Math.min(timeout.toMillis(), requestTimeoutMs)); } - private void close(Duration timeout, CloseOptions.GroupMembershipOperation membershipOperation, boolean swallowException) { + private void close(Duration timeout, boolean swallowException) { log.trace("Closing the Kafka consumer"); AtomicReference firstException = new AtomicReference<>(); @@ -1155,13 +1145,7 @@ private void close(Duration timeout, CloseOptions.GroupMembershipOperation membe // consumer. if (coordinator != null) { // This is a blocking call bound by the time remaining in closeTimer - swallow( - log, - Level.ERROR, - "Failed to close coordinator with a timeout(ms)=" + closeTimer.timeoutMs(), - () -> coordinator.close(closeTimer, membershipOperation), - firstException - ); + swallow(log, Level.ERROR, "Failed to close coordinator with a timeout(ms)=" + closeTimer.timeoutMs(), () -> coordinator.close(closeTimer), firstException); } if (fetcher != null) { @@ -1272,7 +1256,7 @@ private void throwIfNoAssignorsConfigured() { ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG + " configuration property"); } - private void throwIfGroupIdNotDefined() { + private void maybeThrowInvalidGroupIdException() { if (groupId.isEmpty()) throw new InvalidGroupIdException("To use the group management or offset commit APIs, you must " + "provide a valid " + ConsumerConfig.GROUP_ID_CONFIG + " in the consumer configuration."); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java index fe4d3806f2af4..284707a812b53 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CommitRequestManager.java @@ -34,8 +34,6 @@ import org.apache.kafka.common.errors.UnstableOffsetCommitException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; -import org.apache.kafka.common.message.OffsetFetchRequestData; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; @@ -44,7 +42,6 @@ import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; @@ -541,7 +538,7 @@ private void fetchOffsetsWithRetries(final OffsetFetchRequestState fetchRequest, boolean inflightRemoved = pendingRequests.inflightOffsetFetches.remove(fetchRequest); if (!inflightRemoved) { log.warn("A duplicated, inflight, request was identified, but unable to find it in the " + - "outbound buffer: {}", fetchRequest); + "outbound buffer:" + fetchRequest); } if (error == null) { maybeUpdateLastSeenEpochIfNewer(res); @@ -589,8 +586,6 @@ public void onMemberEpochUpdated(Optional memberEpoch, String memberId) if (memberEpoch.isEmpty() && memberInfo.memberEpoch.isPresent()) { log.info("Member {} won't include epoch in following offset " + "commit/fetch requests because it has left the group.", memberInfo.memberId); - } else if (memberEpoch.isPresent()) { - log.debug("Member {} will include new member epoch {} in following offset commit/fetch requests.", memberId, memberEpoch); } memberInfo.memberId = memberId; memberInfo.memberEpoch = memberEpoch; @@ -732,7 +727,7 @@ public NetworkClientDelegate.UnsentRequest toUnsentRequest() { lastEpochSentOnCommit = Optional.empty(); } - OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames(data); + OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(data); return buildRequestWithResponseHandling(builder); } @@ -975,37 +970,21 @@ public boolean sameRequest(final OffsetFetchRequestState request) { } public NetworkClientDelegate.UnsentRequest toUnsentRequest() { - List topics = requestedPartitions.stream() - .collect(Collectors.groupingBy(TopicPartition::topic)) - .entrySet() - .stream() - .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(entry.getKey()) - .setPartitionIndexes(entry.getValue().stream() - .map(TopicPartition::partition) - .collect(Collectors.toList()))) - .collect(Collectors.toList()); - OffsetFetchRequest.Builder builder = memberInfo.memberEpoch - .map(epoch -> OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(true) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setMemberId(memberInfo.memberId) - .setMemberEpoch(epoch) - .setTopics(topics))), - throwOnFetchStableOffsetUnsupported)) + OffsetFetchRequest.Builder builder = memberInfo.memberEpoch. + map(epoch -> new OffsetFetchRequest.Builder( + groupId, + memberInfo.memberId, + epoch, + true, + new ArrayList<>(this.requestedPartitions), + throwOnFetchStableOffsetUnsupported)) // Building request without passing member ID/epoch to leave the logic to choose // default values when not present on the request builder. - .orElseGet(() -> OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(true) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setTopics(topics))), + .orElseGet(() -> new OffsetFetchRequest.Builder( + groupId, + true, + new ArrayList<>(this.requestedPartitions), throwOnFetchStableOffsetUnsupported)); return buildRequestWithResponseHandling(builder); } @@ -1016,14 +995,13 @@ public NetworkClientDelegate.UnsentRequest toUnsentRequest() { @Override void onResponse(final ClientResponse response) { long currentTimeMs = response.receivedTimeMs(); - var fetchResponse = (OffsetFetchResponse) response.responseBody(); - var groupResponse = fetchResponse.group(groupId); - var error = Errors.forCode(groupResponse.errorCode()); - if (error != Errors.NONE) { - onFailure(currentTimeMs, error); + OffsetFetchResponse fetchResponse = (OffsetFetchResponse) response.responseBody(); + Errors responseError = fetchResponse.groupLevelError(groupId); + if (responseError != Errors.NONE) { + onFailure(currentTimeMs, responseError); return; } - onSuccess(currentTimeMs, groupResponse); + onSuccess(currentTimeMs, fetchResponse); } /** @@ -1088,58 +1066,53 @@ void removeRequest() { * offsets contained in the response, and record a successful request attempt. */ private void onSuccess(final long currentTimeMs, - final OffsetFetchResponseData.OffsetFetchResponseGroup response) { - var offsets = new HashMap(); - var unstableTxnOffsetTopicPartitions = new HashSet(); - var unauthorizedTopics = new HashSet(); - var failedRequestRegistered = false; - - for (var topic : response.topics()) { - for (var partition : topic.partitions()) { - var tp = new TopicPartition( - topic.name(), - partition.partitionIndex() - ); - var error = Errors.forCode(partition.errorCode()); - if (error != Errors.NONE) { - log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); + final OffsetFetchResponse response) { + Set unauthorizedTopics = null; + Map responseData = + response.partitionDataMap(groupId); + Map offsets = new HashMap<>(responseData.size()); + Set unstableTxnOffsetTopicPartitions = new HashSet<>(); + boolean failedRequestRegistered = false; + for (Map.Entry entry : responseData.entrySet()) { + TopicPartition tp = entry.getKey(); + OffsetFetchResponse.PartitionData partitionData = entry.getValue(); + if (partitionData.hasError()) { + Errors error = partitionData.error; + log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); - if (!failedRequestRegistered) { - onFailedAttempt(currentTimeMs); - failedRequestRegistered = true; - } + if (!failedRequestRegistered) { + onFailedAttempt(currentTimeMs); + failedRequestRegistered = true; + } - if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { - future.completeExceptionally(new KafkaException("Topic or Partition " + tp + " does not exist")); - return; - } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { - unauthorizedTopics.add(tp.topic()); - } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { - unstableTxnOffsetTopicPartitions.add(tp); - } else { - // Fail with a non-retriable KafkaException for all unexpected partition - // errors (even if they are retriable) - future.completeExceptionally(new KafkaException("Unexpected error in fetch offset " + - "response for partition " + tp + ": " + error.message())); - return; + if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { + future.completeExceptionally(new KafkaException("Topic or Partition " + tp + " does not exist")); + return; + } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { + if (unauthorizedTopics == null) { + unauthorizedTopics = new HashSet<>(); } - } else if (partition.committedOffset() >= 0) { - // record the position with the offset (-1 indicates no committed offset to fetch); - // if there's no committed offset, record as null - offsets.put(tp, new OffsetAndMetadata( - partition.committedOffset(), - RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), - partition.metadata() - )); + unauthorizedTopics.add(tp.topic()); + } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { + unstableTxnOffsetTopicPartitions.add(tp); } else { - log.info("Found no committed offset for partition {}", tp); - offsets.put(tp, null); + // Fail with a non-retriable KafkaException for all unexpected partition + // errors (even if they are retriable) + future.completeExceptionally(new KafkaException("Unexpected error in fetch offset " + + "response for partition " + tp + ": " + error.message())); + return; } - + } else if (partitionData.offset >= 0) { + // record the position with the offset (-1 indicates no committed offset to fetch); + // if there's no committed offset, record as null + offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata)); + } else { + log.info("Found no committed offset for partition {}", tp); + offsets.put(tp, null); } } - if (!unauthorizedTopics.isEmpty()) { + if (unauthorizedTopics != null) { future.completeExceptionally(new TopicAuthorizationException(unauthorizedTopics)); } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) { // TODO: Optimization question: Do we need to retry all partitions upon a single partition error? diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java index 8e2a6c3b513ed..d615c21318d8b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CompletedFetch.java @@ -60,6 +60,7 @@ public class CompletedFetch { final TopicPartition partition; final FetchResponseData.PartitionData partitionData; + final short requestVersion; private final Logger log; private final SubscriptionState subscriptions; @@ -87,7 +88,8 @@ public class CompletedFetch { TopicPartition partition, FetchResponseData.PartitionData partitionData, FetchMetricsAggregator metricAggregator, - Long fetchOffset) { + Long fetchOffset, + short requestVersion) { this.log = log; this.subscriptions = subscriptions; this.decompressionBufferSupplier = decompressionBufferSupplier; @@ -96,6 +98,7 @@ public class CompletedFetch { this.metricAggregator = metricAggregator; this.batches = FetchResponse.recordsOrFail(partitionData).batches().iterator(); this.nextFetchOffset = fetchOffset; + this.requestVersion = requestVersion; this.lastEpoch = Optional.empty(); this.abortedProducerIds = new HashSet<>(); this.abortedTransactions = abortedTransactions(partitionData); @@ -315,13 +318,13 @@ ConsumerRecord parseRecord(Deserializers deserializers, K key; V value; try { - key = keyBytes == null ? null : deserializers.keyDeserializer().deserialize(partition.topic(), headers, keyBytes); + key = keyBytes == null ? null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); } catch (RuntimeException e) { log.error("Key Deserializers with error: {}", deserializers); throw newRecordDeserializationException(DeserializationExceptionOrigin.KEY, partition, timestampType, record, e, headers); } try { - value = valueBytes == null ? null : deserializers.valueDeserializer().deserialize(partition.topic(), headers, valueBytes); + value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); } catch (RuntimeException e) { log.error("Value Deserializers with error: {}", deserializers); throw newRecordDeserializationException(DeserializationExceptionOrigin.VALUE, partition, timestampType, record, e, headers); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java index 4956d64228dbb..584a03736f97f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinator.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.GroupRebalanceConfig; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; @@ -49,7 +48,6 @@ import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; -import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.Sensor; @@ -63,7 +61,6 @@ import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; -import org.apache.kafka.common.requests.RequestUtils; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; @@ -89,7 +86,6 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Supplier; import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.ASSIGN_FROM_SUBSCRIBED_ASSIGNORS; @@ -178,54 +174,19 @@ public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, int autoCommitIntervalMs, ConsumerInterceptors interceptors, boolean throwOnFetchStableOffsetsUnsupported, + String rackId, Optional clientTelemetryReporter) { - this(rebalanceConfig, - logContext, - client, - assignors, - metadata, - subscriptions, - metrics, - metricGrpPrefix, - time, - autoCommitEnabled, - autoCommitIntervalMs, - interceptors, - throwOnFetchStableOffsetsUnsupported, - clientTelemetryReporter, - Optional.empty()); - } - - /** - * Initialize the coordination manager. - */ - public ConsumerCoordinator(GroupRebalanceConfig rebalanceConfig, - LogContext logContext, - ConsumerNetworkClient client, - List assignors, - ConsumerMetadata metadata, - SubscriptionState subscriptions, - Metrics metrics, - String metricGrpPrefix, - Time time, - boolean autoCommitEnabled, - int autoCommitIntervalMs, - ConsumerInterceptors interceptors, - boolean throwOnFetchStableOffsetsUnsupported, - Optional clientTelemetryReporter, - Optional> heartbeatThreadSupplier) { super(rebalanceConfig, logContext, client, metrics, metricGrpPrefix, time, - clientTelemetryReporter, - heartbeatThreadSupplier); + clientTelemetryReporter); this.rebalanceConfig = rebalanceConfig; this.log = logContext.logger(ConsumerCoordinator.class); this.metadata = metadata; - this.rackId = rebalanceConfig.rackId; + this.rackId = rackId == null || rackId.isEmpty() ? Optional.empty() : Optional.of(rackId); this.metadataSnapshot = new MetadataSnapshot(this.rackId, subscriptions, metadata.fetch(), metadata.updateVersion()); this.subscriptions = subscriptions; this.defaultOffsetCommitCallback = new DefaultOffsetCommitCallback(); @@ -1012,7 +973,7 @@ public ConsumerGroupMetadata groupMetadata() { /** * @throws KafkaException if the rebalance callback throws exception */ - public void close(final Timer timer, CloseOptions.GroupMembershipOperation membershipOperation) { + public void close(final Timer timer) { // we do not need to re-enable wakeups since we are closing already client.disableWakeups(); try { @@ -1023,7 +984,7 @@ public void close(final Timer timer, CloseOptions.GroupMembershipOperation membe invokeCompletedOffsetCommitCallbacks(); } } finally { - super.close(timer, membershipOperation); + super.close(timer); } } @@ -1302,25 +1263,23 @@ RequestFuture sendOffsetCommitRequest(final Map sendOffsetCommitRequest(final Map> sendOffsetFetchReq return RequestFuture.coordinatorNotAvailable(); log.debug("Fetching committed offsets for partitions: {}", partitions); - // construct the request - List topics = partitions.stream() - .collect(Collectors.groupingBy(TopicPartition::topic)) - .entrySet() - .stream() - .map(entry -> new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(entry.getKey()) - .setPartitionIndexes(entry.getValue().stream() - .map(TopicPartition::partition) - .collect(Collectors.toList()))) - .collect(Collectors.toList()); - - OffsetFetchRequest.Builder requestBuilder = OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(true) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(this.rebalanceConfig.groupId) - .setTopics(topics))), - throwOnFetchStableOffsetsUnsupported); + OffsetFetchRequest.Builder requestBuilder = + new OffsetFetchRequest.Builder(this.rebalanceConfig.groupId, true, new ArrayList<>(partitions), throwOnFetchStableOffsetsUnsupported); // send the request with a callback return client.send(coordinator, requestBuilder) @@ -1512,71 +1453,64 @@ private OffsetFetchResponseHandler() { @Override public void handle(OffsetFetchResponse response, RequestFuture> future) { - var group = response.group(rebalanceConfig.groupId); - var groupError = Errors.forCode(group.errorCode()); - - if (groupError != Errors.NONE) { - log.debug("Offset fetch failed: {}", groupError.message()); + Errors responseError = response.groupLevelError(rebalanceConfig.groupId); + if (responseError != Errors.NONE) { + log.debug("Offset fetch failed: {}", responseError.message()); - if (groupError == Errors.COORDINATOR_NOT_AVAILABLE || - groupError == Errors.NOT_COORDINATOR) { + if (responseError == Errors.COORDINATOR_NOT_AVAILABLE || + responseError == Errors.NOT_COORDINATOR) { // re-discover the coordinator and retry - markCoordinatorUnknown(groupError); - future.raise(groupError); - } else if (groupError == Errors.GROUP_AUTHORIZATION_FAILED) { + markCoordinatorUnknown(responseError); + future.raise(responseError); + } else if (responseError == Errors.GROUP_AUTHORIZATION_FAILED) { future.raise(GroupAuthorizationException.forGroupId(rebalanceConfig.groupId)); - } else if (groupError.exception() instanceof RetriableException) { + } else if (responseError.exception() instanceof RetriableException) { // retry - future.raise(groupError); + future.raise(responseError); } else { - future.raise(new KafkaException("Unexpected error in fetch offset response: " + groupError.message())); + future.raise(new KafkaException("Unexpected error in fetch offset response: " + responseError.message())); } return; } - var offsets = new HashMap(); - var unstableTxnOffsetTopicPartitions = new HashSet(); - var unauthorizedTopics = new HashSet(); - - for (var topic : group.topics()) { - for (var partition : topic.partitions()) { - var tp = new TopicPartition( - topic.name(), - partition.partitionIndex() - ); - var error = Errors.forCode(partition.errorCode()); - - if (error != Errors.NONE) { - log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); - - if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { - future.raise(new KafkaException("Topic or Partition " + tp + " does not exist")); - return; - } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { - unauthorizedTopics.add(tp.topic()); - } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { - unstableTxnOffsetTopicPartitions.add(tp); - } else { - future.raise(new KafkaException("Unexpected error in fetch offset response for partition " + - tp + ": " + error.message())); - return; + Set unauthorizedTopics = null; + Map responseData = + response.partitionDataMap(rebalanceConfig.groupId); + Map offsets = new HashMap<>(responseData.size()); + Set unstableTxnOffsetTopicPartitions = new HashSet<>(); + for (Map.Entry entry : responseData.entrySet()) { + TopicPartition tp = entry.getKey(); + OffsetFetchResponse.PartitionData partitionData = entry.getValue(); + if (partitionData.hasError()) { + Errors error = partitionData.error; + log.debug("Failed to fetch offset for partition {}: {}", tp, error.message()); + + if (error == Errors.UNKNOWN_TOPIC_OR_PARTITION) { + future.raise(new KafkaException("Topic or Partition " + tp + " does not exist")); + return; + } else if (error == Errors.TOPIC_AUTHORIZATION_FAILED) { + if (unauthorizedTopics == null) { + unauthorizedTopics = new HashSet<>(); } - } else if (partition.committedOffset() >= 0) { - // record the position with the offset (-1 indicates no committed offset to fetch); - // if there's no committed offset, record as null - offsets.put(tp, new OffsetAndMetadata( - partition.committedOffset(), - RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), - partition.metadata() - )); + unauthorizedTopics.add(tp.topic()); + } else if (error == Errors.UNSTABLE_OFFSET_COMMIT) { + unstableTxnOffsetTopicPartitions.add(tp); } else { - log.info("Found no committed offset for partition {}", tp); - offsets.put(tp, null); + future.raise(new KafkaException("Unexpected error in fetch offset response for partition " + + tp + ": " + error.message())); + return; } + } else if (partitionData.offset >= 0) { + // record the position with the offset (-1 indicates no committed offset to fetch); + // if there's no committed offset, record as null + offsets.put(tp, new OffsetAndMetadata(partitionData.offset, partitionData.leaderEpoch, partitionData.metadata)); + } else { + log.info("Found no committed offset for partition {}", tp); + offsets.put(tp, null); } } - if (!unauthorizedTopics.isEmpty()) { + if (unauthorizedTopics != null) { future.raise(new TopicAuthorizationException(unauthorizedTopics)); } else if (!unstableTxnOffsetTopicPartitions.isEmpty()) { // just retry diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java index d4ded4377b290..74592972b9dda 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerDelegateCreator.java @@ -29,7 +29,6 @@ import java.util.List; import java.util.Locale; -import java.util.Optional; /** * {@code ConsumerDelegateCreator} implements a quasi-factory pattern to allow the caller to remain unaware of the @@ -61,7 +60,7 @@ public ConsumerDelegate create(ConsumerConfig config, GroupProtocol groupProtocol = GroupProtocol.valueOf(config.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG).toUpperCase(Locale.ROOT)); if (groupProtocol == GroupProtocol.CONSUMER) - return new AsyncKafkaConsumer<>(config, keyDeserializer, valueDeserializer, Optional.empty()); + return new AsyncKafkaConsumer<>(config, keyDeserializer, valueDeserializer); else return new ClassicKafkaConsumer<>(config, keyDeserializer, valueDeserializer); } catch (KafkaException e) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java index c5f95305a4747..4fe0e7085a244 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManager.java @@ -21,7 +21,6 @@ import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.metrics.HeartbeatMetricsManager; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; @@ -39,9 +38,6 @@ import java.util.TreeSet; import java.util.stream.Collectors; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP; -import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.REGEX_RESOLUTION_NOT_SUPPORTED_MSG; - /** * This is the heartbeat request manager for consumer groups. * @@ -82,7 +78,7 @@ public ConsumerHeartbeatRequestManager( final CoordinatorRequestManager coordinatorRequestManager, final ConsumerMembershipManager membershipManager, final HeartbeatState heartbeatState, - final HeartbeatRequestState heartbeatRequestState, + final AbstractHeartbeatRequestManager.HeartbeatRequestState heartbeatRequestState, final BackgroundEventHandler backgroundEventHandler, final Metrics metrics) { super(logContext, timer, config, coordinatorRequestManager, heartbeatRequestState, backgroundEventHandler, @@ -95,43 +91,12 @@ public ConsumerHeartbeatRequestManager( * {@inheritDoc} */ @Override - public boolean handleSpecificFailure(Throwable exception) { - boolean errorHandled = false; - String errorMessage = exception.getMessage(); - if (exception instanceof UnsupportedVersionException) { - String message = CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG; - if (errorMessage.equals(REGEX_RESOLUTION_NOT_SUPPORTED_MSG)) { - message = REGEX_RESOLUTION_NOT_SUPPORTED_MSG; - logger.error("{} regex resolution not supported: {}", heartbeatRequestName(), message); - } else { - logger.error("{} failed due to unsupported version while sending request: {}", heartbeatRequestName(), errorMessage); - } - handleFatalFailure(new UnsupportedVersionException(message, exception)); - errorHandled = true; - } - return errorHandled; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean handleSpecificExceptionInResponse(final ConsumerGroupHeartbeatResponse response, final long currentTimeMs) { + public boolean handleSpecificError(final ConsumerGroupHeartbeatResponse response, final long currentTimeMs) { Errors error = errorForResponse(response); String errorMessage = errorMessageForResponse(response); boolean errorHandled; switch (error) { - // Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate - // custom message for it. Note that the case where the protocol is not supported at all should fail - // on the client side when building the request and checking supporting APIs (handled on onFailure). - case UNSUPPORTED_VERSION: - logger.error("{} failed due to unsupported version response on broker side: {}", - heartbeatRequestName(), CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG); - handleFatalFailure(error.exception(CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG)); - errorHandled = true; - break; - case UNRELEASED_INSTANCE_ID: logger.error("{} failed due to unreleased instance id {}: {}", heartbeatRequestName(), membershipManager.groupInstanceId().orElse("null"), errorMessage); @@ -212,15 +177,6 @@ public ConsumerMembershipManager membershipManager() { return membershipManager; } - @Override - protected boolean shouldSendLeaveHeartbeatNow() { - // If the consumer has dynamic membership, - // we should skip the leaving heartbeat when leaveGroupOperation is REMAIN_IN_GROUP - if (membershipManager.groupInstanceId().isEmpty() && REMAIN_IN_GROUP == membershipManager.leaveGroupOperation()) - return false; - return membershipManager().state() == MemberState.LEAVING; - } - /** * Builds the heartbeat requests correctly, ensuring that all information is sent according to * the protocol, but subsequent requests do not send information which has not changed. This @@ -247,7 +203,6 @@ public void reset() { sentFields.reset(); } - @SuppressWarnings("NPathComplexity") public ConsumerGroupHeartbeatRequestData buildRequestData() { ConsumerGroupHeartbeatRequestData data = new ConsumerGroupHeartbeatRequestData(); @@ -307,12 +262,6 @@ public ConsumerGroupHeartbeatRequestData buildRequestData() { sentFields.localAssignment = local; } - // RackId - sent when joining - String rackId = membershipManager.rackId().orElse(null); - if (sendAllFields) { - data.setRackId(rackId); - } - return data; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java index c58b60ba0f25a..c56ea1a03e979 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptors.java @@ -17,13 +17,10 @@ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerInterceptor; import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.internals.Plugin; -import org.apache.kafka.common.metrics.Metrics; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -38,15 +35,15 @@ */ public class ConsumerInterceptors implements Closeable { private static final Logger log = LoggerFactory.getLogger(ConsumerInterceptors.class); - private final List>> interceptorPlugins; + private final List> interceptors; - public ConsumerInterceptors(List> interceptors, Metrics metrics) { - this.interceptorPlugins = Plugin.wrapInstances(interceptors, metrics, ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG); + public ConsumerInterceptors(List> interceptors) { + this.interceptors = interceptors; } /** Returns true if no interceptors are defined. All other methods will be no-ops in this case. */ public boolean isEmpty() { - return interceptorPlugins.isEmpty(); + return interceptors.isEmpty(); } /** @@ -65,9 +62,9 @@ public boolean isEmpty() { */ public ConsumerRecords onConsume(ConsumerRecords records) { ConsumerRecords interceptRecords = records; - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ConsumerInterceptor interceptor : this.interceptors) { try { - interceptRecords = interceptorPlugin.get().onConsume(interceptRecords); + interceptRecords = interceptor.onConsume(interceptRecords); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors log.warn("Error executing interceptor onConsume callback", e); @@ -86,9 +83,9 @@ public ConsumerRecords onConsume(ConsumerRecords records) { * @param offsets A map of offsets by partition with associated metadata */ public void onCommit(Map offsets) { - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ConsumerInterceptor interceptor : this.interceptors) { try { - interceptorPlugin.get().onCommit(offsets); + interceptor.onCommit(offsets); } catch (Exception e) { // do not propagate interceptor exception, just log log.warn("Error executing interceptor onCommit callback", e); @@ -101,9 +98,9 @@ public void onCommit(Map offsets) { */ @Override public void close() { - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ConsumerInterceptor interceptor : this.interceptors) { try { - interceptorPlugin.close(); + interceptor.close(); } catch (Exception e) { log.error("Failed to close consumer interceptor ", e); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java index 25e523c3a0db7..57d6c21e48b99 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManager.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; @@ -44,10 +43,8 @@ import java.util.SortedSet; import java.util.TreeSet; import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.DEFAULT; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.LEAVE_GROUP; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP; import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_ASSIGNED; import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_LOST; import static org.apache.kafka.clients.consumer.internals.ConsumerRebalanceListenerMethodName.ON_PARTITIONS_REVOKED; @@ -111,8 +108,6 @@ public class ConsumerMembershipManager extends AbstractMembershipManager groupInstanceId; - private final Optional rackId; - /** * Rebalance timeout. To be used as time limit for the commit request issued * when a new assignment is received, that is retried until it succeeds, fails with a @@ -141,7 +136,6 @@ public class ConsumerMembershipManager extends AbstractMembershipManager groupInstanceId, - Optional rackId, int rebalanceTimeoutMs, Optional serverAssignor, SubscriptionState subscriptions, @@ -154,7 +148,6 @@ public ConsumerMembershipManager(String groupId, boolean autoCommitEnabled) { this(groupId, groupInstanceId, - rackId, rebalanceTimeoutMs, serverAssignor, subscriptions, @@ -163,14 +156,13 @@ public ConsumerMembershipManager(String groupId, logContext, backgroundEventHandler, time, - new ConsumerRebalanceMetricsManager(metrics, subscriptions), + new ConsumerRebalanceMetricsManager(metrics), autoCommitEnabled); } // Visible for testing ConsumerMembershipManager(String groupId, Optional groupInstanceId, - Optional rackId, int rebalanceTimeoutMs, Optional serverAssignor, SubscriptionState subscriptions, @@ -189,7 +181,6 @@ public ConsumerMembershipManager(String groupId, metricsManager, autoCommitEnabled); this.groupInstanceId = groupInstanceId; - this.rackId = rackId; this.rebalanceTimeoutMs = rebalanceTimeoutMs; this.serverAssignor = serverAssignor; this.commitRequestManager = commitRequestManager; @@ -204,10 +195,6 @@ public Optional groupInstanceId() { return groupInstanceId; } - public Optional rackId() { - return rackId; - } - /** * {@inheritDoc} */ @@ -227,7 +214,7 @@ public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponse response) { "already leaving the group.", memberId, memberEpoch); return; } - if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) { + if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} received a successful response to the heartbeat " + "to leave the group and completed the leave operation. ", memberId, memberEpoch); return; @@ -237,13 +224,6 @@ public void onHeartbeatSuccess(ConsumerGroupHeartbeatResponse response) { " so it's not a member of the group. ", memberId, state); return; } - if (responseData.memberEpoch() < 0) { - log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " + - "is in {} state and the member epoch is invalid: {}. ", memberId, memberEpoch, state, - responseData.memberEpoch()); - maybeCompleteLeaveInProgress(); - return; - } updateMemberEpoch(responseData.memberEpoch()); @@ -414,30 +394,10 @@ private void logPausedPartitionsBeingRevoked(Set partitionsToRev Set revokePausedPartitions = subscriptions.pausedPartitions(); revokePausedPartitions.retainAll(partitionsToRevoke); if (!revokePausedPartitions.isEmpty()) { - log.info("The pause flag in partitions {} will be removed due to revocation.", revokePausedPartitions); + log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); } } - @Override - public boolean isLeavingGroup() { - CloseOptions.GroupMembershipOperation leaveGroupOperation = leaveGroupOperation(); - if (REMAIN_IN_GROUP == leaveGroupOperation) { - return false; - } - - MemberState state = state(); - boolean isLeavingState = state == MemberState.PREPARE_LEAVING || state == MemberState.LEAVING; - - // Default operation: both static and dynamic consumers will send a leave heartbeat - boolean hasLeaveOperation = DEFAULT == leaveGroupOperation || - // Leave operation: both static and dynamic consumers will send a leave heartbeat - LEAVE_GROUP == leaveGroupOperation || - // Remain in group: only static consumers will send a leave heartbeat, while dynamic members will not - groupInstanceId().isPresent(); - - return isLeavingState && hasLeaveOperation; - } - /** * Enqueue a {@link ConsumerRebalanceListenerCallbackNeededEvent} to trigger the execution of the * appropriate {@link ConsumerRebalanceListener} {@link ConsumerRebalanceListenerMethodName method} on the @@ -509,16 +469,8 @@ public int joinGroupEpoch() { */ @Override public int leaveGroupEpoch() { - boolean isStaticMember = groupInstanceId.isPresent(); - // Currently, the server doesn't have a mechanism for static members to permanently leave the group. - // Therefore, we use LEAVE_GROUP_MEMBER_EPOCH to force the GroupMetadataManager to fence - // this member, effectively removing it from the group. - if (LEAVE_GROUP == leaveGroupOperation) { - return ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; - } - - return isStaticMember ? - ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH : - ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; + return groupInstanceId.isPresent() ? + ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH : + ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java index 677beaa5fa1c8..434e989f068e5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadata.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.consumer.ConsumerConfig; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.utils.LogContext; @@ -67,34 +66,14 @@ public boolean allowAutoTopicCreation() { return allowAutoTopicCreation; } - /** - * Constructs a metadata request builder for fetching cluster metadata for the topics the consumer needs. - * This will include: - *

        - *
      • topics the consumer is subscribed to using topic names (calls to subscribe with topic name list or client-side regex)
      • - *
      • topics the consumer is subscribed to using topic IDs (calls to subscribe with broker-side regex RE2J)
      • - *
      • topics involved in calls for fetching offsets (transient topics)
      • - *
      - * Note that this will generate a request for all topics in the cluster only when the consumer is subscribed to a client-side regex. - */ @Override public synchronized MetadataRequest.Builder newMetadataRequestBuilder() { - if (subscription.hasPatternSubscription()) { - // Consumer subscribed to client-side regex => request all topics to compute regex + if (subscription.hasPatternSubscription() || subscription.hasRe2JPatternSubscription()) return MetadataRequest.Builder.allTopics(); - } - if (subscription.hasRe2JPatternSubscription() && transientTopics.isEmpty()) { - // Consumer subscribed to broker-side regex and no need for transient topic names metadata => request topic IDs - return MetadataRequest.Builder.forTopicIds(subscription.assignedTopicIds()); - } - // Subscription to explicit topic names or transient topics present. - // Note that in the case of RE2J broker-side regex subscription, we may end up in this path - // if there are transient topics. They are just needed temporarily (lifetime of offsets-related API calls), - // so we'll request them to unblock their APIs, then go back to requesting assigned topic IDs as needed List topics = new ArrayList<>(); topics.addAll(subscription.metadataTopics()); topics.addAll(transientTopics); - return MetadataRequest.Builder.forTopicNames(topics, allowAutoTopicCreation); + return new MetadataRequest.Builder(topics, allowAutoTopicCreation); } synchronized void addTransientTopics(Set topics) { @@ -107,15 +86,6 @@ synchronized void clearTransientTopics() { this.transientTopics.clear(); } - /** - * Check if the metadata for the topic should be retained, based on the topic name. - * It will return true for: - *
        - *
      • topic names the consumer subscribed to
      • - *
      • topic names that match a client-side regex the consumer subscribed to
      • - *
      • topics involved in fetching offsets
      • - *
      - */ @Override protected synchronized boolean retainTopic(String topic, boolean isInternal, long nowMs) { if (transientTopics.contains(topic) || subscription.needsMetadata(topic)) @@ -124,21 +94,6 @@ protected synchronized boolean retainTopic(String topic, boolean isInternal, lon if (isInternal && !includeInternalTopics) return false; - return subscription.matchesSubscribedPattern(topic); - } - - /** - * Check if the metadata for the topic should be retained, based on topic name and topic ID. - * This will return true for: - *
        - *
      • topic names the consumer subscribed to
      • - *
      • topic names that match a client-side regex the consumer subscribed to
      • - *
      • topic IDs that have been received in an assignment from the broker after the consumer subscribed to a broker-side regex
      • - *
      • topics involved in fetching offsets
      • - *
      - */ - @Override - protected synchronized boolean retainTopic(String topicName, Uuid topicId, boolean isInternal, long nowMs) { - return retainTopic(topicName, isInternal, nowMs) || subscription.isAssignedFromRe2j(topicId); + return subscription.matchesSubscribedPattern(topic) || subscription.isAssignedFromRe2j(topic); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java index 3aa0bbcfbcf70..19e9a7e832094 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerMetrics.java @@ -24,8 +24,6 @@ import java.util.List; import java.util.Set; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; - public class ConsumerMetrics { public FetchMetricsRegistry fetcherMetrics; @@ -34,8 +32,8 @@ public ConsumerMetrics(Set metricsTags, String metricGrpPrefix) { this.fetcherMetrics = new FetchMetricsRegistry(metricsTags, metricGrpPrefix); } - public ConsumerMetrics() { - this(new HashSet<>(), CONSUMER_METRIC_GROUP_PREFIX); + public ConsumerMetrics(String metricGroupPrefix) { + this(new HashSet<>(), metricGroupPrefix); } private List getAllTemplates() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java index d2d178a88c38b..a48289919b023 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThread.java @@ -35,13 +35,14 @@ import java.io.Closeable; import java.time.Duration; -import java.util.ArrayList; import java.util.Collection; import java.util.LinkedList; import java.util.List; import java.util.Objects; +import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.function.Supplier; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.common.utils.Utils.closeQuietly; @@ -144,7 +145,6 @@ void initializeResources() { * */ void runOnce() { - // The following code avoids use of the Java Collections Streams API to reduce overhead in this loop. processApplicationEvents(); final long currentTimeMs = time.milliseconds(); @@ -153,24 +153,19 @@ void runOnce() { } lastPollTimeMs = currentTimeMs; - long pollWaitTimeMs = MAX_POLL_TIMEOUT_MS; - - for (RequestManager rm : requestManagers.entries()) { - NetworkClientDelegate.PollResult pollResult = rm.poll(currentTimeMs); - long timeoutMs = networkClientDelegate.addAll(pollResult); - pollWaitTimeMs = Math.min(pollWaitTimeMs, timeoutMs); - } - + final long pollWaitTimeMs = requestManagers.entries().stream() + .filter(Optional::isPresent) + .map(Optional::get) + .map(rm -> rm.poll(currentTimeMs)) + .map(networkClientDelegate::addAll) + .reduce(MAX_POLL_TIMEOUT_MS, Math::min); networkClientDelegate.poll(pollWaitTimeMs, currentTimeMs); - long maxTimeToWaitMs = Long.MAX_VALUE; - - for (RequestManager rm : requestManagers.entries()) { - long waitMs = rm.maximumTimeToWait(currentTimeMs); - maxTimeToWaitMs = Math.min(maxTimeToWaitMs, waitMs); - } - - cachedMaximumTimeToWait = maxTimeToWaitMs; + cachedMaximumTimeToWait = requestManagers.entries().stream() + .filter(Optional::isPresent) + .map(Optional::get) + .map(rm -> rm.maximumTimeToWait(currentTimeMs)) + .reduce(Long.MAX_VALUE, Math::min); reapExpiredApplicationEvents(currentTimeMs); List> uncompletedEvents = applicationEventReaper.uncompletedEvents(); @@ -238,14 +233,15 @@ private void reapExpiredApplicationEvents(long currentTimeMs) { * */ // Visible for testing - static void runAtClose(final Collection requestManagers, + static void runAtClose(final Collection> requestManagers, final NetworkClientDelegate networkClientDelegate, final long currentTimeMs) { - // These are the optional outgoing requests at the time of closing the consumer - for (RequestManager rm : requestManagers) { - NetworkClientDelegate.PollResult pollResult = rm.pollOnClose(currentTimeMs); - networkClientDelegate.addAll(pollResult); - } + // These are the optional outgoing requests at the + requestManagers.stream() + .filter(Optional::isPresent) + .map(Optional::get) + .map(rm -> rm.pollOnClose(currentTimeMs)) + .forEach(networkClientDelegate::addAll); } public boolean isRunning() { @@ -343,20 +339,11 @@ void cleanup() { log.trace("Closing the consumer network thread"); Timer timer = time.timer(closeTimeout); try { - // If an error was thrown from initializeResources(), it's possible that the list of request managers - // is null, so check before using. If the request manager list is null, there wasn't any real work - // performed, so not being able to close the request managers isn't so bad. - if (requestManagers != null && networkClientDelegate != null) - runAtClose(requestManagers.entries(), networkClientDelegate, time.milliseconds()); + runAtClose(requestManagers.entries(), networkClientDelegate, time.milliseconds()); } catch (Exception e) { log.error("Unexpected error during shutdown. Proceed with closing.", e); } finally { - // Likewise, if an error was thrown from initializeResources(), it's possible for the network client - // to be null, so check before using. If the network client is null, things have failed catastrophically - // enough that there aren't any outstanding requests to be sent anyway. - if (networkClientDelegate != null) - sendUnsentRequests(timer); - + sendUnsentRequests(timer); asyncConsumerMetrics.recordApplicationEventExpiredSize(applicationEventReaper.reap(applicationEventQueue)); closeQuietly(requestManagers, "request managers"); @@ -369,13 +356,12 @@ void cleanup() { * If there is a metadata error, complete all uncompleted events that require subscription metadata. */ private void maybeFailOnMetadataError(List> events) { - List> subscriptionMetadataEvent = new ArrayList<>(); - - for (CompletableEvent ce : events) { - if (ce instanceof CompletableApplicationEvent && ((CompletableApplicationEvent) ce).requireSubscriptionMetadata()) - subscriptionMetadataEvent.add((CompletableApplicationEvent) ce); - } - + List> subscriptionMetadataEvent = events.stream() + .filter(e -> e instanceof CompletableApplicationEvent) + .map(e -> (CompletableApplicationEvent) e) + .filter(CompletableApplicationEvent::requireSubscriptionMetadata) + .collect(Collectors.toList()); + if (subscriptionMetadataEvent.isEmpty()) return; networkClientDelegate.getAndClearMetadataError().ifPresent(metadataError -> diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java index 3f66b6ce3c383..b42cf85a8602a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerRebalanceListenerInvoker.java @@ -30,6 +30,7 @@ import java.util.Optional; import java.util.Set; import java.util.SortedSet; +import java.util.stream.Collectors; /** * This class encapsulates the invocation of the callback methods defined in the {@link ConsumerRebalanceListener} @@ -54,7 +55,7 @@ public class ConsumerRebalanceListenerInvoker { } public Exception invokePartitionsAssigned(final SortedSet assignedPartitions) { - log.info("Adding newly assigned partitions: {}", assignedPartitions); + log.info("Adding newly assigned partitions: {}", assignedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = subscriptions.rebalanceListener(); @@ -66,12 +67,8 @@ public Exception invokePartitionsAssigned(final SortedSet assign } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error( - "User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}", - listener.get().getClass().getName(), - assignedPartitions, - e - ); + log.error("User provided listener {} failed on invocation of onPartitionsAssigned for partitions {}", + listener.get().getClass().getName(), assignedPartitions, e); return e; } } @@ -80,11 +77,11 @@ public Exception invokePartitionsAssigned(final SortedSet assign } public Exception invokePartitionsRevoked(final SortedSet revokedPartitions) { - log.info("Revoke previously assigned partitions {}", revokedPartitions); + log.info("Revoke previously assigned partitions {}", revokedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Set revokePausedPartitions = subscriptions.pausedPartitions(); revokePausedPartitions.retainAll(revokedPartitions); if (!revokePausedPartitions.isEmpty()) - log.info("The pause flag in partitions {} will be removed due to revocation.", revokePausedPartitions); + log.info("The pause flag in partitions [{}] will be removed due to revocation.", revokePausedPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = subscriptions.rebalanceListener(); @@ -96,12 +93,8 @@ public Exception invokePartitionsRevoked(final SortedSet revoked } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error( - "User provided listener {} failed on invocation of onPartitionsRevoked for partitions {}", - listener.get().getClass().getName(), - revokedPartitions, - e - ); + log.error("User provided listener {} failed on invocation of onPartitionsRevoked for partitions {}", + listener.get().getClass().getName(), revokedPartitions, e); return e; } } @@ -110,11 +103,11 @@ public Exception invokePartitionsRevoked(final SortedSet revoked } public Exception invokePartitionsLost(final SortedSet lostPartitions) { - log.info("Lost previously assigned partitions {}", lostPartitions); + log.info("Lost previously assigned partitions {}", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Set lostPausedPartitions = subscriptions.pausedPartitions(); lostPausedPartitions.retainAll(lostPartitions); if (!lostPausedPartitions.isEmpty()) - log.info("The pause flag in partitions {} will be removed due to partition lost.", lostPartitions); + log.info("The pause flag in partitions [{}] will be removed due to partition lost.", lostPartitions.stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); Optional listener = subscriptions.rebalanceListener(); @@ -126,12 +119,8 @@ public Exception invokePartitionsLost(final SortedSet lostPartit } catch (WakeupException | InterruptException e) { throw e; } catch (Exception e) { - log.error( - "User provided listener {} failed on invocation of onPartitionsLost for partitions {}", - listener.get().getClass().getName(), - lostPartitions, - e - ); + log.error("User provided listener {} failed on invocation of onPartitionsLost for partitions {}", + listener.get().getClass().getName(), lostPartitions, e); return e; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java index c07a6747559c7..e4b0fa924c0d2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ConsumerUtils.java @@ -69,7 +69,6 @@ public final class ConsumerUtils { public static final String COORDINATOR_METRICS_SUFFIX = "-coordinator-metrics"; public static final String CONSUMER_METRICS_SUFFIX = "-metrics"; public static final String CONSUMER_METRIC_GROUP = CONSUMER_METRIC_GROUP_PREFIX + CONSUMER_METRICS_SUFFIX; - public static final String CONSUMER_SHARE_METRIC_GROUP = CONSUMER_SHARE_METRIC_GROUP_PREFIX + CONSUMER_METRICS_SUFFIX; /** * A fixed, large enough value will suffice for max. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java index 2c9c72e052040..dd53ae11790f8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManager.java @@ -175,7 +175,7 @@ public void markCoordinatorUnknown(final String cause, final long currentTimeMs) long durationOfOngoingDisconnectMs = Math.max(0, currentTimeMs - timeMarkedUnknownMs); long currDisconnectMin = durationOfOngoingDisconnectMs / COORDINATOR_DISCONNECT_LOGGING_INTERVAL_MS; if (currDisconnectMin > totalDisconnectedMin) { - log.warn("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs); + log.debug("Consumer has been disconnected from the group coordinator for {}ms", durationOfOngoingDisconnectMs); totalDisconnectedMin = currDisconnectMin; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java index 0926c720c0c62..5de2a888775af 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Deserializers.java @@ -19,8 +19,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.errors.InterruptException; -import org.apache.kafka.common.internals.Plugin; -import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.utils.Utils; @@ -30,54 +28,44 @@ public class Deserializers implements AutoCloseable { - private final Plugin> keyDeserializerPlugin; - private final Plugin> valueDeserializerPlugin; + public final Deserializer keyDeserializer; + public final Deserializer valueDeserializer; - public Deserializers(Deserializer keyDeserializer, Deserializer valueDeserializer, Metrics metrics) { - this.keyDeserializerPlugin = Plugin.wrapInstance( - Objects.requireNonNull(keyDeserializer, "Key deserializer provided to Deserializers should not be null"), - metrics, - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); - this.valueDeserializerPlugin = Plugin.wrapInstance( - Objects.requireNonNull(valueDeserializer, "Value deserializer provided to Deserializers should not be null"), - metrics, - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); + public Deserializers(Deserializer keyDeserializer, Deserializer valueDeserializer) { + this.keyDeserializer = Objects.requireNonNull(keyDeserializer, "Key deserializer provided to Deserializers should not be null"); + this.valueDeserializer = Objects.requireNonNull(valueDeserializer, "Value deserializer provided to Deserializers should not be null"); + } + + public Deserializers(ConsumerConfig config) { + this(config, null, null); } @SuppressWarnings("unchecked") - public Deserializers(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer, Metrics metrics) { + public Deserializers(ConsumerConfig config, Deserializer keyDeserializer, Deserializer valueDeserializer) { String clientId = config.getString(ConsumerConfig.CLIENT_ID_CONFIG); if (keyDeserializer == null) { - keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); - keyDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), true); + this.keyDeserializer = config.getConfiguredInstance(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, Deserializer.class); + this.keyDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); + this.keyDeserializer = keyDeserializer; } - this.keyDeserializerPlugin = Plugin.wrapInstance(keyDeserializer, metrics, ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG); if (valueDeserializer == null) { - valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); - valueDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), false); + this.valueDeserializer = config.getConfiguredInstance(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, Deserializer.class); + this.valueDeserializer.configure(config.originals(Collections.singletonMap(ConsumerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); + this.valueDeserializer = valueDeserializer; } - this.valueDeserializerPlugin = Plugin.wrapInstance(valueDeserializer, metrics, ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG); - } - - public Deserializer keyDeserializer() { - return keyDeserializerPlugin.get(); - } - - public Deserializer valueDeserializer() { - return valueDeserializerPlugin.get(); } @Override public void close() { AtomicReference firstException = new AtomicReference<>(); - Utils.closeQuietly(keyDeserializerPlugin, "key deserializer", firstException); - Utils.closeQuietly(valueDeserializerPlugin, "value deserializer", firstException); + Utils.closeQuietly(keyDeserializer, "key deserializer", firstException); + Utils.closeQuietly(valueDeserializer, "value deserializer", firstException); Throwable exception = firstException.get(); if (exception != null) { @@ -91,8 +79,8 @@ public void close() { @Override public String toString() { return "Deserializers{" + - "keyDeserializer=" + keyDeserializerPlugin.get() + - ", valueDeserializer=" + valueDeserializerPlugin.get() + + "keyDeserializer=" + keyDeserializer + + ", valueDeserializer=" + valueDeserializer + '}'; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java index 6cf5bc301b370..23adf9c9afaaa 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchBuffer.java @@ -27,7 +27,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashSet; -import java.util.List; import java.util.Set; import java.util.concurrent.ConcurrentLinkedQueue; import java.util.concurrent.TimeUnit; @@ -52,7 +51,7 @@ public class FetchBuffer implements AutoCloseable { private final Logger log; private final ConcurrentLinkedQueue completedFetches; private final Lock lock; - private final Condition blockingCondition; + private final Condition notEmptyCondition; private final IdempotentCloser idempotentCloser = new IdempotentCloser(); private final AtomicBoolean wokenup = new AtomicBoolean(false); @@ -63,7 +62,7 @@ public FetchBuffer(final LogContext logContext) { this.log = logContext.logger(FetchBuffer.class); this.completedFetches = new ConcurrentLinkedQueue<>(); this.lock = new ReentrantLock(); - this.blockingCondition = lock.newCondition(); + this.notEmptyCondition = lock.newCondition(); } /** @@ -96,7 +95,13 @@ boolean hasCompletedFetches(Predicate predicate) { } void add(CompletedFetch completedFetch) { - addAll(List.of(completedFetch)); + try { + lock.lock(); + completedFetches.add(completedFetch); + notEmptyCondition.signalAll(); + } finally { + lock.unlock(); + } } void addAll(Collection completedFetches) { @@ -106,8 +111,7 @@ void addAll(Collection completedFetches) { try { lock.lock(); this.completedFetches.addAll(completedFetches); - wokenup.set(true); - blockingCondition.signalAll(); + notEmptyCondition.signalAll(); } finally { lock.unlock(); } @@ -150,23 +154,23 @@ CompletedFetch poll() { } /** - * Allows the caller to await a response from the broker for requested data. The method will block, returning only + * Allows the caller to await presence of data in the buffer. The method will block, returning only * under one of the following conditions: * *
        - *
      1. The buffer was already woken
      2. - *
      3. The buffer was woken during the wait
      4. + *
      5. The buffer was already non-empty on entry
      6. + *
      7. The buffer was populated during the wait
      8. *
      9. The remaining time on the {@link Timer timer} elapsed
      10. *
      11. The thread was interrupted
      12. *
      * * @param timer Timer that provides time to wait */ - void awaitWakeup(Timer timer) { + void awaitNotEmpty(Timer timer) { try { lock.lock(); - while (!wokenup.compareAndSet(true, false)) { + while (isEmpty() && !wokenup.compareAndSet(true, false)) { // Update the timer before we head into the loop in case it took a while to get the lock. timer.update(); @@ -181,7 +185,7 @@ void awaitWakeup(Timer timer) { break; } - if (!blockingCondition.await(timer.remainingMs(), TimeUnit.MILLISECONDS)) { + if (!notEmptyCondition.await(timer.remainingMs(), TimeUnit.MILLISECONDS)) { break; } } @@ -194,10 +198,10 @@ void awaitWakeup(Timer timer) { } void wakeup() { + wokenup.set(true); try { lock.lock(); - wokenup.set(true); - blockingCondition.signalAll(); + notEmptyCondition.signalAll(); } finally { lock.unlock(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java index bbe216c2fc837..94e76edd0a578 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchCollector.java @@ -16,11 +16,13 @@ */ package org.apache.kafka.clients.consumer.internals; +import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetOutOfRangeException; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.protocol.Errors; @@ -35,6 +37,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Optional; import java.util.Queue; @@ -155,10 +158,7 @@ private Fetch fetchRecords(final CompletedFetch nextInLineFetch, int maxRe log.debug("Not returning fetched records for partition {} since it is no longer assigned", tp); } else if (!subscriptions.isFetchable(tp)) { // this can happen when a partition is paused before fetched records are returned to the consumer's - // poll call or if the offset is being reset. - // It can also happen under the Consumer rebalance protocol, when the consumer changes its subscription. - // Until the consumer receives an updated assignment from the coordinator, it can hold assigned partitions - // that are not in the subscription anymore, so we make them not fetchable. + // poll call or if the offset is being reset log.debug("Not returning fetched records for assigned partition {} since it is no longer fetchable", tp); } else { SubscriptionState.FetchPosition position = subscriptions.position(tp); @@ -263,10 +263,21 @@ private CompletedFetch handleInitializeSuccess(final CompletedFetch completedFet Iterator batches = FetchResponse.recordsOrFail(partition).batches().iterator(); if (!batches.hasNext() && FetchResponse.recordsSize(partition) > 0) { - // This should not happen with brokers that support FetchRequest/Response V4 or higher (i.e. KIP-74) - throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + - fetchOffset + ". Received a non-empty fetch response from the server, but no " + - "complete records were found."); + if (completedFetch.requestVersion < 3) { + // Implement the pre KIP-74 behavior of throwing a RecordTooLargeException. + Map recordTooLargePartitions = Collections.singletonMap(tp, fetchOffset); + throw new RecordTooLargeException("There are some messages at [Partition=Offset]: " + + recordTooLargePartitions + " whose size is larger than the fetch size " + fetchConfig.fetchSize + + " and hence cannot be returned. Please considering upgrading your broker to 0.10.1.0 or " + + "newer to avoid this issue. Alternately, increase the fetch size on the client (using " + + ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG + ")", + recordTooLargePartitions); + } else { + // This should not happen with brokers that support FetchRequest/Response V3 or higher (i.e. KIP-74) + throw new KafkaException("Failed to make progress reading messages at " + tp + "=" + + fetchOffset + ". Received a non-empty fetch response from the server, but no " + + "complete records were found."); + } } if (!updatePartitionState(partition, tp)) { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java index 98644180e8b0b..153279162bc09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManager.java @@ -24,12 +24,10 @@ import org.apache.kafka.common.metrics.stats.WindowedCount; import java.util.Collections; +import java.util.HashMap; import java.util.Map; import java.util.Set; -import static org.apache.kafka.common.utils.Utils.mkEntry; -import static org.apache.kafka.common.utils.Utils.mkMap; - /** * The {@link FetchMetricsManager} class provides wrapper methods to record lag, lead, latency, and fetch metrics. * It keeps an internal ID of the assigned set of partitions which is updated to ensure the set of metrics it @@ -103,24 +101,20 @@ void recordRecordsFetched(int records) { void recordBytesFetched(String topic, int bytes) { String name = topicBytesFetchedMetricName(topic); - maybeRecordDeprecatedBytesFetched(name, topic, bytes); - - Sensor bytesFetched = new SensorBuilder(metrics, name, () -> Map.of("topic", topic)) - .withAvg(metricsRegistry.topicFetchSizeAvg) - .withMax(metricsRegistry.topicFetchSizeMax) - .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) - .build(); + Sensor bytesFetched = new SensorBuilder(metrics, name, () -> topicTags(topic)) + .withAvg(metricsRegistry.topicFetchSizeAvg) + .withMax(metricsRegistry.topicFetchSizeMax) + .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) + .build(); bytesFetched.record(bytes); } void recordRecordsFetched(String topic, int records) { String name = topicRecordsFetchedMetricName(topic); - maybeRecordDeprecatedRecordsFetched(name, topic, records); - - Sensor recordsFetched = new SensorBuilder(metrics, name, () -> Map.of("topic", topic)) - .withAvg(metricsRegistry.topicRecordsPerRequestAvg) - .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) - .build(); + Sensor recordsFetched = new SensorBuilder(metrics, name, () -> topicTags(topic)) + .withAvg(metricsRegistry.topicRecordsPerRequestAvg) + .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) + .build(); recordsFetched.record(records); } @@ -128,13 +122,11 @@ void recordPartitionLag(TopicPartition tp, long lag) { this.recordsLag.record(lag); String name = partitionRecordsLagMetricName(tp); - maybeRecordDeprecatedPartitionLag(name, tp, lag); - - Sensor recordsLag = new SensorBuilder(metrics, name, () -> mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition())))) - .withValue(metricsRegistry.partitionRecordsLag) - .withMax(metricsRegistry.partitionRecordsLagMax) - .withAvg(metricsRegistry.partitionRecordsLagAvg) - .build(); + Sensor recordsLag = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp)) + .withValue(metricsRegistry.partitionRecordsLag) + .withMax(metricsRegistry.partitionRecordsLagMax) + .withAvg(metricsRegistry.partitionRecordsLagAvg) + .build(); recordsLag.record(lag); } @@ -143,13 +135,11 @@ void recordPartitionLead(TopicPartition tp, long lead) { this.recordsLead.record(lead); String name = partitionRecordsLeadMetricName(tp); - maybeRecordDeprecatedPartitionLead(name, tp, lead); - - Sensor recordsLead = new SensorBuilder(metrics, name, () -> mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition())))) - .withValue(metricsRegistry.partitionRecordsLead) - .withMin(metricsRegistry.partitionRecordsLeadMin) - .withAvg(metricsRegistry.partitionRecordsLeadAvg) - .build(); + Sensor recordsLead = new SensorBuilder(metrics, name, () -> topicPartitionTags(tp)) + .withValue(metricsRegistry.partitionRecordsLead) + .withMin(metricsRegistry.partitionRecordsLeadMin) + .withAvg(metricsRegistry.partitionRecordsLeadAvg) + .build(); recordsLead.record(lead); } @@ -172,22 +162,16 @@ void maybeUpdateAssignment(SubscriptionState subscription) { metrics.removeSensor(partitionRecordsLagMetricName(tp)); metrics.removeSensor(partitionRecordsLeadMetricName(tp)); metrics.removeMetric(partitionPreferredReadReplicaMetricName(tp)); - // Remove deprecated metrics. - metrics.removeSensor(deprecatedMetricName(partitionRecordsLagMetricName(tp))); - metrics.removeSensor(deprecatedMetricName(partitionRecordsLeadMetricName(tp))); - metrics.removeMetric(deprecatedPartitionPreferredReadReplicaMetricName(tp)); } } for (TopicPartition tp : newAssignedPartitions) { if (!this.assignedPartitions.contains(tp)) { - maybeRecordDeprecatedPreferredReadReplica(tp, subscription); - MetricName metricName = partitionPreferredReadReplicaMetricName(tp); metrics.addMetricIfAbsent( - metricName, - null, - (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) + metricName, + null, + (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) ); } } @@ -197,67 +181,6 @@ void maybeUpdateAssignment(SubscriptionState subscription) { } } - @Deprecated // To be removed in Kafka 5.0 release. - private void maybeRecordDeprecatedBytesFetched(String name, String topic, int bytes) { - if (shouldReportDeprecatedMetric(topic)) { - Sensor deprecatedBytesFetched = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicTags(topic)) - .withAvg(metricsRegistry.topicFetchSizeAvg) - .withMax(metricsRegistry.topicFetchSizeMax) - .withMeter(metricsRegistry.topicBytesConsumedRate, metricsRegistry.topicBytesConsumedTotal) - .build(); - deprecatedBytesFetched.record(bytes); - } - } - - @Deprecated // To be removed in Kafka 5.0 release. - private void maybeRecordDeprecatedRecordsFetched(String name, String topic, int records) { - if (shouldReportDeprecatedMetric(topic)) { - Sensor deprecatedRecordsFetched = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicTags(topic)) - .withAvg(metricsRegistry.topicRecordsPerRequestAvg) - .withMeter(metricsRegistry.topicRecordsConsumedRate, metricsRegistry.topicRecordsConsumedTotal) - .build(); - deprecatedRecordsFetched.record(records); - } - } - - @Deprecated // To be removed in Kafka 5.0 release. - private void maybeRecordDeprecatedPartitionLag(String name, TopicPartition tp, long lag) { - if (shouldReportDeprecatedMetric(tp.topic())) { - Sensor deprecatedRecordsLag = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicPartitionTags(tp)) - .withValue(metricsRegistry.partitionRecordsLag) - .withMax(metricsRegistry.partitionRecordsLagMax) - .withAvg(metricsRegistry.partitionRecordsLagAvg) - .build(); - - deprecatedRecordsLag.record(lag); - } - } - - @Deprecated // To be removed in Kafka 5.0 release. - private void maybeRecordDeprecatedPartitionLead(String name, TopicPartition tp, double lead) { - if (shouldReportDeprecatedMetric(tp.topic())) { - Sensor deprecatedRecordsLead = new SensorBuilder(metrics, deprecatedMetricName(name), () -> topicPartitionTags(tp)) - .withValue(metricsRegistry.partitionRecordsLead) - .withMin(metricsRegistry.partitionRecordsLeadMin) - .withAvg(metricsRegistry.partitionRecordsLeadAvg) - .build(); - - deprecatedRecordsLead.record(lead); - } - } - - @Deprecated // To be removed in Kafka 5.0 release. - private void maybeRecordDeprecatedPreferredReadReplica(TopicPartition tp, SubscriptionState subscription) { - if (shouldReportDeprecatedMetric(tp.topic())) { - MetricName metricName = deprecatedPartitionPreferredReadReplicaMetricName(tp); - metrics.addMetricIfAbsent( - metricName, - null, - (Gauge) (config, now) -> subscription.preferredReadReplica(tp, 0L).orElse(-1) - ); - } - } - private static String topicBytesFetchedMetricName(String topic) { return "topic." + topic + ".bytes-fetched"; } @@ -274,34 +197,22 @@ private static String partitionRecordsLagMetricName(TopicPartition tp) { return tp + ".records-lag"; } - private static String deprecatedMetricName(String name) { - return name + ".deprecated"; - } - - private static boolean shouldReportDeprecatedMetric(String topic) { - return topic.contains("."); - } - private MetricName partitionPreferredReadReplicaMetricName(TopicPartition tp) { - Map metricTags = mkMap(mkEntry("topic", tp.topic()), mkEntry("partition", String.valueOf(tp.partition()))); - return this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, metricTags); - } - - @Deprecated - private MetricName deprecatedPartitionPreferredReadReplicaMetricName(TopicPartition tp) { Map metricTags = topicPartitionTags(tp); return this.metrics.metricInstance(metricsRegistry.partitionPreferredReadReplica, metricTags); } - @Deprecated static Map topicTags(String topic) { - return Map.of("topic", topic.replace('.', '_')); + Map metricTags = new HashMap<>(1); + metricTags.put("topic", topic.replace('.', '_')); + return metricTags; } - @Deprecated static Map topicPartitionTags(TopicPartition tp) { - return mkMap(mkEntry("topic", tp.topic().replace('.', '_')), - mkEntry("partition", String.valueOf(tp.partition()))); + Map metricTags = new HashMap<>(2); + metricTags.put("topic", tp.topic().replace('.', '_')); + metricTags.put("partition", String.valueOf(tp.partition())); + return metricTags; } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java index 589cb6736b367..b0e69bb22a389 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/FetchMetricsRegistry.java @@ -26,9 +26,6 @@ public class FetchMetricsRegistry { - private static final String DEPRECATED_TOPIC_METRICS_MESSAGE = "Note: For topic names with periods (.), an additional " - + "metric with underscores is emitted. However, the periods replaced metric is deprecated. Please use the metric with actual topic name instead."; - public MetricNameTemplate fetchSizeAvg; public MetricNameTemplate fetchSizeMax; public MetricNameTemplate bytesConsumedRate; @@ -113,39 +110,39 @@ public FetchMetricsRegistry(Set tags, String metricGrpPrefix) { topicTags.add("topic"); this.topicFetchSizeAvg = new MetricNameTemplate("fetch-size-avg", groupName, - "The average number of bytes fetched per request for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The average number of bytes fetched per request for a topic", topicTags); this.topicFetchSizeMax = new MetricNameTemplate("fetch-size-max", groupName, - "The maximum number of bytes fetched per request for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The maximum number of bytes fetched per request for a topic", topicTags); this.topicBytesConsumedRate = new MetricNameTemplate("bytes-consumed-rate", groupName, - "The average number of bytes consumed per second for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The average number of bytes consumed per second for a topic", topicTags); this.topicBytesConsumedTotal = new MetricNameTemplate("bytes-consumed-total", groupName, - "The total number of bytes consumed for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The total number of bytes consumed for a topic", topicTags); this.topicRecordsPerRequestAvg = new MetricNameTemplate("records-per-request-avg", groupName, - "The average number of records in each request for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The average number of records in each request for a topic", topicTags); this.topicRecordsConsumedRate = new MetricNameTemplate("records-consumed-rate", groupName, - "The average number of records consumed per second for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The average number of records consumed per second for a topic", topicTags); this.topicRecordsConsumedTotal = new MetricNameTemplate("records-consumed-total", groupName, - "The total number of records consumed for a topic. " + DEPRECATED_TOPIC_METRICS_MESSAGE, topicTags); + "The total number of records consumed for a topic", topicTags); /* Partition level */ Set partitionTags = new HashSet<>(topicTags); partitionTags.add("partition"); this.partitionRecordsLag = new MetricNameTemplate("records-lag", groupName, - "The latest lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The latest lag of the partition", partitionTags); this.partitionRecordsLagMax = new MetricNameTemplate("records-lag-max", groupName, - "The max lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The max lag of the partition", partitionTags); this.partitionRecordsLagAvg = new MetricNameTemplate("records-lag-avg", groupName, - "The average lag of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The average lag of the partition", partitionTags); this.partitionRecordsLead = new MetricNameTemplate("records-lead", groupName, - "The latest lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The latest lead of the partition", partitionTags); this.partitionRecordsLeadMin = new MetricNameTemplate("records-lead-min", groupName, - "The min lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The min lead of the partition", partitionTags); this.partitionRecordsLeadAvg = new MetricNameTemplate("records-lead-avg", groupName, - "The average lead of the partition. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The average lead of the partition", partitionTags); this.partitionPreferredReadReplica = new MetricNameTemplate( "preferred-read-replica", groupName, - "The current read replica for the partition, or -1 if reading from leader. " + DEPRECATED_TOPIC_METRICS_MESSAGE, partitionTags); + "The current read replica for the partition, or -1 if reading from leader", partitionTags); } public List getAllTemplates() { diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java index 35d735d56c77a..ac86d1ebeaab0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/Fetcher.java @@ -138,7 +138,7 @@ protected void maybeCloseFetchSessions(final Timer timer) { // here. log.debug("All requests couldn't be sent in the specific timeout period {}ms. " + "This may result in unnecessary fetch sessions at the broker. Consider increasing the timeout passed for " + - "KafkaConsumer.close(...)", timer.timeoutMs()); + "KafkaConsumer.close(Duration timeout)", timer.timeoutMs()); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java index ae39753f3d8e8..f7446b7ad3ce7 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/RequestManagers.java @@ -54,13 +54,11 @@ public class RequestManagers implements Closeable { public final Optional shareHeartbeatRequestManager; public final Optional consumerMembershipManager; public final Optional shareMembershipManager; - public final Optional streamsMembershipManager; public final OffsetsRequestManager offsetsRequestManager; public final TopicMetadataRequestManager topicMetadataRequestManager; public final FetchRequestManager fetchRequestManager; public final Optional shareConsumeRequestManager; - public final Optional streamsGroupHeartbeatRequestManager; - private final List entries; + private final List> entries; private final IdempotentCloser closer = new IdempotentCloser(); public RequestManagers(LogContext logContext, @@ -70,9 +68,7 @@ public RequestManagers(LogContext logContext, Optional coordinatorRequestManager, Optional commitRequestManager, Optional heartbeatRequestManager, - Optional membershipManager, - Optional streamsGroupHeartbeatRequestManager, - Optional streamsMembershipManager) { + Optional membershipManager) { this.log = logContext.logger(RequestManagers.class); this.offsetsRequestManager = requireNonNull(offsetsRequestManager, "OffsetsRequestManager cannot be null"); this.coordinatorRequestManager = coordinatorRequestManager; @@ -82,21 +78,17 @@ public RequestManagers(LogContext logContext, this.shareConsumeRequestManager = Optional.empty(); this.consumerHeartbeatRequestManager = heartbeatRequestManager; this.shareHeartbeatRequestManager = Optional.empty(); - this.streamsGroupHeartbeatRequestManager = streamsGroupHeartbeatRequestManager; this.consumerMembershipManager = membershipManager; - this.streamsMembershipManager = streamsMembershipManager; this.shareMembershipManager = Optional.empty(); - List list = new ArrayList<>(); - coordinatorRequestManager.ifPresent(list::add); - commitRequestManager.ifPresent(list::add); - heartbeatRequestManager.ifPresent(list::add); - membershipManager.ifPresent(list::add); - streamsGroupHeartbeatRequestManager.ifPresent(list::add); - streamsMembershipManager.ifPresent(list::add); - list.add(offsetsRequestManager); - list.add(topicMetadataRequestManager); - list.add(fetchRequestManager); + List> list = new ArrayList<>(); + list.add(coordinatorRequestManager); + list.add(commitRequestManager); + list.add(heartbeatRequestManager); + list.add(membershipManager); + list.add(Optional.of(offsetsRequestManager)); + list.add(Optional.of(topicMetadataRequestManager)); + list.add(Optional.of(fetchRequestManager)); entries = Collections.unmodifiableList(list); } @@ -110,24 +102,22 @@ public RequestManagers(LogContext logContext, this.coordinatorRequestManager = coordinatorRequestManager; this.commitRequestManager = Optional.empty(); this.consumerHeartbeatRequestManager = Optional.empty(); - this.streamsGroupHeartbeatRequestManager = Optional.empty(); this.shareHeartbeatRequestManager = shareHeartbeatRequestManager; this.consumerMembershipManager = Optional.empty(); - this.streamsMembershipManager = Optional.empty(); this.shareMembershipManager = shareMembershipManager; this.offsetsRequestManager = null; this.topicMetadataRequestManager = null; this.fetchRequestManager = null; - List list = new ArrayList<>(); - coordinatorRequestManager.ifPresent(list::add); - shareHeartbeatRequestManager.ifPresent(list::add); - shareMembershipManager.ifPresent(list::add); - list.add(shareConsumeRequestManager); + List> list = new ArrayList<>(); + list.add(coordinatorRequestManager); + list.add(shareHeartbeatRequestManager); + list.add(shareMembershipManager); + list.add(Optional.of(shareConsumeRequestManager)); entries = Collections.unmodifiableList(list); } - public List entries() { + public List> entries() { return entries; } @@ -138,6 +128,8 @@ public void close() { log.debug("Closing RequestManagers"); entries.stream() + .filter(Optional::isPresent) + .map(Optional::get) .filter(rm -> rm instanceof Closeable) .map(rm -> (Closeable) rm) .forEach(c -> closeQuietly(c, c.getClass().getSimpleName())); @@ -166,9 +158,8 @@ public static Supplier supplier(final Time time, final Optional clientTelemetryReporter, final Metrics metrics, final OffsetCommitCallbackInvoker offsetCommitCallbackInvoker, - final MemberStateListener applicationThreadMemberStateListener, - final Optional streamsRebalanceData - ) { + final MemberStateListener applicationThreadMemberStateListener + ) { return new CachedSupplier<>() { @Override protected RequestManagers create() { @@ -196,59 +187,28 @@ protected RequestManagers create() { ConsumerMembershipManager membershipManager = null; CoordinatorRequestManager coordinator = null; CommitRequestManager commitRequestManager = null; - StreamsGroupHeartbeatRequestManager streamsGroupHeartbeatRequestManager = null; - StreamsMembershipManager streamsMembershipManager = null; if (groupRebalanceConfig != null && groupRebalanceConfig.groupId != null) { Optional serverAssignor = Optional.ofNullable(config.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); coordinator = new CoordinatorRequestManager( - logContext, - retryBackoffMs, - retryBackoffMaxMs, - groupRebalanceConfig.groupId); - commitRequestManager = new CommitRequestManager( - time, - logContext, - subscriptions, - config, - coordinator, - offsetCommitCallbackInvoker, - groupRebalanceConfig.groupId, - groupRebalanceConfig.groupInstanceId, - metrics, - metadata); - if (streamsRebalanceData.isPresent()) { - streamsMembershipManager = new StreamsMembershipManager( - groupRebalanceConfig.groupId, - streamsRebalanceData.get(), - subscriptions, - backgroundEventHandler, logContext, + retryBackoffMs, + retryBackoffMaxMs, + groupRebalanceConfig.groupId); + commitRequestManager = new CommitRequestManager( time, - metrics); - streamsMembershipManager.registerStateListener(commitRequestManager); - streamsMembershipManager.registerStateListener(applicationThreadMemberStateListener); - - if (clientTelemetryReporter.isPresent()) { - clientTelemetryReporter.get() - .updateMetricsLabels(Map.of(ClientTelemetryProvider.GROUP_MEMBER_ID, streamsMembershipManager.memberId())); - } - - streamsGroupHeartbeatRequestManager = new StreamsGroupHeartbeatRequestManager( logContext, - time, + subscriptions, config, coordinator, - streamsMembershipManager, - backgroundEventHandler, + offsetCommitCallbackInvoker, + groupRebalanceConfig.groupId, + groupRebalanceConfig.groupInstanceId, metrics, - streamsRebalanceData.get() - ); - } else { - membershipManager = new ConsumerMembershipManager( + metadata); + membershipManager = new ConsumerMembershipManager( groupRebalanceConfig.groupId, groupRebalanceConfig.groupInstanceId, - groupRebalanceConfig.rackId, groupRebalanceConfig.rebalanceTimeoutMs, serverAssignor, subscriptions, @@ -260,17 +220,17 @@ protected RequestManagers create() { metrics, config.getBoolean(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)); - // Update the group member ID label in the client telemetry reporter. - // According to KIP-1082, the consumer will generate the member ID as the incarnation ID of the process. - // Therefore, we can update the group member ID during initialization. - if (clientTelemetryReporter.isPresent()) { - clientTelemetryReporter.get() - .updateMetricsLabels(Map.of(ClientTelemetryProvider.GROUP_MEMBER_ID, membershipManager.memberId())); - } + // Update the group member ID label in the client telemetry reporter. + // According to KIP-1082, the consumer will generate the member ID as the incarnation ID of the process. + // Therefore, we can update the group member ID during initialization. + if (clientTelemetryReporter.isPresent()) { + clientTelemetryReporter.get() + .updateMetricsLabels(Map.of(ClientTelemetryProvider.GROUP_MEMBER_ID, membershipManager.memberId())); + } - membershipManager.registerStateListener(commitRequestManager); - membershipManager.registerStateListener(applicationThreadMemberStateListener); - heartbeatRequestManager = new ConsumerHeartbeatRequestManager( + membershipManager.registerStateListener(commitRequestManager); + membershipManager.registerStateListener(applicationThreadMemberStateListener); + heartbeatRequestManager = new ConsumerHeartbeatRequestManager( logContext, time, config, @@ -279,7 +239,6 @@ protected RequestManagers create() { membershipManager, backgroundEventHandler, metrics); - } } final OffsetsRequestManager listOffsets = new OffsetsRequestManager(subscriptions, @@ -302,9 +261,7 @@ protected RequestManagers create() { Optional.ofNullable(coordinator), Optional.ofNullable(commitRequestManager), Optional.ofNullable(heartbeatRequestManager), - Optional.ofNullable(membershipManager), - Optional.ofNullable(streamsGroupHeartbeatRequestManager), - Optional.ofNullable(streamsMembershipManager) + Optional.ofNullable(membershipManager) ); } }; @@ -342,10 +299,10 @@ protected RequestManagers create() { ShareMembershipManager shareMembershipManager = new ShareMembershipManager( logContext, groupRebalanceConfig.groupId, - groupRebalanceConfig.rackId.orElse(null), + null, subscriptions, metadata, - time, + time, metrics); // Update the group member ID label in the client telemetry reporter. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java index 2c337782dd415..74760beec6d73 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetch.java @@ -41,13 +41,11 @@ import java.io.Closeable; import java.nio.ByteBuffer; -import java.util.HashSet; import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.ListIterator; import java.util.Optional; -import java.util.Set; /** * {@link ShareCompletedFetch} represents a {@link RecordBatch batch} of {@link Record records} @@ -57,7 +55,7 @@ * to keep track of aborted transactions or the need to keep track of fetch position. */ public class ShareCompletedFetch { - final int nodeId; + final TopicIdPartition partition; final ShareFetchResponseData.PartitionData partitionData; final short requestVersion; @@ -81,14 +79,12 @@ public class ShareCompletedFetch { ShareCompletedFetch(final LogContext logContext, final BufferSupplier decompressionBufferSupplier, - final int nodeId, final TopicIdPartition partition, final ShareFetchResponseData.PartitionData partitionData, final ShareFetchMetricsAggregator metricAggregator, final short requestVersion) { this.log = logContext.logger(org.apache.kafka.clients.consumer.internals.ShareCompletedFetch.class); this.decompressionBufferSupplier = decompressionBufferSupplier; - this.nodeId = nodeId; this.partition = partition; this.partitionData = partitionData; this.metricAggregator = metricAggregator; @@ -154,25 +150,25 @@ void recordAggregatedMetrics(int bytes, int records) { * @param maxRecords The number of records to return; the number returned may be {@code 0 <= maxRecords} * @param checkCrcs Whether to check the CRC of fetched records * - * @return {@link ShareInFlightBatch The ShareInFlightBatch containing records and their acknowledgements} + * @return {@link ShareInFlightBatch The ShareInFlightBatch containing records and their acknowledgments} */ ShareInFlightBatch fetchRecords(final Deserializers deserializers, final int maxRecords, final boolean checkCrcs) { // Creating an empty ShareInFlightBatch - ShareInFlightBatch inFlightBatch = new ShareInFlightBatch<>(nodeId, partition); + ShareInFlightBatch inFlightBatch = new ShareInFlightBatch<>(partition); if (cachedBatchException != null) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. - Set offsets = rejectRecordBatch(inFlightBatch, currentBatch); - inFlightBatch.setException(new ShareInFlightBatchException(cachedBatchException, offsets)); + rejectRecordBatch(inFlightBatch, currentBatch); + inFlightBatch.setException(cachedBatchException); cachedBatchException = null; return inFlightBatch; } if (cachedRecordException != null) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); - inFlightBatch.setException(new ShareInFlightBatchException(cachedRecordException, Set.of(lastRecord.offset()))); + inFlightBatch.setException(cachedRecordException); cachedRecordException = null; return inFlightBatch; } @@ -226,7 +222,7 @@ ShareInFlightBatch fetchRecords(final Deserializers deseriali nextAcquired = nextAcquiredRecord(); if (inFlightBatch.isEmpty()) { inFlightBatch.addAcknowledgement(lastRecord.offset(), AcknowledgeType.RELEASE); - inFlightBatch.setException(new ShareInFlightBatchException(se, Set.of(lastRecord.offset()))); + inFlightBatch.setException(se); } else { cachedRecordException = se; inFlightBatch.setHasCachedException(true); @@ -234,8 +230,8 @@ ShareInFlightBatch fetchRecords(final Deserializers deseriali } catch (CorruptRecordException e) { if (inFlightBatch.isEmpty()) { // If the event that a CRC check fails, reject the entire record batch because it is corrupt. - Set offsets = rejectRecordBatch(inFlightBatch, currentBatch); - inFlightBatch.setException(new ShareInFlightBatchException(e, offsets)); + rejectRecordBatch(inFlightBatch, currentBatch); + inFlightBatch.setException(e); } else { cachedBatchException = e; inFlightBatch.setHasCachedException(true); @@ -263,13 +259,12 @@ private OffsetAndDeliveryCount nextAcquiredRecord() { return null; } - private Set rejectRecordBatch(final ShareInFlightBatch inFlightBatch, + private void rejectRecordBatch(final ShareInFlightBatch inFlightBatch, final RecordBatch currentBatch) { // Rewind the acquiredRecordIterator to the start, so we are in a known state acquiredRecordIterator = acquiredRecordList.listIterator(); OffsetAndDeliveryCount nextAcquired = nextAcquiredRecord(); - Set offsets = new HashSet<>(); for (long offset = currentBatch.baseOffset(); offset <= currentBatch.lastOffset(); offset++) { if (nextAcquired == null) { // No more acquired records, so we are done @@ -277,7 +272,6 @@ private Set rejectRecordBatch(final ShareInFlightBatch inFlig } else if (offset == nextAcquired.offset) { // It's acquired, so we reject it inFlightBatch.addAcknowledgement(offset, AcknowledgeType.REJECT); - offsets.add(offset); } else if (offset < nextAcquired.offset) { // It's not acquired, so we skip it continue; @@ -285,7 +279,6 @@ private Set rejectRecordBatch(final ShareInFlightBatch inFlig nextAcquired = nextAcquiredRecord(); } - return offsets; } /** @@ -303,13 +296,13 @@ ConsumerRecord parseRecord(final Deserializers deserializers, K key; V value; try { - key = keyBytes == null ? null : deserializers.keyDeserializer().deserialize(partition.topic(), headers, keyBytes); + key = keyBytes == null ? null : deserializers.keyDeserializer.deserialize(partition.topic(), headers, keyBytes); } catch (RuntimeException e) { log.error("Key Deserializers with error: {}", deserializers); throw newRecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin.KEY, partition.topicPartition(), timestampType, record, e, headers); } try { - value = valueBytes == null ? null : deserializers.valueDeserializer().deserialize(partition.topic(), headers, valueBytes); + value = valueBytes == null ? null : deserializers.valueDeserializer.deserialize(partition.topic(), headers, valueBytes); } catch (RuntimeException e) { log.error("Value Deserializers with error: {}", deserializers); throw newRecordDeserializationException(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, partition.topicPartition(), timestampType, record, e, headers); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java index 51e3fb39dfb0e..a83e971600e40 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManager.java @@ -23,16 +23,13 @@ import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackEvent; import org.apache.kafka.common.Cluster; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.internals.IdempotentCloser; import org.apache.kafka.common.message.ShareAcknowledgeRequestData; -import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.message.ShareFetchRequestData; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.Errors; @@ -49,6 +46,7 @@ import java.io.Closeable; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -89,8 +87,8 @@ public class ShareConsumeRequestManager implements RequestManager, MemberStateLi private final IdempotentCloser idempotentCloser = new IdempotentCloser(); private Uuid memberId; private boolean fetchMoreRecords = false; - private final Map> fetchAcknowledgementsToSend; - private final Map> fetchAcknowledgementsInFlight; + private final Map fetchAcknowledgementsToSend; + private final Map fetchAcknowledgementsInFlight; private final Map> acknowledgeRequestStates; private final long retryBackoffMs; private final long retryBackoffMaxMs; @@ -98,7 +96,6 @@ public class ShareConsumeRequestManager implements RequestManager, MemberStateLi private final CompletableFuture closeFuture; private boolean isAcknowledgementCommitCallbackRegistered = false; private final Map topicNamesMap = new HashMap<>(); - private static final String INVALID_RESPONSE = "Acknowledgement not successful due to invalid response from broker"; ShareConsumeRequestManager(final Time time, final LogContext logContext, @@ -149,6 +146,7 @@ public PollResult poll(long currentTimeMs) { Map handlerMap = new HashMap<>(); Map topicIds = metadata.topicIds(); + Set fetchedPartitions = new HashSet<>(); for (TopicPartition partition : partitionsToFetch()) { Optional leaderOpt = metadata.currentLeader(partition).leader; @@ -174,77 +172,71 @@ public PollResult poll(long currentTimeMs) { k -> sessionHandlers.computeIfAbsent(node.id(), n -> new ShareSessionHandler(logContext, n, memberId))); TopicIdPartition tip = new TopicIdPartition(topicId, partition); - Acknowledgements acknowledgementsToSend = null; - boolean canSendAcknowledgements = true; - - Map nodeAcksFromFetchMap = fetchAcknowledgementsToSend.get(node.id()); - if (nodeAcksFromFetchMap != null) { - acknowledgementsToSend = nodeAcksFromFetchMap.remove(tip); - - if (acknowledgementsToSend != null) { - // Check if the share session epoch is valid for sending acknowledgements. - if (!maybeAddAcknowledgements(handler, node, tip, acknowledgementsToSend)) { - canSendAcknowledgements = false; - } - } - } - - if (canSendAcknowledgements) { - handler.addPartitionToFetch(tip, acknowledgementsToSend); - } else { - handler.addPartitionToFetch(tip, null); + Acknowledgements acknowledgementsToSend = fetchAcknowledgementsToSend.remove(tip); + if (acknowledgementsToSend != null) { + metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); + fetchAcknowledgementsInFlight.put(tip, acknowledgementsToSend); } + handler.addPartitionToFetch(tip, acknowledgementsToSend); + fetchedPartitions.add(tip); topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); log.debug("Added fetch request for partition {} to node {}", tip, node.id()); } } - - // Iterate over the session handlers to see if there are acknowledgements to be sent for partitions - // which are no longer part of the current subscription. - // We fail acknowledgements for records fetched from a previous leader. + // Map storing the list of partitions to forget in the upcoming request. + Map> partitionsToForgetMap = new HashMap<>(); Cluster cluster = metadata.fetch(); + // Iterating over the session handlers to see if there are acknowledgements to be sent for partitions + // which are no longer part of the current subscription. sessionHandlers.forEach((nodeId, sessionHandler) -> { Node node = cluster.nodeById(nodeId); if (node != null) { if (nodesWithPendingRequests.contains(node.id())) { - log.trace("Skipping fetch because previous fetch request to {} has not been processed", nodeId); + log.trace("Skipping fetch because previous fetch request to {} has not been processed", node.id()); } else { - Map nodeAcksFromFetchMap = fetchAcknowledgementsToSend.get(nodeId); - if (nodeAcksFromFetchMap != null) { - nodeAcksFromFetchMap.forEach((tip, acks) -> { - if (!isLeaderKnownToHaveChanged(nodeId, tip)) { - // Check if the share session epoch is valid for sending acknowledgements. - if (!maybeAddAcknowledgements(sessionHandler, node, tip, acks)) { - return; - } + for (TopicIdPartition tip : sessionHandler.sessionPartitions()) { + if (!fetchedPartitions.contains(tip)) { + Acknowledgements acknowledgementsToSend = fetchAcknowledgementsToSend.remove(tip); - sessionHandler.addPartitionToAcknowledgeOnly(tip, acks); - handlerMap.put(node, sessionHandler); - - topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); - log.debug("Added fetch request for previously subscribed partition {} to node {}", tip, nodeId); - } else { - log.debug("Leader for the partition is down or has changed, failing Acknowledgements for partition {}", tip); - acks.complete(Errors.NOT_LEADER_OR_FOLLOWER.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, acks)); + if (acknowledgementsToSend != null) { + metricsManager.recordAcknowledgementSent(acknowledgementsToSend.size()); + fetchAcknowledgementsInFlight.put(tip, acknowledgementsToSend); } - }); - nodeAcksFromFetchMap.clear(); + sessionHandler.addPartitionToFetch(tip, acknowledgementsToSend); + partitionsToForgetMap.putIfAbsent(node, new ArrayList<>()); + partitionsToForgetMap.get(node).add(tip); + + topicNamesMap.putIfAbsent(new IdAndPartition(tip.topicId(), tip.partition()), tip.topic()); + fetchedPartitions.add(tip); + log.debug("Added fetch request for previously subscribed partition {} to node {}", tip, node.id()); + } } } } }); - // Iterate over the share session handlers and build a list of UnsentRequests - List requests = handlerMap.entrySet().stream().map(entry -> { - Node target = entry.getKey(); - ShareSessionHandler handler = entry.getValue(); + Map builderMap = new LinkedHashMap<>(); + for (Map.Entry entry : handlerMap.entrySet()) { + ShareFetchRequest.Builder builder = entry.getValue().newShareFetchBuilder(groupId, fetchConfig); + Node node = entry.getKey(); + if (partitionsToForgetMap.containsKey(node)) { + if (builder.data().forgottenTopicsData() == null) { + builder.data().setForgottenTopicsData(new ArrayList<>()); + } + builder.updateForgottenData(partitionsToForgetMap.get(node)); + } + + builderMap.put(node, builder); + } + + List requests = builderMap.entrySet().stream().map(entry -> { + Node target = entry.getKey(); log.trace("Building ShareFetch request to send to node {}", target.id()); - ShareFetchRequest.Builder requestBuilder = handler.newShareFetchBuilder(groupId, fetchConfig); + ShareFetchRequest.Builder requestBuilder = entry.getValue(); nodesWithPendingRequests.add(target.id()); @@ -261,57 +253,14 @@ public PollResult poll(long currentTimeMs) { return new PollResult(requests); } - /** - * - * @return True if we can add acknowledgements to the share session. - * If we cannot add acknowledgements, they are completed with {@link Errors#INVALID_SHARE_SESSION_EPOCH} exception. - */ - private boolean maybeAddAcknowledgements(ShareSessionHandler handler, - Node node, - TopicIdPartition tip, - Acknowledgements acknowledgements) { - if (handler.isNewSession()) { - // Failing the acknowledgements as we cannot have piggybacked acknowledgements in the initial ShareFetchRequest. - log.debug("Cannot send acknowledgements on initial epoch for ShareSession for partition {}", tip); - acknowledgements.complete(Errors.INVALID_SHARE_SESSION_EPOCH.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, acknowledgements)); - return false; - } else { - metricsManager.recordAcknowledgementSent(acknowledgements.size()); - fetchAcknowledgementsInFlight.computeIfAbsent(node.id(), k -> new HashMap<>()).put(tip, acknowledgements); - return true; - } - } - - public void fetch(Map acknowledgementsMap, - Map controlRecordAcknowledgements) { + public void fetch(Map acknowledgementsMap) { if (!fetchMoreRecords) { log.debug("Fetch more data"); fetchMoreRecords = true; } - // Process both acknowledgement maps and sends them in the next ShareFetch. - processAcknowledgementsMap(acknowledgementsMap); - processAcknowledgementsMap(controlRecordAcknowledgements); - } - - private void processAcknowledgementsMap(Map acknowledgementsMap) { - acknowledgementsMap.forEach((tip, nodeAcks) -> { - int nodeId = nodeAcks.nodeId(); - Map currentNodeAcknowledgementsMap = fetchAcknowledgementsToSend.get(nodeId); - if (currentNodeAcknowledgementsMap != null) { - Acknowledgements currentAcknowledgementsForNode = currentNodeAcknowledgementsMap.get(tip); - if (currentAcknowledgementsForNode != null) { - currentAcknowledgementsForNode.merge(nodeAcks.acknowledgements()); - } else { - currentNodeAcknowledgementsMap.put(tip, nodeAcks.acknowledgements()); - } - } else { - Map nodeAcknowledgementsMap = new HashMap<>(); - nodeAcknowledgementsMap.put(tip, nodeAcks.acknowledgements()); - fetchAcknowledgementsToSend.put(nodeId, nodeAcknowledgementsMap); - } - }); + // The acknowledgements sent via ShareFetch are stored in this map. + acknowledgementsMap.forEach((tip, acks) -> fetchAcknowledgementsToSend.merge(tip, acks, Acknowledgements::merge)); } /** @@ -331,8 +280,7 @@ private PollResult processAcknowledgements(long currentTimeMs) { log.trace("Skipping acknowledge request because previous request to {} has not been processed, so acks are not sent", nodeId); } else { isAsyncSent.set(false); - - // First, the acknowledgements from commitAsync are sent. + // First, the acknowledgements from commitAsync is sent. maybeBuildRequest(requestStates.getValue().getAsyncRequest(), currentTimeMs, true, isAsyncSent).ifPresent(unsentRequests::add); // Check to ensure we start processing commitSync/close only if there are no commitAsync requests left to process. @@ -350,15 +298,12 @@ private PollResult processAcknowledgements(long currentTimeMs) { } else { // Processing the acknowledgements from commitSync for (AcknowledgeRequestState acknowledgeRequestState : requestStates.getValue().getSyncRequestQueue()) { - if (!isNodeFree(nodeId)) { - log.trace("Skipping acknowledge request because previous request to {} has not been processed, so acks are not sent", nodeId); - break; - } maybeBuildRequest(acknowledgeRequestState, currentTimeMs, false, isAsyncSent).ifPresent(unsentRequests::add); } } } } + } PollResult pollResult = null; @@ -406,9 +351,7 @@ private Optional maybeBuildRequest(AcknowledgeRequestState acknow AtomicBoolean isAsyncSent) { boolean asyncSent = true; try { - if (acknowledgeRequestState == null || - (!acknowledgeRequestState.isCloseRequest() && acknowledgeRequestState.isEmpty()) || - (acknowledgeRequestState.isCloseRequest() && acknowledgeRequestState.isProcessed)) { + if (acknowledgeRequestState == null || (!acknowledgeRequestState.onClose() && acknowledgeRequestState.isEmpty())) { return Optional.empty(); } @@ -419,8 +362,6 @@ private Optional maybeBuildRequest(AcknowledgeRequestState acknow acknowledgeRequestState.handleAcknowledgeTimedOut(tip); } acknowledgeRequestState.incompleteAcknowledgements.clear(); - // Reset timer for any future processing on the same request state. - acknowledgeRequestState.maybeResetTimerAndRequestState(); return Optional.empty(); } @@ -485,7 +426,7 @@ private boolean checkAndRemoveCompletedAcknowledgements() { private boolean isRequestStateInProgress(AcknowledgeRequestState acknowledgeRequestState) { if (acknowledgeRequestState == null) { return false; - } else if (acknowledgeRequestState.isCloseRequest()) { + } else if (acknowledgeRequestState.onClose()) { return !acknowledgeRequestState.isProcessed; } else { return !(acknowledgeRequestState.isEmpty()); @@ -512,7 +453,7 @@ private boolean areRequestStatesInProgress(Queue acknow * @return The future which completes when the acknowledgements finished */ public CompletableFuture> commitSync( - final Map acknowledgementsMap, + final Map acknowledgementsMap, final long deadlineMs) { final AtomicInteger resultCount = new AtomicInteger(); final CompletableFuture> future = new CompletableFuture<>(); @@ -528,23 +469,17 @@ public CompletableFuture> commitSync( // Add the incoming commitSync() request to the queue. Map acknowledgementsMapForNode = new HashMap<>(); for (TopicIdPartition tip : sessionHandler.sessionPartitions()) { - NodeAcknowledgements nodeAcknowledgements = acknowledgementsMap.get(tip); - if ((nodeAcknowledgements != null) && (nodeAcknowledgements.nodeId() == node.id())) { - if (!isLeaderKnownToHaveChanged(node.id(), tip)) { - acknowledgementsMapForNode.put(tip, nodeAcknowledgements.acknowledgements()); - - metricsManager.recordAcknowledgementSent(nodeAcknowledgements.acknowledgements().size()); - log.debug("Added sync acknowledge request for partition {} to node {}", tip.topicPartition(), node.id()); - resultCount.incrementAndGet(); - } else { - nodeAcknowledgements.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, nodeAcknowledgements.acknowledgements())); - } + Acknowledgements acknowledgements = acknowledgementsMap.get(tip); + if (acknowledgements != null) { + acknowledgementsMapForNode.put(tip, acknowledgements); + + metricsManager.recordAcknowledgementSent(acknowledgements.size()); + log.debug("Added sync acknowledge request for partition {} to node {}", tip.topicPartition(), node.id()); + resultCount.incrementAndGet(); } } - if (!acknowledgementsMapForNode.isEmpty()) { - acknowledgeRequestStates.get(nodeId).addSyncRequest(new AcknowledgeRequestState(logContext, + acknowledgeRequestStates.get(nodeId).addSyncRequest(new AcknowledgeRequestState(logContext, ShareConsumeRequestManager.class.getSimpleName() + ":1", deadlineMs, retryBackoffMs, @@ -554,8 +489,7 @@ public CompletableFuture> commitSync( acknowledgementsMapForNode, resultHandler, AcknowledgeRequestType.COMMIT_SYNC - )); - } + )); } }); @@ -568,12 +502,8 @@ public CompletableFuture> commitSync( * Enqueue an AcknowledgeRequestState to be picked up on the next poll. * * @param acknowledgementsMap The acknowledgements to commit - * @param deadlineMs Time until which the request will be retried if it fails with - * an expected retriable error. */ - public void commitAsync( - final Map acknowledgementsMap, - final long deadlineMs) { + public void commitAsync(final Map acknowledgementsMap) { final Cluster cluster = metadata.fetch(); final ResultHandler resultHandler = new ResultHandler(Optional.empty()); @@ -585,36 +515,30 @@ public void commitAsync( acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null)); for (TopicIdPartition tip : sessionHandler.sessionPartitions()) { - NodeAcknowledgements nodeAcknowledgements = acknowledgementsMap.get(tip); - if ((nodeAcknowledgements != null) && (nodeAcknowledgements.nodeId() == node.id())) { - if (!isLeaderKnownToHaveChanged(node.id(), tip)) { - Acknowledgements acknowledgements = nodeAcknowledgements.acknowledgements(); - acknowledgementsMapForNode.put(tip, acknowledgements); - - metricsManager.recordAcknowledgementSent(acknowledgements.size()); - log.debug("Added async acknowledge request for partition {} to node {}", tip.topicPartition(), node.id()); - AcknowledgeRequestState asyncRequestState = acknowledgeRequestStates.get(nodeId).getAsyncRequest(); - if (asyncRequestState == null) { - acknowledgeRequestStates.get(nodeId).setAsyncRequest(new AcknowledgeRequestState(logContext, - ShareConsumeRequestManager.class.getSimpleName() + ":2", - deadlineMs, - retryBackoffMs, - retryBackoffMaxMs, - sessionHandler, - nodeId, - acknowledgementsMapForNode, - resultHandler, - AcknowledgeRequestType.COMMIT_ASYNC - )); - } else { - Acknowledgements prevAcks = asyncRequestState.acknowledgementsToSend.putIfAbsent(tip, acknowledgements); - if (prevAcks != null) { - asyncRequestState.acknowledgementsToSend.get(tip).merge(acknowledgements); - } - } + Acknowledgements acknowledgements = acknowledgementsMap.get(tip); + if (acknowledgements != null) { + acknowledgementsMapForNode.put(tip, acknowledgements); + + metricsManager.recordAcknowledgementSent(acknowledgements.size()); + log.debug("Added async acknowledge request for partition {} to node {}", tip.topicPartition(), node.id()); + AcknowledgeRequestState asyncRequestState = acknowledgeRequestStates.get(nodeId).getAsyncRequest(); + if (asyncRequestState == null) { + acknowledgeRequestStates.get(nodeId).setAsyncRequest(new AcknowledgeRequestState(logContext, + ShareConsumeRequestManager.class.getSimpleName() + ":2", + Long.MAX_VALUE, + retryBackoffMs, + retryBackoffMaxMs, + sessionHandler, + nodeId, + acknowledgementsMapForNode, + resultHandler, + AcknowledgeRequestType.COMMIT_ASYNC + )); } else { - nodeAcknowledgements.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, nodeAcknowledgements.acknowledgements())); + Acknowledgements prevAcks = asyncRequestState.acknowledgementsToSend.putIfAbsent(tip, acknowledgements); + if (prevAcks != null) { + asyncRequestState.acknowledgementsToSend.get(tip).merge(acknowledgements); + } } } } @@ -634,64 +558,41 @@ public void commitAsync( * * @return The future which completes when the acknowledgements finished */ - public CompletableFuture acknowledgeOnClose(final Map acknowledgementsMap, + public CompletableFuture acknowledgeOnClose(final Map acknowledgementsMap, final long deadlineMs) { final Cluster cluster = metadata.fetch(); final AtomicInteger resultCount = new AtomicInteger(); final ResultHandler resultHandler = new ResultHandler(resultCount, Optional.empty()); closing = true; - Map> acknowledgementsMapAllNodes = new HashMap<>(); - - acknowledgementsMap.forEach((tip, nodeAcks) -> { - if (!isLeaderKnownToHaveChanged(nodeAcks.nodeId(), tip)) { - Map acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeAcks.nodeId(), k -> new HashMap<>()); - Acknowledgements prevAcks = acksMap.putIfAbsent(tip, nodeAcks.acknowledgements()); - if (prevAcks != null) { - acksMap.get(tip).merge(nodeAcks.acknowledgements()); - } - } else { - nodeAcks.acknowledgements().complete(Errors.NOT_LEADER_OR_FOLLOWER.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, nodeAcks.acknowledgements())); - } - }); sessionHandlers.forEach((nodeId, sessionHandler) -> { Node node = cluster.nodeById(nodeId); if (node != null) { - //Add any waiting piggyback acknowledgements for the node. - Map fetchAcks = fetchAcknowledgementsToSend.remove(nodeId); - if (fetchAcks != null) { - fetchAcks.forEach((tip, acks) -> { - if (!isLeaderKnownToHaveChanged(nodeId, tip)) { - Map acksMap = acknowledgementsMapAllNodes.computeIfAbsent(nodeId, k -> new HashMap<>()); - Acknowledgements prevAcks = acksMap.putIfAbsent(tip, acks); - if (prevAcks != null) { - acksMap.get(tip).merge(acks); - } - } else { - acks.complete(Errors.NOT_LEADER_OR_FOLLOWER.exception()); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(tip, acks)); - } - }); - } + Map acknowledgementsMapForNode = new HashMap<>(); + for (TopicIdPartition tip : sessionHandler.sessionPartitions()) { + Acknowledgements acknowledgements = acknowledgementsMap.getOrDefault(tip, Acknowledgements.empty()); + + Acknowledgements acksFromShareFetch = fetchAcknowledgementsToSend.remove(tip); + + if (acksFromShareFetch != null) { + acknowledgements.merge(acksFromShareFetch); + } + + if (acknowledgements != null && !acknowledgements.isEmpty()) { + acknowledgementsMapForNode.put(tip, acknowledgements); - Map acknowledgementsMapForNode = acknowledgementsMapAllNodes.get(nodeId); - if (acknowledgementsMapForNode != null) { - acknowledgementsMapForNode.forEach((tip, acknowledgements) -> { metricsManager.recordAcknowledgementSent(acknowledgements.size()); log.debug("Added closing acknowledge request for partition {} to node {}", tip.topicPartition(), node.id()); resultCount.incrementAndGet(); - }); - } else { - acknowledgementsMapForNode = new HashMap<>(); + } } acknowledgeRequestStates.putIfAbsent(nodeId, new Tuple<>(null, null, null)); // Ensure there is no close() request already present as they are blocking calls // and only one request can be active at a time. - if (acknowledgeRequestStates.get(nodeId).getCloseRequest() != null && isRequestStateInProgress(acknowledgeRequestStates.get(nodeId).getCloseRequest())) { + if (acknowledgeRequestStates.get(nodeId).getCloseRequest() != null && !acknowledgeRequestStates.get(nodeId).getCloseRequest().isEmpty()) { log.error("Attempt to call close() when there is an existing close request for node {}-{}", node.id(), acknowledgeRequestStates.get(nodeId).getSyncRequestQueue()); closeFuture.completeExceptionally( new IllegalStateException("Attempt to call close() when there is an existing close request for node : " + node.id())); @@ -708,6 +609,7 @@ public CompletableFuture acknowledgeOnClose(final Map acknowledgeOnClose(final Map leaderNode = metadata.currentLeader(topicIdPartition.topicPartition()).leader; - if (leaderNode.isPresent()) { - if (leaderNode.get().id() != nodeId) { - log.debug("Node {} is no longer the leader for partition {}, failing acknowledgements", nodeId, topicIdPartition); - return true; - } - } else { - log.debug("No leader found for partition {}", topicIdPartition); - metadata.requestUpdate(false); - return false; - } - return false; - } - private void handleShareFetchSuccess(Node fetchTarget, @SuppressWarnings("unused") ShareFetchRequestData requestData, ClientResponse resp) { @@ -758,28 +638,17 @@ private void handleShareFetchSuccess(Node fetchTarget, if (response.error() == Errors.UNKNOWN_TOPIC_ID) { metadata.requestUpdate(false); } - // Complete any inFlight acknowledgements with the error code from the response. - Map nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id()); - if (nodeAcknowledgementsInFlight != null) { - nodeAcknowledgementsInFlight.forEach((tip, acks) -> { - acks.complete(Errors.forCode(response.error().code()).exception()); - metricsManager.recordFailedAcknowledgements(acks.size()); - }); - maybeSendShareAcknowledgeCommitCallbackEvent(nodeAcknowledgementsInFlight); - nodeAcknowledgementsInFlight.clear(); - } return; } final Map responseData = new LinkedHashMap<>(); response.data().responses().forEach(topicResponse -> - topicResponse.partitions().forEach(partition -> { - TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partition.partitionIndex()); - if (tip != null) { - responseData.put(tip, partition); - } - }) + topicResponse.partitions().forEach(partition -> + responseData.put(new TopicIdPartition(topicResponse.topicId(), + partition.partitionIndex(), + metadata.topicNames().getOrDefault(topicResponse.topicId(), + topicNamesMap.remove(new IdAndPartition(topicResponse.topicId(), partition.partitionIndex())))), partition)) ); final Set partitions = responseData.keySet().stream().map(TopicIdPartition::topicPartition).collect(Collectors.toSet()); @@ -793,22 +662,19 @@ private void handleShareFetchSuccess(Node fetchTarget, log.debug("ShareFetch for partition {} returned fetch data {}", tip, partitionData); - Map nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id()); - if (nodeAcknowledgementsInFlight != null) { - Acknowledgements acks = nodeAcknowledgementsInFlight.remove(tip); - if (acks != null) { - if (partitionData.acknowledgeErrorCode() != Errors.NONE.code()) { - metricsManager.recordFailedAcknowledgements(acks.size()); - } - acks.complete(Errors.forCode(partitionData.acknowledgeErrorCode()).exception()); - Map acksMap = Map.of(tip, acks); - maybeSendShareAcknowledgeCommitCallbackEvent(acksMap); + Acknowledgements acks = fetchAcknowledgementsInFlight.remove(tip); + if (acks != null) { + if (partitionData.acknowledgeErrorCode() != Errors.NONE.code()) { + metricsManager.recordFailedAcknowledgements(acks.size()); } + acks.setAcknowledgeErrorCode(Errors.forCode(partitionData.acknowledgeErrorCode())); + Map acksMap = Collections.singletonMap(tip, acks); + maybeSendShareAcknowledgeCommitCallbackEvent(acksMap); } Errors partitionError = Errors.forCode(partitionData.errorCode()); if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH) { - log.debug("For {}, received error {}, with leaderIdAndEpoch {} in ShareFetch", tip, partitionError, partitionData.currentLeader()); + log.debug("For {}, received error {}, with leaderIdAndEpoch {}", tip, partitionError, partitionData.currentLeader()); if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) { partitionsWithUpdatedLeaderInfo.put(tip.topicPartition(), new Metadata.LeaderIdAndEpoch( Optional.of(partitionData.currentLeader().leaderId()), Optional.of(partitionData.currentLeader().leaderEpoch()))); @@ -818,7 +684,6 @@ private void handleShareFetchSuccess(Node fetchTarget, ShareCompletedFetch completedFetch = new ShareCompletedFetch( logContext, BufferSupplier.create(), - fetchTarget.id(), tip, partitionData, shareFetchMetricsAggregator, @@ -830,14 +695,6 @@ private void handleShareFetchSuccess(Node fetchTarget, } } - // Handle any acknowledgements which were not received in the response for this node. - if (fetchAcknowledgementsInFlight.get(fetchTarget.id()) != null) { - fetchAcknowledgementsInFlight.remove(fetchTarget.id()).forEach((partition, acknowledgements) -> { - acknowledgements.complete(new InvalidRecordStateException(INVALID_RESPONSE)); - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(partition, acknowledgements)); - }); - } - if (!partitionsWithUpdatedLeaderInfo.isEmpty()) { List leaderNodes = response.data().nodeEndpoints().stream() .map(e -> new Node(e.nodeId(), e.host(), e.port(), e.rack())) @@ -864,25 +721,16 @@ private void handleShareFetchFailure(Node fetchTarget, } requestData.topics().forEach(topic -> topic.partitions().forEach(partition -> { - TopicIdPartition tip = lookupTopicId(topic.topicId(), partition.partitionIndex()); - if (tip == null) { - return; - } + TopicIdPartition tip = new TopicIdPartition(topic.topicId(), + partition.partitionIndex(), + metadata.topicNames().get(topic.topicId())); - Map nodeAcknowledgementsInFlight = fetchAcknowledgementsInFlight.get(fetchTarget.id()); - if (nodeAcknowledgementsInFlight != null) { - Acknowledgements acks = nodeAcknowledgementsInFlight.remove(tip); - - if (acks != null) { - metricsManager.recordFailedAcknowledgements(acks.size()); - if (error instanceof KafkaException) { - acks.complete((KafkaException) error); - } else { - acks.complete(Errors.UNKNOWN_SERVER_ERROR.exception()); - } - Map acksMap = Map.of(tip, acks); - maybeSendShareAcknowledgeCommitCallbackEvent(acksMap); - } + Acknowledgements acks = fetchAcknowledgementsInFlight.remove(tip); + if (acks != null) { + metricsManager.recordFailedAcknowledgements(acks.size()); + acks.setAcknowledgeErrorCode(Errors.forException(error)); + Map acksMap = Collections.singletonMap(tip, acks); + maybeSendShareAcknowledgeCommitCallbackEvent(acksMap); } })); } finally { @@ -902,21 +750,20 @@ private void handleShareAcknowledgeSuccess(Node fetchTarget, Map partitionsWithUpdatedLeaderInfo = new HashMap<>(); - if (acknowledgeRequestState.isCloseRequest()) { - response.data().responses().forEach(topicResponse -> topicResponse.partitions().forEach(partitionData -> { - TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partitionData.partitionIndex()); - if (tip == null) { - return; - } - - if (partitionData.errorCode() != Errors.NONE.code()) { + if (acknowledgeRequestState.onClose()) { + response.data().responses().forEach(topic -> topic.partitions().forEach(partition -> { + TopicIdPartition tip = new TopicIdPartition(topic.topicId(), + partition.partitionIndex(), + metadata.topicNames().get(topic.topicId())); + if (partition.errorCode() != Errors.NONE.code()) { metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip)); } - acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forCode(partitionData.errorCode())); + acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forCode(partition.errorCode())); })); acknowledgeRequestState.onSuccessfulAttempt(responseCompletionTimeMs); acknowledgeRequestState.processingComplete(); + } else { if (!acknowledgeRequestState.sessionHandler.handleResponse(response, resp.requestHeader().apiVersion())) { // Received a response-level error code. @@ -926,23 +773,59 @@ private void handleShareAcknowledgeSuccess(Node fetchTarget, // We retry the request until the timer expires, unless we are closing. acknowledgeRequestState.moveAllToIncompleteAcks(); } else { - acknowledgeRequestState.processPendingInFlightAcknowledgements(response.error().exception()); + response.data().responses().forEach(shareAcknowledgeTopicResponse -> shareAcknowledgeTopicResponse.partitions().forEach(partitionData -> { + TopicIdPartition tip = new TopicIdPartition(shareAcknowledgeTopicResponse.topicId(), + partitionData.partitionIndex(), + metadata.topicNames().get(shareAcknowledgeTopicResponse.topicId())); + + acknowledgeRequestState.handleAcknowledgeErrorCode(tip, response.error()); + })); acknowledgeRequestState.processingComplete(); } } else { AtomicBoolean shouldRetry = new AtomicBoolean(false); // Check all partition level error codes - response.data().responses().forEach(topicResponse -> topicResponse.partitions().forEach(partitionData -> { + response.data().responses().forEach(shareAcknowledgeTopicResponse -> shareAcknowledgeTopicResponse.partitions().forEach(partitionData -> { Errors partitionError = Errors.forCode(partitionData.errorCode()); - TopicIdPartition tip = lookupTopicId(topicResponse.topicId(), partitionData.partitionIndex()); - if (tip == null) { - return; - } + TopicIdPartition tip = new TopicIdPartition(shareAcknowledgeTopicResponse.topicId(), + partitionData.partitionIndex(), + metadata.topicNames().get(shareAcknowledgeTopicResponse.topicId())); + if (partitionError.exception() != null) { + boolean retry = false; + + if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH) { + // If the leader has changed, there's no point in retrying the operation because the acquisition locks + // will have been released. + TopicPartition tp = new TopicPartition(metadata.topicNames().get(shareAcknowledgeTopicResponse.topicId()), partitionData.partitionIndex()); + + log.debug("For {}, received error {}, with leaderIdAndEpoch {}", tp, partitionError, partitionData.currentLeader()); + if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) { + partitionsWithUpdatedLeaderInfo.put(tp, new Metadata.LeaderIdAndEpoch( + Optional.of(partitionData.currentLeader().leaderId()), Optional.of(partitionData.currentLeader().leaderEpoch()))); + } + } else if (partitionError.exception() instanceof RetriableException) { + retry = true; + } - handlePartitionError(partitionData, partitionsWithUpdatedLeaderInfo, acknowledgeRequestState, partitionError, tip, shouldRetry); + if (retry) { + // Move to incomplete acknowledgements to retry + acknowledgeRequestState.moveToIncompleteAcks(tip); + shouldRetry.set(true); + } else { + metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip)); + acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError); + } + } else { + acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError); + } })); - processRetryLogic(acknowledgeRequestState, shouldRetry, responseCompletionTimeMs); + if (shouldRetry.get()) { + acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs); + } else { + acknowledgeRequestState.onSuccessfulAttempt(responseCompletionTimeMs); + acknowledgeRequestState.processingComplete(); + } } } @@ -961,7 +844,7 @@ private void handleShareAcknowledgeSuccess(Node fetchTarget, log.debug("Removing pending request for node {} - success", fetchTarget.id()); nodesWithPendingRequests.remove(fetchTarget.id()); - if (acknowledgeRequestState.isCloseRequest()) { + if (acknowledgeRequestState.onClose()) { log.debug("Removing node from ShareSession {}", fetchTarget.id()); sessionHandlers.remove(fetchTarget.id()); } @@ -979,11 +862,9 @@ private void handleShareAcknowledgeFailure(Node fetchTarget, acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs); requestData.topics().forEach(topic -> topic.partitions().forEach(partition -> { - TopicIdPartition tip = lookupTopicId(topic.topicId(), partition.partitionIndex()); - if (tip == null) { - return; - } - + TopicIdPartition tip = new TopicIdPartition(topic.topicId(), + partition.partitionIndex(), + metadata.topicNames().get(topic.topicId())); metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip)); acknowledgeRequestState.handleAcknowledgeErrorCode(tip, Errors.forException(error)); })); @@ -993,85 +874,13 @@ private void handleShareAcknowledgeFailure(Node fetchTarget, log.debug("Removing pending request for node {} - failed", fetchTarget.id()); nodesWithPendingRequests.remove(fetchTarget.id()); - if (acknowledgeRequestState.isCloseRequest()) { + if (acknowledgeRequestState.onClose()) { log.debug("Removing node from ShareSession {}", fetchTarget.id()); sessionHandlers.remove(fetchTarget.id()); } } } - private void handlePartitionError(ShareAcknowledgeResponseData.PartitionData partitionData, - Map partitionsWithUpdatedLeaderInfo, - AcknowledgeRequestState acknowledgeRequestState, - Errors partitionError, - TopicIdPartition tip, - AtomicBoolean shouldRetry) { - if (partitionError.exception() != null) { - boolean retry = false; - if (partitionError == Errors.NOT_LEADER_OR_FOLLOWER || partitionError == Errors.FENCED_LEADER_EPOCH || partitionError == Errors.UNKNOWN_TOPIC_OR_PARTITION) { - // If the leader has changed, there's no point in retrying the operation because the acquisition locks - // will have been released. - // If the topic or partition has been deleted, we do not retry the failed acknowledgements. - // Instead, these records will be re-delivered once they get timed out on the broker. - updateLeaderInfoMap(partitionData, partitionsWithUpdatedLeaderInfo, partitionError, tip.topicPartition()); - } else if (partitionError.exception() instanceof RetriableException) { - retry = true; - } - - if (retry) { - if (acknowledgeRequestState.moveToIncompleteAcks(tip)) { - shouldRetry.set(true); - } - } else { - metricsManager.recordFailedAcknowledgements(acknowledgeRequestState.getInFlightAcknowledgementsCount(tip)); - acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError); - } - } else { - acknowledgeRequestState.handleAcknowledgeErrorCode(tip, partitionError); - } - } - - private void processRetryLogic(AcknowledgeRequestState acknowledgeRequestState, - AtomicBoolean shouldRetry, - long responseCompletionTimeMs) { - if (shouldRetry.get()) { - acknowledgeRequestState.onFailedAttempt(responseCompletionTimeMs); - - // Check for any acknowledgements that did not receive a response. - // These acknowledgements are failed with InvalidRecordStateException. - acknowledgeRequestState.processPendingInFlightAcknowledgements(new InvalidRecordStateException(INVALID_RESPONSE)); - } else { - acknowledgeRequestState.onSuccessfulAttempt(responseCompletionTimeMs); - acknowledgeRequestState.processingComplete(); - } - } - - private void updateLeaderInfoMap(ShareAcknowledgeResponseData.PartitionData partitionData, - Map partitionsWithUpdatedLeaderInfo, - Errors partitionError, - TopicPartition tp) { - - log.debug("For {}, received error {}, with leaderIdAndEpoch {} in ShareAcknowledge", tp, partitionError, partitionData.currentLeader()); - if (partitionData.currentLeader().leaderId() != -1 && partitionData.currentLeader().leaderEpoch() != -1) { - partitionsWithUpdatedLeaderInfo.put(tp, new Metadata.LeaderIdAndEpoch( - Optional.of(partitionData.currentLeader().leaderId()), - Optional.of(partitionData.currentLeader().leaderEpoch()) - )); - } - } - - private TopicIdPartition lookupTopicId(Uuid topicId, int partitionIndex) { - String topicName = metadata.topicNames().get(topicId); - if (topicName == null) { - topicName = topicNamesMap.remove(new IdAndPartition(topicId, partitionIndex)); - } - if (topicName == null) { - log.error("Topic name not found in metadata for topicId {} and partitionIndex {}", topicId, partitionIndex); - return null; - } - return new TopicIdPartition(topicId, partitionIndex, topicName); - } - private List partitionsToFetch() { return subscriptions.fetchablePartitions(tp -> true); } @@ -1086,7 +895,6 @@ boolean hasCompletedFetches() { protected void closeInternal() { Utils.closeQuietly(shareFetchBuffer, "shareFetchBuffer"); - Utils.closeQuietly(metricsManager, "shareFetchMetricsManager"); } public void close() { @@ -1147,11 +955,6 @@ public class AcknowledgeRequestState extends TimedRequestState { */ private boolean isProcessed; - /** - * Timeout in milliseconds indicating how long the request would be retried if it fails with a retriable exception. - */ - private final long timeoutMs; - AcknowledgeRequestState(LogContext logContext, String owner, long deadlineMs, @@ -1171,12 +974,11 @@ public class AcknowledgeRequestState extends TimedRequestState { this.incompleteAcknowledgements = new HashMap<>(); this.requestType = acknowledgeRequestType; this.isProcessed = false; - this.timeoutMs = remainingMs(); } UnsentRequest buildRequest() { // If this is the closing request, close the share session by setting the final epoch - if (isCloseRequest()) { + if (onClose()) { sessionHandler.notifyClose(); } @@ -1254,29 +1056,16 @@ boolean isEmpty() { inFlightAcknowledgements.isEmpty(); } - /** - * Resets the timer with the configured timeout and resets the RequestState. - * This is only applicable for commitAsync() requests as these states could be re-used. - */ - void maybeResetTimerAndRequestState() { - if (requestType == AcknowledgeRequestType.COMMIT_ASYNC) { - resetTimeout(timeoutMs); - reset(); - } - } - /** * Sets the error code in the acknowledgements and sends the response * through a background event. */ void handleAcknowledgeErrorCode(TopicIdPartition tip, Errors acknowledgeErrorCode) { - Acknowledgements acks = inFlightAcknowledgements.remove(tip); + Acknowledgements acks = inFlightAcknowledgements.get(tip); if (acks != null) { - acks.complete(acknowledgeErrorCode.exception()); - resultHandler.complete(tip, acks, requestType); - } else { - log.error("Invalid partition {} received in ShareAcknowledge response", tip); + acks.setAcknowledgeErrorCode(acknowledgeErrorCode); } + resultHandler.complete(tip, acks, onCommitAsync()); } /** @@ -1286,14 +1075,14 @@ void handleAcknowledgeErrorCode(TopicIdPartition tip, Errors acknowledgeErrorCod void handleAcknowledgeTimedOut(TopicIdPartition tip) { Acknowledgements acks = incompleteAcknowledgements.get(tip); if (acks != null) { - acks.complete(Errors.REQUEST_TIMED_OUT.exception()); - resultHandler.complete(tip, acks, requestType); + acks.setAcknowledgeErrorCode(Errors.REQUEST_TIMED_OUT); } + resultHandler.complete(tip, acks, onCommitAsync()); } /** * Set the error code for all remaining acknowledgements in the event - * of a session error which prevents the remaining acknowledgements from + * of a session error which prevents the remains acknowledgements from * being sent. */ void handleSessionErrorCode(Errors errorCode) { @@ -1302,9 +1091,9 @@ void handleSessionErrorCode(Errors errorCode) { acknowledgementsMapToClear.forEach((tip, acks) -> { if (acks != null) { - acks.complete(errorCode.exception()); + acks.setAcknowledgeErrorCode(errorCode); } - resultHandler.complete(tip, acks, requestType); + resultHandler.complete(tip, acks, onCommitAsync()); }); acknowledgementsMapToClear.clear(); processingComplete(); @@ -1315,25 +1104,9 @@ ShareSessionHandler sessionHandler() { } void processingComplete() { - // If there are any pending inFlightAcknowledgements after processing the response, we fail them with an InvalidRecordStateException. - processPendingInFlightAcknowledgements(new InvalidRecordStateException(INVALID_RESPONSE)); + inFlightAcknowledgements.clear(); resultHandler.completeIfEmpty(); isProcessed = true; - maybeResetTimerAndRequestState(); - } - - /** - * Fail any existing in-flight acknowledgements with the given exception and clear the map. - * We also send a background event to update {@link org.apache.kafka.clients.consumer.AcknowledgementCommitCallback } - */ - private void processPendingInFlightAcknowledgements(KafkaException exception) { - if (!inFlightAcknowledgements.isEmpty()) { - inFlightAcknowledgements.forEach((partition, acknowledgements) -> { - acknowledgements.complete(exception); - resultHandler.complete(partition, acknowledgements, requestType); - }); - inFlightAcknowledgements.clear(); - } } /** @@ -1352,25 +1125,21 @@ boolean maybeExpire() { /** * Moves the in-flight acknowledgements for a given partition to incomplete acknowledgements to retry * in the next request. - * - * @param tip The TopicIdPartition for which we move the acknowledgements. - * @return True if the partition was sent in the request. - *

      False if the partition was not part of the request, we log an error and ignore such partitions.

      */ - public boolean moveToIncompleteAcks(TopicIdPartition tip) { + public void moveToIncompleteAcks(TopicIdPartition tip) { Acknowledgements acks = inFlightAcknowledgements.remove(tip); if (acks != null) { incompleteAcknowledgements.put(tip, acks); - return true; - } else { - log.error("Invalid partition {} received in ShareAcknowledge response", tip); - return false; } } - public boolean isCloseRequest() { + public boolean onClose() { return requestType == AcknowledgeRequestType.CLOSE; } + + public boolean onCommitAsync() { + return requestType == AcknowledgeRequestType.COMMIT_ASYNC; + } } /** @@ -1399,19 +1168,20 @@ class ResultHandler { * Handle the result of a ShareAcknowledge request sent to one or more nodes and * signal the completion when all results are known. */ - public void complete(TopicIdPartition partition, Acknowledgements acknowledgements, AcknowledgeRequestType type) { - if (type.equals(AcknowledgeRequestType.COMMIT_ASYNC)) { - if (acknowledgements != null) { - maybeSendShareAcknowledgeCommitCallbackEvent(Map.of(partition, acknowledgements)); - } - } else { + public void complete(TopicIdPartition partition, Acknowledgements acknowledgements, boolean isCommitAsync) { + if (!isCommitAsync && acknowledgements != null) { + result.put(partition, acknowledgements); + } + // For commitAsync, we do not wait for other results to complete, we prepare a background event + // for every ShareAcknowledgeResponse. + // For commitAsync, we send out a background event for every TopicIdPartition, so we use a singletonMap each time. + if (isCommitAsync) { if (acknowledgements != null) { - result.put(partition, acknowledgements); - } - if (remainingResults != null && remainingResults.decrementAndGet() == 0) { - maybeSendShareAcknowledgeCommitCallbackEvent(result); - future.ifPresent(future -> future.complete(result)); + maybeSendShareAcknowledgeCommitCallbackEvent(Collections.singletonMap(partition, acknowledgements)); } + } else if (remainingResults != null && remainingResults.decrementAndGet() == 0) { + maybeSendShareAcknowledgeCommitCallbackEvent(result); + future.ifPresent(future -> future.complete(result)); } } @@ -1519,9 +1289,6 @@ public enum AcknowledgeRequestType { public String toString() { return super.toString().toLowerCase(Locale.ROOT); } - } - Map getFetchAcknowledgementsToSend(Integer nodeId) { - return fetchAcknowledgementsToSend.get(nodeId); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java index 9eb5fd13699b0..625f6abf0cd38 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerDelegateCreator.java @@ -45,7 +45,7 @@ public ShareConsumerDelegate create(final ConsumerConfig config, try { LogContext logContext = new LogContext(); Logger log = logContext.logger(getClass()); - log.warn("Share groups and KafkaShareConsumer are part of a preview feature introduced by KIP-932, and are not recommended for use in production."); + log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); return new ShareConsumerImpl<>(config, keyDeserializer, valueDeserializer); } catch (KafkaException e) { throw e; @@ -66,7 +66,7 @@ public ShareConsumerDelegate create(final LogContext logContext, final ConsumerMetadata metadata) { try { Logger log = logContext.logger(getClass()); - log.warn("Share groups and KafkaShareConsumer are part of a preview feature introduced by KIP-932, and are not recommended for use in production."); + log.warn("Share groups and KafkaShareConsumer are part of the early access of KIP-932 and MUST NOT be used in production."); return new ShareConsumerImpl<>( logContext, clientId, diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java index 12b01b5482e32..714d076143896 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImpl.java @@ -47,7 +47,6 @@ import org.apache.kafka.clients.consumer.internals.events.ShareFetchEvent; import org.apache.kafka.clients.consumer.internals.events.ShareSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.ShareUnsubscribeEvent; -import org.apache.kafka.clients.consumer.internals.events.StopFindCoordinatorOnCloseEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.clients.consumer.internals.metrics.KafkaShareConsumerMetrics; import org.apache.kafka.common.KafkaException; @@ -55,16 +54,14 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.errors.GroupAuthorizationException; +import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.internals.ClusterResourceListeners; -import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.MetricsReporter; +import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetryUtils; @@ -96,11 +93,10 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Predicate; import java.util.function.Supplier; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_JMX_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.DEFAULT_CLOSE_TIMEOUT_MS; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createMetrics; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.createShareFetchMetricsManager; @@ -170,14 +166,26 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { private final String clientId; private final String groupId; private final BlockingQueue backgroundEventQueue; - private final BackgroundEventHandler backgroundEventHandler; private final BackgroundEventProcessor backgroundEventProcessor; private final CompletableEventReaper backgroundEventReaper; private final Deserializers deserializers; private ShareFetch currentFetch; private AcknowledgementCommitCallbackHandler acknowledgementCommitCallbackHandler; private final List> completedAcknowledgements; - private final ShareAcknowledgementMode acknowledgementMode; + + private enum AcknowledgementMode { + /** Acknowledgement mode is not yet known */ + UNKNOWN, + /** Acknowledgement mode is pending, meaning that {@link #poll(Duration)} has been called once and + * {@link #acknowledge(ConsumerRecord, AcknowledgeType)} has not been called */ + PENDING, + /** Acknowledgements are explicit, using {@link #acknowledge(ConsumerRecord, AcknowledgeType)} */ + EXPLICIT, + /** Acknowledgements are implicit, not using {@link #acknowledge(ConsumerRecord, AcknowledgeType)} */ + IMPLICIT + } + + private AcknowledgementMode acknowledgementMode = AcknowledgementMode.UNKNOWN; /** * A thread-safe {@link ShareFetchBuffer fetch buffer} for the results that are populated in the @@ -191,7 +199,6 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { private final SubscriptionState subscriptions; private final ConsumerMetadata metadata; private final Metrics metrics; - private final int requestTimeoutMs; private final int defaultApiTimeoutMs; private volatile boolean closed = false; // Init value is needed to avoid NPE in case of exception raised in the constructor @@ -241,22 +248,20 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.log = logContext.logger(getClass()); log.debug("Initializing the Kafka share consumer"); - this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); this.time = time; List reporters = CommonClientConfigs.metricsReporters(clientId, config); this.clientTelemetryReporter = CommonClientConfigs.telemetryReporter(clientId, config); this.clientTelemetryReporter.ifPresent(reporters::add); this.metrics = createMetrics(config, time, reporters); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); - this.acknowledgementMode = initializeAcknowledgementMode(config, log); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); this.currentFetch = ShareFetch.empty(); this.subscriptions = createSubscriptionState(config, logContext); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( metrics.reporters(), - Arrays.asList(deserializers.keyDeserializer(), deserializers.valueDeserializer())); + Arrays.asList(deserializers.keyDeserializer, deserializers.valueDeserializer)); this.metadata = new ConsumerMetadata(config, subscriptions, logContext, clusterResourceListeners); final List addresses = ClientUtils.parseAndValidateAddresses(config); metadata.bootstrap(addresses); @@ -264,7 +269,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { ShareFetchMetricsManager shareFetchMetricsManager = createShareFetchMetricsManager(metrics); ApiVersions apiVersions = new ApiVersions(); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - this.backgroundEventHandler = new BackgroundEventHandler( + final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, asyncConsumerMetrics); // This FetchBuffer is shared between the application and network threads. @@ -324,7 +329,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { new FetchConfig(config), deserializers); - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); config.logUnused(); AppInfoParser.registerAppInfo(CONSUMER_JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -357,17 +362,15 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.time = time; this.metrics = new Metrics(time); this.clientTelemetryReporter = Optional.empty(); - this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer, metrics); + this.deserializers = new Deserializers<>(config, keyDeserializer, valueDeserializer); this.currentFetch = ShareFetch.empty(); this.subscriptions = subscriptions; this.metadata = metadata; - this.requestTimeoutMs = config.getInt(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG); this.defaultApiTimeoutMs = config.getInt(ConsumerConfig.DEFAULT_API_TIMEOUT_MS_CONFIG); - this.acknowledgementMode = initializeAcknowledgementMode(config, log); this.fetchBuffer = new ShareFetchBuffer(logContext); this.completedAcknowledgements = new LinkedList<>(); - ShareConsumerMetrics metricsRegistry = new ShareConsumerMetrics(); + ShareConsumerMetrics metricsRegistry = new ShareConsumerMetrics(CONSUMER_SHARE_METRIC_GROUP_PREFIX); ShareFetchMetricsManager shareFetchMetricsManager = new ShareFetchMetricsManager(metrics, metricsRegistry.shareFetchMetrics); this.fetchCollector = new ShareFetchCollector<>( logContext, @@ -375,12 +378,12 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { subscriptions, new FetchConfig(config), deserializers); - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); final BlockingQueue applicationEventQueue = new LinkedBlockingQueue<>(); - this.backgroundEventQueue = new LinkedBlockingQueue<>(); - this.backgroundEventHandler = new BackgroundEventHandler( + final BlockingQueue backgroundEventQueue = new LinkedBlockingQueue<>(); + final BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( backgroundEventQueue, time, asyncConsumerMetrics); final Supplier networkClientDelegateSupplier = @@ -420,6 +423,7 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { requestManagersSupplier, asyncConsumerMetrics); + this.backgroundEventQueue = new LinkedBlockingQueue<>(); this.backgroundEventProcessor = new BackgroundEventProcessor(); this.backgroundEventReaper = new CompletableEventReaper(logContext); @@ -442,10 +446,8 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { final Metrics metrics, final SubscriptionState subscriptions, final ConsumerMetadata metadata, - final int requestTimeoutMs, final int defaultApiTimeoutMs, - final String groupId, - final String acknowledgementModeConfig) { + final String groupId) { this.log = logContext.logger(getClass()); this.subscriptions = subscriptions; this.clientId = clientId; @@ -458,18 +460,14 @@ private void process(final ShareAcknowledgementCommitCallbackEvent event) { this.backgroundEventReaper = backgroundEventReaper; this.metrics = metrics; this.metadata = metadata; - this.requestTimeoutMs = requestTimeoutMs; this.defaultApiTimeoutMs = defaultApiTimeoutMs; - this.acknowledgementMode = ShareAcknowledgementMode.fromString(acknowledgementModeConfig); - this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); + this.deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); this.currentFetch = ShareFetch.empty(); this.applicationEventHandler = applicationEventHandler; - this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics); + this.kafkaShareConsumerMetrics = new KafkaShareConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP_PREFIX); this.clientTelemetryReporter = Optional.empty(); this.completedAcknowledgements = Collections.emptyList(); - this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, CONSUMER_SHARE_METRIC_GROUP); - this.backgroundEventHandler = new BackgroundEventHandler( - backgroundEventQueue, time, asyncConsumerMetrics); + this.asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); } // auxiliary interface for testing @@ -563,17 +561,16 @@ public void unsubscribe() { * {@inheritDoc} */ @Override - @SuppressWarnings("unchecked") public synchronized ConsumerRecords poll(final Duration timeout) { Timer timer = time.timer(timeout); acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(false); + handleCompletedAcknowledgements(); // If using implicit acknowledgement, acknowledge the previously fetched records - acknowledgeBatchIfImplicitAcknowledgement(); + acknowledgeBatchIfImplicitAcknowledgement(true); kafkaShareConsumerMetrics.recordPollStart(timer.currentTimeMs()); @@ -604,9 +601,6 @@ public synchronized ConsumerRecords poll(final Duration timeout) { } while (timer.notExpired()); return ConsumerRecords.empty(); - } catch (ShareFetchException e) { - currentFetch = (ShareFetch) e.shareFetch(); - throw e.cause(); } finally { kafkaShareConsumerMetrics.recordPollEnd(timer.currentTimeMs()); release(); @@ -616,7 +610,7 @@ public synchronized ConsumerRecords poll(final Duration timeout) { private ShareFetch pollForFetches(final Timer timer) { long pollTimeout = Math.min(applicationEventHandler.maximumTimeToWait(), timer.remainingMs()); - Map acknowledgementsMap = currentFetch.takeAcknowledgedRecords(); + Map acknowledgementsMap = currentFetch.takeAcknowledgedRecords(); // If data is available already, return it immediately final ShareFetch fetch = collect(acknowledgementsMap); @@ -641,19 +635,18 @@ private ShareFetch pollForFetches(final Timer timer) { return collect(Collections.emptyMap()); } - private ShareFetch collect(Map acknowledgementsMap) { + private ShareFetch collect(Map acknowledgementsMap) { if (currentFetch.isEmpty()) { final ShareFetch fetch = fetchCollector.collect(fetchBuffer); if (fetch.isEmpty()) { - // Check for any acknowledgements which could have come from control records (GAP) and include them. - applicationEventHandler.add(new ShareFetchEvent(acknowledgementsMap, fetch.takeAcknowledgedRecords())); + // Fetch more records and send any waiting acknowledgements + applicationEventHandler.add(new ShareFetchEvent(acknowledgementsMap)); // Notify the network thread to wake up and start the next round of fetching applicationEventHandler.wakeupNetworkThread(); } else if (!acknowledgementsMap.isEmpty()) { // Asynchronously commit any waiting acknowledgements - Timer timer = time.timer(defaultApiTimeoutMs); - applicationEventHandler.add(new ShareAcknowledgeAsyncEvent(acknowledgementsMap, calculateDeadlineMs(timer))); + applicationEventHandler.add(new ShareAcknowledgeAsyncEvent(acknowledgementsMap)); // Notify the network thread to wake up and start the next round of fetching applicationEventHandler.wakeupNetworkThread(); @@ -662,16 +655,11 @@ private ShareFetch collect(Map ack } else { if (!acknowledgementsMap.isEmpty()) { // Asynchronously commit any waiting acknowledgements - Timer timer = time.timer(defaultApiTimeoutMs); - applicationEventHandler.add(new ShareAcknowledgeAsyncEvent(acknowledgementsMap, calculateDeadlineMs(timer))); + applicationEventHandler.add(new ShareAcknowledgeAsyncEvent(acknowledgementsMap)); // Notify the network thread to wake up and start the next round of fetching applicationEventHandler.wakeupNetworkThread(); } - if (acknowledgementMode == ShareAcknowledgementMode.EXPLICIT) { - // We cannot leave unacknowledged records in EXPLICIT acknowledgement mode, so we throw an exception to the application. - throw new IllegalStateException("All records must be acknowledged in explicit acknowledgement mode."); - } return currentFetch; } } @@ -698,19 +686,6 @@ public void acknowledge(final ConsumerRecord record, final AcknowledgeType } } - /** - * {@inheritDoc} - */ - public void acknowledge(final String topic, final int partition, final long offset, final AcknowledgeType type) { - acquireAndEnsureOpen(); - try { - ensureExplicitAcknowledgement(); - currentFetch.acknowledge(topic, partition, offset, type); - } finally { - release(); - } - } - /** * {@inheritDoc} */ @@ -727,13 +702,13 @@ public Map> commitSync(final Duration acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(false); + handleCompletedAcknowledgements(); // If using implicit acknowledgement, acknowledge the previously fetched records - acknowledgeBatchIfImplicitAcknowledgement(); + acknowledgeBatchIfImplicitAcknowledgement(false); Timer requestTimer = time.timer(timeout.toMillis()); - Map acknowledgementsMap = acknowledgementsToSend(); + Map acknowledgementsMap = acknowledgementsToSend(); if (acknowledgementsMap.isEmpty()) { return Collections.emptyMap(); } else { @@ -745,11 +720,16 @@ public Map> commitSync(final Duration Map> result = new HashMap<>(); Map completedAcknowledgements = ConsumerUtils.getResult(commitFuture); completedAcknowledgements.forEach((tip, acks) -> { - KafkaException exception = acks.getAcknowledgeException(); - if (exception == null) { + Errors ackErrorCode = acks.getAcknowledgeErrorCode(); + if (ackErrorCode == null) { result.put(tip, Optional.empty()); } else { - result.put(tip, Optional.of(exception)); + ApiException exception = ackErrorCode.exception(); + if (exception == null) { + result.put(tip, Optional.empty()); + } else { + result.put(tip, Optional.of(ackErrorCode.exception())); + } } }); return result; @@ -771,15 +751,14 @@ public void commitAsync() { acquireAndEnsureOpen(); try { // Handle any completed acknowledgements for which we already have the responses - handleCompletedAcknowledgements(false); + handleCompletedAcknowledgements(); // If using implicit acknowledgement, acknowledge the previously fetched records - acknowledgeBatchIfImplicitAcknowledgement(); + acknowledgeBatchIfImplicitAcknowledgement(false); - Map acknowledgementsMap = acknowledgementsToSend(); + Map acknowledgementsMap = acknowledgementsToSend(); if (!acknowledgementsMap.isEmpty()) { - Timer timer = time.timer(defaultApiTimeoutMs); - ShareAcknowledgeAsyncEvent event = new ShareAcknowledgeAsyncEvent(acknowledgementsMap, calculateDeadlineMs(timer)); + ShareAcknowledgeAsyncEvent event = new ShareAcknowledgeAsyncEvent(acknowledgementsMap); applicationEventHandler.add(event); } } finally { @@ -833,30 +812,6 @@ public Uuid clientInstanceId(final Duration timeout) { return Collections.unmodifiableMap(metrics.metrics()); } - /** - * {@inheritDoc} - */ - @Override - public void registerMetricForSubscription(KafkaMetric metric) { - if (!metrics().containsKey(metric.metricName())) { - clientTelemetryReporter.ifPresent(reporter -> reporter.metricChange(metric)); - } else { - log.debug("Skipping registration for metric {}. Existing consumer metrics cannot be overwritten.", metric.metricName()); - } - } - - /** - * {@inheritDoc} - */ - @Override - public void unregisterMetricFromSubscription(KafkaMetric metric) { - if (!metrics().containsKey(metric.metricName())) { - clientTelemetryReporter.ifPresent(reporter -> reporter.metricRemoval(metric)); - } else { - log.debug("Skipping unregistration for metric {}. Existing consumer metrics cannot be removed.", metric.metricName()); - } - } - /** * {@inheritDoc} */ @@ -892,17 +847,15 @@ private void close(final Duration timeout, final boolean swallowException) { // We are already closing with a timeout, don't allow wake-ups from here on. wakeupTrigger.disableWakeups(); - final Timer closeTimer = createTimerForCloseRequests(timeout); + final Timer closeTimer = time.timer(timeout); clientTelemetryReporter.ifPresent(ClientTelemetryReporter::initiateClose); closeTimer.update(); // Prepare shutting down the network thread swallow(log, Level.ERROR, "Failed to release assignment before closing consumer", () -> sendAcknowledgementsAndLeaveGroup(closeTimer, firstException), firstException); - swallow(log, Level.ERROR, "Failed to stop finding coordinator", - this::stopFindCoordinatorOnClose, firstException); swallow(log, Level.ERROR, "Failed invoking acknowledgement commit callback", - () -> handleCompletedAcknowledgements(true), firstException); + this::handleCompletedAcknowledgements, firstException); if (applicationEventHandler != null) closeQuietly(() -> applicationEventHandler.close(Duration.ofMillis(closeTimer.remainingMs())), "Failed shutting down network thread", firstException); closeTimer.update(); @@ -929,30 +882,12 @@ private void close(final Duration timeout, final boolean swallowException) { } } - private void stopFindCoordinatorOnClose() { - if (applicationEventHandler == null) { - return; - } - log.debug("Stop finding coordinator during consumer close"); - applicationEventHandler.add(new StopFindCoordinatorOnCloseEvent()); - } - - private Timer createTimerForCloseRequests(Duration timeout) { - // this.time could be null if an exception occurs in constructor prior to setting the this.time field - final Time time = (this.time == null) ? Time.SYSTEM : this.time; - return time.timer(Math.min(timeout.toMillis(), requestTimeoutMs)); - } - /** * Prior to closing the network thread, we need to make sure the following operations happen in the right sequence: * 1. commit pending acknowledgements and close any share sessions * 2. leave the group */ private void sendAcknowledgementsAndLeaveGroup(final Timer timer, final AtomicReference firstException) { - if (applicationEventHandler == null || backgroundEventProcessor == null || - backgroundEventReaper == null || backgroundEventQueue == null) { - return; - } completeQuietly( () -> applicationEventHandler.addAndGet(new ShareAcknowledgeOnCloseEvent(acknowledgementsToSend(), calculateDeadlineMs(timer))), "Failed to send pending acknowledgements with a timeout(ms)=" + timer.timeoutMs(), firstException); @@ -961,10 +896,7 @@ private void sendAcknowledgementsAndLeaveGroup(final Timer timer, final AtomicRe ShareUnsubscribeEvent unsubscribeEvent = new ShareUnsubscribeEvent(calculateDeadlineMs(timer)); applicationEventHandler.add(unsubscribeEvent); try { - // If users have fatal error, they will get some exceptions in the background queue. - // When running unsubscribe, these exceptions should be ignored, or users can't unsubscribe successfully. - processBackgroundEvents(unsubscribeEvent.future(), timer, e -> (e instanceof GroupAuthorizationException - || e instanceof TopicAuthorizationException || e instanceof InvalidTopicException)); + processBackgroundEvents(unsubscribeEvent.future(), timer); log.info("Completed releasing assignment and leaving group to close consumer."); } catch (TimeoutException e) { log.warn("Consumer triggered an unsubscribe event to leave the group but couldn't " + @@ -1043,15 +975,8 @@ private void maybeThrowInvalidGroupIdException() { *

      * If the acknowledgement commit callback throws an exception, this method will throw an exception. */ - private void handleCompletedAcknowledgements(boolean onClose) { - if (backgroundEventQueue == null || backgroundEventReaper == null || backgroundEventProcessor == null) { - return; - } - // If the user gets any fatal errors, they will get these exceptions in the background queue. - // While closing, we ignore these exceptions so that the consumers close successfully. - processBackgroundEvents(onClose ? e -> (e instanceof GroupAuthorizationException - || e instanceof TopicAuthorizationException - || e instanceof InvalidTopicException) : e -> false); + private void handleCompletedAcknowledgements() { + processBackgroundEvents(); if (!completedAcknowledgements.isEmpty()) { try { @@ -1065,11 +990,29 @@ private void handleCompletedAcknowledgements(boolean onClose) { } /** + * Called to progressively move the acknowledgement mode into IMPLICIT if it is not known to be EXPLICIT. * If the acknowledgement mode is IMPLICIT, acknowledges all records in the current batch. + * + * @param calledOnPoll If true, called on poll. Otherwise, called on commit. */ - private void acknowledgeBatchIfImplicitAcknowledgement() { + private void acknowledgeBatchIfImplicitAcknowledgement(boolean calledOnPoll) { + if (calledOnPoll) { + if (acknowledgementMode == AcknowledgementMode.UNKNOWN) { + // The first call to poll(Duration) moves into PENDING + acknowledgementMode = AcknowledgementMode.PENDING; + } else if (acknowledgementMode == AcknowledgementMode.PENDING && !currentFetch.isEmpty()) { + // If there are records to acknowledge and PENDING, moves into IMPLICIT + acknowledgementMode = AcknowledgementMode.IMPLICIT; + } + } else { + // If there are records to acknowledge and PENDING, moves into IMPLICIT + if (acknowledgementMode == AcknowledgementMode.PENDING && !currentFetch.isEmpty()) { + acknowledgementMode = AcknowledgementMode.IMPLICIT; + } + } + // If IMPLICIT, acknowledge all records - if (acknowledgementMode == ShareAcknowledgementMode.IMPLICIT) { + if (acknowledgementMode == AcknowledgementMode.IMPLICIT) { currentFetch.acknowledgeAll(AcknowledgeType.ACCEPT); } } @@ -1077,33 +1020,21 @@ private void acknowledgeBatchIfImplicitAcknowledgement() { /** * Returns any ready acknowledgements to be sent to the cluster. */ - private Map acknowledgementsToSend() { + private Map acknowledgementsToSend() { return currentFetch.takeAcknowledgedRecords(); } /** - * Called to verify if the acknowledgement mode is EXPLICIT, else throws an exception. + * Called to move the acknowledgement mode into EXPLICIT, if it is not known to be IMPLICIT. */ private void ensureExplicitAcknowledgement() { - if (acknowledgementMode == ShareAcknowledgementMode.IMPLICIT) { + if (acknowledgementMode == AcknowledgementMode.PENDING) { + // If poll(Duration) has been called once, moves into EXPLICIT + acknowledgementMode = AcknowledgementMode.EXPLICIT; + } else if (acknowledgementMode == AcknowledgementMode.IMPLICIT) { throw new IllegalStateException("Implicit acknowledgement of delivery is being used."); - } - } - - /** - * Initializes the acknowledgement mode based on the configuration. - */ - private static ShareAcknowledgementMode initializeAcknowledgementMode(ConsumerConfig config, Logger log) { - String s = config.getString(ConsumerConfig.SHARE_ACKNOWLEDGEMENT_MODE_CONFIG); - return ShareAcknowledgementMode.fromString(s); - } - - private void processBackgroundEvents(final Predicate ignoreErrorEventException) { - try { - processBackgroundEvents(); - } catch (Exception e) { - if (!ignoreErrorEventException.test(e)) - throw e; + } else if (acknowledgementMode == AcknowledgementMode.UNKNOWN) { + throw new IllegalStateException("Acknowledge called before poll."); } } @@ -1112,30 +1043,25 @@ private void processBackgroundEvents(final Predicate ignoreErrorEvent * It is possible that {@link ErrorEvent an error} * could occur when processing the events. In such cases, the processor will take a reference to the first * error, continue to process the remaining events, and then throw the first error that occurred. - * - * Visible for testing. */ - boolean processBackgroundEvents() { + private boolean processBackgroundEvents() { AtomicReference firstError = new AtomicReference<>(); - List events = backgroundEventHandler.drainEvents(); - if (!events.isEmpty()) { - long startMs = time.milliseconds(); - for (BackgroundEvent event : events) { - asyncConsumerMetrics.recordBackgroundEventQueueTime(time.milliseconds() - event.enqueuedMs()); - try { - if (event instanceof CompletableEvent) - backgroundEventReaper.add((CompletableEvent) event); + LinkedList events = new LinkedList<>(); + backgroundEventQueue.drainTo(events); + + for (BackgroundEvent event : events) { + try { + if (event instanceof CompletableEvent) + backgroundEventReaper.add((CompletableEvent) event); - backgroundEventProcessor.process(event); - } catch (Throwable t) { - KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); + backgroundEventProcessor.process(event); + } catch (Throwable t) { + KafkaException e = ConsumerUtils.maybeWrapAsKafkaException(t); - if (!firstError.compareAndSet(null, e)) - log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); - } + if (!firstError.compareAndSet(null, e)) + log.warn("An error occurred when processing the background event: {}", e.getMessage(), e); } - asyncConsumerMetrics.recordBackgroundEventQueueProcessingTime(time.milliseconds() - startMs); } backgroundEventReaper.reap(time.milliseconds()); @@ -1163,27 +1089,18 @@ boolean processBackgroundEvents() { * Each iteration gives the application thread an opportunity to process background events, which may be * necessary to complete the overall processing. * - * @param future Event that contains a {@link CompletableFuture}; it is on this future that the - * application thread will wait for completion - * @param timer Overall timer that bounds how long to wait for the event to complete - * @param ignoreErrorEventException Predicate to ignore background errors. - * Any exceptions found while processing background events that match the predicate won't be propagated. + * @param future Event that contains a {@link CompletableFuture}; it is on this future that the + * application thread will wait for completion + * @param timer Overall timer that bounds how long to wait for the event to complete * @return {@code true} if the event completed within the timeout, {@code false} otherwise */ // Visible for testing T processBackgroundEvents(final Future future, - final Timer timer, - final Predicate ignoreErrorEventException) { + final Timer timer) { log.trace("Will wait up to {} ms for future {} to complete", timer.remainingMs(), future); do { - boolean hadEvents = false; - try { - hadEvents = processBackgroundEvents(); - } catch (Exception e) { - if (!ignoreErrorEventException.test(e)) - throw e; - } + boolean hadEvents = processBackgroundEvents(); try { if (future.isDone()) { @@ -1237,10 +1154,6 @@ public Metrics metricsRegistry() { return metrics; } - AsyncConsumerMetrics asyncConsumerMetrics() { - return asyncConsumerMetrics; - } - @Override public KafkaShareConsumerMetrics kafkaShareConsumerMetrics() { return kafkaShareConsumerMetrics; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java index ee02dfcc17b56..41a41818deea4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareConsumerMetrics.java @@ -19,8 +19,6 @@ import java.util.HashSet; import java.util.Set; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; - public class ShareConsumerMetrics { public ShareFetchMetricsRegistry shareFetchMetrics; @@ -28,7 +26,7 @@ public ShareConsumerMetrics(Set metricsTags, String metricGrpPrefix) { this.shareFetchMetrics = new ShareFetchMetricsRegistry(metricsTags, metricGrpPrefix); } - public ShareConsumerMetrics() { - this(new HashSet<>(), CONSUMER_SHARE_METRIC_GROUP_PREFIX); + public ShareConsumerMetrics(String metricGroupPrefix) { + this(new HashSet<>(), metricGroupPrefix); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java index 406110fe5024f..1bab0527536b9 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetch.java @@ -112,7 +112,7 @@ public boolean isEmpty() { * @param record The record to acknowledge * @param type The acknowledge type which indicates whether it was processed successfully */ - public void acknowledge(final ConsumerRecord record, final AcknowledgeType type) { + public void acknowledge(final ConsumerRecord record, AcknowledgeType type) { for (Map.Entry> tipBatch : batches.entrySet()) { TopicIdPartition tip = tipBatch.getKey(); if (tip.topic().equals(record.topic()) && (tip.partition() == record.partition())) { @@ -123,29 +123,6 @@ public void acknowledge(final ConsumerRecord record, final AcknowledgeType throw new IllegalStateException("The record cannot be acknowledged."); } - /** - * Acknowledge a single record by its topic, partition and offset in the current batch. - * - * @param topic The topic of the record to acknowledge - * @param partition The partition of the record - * @param offset The offset of the record - * @param type The acknowledge type which indicates whether it was processed successfully - */ - public void acknowledge(final String topic, final int partition, final long offset, final AcknowledgeType type) { - for (Map.Entry> tipBatch : batches.entrySet()) { - TopicIdPartition tip = tipBatch.getKey(); - ShareInFlightBatchException exception = tipBatch.getValue().getException(); - if (tip.topic().equals(topic) && (tip.partition() == partition) && - exception != null && - exception.offsets().contains(offset)) { - - tipBatch.getValue().addAcknowledgement(offset, type); - return; - } - } - throw new IllegalStateException("The record cannot be acknowledged."); - } - /** * Acknowledge all records in the current batch. If any records in the batch already have * been acknowledged, those acknowledgements are not overwritten. @@ -161,15 +138,14 @@ public void acknowledgeAll(final AcknowledgeType type) { * to send. If some records were not acknowledged, the in-flight records will not be empty after this * method. * - * @return The map of acknowledgements to send, along with node information + * @return The map of acknowledgements to send */ - public Map takeAcknowledgedRecords() { - Map acknowledgementMap = new LinkedHashMap<>(); + public Map takeAcknowledgedRecords() { + Map acknowledgementMap = new LinkedHashMap<>(); batches.forEach((tip, batch) -> { - int nodeId = batch.nodeId(); Acknowledgements acknowledgements = batch.takeAcknowledgedRecords(); if (!acknowledgements.isEmpty()) - acknowledgementMap.put(tip, new NodeAcknowledgements(nodeId, acknowledgements)); + acknowledgementMap.put(tip, acknowledgements); }); return acknowledgementMap; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java index c2a17d051b17e..3d073fa92eb82 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollector.java @@ -112,7 +112,7 @@ public ShareFetch collect(final ShareFetchBuffer fetchBuffer) { fetch.add(tp, batch); if (batch.getException() != null) { - throw new ShareFetchException(fetch, batch.getException().cause()); + throw batch.getException(); } else if (batch.hasCachedException()) { break; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java index d3e60a3dfaaee..249edc6aa2747 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManager.java @@ -20,10 +20,7 @@ import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.WindowedCount; -import java.io.IOException; -import java.util.Arrays; - -public class ShareFetchMetricsManager implements AutoCloseable { +public class ShareFetchMetricsManager { private final Metrics metrics; private final Sensor throttleTime; private final Sensor bytesFetched; @@ -95,16 +92,4 @@ void recordAcknowledgementSent(int acknowledgements) { void recordFailedAcknowledgements(int acknowledgements) { failedAcknowledgements.record(acknowledgements); } - - @Override - public void close() throws IOException { - Arrays.asList( - throttleTime.name(), - bytesFetched.name(), - recordsFetched.name(), - fetchLatency.name(), - sentAcknowledgements.name(), - failedAcknowledgements.name() - ).forEach(metrics::removeSensor); - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java index f46b6f72c87e1..6e37fa0ed3878 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManager.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; import org.apache.kafka.clients.consumer.internals.metrics.HeartbeatMetricsManager; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; @@ -51,12 +50,6 @@ public class ShareHeartbeatRequestManager extends AbstractHeartbeatRequestManage */ private final HeartbeatState heartbeatState; - public static final String SHARE_PROTOCOL_NOT_SUPPORTED_MSG = "The cluster does not support the share group protocol. " + - "To use share groups, the cluster must have the share group protocol enabled."; - - public static final String SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG = "The cluster does not support the share group protocol " + - "using ShareGroupHeartbeat API version 1 or later. This version of the API was introduced in Apache Kafka v4.1."; - public ShareHeartbeatRequestManager( final LogContext logContext, final Time time, @@ -80,7 +73,7 @@ public ShareHeartbeatRequestManager( final CoordinatorRequestManager coordinatorRequestManager, final ShareMembershipManager membershipManager, final HeartbeatState heartbeatState, - final HeartbeatRequestState heartbeatRequestState, + final AbstractHeartbeatRequestManager.HeartbeatRequestState heartbeatRequestState, final BackgroundEventHandler backgroundEventHandler, final Metrics metrics) { super(logContext, timer, config, coordinatorRequestManager, heartbeatRequestState, backgroundEventHandler, @@ -89,45 +82,6 @@ public ShareHeartbeatRequestManager( this.heartbeatState = heartbeatState; } - /** - * {@inheritDoc} - */ - @Override - public boolean handleSpecificFailure(Throwable exception) { - boolean errorHandled = false; - if (exception instanceof UnsupportedVersionException) { - logger.error("{} failed due to {}: {}", heartbeatRequestName(), exception.getMessage(), SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG); - handleFatalFailure(new UnsupportedVersionException(SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG, exception)); - errorHandled = true; - } - return errorHandled; - } - - /** - * {@inheritDoc} - */ - @Override - public boolean handleSpecificExceptionInResponse(final ShareGroupHeartbeatResponse response, final long currentTimeMs) { - Errors error = errorForResponse(response); - boolean errorHandled; - - switch (error) { - // Broker responded with HB not supported, meaning the new protocol is not enabled, so propagate - // custom message for it. Note that the case where the protocol is not supported at all should fail - // on the client side when building the request and checking supporting APIs (handled on onFailure). - case UNSUPPORTED_VERSION: - logger.error("{} failed due to unsupported version: {}", - heartbeatRequestName(), SHARE_PROTOCOL_NOT_SUPPORTED_MSG); - handleFatalFailure(error.exception(SHARE_PROTOCOL_NOT_SUPPORTED_MSG)); - errorHandled = true; - break; - - default: - errorHandled = false; - } - return errorHandled; - } - /** * {@inheritDoc} */ @@ -186,11 +140,6 @@ public ShareMembershipManager membershipManager() { return membershipManager; } - @Override - protected boolean shouldSendLeaveHeartbeatNow() { - return membershipManager().state() == MemberState.LEAVING; - } - /** * Builds the heartbeat requests correctly, ensuring that all information is sent according to * the protocol, but subsequent requests do not send information which has not changed. This diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java index 0fa0499aa1fba..bff9d62e3a783 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareInFlightBatch.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicIdPartition; import java.util.ArrayList; @@ -28,16 +29,14 @@ import java.util.TreeSet; public class ShareInFlightBatch { - private final int nodeId; final TopicIdPartition partition; private final Map> inFlightRecords; private final Set acknowledgedRecords; private Acknowledgements acknowledgements; - private ShareInFlightBatchException exception; + private KafkaException exception; private boolean hasCachedException = false; - public ShareInFlightBatch(int nodeId, TopicIdPartition partition) { - this.nodeId = nodeId; + public ShareInFlightBatch(TopicIdPartition partition) { this.partition = partition; inFlightRecords = new TreeMap<>(); acknowledgedRecords = new TreeSet<>(); @@ -88,10 +87,6 @@ int numRecords() { return inFlightRecords.size(); } - int nodeId() { - return nodeId; - } - Acknowledgements takeAcknowledgedRecords() { // Usually, all records will be acknowledged, so we can just clear the in-flight records leaving // an empty batch, which will trigger more fetching @@ -101,7 +96,6 @@ Acknowledgements takeAcknowledgedRecords() { acknowledgedRecords.forEach(inFlightRecords::remove); } acknowledgedRecords.clear(); - exception = null; Acknowledgements currentAcknowledgements = acknowledgements; acknowledgements = Acknowledgements.empty(); @@ -116,11 +110,11 @@ public boolean isEmpty() { return inFlightRecords.isEmpty() && acknowledgements.isEmpty(); } - public void setException(ShareInFlightBatchException exception) { + public void setException(KafkaException exception) { this.exception = exception; } - public ShareInFlightBatchException getException() { + public KafkaException getException() { return exception; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java index 47ab87edb358d..d7944466130a4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManager.java @@ -137,7 +137,7 @@ public void onHeartbeatSuccess(ShareGroupHeartbeatResponse response) { "already leaving the group.", memberId, memberEpoch); return; } - if (state == MemberState.UNSUBSCRIBED && responseData.memberEpoch() < 0 && maybeCompleteLeaveInProgress()) { + if (state == MemberState.UNSUBSCRIBED && maybeCompleteLeaveInProgress()) { log.debug("Member {} with epoch {} received a successful response to the heartbeat " + "to leave the group and completed the leave operation. ", memberId, memberEpoch); return; @@ -147,13 +147,6 @@ public void onHeartbeatSuccess(ShareGroupHeartbeatResponse response) { " so it's not a member of the group. ", memberId, state); return; } - if (responseData.memberEpoch() < 0) { - log.debug("Ignoring heartbeat response received from broker. Member {} with epoch {} " + - "is in {} state and the member epoch is invalid: {}. ", memberId, memberEpoch, state, - responseData.memberEpoch()); - maybeCompleteLeaveInProgress(); - return; - } updateMemberEpoch(responseData.memberEpoch()); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java index 634a9839c5d00..100c9ce61b6d4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandler.java @@ -68,12 +68,12 @@ public class ShareSessionHandler { */ private final LinkedHashMap sessionPartitions; - /** + /* * The partitions to be included in the next ShareFetch request. */ private LinkedHashMap nextPartitions; - /** + /* * The acknowledgements to be included in the next ShareFetch/ShareAcknowledge request. */ private LinkedHashMap nextAcknowledgements; @@ -103,14 +103,6 @@ public void addPartitionToFetch(TopicIdPartition topicIdPartition, Acknowledgeme } } - public void addPartitionToAcknowledgeOnly(TopicIdPartition topicIdPartition, Acknowledgements partitionAcknowledgements) { - nextAcknowledgements.put(topicIdPartition, partitionAcknowledgements); - } - - public boolean isNewSession() { - return nextMetadata.isNewSession(); - } - public ShareFetchRequest.Builder newShareFetchBuilder(String groupId, FetchConfig fetchConfig) { List added = new ArrayList<>(); List removed = new ArrayList<>(); @@ -179,8 +171,8 @@ public ShareFetchRequest.Builder newShareFetchBuilder(String groupId, FetchConfi return ShareFetchRequest.Builder.forConsumer( groupId, nextMetadata, fetchConfig.maxWaitMs, - fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.maxPollRecords, - fetchConfig.maxPollRecords, added, removed, acknowledgementBatches); + fetchConfig.minBytes, fetchConfig.maxBytes, fetchConfig.fetchSize, + added, removed, acknowledgementBatches); } public ShareAcknowledgeRequest.Builder newShareAcknowledgeBuilder(String groupId, FetchConfig fetchConfig) { @@ -219,8 +211,7 @@ private String topicIdPartitionsToLogString(Collection partiti */ public boolean handleResponse(ShareFetchResponse response, short version) { if ((response.error() == Errors.SHARE_SESSION_NOT_FOUND) || - (response.error() == Errors.INVALID_SHARE_SESSION_EPOCH) || - (response.error() == Errors.SHARE_SESSION_LIMIT_REACHED)) { + (response.error() == Errors.INVALID_SHARE_SESSION_EPOCH)) { log.info("Node {} was unable to process the ShareFetch request with {}: {}.", node, nextMetadata, response.error()); nextMetadata = nextMetadata.nextCloseExistingAttemptNew(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java index 9d44e98ed3954..e237165f5b771 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/SubscriptionState.java @@ -25,7 +25,6 @@ import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.internals.PartitionStates; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.utils.LogContext; @@ -92,13 +91,6 @@ private enum SubscriptionType { /* the list of topics the user has requested */ private Set subscription; - /** - * Topic IDs received in an assignment from the coordinator when using the Consumer rebalance protocol. - * This will be used to include assigned topic IDs in metadata requests when the consumer - * does not know the topic names (ex. when the user subscribes to a RE2J regex computed on the broker) - */ - private Set assignedTopicIds; - /* The list of topics the group has subscribed to. This may include some topics which are not part * of `subscription` for the leader of a group since it is responsible for detecting metadata changes * which require a group rebalance. */ @@ -157,7 +149,6 @@ public SubscriptionState(LogContext logContext, AutoOffsetResetStrategy defaultR this.log = logContext.logger(this.getClass()); this.defaultResetStrategy = defaultResetStrategy; this.subscription = new TreeSet<>(); // use a sorted set for better logging - this.assignedTopicIds = new TreeSet<>(); this.assignment = new PartitionStates<>(); this.groupSubscription = new HashSet<>(); this.subscribedPattern = null; @@ -347,7 +338,6 @@ public synchronized void unsubscribe() { this.subscription = Collections.emptySet(); this.groupSubscription = Collections.emptySet(); this.assignment.clear(); - this.assignedTopicIds = Collections.emptySet(); this.subscribedPattern = null; this.subscriptionType = SubscriptionType.NONE; this.assignmentId++; @@ -477,7 +467,7 @@ public synchronized List assignedPartitionsList() { * Provides the number of assigned partitions in a thread safe manner. * @return the number of assigned partitions. */ - public synchronized int numAssignedPartitions() { + synchronized int numAssignedPartitions() { return this.assignment.size(); } @@ -487,7 +477,7 @@ public synchronized List fetchablePartitions(Predicate result = new ArrayList<>(); assignment.forEach((topicPartition, topicPartitionState) -> { // Cheap check is first to avoid evaluating the predicate if possible - if ((subscriptionType.equals(SubscriptionType.AUTO_TOPICS_SHARE) || isFetchableAndSubscribed(topicPartition, topicPartitionState)) + if ((subscriptionType.equals(SubscriptionType.AUTO_TOPICS_SHARE) || topicPartitionState.isFetchable()) && isAvailable.test(topicPartition)) { result.add(topicPartition); } @@ -495,34 +485,23 @@ public synchronized List fetchablePartitions(Predicate assignedTopicIds() { - return assignedTopicIds; - } - - /** - * Set the set of topic IDs that have been assigned to the consumer by the coordinator. - * This is used for topic IDs received in an assignment when using the new consumer rebalance protocol (KIP-848). - */ - public synchronized void setAssignedTopicIds(Set assignedTopicIds) { - this.assignedTopicIds = assignedTopicIds; - } - /** * Enable fetching and updating positions for the given partitions that were assigned to the * consumer, but waiting for the onPartitionsAssigned callback to complete. This is diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java index 8aaf482f9accf..c61032cea7249 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TimedRequestState.java @@ -53,10 +53,6 @@ public boolean isExpired() { return timer.isExpired(); } - public void resetTimeout(long timeoutMs) { - timer.updateAndReset(timeoutMs); - } - public long remainingMs() { timer.update(); return timer.remainingMs(); diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java index fcef3ce2647af..2d9cab0dd9686 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManager.java @@ -33,16 +33,15 @@ import org.slf4j.Logger; -import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.concurrent.CompletableFuture; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.internals.NetworkClientDelegate.PollResult.EMPTY; @@ -85,23 +84,16 @@ public TopicMetadataRequestManager(final LogContext context, final Time time, fi @Override public NetworkClientDelegate.PollResult poll(final long currentTimeMs) { // Prune any requests which have timed out - Iterator requestStateIterator = inflightRequests.iterator(); + List expiredRequests = inflightRequests.stream() + .filter(TimedRequestState::isExpired) + .collect(Collectors.toList()); + expiredRequests.forEach(TopicMetadataRequestState::expire); - while (requestStateIterator.hasNext()) { - TopicMetadataRequestState requestState = requestStateIterator.next(); - - if (requestState.isExpired()) { - requestState.expire(); - requestStateIterator.remove(); - } - } - - List requests = new ArrayList<>(); - - for (TopicMetadataRequestState request : inflightRequests) { - Optional unsentRequest = request.send(currentTimeMs); - unsentRequest.ifPresent(requests::add); - } + List requests = inflightRequests.stream() + .map(req -> req.send(currentTimeMs)) + .filter(Optional::isPresent) + .map(Optional::get) + .collect(Collectors.toList()); return requests.isEmpty() ? EMPTY : new NetworkClientDelegate.PollResult(0, requests); } @@ -189,9 +181,7 @@ private Optional send(final long currentTim } private void expire() { - // The request state is removed from inflightRequests via an iterator by the caller of this method, - // so don't remove it from inflightRequests here. - future.completeExceptionally( + completeFutureAndRemoveRequest( new TimeoutException("Timeout expired while fetching topic metadata")); } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java index f3f0e161015b4..4e0b8e3d2d17f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEvent.java @@ -18,6 +18,7 @@ import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer; import org.apache.kafka.clients.consumer.internals.ShareConsumerImpl; +import org.apache.kafka.common.Uuid; import java.util.Objects; @@ -40,13 +41,16 @@ public enum Type { SHARE_ACKNOWLEDGE_ON_CLOSE, SHARE_ACKNOWLEDGEMENT_COMMIT_CALLBACK_REGISTRATION, SEEK_UNVALIDATED, - STREAMS_ON_TASKS_ASSIGNED_CALLBACK_COMPLETED, - STREAMS_ON_TASKS_REVOKED_CALLBACK_COMPLETED, - STREAMS_ON_ALL_TASKS_LOST_CALLBACK_COMPLETED, } private final Type type; + /** + * This identifies a particular event. It is used to disambiguate events via {@link #hashCode()} and + * {@link #equals(Object)} and can be used in log messages when debugging. + */ + private final Uuid id; + /** * The time in milliseconds when this event was enqueued. * This field can be changed after the event is created, so it should not be used in hashCode or equals. @@ -55,12 +59,17 @@ public enum Type { protected ApplicationEvent(Type type) { this.type = Objects.requireNonNull(type); + this.id = Uuid.randomUuid(); } public Type type() { return type; } + public Uuid id() { + return id; + } + public void setEnqueuedMs(long enqueuedMs) { this.enqueuedMs = enqueuedMs; } @@ -69,8 +78,21 @@ public long enqueuedMs() { return enqueuedMs; } + @Override + public final boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ApplicationEvent that = (ApplicationEvent) o; + return type == that.type && id.equals(that.id); + } + + @Override + public final int hashCode() { + return Objects.hash(type, id); + } + protected String toStringBase() { - return "type=" + type + ", enqueuedMs=" + enqueuedMs; + return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java index 853c5484df5be..08661ef7581c0 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessor.java @@ -72,7 +72,7 @@ public ApplicationEventProcessor(final LogContext logContext, this.metadataVersionSnapshot = metadata.updateVersion(); } - @SuppressWarnings({"CyclomaticComplexity", "JavaNCSSCheck"}) + @SuppressWarnings({"CyclomaticComplexity"}) @Override public void process(ApplicationEvent event) { switch (event.type()) { @@ -200,18 +200,6 @@ public void process(ApplicationEvent event) { process((CurrentLagEvent) event); return; - case STREAMS_ON_TASKS_REVOKED_CALLBACK_COMPLETED: - process((StreamsOnTasksRevokedCallbackCompletedEvent) event); - return; - - case STREAMS_ON_TASKS_ASSIGNED_CALLBACK_COMPLETED: - process((StreamsOnTasksAssignedCallbackCompletedEvent) event); - return; - - case STREAMS_ON_ALL_TASKS_LOST_CALLBACK_COMPLETED: - process((StreamsOnAllTasksLostCallbackCompletedEvent) event); - return; - default: log.warn("Application event type {} was not expected", event.type()); } @@ -232,10 +220,6 @@ private void process(final PollEvent event) { hrm.membershipManager().onConsumerPoll(); hrm.resetPollTimer(event.pollTimeMs()); }); - requestManagers.streamsGroupHeartbeatRequestManager.ifPresent(hrm -> { - hrm.membershipManager().onConsumerPoll(); - hrm.resetPollTimer(event.pollTimeMs()); - }); } else { // safe to unblock - no auto-commit risk here: // 1. commitRequestManager is not present @@ -310,7 +294,7 @@ private void process(final AssignmentChangeEvent event) { manager.updateTimerAndMaybeCommit(event.currentTimeMs()); } - log.info("Assigned to partition(s): {}", event.partitions()); + log.info("Assigned to partition(s): {}", event.partitions().stream().map(TopicPartition::toString).collect(Collectors.joining(", "))); try { if (subscriptions.assignFromUser(new HashSet<>(event.partitions()))) metadata.requestUpdateForNewTopics(); @@ -336,32 +320,22 @@ private void process(final ListOffsetsEvent event) { * it is already a member on the next poll. */ private void process(final TopicSubscriptionChangeEvent event) { - if (requestManagers.consumerHeartbeatRequestManager.isPresent()) { - try { - if (subscriptions.subscribe(event.topics(), event.listener())) { - this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); - } - // Join the group if not already part of it, or just send the new subscription to the broker on the next poll. - requestManagers.consumerHeartbeatRequestManager.get().membershipManager().onSubscriptionUpdated(); - event.future().complete(null); - } catch (Exception e) { - event.future().completeExceptionally(e); - } - } else if (requestManagers.streamsGroupHeartbeatRequestManager.isPresent()) { - try { - if (subscriptions.subscribe(event.topics(), event.listener())) { - this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); - } - requestManagers.streamsMembershipManager.get().onSubscriptionUpdated(); - event.future().complete(null); - } catch (Exception e) { - event.future().completeExceptionally(e); - } - } else { + if (requestManagers.consumerHeartbeatRequestManager.isEmpty()) { log.warn("Group membership manager not present when processing a subscribe event"); event.future().complete(null); + return; } + try { + if (subscriptions.subscribe(event.topics(), event.listener())) + this.metadataVersionSnapshot = metadata.requestUpdateForNewTopics(); + + // Join the group if not already part of it, or just send the new subscription to the broker on the next poll. + requestManagers.consumerHeartbeatRequestManager.get().membershipManager().onSubscriptionUpdated(); + event.future().complete(null); + } catch (Exception e) { + event.future().completeExceptionally(e); + } } /** @@ -431,9 +405,6 @@ private void process(final UnsubscribeEvent event) { if (requestManagers.consumerHeartbeatRequestManager.isPresent()) { CompletableFuture future = requestManagers.consumerHeartbeatRequestManager.get().membershipManager().leaveGroup(); future.whenComplete(complete(event.future())); - } else if (requestManagers.streamsGroupHeartbeatRequestManager.isPresent()) { - CompletableFuture future = requestManagers.streamsGroupHeartbeatRequestManager.get().membershipManager().leaveGroup(); - future.whenComplete(complete(event.future())); } else { // If the consumer is not using the group management capabilities, we still need to clear all assignments it may have. subscriptions.unsubscribe(); @@ -492,15 +463,12 @@ private void process(@SuppressWarnings("unused") final CommitOnCloseEvent event) } private void process(final LeaveGroupOnCloseEvent event) { - if (requestManagers.consumerMembershipManager.isPresent()) { - log.debug("Signal the ConsumerMembershipManager to leave the consumer group since the consumer is closing"); - CompletableFuture future = requestManagers.consumerMembershipManager.get().leaveGroupOnClose(event.membershipOperation()); - future.whenComplete(complete(event.future())); - } else if (requestManagers.streamsMembershipManager.isPresent()) { - log.debug("Signal the StreamsMembershipManager to leave the streams group since the member is closing"); - CompletableFuture future = requestManagers.streamsMembershipManager.get().leaveGroupOnClose(); - future.whenComplete(complete(event.future())); - } + if (requestManagers.consumerMembershipManager.isEmpty()) + return; + + log.debug("Signal the ConsumerMembershipManager to leave the consumer group since the consumer is closing"); + CompletableFuture future = requestManagers.consumerMembershipManager.get().leaveGroupOnClose(); + future.whenComplete(complete(event.future())); } private void process(@SuppressWarnings("unused") final StopFindCoordinatorOnCloseEvent event) { @@ -514,7 +482,7 @@ private void process(@SuppressWarnings("unused") final StopFindCoordinatorOnClos * Process event that tells the share consume request manager to fetch more records. */ private void process(final ShareFetchEvent event) { - requestManagers.shareConsumeRequestManager.ifPresent(scrm -> scrm.fetch(event.acknowledgementsMap(), event.controlRecordAcknowledgements())); + requestManagers.shareConsumeRequestManager.ifPresent(scrm -> scrm.fetch(event.acknowledgementsMap())); } /** @@ -540,7 +508,7 @@ private void process(final ShareAcknowledgeAsyncEvent event) { } ShareConsumeRequestManager manager = requestManagers.shareConsumeRequestManager.get(); - manager.commitAsync(event.acknowledgementsMap(), event.deadlineMs()); + manager.commitAsync(event.acknowledgementsMap()); } /** @@ -699,33 +667,6 @@ private void process(final CurrentLagEvent event) { } } - private void process(final StreamsOnTasksRevokedCallbackCompletedEvent event) { - if (requestManagers.streamsMembershipManager.isEmpty()) { - log.warn("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onTasksRevoked callback execution could not be sent"); - return; - } - requestManagers.streamsMembershipManager.get().onTasksRevokedCallbackCompleted(event); - } - - private void process(final StreamsOnTasksAssignedCallbackCompletedEvent event) { - if (requestManagers.streamsMembershipManager.isEmpty()) { - log.warn("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onTasksAssigned callback execution could not be sent"); - return; - } - requestManagers.streamsMembershipManager.get().onTasksAssignedCallbackCompleted(event); - } - - private void process(final StreamsOnAllTasksLostCallbackCompletedEvent event) { - if (requestManagers.streamsMembershipManager.isEmpty()) { - log.warn("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onAllTasksLost callback execution could not be sent"); - return; - } - requestManagers.streamsMembershipManager.get().onAllTasksLostCallbackCompleted(event); - } - private BiConsumer complete(final CompletableFuture b) { return (value, exception) -> { if (exception != null) diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java index 6fa737c727805..02fc4b4a29ba4 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/BackgroundEvent.java @@ -17,6 +17,7 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkThread; +import org.apache.kafka.common.Uuid; import java.util.Objects; @@ -26,16 +27,17 @@ public abstract class BackgroundEvent { public enum Type { - ERROR, - CONSUMER_REBALANCE_LISTENER_CALLBACK_NEEDED, - SHARE_ACKNOWLEDGEMENT_COMMIT_CALLBACK, - STREAMS_ON_TASKS_ASSIGNED_CALLBACK_NEEDED, - STREAMS_ON_TASKS_REVOKED_CALLBACK_NEEDED, - STREAMS_ON_ALL_TASKS_LOST_CALLBACK_NEEDED + ERROR, CONSUMER_REBALANCE_LISTENER_CALLBACK_NEEDED, SHARE_ACKNOWLEDGEMENT_COMMIT_CALLBACK } private final Type type; + /** + * This identifies a particular event. It is used to disambiguate events via {@link #hashCode()} and + * {@link #equals(Object)} and can be used in log messages when debugging. + */ + private final Uuid id; + /** * The time in milliseconds when this event was enqueued. * This field can be changed after the event is created, so it should not be used in hashCode or equals. @@ -44,12 +46,17 @@ public enum Type { protected BackgroundEvent(Type type) { this.type = Objects.requireNonNull(type); + this.id = Uuid.randomUuid(); } public Type type() { return type; } + public Uuid id() { + return id; + } + public void setEnqueuedMs(long enqueuedMs) { this.enqueuedMs = enqueuedMs; } @@ -58,8 +65,21 @@ public long enqueuedMs() { return enqueuedMs; } + @Override + public final boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + BackgroundEvent that = (BackgroundEvent) o; + return type == that.type && id.equals(that.id); + } + + @Override + public final int hashCode() { + return Objects.hash(type, id); + } + protected String toStringBase() { - return "type=" + type + ", enqueuedMs=" + enqueuedMs; + return "type=" + type + ", id=" + id + ", enqueuedMs=" + enqueuedMs; } @Override diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java index bb59a4ec7e389..20231b0f99a10 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEvent.java @@ -45,7 +45,7 @@ public interface CompletableEvent { * (if applicable) is passed to {@link CompletableFuture#complete(Object)}. In the case where the generic * bound type is specified as {@link Void}, {@code null} is provided. *

    • - * Error: when the event logic generates an error, the error is passed to + * Error: when the the event logic generates an error, the error is passed to * {@link CompletableFuture#completeExceptionally(Throwable)}. *
    • *
    • diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java index b4440de06264b..5a0358df8964f 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/CompletableEventReaper.java @@ -25,10 +25,11 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collection; -import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.concurrent.CompletableFuture; +import java.util.function.Consumer; +import java.util.stream.Collectors; /** * {@code CompletableEventReaper} is responsible for tracking {@link CompletableEvent time-bound events} and removing @@ -84,39 +85,26 @@ public void add(CompletableEvent event) { * @return The number of events that were expired */ public long reap(long currentTimeMs) { - int count = 0; + Consumer> expireEvent = event -> { + long pastDueMs = currentTimeMs - event.deadlineMs(); + TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, event.deadlineMs())); - Iterator> iterator = tracked.iterator(); - - while (iterator.hasNext()) { - CompletableEvent event = iterator.next(); - - if (event.future().isDone()) { - // Remove any events that are already complete. - iterator.remove(); - continue; - } - - long deadlineMs = event.deadlineMs(); - long pastDueMs = currentTimeMs - deadlineMs; - - if (pastDueMs < 0) - continue; - - TimeoutException error = new TimeoutException(String.format("%s was %s ms past its expiration of %s", event.getClass().getSimpleName(), pastDueMs, deadlineMs)); - - // Complete (exceptionally) any events that have passed their deadline AND aren't already complete. if (event.future().completeExceptionally(error)) { - log.debug("Event {} completed exceptionally since its expiration of {} passed {} ms ago", event, deadlineMs, pastDueMs); + log.debug("Event {} completed exceptionally since its expiration of {} passed {} ms ago", event, event.deadlineMs(), pastDueMs); } else { log.trace("Event {} not completed exceptionally since it was previously completed", event); } - - count++; - - // Remove the events so that we don't hold a reference to it. - iterator.remove(); - } + }; + + // First, complete (exceptionally) any events that have passed their deadline AND aren't already complete. + long count = tracked.stream() + .filter(e -> !e.future().isDone()) + .filter(e -> currentTimeMs >= e.deadlineMs()) + .peek(expireEvent) + .count(); + // Second, remove any events that are already complete, just to make sure we don't hold references. This will + // include any events that finished successfully as well as any events we just completed exceptionally above. + tracked.removeIf(e -> e.future().isDone()); return count; } @@ -143,12 +131,29 @@ public long reap(long currentTimeMs) { public long reap(Collection events) { Objects.requireNonNull(events, "Event queue to reap must be non-null"); - long trackedExpiredCount = completeEventsExceptionallyOnClose(tracked); + Consumer> expireEvent = event -> { + TimeoutException error = new TimeoutException(String.format("%s could not be completed before the consumer closed", event.getClass().getSimpleName())); + + if (event.future().completeExceptionally(error)) { + log.debug("Event {} completed exceptionally since the consumer is closing", event); + } else { + log.trace("Event {} not completed exceptionally since it was completed prior to the consumer closing", event); + } + }; + + long trackedExpiredCount = tracked.stream() + .filter(e -> !e.future().isDone()) + .peek(expireEvent) + .count(); tracked.clear(); - long eventExpiredCount = completeEventsExceptionallyOnClose(events); + long eventExpiredCount = events.stream() + .filter(e -> e instanceof CompletableEvent) + .map(e -> (CompletableEvent) e) + .filter(e -> !e.future().isDone()) + .peek(expireEvent) + .count(); events.clear(); - return trackedExpiredCount + eventExpiredCount; } @@ -161,51 +166,9 @@ public boolean contains(CompletableEvent event) { } public List> uncompletedEvents() { - // The following code does not use the Java Collections Streams API to reduce overhead in the critical - // path of the ConsumerNetworkThread loop. - List> events = new ArrayList<>(); - - for (CompletableEvent event : tracked) { - if (!event.future().isDone()) - events.add(event); - } - - return events; - } - - /** - * For all the {@link CompletableEvent}s in the collection, if they're not already complete, invoke - * {@link CompletableFuture#completeExceptionally(Throwable)}. - * - * @param events Collection of objects, assumed to be subclasses of {@link ApplicationEvent} or - * {@link BackgroundEvent}, but will only perform completion for any - * unfinished {@link CompletableEvent}s - * - * @return Number of events closed - */ - private long completeEventsExceptionallyOnClose(Collection events) { - long count = 0; - - for (Object o : events) { - if (!(o instanceof CompletableEvent)) - continue; - - CompletableEvent event = (CompletableEvent) o; - - if (event.future().isDone()) - continue; - - count++; - - TimeoutException error = new TimeoutException(String.format("%s could not be completed before the consumer closed", event.getClass().getSimpleName())); - - if (event.future().completeExceptionally(error)) { - log.debug("Event {} completed exceptionally since the consumer is closing", event); - } else { - log.trace("Event {} not completed exceptionally since it was completed prior to the consumer closing", event); - } - } - - return count; + return tracked.stream() + .filter(e -> !e.future().isDone()) + .collect(Collectors.toList()); } + } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java index e7496c3671a47..4afc00390d449 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/LeaveGroupOnCloseEvent.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.internals.ConsumerMembershipManager; import org.apache.kafka.clients.consumer.internals.ConsumerUtils; @@ -32,17 +31,7 @@ */ public class LeaveGroupOnCloseEvent extends CompletableApplicationEvent { - /** - * @see org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation - */ - private final CloseOptions.GroupMembershipOperation membershipOperation; - - public LeaveGroupOnCloseEvent(final long deadlineMs, final CloseOptions.GroupMembershipOperation membershipOperation) { + public LeaveGroupOnCloseEvent(final long deadlineMs) { super(Type.LEAVE_GROUP_ON_CLOSE, deadlineMs); - this.membershipOperation = membershipOperation; - } - - public CloseOptions.GroupMembershipOperation membershipOperation() { - return membershipOperation; } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeAsyncEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeAsyncEvent.java index 26ceb962b2c3b..7bfc86e92356b 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeAsyncEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeAsyncEvent.java @@ -16,33 +16,21 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.clients.consumer.internals.NodeAcknowledgements; +import org.apache.kafka.clients.consumer.internals.Acknowledgements; import org.apache.kafka.common.TopicIdPartition; import java.util.Map; public class ShareAcknowledgeAsyncEvent extends ApplicationEvent { - private final Map acknowledgementsMap; - private final long deadlineMs; + private final Map acknowledgementsMap; - public ShareAcknowledgeAsyncEvent(final Map acknowledgementsMap, - final long deadlineMs) { + public ShareAcknowledgeAsyncEvent(final Map acknowledgementsMap) { super(Type.SHARE_ACKNOWLEDGE_ASYNC); this.acknowledgementsMap = acknowledgementsMap; - this.deadlineMs = deadlineMs; } - public Map acknowledgementsMap() { + public Map acknowledgementsMap() { return acknowledgementsMap; } - - public long deadlineMs() { - return deadlineMs; - } - - @Override - protected String toStringBase() { - return super.toStringBase() + ", acknowledgementsMap=" + acknowledgementsMap + ", deadlineMs=" + deadlineMs; - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java index cc98655b83eaa..0916ab8666c09 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeOnCloseEvent.java @@ -16,21 +16,21 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.clients.consumer.internals.NodeAcknowledgements; +import org.apache.kafka.clients.consumer.internals.Acknowledgements; import org.apache.kafka.common.TopicIdPartition; import java.util.Map; public class ShareAcknowledgeOnCloseEvent extends CompletableApplicationEvent { - private final Map acknowledgementsMap; + private final Map acknowledgementsMap; - public ShareAcknowledgeOnCloseEvent(final Map acknowledgementsMap, final long deadlineMs) { + public ShareAcknowledgeOnCloseEvent(final Map acknowledgementsMap, final long deadlineMs) { super(Type.SHARE_ACKNOWLEDGE_ON_CLOSE, deadlineMs); this.acknowledgementsMap = acknowledgementsMap; } - public Map acknowledgementsMap() { + public Map acknowledgementsMap() { return acknowledgementsMap; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java index 8b3237c6f95c9..49cb422e63325 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareAcknowledgeSyncEvent.java @@ -17,21 +17,20 @@ package org.apache.kafka.clients.consumer.internals.events; import org.apache.kafka.clients.consumer.internals.Acknowledgements; -import org.apache.kafka.clients.consumer.internals.NodeAcknowledgements; import org.apache.kafka.common.TopicIdPartition; import java.util.Map; public class ShareAcknowledgeSyncEvent extends CompletableApplicationEvent> { - private final Map acknowledgementsMap; + private final Map acknowledgementsMap; - public ShareAcknowledgeSyncEvent(final Map acknowledgementsMap, final long deadlineMs) { + public ShareAcknowledgeSyncEvent(final Map acknowledgementsMap, final long deadlineMs) { super(Type.SHARE_ACKNOWLEDGE_SYNC, deadlineMs); this.acknowledgementsMap = acknowledgementsMap; } - public Map acknowledgementsMap() { + public Map acknowledgementsMap() { return acknowledgementsMap; } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java index 2bc38b07b8b10..2a2b56e87cd78 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/events/ShareFetchEvent.java @@ -16,32 +16,24 @@ */ package org.apache.kafka.clients.consumer.internals.events; -import org.apache.kafka.clients.consumer.internals.NodeAcknowledgements; +import org.apache.kafka.clients.consumer.internals.Acknowledgements; import org.apache.kafka.common.TopicIdPartition; import java.util.Map; public class ShareFetchEvent extends ApplicationEvent { - private final Map acknowledgementsMap; + private final Map acknowledgementsMap; - private final Map controlRecordAcknowledgements; - - public ShareFetchEvent(Map acknowledgementsMap, - Map controlRecordAcknowledgements) { + public ShareFetchEvent(Map acknowledgementsMap) { super(Type.SHARE_FETCH); this.acknowledgementsMap = acknowledgementsMap; - this.controlRecordAcknowledgements = controlRecordAcknowledgements; } - public Map acknowledgementsMap() { + public Map acknowledgementsMap() { return acknowledgementsMap; } - public Map controlRecordAcknowledgements() { - return controlRecordAcknowledgements; - } - @Override protected String toStringBase() { return super.toStringBase() + ", acknowledgementsMap=" + acknowledgementsMap; diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java index 2f90440a66244..09e84cbe985cc 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetrics.java @@ -24,7 +24,10 @@ import java.util.Arrays; -public class AsyncConsumerMetrics implements AutoCloseable { +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; + +public class AsyncConsumerMetrics extends KafkaConsumerMetrics implements AutoCloseable { private final Metrics metrics; public static final String TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME = "time-between-network-thread-poll"; @@ -48,13 +51,15 @@ public class AsyncConsumerMetrics implements AutoCloseable { private final Sensor unsentRequestsQueueSizeSensor; private final Sensor unsentRequestsQueueTimeSensor; - public AsyncConsumerMetrics(Metrics metrics, String groupName) { + public AsyncConsumerMetrics(Metrics metrics) { + super(metrics, CONSUMER_METRIC_GROUP_PREFIX); + this.metrics = metrics; this.timeBetweenNetworkThreadPollSensor = metrics.sensor(TIME_BETWEEN_NETWORK_THREAD_POLL_SENSOR_NAME); this.timeBetweenNetworkThreadPollSensor.add( metrics.metricName( "time-between-network-thread-poll-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time taken, in milliseconds, between each poll in the network thread." ), new Avg() @@ -62,7 +67,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.timeBetweenNetworkThreadPollSensor.add( metrics.metricName( "time-between-network-thread-poll-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time taken, in milliseconds, between each poll in the network thread." ), new Max() @@ -72,7 +77,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.applicationEventQueueSizeSensor.add( metrics.metricName( APPLICATION_EVENT_QUEUE_SIZE_SENSOR_NAME, - groupName, + CONSUMER_METRIC_GROUP, "The current number of events in the queue to send from the application thread to the background thread." ), new Value() @@ -82,7 +87,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.applicationEventQueueTimeSensor.add( metrics.metricName( "application-event-queue-time-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time, in milliseconds, that application events are taking to be dequeued." ), new Avg() @@ -90,7 +95,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.applicationEventQueueTimeSensor.add( metrics.metricName( "application-event-queue-time-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time, in milliseconds, that an application event took to be dequeued." ), new Max() @@ -100,14 +105,14 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.applicationEventQueueProcessingTimeSensor.add( metrics.metricName( "application-event-queue-processing-time-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time, in milliseconds, that the background thread takes to process all available application events." ), new Avg() ); this.applicationEventQueueProcessingTimeSensor.add( metrics.metricName("application-event-queue-processing-time-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time, in milliseconds, that the background thread took to process all available application events." ), new Max() @@ -117,7 +122,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.applicationEventExpiredSizeSensor.add( metrics.metricName( APPLICATION_EVENT_EXPIRED_SIZE_SENSOR_NAME, - groupName, + CONSUMER_METRIC_GROUP, "The current number of expired application events." ), new Value() @@ -127,7 +132,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.unsentRequestsQueueSizeSensor.add( metrics.metricName( UNSENT_REQUESTS_QUEUE_SIZE_SENSOR_NAME, - groupName, + CONSUMER_METRIC_GROUP, "The current number of unsent requests in the background thread." ), new Value() @@ -137,7 +142,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.unsentRequestsQueueTimeSensor.add( metrics.metricName( "unsent-requests-queue-time-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time, in milliseconds, that requests are taking to be sent in the background thread." ), new Avg() @@ -145,7 +150,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.unsentRequestsQueueTimeSensor.add( metrics.metricName( "unsent-requests-queue-time-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time, in milliseconds, that a request remained unsent in the background thread." ), new Max() @@ -155,7 +160,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.backgroundEventQueueSizeSensor.add( metrics.metricName( BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, - groupName, + CONSUMER_METRIC_GROUP, "The current number of events in the queue to send from the background thread to the application thread." ), new Value() @@ -165,7 +170,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.backgroundEventQueueTimeSensor.add( metrics.metricName( "background-event-queue-time-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time, in milliseconds, that background events are taking to be dequeued." ), new Avg() @@ -173,7 +178,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.backgroundEventQueueTimeSensor.add( metrics.metricName( "background-event-queue-time-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time, in milliseconds, that background events are taking to be dequeued." ), new Max() @@ -183,7 +188,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.backgroundEventQueueProcessingTimeSensor.add( metrics.metricName( "background-event-queue-processing-time-avg", - groupName, + CONSUMER_METRIC_GROUP, "The average time, in milliseconds, that the consumer took to process all available background events." ), new Avg() @@ -191,7 +196,7 @@ public AsyncConsumerMetrics(Metrics metrics, String groupName) { this.backgroundEventQueueProcessingTimeSensor.add( metrics.metricName( "background-event-queue-processing-time-max", - groupName, + CONSUMER_METRIC_GROUP, "The maximum time, in milliseconds, that the consumer took to process all available background events." ), new Max() @@ -252,5 +257,6 @@ public void close() { unsentRequestsQueueSizeSensor.name(), unsentRequestsQueueTimeSensor.name() ).forEach(metrics::removeSensor); + super.close(); } } diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java index e271dee526172..c312edd54b602 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/ConsumerRebalanceMetricsManager.java @@ -16,8 +16,6 @@ */ package org.apache.kafka.clients.consumer.internals.metrics; -import org.apache.kafka.clients.consumer.KafkaConsumer; -import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.Metrics; @@ -29,9 +27,7 @@ import org.apache.kafka.common.metrics.stats.Rate; import org.apache.kafka.common.metrics.stats.WindowedCount; -import java.util.Collection; import java.util.concurrent.TimeUnit; -import java.util.regex.Pattern; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; @@ -48,14 +44,11 @@ public final class ConsumerRebalanceMetricsManager extends RebalanceMetricsManag public final MetricName lastRebalanceSecondsAgo; public final MetricName failedRebalanceTotal; public final MetricName failedRebalanceRate; - public final MetricName assignedPartitionsCount; private long lastRebalanceEndMs = -1L; private long lastRebalanceStartMs = -1L; - private final Metrics metrics; - public ConsumerRebalanceMetricsManager(Metrics metrics, SubscriptionState subscriptions) { + public ConsumerRebalanceMetricsManager(Metrics metrics) { super(CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX); - this.metrics = metrics; rebalanceLatencyAvg = createMetric(metrics, "rebalance-latency-avg", "The average time in ms taken for a group to complete a rebalance"); @@ -71,9 +64,6 @@ public ConsumerRebalanceMetricsManager(Metrics metrics, SubscriptionState subscr "The total number of failed rebalance events"); failedRebalanceRate = createMetric(metrics, "failed-rebalance-rate-per-hour", "The number of failed rebalance events per hour"); - assignedPartitionsCount = createMetric(metrics, "assigned-partitions", - "The number of partitions currently assigned to this consumer"); - registerAssignedPartitionCount(subscriptions); successfulRebalanceSensor = metrics.sensor("rebalance-latency"); successfulRebalanceSensor.add(rebalanceLatencyAvg, new Avg()); @@ -116,15 +106,4 @@ public void maybeRecordRebalanceFailed() { public boolean rebalanceStarted() { return lastRebalanceStartMs > lastRebalanceEndMs; } - - /** - * Register metric to track the number of assigned partitions. - * It will consider partitions assigned to the consumer - * regardless of whether they were assigned via {@link KafkaConsumer#subscribe(Pattern)} or - * {@link KafkaConsumer#assign(Collection)} - */ - private void registerAssignedPartitionCount(SubscriptionState subscriptions) { - Measurable numParts = (config, now) -> subscriptions.numAssignedPartitions(); - metrics.addMetric(assignedPartitionsCount, numParts); - } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java index 1b2bb4518f979..52502e714a947 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaConsumerMetrics.java @@ -26,7 +26,7 @@ import java.util.concurrent.TimeUnit; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRICS_SUFFIX; public class KafkaConsumerMetrics implements AutoCloseable { private final Metrics metrics; @@ -39,9 +39,9 @@ public class KafkaConsumerMetrics implements AutoCloseable { private long pollStartMs; private long timeSinceLastPollMs; - public KafkaConsumerMetrics(Metrics metrics) { + public KafkaConsumerMetrics(Metrics metrics, String metricGrpPrefix) { this.metrics = metrics; - final String metricGroupName = CONSUMER_METRIC_GROUP; + final String metricGroupName = metricGrpPrefix + CONSUMER_METRICS_SUFFIX; Measurable lastPoll = (mConfig, now) -> { if (lastPollMs == 0L) // if no poll is ever triggered, just return -1. diff --git a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java index e154b97da5a80..b7da8245aaaa8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/consumer/internals/metrics/KafkaShareConsumerMetrics.java @@ -25,7 +25,7 @@ import java.util.concurrent.TimeUnit; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRICS_SUFFIX; public class KafkaShareConsumerMetrics implements AutoCloseable { private final Metrics metrics; @@ -36,9 +36,9 @@ public class KafkaShareConsumerMetrics implements AutoCloseable { private long pollStartMs; private long timeSinceLastPollMs; - public KafkaShareConsumerMetrics(Metrics metrics) { + public KafkaShareConsumerMetrics(Metrics metrics, String metricGrpPrefix) { this.metrics = metrics; - final String metricGroupName = CONSUMER_SHARE_METRIC_GROUP; + final String metricGroupName = metricGrpPrefix + CONSUMER_METRICS_SUFFIX; Measurable lastPoll = (mConfig, now) -> { if (lastPollMs == 0L) // if no poll is ever triggered, just return -1. diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Callback.java b/clients/src/main/java/org/apache/kafka/clients/producer/Callback.java index 5c8f159ac8221..29acb88044be6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Callback.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Callback.java @@ -20,7 +20,6 @@ * A callback interface that the user can implement to allow code to execute when the request is complete. This callback * will generally execute in the background I/O thread so it should be fast. */ -@FunctionalInterface public interface Callback { /** @@ -43,8 +42,6 @@ public interface Callback { *
    • {@link org.apache.kafka.common.errors.UnknownServerException UnknownServerException} *
    • {@link org.apache.kafka.common.errors.UnknownProducerIdException UnknownProducerIdException} *
    • {@link org.apache.kafka.common.errors.InvalidProducerEpochException InvalidProducerEpochException} - *
    • {@link org.apache.kafka.common.errors.AuthenticationException AuthenticationException} - *
    • {@link org.apache.kafka.common.errors.AuthorizationException AuthorizationException} *
    * Retriable exceptions (transient, may be covered by increasing #.retries): *
      @@ -55,7 +52,6 @@ public interface Callback { *
    • {@link org.apache.kafka.common.errors.OffsetOutOfRangeException OffsetOutOfRangeException} *
    • {@link org.apache.kafka.common.errors.TimeoutException TimeoutException} *
    • {@link org.apache.kafka.common.errors.UnknownTopicOrPartitionException UnknownTopicOrPartitionException} - *
    • {@link org.apache.kafka.clients.producer.BufferExhaustedException BufferExhaustedException} *
    */ void onCompletion(RecordMetadata metadata, Exception exception); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java index 6e656f590e42f..ce438f477bcd6 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/KafkaProducer.java @@ -50,18 +50,15 @@ import org.apache.kafka.common.errors.AuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; -import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SerializationException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.ClusterResourceListeners; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; @@ -77,8 +74,8 @@ import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetryUtils; import org.apache.kafka.common.utils.AppInfoParser; +import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; -import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; import org.apache.kafka.common.utils.Utils; @@ -152,8 +149,8 @@ *

    * The buffer.memory controls the total amount of memory available to the producer for buffering. If records * are sent faster than they can be transmitted to the server then this buffer space will be exhausted. When the buffer space is - * exhausted additional send calls will block. The threshold for time to block is determined by max.block.ms after which it returns - * a failed future with BufferExhaustedException. + * exhausted additional send calls will block. The threshold for time to block is determined by max.block.ms after which it throws + * a TimeoutException. *

    * The key.serializer and value.serializer instruct how to turn the key and value objects the user provides with * their ProducerRecord into bytes. You can use the included {@link org.apache.kafka.common.serialization.ByteArraySerializer} or @@ -252,18 +249,18 @@ public class KafkaProducer implements Producer { // Visible for testing final Metrics metrics; private final KafkaProducerMetrics producerMetrics; - private final Plugin partitionerPlugin; + private final Partitioner partitioner; private final int maxRequestSize; private final long totalMemorySize; private final ProducerMetadata metadata; private final RecordAccumulator accumulator; private final Sender sender; - private final Sender.SenderThread ioThread; + private final Thread ioThread; private final Compression compression; private final Sensor errors; private final Time time; - private final Plugin> keySerializerPlugin; - private final Plugin> valueSerializerPlugin; + private final Serializer keySerializer; + private final Serializer valueSerializer; private final ProducerConfig producerConfig; private final long maxBlockTimeMs; private final boolean partitionerIgnoreKeys; @@ -369,44 +366,41 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali config.originalsWithPrefix(CommonClientConfigs.METRICS_CONTEXT_PREFIX)); this.metrics = new Metrics(metricConfig, reporters, time, metricsContext); this.producerMetrics = new KafkaProducerMetrics(metrics); - this.partitionerPlugin = Plugin.wrapInstance( - config.getConfiguredInstance( - ProducerConfig.PARTITIONER_CLASS_CONFIG, - Partitioner.class, - Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), - metrics, - ProducerConfig.PARTITIONER_CLASS_CONFIG); + this.partitioner = config.getConfiguredInstance( + ProducerConfig.PARTITIONER_CLASS_CONFIG, + Partitioner.class, + Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)); this.partitionerIgnoreKeys = config.getBoolean(ProducerConfig.PARTITIONER_IGNORE_KEYS_CONFIG); long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); long retryBackoffMaxMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MAX_MS_CONFIG); if (keySerializer == null) { - keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, Serializer.class); - keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); + this.keySerializer = config.getConfiguredInstance(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, + Serializer.class); + this.keySerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), true); } else { config.ignore(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); + this.keySerializer = keySerializer; } - this.keySerializerPlugin = Plugin.wrapInstance(keySerializer, metrics, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); - if (valueSerializer == null) { - valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, Serializer.class); - valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); + this.valueSerializer = config.getConfiguredInstance(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, + Serializer.class); + this.valueSerializer.configure(config.originals(Collections.singletonMap(ProducerConfig.CLIENT_ID_CONFIG, clientId)), false); } else { config.ignore(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); + this.valueSerializer = valueSerializer; } - this.valueSerializerPlugin = Plugin.wrapInstance(valueSerializer, metrics, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); - - List> interceptorList = (List>) ClientUtils.configuredInterceptors(config, + List> interceptorList = ClientUtils.configuredInterceptors(config, ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptor.class); if (interceptors != null) this.interceptors = interceptors; else - this.interceptors = new ProducerInterceptors<>(interceptorList, metrics); + this.interceptors = new ProducerInterceptors<>(interceptorList); ClusterResourceListeners clusterResourceListeners = ClientUtils.configureClusterResourceListeners( interceptorList, reporters, - Arrays.asList(this.keySerializerPlugin.get(), this.valueSerializerPlugin.get())); + Arrays.asList(this.keySerializer, this.valueSerializer)); this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); this.compression = configureCompression(config); @@ -417,8 +411,8 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this.apiVersions = apiVersions; this.transactionManager = configureTransactionState(config, logContext); // There is no need to do work required for adaptive partitioning, if we use a custom partitioner. - boolean enableAdaptivePartitioning = partitionerPlugin.get() == null && - config.getBoolean(ProducerConfig.PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG); + boolean enableAdaptivePartitioning = partitioner == null && + config.getBoolean(ProducerConfig.PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG); RecordAccumulator.PartitionerConfig partitionerConfig = new RecordAccumulator.PartitionerConfig( enableAdaptivePartitioning, config.getLong(ProducerConfig.PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG) @@ -437,6 +431,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali metrics, PRODUCER_METRIC_GROUP_NAME, time, + apiVersions, transactionManager, new BufferPool(this.totalMemorySize, batchSize, metrics, time, PRODUCER_METRIC_GROUP_NAME)); @@ -456,7 +451,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this.errors = this.metrics.sensor("errors"); this.sender = newSender(logContext, kafkaClient, this.metadata); String ioThreadName = NETWORK_THREAD_PREFIX + " | " + clientId; - this.ioThread = new Sender.SenderThread(ioThreadName, this.sender, true); + this.ioThread = new KafkaThread(ioThreadName, this.sender, true); this.ioThread.start(); config.logUnused(); AppInfoParser.registerAppInfo(JMX_PREFIX, clientId, metrics, time.milliseconds()); @@ -482,7 +477,7 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali ProducerInterceptors interceptors, Partitioner partitioner, Time time, - Sender.SenderThread ioThread, + KafkaThread ioThread, Optional clientTelemetryReporter) { this.producerConfig = config; this.time = time; @@ -490,9 +485,9 @@ public KafkaProducer(Properties properties, Serializer keySerializer, Seriali this.log = logContext.logger(KafkaProducer.class); this.metrics = metrics; this.producerMetrics = new KafkaProducerMetrics(metrics); - this.partitionerPlugin = Plugin.wrapInstance(partitioner, metrics, ProducerConfig.PARTITIONER_CLASS_CONFIG); - this.keySerializerPlugin = Plugin.wrapInstance(keySerializer, metrics, ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG); - this.valueSerializerPlugin = Plugin.wrapInstance(valueSerializer, metrics, ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG); + this.partitioner = partitioner; + this.keySerializer = keySerializer; + this.valueSerializer = valueSerializer; this.interceptors = interceptors; this.maxRequestSize = config.getInt(ProducerConfig.MAX_REQUEST_SIZE_CONFIG); this.totalMemorySize = config.getLong(ProducerConfig.BUFFER_MEMORY_CONFIG); @@ -539,7 +534,8 @@ Sender newSender(LogContext logContext, KafkaClient kafkaClient, ProducerMetadat time, requestTimeoutMs, producerConfig.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG), - this.transactionManager); + this.transactionManager, + apiVersions); } private static Compression configureCompression(ProducerConfig config) { @@ -598,17 +594,14 @@ private TransactionManager configureTransactionState(ProducerConfig config, if (config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)) { final String transactionalId = config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG); - final boolean enable2PC = config.getBoolean(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG); final int transactionTimeoutMs = config.getInt(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG); final long retryBackoffMs = config.getLong(ProducerConfig.RETRY_BACKOFF_MS_CONFIG); - transactionManager = new TransactionManager( logContext, transactionalId, transactionTimeoutMs, retryBackoffMs, - apiVersions, - enable2PC + apiVersions ); if (transactionManager.isTransactional()) @@ -623,13 +616,8 @@ private TransactionManager configureTransactionState(ProducerConfig config, } /** - * Initialize the transactional state for this producer, similar to {@link #initTransactions()} but - * with additional capabilities to keep a previously prepared transaction. - * * Needs to be called before any other methods when the {@code transactional.id} is set in the configuration. - * - * When {@code keepPreparedTxn} is {@code false}, this behaves like the standard transactional - * initialization where the method does the following: + * This method does the following: *

      *
    1. Ensures any transactions initiated by previous instances of the producer with the same * {@code transactional.id} are completed. If the previous instance had failed with a transaction in @@ -638,39 +626,26 @@ private TransactionManager configureTransactionState(ProducerConfig config, *
    2. Gets the internal producer id and epoch, used in all future transactional * messages issued by the producer.
    3. *
    - * - *

    - * When {@code keepPreparedTxn} is set to {@code true}, the producer does not automatically abort existing - * transactions. Instead, it enters a recovery mode allowing only finalization of those previously - * prepared transactions. - * This behavior is especially crucial for 2PC scenarios, where transactions should remain intact - * until the external transaction manager decides whether to commit or abort. - *

    - * - * @param keepPreparedTxn true to retain any in-flight prepared transactions (necessary for 2PC - * recovery), false to abort existing transactions and behave like - * the standard initTransactions. - * * Note that this method will raise {@link TimeoutException} if the transactional state cannot * be initialized before expiration of {@code max.block.ms}. Additionally, it will raise {@link InterruptException} * if interrupted. It is safe to retry in either case, but once the transactional state has been successfully * initialized, this method should no longer be used. * - * @throws IllegalStateException if no {@code transactional.id} is configured - * @throws org.apache.kafka.common.errors.UnsupportedVersionException if the broker does not - * support transactions (i.e. if its version is lower than 0.11.0.0) - * @throws org.apache.kafka.common.errors.TransactionalIdAuthorizationException if the configured - * {@code transactional.id} is unauthorized either for normal transaction writes or 2PC. - * @throws KafkaException if the producer encounters a fatal error or any other unexpected error + * @throws IllegalStateException if no {@code transactional.id} has been configured + * @throws org.apache.kafka.common.errors.UnsupportedVersionException fatal error indicating the broker + * does not support transactions (i.e. if its version is lower than 0.11.0.0) + * @throws org.apache.kafka.common.errors.AuthorizationException error indicating that the configured + * transactional.id is not authorized, or the idempotent producer id is unavailable. See the exception for + * more details. User may retry this function call after fixing the permission. + * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error * @throws TimeoutException if the time taken for initialize the transaction has surpassed max.block.ms. * @throws InterruptException if the thread is interrupted while blocked */ - public void initTransactions(boolean keepPreparedTxn) { + public void initTransactions() { throwIfNoTransactionManager(); throwIfProducerClosed(); - throwIfInPreparedState(); long now = time.nanoseconds(); - TransactionalRequestResult result = transactionManager.initializeTransactions(keepPreparedTxn); + TransactionalRequestResult result = transactionManager.initializeTransactions(); sender.wakeup(); result.await(maxBlockTimeMs, TimeUnit.MILLISECONDS); producerMetrics.recordInit(time.nanoseconds() - now); @@ -695,7 +670,6 @@ public void initTransactions(boolean keepPreparedTxn) { public void beginTransaction() throws ProducerFencedException { throwIfNoTransactionManager(); throwIfProducerClosed(); - throwIfInPreparedState(); long now = time.nanoseconds(); transactionManager.beginTransaction(); producerMetrics.recordBeginTxn(time.nanoseconds() - now); @@ -755,7 +729,6 @@ public void sendOffsetsToTransaction(Map offs throwIfInvalidGroupMetadata(groupMetadata); throwIfNoTransactionManager(); throwIfProducerClosed(); - throwIfInPreparedState(); if (!offsets.isEmpty()) { long start = time.nanoseconds(); @@ -766,49 +739,6 @@ public void sendOffsetsToTransaction(Map offs } } - /** - * Prepares the current transaction for a two-phase commit. This method will flush all pending messages - * and transition the producer into a mode where only {@link #commitTransaction()}, {@link #abortTransaction()}, - * or completeTransaction(PreparedTxnState) may be called. - *

    - * This method is used as part of a two-phase commit protocol: - *

      - *
    1. Prepare the transaction by calling this method. This returns a {@link PreparedTxnState} if successful.
    2. - *
    3. Make any external system changes that need to be atomic with this transaction.
    4. - *
    5. Complete the transaction by calling {@link #commitTransaction()}, {@link #abortTransaction()} or - * completeTransaction(PreparedTxnState).
    6. - *
    - * - * @return the prepared transaction state to use when completing the transaction - * - * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started yet. - * @throws InvalidTxnStateException if the producer is not in a state where preparing - * a transaction is possible or 2PC is not enabled. - * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active - * @throws UnsupportedVersionException fatal error indicating the broker - * does not support transactions (i.e. if its version is lower than 0.11.0.0) - * @throws AuthorizationException fatal error indicating that the configured - * transactional.id is not authorized. See the exception for more details - * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error - * @throws TimeoutException if the time taken for preparing the transaction has surpassed max.block.ms - * @throws InterruptException if the thread is interrupted while blocked - */ - @Override - public PreparedTxnState prepareTransaction() throws ProducerFencedException { - throwIfNoTransactionManager(); - throwIfProducerClosed(); - throwIfInPreparedState(); - if (!transactionManager.is2PCEnabled()) { - throw new InvalidTxnStateException("Cannot prepare a transaction when 2PC is not enabled"); - } - long now = time.nanoseconds(); - flush(); - transactionManager.prepareTransaction(); - producerMetrics.recordPrepareTxn(time.nanoseconds() - now); - ProducerIdAndEpoch producerIdAndEpoch = transactionManager.preparedTransactionState(); - return new PreparedTxnState(producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); - } - /** * Commits the ongoing transaction. This method will flush any unsent records before actually committing the transaction. *

    @@ -886,41 +816,6 @@ public void abortTransaction() throws ProducerFencedException { producerMetrics.recordAbortTxn(time.nanoseconds() - abortStart); } - /** - * Completes a prepared transaction by comparing the provided prepared transaction state with the - * current prepared state on the producer. - * If they match, the transaction is committed; otherwise, it is aborted. - * - * @param preparedTxnState The prepared transaction state to compare against the current state - * @throws IllegalStateException if no transactional.id has been configured or no transaction has been started - * @throws InvalidTxnStateException if the producer is not in prepared state - * @throws ProducerFencedException fatal error indicating another producer with the same transactional.id is active - * @throws KafkaException if the producer has encountered a previous fatal error or for any other unexpected error - * @throws TimeoutException if the time taken for completing the transaction has surpassed max.block.ms - * @throws InterruptException if the thread is interrupted while blocked - */ - @Override - public void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException { - throwIfNoTransactionManager(); - throwIfProducerClosed(); - - if (!transactionManager.isPrepared()) { - throw new InvalidTxnStateException("Cannot complete transaction because no transaction has been prepared. " + - "Call prepareTransaction() first, or make sure initTransaction(true) was called."); - } - - // Get the current prepared transaction state - ProducerIdAndEpoch currentProducerIdAndEpoch = transactionManager.preparedTransactionState(); - PreparedTxnState currentPreparedState = new PreparedTxnState(currentProducerIdAndEpoch.producerId, currentProducerIdAndEpoch.epoch); - - // Compare the prepared transaction state token and commit or abort accordingly - if (currentPreparedState.equals(preparedTxnState)) { - commitTransaction(); - } else { - abortTransaction(); - } - } - /** * Asynchronously send a record to a topic. Equivalent to send(record, null). * See {@link #send(ProducerRecord, Callback)} for details. @@ -1027,15 +922,19 @@ public Future send(ProducerRecord record) { * expensive callbacks it is recommended to use your own {@link java.util.concurrent.Executor} in the callback body * to parallelize processing. * - * @param record The record to send + * @param record The record to send * @param callback A user-supplied callback to execute when the record has been acknowledged by the server (null - * indicates no callback) - * @throws IllegalStateException if a transactional.id has been configured and no transaction has been started, or - * when send is invoked after producer has been closed. - * @throws TimeoutException if the topic or the partition specified in the record cannot be found in metadata within {@code max.block.ms} - * @throws InterruptException If the thread is interrupted while blocked + * indicates no callback) + * + * @throws AuthenticationException if authentication fails. See the exception for more details + * @throws AuthorizationException fatal error indicating that the producer is not allowed to write + * @throws IllegalStateException if a transactional.id has been configured and no transaction has been started, or + * when send is invoked after producer has been closed. + * @throws InterruptException If the thread is interrupted while blocked * @throws SerializationException If the key or value are not valid objects given the configured serializers - * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions. + * @throws TimeoutException If the record could not be appended to the send buffer due to memory unavailable + * or missing metadata within {@code max.block.ms}. + * @throws KafkaException If a Kafka related error occurs that does not belong to the public API exceptions. */ @Override public Future send(ProducerRecord record, Callback callback) { @@ -1051,23 +950,6 @@ private void throwIfProducerClosed() { throw new IllegalStateException("Cannot perform operation after producer has been closed"); } - /** - * Throws an exception if the transaction is in a prepared state. - * In a two-phase commit (2PC) flow, once a transaction enters the prepared state, - * only commit, abort, or complete operations are allowed. - * - * @throws IllegalStateException if any other operation is attempted in the prepared state. - */ - private void throwIfInPreparedState() { - if (transactionManager != null && - transactionManager.isTransactional() && - transactionManager.isPrepared() - ) { - throw new IllegalStateException("Cannot perform operation while the transaction is in a prepared state. " + - "Only commitTransaction(), abortTransaction(), or completeTransaction() are permitted."); - } - } - /** * Implementation of asynchronously send a record to a topic. */ @@ -1079,8 +961,6 @@ private Future doSend(ProducerRecord record, Callback call try { throwIfProducerClosed(); - throwIfInPreparedState(); - // first make sure the metadata for the topic is available long nowMs = time.milliseconds(); ClusterAndWaitTime clusterAndWaitTime; @@ -1096,7 +976,7 @@ private Future doSend(ProducerRecord record, Callback call Cluster cluster = clusterAndWaitTime.cluster; byte[] serializedKey; try { - serializedKey = keySerializerPlugin.get().serialize(record.topic(), record.headers(), record.key()); + serializedKey = keySerializer.serialize(record.topic(), record.headers(), record.key()); } catch (ClassCastException cce) { throw new SerializationException("Can't convert key of class " + record.key().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG).getName() + @@ -1104,7 +984,7 @@ private Future doSend(ProducerRecord record, Callback call } byte[] serializedValue; try { - serializedValue = valueSerializerPlugin.get().serialize(record.topic(), record.headers(), record.value()); + serializedValue = valueSerializer.serialize(record.topic(), record.headers(), record.value()); } catch (ClassCastException cce) { throw new SerializationException("Can't convert value of class " + record.value().getClass().getName() + " to class " + producerConfig.getClass(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG).getName() + @@ -1274,8 +1154,7 @@ private void ensureValidRecordSize(int size) { /** * Invoking this method makes all buffered records immediately available to send (even if linger.ms is * greater than 0) and blocks on the completion of the requests associated with these records. The post-condition - * of flush() is that any previously sent record will have completed (e.g. Future.isDone() == true - * and callbacks passed to {@link #send(ProducerRecord,Callback)} have been called). + * of flush() is that any previously sent record will have completed (e.g. Future.isDone() == true). * A request is considered completed when it is successfully acknowledged * according to the acks configuration you have specified or else it results in an error. *

    @@ -1304,19 +1183,16 @@ private void ensureValidRecordSize(int size) { * calls made since the previous {@link #beginTransaction()} are completed before the commit. *

    *

    - * Important: This method must not be called from within the callback provided to - * {@link #send(ProducerRecord, Callback)}. Invoking flush() in this context will result in a - * {@link KafkaException} being thrown, as it will cause a deadlock. + * Important: This method should not be used within the callback provided to + * {@link #send(ProducerRecord, Callback)}. Invoking flush() in this context will cause a deadlock. *

    * * @throws InterruptException If the thread is interrupted while blocked - * @throws KafkaException If the method is invoked inside a {@link #send(ProducerRecord, Callback)} callback */ @Override public void flush() { if (Thread.currentThread() == this.ioThread) { - log.error("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock."); - throw new KafkaException("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock."); + log.error("KafkaProducer.flush() invocation inside a callback will cause a deadlock."); } log.trace("Flushing accumulated records in producer."); @@ -1335,14 +1211,11 @@ public void flush() { /** * Get the partition metadata for the given topic. This can be used for custom partitioning. - *

    - * This will attempt to refresh metadata until it finds the topic in it, or the configured {@link ProducerConfig#MAX_BLOCK_MS_CONFIG} expires. - * * @throws AuthenticationException if authentication fails. See the exception for more details - * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details - * @throws InterruptException if the thread is interrupted while blocked - * @throws TimeoutException if the topic cannot be found in metadata within {@code max.block.ms} - * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close + * @throws AuthorizationException if not authorized to the specified topic. See the exception for more details + * @throws InterruptException if the thread is interrupted while blocked + * @throws TimeoutException if metadata could not be refreshed within {@code max.block.ms} + * @throws KafkaException for all Kafka-related exceptions, including the case where this method is called after producer close */ @Override public List partitionsFor(String topic) { @@ -1541,9 +1414,9 @@ private void close(Duration timeout, boolean swallowException) { Utils.closeQuietly(interceptors, "producer interceptors", firstException); Utils.closeQuietly(producerMetrics, "producer metrics wrapper", firstException); Utils.closeQuietly(metrics, "producer metrics", firstException); - Utils.closeQuietly(keySerializerPlugin, "producer keySerializer", firstException); - Utils.closeQuietly(valueSerializerPlugin, "producer valueSerializer", firstException); - Utils.closeQuietly(partitionerPlugin, "producer partitioner", firstException); + Utils.closeQuietly(keySerializer, "producer keySerializer", firstException); + Utils.closeQuietly(valueSerializer, "producer valueSerializer", firstException); + Utils.closeQuietly(partitioner, "producer partitioner", firstException); clientTelemetryReporter.ifPresent(reporter -> Utils.closeQuietly(reporter, "producer telemetry reporter", firstException)); AppInfoParser.unregisterAppInfo(JMX_PREFIX, clientId, metrics); Throwable exception = firstException.get(); @@ -1570,8 +1443,8 @@ private int partition(ProducerRecord record, byte[] serializedKey, byte[] if (record.partition() != null) return record.partition(); - if (partitionerPlugin.get() != null) { - int customPartition = partitionerPlugin.get().partition( + if (partitioner != null) { + int customPartition = partitioner.partition( record.topic(), record.key(), serializedKey, record.value(), serializedValue, cluster); if (customPartition < 0) { throw new IllegalArgumentException(String.format( @@ -1608,6 +1481,11 @@ String getClientId() { return clientId; } + // Visible for testing + TransactionManager getTransactionManager() { + return transactionManager; + } + private static class ClusterAndWaitTime { final Cluster cluster; final long waitedOnMetadataMs; @@ -1666,7 +1544,6 @@ private class AppendCallbacks implements RecordAccumulator.AppendCallbacks { private final String recordLogString; private volatile int partition = RecordMetadata.UNKNOWN_PARTITION; private volatile TopicPartition topicPartition; - private final Headers headers; private AppendCallbacks(Callback userCallback, ProducerInterceptors interceptors, ProducerRecord record) { this.userCallback = userCallback; @@ -1675,12 +1552,6 @@ private AppendCallbacks(Callback userCallback, ProducerInterceptors interc // whole lifetime of the batch. // We don't want to have an NPE here, because the interceptors would not be notified (see .doSend). topic = record != null ? record.topic() : null; - if (record != null) { - headers = record.headers(); - } else { - headers = new RecordHeaders(); - ((RecordHeaders) headers).setReadOnly(); - } recordPartition = record != null ? record.partition() : null; recordLogString = log.isTraceEnabled() && record != null ? record.toString() : ""; } @@ -1690,7 +1561,7 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { if (metadata == null) { metadata = new RecordMetadata(topicPartition(), -1, -1, RecordBatch.NO_TIMESTAMP, -1, -1); } - this.interceptors.onAcknowledgement(metadata, exception, headers); + this.interceptors.onAcknowledgement(metadata, exception); if (this.userCallback != null) this.userCallback.onCompletion(metadata, exception); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java index 3e5cb9f5d5ab3..564171608568a 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/MockProducer.java @@ -29,7 +29,6 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.serialization.Serializer; @@ -142,7 +141,7 @@ public MockProducer() { } @Override - public void initTransactions(boolean keepPreparedTxn) { + public void initTransactions() { verifyNotClosed(); verifyNotFenced(); if (this.transactionInitialized) { @@ -200,18 +199,6 @@ public void sendOffsetsToTransaction(Map offs this.sentOffsets = true; } - @Override - public PreparedTxnState prepareTransaction() throws ProducerFencedException { - verifyNotClosed(); - verifyNotFenced(); - verifyTransactionsInitialized(); - verifyTransactionInFlight(); - - // Return a new PreparedTxnState with mock values for producerId and epoch - // Using 1000L and (short)1 as arbitrary values for a valid PreparedTxnState - return new PreparedTxnState(1000L, (short) 1); - } - @Override public void commitTransaction() throws ProducerFencedException { verifyNotClosed(); @@ -257,27 +244,6 @@ public void abortTransaction() throws ProducerFencedException { this.transactionInFlight = false; } - @Override - public void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException { - verifyNotClosed(); - verifyNotFenced(); - verifyTransactionsInitialized(); - - if (!this.transactionInFlight) { - throw new IllegalStateException("There is no prepared transaction to complete."); - } - - // For testing purposes, we'll consider a prepared state with producerId=1000L and epoch=1 as valid - // This should match what's returned in prepareTransaction() - PreparedTxnState currentState = new PreparedTxnState(1000L, (short) 1); - - if (currentState.equals(preparedTxnState)) { - commitTransaction(); - } else { - abortTransaction(); - } - } - private synchronized void verifyNotClosed() { if (this.closed) { throw new IllegalStateException("MockProducer is already closed."); @@ -335,8 +301,8 @@ public synchronized Future send(ProducerRecord record, Cal partition = partition(record, this.cluster); else { //just to throw ClassCastException if serializers are not the proper ones to serialize key/value - keySerializer.serialize(record.topic(), new RecordHeaders(), record.key()); - valueSerializer.serialize(record.topic(), new RecordHeaders(), record.value()); + keySerializer.serialize(record.topic(), record.key()); + valueSerializer.serialize(record.topic(), record.value()); } TopicPartition topicPartition = new TopicPartition(record.topic(), partition); diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java index d1d1ad3ac55f1..96345d8f8b041 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Partitioner.java @@ -23,9 +23,6 @@ /** * Partitioner Interface - *
    - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the partitioner to register metrics. The following tags are automatically added to - * all metrics registered: config set to partitioner.class, and class set to the Partitioner class name. */ public interface Partitioner extends Configurable, Closeable { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java index e6e94691e3454..798034dda6de2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/Producer.java @@ -42,14 +42,7 @@ public interface Producer extends Closeable { /** * See {@link KafkaProducer#initTransactions()} */ - default void initTransactions() { - initTransactions(false); - } - - /** - * See {@link KafkaProducer#initTransactions(boolean)} - */ - void initTransactions(boolean keepPreparedTxn); + void initTransactions(); /** * See {@link KafkaProducer#beginTransaction()} @@ -62,11 +55,6 @@ default void initTransactions() { void sendOffsetsToTransaction(Map offsets, ConsumerGroupMetadata groupMetadata) throws ProducerFencedException; - /** - * See {@link KafkaProducer#prepareTransaction()} - */ - PreparedTxnState prepareTransaction() throws ProducerFencedException; - /** * See {@link KafkaProducer#commitTransaction()} */ @@ -77,11 +65,6 @@ void sendOffsetsToTransaction(Map offsets, */ void abortTransaction() throws ProducerFencedException; - /** - * See {@link KafkaProducer#completeTransaction(PreparedTxnState)} - */ - void completeTransaction(PreparedTxnState preparedTxnState) throws ProducerFencedException; - /** * @see KafkaProducer#registerMetricForSubscription(KafkaMetric) */ diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java index 313648497bab1..23dd02bda98f3 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerConfig.java @@ -35,14 +35,13 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; import java.util.concurrent.atomic.AtomicInteger; -import static org.apache.kafka.common.config.ConfigDef.NO_DEFAULT_VALUE; import static org.apache.kafka.common.config.ConfigDef.Range.atLeast; import static org.apache.kafka.common.config.ConfigDef.Range.between; import static org.apache.kafka.common.config.ConfigDef.ValidString.in; @@ -95,26 +94,24 @@ public class ProducerConfig extends AbstractConfig { + "Note: This setting gives the upper bound of the batch size to be sent. If we have fewer than this many bytes accumulated " + "for this partition, we will 'linger' for the linger.ms time waiting for more records to show up. " + "This linger.ms setting defaults to 5, which means the producer will wait for 5ms or until the record batch is " - + "of batch.size (whichever happens first) before sending the record batch. Note that broker backpressure can " - + " result in a higher effective linger time than this setting. " + + "of batch.size(whichever happens first) before sending the record batch. Note that broker backpressure can " + + " result in a higher effective linger time than this setting." + "The default changed from 0 to 5 in Apache Kafka 4.0 as the efficiency gains from larger batches typically result in " + "similar or lower producer latency despite the increased linger."; /** partitioner.adaptive.partitioning.enable */ - public static final String PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; - @Deprecated - public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG; - private static final String PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_DOC = + public static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG = "partitioner.adaptive.partitioning.enable"; + private static final String PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC = "When set to 'true', the producer will try to adapt to broker performance and produce more messages to partitions hosted on faster brokers. " - + "If 'false', the producer will try to distribute messages uniformly. Note: this setting has no effect if a custom partitioner is used."; + + "If 'false', producer will try to distribute messages uniformly. Note: this setting has no effect if a custom partitioner is used"; /** partitioner.availability.timeout.ms */ public static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG = "partitioner.availability.timeout.ms"; private static final String PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC = "If a broker cannot process produce requests from a partition for " + PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG + " time, " + "the partitioner treats that partition as not available. If the value is 0, this logic is disabled. " - + "Note: this setting has no effect if a custom partitioner is used or " + PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG - + " is set to 'false'."; + + "Note: this setting has no effect if a custom partitioner is used or " + PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG + + " is set to 'false'"; /** partitioner.ignore.keys */ public static final String PARTITIONER_IGNORE_KEYS_CONFIG = "partitioner.ignore.keys"; @@ -212,7 +209,7 @@ public class ProducerConfig extends AbstractConfig { /** buffer.memory */ public static final String BUFFER_MEMORY_CONFIG = "buffer.memory"; private static final String BUFFER_MEMORY_DOC = "The total bytes of memory the producer can use to buffer records waiting to be sent to the server. If records are " - + "sent faster than they can be delivered to the server the producer will block for " + MAX_BLOCK_MS_CONFIG + " after which it will fail with an exception." + + "sent faster than they can be delivered to the server the producer will block for " + MAX_BLOCK_MS_CONFIG + " after which it will throw an exception." + "

    " + "This setting should correspond roughly to the total memory the producer will use, but is not a hard bound since " + "not all memory the producer uses is used for buffering. Some additional memory will be used for compression (if " @@ -277,12 +274,11 @@ public class ProducerConfig extends AbstractConfig { /** retries */ public static final String RETRIES_CONFIG = CommonClientConfigs.RETRIES_CONFIG; - private static final String RETRIES_DOC = "Number of times to retry a request that fails with a transient error." - + " Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error. " - + " Requests will be retried this many times until they succeed, fail with a non-transient error, or the " + DELIVERY_TIMEOUT_MS_CONFIG + " expires." - + " Note that this automatic retry will simply resend the same record upon receiving the error." - + " Setting a value of zero will disable this automatic retry behaviour, so that the transient errors will be propagated to the application to be handled." - + " Users should generally prefer to leave this config unset and instead use " + DELIVERY_TIMEOUT_MS_CONFIG + " to control" + private static final String RETRIES_DOC = "Setting a value greater than zero will cause the client to resend any record whose send fails with a potentially transient error." + + " Note that this retry is no different than if the client resent the record upon receiving the error." + + " Produce requests will be failed before the number of retries has been exhausted if the timeout configured by" + + " " + DELIVERY_TIMEOUT_MS_CONFIG + " expires first before successful acknowledgement. Users should generally" + + " prefer to leave this config unset and instead use " + DELIVERY_TIMEOUT_MS_CONFIG + " to control" + " retry behavior." + "

    " + "Enabling idempotence requires this config value to be greater than 0." @@ -313,8 +309,8 @@ public class ProducerConfig extends AbstractConfig { public static final String PARTITIONER_CLASS_CONFIG = "partitioner.class"; private static final String PARTITIONER_CLASS_DOC = "Determines which partition to send a record to when records are produced. Available options are:" + "

      " + - "
    • If not set, the default partitioning logic is used. " + - "This strategy send records to a partition until at least " + BATCH_SIZE_CONFIG + " bytes is produced to the partition. It works with the strategy:" + + "
    • If not set, the default partitioning logic is used. " + + "This strategy send records to a partition until at least " + BATCH_SIZE_CONFIG + " bytes is produced to the partition. It works with the strategy:" + "
        " + "
      1. If no partition is specified but a key is present, choose a partition based on a hash of the key.
      2. " + "
      3. If no partition or key is present, choose the sticky partition that changes when at least " + BATCH_SIZE_CONFIG + " bytes are produced to the partition.
      4. " + @@ -359,11 +355,6 @@ public class ProducerConfig extends AbstractConfig { "By default the TransactionId is not configured, which means transactions cannot be used. " + "Note that, by default, transactions require a cluster of at least three brokers which is the recommended setting for production; for development you can change this, by adjusting broker setting transaction.state.log.replication.factor."; - /** transaction.two.phase.commit.enable */ - public static final String TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG = "transaction.two.phase.commit.enable"; - private static final String TRANSACTION_TWO_PHASE_COMMIT_ENABLE_DOC = "If set to true, then the broker is informed that the client is participating in " + - "two phase commit protocol and transactions that this client starts never expire."; - /** * security.providers */ @@ -373,12 +364,7 @@ public class ProducerConfig extends AbstractConfig { private static final AtomicInteger PRODUCER_CLIENT_ID_SEQUENCE = new AtomicInteger(1); static { - CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, - Type.LIST, - NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), - Importance.HIGH, - CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) + CONFIG = new ConfigDef().define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, ClientDnsLookup.USE_ALL_DNS_IPS.toString(), @@ -399,7 +385,7 @@ public class ProducerConfig extends AbstractConfig { .define(COMPRESSION_LZ4_LEVEL_CONFIG, Type.INT, CompressionType.LZ4.defaultLevel(), CompressionType.LZ4.levelValidator(), Importance.MEDIUM, COMPRESSION_LZ4_LEVEL_DOC) .define(COMPRESSION_ZSTD_LEVEL_CONFIG, Type.INT, CompressionType.ZSTD.defaultLevel(), CompressionType.ZSTD.levelValidator(), Importance.MEDIUM, COMPRESSION_ZSTD_LEVEL_DOC) .define(BATCH_SIZE_CONFIG, Type.INT, 16384, atLeast(0), Importance.MEDIUM, BATCH_SIZE_DOC) - .define(PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADAPTIVE_PARTITIONING_ENABLE_DOC) + .define(PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_CONFIG, Type.BOOLEAN, true, Importance.LOW, PARTITIONER_ADPATIVE_PARTITIONING_ENABLE_DOC) .define(PARTITIONER_AVAILABILITY_TIMEOUT_MS_CONFIG, Type.LONG, 0, atLeast(0), Importance.LOW, PARTITIONER_AVAILABILITY_TIMEOUT_MS_DOC) .define(PARTITIONER_IGNORE_KEYS_CONFIG, Type.BOOLEAN, false, Importance.MEDIUM, PARTITIONER_IGNORE_KEYS_DOC) .define(LINGER_MS_CONFIG, Type.LONG, 5, atLeast(0), Importance.MEDIUM, LINGER_MS_DOC) @@ -467,7 +453,7 @@ public class ProducerConfig extends AbstractConfig { .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, JmxReporter.class.getName(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + new ConfigDef.NonNullValidator(), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, @@ -506,8 +492,8 @@ public class ProducerConfig extends AbstractConfig { Importance.MEDIUM, PARTITIONER_CLASS_DOC) .define(INTERCEPTOR_CLASSES_CONFIG, Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + Collections.emptyList(), + new ConfigDef.NonNullValidator(), Importance.LOW, INTERCEPTOR_CLASSES_DOC) .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, @@ -540,11 +526,6 @@ public class ProducerConfig extends AbstractConfig { new ConfigDef.NonEmptyString(), Importance.LOW, TRANSACTIONAL_ID_DOC) - .define(TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, - Type.BOOLEAN, - false, - Importance.LOW, - TRANSACTION_TWO_PHASE_COMMIT_ENABLE_DOC) .define(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, Type.STRING, CommonClientConfigs.DEFAULT_METADATA_RECOVERY_STRATEGY, @@ -557,13 +538,7 @@ public class ProducerConfig extends AbstractConfig { CommonClientConfigs.DEFAULT_METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS, atLeast(0), Importance.LOW, - CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC) - .define(CONFIG_PROVIDERS_CONFIG, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.LOW, - CONFIG_PROVIDERS_DOC); + CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_DOC); } @Override @@ -634,20 +609,6 @@ private void postProcessAndValidateIdempotenceConfigs(final Map if (!idempotenceEnabled && userConfiguredTransactions) { throw new ConfigException("Cannot set a " + ProducerConfig.TRANSACTIONAL_ID_CONFIG + " without also enabling idempotence."); } - - // Validate that transaction.timeout.ms is not set when transaction.two.phase.commit.enable is true - // In standard Kafka transactions, the broker enforces transaction.timeout.ms and aborts any - // transaction that isn't completed in time. With two-phase commit (2PC), an external coordinator - // decides when to finalize, so broker-side timeouts don't apply. Disallow using both. - boolean enable2PC = this.getBoolean(TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG); - boolean userConfiguredTransactionTimeout = originalConfigs.containsKey(TRANSACTION_TIMEOUT_CONFIG); - if (enable2PC && userConfiguredTransactionTimeout) { - throw new ConfigException( - "Cannot set " + ProducerConfig.TRANSACTION_TIMEOUT_CONFIG + - " when " + ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG + - " is set to true. Transactions will not expire with two-phase commit enabled." - ); - } } private static String parseAcks(String acksString) { @@ -682,6 +643,10 @@ public ProducerConfig(Map props) { super(CONFIG, props); } + ProducerConfig(Map props, boolean doLog) { + super(CONFIG, props, doLog); + } + public static Set configNames() { return CONFIG.names(); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java index 4a813dc96babc..48caf98d44a3c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/ProducerInterceptor.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.producer; import org.apache.kafka.common.Configurable; -import org.apache.kafka.common.header.Headers; /** * A plugin interface that allows you to intercept (and possibly mutate) the records received by the producer before @@ -34,8 +33,6 @@ * ProducerInterceptor callbacks may be called from multiple threads. Interceptor implementation must ensure thread-safety, if needed. *

        * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the interceptor to register metrics. The following tags are automatically added to - * all metrics registered: config set to interceptor.classes, and class set to the ProducerInterceptor class name. */ public interface ProducerInterceptor extends Configurable, AutoCloseable { /** @@ -84,37 +81,12 @@ public interface ProducerInterceptor extends Configurable, AutoCloseable { * @param metadata The metadata for the record that was sent (i.e. the partition and offset). * If an error occurred, metadata will contain only valid topic and maybe * partition. If partition is not given in ProducerRecord and an error occurs - * before partition gets assigned, then partition will be set to {@link RecordMetadata#UNKNOWN_PARTITION}. + * before partition gets assigned, then partition will be set to RecordMetadata.NO_PARTITION. * The metadata may be null if the client passed null record to * {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)}. * @param exception The exception thrown during processing of this record. Null if no error occurred. */ - default void onAcknowledgement(RecordMetadata metadata, Exception exception) {} - - /** - * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before - * it gets sent to the server. - *

        - * This method is generally called just before the user callback is called, and in additional cases when KafkaProducer.send() - * throws an exception. - *

        - * Any exception thrown by this method will be ignored by the caller. - *

        - * This method will generally execute in the background I/O thread, so the implementation should be reasonably fast. - * Otherwise, sending of messages from other threads could be delayed. - * - * @param metadata The metadata for the record that was sent (i.e. the partition and offset). - * If an error occurred, metadata will contain only valid topic and maybe - * partition. If partition is not given in ProducerRecord and an error occurs - * before partition gets assigned, then partition will be set to {@link RecordMetadata#UNKNOWN_PARTITION}. - * The metadata may be null if the client passed null record to - * {@link org.apache.kafka.clients.producer.KafkaProducer#send(ProducerRecord)}. - * @param exception The exception thrown during processing of this record. Null if no error occurred. - * @param headers The headers for the record that was sent. It is read-only. - */ - default void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { - onAcknowledgement(metadata, exception); - } + void onAcknowledgement(RecordMetadata metadata, Exception exception); /** * This is called when interceptor is closed diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java index 6c94466c55e85..7d942d572cfd5 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetrics.java @@ -33,7 +33,6 @@ public class KafkaProducerMetrics implements AutoCloseable { private static final String TXN_SEND_OFFSETS = "txn-send-offsets"; private static final String TXN_COMMIT = "txn-commit"; private static final String TXN_ABORT = "txn-abort"; - private static final String TXN_PREPARE = "txn-prepare"; private static final String TOTAL_TIME_SUFFIX = "-time-ns-total"; private static final String METADATA_WAIT = "metadata-wait"; @@ -45,7 +44,6 @@ public class KafkaProducerMetrics implements AutoCloseable { private final Sensor sendOffsetsSensor; private final Sensor commitTxnSensor; private final Sensor abortTxnSensor; - private final Sensor prepareTxnSensor; private final Sensor metadataWaitSensor; public KafkaProducerMetrics(Metrics metrics) { @@ -75,10 +73,6 @@ public KafkaProducerMetrics(Metrics metrics) { TXN_ABORT, "Total time producer has spent in abortTransaction in nanoseconds." ); - prepareTxnSensor = newLatencySensor( - TXN_PREPARE, - "Total time producer has spent in prepareTransaction in nanoseconds." - ); metadataWaitSensor = newLatencySensor( METADATA_WAIT, "Total time producer has spent waiting on topic metadata in nanoseconds." @@ -93,7 +87,6 @@ public void close() { removeMetric(TXN_SEND_OFFSETS); removeMetric(TXN_COMMIT); removeMetric(TXN_ABORT); - removeMetric(TXN_PREPARE); removeMetric(METADATA_WAIT); } @@ -121,10 +114,6 @@ public void recordAbortTxn(long duration) { abortTxnSensor.record(duration); } - public void recordPrepareTxn(long duration) { - prepareTxnSensor.record(duration); - } - public void recordMetadataWait(long duration) { metadataWaitSensor.record(duration); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java index 71d3839cedd12..75bf8485e4737 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/ProducerInterceptors.java @@ -17,15 +17,10 @@ package org.apache.kafka.clients.producer.internals; -import org.apache.kafka.clients.producer.ProducerConfig; import org.apache.kafka.clients.producer.ProducerInterceptor; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.header.Headers; -import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; -import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.record.RecordBatch; import org.slf4j.Logger; @@ -40,10 +35,10 @@ */ public class ProducerInterceptors implements Closeable { private static final Logger log = LoggerFactory.getLogger(ProducerInterceptors.class); - private final List>> interceptorPlugins; + private final List> interceptors; - public ProducerInterceptors(List> interceptors, Metrics metrics) { - this.interceptorPlugins = Plugin.wrapInstances(interceptors, metrics, ProducerConfig.INTERCEPTOR_CLASSES_CONFIG); + public ProducerInterceptors(List> interceptors) { + this.interceptors = interceptors; } /** @@ -62,9 +57,9 @@ public ProducerInterceptors(List> interceptors, Metric */ public ProducerRecord onSend(ProducerRecord record) { ProducerRecord interceptRecord = record; - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ProducerInterceptor interceptor : this.interceptors) { try { - interceptRecord = interceptorPlugin.get().onSend(interceptRecord); + interceptRecord = interceptor.onSend(interceptRecord); } catch (Exception e) { // do not propagate interceptor exception, log and continue calling other interceptors // be careful not to throw exception from here @@ -79,7 +74,7 @@ public ProducerRecord onSend(ProducerRecord record) { /** * This method is called when the record sent to the server has been acknowledged, or when sending the record fails before - * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception, Headers)} + * it gets sent to the server. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} * method for each interceptor. * * This method does not throw exceptions. Exceptions thrown by any of interceptor methods are caught and ignored. @@ -87,12 +82,11 @@ public ProducerRecord onSend(ProducerRecord record) { * @param metadata The metadata for the record that was sent (i.e. the partition and offset). * If an error occurred, metadata will only contain valid topic and maybe partition. * @param exception The exception thrown during processing of this record. Null if no error occurred. - * @param headers The headers for the record that was sent */ - public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + public void onAcknowledgement(RecordMetadata metadata, Exception exception) { + for (ProducerInterceptor interceptor : this.interceptors) { try { - interceptorPlugin.get().onAcknowledgement(metadata, exception, headers); + interceptor.onAcknowledgement(metadata, exception); } catch (Exception e) { // do not propagate interceptor exceptions, just log log.warn("Error executing interceptor onAcknowledgement callback", e); @@ -102,7 +96,7 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception, Head /** * This method is called when sending the record fails in {@link ProducerInterceptor#onSend - * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception, Headers)} + * (ProducerRecord)} method. This method calls {@link ProducerInterceptor#onAcknowledgement(RecordMetadata, Exception)} * method for each interceptor * * @param record The record from client @@ -111,24 +105,16 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception, Head * @param exception The exception thrown during processing of this record. */ public void onSendError(ProducerRecord record, TopicPartition interceptTopicPartition, Exception exception) { - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ProducerInterceptor interceptor : this.interceptors) { try { - Headers headers = record != null ? record.headers() : new RecordHeaders(); - if (headers instanceof RecordHeaders && !((RecordHeaders) headers).isReadOnly()) { - // make a copy of the headers to make sure we don't change the state of origin record's headers. - // original headers are still writable because client might want to mutate them before retrying. - RecordHeaders recordHeaders = (RecordHeaders) headers; - headers = new RecordHeaders(recordHeaders); - ((RecordHeaders) headers).setReadOnly(); - } if (record == null && interceptTopicPartition == null) { - interceptorPlugin.get().onAcknowledgement(null, exception, headers); + interceptor.onAcknowledgement(null, exception); } else { if (interceptTopicPartition == null) { interceptTopicPartition = extractTopicPartition(record); } - interceptorPlugin.get().onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, - RecordBatch.NO_TIMESTAMP, -1, -1), exception, headers); + interceptor.onAcknowledgement(new RecordMetadata(interceptTopicPartition, -1, -1, + RecordBatch.NO_TIMESTAMP, -1, -1), exception); } } catch (Exception e) { // do not propagate interceptor exceptions, just log @@ -146,9 +132,9 @@ public static TopicPartition extractTopicPartition(ProducerRecord r */ @Override public void close() { - for (Plugin> interceptorPlugin : this.interceptorPlugins) { + for (ProducerInterceptor interceptor : this.interceptors) { try { - interceptorPlugin.close(); + interceptor.close(); } catch (Exception e) { log.error("Failed to close producer interceptor ", e); } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java index f0c2719db9612..64256d040a08c 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/RecordAccumulator.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.producer.internals; +import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.MetadataSnapshot; import org.apache.kafka.clients.producer.Callback; @@ -81,6 +82,7 @@ public class RecordAccumulator { private final boolean enableAdaptivePartitioning; private final BufferPool free; private final Time time; + private final ApiVersions apiVersions; private final ConcurrentMap topicInfoMap = new CopyOnWriteMap<>(); private final ConcurrentMap nodeStats = new CopyOnWriteMap<>(); private final IncompleteBatches incomplete; @@ -107,6 +109,7 @@ public class RecordAccumulator { * @param metrics The metrics * @param metricGrpName The metric group name * @param time The time instance to use + * @param apiVersions Request API versions for current connected brokers * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence * numbers per partition. * @param bufferPool The buffer pool @@ -122,6 +125,7 @@ public RecordAccumulator(LogContext logContext, Metrics metrics, String metricGrpName, Time time, + ApiVersions apiVersions, TransactionManager transactionManager, BufferPool bufferPool) { this.logContext = logContext; @@ -143,6 +147,7 @@ public RecordAccumulator(LogContext logContext, this.incomplete = new IncompleteBatches(); this.muted = new HashSet<>(); this.time = time; + this.apiVersions = apiVersions; nodesDrainIndex = new HashMap<>(); this.transactionManager = transactionManager; registerMetrics(metrics, metricGrpName); @@ -164,6 +169,7 @@ public RecordAccumulator(LogContext logContext, * @param metrics The metrics * @param metricGrpName The metric group name * @param time The time instance to use + * @param apiVersions Request API versions for current connected brokers * @param transactionManager The shared transaction state object which tracks producer IDs, epochs, and sequence * numbers per partition. * @param bufferPool The buffer pool @@ -178,6 +184,7 @@ public RecordAccumulator(LogContext logContext, Metrics metrics, String metricGrpName, Time time, + ApiVersions apiVersions, TransactionManager transactionManager, BufferPool bufferPool) { this(logContext, @@ -191,6 +198,7 @@ public RecordAccumulator(LogContext logContext, metrics, metricGrpName, time, + apiVersions, transactionManager, bufferPool); } @@ -514,12 +522,7 @@ public int splitAndReenqueue(ProducerBatch bigBatch) { // the split doesn't happen too often. CompressionRatioEstimator.setEstimation(bigBatch.topicPartition.topic(), compression.type(), Math.max(1.0f, (float) bigBatch.compressionRatio())); - int targetSplitBatchSize = this.batchSize; - - if (bigBatch.isSplitBatch()) { - targetSplitBatchSize = Math.max(bigBatch.maxRecordSize, bigBatch.estimatedSizeInBytes() / 2); - } - Deque dq = bigBatch.split(targetSplitBatchSize); + Deque dq = bigBatch.split(this.batchSize); int numSplitBatches = dq.size(); Deque partitionDequeue = getOrCreateDeque(bigBatch.topicPartition); while (!dq.isEmpty()) { diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java index 64e8646d6f153..e98122bd9c8c8 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/Sender.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.clients.producer.internals; +import org.apache.kafka.clients.ApiVersions; import org.apache.kafka.clients.ClientRequest; import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.KafkaClient; @@ -28,7 +29,6 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.FencedLeaderEpochException; @@ -53,7 +53,6 @@ import org.apache.kafka.common.requests.ProduceRequest; import org.apache.kafka.common.requests.ProduceResponse; import org.apache.kafka.common.requests.RequestHeader; -import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; @@ -72,6 +71,8 @@ import java.util.function.Function; import java.util.stream.Collectors; +import static org.apache.kafka.common.requests.ProduceResponse.INVALID_OFFSET; + /** * The background thread that handles the sending of produce requests to the Kafka cluster. This thread makes metadata * requests to renew its view of the cluster and then sends produce requests to the appropriate nodes. @@ -119,6 +120,9 @@ public class Sender implements Runnable { /* The max time to wait before retrying a request which has failed */ private final long retryBackoffMs; + /* current request API versions supported by the known brokers */ + private final ApiVersions apiVersions; + /* all the state related to transactions, in particular the producer id, producer epoch, and sequence numbers */ private final TransactionManager transactionManager; @@ -137,7 +141,8 @@ public Sender(LogContext logContext, Time time, int requestTimeoutMs, long retryBackoffMs, - TransactionManager transactionManager) { + TransactionManager transactionManager, + ApiVersions apiVersions) { this.log = logContext.logger(Sender.class); this.client = client; this.accumulator = accumulator; @@ -151,6 +156,7 @@ public Sender(LogContext logContext, this.sensors = new SenderMetrics(metricsRegistry, metadata, client, time); this.requestTimeoutMs = requestTimeoutMs; this.retryBackoffMs = retryBackoffMs; + this.apiVersions = apiVersions; this.transactionManager = transactionManager; this.inFlightBatches = new HashMap<>(); } @@ -236,6 +242,9 @@ private boolean hasPendingTransactionalRequests() { public void run() { log.debug("Starting Kafka producer I/O thread."); + if (transactionManager != null) + transactionManager.setPoisonStateOnInvalidTransition(true); + // main loop, runs until close is called while (running) { try { @@ -453,10 +462,17 @@ private boolean maybeSendAndPollTransactionalRequest() { return true; } - if (transactionManager.hasAbortableError()) { - accumulator.abortUndrainedBatches(transactionManager.lastError()); - } else if (transactionManager.isAborting()) { - accumulator.abortUndrainedBatches(new TransactionAbortedException()); + if (transactionManager.hasAbortableError() || transactionManager.isAborting()) { + if (accumulator.hasIncomplete()) { + // Attempt to get the last error that caused this abort. + RuntimeException exception = transactionManager.lastError(); + // If there was no error, but we are still aborting, + // then this is most likely a case where there was no fatal error. + if (exception == null) { + exception = new TransactionAbortedException(); + } + accumulator.abortUndrainedBatches(exception); + } } TransactionManager.TxnRequestHandler nextRequestHandler = transactionManager.nextRequest(accumulator.hasIncomplete()); @@ -565,7 +581,7 @@ private boolean awaitNodeReady(Node node, FindCoordinatorRequest.CoordinatorType /** * Handle a produce response */ - private void handleProduceResponse(ClientResponse response, Map batches, Map topicNames, long now) { + private void handleProduceResponse(ClientResponse response, Map batches, long now) { RequestHeader requestHeader = response.requestHeader(); int correlationId = requestHeader.correlationId(); if (response.wasTimedOut()) { @@ -595,9 +611,11 @@ private void handleProduceResponse(ClientResponse response, Map partitionsWithUpdatedLeaderInfo = new HashMap<>(); produceResponse.data().responses().forEach(r -> r.partitionResponses().forEach(p -> { + TopicPartition tp = new TopicPartition(r.name(), p.index()); ProduceResponse.PartitionResponse partResp = new ProduceResponse.PartitionResponse( Errors.forCode(p.errorCode()), p.baseOffset(), + INVALID_OFFSET, p.logAppendTimeMs(), p.logStartOffset(), p.recordErrors() @@ -606,20 +624,7 @@ private void handleProduceResponse(ClientResponse response, Map recordsByPartition = new HashMap<>(batches.size()); - Map topicIds = topicIdsForBatches(batches); - ProduceRequestData.TopicProduceDataCollection tpd = new ProduceRequestData.TopicProduceDataCollection(); for (ProducerBatch batch : batches) { TopicPartition tp = batch.topicPartition; MemoryRecords records = batch.records(); - Uuid topicId = topicIds.get(tp.topic()); - ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic(), topicId); - + ProduceRequestData.TopicProduceData tpData = tpd.find(tp.topic()); if (tpData == null) { - tpData = new ProduceRequestData.TopicProduceData() - .setTopicId(topicId).setName(tp.topic()); + tpData = new ProduceRequestData.TopicProduceData().setName(tp.topic()); tpd.add(tpData); } - tpData.partitionData().add(new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition()) .setRecords(records)); @@ -902,11 +901,7 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo .setTopicData(tpd), useTransactionV1Version ); - // Fetch topic names from metadata outside callback as topic ids may change during the callback - // for example if topic was recreated. - Map topicNames = metadata.topicNames(); - - RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, topicNames, time.milliseconds()); + RequestCompletionHandler callback = response -> handleProduceResponse(response, recordsByPartition, time.milliseconds()); String nodeId = Integer.toString(destination); ClientRequest clientRequest = client.newClientRequest(nodeId, requestBuilder, now, acks != 0, @@ -915,15 +910,6 @@ private void sendProduceRequest(long now, int destination, short acks, int timeo log.trace("Sent produce request to {}: {}", nodeId, requestBuilder); } - private Map topicIdsForBatches(List batches) { - return batches.stream() - .collect(Collectors.toMap( - b -> b.topicPartition.topic(), - b -> metadata.topicIds().getOrDefault(b.topicPartition.topic(), Uuid.ZERO_UUID), - (existing, replacement) -> replacement) - ); - } - /** * Wake up the selector associated with this send thread */ @@ -1102,10 +1088,4 @@ void recordBatchSplit() { } } - public static class SenderThread extends KafkaThread { - - public SenderThread(final String name, Runnable runnable, boolean daemon) { - super(name, runnable, daemon); - } - } } diff --git a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java index 969085809e656..c78134c72ecf2 100644 --- a/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java +++ b/clients/src/main/java/org/apache/kafka/clients/producer/internals/TransactionManager.java @@ -33,12 +33,10 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InvalidPidMappingException; import org.apache.kafka.common.errors.InvalidProducerEpochException; -import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.OutOfOrderSequenceException; import org.apache.kafka.common.errors.ProducerFencedException; import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TopicAuthorizationException; -import org.apache.kafka.common.errors.TransactionAbortableException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnknownProducerIdException; import org.apache.kafka.common.errors.UnsupportedVersionException; @@ -122,6 +120,58 @@ public class TransactionManager { private final Set newPartitionsInTransaction; private final Set pendingPartitionsInTransaction; private final Set partitionsInTransaction; + + /** + * During its normal course of operations, the transaction manager transitions through different internal + * states (i.e. by updating {@link #currentState}) to one of those defined in {@link State}. These state transitions + * result from actions on one of the following classes of threads: + * + *

          + *
        • Application threads that invokes {@link Producer} API calls
        • + *
        • {@link Sender} thread operations
        • + *
        + * + * When an invalid state transition is detected during execution on an application thread, the + * {@link #currentState} is not updated and an {@link IllegalStateException} is thrown. This gives the + * application the opportunity to fix the issue without permanently poisoning the state of the + * transaction manager. The {@link Producer} API calls that perform a state transition include: + * + *
          + *
        • {@link Producer#initTransactions()} calls {@link #initializeTransactions()}
        • + *
        • {@link Producer#beginTransaction()} calls {@link #beginTransaction()}
        • + *
        • {@link Producer#commitTransaction()}} calls {@link #beginCommit()}
        • + *
        • {@link Producer#abortTransaction()} calls {@link #beginAbort()} + *
        • + *
        • {@link Producer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} calls + * {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} + *
        • + *
        • {@link Producer#send(ProducerRecord)} (and its variants) calls + * {@link #maybeAddPartition(TopicPartition)} and + * {@link #maybeTransitionToErrorState(RuntimeException)} + *
        • + *
        + * + *

        + * + * The {@link Producer} is implemented such that much of its work delegated to and performed asynchronously on the + * {@link Sender} thread. This includes record batching, network I/O, broker response handlers, etc. If an + * invalid state transition is detected in the {@link Sender} thread, in addition to throwing an + * {@link IllegalStateException}, the transaction manager intentionally "poisons" itself by setting its + * {@link #currentState} to {@link State#FATAL_ERROR}, a state from which it cannot recover. + * + *

        + * + * It's important to prevent possible corruption when the transaction manager has determined that it is in a + * fatal state. Subsequent transaction operations attempted via either the application or the + * {@link Sender} thread should fail. This is achieved when these operations invoke the + * {@link #maybeFailWithError()} method, as it causes a {@link KafkaException} to be thrown, ensuring the stated + * transactional guarantees are not violated. + * + *

        + * + * See KAFKA-14831 for more detail. + */ + private final ThreadLocal shouldPoisonStateOnInvalidTransition; private PendingStateTransition pendingTransition; // This is used by the TxnRequestHandlers to control how long to back off before a given request is retried. @@ -145,15 +195,12 @@ public class TransactionManager { private volatile boolean clientSideEpochBumpRequired = false; private volatile long latestFinalizedFeaturesEpoch = -1; private volatile boolean isTransactionV2Enabled = false; - private final boolean enable2PC; - private volatile ProducerIdAndEpoch preparedTxnState = ProducerIdAndEpoch.NONE; private enum State { UNINITIALIZED, INITIALIZING, READY, IN_TRANSACTION, - PREPARED_TRANSACTION, COMMITTING_TRANSACTION, ABORTING_TRANSACTION, ABORTABLE_ERROR, @@ -169,12 +216,10 @@ private boolean isTransitionValid(State source, State target) { return source == INITIALIZING || source == COMMITTING_TRANSACTION || source == ABORTING_TRANSACTION; case IN_TRANSACTION: return source == READY; - case PREPARED_TRANSACTION: - return source == IN_TRANSACTION || source == INITIALIZING; case COMMITTING_TRANSACTION: - return source == IN_TRANSACTION || source == PREPARED_TRANSACTION; + return source == IN_TRANSACTION; case ABORTING_TRANSACTION: - return source == IN_TRANSACTION || source == PREPARED_TRANSACTION || source == ABORTABLE_ERROR; + return source == IN_TRANSACTION || source == ABORTABLE_ERROR; case ABORTABLE_ERROR: return source == IN_TRANSACTION || source == COMMITTING_TRANSACTION || source == ABORTABLE_ERROR || source == INITIALIZING; @@ -210,8 +255,7 @@ public TransactionManager(final LogContext logContext, final String transactionalId, final int transactionTimeoutMs, final long retryBackoffMs, - final ApiVersions apiVersions, - final boolean enable2PC) { + final ApiVersions apiVersions) { this.producerIdAndEpoch = ProducerIdAndEpoch.NONE; this.transactionalId = transactionalId; this.log = logContext.logger(TransactionManager.class); @@ -221,6 +265,7 @@ public TransactionManager(final LogContext logContext, this.newPartitionsInTransaction = new HashSet<>(); this.pendingPartitionsInTransaction = new HashSet<>(); this.partitionsInTransaction = new HashSet<>(); + this.shouldPoisonStateOnInvalidTransition = ThreadLocal.withInitial(() -> false); this.pendingRequests = new PriorityQueue<>(10, Comparator.comparingInt(o -> o.priority().priority)); this.pendingTxnOffsetCommits = new HashMap<>(); this.partitionsWithUnresolvedSequences = new HashMap<>(); @@ -228,78 +273,17 @@ public TransactionManager(final LogContext logContext, this.retryBackoffMs = retryBackoffMs; this.txnPartitionMap = new TxnPartitionMap(logContext); this.apiVersions = apiVersions; - this.enable2PC = enable2PC; - } - - /** - * During its normal course of operations, the transaction manager transitions through different internal - * states (i.e. by updating {@link #currentState}) to one of those defined in {@link State}. These state transitions - * result from actions on one of the following classes of threads: - * - *

          - *
        • Application threads that invokes {@link Producer} API calls
        • - *
        • {@link Sender} thread operations
        • - *
        - * - * When an invalid state transition is detected during execution on an application thread, the - * {@link #currentState} is not updated and an {@link IllegalStateException} is thrown. This gives the - * application the opportunity to fix the issue without permanently poisoning the state of the - * transaction manager. The {@link Producer} API calls that perform a state transition include: - * - *
          - *
        • {@link Producer#initTransactions()} calls {@link #initializeTransactions(boolean)}
        • - *
        • {@link Producer#beginTransaction()} calls {@link #beginTransaction()}
        • - *
        • {@link Producer#commitTransaction()}} calls {@link #beginCommit()}
        • - *
        • {@link Producer#abortTransaction()} calls {@link #beginAbort()} - *
        • - *
        • {@link Producer#sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} calls - * {@link #sendOffsetsToTransaction(Map, ConsumerGroupMetadata)} - *
        • - *
        • {@link Producer#send(ProducerRecord)} (and its variants) calls - * {@link #maybeAddPartition(TopicPartition)} and - * {@link #maybeTransitionToErrorState(RuntimeException)} - *
        • - *
        - * - *

        - * - * The {@link Producer} is implemented such that much of its work delegated to and performed asynchronously on the - * {@link Sender} thread. This includes record batching, network I/O, broker response handlers, etc. If an - * invalid state transition is detected in the {@link Sender} thread, in addition to throwing an - * {@link IllegalStateException}, the transaction manager intentionally "poisons" itself by setting its - * {@link #currentState} to {@link State#FATAL_ERROR}, a state from which it cannot recover. - * - *

        - * - * It's important to prevent possible corruption when the transaction manager has determined that it is in a - * fatal state. Subsequent transaction operations attempted via either the application or the - * {@link Sender} thread should fail. This is achieved when these operations invoke the - * {@link #maybeFailWithError()} method, as it causes a {@link KafkaException} to be thrown, ensuring the stated - * transactional guarantees are not violated. - * - *

        - * - * See KAFKA-14831 for more detail. - * - * @return {@code true} to set state to {@link State#FATAL_ERROR} before throwing an exception, - * {@code false} to throw an exception without first changing the state - */ - protected boolean shouldPoisonStateOnInvalidTransition() { - return Thread.currentThread() instanceof Sender.SenderThread; } - synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) { - return initializeTransactions(producerIdAndEpoch, false); + void setPoisonStateOnInvalidTransition(boolean shouldPoisonState) { + shouldPoisonStateOnInvalidTransition.set(shouldPoisonState); } - public synchronized TransactionalRequestResult initializeTransactions(boolean keepPreparedTxn) { - return initializeTransactions(ProducerIdAndEpoch.NONE, keepPreparedTxn); + public synchronized TransactionalRequestResult initializeTransactions() { + return initializeTransactions(ProducerIdAndEpoch.NONE); } - synchronized TransactionalRequestResult initializeTransactions( - ProducerIdAndEpoch producerIdAndEpoch, - boolean keepPreparedTxn - ) { + synchronized TransactionalRequestResult initializeTransactions(ProducerIdAndEpoch producerIdAndEpoch) { maybeFailWithError(); boolean isEpochBump = producerIdAndEpoch != ProducerIdAndEpoch.NONE; @@ -308,9 +292,6 @@ synchronized TransactionalRequestResult initializeTransactions( if (!isEpochBump) { transitionTo(State.INITIALIZING); log.info("Invoking InitProducerId for the first time in order to acquire a producer ID"); - if (keepPreparedTxn) { - log.info("Invoking InitProducerId with keepPreparedTxn set to true for 2PC transactions"); - } } else { log.info("Invoking InitProducerId with current producer ID and epoch {} in order to bump the epoch", producerIdAndEpoch); } @@ -318,10 +299,7 @@ synchronized TransactionalRequestResult initializeTransactions( .setTransactionalId(transactionalId) .setTransactionTimeoutMs(transactionTimeoutMs) .setProducerId(producerIdAndEpoch.producerId) - .setProducerEpoch(producerIdAndEpoch.epoch) - .setEnable2Pc(enable2PC) - .setKeepPreparedTxn(keepPreparedTxn); - + .setProducerEpoch(producerIdAndEpoch.epoch); InitProducerIdHandler handler = new InitProducerIdHandler(new InitProducerIdRequest.Builder(requestData), isEpochBump); enqueueRequest(handler); @@ -336,22 +314,6 @@ public synchronized void beginTransaction() { transitionTo(State.IN_TRANSACTION); } - /** - * Prepare a transaction for a two-phase commit. - * This transitions the transaction to the PREPARED_TRANSACTION state. - * The preparedTxnState is set with the current producer ID and epoch. - */ - public synchronized void prepareTransaction() { - ensureTransactional(); - throwIfPendingState("prepareTransaction"); - maybeFailWithError(); - transitionTo(State.PREPARED_TRANSACTION); - this.preparedTxnState = new ProducerIdAndEpoch( - this.producerIdAndEpoch.producerId, - this.producerIdAndEpoch.epoch - ); - } - public synchronized TransactionalRequestResult beginCommit() { return handleCachedTransactionRequestResult(() -> { maybeFailWithError(); @@ -509,10 +471,6 @@ public boolean isTransactionV2Enabled() { return isTransactionV2Enabled; } - public boolean is2PCEnabled() { - return enable2PC; - } - synchronized boolean hasPartitionsToAdd() { return !newPartitionsInTransaction.isEmpty() || !pendingPartitionsInTransaction.isEmpty(); } @@ -771,15 +729,6 @@ public synchronized void maybeTransitionToErrorState(RuntimeException exception) || exception instanceof InvalidPidMappingException) { transitionToFatalError(exception); } else if (isTransactional()) { - // RetriableExceptions from the Sender thread are converted to Abortable errors - // because they indicate that the transaction cannot be completed after all retry attempts. - // This conversion ensures the application layer treats these errors as abortable, - // preventing duplicate message delivery. - if (exception instanceof RetriableException || - exception instanceof InvalidTxnStateException) { - exception = new TransactionAbortableException("Transaction Request was aborted after exhausting retries.", exception); - } - if (needToTriggerEpochBumpFromClient() && !isCompleting()) { clientSideEpochBumpRequired = true; } @@ -922,7 +871,7 @@ synchronized TxnRequestHandler nextRequest(boolean hasIncompleteBatches) { log.debug("Not sending EndTxn for completed transaction since no partitions " + "or offsets were successfully added"); } - resetTransactionState(); + completeTransaction(); } nextRequestHandler = pendingRequests.poll(); } @@ -1093,15 +1042,6 @@ synchronized boolean isInitializing() { return isTransactional() && currentState == State.INITIALIZING; } - /** - * Check if the transaction is in the prepared state. - * - * @return true if the current state is PREPARED_TRANSACTION - */ - public synchronized boolean isPrepared() { - return currentState == State.PREPARED_TRANSACTION; - } - void handleCoordinatorReady() { NodeApiVersions nodeApiVersions = transactionCoordinator != null ? apiVersions.get(transactionCoordinator.idString()) : @@ -1123,7 +1063,7 @@ private void transitionTo(State target, RuntimeException error) { String message = idString + "Invalid transition attempted from state " + currentState.name() + " to state " + target.name(); - if (shouldPoisonStateOnInvalidTransition()) { + if (shouldPoisonStateOnInvalidTransition.get()) { currentState = State.FATAL_ERROR; lastError = new IllegalStateException(message); throw lastError; @@ -1329,7 +1269,7 @@ boolean canHandleAbortableError() { return coordinatorSupportsBumpingEpoch || isTransactionV2Enabled; } - private void resetTransactionState() { + private void completeTransaction() { if (clientSideEpochBumpRequired) { transitionTo(State.INITIALIZING); } else { @@ -1341,7 +1281,6 @@ private void resetTransactionState() { newPartitionsInTransaction.clear(); pendingPartitionsInTransaction.clear(); partitionsInTransaction.clear(); - preparedTxnState = ProducerIdAndEpoch.NONE; } abstract class TxnRequestHandler implements RequestCompletionHandler { @@ -1498,21 +1437,7 @@ public void handleResponse(AbstractResponse response) { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(initProducerIdResponse.data().producerId(), initProducerIdResponse.data().producerEpoch()); setProducerIdAndEpoch(producerIdAndEpoch); - // If this is a transaction with keepPreparedTxn=true, transition directly - // to PREPARED_TRANSACTION state IFF there is an ongoing transaction. - if (builder.data.keepPreparedTxn() && - initProducerIdResponse.data().ongoingTxnProducerId() != RecordBatch.NO_PRODUCER_ID - ) { - transitionTo(State.PREPARED_TRANSACTION); - // Update the preparedTxnState with the ongoing pid and epoch from the response. - // This will be used to complete the transaction later. - TransactionManager.this.preparedTxnState = new ProducerIdAndEpoch( - initProducerIdResponse.data().ongoingTxnProducerId(), - initProducerIdResponse.data().ongoingTxnProducerEpoch() - ); - } else { - transitionTo(State.READY); - } + transitionTo(State.READY); lastError = null; if (this.isEpochBump) { resetSequenceNumbers(); @@ -1749,7 +1674,7 @@ boolean isEndTxn() { public void handleResponse(AbstractResponse response) { EndTxnResponse endTxnResponse = (EndTxnResponse) response; Errors error = endTxnResponse.error(); - boolean isAbort = !builder.data.committed(); + if (error == Errors.NONE) { // For End Txn version 5+, the broker includes the producerId and producerEpoch in the EndTxnResponse. // For versions lower than 5, the producer Id and epoch are set to -1 by default. @@ -1766,7 +1691,7 @@ public void handleResponse(AbstractResponse response) { setProducerIdAndEpoch(producerIdAndEpoch); resetSequenceNumbers(); } - resetTransactionState(); + completeTransaction(); result.done(); } else if (error == Errors.COORDINATOR_NOT_AVAILABLE || error == Errors.NOT_COORDINATOR) { lookupCoordinator(FindCoordinatorRequest.CoordinatorType.TRANSACTION, transactionalId); @@ -1782,11 +1707,6 @@ public void handleResponse(AbstractResponse response) { fatalError(error.exception()); } else if (error == Errors.UNKNOWN_PRODUCER_ID) { abortableErrorIfPossible(error.exception()); - } else if (isAbort && error.exception() instanceof TransactionAbortableException) { - // When aborting a transaction, we must convert TRANSACTION_ABORTABLE errors to KafkaException - // because if an abort operation itself encounters an abortable error, retrying the abort would create a cycle. - // Instead, we treat this as fatal error at the application layer to ensure the transaction can be cleanly terminated. - fatalError(new KafkaException("Failed to abort transaction", error.exception())); } else if (error == Errors.TRANSACTION_ABORTABLE) { abortableError(error.exception()); } else { @@ -1968,14 +1888,5 @@ private PendingStateTransition( } } - /** - * Returns a ProducerIdAndEpoch object containing the producer ID and epoch - * of the ongoing transaction. - * This is used when preparing a transaction for a two-phase commit. - * - * @return a ProducerIdAndEpoch with the current producer ID and epoch. - */ - public ProducerIdAndEpoch preparedTransactionState() { - return this.preparedTxnState; - } + } diff --git a/clients/src/main/java/org/apache/kafka/common/ClusterResource.java b/clients/src/main/java/org/apache/kafka/common/ClusterResource.java index aad5c3d6d85be..2f857ff560975 100644 --- a/clients/src/main/java/org/apache/kafka/common/ClusterResource.java +++ b/clients/src/main/java/org/apache/kafka/common/ClusterResource.java @@ -28,7 +28,8 @@ public class ClusterResource { /** * Create {@link ClusterResource} with a cluster id. Note that cluster id may be {@code null} if the - * metadata request was sent to a broker without support for cluster ids. + * metadata request was sent to a broker without support for cluster ids. The first version of Kafka + * to support cluster id is 0.10.1.0. * @param clusterId The cluster id */ public ClusterResource(String clusterId) { @@ -37,7 +38,7 @@ public ClusterResource(String clusterId) { /** * Return the cluster id. Note that it may be {@code null} if the metadata request was sent to a broker without - * support for cluster ids. + * support for cluster ids. The first version of Kafka to support cluster id is 0.10.1.0. */ public String clusterId() { return clusterId; diff --git a/clients/src/main/java/org/apache/kafka/common/ClusterResourceListener.java b/clients/src/main/java/org/apache/kafka/common/ClusterResourceListener.java index 63f3f6a13e309..f1939df29559d 100644 --- a/clients/src/main/java/org/apache/kafka/common/ClusterResourceListener.java +++ b/clients/src/main/java/org/apache/kafka/common/ClusterResourceListener.java @@ -24,6 +24,7 @@ *

        *

        Clients

        * There will be one invocation of {@link ClusterResourceListener#onUpdate(ClusterResource)} after each metadata response. + * Note that the cluster id may be null when the Kafka broker version is below 0.10.1.0. If you receive a null cluster id, you can expect it to always be null unless you have a cluster with multiple broker versions which can happen if the cluster is being upgraded while the client is running. *

        * {@link org.apache.kafka.clients.producer.ProducerInterceptor} : The {@link ClusterResourceListener#onUpdate(ClusterResource)} method will be invoked after {@link org.apache.kafka.clients.producer.ProducerInterceptor#onSend(org.apache.kafka.clients.producer.ProducerRecord)} * but before {@link org.apache.kafka.clients.producer.ProducerInterceptor#onAcknowledgement(org.apache.kafka.clients.producer.RecordMetadata, Exception)} . diff --git a/clients/src/main/java/org/apache/kafka/common/Endpoint.java b/clients/src/main/java/org/apache/kafka/common/Endpoint.java index baa1045929f8e..8d5e8c6d16a55 100644 --- a/clients/src/main/java/org/apache/kafka/common/Endpoint.java +++ b/clients/src/main/java/org/apache/kafka/common/Endpoint.java @@ -16,6 +16,7 @@ */ package org.apache.kafka.common; +import org.apache.kafka.common.annotation.InterfaceStability; import org.apache.kafka.common.security.auth.SecurityProtocol; import java.util.Objects; @@ -25,35 +26,27 @@ * Represents a broker endpoint. */ +@InterfaceStability.Evolving public class Endpoint { - private final String listener; + private final String listenerName; private final SecurityProtocol securityProtocol; private final String host; private final int port; - public Endpoint(String listener, SecurityProtocol securityProtocol, String host, int port) { - this.listener = listener; + public Endpoint(String listenerName, SecurityProtocol securityProtocol, String host, int port) { + this.listenerName = listenerName; this.securityProtocol = securityProtocol; this.host = host; this.port = port; } - /** - * Returns the listener name of this endpoint. - */ - public String listener() { - return listener; - } - /** * Returns the listener name of this endpoint. This is non-empty for endpoints provided * to broker plugins, but may be empty when used in clients. - * @deprecated Since 4.1. Use {@link #listener()} instead. This function will be removed in 5.0. */ - @Deprecated(since = "4.1", forRemoval = true) public Optional listenerName() { - return Optional.ofNullable(listener); + return Optional.ofNullable(listenerName); } /** @@ -87,7 +80,7 @@ public boolean equals(Object o) { } Endpoint that = (Endpoint) o; - return Objects.equals(this.listener, that.listener) && + return Objects.equals(this.listenerName, that.listenerName) && Objects.equals(this.securityProtocol, that.securityProtocol) && Objects.equals(this.host, that.host) && this.port == that.port; @@ -96,13 +89,13 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(listener, securityProtocol, host, port); + return Objects.hash(listenerName, securityProtocol, host, port); } @Override public String toString() { return "Endpoint(" + - "listenerName='" + listener + '\'' + + "listenerName='" + listenerName + '\'' + ", securityProtocol=" + securityProtocol + ", host='" + host + '\'' + ", port=" + port + diff --git a/clients/src/main/java/org/apache/kafka/common/GroupState.java b/clients/src/main/java/org/apache/kafka/common/GroupState.java index aa2565abf249d..c0bcfb999b0df 100644 --- a/clients/src/main/java/org/apache/kafka/common/GroupState.java +++ b/clients/src/main/java/org/apache/kafka/common/GroupState.java @@ -32,18 +32,17 @@ * The following table shows the correspondence between the group states and types. * * - * + * * * - * - * - * - * - * - * - * - * - * + * + * + * + * + * + * + * + * * *
        StateClassic groupConsumer groupShare groupStreams group
        StateClassic groupConsumer groupShare group
        UNKNOWNYesYesYesYes
        PREPARING_REBALANCEYesYes
        COMPLETING_REBALANCEYesYes
        STABLEYesYesYesYes
        DEADYesYesYesYes
        EMPTYYesYesYesYes
        ASSIGNINGYesYes
        RECONCILINGYesYes
        NOT_READYYes
        UNKNOWNYesYesYes
        PREPARING_REBALANCEYesYes
        COMPLETING_REBALANCEYesYes
        STABLEYesYesYes
        DEADYesYesYes
        EMPTYYesYesYes
        ASSIGNINGYes
        RECONCILINGYes
        */ @@ -56,8 +55,7 @@ public enum GroupState { DEAD("Dead"), EMPTY("Empty"), ASSIGNING("Assigning"), - RECONCILING("Reconciling"), - NOT_READY("NotReady"); + RECONCILING("Reconciling"); private static final Map NAME_TO_ENUM = Arrays.stream(values()) .collect(Collectors.toMap(state -> state.name.toUpperCase(Locale.ROOT), Function.identity())); @@ -81,8 +79,6 @@ public static Set groupStatesForType(GroupType type) { return Set.of(PREPARING_REBALANCE, COMPLETING_REBALANCE, STABLE, DEAD, EMPTY); } else if (type == GroupType.CONSUMER) { return Set.of(PREPARING_REBALANCE, COMPLETING_REBALANCE, STABLE, DEAD, EMPTY, ASSIGNING, RECONCILING); - } else if (type == GroupType.STREAMS) { - return Set.of(STABLE, DEAD, EMPTY, ASSIGNING, RECONCILING, NOT_READY); } else if (type == GroupType.SHARE) { return Set.of(STABLE, DEAD, EMPTY); } else { diff --git a/clients/src/main/java/org/apache/kafka/common/GroupType.java b/clients/src/main/java/org/apache/kafka/common/GroupType.java index 4c3aeac93fbd6..eeb79ea282596 100644 --- a/clients/src/main/java/org/apache/kafka/common/GroupType.java +++ b/clients/src/main/java/org/apache/kafka/common/GroupType.java @@ -26,8 +26,7 @@ public enum GroupType { UNKNOWN("Unknown"), CONSUMER("Consumer"), CLASSIC("Classic"), - SHARE("Share"), - STREAMS("Streams"); + SHARE("Share"); private static final Map NAME_TO_ENUM = Arrays.stream(values()) .collect(Collectors.toMap(type -> type.name.toLowerCase(Locale.ROOT), Function.identity())); diff --git a/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java b/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java index aa13ffd9936b0..4c2815bb3bda5 100644 --- a/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java +++ b/clients/src/main/java/org/apache/kafka/common/InvalidRecordException.java @@ -16,9 +16,9 @@ */ package org.apache.kafka.common; -import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.errors.ApiException; -public class InvalidRecordException extends InvalidConfigurationException { +public class InvalidRecordException extends ApiException { private static final long serialVersionUID = 1; diff --git a/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java b/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java index 257c98e293059..d4b5f35314b80 100644 --- a/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java +++ b/clients/src/main/java/org/apache/kafka/common/KafkaFuture.java @@ -135,7 +135,7 @@ public static KafkaFuture allOf(KafkaFuture... futures) { * The action may be invoked by the thread that calls {@code whenComplete} or it may be invoked by the thread that * completes the future. * - * @param action the action to perform + * @param action the action to preform * @return the new future */ public abstract KafkaFuture whenComplete(BiConsumer action); diff --git a/clients/src/main/java/org/apache/kafka/common/MetricName.java b/clients/src/main/java/org/apache/kafka/common/MetricName.java index 1f5b43104b1b6..578c848f103ab 100644 --- a/clients/src/main/java/org/apache/kafka/common/MetricName.java +++ b/clients/src/main/java/org/apache/kafka/common/MetricName.java @@ -20,7 +20,7 @@ import java.util.Objects; /** - * The MetricName class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.metricName(...). + * The MetricName class encapsulates a metric's name, logical group and its related attributes. It should be constructed using metrics.MetricName(...). *

        * This class captures the following parameters: *

        diff --git a/clients/src/main/java/org/apache/kafka/common/Uuid.java b/clients/src/main/java/org/apache/kafka/common/Uuid.java
        index 6f7f09537f178..45e2b9f1d8fb2 100644
        --- a/clients/src/main/java/org/apache/kafka/common/Uuid.java
        +++ b/clients/src/main/java/org/apache/kafka/common/Uuid.java
        @@ -20,6 +20,8 @@
         import java.util.ArrayList;
         import java.util.Arrays;
         import java.util.Base64;
        +import java.util.Collections;
        +import java.util.HashSet;
         import java.util.List;
         import java.util.Set;
         
        @@ -49,7 +51,11 @@ public class Uuid implements Comparable {
             /**
              * The set of reserved UUIDs that will never be returned by the randomUuid method.
              */
        -    public static final Set RESERVED = Set.of(ZERO_UUID, ONE_UUID);
        +    public static final Set RESERVED = Collections.unmodifiableSet(new HashSet<>(Arrays.asList(
        +            METADATA_TOPIC_ID,
        +            ZERO_UUID,
        +            ONE_UUID
        +    )));
         
             private final long mostSignificantBits;
             private final long leastSignificantBits;
        diff --git a/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java b/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
        index 485c3602d2623..7fbbfa519d153 100644
        --- a/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
        +++ b/clients/src/main/java/org/apache/kafka/common/acl/AclOperation.java
        @@ -112,12 +112,7 @@ public enum AclOperation {
             /**
              * DESCRIBE_TOKENS operation.
              */
        -    DESCRIBE_TOKENS((byte) 14),
        -
        -    /**
        -     * TWO_PHASE_COMMIT operation.
        -     */
        -    TWO_PHASE_COMMIT((byte) 15);
        +    DESCRIBE_TOKENS((byte) 14);
         
             // Note: we cannot have more than 30 ACL operations without modifying the format used
             // to describe ACL operations in MetadataResponse.
        diff --git a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
        index e271cd99c4cd0..3710ab2811cb5 100644
        --- a/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
        +++ b/clients/src/main/java/org/apache/kafka/common/config/AbstractConfig.java
        @@ -65,10 +65,6 @@ public class AbstractConfig {
             public static final String AUTOMATIC_CONFIG_PROVIDERS_PROPERTY = "org.apache.kafka.automatic.config.providers";
         
             public static final String CONFIG_PROVIDERS_CONFIG = "config.providers";
        -    public static final String CONFIG_PROVIDERS_DOC = 
        -            "Comma-separated alias names for classes implementing the ConfigProvider interface. " +
        -            "This enables loading configuration data (such as passwords, API keys, and other credentials) from external " +
        -            "sources. For example, see Configuration Providers.";
         
             private static final String CONFIG_PROVIDERS_PARAM = ".param.";
         
        @@ -113,7 +109,8 @@ public class AbstractConfig {
              */
             @SuppressWarnings({"this-escape"})
             public AbstractConfig(ConfigDef definition, Map originals, Map configProviderProps, boolean doLog) {
        -        Map originalMap = preProcessParsedConfig(Collections.unmodifiableMap(Utils.castToStringObjectMap(originals)));
        +        Map originalMap = Utils.castToStringObjectMap(originals);
        +
                 this.originals = resolveConfigVariables(configProviderProps, originalMap);
                 this.values = definition.parse(this.originals);
                 Map configUpdates = postProcessParsedConfig(Collections.unmodifiableMap(this.values));
        @@ -147,17 +144,7 @@ public AbstractConfig(ConfigDef definition, Map originals) {
              */
             public AbstractConfig(ConfigDef definition, Map originals, boolean doLog) {
                 this(definition, originals, Collections.emptyMap(), doLog);
        -    }
         
        -    /**
        -     * Called directly after user configs got parsed (and thus default values is not set).
        -     * This allows to check user's config.
        -     *
        -     * @param parsedValues unmodifiable map of current configuration
        -     * @return a map of updates that should be applied to the configuration (will be validated to prevent bad updates)
        -     */
        -    protected Map preProcessParsedConfig(Map parsedValues) {
        -        return parsedValues;
             }
         
             /**
        diff --git a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
        index ee2f8c2cfd951..970d9cebf7231 100644
        --- a/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
        +++ b/clients/src/main/java/org/apache/kafka/common/config/ConfigDef.java
        @@ -1006,72 +1006,26 @@ else if (max == null)
             public static class ValidList implements Validator {
         
                 final ValidString validString;
        -        final boolean isEmptyAllowed;
        -        final boolean isNullAllowed;
         
        -        private ValidList(List validStrings, boolean isEmptyAllowed, boolean isNullAllowed) {
        +        private ValidList(List validStrings) {
                     this.validString = new ValidString(validStrings);
        -            this.isEmptyAllowed = isEmptyAllowed;
        -            this.isNullAllowed = isNullAllowed;
        -        }
        -
        -        public static ValidList anyNonDuplicateValues(boolean isEmptyAllowed, boolean isNullAllowed) {
        -            return new ValidList(List.of(), isEmptyAllowed, isNullAllowed);
                 }
         
                 public static ValidList in(String... validStrings) {
        -            return new ValidList(List.of(validStrings), true, false);
        -        }
        -
        -        public static ValidList in(boolean isEmptyAllowed, String... validStrings) {
        -            if (!isEmptyAllowed && validStrings.length == 0) {
        -                throw new IllegalArgumentException("At least one valid string must be provided when empty values are not allowed");
        -            }
        -            return new ValidList(List.of(validStrings), isEmptyAllowed, false);
        +            return new ValidList(Arrays.asList(validStrings));
                 }
         
                 @Override
                 public void ensureValid(final String name, final Object value) {
        -            if (value == null) {
        -                if (isNullAllowed)
        -                    return;
        -                else
        -                    throw new ConfigException("Configuration '" + name + "' values must not be null.");
        -            }
        -
                     @SuppressWarnings("unchecked")
        -            List values = (List) value;
        -            if (!isEmptyAllowed && values.isEmpty()) {
        -                String validString = this.validString.validStrings.isEmpty() ? "any non-empty value" : this.validString.toString();
        -                throw new ConfigException("Configuration '" + name + "' must not be empty. Valid values include: " + validString);
        -            }
        -
        -            if (Set.copyOf(values).size() != values.size()) {
        -                throw new ConfigException("Configuration '" + name + "' values must not be duplicated.");
        -            }
        -
        -            validateIndividualValues(name, values);
        -        }
        -
        -        private void validateIndividualValues(String name, List values) {
        -            boolean hasValidStrings = !validString.validStrings.isEmpty();
        -
        -            for (Object value : values) {
        -                if (value instanceof String) {
        -                    String string = (String) value;
        -                    if (string.isEmpty()) {
        -                        throw new ConfigException("Configuration '" + name + "' values must not be empty.");
        -                    }
        -                    if (hasValidStrings) {
        -                        validString.ensureValid(name, value);
        -                    }
        -                }
        +            List values = (List) value;
        +            for (String string : values) {
        +                validString.ensureValid(name, string);
                     }
                 }
         
                 public String toString() {
        -            return validString + (isEmptyAllowed ? " (empty config allowed)" : " (empty not allowed)") +
        -                    (isNullAllowed ? " (null config allowed)" : " (null not allowed)");
        +            return validString.toString();
                 }
             }
         
        diff --git a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
        index 410082d908be6..fe7e2eb6669e7 100644
        --- a/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
        +++ b/clients/src/main/java/org/apache/kafka/common/config/LogLevelConfig.java
        @@ -17,6 +17,8 @@
         
         package org.apache.kafka.common.config;
         
        +import java.util.Arrays;
        +import java.util.HashSet;
         import java.util.Set;
         
         /**
        @@ -62,8 +64,8 @@ public class LogLevelConfig {
              */
             public static final String TRACE_LOG_LEVEL = "TRACE";
         
        -    public static final Set VALID_LOG_LEVELS = Set.of(
        +    public static final Set VALID_LOG_LEVELS = new HashSet<>(Arrays.asList(
                     FATAL_LOG_LEVEL, ERROR_LOG_LEVEL, WARN_LOG_LEVEL,
                     INFO_LOG_LEVEL, DEBUG_LOG_LEVEL, TRACE_LOG_LEVEL
        -    );
        +    ));
         }
        diff --git a/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
        index 01f7ad1f92718..15e23270d6f0c 100644
        --- a/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
        +++ b/clients/src/main/java/org/apache/kafka/common/config/SaslConfigs.java
        @@ -16,11 +16,8 @@
          */
         package org.apache.kafka.common.config;
         
        -import org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString;
         import org.apache.kafka.common.config.ConfigDef.Range;
         
        -import java.util.List;
        -
         public class SaslConfigs {
         
             private static final String OAUTHBEARER_NOTE = " Currently applies only to OAUTHBEARER.";
        @@ -132,173 +129,6 @@ public class SaslConfigs {
                     + " authentication provider."
                     + LOGIN_EXPONENTIAL_BACKOFF_NOTE;
         
        -    public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS = "sasl.oauthbearer.jwt.retriever.class";
        -    public static final String DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS = "org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever";
        -    public static final String SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC = "

        The fully-qualified class name of a JwtRetriever implementation used to" - + " request tokens from the identity provider.

        " - + "

        The default configuration value represents a class that maintains backward compatibility with previous versions of" - + " Apache Kafka. The default implementation uses the configuration to determine which concrete implementation to create." - + "

        Other implementations that are provided include:

        " - + "
          " - + "
        • org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
        • " - + "
        • org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever
        • " - + "
        • org.apache.kafka.common.security.oauthbearer.FileJwtRetriever
        • " - + "
        • org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
        • " - + "
        "; - - public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS = "sasl.oauthbearer.jwt.validator.class"; - public static final String DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS = "org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator"; - public static final String SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC = "

        The fully-qualified class name of a JwtValidator implementation used to" - + " validate the JWT from the identity provider.

        " - + "

        The default validator (org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator) maintains backward compatibility with previous" - + " versions of Apache Kafka. The default validator uses configuration to determine which concrete implementation to create." - + "

        The built-in JwtValidator implementations are:

        " - + "
          " - + "
        • org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator
        • " - + "
        • org.apache.kafka.common.security.oauthbearer.ClientJwtValidator
        • " - + "
        • org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator
        • " - + "
        "; - - public static final String SASL_OAUTHBEARER_SCOPE = "sasl.oauthbearer.scope"; - public static final String SASL_OAUTHBEARER_SCOPE_DOC = "

        This is the level of access a client application is granted to a resource or API which is" - + " included in the token request. If provided, it should match one or more scopes configured in the identity provider.

        " - + "

        " - + "The scope was previously stored as part of the sasl.jaas.config configuration with the key scope." - + " For backward compatibility, the scope JAAS option can still be used, but it is deprecated and will be removed in a future version." - + "

        " - + "

        Order of precedence:

        " - + "
          " - + "
        • sasl.oauthbearer.scope from configuration
        • " - + "
        • scope from JAAS
        • " - + "
        "; - - public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID = "sasl.oauthbearer.client.credentials.client.id"; - public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC = "

        The ID (defined in/by the OAuth identity provider) to identify the client" + - " requesting the token.

        " - + "

        " - + "The client ID was previously stored as part of the sasl.jaas.config configuration with the key clientId." - + " For backward compatibility, the clientId JAAS option can still be used, but it is deprecated and will be removed in a future version." - + "

        " - + "

        Order of precedence:

        " - + "
          " - + "
        • sasl.oauthbearer.client.credentials.client.id from configuration
        • " - + "
        • clientId from JAAS
        • " - + "
        "; - - public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET = "sasl.oauthbearer.client.credentials.client.secret"; - public static final String SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC = "

        The secret (defined by either the user or preassigned, depending on the" - + " identity provider) of the client requesting the token.

        " - + "

        " - + "The client secret was previously stored as part of the sasl.jaas.config configuration with the key clientSecret." - + " For backward compatibility, the clientSecret JAAS option can still be used, but it is deprecated and will be removed in a future version." - + "

        " - + "

        Order of precedence:

        " - + "
          " - + "
        • sasl.oauthbearer.client.credentials.client.secret from configuration
        • " - + "
        • clientSecret from JAAS
        • " - + "
        "; - - private static final String ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE = "

        Note: If a value for sasl.oauthbearer.assertion.file is provided," - + " this configuration will be ignored.

        "; - - public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM = "sasl.oauthbearer.assertion.algorithm"; - public static final String DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM = "RS256"; - public static final String SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC = "

        The algorithm the Apache Kafka client should use to sign the assertion sent" - + " to the identity provider. It is also used as the value of the OAuth alg (Algorithm) header in the JWT assertion.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD = "sasl.oauthbearer.assertion.claim.aud"; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC = "

        The JWT aud (Audience) claim which will be included in the " - + " client JWT assertion created locally.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS = "sasl.oauthbearer.assertion.claim.exp.seconds"; - public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS = 300; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC = "

        The number of seconds in the future for which the JWT is valid." - + " The value is used to determine the JWT exp (Expiration) claim based on the current system time when the JWT is created.

        " - + "

        The formula to generate the exp claim is very simple:

        " - + "
        "
        -        + "Let:\n\n"
        -        + "  x = the current timestamp in seconds, on client\n"
        -        + "  y = the value of this configuration\n"
        -        + "\n"
        -        + "Then:\n\n"
        -        + "  exp = x + y\n"
        -        + "
        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS = "sasl.oauthbearer.assertion.claim.iss"; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC = "

        The value to be used as the iss (Issuer) claim which will be included in the" - + " client JWT assertion created locally.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE = "sasl.oauthbearer.assertion.claim.jti.include"; - public static final boolean DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE = false; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC = "

        Flag that determines if the JWT assertion should generate a unique ID for the" - + " JWT and include it in the jti (JWT ID) claim.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS = "sasl.oauthbearer.assertion.claim.nbf.seconds"; - public static final int DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS = 60; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC = "

        The number of seconds in the past from which the JWT is valid." - + " The value is used to determine the JWT nbf (Not Before) claim based on the current system time when the JWT is created.

        " - + "

        The formula to generate the nbf claim is very simple:

        " - + "
        "
        -        + "Let:\n\n"
        -        + "  x = the current timestamp in seconds, on client\n"
        -        + "  y = the value of this configuration\n"
        -        + "\n"
        -        + "Then:\n\n"
        -        + "  nbf = x - y\n"
        -        + "
        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB = "sasl.oauthbearer.assertion.claim.sub"; - public static final String SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC = "

        The value to be used as the sub (Subject) claim which will be included in the" - + " client JWT assertion created locally.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_FILE = "sasl.oauthbearer.assertion.file"; - public static final String SASL_OAUTHBEARER_ASSERTION_FILE_DOC = "

        File that contains a pre-generated JWT assertion.

        " - + "

        The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when" - + "the file changes to allow for the file to be reloaded on modifications. This allows for "live" assertion rotation without restarting the Kafka client.

        " - + "

        The file contains the assertion in the serialized, three part JWT format:

        " - + "
          " - + "
        1. The header section is a base 64-encoded JWT header that contains values like alg (Algorithm)," - + " typ (Type, always the literal value JWT), etc.
        2. " - + "
        3. The payload section includes the base 64-encoded set of JWT claims, such as aud (Audience), iss (Issuer)," - + " sub (Subject), etc.
        4. " - + "
        5. The signature section is the concatenated header and payload sections that was signed using a private key
        6. " - + "
        " - + "

        See RFC 7519 and RFC 7515" - + " for more details on the JWT and JWS formats.

        " - + "

        Note: If a value for sasl.oauthbearer.assertion.file is provided, all other" - + " sasl.oauthbearer.assertion.* configurations are ignored.

        "; - - public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE = "sasl.oauthbearer.assertion.private.key.file"; - public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC = "

        File that contains a private key in the standard PEM format which is used to" - + " sign the JWT assertion sent to the identity provider.

        " - + "

        The underlying implementation caches the file contents to avoid the performance hit of loading the file on each access. The caching mechanism will detect when" - + " the file changes to allow for the file to be reloaded on modifications. This allows for "live" private key rotation without restarting the Kafka client.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE = "sasl.oauthbearer.assertion.private.key.passphrase"; - public static final String SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC = "

        The optional passphrase to decrypt the private key file specified by" - + " sasl.oauthbearer.assertion.private.key.file.

        " - + "

        Note: If the file referred to by sasl.oauthbearer.assertion.private.key.file is modified on the file system at runtime and it was" - + " created with a different passphrase than it was previously, the client will not be able to access the private key file because the passphrase is now" - + " out of date. For that reason, when using private key passphrases, either use the same passphrase each time, or—for improved security—restart" - + " the Kafka client using the new passphrase configuration.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - - public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE = "sasl.oauthbearer.assertion.template.file"; - public static final String SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC = "

        This optional configuration specifies the file containing the JWT headers and/or" - + " payload claims to be used when creating the JWT assertion.

        " - + "

        Not all identity providers require the same set of claims; some may require a given claim while others may prohibit it." - + " In order to provide the most flexibility, this configuration allows the user to provide the static header values and claims" - + " that are to be included in the JWT.

        " - + ASSERTION_FILE_MUTUAL_EXCLUSION_NOTICE; - public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "sasl.oauthbearer.scope.claim.name"; public static final String DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME = "scope"; public static final String SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC = "The OAuth claim for the scope is often named \"" + DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME + "\", but this (optional)" @@ -313,8 +143,8 @@ public class SaslConfigs { public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL = "sasl.oauthbearer.token.endpoint.url"; public static final String SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC = "The URL for the OAuth/OIDC identity provider. If the URL is HTTP(S)-based, it is the issuer's token" - + " endpoint URL to which requests will be made to login based on the configuration in " + SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS + ". If the URL is" - + " file-based, it specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."; + + " endpoint URL to which requests will be made to login based on the configuration in " + SASL_JAAS_CONFIG + ". If the URL is file-based, it" + + " specifies a file containing an access token (in JWT serialized form) issued by the OAuth/OIDC identity provider to use for authorization."; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL = "sasl.oauthbearer.jwks.endpoint.url"; public static final String SASL_OAUTHBEARER_JWKS_ENDPOINT_URL_DOC = "The OAuth/OIDC provider URL from which the provider's" @@ -385,22 +215,6 @@ public static void addClientSaslSupport(ConfigDef config) { .define(SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS, ConfigDef.Type.INT, null, ConfigDef.Importance.LOW, SASL_LOGIN_READ_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_LOGIN_RETRY_BACKOFF_MS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, ConfigDef.Type.CLASS, DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, ConfigDef.Type.CLASS, DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_SCOPE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, CaseInsensitiveValidString.in("ES256", "RS256"), ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, Range.between(0, 86400), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, ConfigDef.Type.BOOLEAN, DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, Range.between(0, 3600), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_FILE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Type.STRING, DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC) @@ -409,7 +223,7 @@ public static void addClientSaslSupport(ConfigDef config) { .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Type.LONG, DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Type.INT, DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, ConfigDef.Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER, ConfigDef.Type.STRING, null, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE, ConfigDef.Type.BOOLEAN, DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE, ConfigDef.Importance.LOW, SASL_OAUTHBEARER_HEADER_URLENCODE_DOC); } diff --git a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java index 7675f75a9ab71..5ca4980fc2220 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/SslConfigs.java @@ -18,7 +18,6 @@ import org.apache.kafka.common.config.internals.BrokerSecurityConfigs; -import java.util.List; import java.util.Set; import javax.net.ssl.KeyManagerFactory; @@ -35,7 +34,7 @@ public class SslConfigs { + "this config are dependent on the JVM. " + "Clients using the defaults for this config and 'ssl.enabled.protocols' will downgrade to 'TLSv1.2' if " + "the server does not support 'TLSv1.3'. If this config is set to 'TLSv1.2', however, clients will not use 'TLSv1.3' even " - + "if it is one of the values in ssl.enabled.protocols and the server only supports 'TLSv1.3'."; + + "if it is one of the values in `ssl.enabled.protocols` and the server only supports 'TLSv1.3'."; public static final String DEFAULT_SSL_PROTOCOL = "TLSv1.3"; @@ -50,14 +49,12 @@ public class SslConfigs { public static final String SSL_ENABLED_PROTOCOLS_DOC = "The list of protocols enabled for SSL connections. " + "The default is 'TLSv1.2,TLSv1.3'. This means that clients and servers will prefer TLSv1.3 if both support it " + "and fallback to TLSv1.2 otherwise (assuming both support at least TLSv1.2). This default should be fine for most use " - + "cases. If this configuration is set to an empty list, Kafka will use the protocols enabled by default in the underlying SSLEngine, " - + "which may include additional protocols depending on the JVM version. " - + "Also see the config documentation for ssl.protocol to understand how it can impact the TLS version negotiation behavior."; + + "cases. Also see the config documentation for `ssl.protocol` to understand how it can impact the TLS version negotiation behavior."; public static final String DEFAULT_SSL_ENABLED_PROTOCOLS = "TLSv1.2,TLSv1.3"; public static final String SSL_KEYSTORE_TYPE_CONFIG = "ssl.keystore.type"; public static final String SSL_KEYSTORE_TYPE_DOC = "The file format of the key store file. " - + "This is optional for client. The values currently supported by the default ssl.engine.factory.class are [JKS, PKCS12, PEM]."; + + "This is optional for client. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_KEYSTORE_TYPE = "JKS"; public static final String SSL_KEYSTORE_KEY_CONFIG = "ssl.keystore.key"; @@ -87,7 +84,7 @@ public class SslConfigs { + "the PEM key specified in 'ssl.keystore.key'."; public static final String SSL_TRUSTSTORE_TYPE_CONFIG = "ssl.truststore.type"; - public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default ssl.engine.factory.class are [JKS, PKCS12, PEM]."; + public static final String SSL_TRUSTSTORE_TYPE_DOC = "The file format of the trust store file. The values currently supported by the default `ssl.engine.factory.class` are [JKS, PKCS12, PEM]."; public static final String DEFAULT_SSL_TRUSTSTORE_TYPE = "JKS"; public static final String SSL_TRUSTSTORE_LOCATION_CONFIG = "ssl.truststore.location"; @@ -126,8 +123,8 @@ public class SslConfigs { public static void addClientSslSupport(ConfigDef config) { config.define(SslConfigs.SSL_PROTOCOL_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_PROTOCOL, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROTOCOL_DOC) .define(SslConfigs.SSL_PROVIDER_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_PROVIDER_DOC) - .define(SslConfigs.SSL_CIPHER_SUITES_CONFIG, ConfigDef.Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, SslConfigs.SSL_CIPHER_SUITES_DOC) - .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, ConfigDef.Type.LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SslConfigs.SSL_CIPHER_SUITES_CONFIG, ConfigDef.Type.LIST, null, ConfigDef.Importance.LOW, SslConfigs.SSL_CIPHER_SUITES_DOC) + .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, ConfigDef.Type.LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, ConfigDef.Type.STRING, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ConfigDef.Importance.MEDIUM, SslConfigs.SSL_KEYSTORE_TYPE_DOC) .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, ConfigDef.Type.STRING, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, ConfigDef.Type.PASSWORD, null, ConfigDef.Importance.HIGH, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) diff --git a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java index e97c39bc61911..10d6024163b87 100755 --- a/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java +++ b/clients/src/main/java/org/apache/kafka/common/config/TopicConfig.java @@ -21,7 +21,7 @@ *

        Keys that can be used to configure a topic. These keys are useful when creating or reconfiguring a * topic using the AdminClient. * - *

        The intended pattern is for broker configs to include a log. prefix. For example, to set the default broker + *

        The intended pattern is for broker configs to include a `log.` prefix. For example, to set the default broker * cleanup policy, one would set log.cleanup.policy instead of cleanup.policy. Unfortunately, there are many cases * where this pattern is not followed. */ @@ -58,10 +58,9 @@ public class TopicConfig { public static final String FLUSH_MS_CONFIG = "flush.ms"; public static final String FLUSH_MS_DOC = "This setting allows specifying a time interval at which we will " + "force an fsync of data written to the log. For example if this was set to 1000 " + - "we would fsync after 1000 ms had passed. Note that this setting depends on the broker-level " + - "configuration log.flush.scheduler.interval.ms, which controls how frequently the flush check occurs. " + - "In general we recommend you not set this and use replication for durability and allow the operating system's " + - "background flush capabilities as it is more efficient."; + "we would fsync after 1000 ms had passed. In general we recommend you not set " + + "this and use replication for durability and allow the operating system's background " + + "flush capabilities as it is more efficient."; public static final String RETENTION_BYTES_CONFIG = "retention.bytes"; public static final String RETENTION_BYTES_DOC = "This configuration controls the maximum size a partition " + @@ -81,19 +80,18 @@ public class TopicConfig { "Moreover, it triggers the rolling of new segment if the retention.ms condition is satisfied."; public static final String REMOTE_LOG_STORAGE_ENABLE_CONFIG = "remote.storage.enable"; - public static final String REMOTE_LOG_STORAGE_ENABLE_DOC = "To enable tiered storage for a topic, set this configuration to true. " + - "To disable tiered storage for a topic that has it enabled, set this configuration to false. " + - "When disabling, you must also set remote.log.delete.on.disable to true."; + public static final String REMOTE_LOG_STORAGE_ENABLE_DOC = "To enable tiered storage for a topic, set this configuration as true. " + + "You can not disable this config once it is enabled. It will be provided in future versions."; public static final String LOCAL_LOG_RETENTION_MS_CONFIG = "local.retention.ms"; public static final String LOCAL_LOG_RETENTION_MS_DOC = "The number of milliseconds to keep the local log segment before it gets deleted. " + - "Default value is -2, it represents retention.ms value is to be used. The effective value should always be less than or equal " + - "to retention.ms value."; + "Default value is -2, it represents `retention.ms` value is to be used. The effective value should always be less than or equal " + + "to `retention.ms` value."; public static final String LOCAL_LOG_RETENTION_BYTES_CONFIG = "local.retention.bytes"; public static final String LOCAL_LOG_RETENTION_BYTES_DOC = "The maximum size of local log segments that can grow for a partition before it " + - "deletes the old segments. Default value is -2, it represents retention.bytes value to be used. The effective value should always be " + - "less than or equal to retention.bytes value."; + "deletes the old segments. Default value is -2, it represents `retention.bytes` value to be used. The effective value should always be " + + "less than or equal to `retention.bytes` value."; public static final String REMOTE_LOG_COPY_DISABLE_CONFIG = "remote.log.copy.disable"; public static final String REMOTE_LOG_COPY_DISABLE_DOC = "Determines whether tiered data for a topic should become read only," + @@ -104,19 +102,22 @@ public class TopicConfig { public static final String REMOTE_LOG_DELETE_ON_DISABLE_CONFIG = "remote.log.delete.on.disable"; public static final String REMOTE_LOG_DELETE_ON_DISABLE_DOC = "Determines whether tiered data for a topic should be " + "deleted after tiered storage is disabled on a topic. This configuration should be enabled when trying to " + - "set remote.storage.enable from true to false"; + "set `remote.storage.enable` from true to false"; public static final String MAX_MESSAGE_BYTES_CONFIG = "max.message.bytes"; public static final String MAX_MESSAGE_BYTES_DOC = - "The largest record batch size allowed by Kafka (after compression if compression is enabled)."; + "The largest record batch size allowed by Kafka (after compression if compression is enabled). " + + "If this is increased and there are consumers older than 0.10.2, the consumers' fetch " + + "size must also be increased so that they can fetch record batches this large. " + + "In the latest message format version, records are always grouped into batches for efficiency. " + + "In previous message format versions, uncompressed records are not grouped into batches and this " + + "limit only applies to a single record in that case."; public static final String INDEX_INTERVAL_BYTES_CONFIG = "index.interval.bytes"; - public static final String INDEX_INTERVAL_BYTES_DOC = "This setting controls how frequently Kafka " + - "adds entries to its offset index and, conditionally, to its time index. " + - "The default setting ensures that we index a message roughly every 4096 bytes. " + - "More frequent indexing allows reads to jump closer to the exact position in the log " + - "but results in larger index files. You probably don't need to change this." + - "

        Note: the time index will be inserted only when the timestamp is greater than the last indexed timestamp.

        "; + public static final String INDEX_INTERVAL_BYTES_DOC = "This setting controls how frequently " + + "Kafka adds an index entry to its offset index. The default setting ensures that we index a " + + "message roughly every 4096 bytes. More indexing allows reads to jump closer to the exact " + + "position in the log but makes the index larger. You probably don't need to change this."; public static final String FILE_DELETE_DELAY_MS_CONFIG = "file.delete.delay.ms"; public static final String FILE_DELETE_DELAY_MS_DOC = "The time to wait before deleting a file from the " + @@ -159,33 +160,26 @@ public class TopicConfig { "log compaction, which retains the latest value for each key. " + "It is also possible to specify both policies in a comma-separated list (e.g. \"delete,compact\"). " + "In this case, old segments will be discarded per the retention time and size configuration, " + - "while retained segments will be compacted. " + - "An empty list means infinite retention - no cleanup policies will be applied and log segments " + - "will be retained indefinitely. Note that with remote storage enabled, local retention limits " + - "(log.local.retention.ms and log.local.retention.bytes) are still applied to local segments."; + "while retained segments will be compacted."; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG = "unclean.leader.election.enable"; public static final String UNCLEAN_LEADER_ELECTION_ENABLE_DOC = "Indicates whether to enable replicas " + "not in the ISR set to be elected as leader as a last resort, even though doing so may result in data " + "loss.

        Note: In KRaft mode, when enabling this config dynamically, it needs to wait for the unclean leader election" + - "thread to trigger election periodically (default is 5 minutes). Please run kafka-leader-election.sh with unclean option " + + "thread to trigger election periodically (default is 5 minutes). Please run `kafka-leader-election.sh` with `unclean` option " + "to trigger the unclean leader election immediately if needed.

        "; public static final String MIN_IN_SYNC_REPLICAS_CONFIG = "min.insync.replicas"; - public static final String MIN_IN_SYNC_REPLICAS_DOC = "Specifies the minimum number of in-sync replicas (including the leader) " + - "required for a write to succeed when a producer sets acks to \"all\" (or \"-1\"). In the acks=all " + - "case, every in-sync replica must acknowledge a write for it to be considered successful. E.g., if a topic has " + - "replication.factor of 3 and the ISR set includes all three replicas, then all three replicas must acknowledge an " + - "acks=all write for it to succeed, even if min.insync.replicas happens to be less than 3. " + - "If acks=all and the current ISR set contains fewer than min.insync.replicas members, then the producer " + - "will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
        " + + public static final String MIN_IN_SYNC_REPLICAS_DOC = "When a producer sets acks to \"all\" (or \"-1\"), " + + "this configuration specifies the minimum number of replicas that must acknowledge " + + "a write for the write to be considered successful. If this minimum cannot be met, " + + "then the producer will raise an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend).
        " + "Regardless of the acks setting, the messages will not be visible to the consumers until " + "they are replicated to all in-sync replicas and the min.insync.replicas condition is met.
        " + "When used together, min.insync.replicas and acks allow you to enforce greater durability guarantees. " + "A typical scenario would be to create a topic with a replication factor of 3, " + "set min.insync.replicas to 2, and produce with acks of \"all\". " + - "This ensures that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers." + - "

        Note that when the Eligible Leader Replicas feature is enabled, the semantics of this config changes. Please refer to the ELR section for more info.

        "; + "This will ensure that a majority of replicas must persist a write before it's considered successful by the producer and it's visible to consumers."; public static final String COMPRESSION_TYPE_CONFIG = "compression.type"; public static final String COMPRESSION_TYPE_DOC = "Specify the final compression type for a given topic. " + diff --git a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java index 48f3948ef9d1f..a8947ede15446 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java +++ b/clients/src/main/java/org/apache/kafka/common/config/internals/BrokerSecurityConfigs.java @@ -136,10 +136,6 @@ public class BrokerSecurityConfigs { // The allowlist of the SASL OAUTHBEARER endpoints public static final String ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG = "org.apache.kafka.sasl.oauthbearer.allowed.urls"; public static final String ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT = ""; - - public static final String ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG = "org.apache.kafka.sasl.oauthbearer.allowed.files"; - public static final String ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT = ""; - public static final ConfigDef CONFIG_DEF = new ConfigDef() // General Security Configuration .define(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG, LONG, BrokerSecurityConfigs.DEFAULT_CONNECTIONS_MAX_REAUTH_MS, MEDIUM, BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_DOC) @@ -154,7 +150,7 @@ public class BrokerSecurityConfigs { .define(BrokerSecurityConfigs.SSL_ALLOW_SAN_CHANGES_CONFIG, BOOLEAN, BrokerSecurityConfigs.DEFAULT_SSL_ALLOW_SAN_CHANGES_VALUE, LOW, BrokerSecurityConfigs.SSL_ALLOW_SAN_CHANGES_DOC) .define(SslConfigs.SSL_PROTOCOL_CONFIG, STRING, SslConfigs.DEFAULT_SSL_PROTOCOL, MEDIUM, SslConfigs.SSL_PROTOCOL_DOC) .define(SslConfigs.SSL_PROVIDER_CONFIG, STRING, null, MEDIUM, SslConfigs.SSL_PROVIDER_DOC) - .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, ConfigDef.ValidList.anyNonDuplicateValues(true, false), MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) + .define(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, LIST, SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS, MEDIUM, SslConfigs.SSL_ENABLED_PROTOCOLS_DOC) .define(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, STRING, SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, MEDIUM, SslConfigs.SSL_KEYSTORE_TYPE_DOC) .define(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, STRING, null, MEDIUM, SslConfigs.SSL_KEYSTORE_LOCATION_DOC) .define(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, PASSWORD, null, MEDIUM, SslConfigs.SSL_KEYSTORE_PASSWORD_DOC) @@ -194,22 +190,6 @@ public class BrokerSecurityConfigs { .define(SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS, INT, null, LOW, SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS, LONG, SaslConfigs.DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MAX_MS, LOW, SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS, LONG, SaslConfigs.DEFAULT_SASL_LOGIN_RETRY_BACKOFF_MS, LOW, SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, CLASS, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, CLASS, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_SCOPE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET, PASSWORD, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_ALGORITHM, ConfigDef.CaseInsensitiveValidString.in("ES256", "RS256"), MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS, ConfigDef.Range.between(0, 86400), LOW, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, BOOLEAN, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS, ConfigDef.Range.between(0, 3600), LOW, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE, PASSWORD, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, LOW, SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME, STRING, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, LOW, SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, STRING, null, MEDIUM, SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL_DOC) @@ -218,6 +198,6 @@ public class BrokerSecurityConfigs { .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, LONG, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS, LOW, SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, LONG, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS, LOW, SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, INT, SaslConfigs.DEFAULT_SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, LOW, SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS_DOC) - .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, LIST, List.of(), LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) + .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE, LIST, null, LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE_DOC) .define(SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER, STRING, null, LOW, SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER_DOC); } diff --git a/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java b/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java index 3e0fa5ed77258..86f991bf7a625 100644 --- a/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java +++ b/clients/src/main/java/org/apache/kafka/common/config/provider/DirectoryConfigProvider.java @@ -24,6 +24,7 @@ import org.slf4j.LoggerFactory; import java.io.IOException; +import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.nio.file.Path; import java.util.Map; @@ -116,7 +117,7 @@ private ConfigData get(String path, Predicate fileFilter) { private static String read(Path path) { try { - return Files.readString(path); + return new String(Files.readAllBytes(path), StandardCharsets.UTF_8); } catch (IOException e) { log.error("Could not read file {} for property {}", path, path.getFileName(), e); throw new ConfigException("Could not read file " + path + " for property " + path.getFileName()); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java b/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java index 5d1d50701736e..7a05eba03f2bc 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/AuthenticationException.java @@ -32,7 +32,7 @@ *
      5. {@link SslAuthenticationException} if SSL handshake failed due to any {@link SSLException}. * */ -public class AuthenticationException extends InvalidConfigurationException { +public class AuthenticationException extends ApiException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java b/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java index 8ff29af9597d4..0471fe643d9a8 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/AuthorizationException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class AuthorizationException extends InvalidConfigurationException { +public class AuthorizationException extends ApiException { public AuthorizationException(String message) { super(message); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/CoordinatorNotAvailableException.java b/clients/src/main/java/org/apache/kafka/common/errors/CoordinatorNotAvailableException.java index 2bd7d911a7a03..827ce54e0e58d 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/CoordinatorNotAvailableException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/CoordinatorNotAvailableException.java @@ -23,7 +23,7 @@ * In the context of the transactional coordinator, this error will be returned if the underlying transactional log * is under replicated or if an append to the log times out. */ -public class CoordinatorNotAvailableException extends RefreshRetriableException { +public class CoordinatorNotAvailableException extends RetriableException { public static final CoordinatorNotAvailableException INSTANCE = new CoordinatorNotAvailableException(); private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/FencedInstanceIdException.java b/clients/src/main/java/org/apache/kafka/common/errors/FencedInstanceIdException.java index ac4ae6bdcc6ef..78e4034a24a2f 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/FencedInstanceIdException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/FencedInstanceIdException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class FencedInstanceIdException extends ApplicationRecoverableException { +public class FencedInstanceIdException extends ApiException { private static final long serialVersionUID = 1L; public FencedInstanceIdException(String message) { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/GroupMaxSizeReachedException.java b/clients/src/main/java/org/apache/kafka/common/errors/GroupMaxSizeReachedException.java index fd55289dcccea..85d0c7d25ce85 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/GroupMaxSizeReachedException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/GroupMaxSizeReachedException.java @@ -17,7 +17,7 @@ package org.apache.kafka.common.errors; /** - * Indicates that a group is already at its configured maximum capacity and cannot accommodate more members + * Indicates that a consumer group is already at its configured maximum capacity and cannot accommodate more members */ public class GroupMaxSizeReachedException extends ApiException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/IllegalGenerationException.java b/clients/src/main/java/org/apache/kafka/common/errors/IllegalGenerationException.java index 2d739ce0290dc..efd749f7f2c95 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/IllegalGenerationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/IllegalGenerationException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class IllegalGenerationException extends ApplicationRecoverableException { +public class IllegalGenerationException extends ApiException { private static final long serialVersionUID = 1L; public IllegalGenerationException() { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java index 85e63c42ec398..333566a64ba1f 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidConfigurationException.java @@ -20,20 +20,12 @@ public class InvalidConfigurationException extends ApiException { private static final long serialVersionUID = 1L; - public InvalidConfigurationException(String message, Throwable cause) { - super(message, cause); - } - public InvalidConfigurationException(String message) { super(message); } - public InvalidConfigurationException(Throwable cause) { - super(cause); - } - - public InvalidConfigurationException() { - super(); + public InvalidConfigurationException(String message, Throwable cause) { + super(message, cause); } } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidMetadataException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidMetadataException.java index f3485a8364acb..504e8f3cc782b 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidMetadataException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidMetadataException.java @@ -19,7 +19,7 @@ /** * An exception that may indicate the client's metadata is out of date */ -public abstract class InvalidMetadataException extends RefreshRetriableException { +public abstract class InvalidMetadataException extends RetriableException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidPidMappingException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidPidMappingException.java index 626a19507f400..69fb71ea42b19 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidPidMappingException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidPidMappingException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class InvalidPidMappingException extends ApplicationRecoverableException { +public class InvalidPidMappingException extends ApiException { public InvalidPidMappingException(String message) { super(message); } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidProducerEpochException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidProducerEpochException.java index b65ea67aed52c..79b82368feb95 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidProducerEpochException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidProducerEpochException.java @@ -22,7 +22,7 @@ * by calling KafkaProducer#abortTransaction which would try to send initPidRequest and reinitialize the producer * under the hood. */ -public class InvalidProducerEpochException extends ApplicationRecoverableException { +public class InvalidProducerEpochException extends ApiException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java index 508d73a793f5d..699d5a83a432a 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidReplicationFactorException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class InvalidReplicationFactorException extends InvalidConfigurationException { +public class InvalidReplicationFactorException extends ApiException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java index f861dbfee18bb..423c09166b7ba 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidRequiredAcksException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class InvalidRequiredAcksException extends InvalidConfigurationException { +public class InvalidRequiredAcksException extends ApiException { private static final long serialVersionUID = 1L; public InvalidRequiredAcksException(String message) { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java b/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java index cf0ed5ed5cc9b..344d231ce9e6d 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/InvalidTopicException.java @@ -27,7 +27,7 @@ * * @see UnknownTopicOrPartitionException */ -public class InvalidTopicException extends InvalidConfigurationException { +public class InvalidTopicException extends ApiException { private static final long serialVersionUID = 1L; private final Set invalidTopics; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/NotCoordinatorException.java b/clients/src/main/java/org/apache/kafka/common/errors/NotCoordinatorException.java index dc116d02c05c5..00ca32cffd10a 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/NotCoordinatorException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/NotCoordinatorException.java @@ -23,7 +23,7 @@ * In the context of the transactional coordinator, it returns this error when it receives a transactional * request with a transactionalId the coordinator doesn't own. */ -public class NotCoordinatorException extends RefreshRetriableException { +public class NotCoordinatorException extends RetriableException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/ProducerFencedException.java b/clients/src/main/java/org/apache/kafka/common/errors/ProducerFencedException.java index 4a83bcbeb87af..c47dbf53a8caa 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/ProducerFencedException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/ProducerFencedException.java @@ -22,7 +22,7 @@ * given time, and the latest one to be started "fences" the previous instances so that they can no longer * make transactional requests. When you encounter this exception, you must close the producer instance. */ -public class ProducerFencedException extends ApplicationRecoverableException { +public class ProducerFencedException extends ApiException { public ProducerFencedException(String msg) { super(msg); diff --git a/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java b/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java index 24f563e1c2777..d1ba8ff716095 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/RecordBatchTooLargeException.java @@ -19,7 +19,7 @@ /** * This record batch is larger than the maximum allowable size */ -public class RecordBatchTooLargeException extends InvalidConfigurationException { +public class RecordBatchTooLargeException extends ApiException { private static final long serialVersionUID = 1L; diff --git a/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java index 544a5c122b2f8..aa592d552bf00 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/TransactionAbortableException.java @@ -17,13 +17,6 @@ package org.apache.kafka.common.errors; public class TransactionAbortableException extends ApiException { - - private static final long serialVersionUID = 1L; - - public TransactionAbortableException(String message, Throwable cause) { - super(message, cause); - } - public TransactionAbortableException(String message) { super(message); } diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnknownMemberIdException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnknownMemberIdException.java index 88d717c120f3a..f6eea5bc8ea02 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnknownMemberIdException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnknownMemberIdException.java @@ -16,7 +16,7 @@ */ package org.apache.kafka.common.errors; -public class UnknownMemberIdException extends ApplicationRecoverableException { +public class UnknownMemberIdException extends ApiException { private static final long serialVersionUID = 1L; public UnknownMemberIdException() { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java index 03add5c4ffce8..f66298e99c30b 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedForMessageFormatException.java @@ -20,7 +20,7 @@ * The message format version does not support the requested function. For example, if idempotence is * requested and the topic is using a message format older than 0.11.0.0, then this error will be returned. */ -public class UnsupportedForMessageFormatException extends InvalidConfigurationException { +public class UnsupportedForMessageFormatException extends ApiException { private static final long serialVersionUID = 1L; public UnsupportedForMessageFormatException(String message) { diff --git a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java index 00da3a1b2b8b2..484947b0ae2b4 100644 --- a/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java +++ b/clients/src/main/java/org/apache/kafka/common/errors/UnsupportedVersionException.java @@ -28,7 +28,7 @@ * is raised from {@link org.apache.kafka.clients.consumer.KafkaConsumer#offsetsForTimes(Map)}, it would * be possible to revert to alternative logic to set the consumer's position. */ -public class UnsupportedVersionException extends InvalidConfigurationException { +public class UnsupportedVersionException extends ApiException { private static final long serialVersionUID = 1L; public UnsupportedVersionException(String message, Throwable cause) { diff --git a/clients/src/main/java/org/apache/kafka/common/feature/Features.java b/clients/src/main/java/org/apache/kafka/common/feature/Features.java index 16f496810b5cc..b2f0543faf0a5 100644 --- a/clients/src/main/java/org/apache/kafka/common/feature/Features.java +++ b/clients/src/main/java/org/apache/kafka/common/feature/Features.java @@ -145,7 +145,7 @@ public boolean equals(Object other) { return false; } - final Features that = (Features) other; + final Features that = (Features) other; return Objects.equals(this.features, that.features); } diff --git a/clients/src/main/java/org/apache/kafka/common/header/Header.java b/clients/src/main/java/org/apache/kafka/common/header/Header.java index e1d0aa00a4417..58869b41fb777 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/Header.java +++ b/clients/src/main/java/org/apache/kafka/common/header/Header.java @@ -16,23 +16,10 @@ */ package org.apache.kafka.common.header; -/** - * A header is a key-value pair. - */ public interface Header { - - /** - * Returns the key of the header. - * - * @return the header's key; must not be null. - */ + String key(); - /** - * Returns the value of the header. - * - * @return the header's value; may be null. - */ byte[] value(); - + } diff --git a/clients/src/main/java/org/apache/kafka/common/header/Headers.java b/clients/src/main/java/org/apache/kafka/common/header/Headers.java index 9cce54a5c5cc5..b736cbcabcc9b 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/Headers.java +++ b/clients/src/main/java/org/apache/kafka/common/header/Headers.java @@ -16,18 +16,12 @@ */ package org.apache.kafka.common.header; - -/** - * A mutable ordered collection of {@link Header} objects. Note that multiple headers may have the same {@link Header#key() key}. - *

        - * The order of headers is preserved in the order they were added. - */ public interface Headers extends Iterable

        { /** * Adds a header (key inside), to the end, returning if the operation succeeded. * - * @param header the Header to be added. + * @param header the Header to be added * @return this instance of the Headers, once the header is added. * @throws IllegalStateException is thrown if headers are in a read-only state. */ @@ -36,18 +30,17 @@ public interface Headers extends Iterable
        { /** * Creates and adds a header, to the end, returning if the operation succeeded. * - * @param key of the header to be added; must not be null. - * @param value of the header to be added; may be null. + * @param key of the header to be added. + * @param value of the header to be added. * @return this instance of the Headers, once the header is added. * @throws IllegalStateException is thrown if headers are in a read-only state. */ Headers add(String key, byte[] value) throws IllegalStateException; /** - * Removes all headers for the given key returning if the operation succeeded, - * while preserving the insertion order of the remaining headers. + * Removes all headers for the given key returning if the operation succeeded. * - * @param key to remove all headers for; must not be null. + * @param key to remove all headers for. * @return this instance of the Headers, once the header is removed. * @throws IllegalStateException is thrown if headers are in a read-only state. */ @@ -56,17 +49,16 @@ public interface Headers extends Iterable
        { /** * Returns just one (the very last) header for the given key, if present. * - * @param key to get the last header for; must not be null. + * @param key to get the last header for. * @return this last header matching the given key, returns null if not present. */ Header lastHeader(String key); /** * Returns all headers for the given key, in the order they were added in, if present. - * The iterator does not support {@link java.util.Iterator#remove()}. * - * @param key to return the headers for; must not be null. - * @return all headers for the given key, in the order they were added in, if NO headers are present an empty iterable is returned. + * @param key to return the headers for. + * @return all headers for the given key, in the order they were added in, if NO headers are present an empty iterable is returned. */ Iterable
        headers(String key); diff --git a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java index 6311f6ca34825..52863c6c0b564 100644 --- a/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java +++ b/clients/src/main/java/org/apache/kafka/common/header/internals/RecordHeaders.java @@ -108,10 +108,6 @@ public void setReadOnly() { this.isReadOnly = true; } - public boolean isReadOnly() { - return isReadOnly; - } - public Header[] toArray() { return headers.isEmpty() ? Record.EMPTY_HEADERS : headers.toArray(new Header[0]); } diff --git a/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java b/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java index 1ce0557119291..96652df9410b5 100644 --- a/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java +++ b/clients/src/main/java/org/apache/kafka/common/internals/PartitionStates.java @@ -24,6 +24,7 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; import java.util.function.BiConsumer; @@ -150,6 +151,46 @@ private void update(Map partitionToState) { } } + public static class PartitionState { + private final TopicPartition topicPartition; + private final S value; + public PartitionState(TopicPartition topicPartition, S state) { + this.topicPartition = Objects.requireNonNull(topicPartition); + this.value = Objects.requireNonNull(state); + } + + public S value() { + return value; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + PartitionState that = (PartitionState) o; + + return topicPartition.equals(that.topicPartition) && value.equals(that.value); + } + + @Override + public int hashCode() { + int result = topicPartition.hashCode(); + result = 31 * result + value.hashCode(); + return result; + } + + public TopicPartition topicPartition() { + return topicPartition; + } + + @Override + public String toString() { + return "PartitionState(" + topicPartition + "=" + value + ')'; + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java index 182a8c7484931..13d8db4b0cb13 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Metrics.java @@ -48,7 +48,7 @@ * A registry of sensors and metrics. *

        * A metric is a named, numerical measurement. A sensor is a handle to record numerical measurements as they occur. Each - * Sensor has zero or more associated metrics. For example, a Sensor might represent message sizes, and we might associate + * Sensor has zero or more associated metrics. For example a Sensor might represent message sizes and we might associate * with this sensor a metric for the average, maximum, or other statistics computed off the sequence of message sizes * that are recorded by the sensor. *

        @@ -58,9 +58,9 @@ * // set up metrics: * Metrics metrics = new Metrics(); // this is the global repository of metrics and sensors * Sensor sensor = metrics.sensor("message-sizes"); - * MetricName metricName = metrics.metricName("message-size-avg", "producer-metrics"); + * MetricName metricName = new MetricName("message-size-avg", "producer-metrics"); * sensor.add(metricName, new Avg()); - * metricName = metrics.metricName("message-size-max", "producer-metrics"); + * metricName = new MetricName("message-size-max", "producer-metrics"); * sensor.add(metricName, new Max()); * * // as messages are sent we record the sizes @@ -553,7 +553,7 @@ public synchronized KafkaMetric removeMetric(MetricName metricName) { try { reporter.metricRemoval(metric); } catch (Exception e) { - log.error("Error when removing metric from {}", reporter.getClass().getName(), e); + log.error("Error when removing metric from " + reporter.getClass().getName(), e); } } log.trace("Removed metric named {}", metricName); @@ -596,7 +596,7 @@ synchronized KafkaMetric registerMetric(KafkaMetric metric) { try { reporter.metricChange(metric); } catch (Exception e) { - log.error("Error when registering metric on {}", reporter.getClass().getName(), e); + log.error("Error when registering metric on " + reporter.getClass().getName(), e); } } log.trace("Registered metric named {}", metricName); @@ -688,7 +688,7 @@ public void close() { log.info("Closing reporter {}", reporter.getClass().getName()); reporter.close(); } catch (Exception e) { - log.error("Error when closing {}", reporter.getClass().getName(), e); + log.error("Error when closing " + reporter.getClass().getName(), e); } } log.info("Metrics reporters closed"); diff --git a/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java b/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java index a5da5294b4d4d..b0761248894c5 100644 --- a/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java +++ b/clients/src/main/java/org/apache/kafka/common/metrics/Sensor.java @@ -367,7 +367,7 @@ public boolean hasExpired() { } synchronized List metrics() { - return List.copyOf(this.metrics.values()); + return unmodifiableList(new ArrayList<>(this.metrics.values())); } /** diff --git a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java index 22c24f8408c89..df6ccd67ce5bb 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java +++ b/clients/src/main/java/org/apache/kafka/common/network/KafkaChannel.java @@ -681,14 +681,4 @@ private void swapAuthenticatorsAndBeginReauthentication(ReauthenticationContext public ChannelMetadataRegistry channelMetadataRegistry() { return metadataRegistry; } - - - /** - * Maybe add write interest after re-authentication. This is to ensure that any pending write operation - * is resumed. - */ - public void maybeAddWriteInterestAfterReauth() { - if (send != null) - this.transportLayer.addInterestOps(SelectionKey.OP_WRITE); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java index cf4ef470af0db..c6181b81c5e73 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/PlaintextChannelBuilder.java @@ -103,7 +103,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return Optional.of(principalBuilder); + return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/network/Selector.java b/clients/src/main/java/org/apache/kafka/common/network/Selector.java index 7acf88269ee14..151a0fbbd8876 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/Selector.java +++ b/clients/src/main/java/org/apache/kafka/common/network/Selector.java @@ -551,7 +551,6 @@ void pollSelectionKeys(Set selectionKeys, boolean isReauthentication = channel.successfulAuthentications() > 1; if (isReauthentication) { sensors.successfulReauthentication.record(1.0, readyTimeMs); - channel.maybeAddWriteInterestAfterReauth(); if (channel.reauthenticationLatencyMs() == null) log.warn( "Should never happen: re-authentication latency for a re-authenticated channel was null; continuing..."); diff --git a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java index a35a0b8b20938..b45fb07442e7f 100644 --- a/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/network/SslChannelBuilder.java @@ -164,7 +164,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return Optional.of(principalBuilder); + return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java index 89b952e6ce766..d12c8765c53b7 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ApiKeys.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.message.ApiVersionsResponseData; import org.apache.kafka.common.protocol.types.Schema; import org.apache.kafka.common.protocol.types.Type; +import org.apache.kafka.common.record.RecordBatch; import java.util.ArrayList; import java.util.Arrays; @@ -66,11 +67,11 @@ public enum ApiKeys { DELETE_RECORDS(ApiMessageType.DELETE_RECORDS), INIT_PRODUCER_ID(ApiMessageType.INIT_PRODUCER_ID), OFFSET_FOR_LEADER_EPOCH(ApiMessageType.OFFSET_FOR_LEADER_EPOCH), - ADD_PARTITIONS_TO_TXN(ApiMessageType.ADD_PARTITIONS_TO_TXN, false, false), - ADD_OFFSETS_TO_TXN(ApiMessageType.ADD_OFFSETS_TO_TXN, false, false), - END_TXN(ApiMessageType.END_TXN, false, false), - WRITE_TXN_MARKERS(ApiMessageType.WRITE_TXN_MARKERS, true, false), - TXN_OFFSET_COMMIT(ApiMessageType.TXN_OFFSET_COMMIT, false, false), + ADD_PARTITIONS_TO_TXN(ApiMessageType.ADD_PARTITIONS_TO_TXN, false, RecordBatch.MAGIC_VALUE_V2, false), + ADD_OFFSETS_TO_TXN(ApiMessageType.ADD_OFFSETS_TO_TXN, false, RecordBatch.MAGIC_VALUE_V2, false), + END_TXN(ApiMessageType.END_TXN, false, RecordBatch.MAGIC_VALUE_V2, false), + WRITE_TXN_MARKERS(ApiMessageType.WRITE_TXN_MARKERS, true, RecordBatch.MAGIC_VALUE_V2, false), + TXN_OFFSET_COMMIT(ApiMessageType.TXN_OFFSET_COMMIT, false, RecordBatch.MAGIC_VALUE_V2, false), DESCRIBE_ACLS(ApiMessageType.DESCRIBE_ACLS), CREATE_ACLS(ApiMessageType.CREATE_ACLS, false, true), DELETE_ACLS(ApiMessageType.DELETE_ACLS, false, true), @@ -94,19 +95,19 @@ public enum ApiKeys { ALTER_CLIENT_QUOTAS(ApiMessageType.ALTER_CLIENT_QUOTAS, false, true), DESCRIBE_USER_SCRAM_CREDENTIALS(ApiMessageType.DESCRIBE_USER_SCRAM_CREDENTIALS), ALTER_USER_SCRAM_CREDENTIALS(ApiMessageType.ALTER_USER_SCRAM_CREDENTIALS, false, true), - VOTE(ApiMessageType.VOTE, true, false), - BEGIN_QUORUM_EPOCH(ApiMessageType.BEGIN_QUORUM_EPOCH, true, false), - END_QUORUM_EPOCH(ApiMessageType.END_QUORUM_EPOCH, true, false), - DESCRIBE_QUORUM(ApiMessageType.DESCRIBE_QUORUM, true, true), + VOTE(ApiMessageType.VOTE, true, RecordBatch.MAGIC_VALUE_V0, false), + BEGIN_QUORUM_EPOCH(ApiMessageType.BEGIN_QUORUM_EPOCH, true, RecordBatch.MAGIC_VALUE_V0, false), + END_QUORUM_EPOCH(ApiMessageType.END_QUORUM_EPOCH, true, RecordBatch.MAGIC_VALUE_V0, false), + DESCRIBE_QUORUM(ApiMessageType.DESCRIBE_QUORUM, true, RecordBatch.MAGIC_VALUE_V0, true), ALTER_PARTITION(ApiMessageType.ALTER_PARTITION, true), UPDATE_FEATURES(ApiMessageType.UPDATE_FEATURES, true, true), - ENVELOPE(ApiMessageType.ENVELOPE, true, false), - FETCH_SNAPSHOT(ApiMessageType.FETCH_SNAPSHOT, false, false), + ENVELOPE(ApiMessageType.ENVELOPE, true, RecordBatch.MAGIC_VALUE_V0, false), + FETCH_SNAPSHOT(ApiMessageType.FETCH_SNAPSHOT, false, RecordBatch.MAGIC_VALUE_V0, false), DESCRIBE_CLUSTER(ApiMessageType.DESCRIBE_CLUSTER), DESCRIBE_PRODUCERS(ApiMessageType.DESCRIBE_PRODUCERS), - BROKER_REGISTRATION(ApiMessageType.BROKER_REGISTRATION, true, false), - BROKER_HEARTBEAT(ApiMessageType.BROKER_HEARTBEAT, true, false), - UNREGISTER_BROKER(ApiMessageType.UNREGISTER_BROKER, false, true), + BROKER_REGISTRATION(ApiMessageType.BROKER_REGISTRATION, true, RecordBatch.MAGIC_VALUE_V0, false), + BROKER_HEARTBEAT(ApiMessageType.BROKER_HEARTBEAT, true, RecordBatch.MAGIC_VALUE_V0, false), + UNREGISTER_BROKER(ApiMessageType.UNREGISTER_BROKER, false, RecordBatch.MAGIC_VALUE_V0, true), DESCRIBE_TRANSACTIONS(ApiMessageType.DESCRIBE_TRANSACTIONS), LIST_TRANSACTIONS(ApiMessageType.LIST_TRANSACTIONS), ALLOCATE_PRODUCER_IDS(ApiMessageType.ALLOCATE_PRODUCER_IDS, true, true), @@ -116,26 +117,20 @@ public enum ApiKeys { GET_TELEMETRY_SUBSCRIPTIONS(ApiMessageType.GET_TELEMETRY_SUBSCRIPTIONS), PUSH_TELEMETRY(ApiMessageType.PUSH_TELEMETRY), ASSIGN_REPLICAS_TO_DIRS(ApiMessageType.ASSIGN_REPLICAS_TO_DIRS), - LIST_CONFIG_RESOURCES(ApiMessageType.LIST_CONFIG_RESOURCES), + LIST_CLIENT_METRICS_RESOURCES(ApiMessageType.LIST_CLIENT_METRICS_RESOURCES), DESCRIBE_TOPIC_PARTITIONS(ApiMessageType.DESCRIBE_TOPIC_PARTITIONS), SHARE_GROUP_HEARTBEAT(ApiMessageType.SHARE_GROUP_HEARTBEAT), SHARE_GROUP_DESCRIBE(ApiMessageType.SHARE_GROUP_DESCRIBE), SHARE_FETCH(ApiMessageType.SHARE_FETCH), SHARE_ACKNOWLEDGE(ApiMessageType.SHARE_ACKNOWLEDGE), - ADD_RAFT_VOTER(ApiMessageType.ADD_RAFT_VOTER, false, true), - REMOVE_RAFT_VOTER(ApiMessageType.REMOVE_RAFT_VOTER, false, true), + ADD_RAFT_VOTER(ApiMessageType.ADD_RAFT_VOTER, false, RecordBatch.MAGIC_VALUE_V0, true), + REMOVE_RAFT_VOTER(ApiMessageType.REMOVE_RAFT_VOTER, false, RecordBatch.MAGIC_VALUE_V0, true), UPDATE_RAFT_VOTER(ApiMessageType.UPDATE_RAFT_VOTER), INITIALIZE_SHARE_GROUP_STATE(ApiMessageType.INITIALIZE_SHARE_GROUP_STATE, true), READ_SHARE_GROUP_STATE(ApiMessageType.READ_SHARE_GROUP_STATE, true), WRITE_SHARE_GROUP_STATE(ApiMessageType.WRITE_SHARE_GROUP_STATE, true), DELETE_SHARE_GROUP_STATE(ApiMessageType.DELETE_SHARE_GROUP_STATE, true), - READ_SHARE_GROUP_STATE_SUMMARY(ApiMessageType.READ_SHARE_GROUP_STATE_SUMMARY, true), - STREAMS_GROUP_HEARTBEAT(ApiMessageType.STREAMS_GROUP_HEARTBEAT), - STREAMS_GROUP_DESCRIBE(ApiMessageType.STREAMS_GROUP_DESCRIBE), - DESCRIBE_SHARE_GROUP_OFFSETS(ApiMessageType.DESCRIBE_SHARE_GROUP_OFFSETS), - ALTER_SHARE_GROUP_OFFSETS(ApiMessageType.ALTER_SHARE_GROUP_OFFSETS), - DELETE_SHARE_GROUP_OFFSETS(ApiMessageType.DELETE_SHARE_GROUP_OFFSETS); - + READ_SHARE_GROUP_STATE_SUMMARY(ApiMessageType.READ_SHARE_GROUP_STATE_SUMMARY, true); private static final Map> APIS_BY_LISTENER = new EnumMap<>(ApiMessageType.ListenerType.class); @@ -164,6 +159,9 @@ public enum ApiKeys { /** indicates if this is a ClusterAction request used only by brokers */ public final boolean clusterAction; + /** indicates the minimum required inter broker magic required to support the API */ + public final byte minRequiredInterBrokerMagic; + /** indicates whether the API is enabled for forwarding */ public final boolean forwardable; @@ -176,18 +174,24 @@ public enum ApiKeys { } ApiKeys(ApiMessageType messageType, boolean clusterAction) { - this(messageType, clusterAction, false); + this(messageType, clusterAction, RecordBatch.MAGIC_VALUE_V0, false); + } + + ApiKeys(ApiMessageType messageType, boolean clusterAction, boolean forwardable) { + this(messageType, clusterAction, RecordBatch.MAGIC_VALUE_V0, forwardable); } ApiKeys( ApiMessageType messageType, boolean clusterAction, + byte minRequiredInterBrokerMagic, boolean forwardable ) { this.messageType = messageType; this.id = messageType.apiKey(); this.name = messageType.name; this.clusterAction = clusterAction; + this.minRequiredInterBrokerMagic = minRequiredInterBrokerMagic; this.requiresDelayedAllocation = forwardable || shouldRetainsBufferReference(messageType.requestSchemas()); this.forwardable = forwardable; } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java b/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java index c3e2886e656a7..f643f5b5779b1 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/ByteBufferAccessor.java @@ -145,11 +145,6 @@ public int remaining() { return buf.remaining(); } - @Override - public Readable slice() { - return new ByteBufferAccessor(buf.slice()); - } - public void flip() { buf.flip(); } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java index a27a7fcf23c77..309ae7bc86a36 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Errors.java @@ -115,14 +115,10 @@ import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.SaslAuthenticationException; import org.apache.kafka.common.errors.SecurityDisabledException; -import org.apache.kafka.common.errors.ShareSessionLimitReachedException; import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.errors.SnapshotNotFoundException; import org.apache.kafka.common.errors.StaleBrokerEpochException; import org.apache.kafka.common.errors.StaleMemberEpochException; -import org.apache.kafka.common.errors.StreamsInvalidTopologyEpochException; -import org.apache.kafka.common.errors.StreamsInvalidTopologyException; -import org.apache.kafka.common.errors.StreamsTopologyFencedException; import org.apache.kafka.common.errors.TelemetryTooLargeException; import org.apache.kafka.common.errors.ThrottlingQuotaExceededException; import org.apache.kafka.common.errors.TimeoutException; @@ -229,7 +225,7 @@ public enum Errors { "The group member's supported protocols are incompatible with those of existing members " + "or first group member tried to join with empty protocol type or empty protocol list.", InconsistentGroupProtocolException::new), - INVALID_GROUP_ID(24, "The group id is invalid.", + INVALID_GROUP_ID(24, "The configured groupId is invalid.", InvalidGroupIdException::new), UNKNOWN_MEMBER_ID(25, "The coordinator is not aware of this member.", UnknownMemberIdException::new), @@ -358,7 +354,7 @@ public enum Errors { MemberIdRequiredException::new), PREFERRED_LEADER_NOT_AVAILABLE(80, "The preferred leader was not available.", PreferredLeaderNotAvailableException::new), - GROUP_MAX_SIZE_REACHED(81, "The group has reached its maximum size.", GroupMaxSizeReachedException::new), + GROUP_MAX_SIZE_REACHED(81, "The consumer group has reached its max size.", GroupMaxSizeReachedException::new), FENCED_INSTANCE_ID(82, "The broker rejected this static consumer since " + "another consumer with the same group.instance.id has registered with a different member.id.", FencedInstanceIdException::new), @@ -384,7 +380,10 @@ public enum Errors { PRINCIPAL_DESERIALIZATION_FAILURE(97, "Request principal deserialization failed during forwarding. " + "This indicates an internal error on the broker cluster security setup.", PrincipalDeserializationException::new), SNAPSHOT_NOT_FOUND(98, "Requested snapshot was not found.", SnapshotNotFoundException::new), - POSITION_OUT_OF_RANGE(99, "Requested position is not greater than or equal to zero, and less than the size of the snapshot.", PositionOutOfRangeException::new), + POSITION_OUT_OF_RANGE( + 99, + "Requested position is not greater than or equal to zero, and less than the size of the snapshot.", + PositionOutOfRangeException::new), UNKNOWN_TOPIC_ID(100, "This server does not host this topic ID.", UnknownTopicIdException::new), DUPLICATE_BROKER_REGISTRATION(101, "This broker ID is already in use.", DuplicateBrokerRegistrationException::new), BROKER_ID_NOT_REGISTERED(102, "The given broker ID was not registered.", BrokerIdNotRegisteredException::new), @@ -414,11 +413,7 @@ public enum Errors { DUPLICATE_VOTER(126, "The voter is already part of the set of voters.", DuplicateVoterException::new), VOTER_NOT_FOUND(127, "The voter is not part of the set of voters.", VoterNotFoundException::new), INVALID_REGULAR_EXPRESSION(128, "The regular expression is not valid.", InvalidRegularExpression::new), - REBOOTSTRAP_REQUIRED(129, "Client metadata is stale. The client should rebootstrap to obtain new metadata.", RebootstrapRequiredException::new), - STREAMS_INVALID_TOPOLOGY(130, "The supplied topology is invalid.", StreamsInvalidTopologyException::new), - STREAMS_INVALID_TOPOLOGY_EPOCH(131, "The supplied topology epoch is invalid.", StreamsInvalidTopologyEpochException::new), - STREAMS_TOPOLOGY_FENCED(132, "The supplied topology epoch is outdated.", StreamsTopologyFencedException::new), - SHARE_SESSION_LIMIT_REACHED(133, "The limit of share sessions has been reached.", ShareSessionLimitReachedException::new); + REBOOTSTRAP_REQUIRED(129, "Client metadata is stale, client should rebootstrap to obtain new metadata.", RebootstrapRequiredException::new); private static final Logger log = LoggerFactory.getLogger(Errors.class); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java index fe79bb94e6517..ab6600a7d059f 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/MessageUtil.java @@ -204,13 +204,13 @@ public static boolean compareRawTaggedFields(List first, } } - public static ByteBufferAccessor toByteBufferAccessor(final Message message, final short version) { + public static ByteBuffer toByteBuffer(final Message message, final short version) { ObjectSerializationCache cache = new ObjectSerializationCache(); int messageSize = message.size(cache, version); ByteBufferAccessor bytes = new ByteBufferAccessor(ByteBuffer.allocate(messageSize)); message.write(bytes, cache, version); bytes.flip(); - return bytes; + return bytes.buffer(); } public static ByteBuffer toVersionPrefixedByteBuffer(final short version, final Message message) { @@ -225,38 +225,11 @@ public static ByteBuffer toVersionPrefixedByteBuffer(final short version, final public static byte[] toVersionPrefixedBytes(final short version, final Message message) { ByteBuffer buffer = toVersionPrefixedByteBuffer(version, message); - // take the inner array directly if it is full of data. - if (buffer.hasArray() && - buffer.arrayOffset() == 0 && - buffer.position() == 0 && - buffer.limit() == buffer.array().length) return buffer.array(); - else return Utils.toArray(buffer); - } - - public static ByteBuffer toCoordinatorTypePrefixedByteBuffer(final ApiMessage message) { - if (message.apiKey() < 0) { - throw new IllegalArgumentException("Cannot serialize a message without an api key."); - } - if (message.highestSupportedVersion() != 0 || message.lowestSupportedVersion() != 0) { - throw new IllegalArgumentException("Cannot serialize a message with a different version than 0."); - } - - ObjectSerializationCache cache = new ObjectSerializationCache(); - int messageSize = message.size(cache, (short) 0); - ByteBufferAccessor bytes = new ByteBufferAccessor(ByteBuffer.allocate(messageSize + 2)); - bytes.writeShort(message.apiKey()); - message.write(bytes, cache, (short) 0); - bytes.flip(); - return bytes.buffer(); - } - - public static byte[] toCoordinatorTypePrefixedBytes(final ApiMessage message) { - ByteBuffer buffer = toCoordinatorTypePrefixedByteBuffer(message); - // take the inner array directly if it is full of data. + // take the inner array directly if it is full with data if (buffer.hasArray() && - buffer.arrayOffset() == 0 && - buffer.position() == 0 && - buffer.limit() == buffer.array().length) return buffer.array(); + buffer.arrayOffset() == 0 && + buffer.position() == 0 && + buffer.limit() == buffer.array().length) return buffer.array(); else return Utils.toArray(buffer); } } diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java index c23aa1782d61a..237948f61c97d 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Protocol.java @@ -208,7 +208,7 @@ public static String toHtml() { // Responses b.append("Responses:
        \n"); Schema[] responses = key.messageType.responseSchemas(); - for (int version = key.oldestVersion(); version <= key.latestVersion(); version++) { + for (int version = key.oldestVersion(); version < key.latestVersion(); version++) { Schema schema = responses[version]; if (schema == null) throw new IllegalStateException("Unexpected null schema for " + key + " with version " + version); diff --git a/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java b/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java index a00a7dae596c3..80bee86748269 100644 --- a/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java +++ b/clients/src/main/java/org/apache/kafka/common/protocol/Readable.java @@ -39,15 +39,6 @@ public interface Readable { long readVarlong(); int remaining(); - /** - * Returns a new Readable object whose content will be shared with this object. - *
        - * The content of the new Readable object will start at this Readable's current - * position. The two Readable position will be independent, so read from one will - * not impact the other. - */ - Readable slice(); - default String readString(int length) { byte[] arr = readArray(length); return new String(arr, StandardCharsets.UTF_8); diff --git a/clients/src/main/java/org/apache/kafka/common/record/ConvertedRecords.java b/clients/src/main/java/org/apache/kafka/common/record/ConvertedRecords.java new file mode 100644 index 0000000000000..79ce2c83f2894 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/record/ConvertedRecords.java @@ -0,0 +1,36 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.record; + +public class ConvertedRecords { + + private final T records; + private final RecordValidationStats recordValidationStats; + + public ConvertedRecords(T records, RecordValidationStats recordValidationStats) { + this.records = records; + this.recordValidationStats = recordValidationStats; + } + + public T records() { + return records; + } + + public RecordValidationStats recordConversionStats() { + return recordValidationStats; + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java b/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java index d6e9cc6bd7fbb..912c3490f4341 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java +++ b/clients/src/main/java/org/apache/kafka/common/record/DefaultRecordBatch.java @@ -159,7 +159,7 @@ public void ensureValid() { /** * Gets the base timestamp of the batch which is used to calculate the record timestamps from the deltas. - * + * * @return The base timestamp */ public long baseTimestamp() { @@ -502,7 +502,6 @@ public static void writeHeader(ByteBuffer buffer, public String toString() { return "RecordBatch(magic=" + magic() + ", offsets=[" + baseOffset() + ", " + lastOffset() + "], " + "sequence=[" + baseSequence() + ", " + lastSequence() + "], " + - "partitionLeaderEpoch=" + partitionLeaderEpoch() + ", " + "isTransactional=" + isTransactional() + ", isControlBatch=" + isControlBatch() + ", " + "compression=" + compressionType() + ", timestampType=" + timestampType() + ", crc=" + checksum() + ")"; } diff --git a/clients/src/main/java/org/apache/kafka/common/record/EndTransactionMarker.java b/clients/src/main/java/org/apache/kafka/common/record/EndTransactionMarker.java index b8b4f968a32e6..9e7225c21106d 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/EndTransactionMarker.java +++ b/clients/src/main/java/org/apache/kafka/common/record/EndTransactionMarker.java @@ -17,9 +17,10 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.InvalidRecordException; -import org.apache.kafka.common.message.EndTxnMarker; -import org.apache.kafka.common.protocol.ByteBufferAccessor; -import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.protocol.types.Field; +import org.apache.kafka.common.protocol.types.Schema; +import org.apache.kafka.common.protocol.types.Struct; +import org.apache.kafka.common.protocol.types.Type; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,16 +35,23 @@ public class EndTransactionMarker { private static final Logger log = LoggerFactory.getLogger(EndTransactionMarker.class); + private static final short CURRENT_END_TXN_MARKER_VERSION = 0; + private static final Schema END_TXN_MARKER_SCHEMA_VERSION_V0 = new Schema( + new Field("version", Type.INT16), + new Field("coordinator_epoch", Type.INT32)); + static final int CURRENT_END_TXN_MARKER_VALUE_SIZE = 6; + static final int CURRENT_END_TXN_SCHEMA_RECORD_SIZE = DefaultRecord.sizeInBytes(0, 0L, + ControlRecordType.CURRENT_CONTROL_RECORD_KEY_SIZE, + EndTransactionMarker.CURRENT_END_TXN_MARKER_VALUE_SIZE, + Record.EMPTY_HEADERS); + private final ControlRecordType type; private final int coordinatorEpoch; - private final ByteBuffer buffer; public EndTransactionMarker(ControlRecordType type, int coordinatorEpoch) { ensureTransactionMarkerControlType(type); this.type = type; this.coordinatorEpoch = coordinatorEpoch; - EndTxnMarker marker = new EndTxnMarker().setCoordinatorEpoch(coordinatorEpoch); - this.buffer = MessageUtil.toVersionPrefixedByteBuffer(EndTxnMarker.HIGHEST_SUPPORTED_VERSION, marker); } public int coordinatorEpoch() { @@ -54,8 +62,19 @@ public ControlRecordType controlType() { return type; } + private Struct buildRecordValue() { + Struct struct = new Struct(END_TXN_MARKER_SCHEMA_VERSION_V0); + struct.set("version", CURRENT_END_TXN_MARKER_VERSION); + struct.set("coordinator_epoch", coordinatorEpoch); + return struct; + } + public ByteBuffer serializeValue() { - return buffer.duplicate(); + Struct valueStruct = buildRecordValue(); + ByteBuffer value = ByteBuffer.allocate(valueStruct.sizeOf()); + valueStruct.writeTo(value); + value.flip(); + return value; } @Override @@ -76,7 +95,7 @@ public int hashCode() { private static void ensureTransactionMarkerControlType(ControlRecordType type) { if (type != ControlRecordType.COMMIT && type != ControlRecordType.ABORT) - throw new IllegalArgumentException("Invalid control record type for end transaction marker " + type); + throw new IllegalArgumentException("Invalid control record type for end transaction marker" + type); } public static EndTransactionMarker deserialize(Record record) { @@ -84,29 +103,24 @@ public static EndTransactionMarker deserialize(Record record) { return deserializeValue(type, record.value()); } - // Visible for testing static EndTransactionMarker deserializeValue(ControlRecordType type, ByteBuffer value) { ensureTransactionMarkerControlType(type); - short version = value.getShort(); - if (version < EndTxnMarker.LOWEST_SUPPORTED_VERSION) + if (value.remaining() < CURRENT_END_TXN_MARKER_VALUE_SIZE) + throw new InvalidRecordException("Invalid value size found for end transaction marker. Must have " + + "at least " + CURRENT_END_TXN_MARKER_VALUE_SIZE + " bytes, but found only " + value.remaining()); + + short version = value.getShort(0); + if (version < 0) throw new InvalidRecordException("Invalid version found for end transaction marker: " + version + ". May indicate data corruption"); - if (version > EndTxnMarker.HIGHEST_SUPPORTED_VERSION) { + if (version > CURRENT_END_TXN_MARKER_VERSION) log.debug("Received end transaction marker value version {}. Parsing as version {}", version, - EndTxnMarker.HIGHEST_SUPPORTED_VERSION); - version = EndTxnMarker.HIGHEST_SUPPORTED_VERSION; - } - EndTxnMarker marker = new EndTxnMarker(new ByteBufferAccessor(value), version); - return new EndTransactionMarker(type, marker.coordinatorEpoch()); - } + CURRENT_END_TXN_MARKER_VERSION); - public int endTxnMarkerValueSize() { - return DefaultRecord.sizeInBytes(0, 0L, - ControlRecordType.CURRENT_CONTROL_RECORD_KEY_SIZE, - buffer.remaining(), - Record.EMPTY_HEADERS); + int coordinatorEpoch = value.getInt(2); + return new EndTransactionMarker(type, coordinatorEpoch); } } diff --git a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java index 2f5e2e50dde75..64dd73de41212 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/FileRecords.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.network.TransferableChannel; import org.apache.kafka.common.record.FileLogInputStream.FileChannelRecordBatch; import org.apache.kafka.common.utils.AbstractIterator; +import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import java.io.Closeable; @@ -54,52 +55,33 @@ public class FileRecords extends AbstractRecords implements Closeable { * The {@code FileRecords.open} methods should be used instead of this constructor whenever possible. * The constructor is visible for tests. */ - FileRecords( - File file, - FileChannel channel, - int end - ) throws IOException { + FileRecords(File file, + FileChannel channel, + int start, + int end, + boolean isSlice) throws IOException { this.file = file; this.channel = channel; - this.start = 0; + this.start = start; this.end = end; - this.isSlice = false; - - if (channel.size() > Integer.MAX_VALUE) { - throw new KafkaException( - "The size of segment " + file + " (" + channel.size() + - ") is larger than the maximum allowed segment size of " + Integer.MAX_VALUE - ); - } - - int limit = Math.min((int) channel.size(), end); - this.size = new AtomicInteger(limit - start); - - // update the file position to the end of the file - channel.position(limit); + this.isSlice = isSlice; + this.size = new AtomicInteger(); - batches = batchesFrom(start); - } + if (isSlice) { + // don't check the file size if this is just a slice view + size.set(end - start); + } else { + if (channel.size() > Integer.MAX_VALUE) + throw new KafkaException("The size of segment " + file + " (" + channel.size() + + ") is larger than the maximum allowed segment size of " + Integer.MAX_VALUE); - /** - * Constructor for creating a slice. - * - * This overloaded constructor avoids having to declare a checked IO exception. - */ - private FileRecords( - File file, - FileChannel channel, - int start, - int end - ) { - this.file = file; - this.channel = channel; - this.start = start; - this.end = end; - this.isSlice = true; + int limit = Math.min((int) channel.size(), end); + size.set(limit - start); - // don't check the file size since this is just a slice view - this.size = new AtomicInteger(end - start); + // if this is not a slice, update the file pointer to the end of the file + // set the file position to the last byte in the file + channel.position(limit); + } batches = batchesFrom(start); } @@ -139,12 +121,22 @@ public void readInto(ByteBuffer buffer, int position) throws IOException { buffer.flip(); } - @Override - public FileRecords slice(int position, int size) { + /** + * Return a slice of records from this instance, which is a view into this set starting from the given position + * and with the given size limit. + * + * If the size is beyond the end of the file, the end will be based on the size of the file at the time of the read. + * + * If this message set is already sliced, the position will be taken relative to that slicing. + * + * @param position The start position to begin the read from + * @param size The number of bytes after the start position to include + * @return A sliced wrapper on this message set limited based on the given position and size + */ + public FileRecords slice(int position, int size) throws IOException { int availableBytes = availableBytes(position, size); int startPosition = this.start + position; - - return new FileRecords(file, channel, startPosition, startPosition + availableBytes); + return new FileRecords(file, channel, startPosition, startPosition + availableBytes, true); } /** @@ -169,9 +161,7 @@ private int availableBytes(int position, int size) { if (position < 0) throw new IllegalArgumentException("Invalid position: " + position + " in read from " + this); - // position should always be relative to the start of the file hence compare with file size - // to verify if the position is within the file. - if (position > currentSizeInBytes) + if (position > currentSizeInBytes - start) throw new IllegalArgumentException("Slice from position " + position + " exceeds end position of " + this); if (size < 0) throw new IllegalArgumentException("Invalid size: " + size + " in read from " + this); @@ -211,10 +201,6 @@ public void flush() throws IOException { * Close this record set */ public void close() throws IOException { - if (!channel.isOpen()) { - return; - } - flush(); trim(); channel.close(); @@ -287,6 +273,23 @@ public int truncateTo(int targetSize) throws IOException { return originalSize - targetSize; } + @Override + public ConvertedRecords downConvert(byte toMagic, long firstOffset, Time time) { + ConvertedRecords convertedRecords = RecordsUtil.downConvert(batches, toMagic, firstOffset, time); + if (convertedRecords.recordConversionStats().numRecordsConverted() == 0) { + // This indicates that the message is too large, which means that the buffer is not large + // enough to hold a full record batch. We just return all the bytes in this instance. + // Even though the record batch does not have the right format version, we expect old clients + // to raise an error to the user after reading the record batch size and seeing that there + // are not enough available bytes in the response to read it fully. Note that this is + // only possible prior to KIP-74, after which the broker was changed to always return at least + // one full record batch, even if it requires exceeding the max fetch size requested by the client. + return new ConvertedRecords<>(this, RecordValidationStats.EMPTY); + } else { + return convertedRecords; + } + } + @Override public int writeTo(TransferableChannel destChannel, int offset, int length) throws IOException { long newSize = Math.min(channel.size(), end) - start; @@ -310,35 +313,12 @@ public int writeTo(TransferableChannel destChannel, int offset, int length) thro * @param startingPosition The starting position in the file to begin searching from. * @return the batch's base offset, its physical position, and its size (including log overhead) */ - public LogOffsetPosition searchForOffsetFromPosition(long targetOffset, int startingPosition) { - FileChannelRecordBatch prevBatch = null; - // The following logic is intentionally designed to minimize memory usage by avoiding - // unnecessary calls to lastOffset() for every batch. - // Instead, we use baseOffset() comparisons when possible, and only check lastOffset() when absolutely necessary. + public LogOffsetPosition searchForOffsetWithSize(long targetOffset, int startingPosition) { for (FileChannelRecordBatch batch : batchesFrom(startingPosition)) { - // If baseOffset exactly equals targetOffset, return immediately - if (batch.baseOffset() == targetOffset) { - return LogOffsetPosition.fromBatch(batch); - } - - // If we find the first batch with baseOffset greater than targetOffset - if (batch.baseOffset() > targetOffset) { - // If the previous batch contains the target - if (prevBatch != null && prevBatch.lastOffset() >= targetOffset) - return LogOffsetPosition.fromBatch(prevBatch); - else { - // If there's no previous batch or the previous batch doesn't contain the - // target, return the current batch - return LogOffsetPosition.fromBatch(batch); - } - } - prevBatch = batch; + long offset = batch.lastOffset(); + if (offset >= targetOffset) + return new LogOffsetPosition(batch.baseOffset(), batch.position(), batch.sizeInBytes()); } - // Only one case would reach here: all batches have baseOffset less than targetOffset - // Check if the last batch contains the target - if (prevBatch != null && prevBatch.lastOffset() >= targetOffset) - return LogOffsetPosition.fromBatch(prevBatch); - return null; } @@ -448,7 +428,7 @@ public static FileRecords open(File file, boolean preallocate) throws IOException { FileChannel channel = openChannel(file, mutable, fileAlreadyExists, initFileSize, preallocate); int end = (!fileAlreadyExists && preallocate) ? 0 : Integer.MAX_VALUE; - return new FileRecords(file, channel, end); + return new FileRecords(file, channel, 0, end, false); } public static FileRecords open(File file, @@ -500,10 +480,6 @@ public static class LogOffsetPosition { public final int position; public final int size; - public static LogOffsetPosition fromBatch(FileChannelRecordBatch batch) { - return new LogOffsetPosition(batch.baseOffset(), batch.position(), batch.sizeInBytes()); - } - public LogOffsetPosition(long offset, int position, int size) { this.offset = offset; this.position = position; diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java index 2e2b97dfe37ba..3aee889aded6e 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecords.java @@ -30,8 +30,12 @@ import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.CloseableIterator; +import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + import java.io.IOException; import java.nio.ByteBuffer; import java.nio.channels.GatheringByteChannel; @@ -46,6 +50,7 @@ * or one of the {@link #builder(ByteBuffer, byte, Compression, TimestampType, long)} variants. */ public class MemoryRecords extends AbstractRecords { + private static final Logger log = LoggerFactory.getLogger(MemoryRecords.class); public static final MemoryRecords EMPTY = MemoryRecords.readableRecords(ByteBuffer.allocate(0)); private final ByteBuffer buffer; @@ -106,6 +111,11 @@ public int validBytes() { return bytes; } + @Override + public ConvertedRecords downConvert(byte toMagic, long firstOffset, Time time) { + return RecordsUtil.downConvert(batches(), toMagic, firstOffset, time); + } + @Override public AbstractIterator batchIterator() { return new RecordBatchIterator<>(new ByteBufferLogInputStream(buffer.duplicate(), Integer.MAX_VALUE)); @@ -300,31 +310,6 @@ public ByteBuffer buffer() { return buffer.duplicate(); } - @Override - public MemoryRecords slice(int position, int size) { - if (position < 0) - throw new IllegalArgumentException("Invalid position: " + position + " in read from " + this); - if (position > buffer.limit()) - throw new IllegalArgumentException("Slice from position " + position + " exceeds end position of " + this); - if (size < 0) - throw new IllegalArgumentException("Invalid size: " + size + " in read from " + this); - - int availableBytes = Math.min(size, buffer.limit() - position); - // As of now, clients module support Java11 hence can't use ByteBuffer::slice(position, size) method. - // So we need to create a duplicate buffer and set the position and limit. Duplicate buffer - // is backed by original bytes hence not the content but only the relative position and limit - // are changed in the duplicate buffer. Once the position and limit are set, we can call the - // slice method to get the sliced buffer, which is a backed by the original buffer with the - // position reset to 0 and limit set to the size of the slice. - ByteBuffer slicedBuffer = buffer.duplicate(); - slicedBuffer.position(position); - slicedBuffer.limit(position + availableBytes); - // Reset the position to 0 so that the sliced view has a relative position. - slicedBuffer = slicedBuffer.slice(); - - return readableRecords(slicedBuffer); - } - @Override public Iterable batches() { return batches; @@ -617,7 +602,7 @@ public static MemoryRecords withRecords(byte magic, long initialOffset, Compress return withRecords(magic, initialOffset, compression, TimestampType.CREATE_TIME, records); } - public static MemoryRecords withRecords(long initialOffset, Compression compression, int partitionLeaderEpoch, SimpleRecord... records) { + public static MemoryRecords withRecords(long initialOffset, Compression compression, Integer partitionLeaderEpoch, SimpleRecord... records) { return withRecords(RecordBatch.CURRENT_MAGIC_VALUE, initialOffset, compression, TimestampType.CREATE_TIME, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, RecordBatch.NO_SEQUENCE, partitionLeaderEpoch, false, records); } @@ -703,7 +688,8 @@ public static MemoryRecords withEndTransactionMarker(long timestamp, long produc public static MemoryRecords withEndTransactionMarker(long initialOffset, long timestamp, int partitionLeaderEpoch, long producerId, short producerEpoch, EndTransactionMarker marker) { - int endTxnMarkerBatchSize = DefaultRecordBatch.RECORD_BATCH_OVERHEAD + marker.endTxnMarkerValueSize(); + int endTxnMarkerBatchSize = DefaultRecordBatch.RECORD_BATCH_OVERHEAD + + EndTransactionMarker.CURRENT_END_TXN_SCHEMA_RECORD_SIZE; ByteBuffer buffer = ByteBuffer.allocate(endTxnMarkerBatchSize); writeEndTransactionalMarker(buffer, initialOffset, timestamp, partitionLeaderEpoch, producerId, producerEpoch, marker); diff --git a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java index 759ff67e8f898..b37b1f1ca6851 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/record/MemoryRecordsBuilder.java @@ -634,7 +634,7 @@ public void appendLeaderChangeMessage(long timestamp, LeaderChangeMessage leader appendControlRecord( timestamp, ControlRecordType.LEADER_CHANGE, - MessageUtil.toByteBufferAccessor(leaderChangeMessage, ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION).buffer() + MessageUtil.toByteBuffer(leaderChangeMessage, ControlRecordUtils.LEADER_CHANGE_CURRENT_VERSION) ); } @@ -642,7 +642,7 @@ public void appendSnapshotHeaderMessage(long timestamp, SnapshotHeaderRecord sna appendControlRecord( timestamp, ControlRecordType.SNAPSHOT_HEADER, - MessageUtil.toByteBufferAccessor(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_HEADER_CURRENT_VERSION).buffer() + MessageUtil.toByteBuffer(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_HEADER_CURRENT_VERSION) ); } @@ -650,7 +650,7 @@ public void appendSnapshotFooterMessage(long timestamp, SnapshotFooterRecord sna appendControlRecord( timestamp, ControlRecordType.SNAPSHOT_FOOTER, - MessageUtil.toByteBufferAccessor(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION).buffer() + MessageUtil.toByteBuffer(snapshotHeaderRecord, ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION) ); } @@ -658,7 +658,7 @@ public void appendKRaftVersionMessage(long timestamp, KRaftVersionRecord kraftVe appendControlRecord( timestamp, ControlRecordType.KRAFT_VERSION, - MessageUtil.toByteBufferAccessor(kraftVersionRecord, ControlRecordUtils.KRAFT_VERSION_CURRENT_VERSION).buffer() + MessageUtil.toByteBuffer(kraftVersionRecord, ControlRecordUtils.KRAFT_VERSION_CURRENT_VERSION) ); } @@ -666,7 +666,7 @@ public void appendVotersMessage(long timestamp, VotersRecord votersRecord) { appendControlRecord( timestamp, ControlRecordType.KRAFT_VOTERS, - MessageUtil.toByteBufferAccessor(votersRecord, ControlRecordUtils.KRAFT_VOTERS_CURRENT_VERSION).buffer() + MessageUtil.toByteBuffer(votersRecord, ControlRecordUtils.KRAFT_VOTERS_CURRENT_VERSION) ); } diff --git a/clients/src/main/java/org/apache/kafka/common/record/RecordVersion.java b/clients/src/main/java/org/apache/kafka/common/record/RecordVersion.java index 13d6357d9b388..8406d5331c8a8 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/RecordVersion.java +++ b/clients/src/main/java/org/apache/kafka/common/record/RecordVersion.java @@ -20,7 +20,8 @@ * Defines the record format versions supported by Kafka. * * For historical reasons, the record format version is also known as `magic` and `message format version`. Note that - * the version actually applies to the {@link RecordBatch} (instead of the {@link Record}). + * the version actually applies to the {@link RecordBatch} (instead of the {@link Record}). Finally, the + * `message.format.version` topic config confusingly expects an ApiVersion instead of a RecordVersion. */ public enum RecordVersion { V0(0), V1(1), V2(2); @@ -33,6 +34,15 @@ public enum RecordVersion { this.value = (byte) value; } + /** + * Check whether this version precedes another version. + * + * @return true only if the magic value is less than the other's + */ + public boolean precedes(RecordVersion other) { + return this.value < other.value; + } + public static RecordVersion lookup(byte value) { if (value < 0 || value >= VALUES.length) throw new IllegalArgumentException("Unknown record version: " + value); diff --git a/clients/src/main/java/org/apache/kafka/common/record/Records.java b/clients/src/main/java/org/apache/kafka/common/record/Records.java index 017c49ba94cdb..e1ea4f5364eb8 100644 --- a/clients/src/main/java/org/apache/kafka/common/record/Records.java +++ b/clients/src/main/java/org/apache/kafka/common/record/Records.java @@ -17,6 +17,7 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.utils.AbstractIterator; +import org.apache.kafka.common.utils.Time; import java.util.Iterator; import java.util.Optional; @@ -84,25 +85,21 @@ public interface Records extends TransferableRecords { */ boolean hasMatchingMagic(byte magic); + /** + * Convert all batches in this buffer to the format passed as a parameter. Note that this requires + * deep iteration since all of the deep records must also be converted to the desired format. + * @param toMagic The magic value to convert to + * @param firstOffset The starting offset for returned records. This only impacts some cases. See + * {@link RecordsUtil#downConvert(Iterable, byte, long, Time)} for an explanation. + * @param time instance used for reporting stats + * @return A ConvertedRecords instance which may or may not contain the same instance in its records field. + */ + ConvertedRecords downConvert(byte toMagic, long firstOffset, Time time); + /** * Get an iterator over the records in this log. Note that this generally requires decompression, * and should therefore be used with care. * @return The record iterator */ Iterable records(); - - /** - * Return a slice of records from this instance, which is a view into this set starting from the given position - * and with the given size limit. - * - * If the size is beyond the end of the records, the end will be based on the size of the records at the time of the read. - * - * If this records set is already sliced, the position will be taken relative to that slicing. - * - * @param position The start position to begin the read from. The position should be aligned to - * the batch boundary, else the returned records can't be iterated. - * @param size The number of bytes after the start position to include - * @return A sliced wrapper on this message set limited based on the given position and size - */ - Records slice(int position, int size); } diff --git a/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java b/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java new file mode 100644 index 0000000000000..aa34a5190dc07 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/record/RecordsUtil.java @@ -0,0 +1,141 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.record; + +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Utils; + +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; + +public class RecordsUtil { + /** + * Down convert batches to the provided message format version. The first offset parameter is only relevant in the + * conversion from uncompressed v2 or higher to v1 or lower. The reason is that uncompressed records in v0 and v1 + * are not batched (put another way, each batch always has 1 record). + * + * If a client requests records in v1 format starting from the middle of an uncompressed batch in v2 format, we + * need to drop records from the batch during the conversion. Some versions of librdkafka rely on this for + * correctness. + * + * The temporaryMemoryBytes computation assumes that the batches are not loaded into the heap + * (via classes like FileChannelRecordBatch) before this method is called. This is the case in the broker (we + * only load records into the heap when down converting), but it's not for the producer. However, down converting + * in the producer is very uncommon and the extra complexity to handle that case is not worth it. + */ + protected static ConvertedRecords downConvert(Iterable batches, byte toMagic, + long firstOffset, Time time) { + // maintain the batch along with the decompressed records to avoid the need to decompress again + List recordBatchAndRecordsList = new ArrayList<>(); + int totalSizeEstimate = 0; + long startNanos = time.nanoseconds(); + + for (RecordBatch batch : batches) { + if (toMagic < RecordBatch.MAGIC_VALUE_V2) { + if (batch.isControlBatch()) + continue; + + if (batch.compressionType() == CompressionType.ZSTD) + throw new UnsupportedCompressionTypeException("Down-conversion of zstandard-compressed batches " + + "is not supported"); + } + + if (batch.magic() <= toMagic) { + totalSizeEstimate += batch.sizeInBytes(); + recordBatchAndRecordsList.add(new RecordBatchAndRecords(batch, null, null)); + } else { + List records = new ArrayList<>(); + for (Record record : batch) { + // See the method javadoc for an explanation + if (toMagic > RecordBatch.MAGIC_VALUE_V1 || batch.isCompressed() || record.offset() >= firstOffset) + records.add(record); + } + if (records.isEmpty()) + continue; + final long baseOffset; + if (batch.magic() >= RecordBatch.MAGIC_VALUE_V2 && toMagic >= RecordBatch.MAGIC_VALUE_V2) + baseOffset = batch.baseOffset(); + else + baseOffset = records.get(0).offset(); + totalSizeEstimate += AbstractRecords.estimateSizeInBytes(toMagic, baseOffset, batch.compressionType(), records); + recordBatchAndRecordsList.add(new RecordBatchAndRecords(batch, records, baseOffset)); + } + } + + ByteBuffer buffer = ByteBuffer.allocate(totalSizeEstimate); + long temporaryMemoryBytes = 0; + int numRecordsConverted = 0; + + for (RecordBatchAndRecords recordBatchAndRecords : recordBatchAndRecordsList) { + temporaryMemoryBytes += recordBatchAndRecords.batch.sizeInBytes(); + if (recordBatchAndRecords.batch.magic() <= toMagic) { + buffer = Utils.ensureCapacity(buffer, buffer.position() + recordBatchAndRecords.batch.sizeInBytes()); + recordBatchAndRecords.batch.writeTo(buffer); + } else { + MemoryRecordsBuilder builder = convertRecordBatch(toMagic, buffer, recordBatchAndRecords); + buffer = builder.buffer(); + temporaryMemoryBytes += builder.uncompressedBytesWritten(); + numRecordsConverted += builder.numRecords(); + } + } + + buffer.flip(); + RecordValidationStats stats = new RecordValidationStats(temporaryMemoryBytes, numRecordsConverted, + time.nanoseconds() - startNanos); + return new ConvertedRecords<>(MemoryRecords.readableRecords(buffer), stats); + } + + /** + * Return a buffer containing the converted record batches. The returned buffer may not be the same as the received + * one (e.g. it may require expansion). + */ + private static MemoryRecordsBuilder convertRecordBatch(byte magic, ByteBuffer buffer, RecordBatchAndRecords recordBatchAndRecords) { + RecordBatch batch = recordBatchAndRecords.batch; + final TimestampType timestampType = batch.timestampType(); + long logAppendTime = timestampType == TimestampType.LOG_APPEND_TIME ? batch.maxTimestamp() : RecordBatch.NO_TIMESTAMP; + + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic, Compression.of(batch.compressionType()).build(), + timestampType, recordBatchAndRecords.baseOffset, logAppendTime); + for (Record record : recordBatchAndRecords.records) { + // Down-convert this record. Ignore headers when down-converting to V0 and V1 since they are not supported + if (magic > RecordBatch.MAGIC_VALUE_V1) + builder.append(record); + else + builder.appendWithOffset(record.offset(), record.timestamp(), record.key(), record.value()); + } + + builder.close(); + return builder; + } + + + private static class RecordBatchAndRecords { + private final RecordBatch batch; + private final List records; + private final Long baseOffset; + + private RecordBatchAndRecords(RecordBatch batch, List records, Long baseOffset) { + this.batch = batch; + this.records = records; + this.baseOffset = baseOffset; + } + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/replica/ReplicaSelector.java b/clients/src/main/java/org/apache/kafka/common/replica/ReplicaSelector.java index 601bd6ce22ae4..301fc9fdc4b36 100644 --- a/clients/src/main/java/org/apache/kafka/common/replica/ReplicaSelector.java +++ b/clients/src/main/java/org/apache/kafka/common/replica/ReplicaSelector.java @@ -27,9 +27,6 @@ /** * Plug-able interface for selecting a preferred read replica given the current set of replicas for a partition * and metadata from the client. - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the selector to register metrics. - * The following tags are automatically added to all metrics registered: config set to - * replica.selector.class, and class set to the ReplicaSelector class name. */ public interface ReplicaSelector extends Configurable, Closeable { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java index 750de2050f432..38da78efb1b5b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractRequest.java @@ -19,11 +19,9 @@ import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.network.Send; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.SendBuilder; import java.nio.ByteBuffer; @@ -124,8 +122,8 @@ public final ByteBuffer serializeWithHeader(RequestHeader header) { } // Visible for testing - public final ByteBufferAccessor serialize() { - return MessageUtil.toByteBufferAccessor(data(), version); + public final ByteBuffer serialize() { + return MessageUtil.toByteBuffer(data(), version); } // Visible for testing @@ -138,7 +136,7 @@ public String toString(boolean verbose) { } @Override - public String toString() { + public final String toString() { return toString(true); } @@ -169,191 +167,181 @@ public Map errorCounts(Throwable e) { /** * Factory method for getting a request object based on ApiKey ID and a version */ - public static RequestAndSize parseRequest(ApiKeys apiKey, short apiVersion, Readable readable) { - int bufferSize = readable.remaining(); - return new RequestAndSize(doParseRequest(apiKey, apiVersion, readable), bufferSize); + public static RequestAndSize parseRequest(ApiKeys apiKey, short apiVersion, ByteBuffer buffer) { + int bufferSize = buffer.remaining(); + return new RequestAndSize(doParseRequest(apiKey, apiVersion, buffer), bufferSize); } - private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, Readable readable) { + private static AbstractRequest doParseRequest(ApiKeys apiKey, short apiVersion, ByteBuffer buffer) { switch (apiKey) { case PRODUCE: - return ProduceRequest.parse(readable, apiVersion); + return ProduceRequest.parse(buffer, apiVersion); case FETCH: - return FetchRequest.parse(readable, apiVersion); + return FetchRequest.parse(buffer, apiVersion); case LIST_OFFSETS: - return ListOffsetsRequest.parse(readable, apiVersion); + return ListOffsetsRequest.parse(buffer, apiVersion); case METADATA: - return MetadataRequest.parse(readable, apiVersion); + return MetadataRequest.parse(buffer, apiVersion); case OFFSET_COMMIT: - return OffsetCommitRequest.parse(readable, apiVersion); + return OffsetCommitRequest.parse(buffer, apiVersion); case OFFSET_FETCH: - return OffsetFetchRequest.parse(readable, apiVersion); + return OffsetFetchRequest.parse(buffer, apiVersion); case FIND_COORDINATOR: - return FindCoordinatorRequest.parse(readable, apiVersion); + return FindCoordinatorRequest.parse(buffer, apiVersion); case JOIN_GROUP: - return JoinGroupRequest.parse(readable, apiVersion); + return JoinGroupRequest.parse(buffer, apiVersion); case HEARTBEAT: - return HeartbeatRequest.parse(readable, apiVersion); + return HeartbeatRequest.parse(buffer, apiVersion); case LEAVE_GROUP: - return LeaveGroupRequest.parse(readable, apiVersion); + return LeaveGroupRequest.parse(buffer, apiVersion); case SYNC_GROUP: - return SyncGroupRequest.parse(readable, apiVersion); + return SyncGroupRequest.parse(buffer, apiVersion); case DESCRIBE_GROUPS: - return DescribeGroupsRequest.parse(readable, apiVersion); + return DescribeGroupsRequest.parse(buffer, apiVersion); case LIST_GROUPS: - return ListGroupsRequest.parse(readable, apiVersion); + return ListGroupsRequest.parse(buffer, apiVersion); case SASL_HANDSHAKE: - return SaslHandshakeRequest.parse(readable, apiVersion); + return SaslHandshakeRequest.parse(buffer, apiVersion); case API_VERSIONS: - return ApiVersionsRequest.parse(readable, apiVersion); + return ApiVersionsRequest.parse(buffer, apiVersion); case CREATE_TOPICS: - return CreateTopicsRequest.parse(readable, apiVersion); + return CreateTopicsRequest.parse(buffer, apiVersion); case DELETE_TOPICS: - return DeleteTopicsRequest.parse(readable, apiVersion); + return DeleteTopicsRequest.parse(buffer, apiVersion); case DELETE_RECORDS: - return DeleteRecordsRequest.parse(readable, apiVersion); + return DeleteRecordsRequest.parse(buffer, apiVersion); case INIT_PRODUCER_ID: - return InitProducerIdRequest.parse(readable, apiVersion); + return InitProducerIdRequest.parse(buffer, apiVersion); case OFFSET_FOR_LEADER_EPOCH: - return OffsetsForLeaderEpochRequest.parse(readable, apiVersion); + return OffsetsForLeaderEpochRequest.parse(buffer, apiVersion); case ADD_PARTITIONS_TO_TXN: - return AddPartitionsToTxnRequest.parse(readable, apiVersion); + return AddPartitionsToTxnRequest.parse(buffer, apiVersion); case ADD_OFFSETS_TO_TXN: - return AddOffsetsToTxnRequest.parse(readable, apiVersion); + return AddOffsetsToTxnRequest.parse(buffer, apiVersion); case END_TXN: - return EndTxnRequest.parse(readable, apiVersion); + return EndTxnRequest.parse(buffer, apiVersion); case WRITE_TXN_MARKERS: - return WriteTxnMarkersRequest.parse(readable, apiVersion); + return WriteTxnMarkersRequest.parse(buffer, apiVersion); case TXN_OFFSET_COMMIT: - return TxnOffsetCommitRequest.parse(readable, apiVersion); + return TxnOffsetCommitRequest.parse(buffer, apiVersion); case DESCRIBE_ACLS: - return DescribeAclsRequest.parse(readable, apiVersion); + return DescribeAclsRequest.parse(buffer, apiVersion); case CREATE_ACLS: - return CreateAclsRequest.parse(readable, apiVersion); + return CreateAclsRequest.parse(buffer, apiVersion); case DELETE_ACLS: - return DeleteAclsRequest.parse(readable, apiVersion); + return DeleteAclsRequest.parse(buffer, apiVersion); case DESCRIBE_CONFIGS: - return DescribeConfigsRequest.parse(readable, apiVersion); + return DescribeConfigsRequest.parse(buffer, apiVersion); case ALTER_CONFIGS: - return AlterConfigsRequest.parse(readable, apiVersion); + return AlterConfigsRequest.parse(buffer, apiVersion); case ALTER_REPLICA_LOG_DIRS: - return AlterReplicaLogDirsRequest.parse(readable, apiVersion); + return AlterReplicaLogDirsRequest.parse(buffer, apiVersion); case DESCRIBE_LOG_DIRS: - return DescribeLogDirsRequest.parse(readable, apiVersion); + return DescribeLogDirsRequest.parse(buffer, apiVersion); case SASL_AUTHENTICATE: - return SaslAuthenticateRequest.parse(readable, apiVersion); + return SaslAuthenticateRequest.parse(buffer, apiVersion); case CREATE_PARTITIONS: - return CreatePartitionsRequest.parse(readable, apiVersion); + return CreatePartitionsRequest.parse(buffer, apiVersion); case CREATE_DELEGATION_TOKEN: - return CreateDelegationTokenRequest.parse(readable, apiVersion); + return CreateDelegationTokenRequest.parse(buffer, apiVersion); case RENEW_DELEGATION_TOKEN: - return RenewDelegationTokenRequest.parse(readable, apiVersion); + return RenewDelegationTokenRequest.parse(buffer, apiVersion); case EXPIRE_DELEGATION_TOKEN: - return ExpireDelegationTokenRequest.parse(readable, apiVersion); + return ExpireDelegationTokenRequest.parse(buffer, apiVersion); case DESCRIBE_DELEGATION_TOKEN: - return DescribeDelegationTokenRequest.parse(readable, apiVersion); + return DescribeDelegationTokenRequest.parse(buffer, apiVersion); case DELETE_GROUPS: - return DeleteGroupsRequest.parse(readable, apiVersion); + return DeleteGroupsRequest.parse(buffer, apiVersion); case ELECT_LEADERS: - return ElectLeadersRequest.parse(readable, apiVersion); + return ElectLeadersRequest.parse(buffer, apiVersion); case INCREMENTAL_ALTER_CONFIGS: - return IncrementalAlterConfigsRequest.parse(readable, apiVersion); + return IncrementalAlterConfigsRequest.parse(buffer, apiVersion); case ALTER_PARTITION_REASSIGNMENTS: - return AlterPartitionReassignmentsRequest.parse(readable, apiVersion); + return AlterPartitionReassignmentsRequest.parse(buffer, apiVersion); case LIST_PARTITION_REASSIGNMENTS: - return ListPartitionReassignmentsRequest.parse(readable, apiVersion); + return ListPartitionReassignmentsRequest.parse(buffer, apiVersion); case OFFSET_DELETE: - return OffsetDeleteRequest.parse(readable, apiVersion); + return OffsetDeleteRequest.parse(buffer, apiVersion); case DESCRIBE_CLIENT_QUOTAS: - return DescribeClientQuotasRequest.parse(readable, apiVersion); + return DescribeClientQuotasRequest.parse(buffer, apiVersion); case ALTER_CLIENT_QUOTAS: - return AlterClientQuotasRequest.parse(readable, apiVersion); + return AlterClientQuotasRequest.parse(buffer, apiVersion); case DESCRIBE_USER_SCRAM_CREDENTIALS: - return DescribeUserScramCredentialsRequest.parse(readable, apiVersion); + return DescribeUserScramCredentialsRequest.parse(buffer, apiVersion); case ALTER_USER_SCRAM_CREDENTIALS: - return AlterUserScramCredentialsRequest.parse(readable, apiVersion); + return AlterUserScramCredentialsRequest.parse(buffer, apiVersion); case VOTE: - return VoteRequest.parse(readable, apiVersion); + return VoteRequest.parse(buffer, apiVersion); case BEGIN_QUORUM_EPOCH: - return BeginQuorumEpochRequest.parse(readable, apiVersion); + return BeginQuorumEpochRequest.parse(buffer, apiVersion); case END_QUORUM_EPOCH: - return EndQuorumEpochRequest.parse(readable, apiVersion); + return EndQuorumEpochRequest.parse(buffer, apiVersion); case DESCRIBE_QUORUM: - return DescribeQuorumRequest.parse(readable, apiVersion); + return DescribeQuorumRequest.parse(buffer, apiVersion); case ALTER_PARTITION: - return AlterPartitionRequest.parse(readable, apiVersion); + return AlterPartitionRequest.parse(buffer, apiVersion); case UPDATE_FEATURES: - return UpdateFeaturesRequest.parse(readable, apiVersion); + return UpdateFeaturesRequest.parse(buffer, apiVersion); case ENVELOPE: - return EnvelopeRequest.parse(readable, apiVersion); + return EnvelopeRequest.parse(buffer, apiVersion); case FETCH_SNAPSHOT: - return FetchSnapshotRequest.parse(readable, apiVersion); + return FetchSnapshotRequest.parse(buffer, apiVersion); case DESCRIBE_CLUSTER: - return DescribeClusterRequest.parse(readable, apiVersion); + return DescribeClusterRequest.parse(buffer, apiVersion); case DESCRIBE_PRODUCERS: - return DescribeProducersRequest.parse(readable, apiVersion); + return DescribeProducersRequest.parse(buffer, apiVersion); case BROKER_REGISTRATION: - return BrokerRegistrationRequest.parse(readable, apiVersion); + return BrokerRegistrationRequest.parse(buffer, apiVersion); case BROKER_HEARTBEAT: - return BrokerHeartbeatRequest.parse(readable, apiVersion); + return BrokerHeartbeatRequest.parse(buffer, apiVersion); case UNREGISTER_BROKER: - return UnregisterBrokerRequest.parse(readable, apiVersion); + return UnregisterBrokerRequest.parse(buffer, apiVersion); case DESCRIBE_TRANSACTIONS: - return DescribeTransactionsRequest.parse(readable, apiVersion); + return DescribeTransactionsRequest.parse(buffer, apiVersion); case LIST_TRANSACTIONS: - return ListTransactionsRequest.parse(readable, apiVersion); + return ListTransactionsRequest.parse(buffer, apiVersion); case ALLOCATE_PRODUCER_IDS: - return AllocateProducerIdsRequest.parse(readable, apiVersion); + return AllocateProducerIdsRequest.parse(buffer, apiVersion); case CONSUMER_GROUP_HEARTBEAT: - return ConsumerGroupHeartbeatRequest.parse(readable, apiVersion); + return ConsumerGroupHeartbeatRequest.parse(buffer, apiVersion); case CONSUMER_GROUP_DESCRIBE: - return ConsumerGroupDescribeRequest.parse(readable, apiVersion); + return ConsumerGroupDescribeRequest.parse(buffer, apiVersion); case CONTROLLER_REGISTRATION: - return ControllerRegistrationRequest.parse(readable, apiVersion); + return ControllerRegistrationRequest.parse(buffer, apiVersion); case GET_TELEMETRY_SUBSCRIPTIONS: - return GetTelemetrySubscriptionsRequest.parse(readable, apiVersion); + return GetTelemetrySubscriptionsRequest.parse(buffer, apiVersion); case PUSH_TELEMETRY: - return PushTelemetryRequest.parse(readable, apiVersion); + return PushTelemetryRequest.parse(buffer, apiVersion); case ASSIGN_REPLICAS_TO_DIRS: - return AssignReplicasToDirsRequest.parse(readable, apiVersion); - case LIST_CONFIG_RESOURCES: - return ListConfigResourcesRequest.parse(readable, apiVersion); + return AssignReplicasToDirsRequest.parse(buffer, apiVersion); + case LIST_CLIENT_METRICS_RESOURCES: + return ListClientMetricsResourcesRequest.parse(buffer, apiVersion); case DESCRIBE_TOPIC_PARTITIONS: - return DescribeTopicPartitionsRequest.parse(readable, apiVersion); + return DescribeTopicPartitionsRequest.parse(buffer, apiVersion); case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatRequest.parse(readable, apiVersion); + return ShareGroupHeartbeatRequest.parse(buffer, apiVersion); case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeRequest.parse(readable, apiVersion); + return ShareGroupDescribeRequest.parse(buffer, apiVersion); case SHARE_FETCH: - return ShareFetchRequest.parse(readable, apiVersion); + return ShareFetchRequest.parse(buffer, apiVersion); case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeRequest.parse(readable, apiVersion); + return ShareAcknowledgeRequest.parse(buffer, apiVersion); case ADD_RAFT_VOTER: - return AddRaftVoterRequest.parse(readable, apiVersion); + return AddRaftVoterRequest.parse(buffer, apiVersion); case REMOVE_RAFT_VOTER: - return RemoveRaftVoterRequest.parse(readable, apiVersion); + return RemoveRaftVoterRequest.parse(buffer, apiVersion); case UPDATE_RAFT_VOTER: - return UpdateRaftVoterRequest.parse(readable, apiVersion); + return UpdateRaftVoterRequest.parse(buffer, apiVersion); case INITIALIZE_SHARE_GROUP_STATE: - return InitializeShareGroupStateRequest.parse(readable, apiVersion); + return InitializeShareGroupStateRequest.parse(buffer, apiVersion); case READ_SHARE_GROUP_STATE: - return ReadShareGroupStateRequest.parse(readable, apiVersion); + return ReadShareGroupStateRequest.parse(buffer, apiVersion); case WRITE_SHARE_GROUP_STATE: - return WriteShareGroupStateRequest.parse(readable, apiVersion); + return WriteShareGroupStateRequest.parse(buffer, apiVersion); case DELETE_SHARE_GROUP_STATE: - return DeleteShareGroupStateRequest.parse(readable, apiVersion); + return DeleteShareGroupStateRequest.parse(buffer, apiVersion); case READ_SHARE_GROUP_STATE_SUMMARY: - return ReadShareGroupStateSummaryRequest.parse(readable, apiVersion); - case STREAMS_GROUP_HEARTBEAT: - return StreamsGroupHeartbeatRequest.parse(readable, apiVersion); - case STREAMS_GROUP_DESCRIBE: - return StreamsGroupDescribeRequest.parse(readable, apiVersion); - case DESCRIBE_SHARE_GROUP_OFFSETS: - return DescribeShareGroupOffsetsRequest.parse(readable, apiVersion); - case ALTER_SHARE_GROUP_OFFSETS: - return AlterShareGroupOffsetsRequest.parse(readable, apiVersion); - case DELETE_SHARE_GROUP_OFFSETS: - return DeleteShareGroupOffsetsRequest.parse(readable, apiVersion); + return ReadShareGroupStateSummaryRequest.parse(buffer, apiVersion); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseRequest`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java index bc313078d7424..cc29f4a0cd052 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AbstractResponse.java @@ -18,16 +18,14 @@ import org.apache.kafka.common.network.Send; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.protocol.SendBuilder; import java.nio.ByteBuffer; import java.util.Collection; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -53,8 +51,8 @@ final ByteBuffer serializeWithHeader(ResponseHeader header, short version) { } // Visible for testing - final ByteBufferAccessor serialize(short version) { - return MessageUtil.toByteBufferAccessor(data(), version); + final ByteBuffer serialize(short version) { + return MessageUtil.toByteBuffer(data(), version); } /** @@ -73,14 +71,14 @@ protected static Map errorCounts(Stream errors) { } protected static Map errorCounts(Collection errors) { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); for (Errors error : errors) updateErrorCounts(errorCounts, error); return errorCounts; } protected static Map apiErrorCounts(Map errors) { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); for (ApiError apiError : errors.values()) updateErrorCounts(errorCounts, apiError.error()); return errorCounts; @@ -108,189 +106,179 @@ public static AbstractResponse parseResponse(ByteBuffer buffer, RequestHeader re requestHeader.correlationId(), responseHeader.correlationId()); } - return AbstractResponse.parseResponse(apiKey, new ByteBufferAccessor(buffer), apiVersion); + return AbstractResponse.parseResponse(apiKey, buffer, apiVersion); } - public static AbstractResponse parseResponse(ApiKeys apiKey, Readable readable, short version) { + public static AbstractResponse parseResponse(ApiKeys apiKey, ByteBuffer responseBuffer, short version) { switch (apiKey) { case PRODUCE: - return ProduceResponse.parse(readable, version); + return ProduceResponse.parse(responseBuffer, version); case FETCH: - return FetchResponse.parse(readable, version); + return FetchResponse.parse(responseBuffer, version); case LIST_OFFSETS: - return ListOffsetsResponse.parse(readable, version); + return ListOffsetsResponse.parse(responseBuffer, version); case METADATA: - return MetadataResponse.parse(readable, version); + return MetadataResponse.parse(responseBuffer, version); case OFFSET_COMMIT: - return OffsetCommitResponse.parse(readable, version); + return OffsetCommitResponse.parse(responseBuffer, version); case OFFSET_FETCH: - return OffsetFetchResponse.parse(readable, version); + return OffsetFetchResponse.parse(responseBuffer, version); case FIND_COORDINATOR: - return FindCoordinatorResponse.parse(readable, version); + return FindCoordinatorResponse.parse(responseBuffer, version); case JOIN_GROUP: - return JoinGroupResponse.parse(readable, version); + return JoinGroupResponse.parse(responseBuffer, version); case HEARTBEAT: - return HeartbeatResponse.parse(readable, version); + return HeartbeatResponse.parse(responseBuffer, version); case LEAVE_GROUP: - return LeaveGroupResponse.parse(readable, version); + return LeaveGroupResponse.parse(responseBuffer, version); case SYNC_GROUP: - return SyncGroupResponse.parse(readable, version); + return SyncGroupResponse.parse(responseBuffer, version); case DESCRIBE_GROUPS: - return DescribeGroupsResponse.parse(readable, version); + return DescribeGroupsResponse.parse(responseBuffer, version); case LIST_GROUPS: - return ListGroupsResponse.parse(readable, version); + return ListGroupsResponse.parse(responseBuffer, version); case SASL_HANDSHAKE: - return SaslHandshakeResponse.parse(readable, version); + return SaslHandshakeResponse.parse(responseBuffer, version); case API_VERSIONS: - return ApiVersionsResponse.parse(readable, version); + return ApiVersionsResponse.parse(responseBuffer, version); case CREATE_TOPICS: - return CreateTopicsResponse.parse(readable, version); + return CreateTopicsResponse.parse(responseBuffer, version); case DELETE_TOPICS: - return DeleteTopicsResponse.parse(readable, version); + return DeleteTopicsResponse.parse(responseBuffer, version); case DELETE_RECORDS: - return DeleteRecordsResponse.parse(readable, version); + return DeleteRecordsResponse.parse(responseBuffer, version); case INIT_PRODUCER_ID: - return InitProducerIdResponse.parse(readable, version); + return InitProducerIdResponse.parse(responseBuffer, version); case OFFSET_FOR_LEADER_EPOCH: - return OffsetsForLeaderEpochResponse.parse(readable, version); + return OffsetsForLeaderEpochResponse.parse(responseBuffer, version); case ADD_PARTITIONS_TO_TXN: - return AddPartitionsToTxnResponse.parse(readable, version); + return AddPartitionsToTxnResponse.parse(responseBuffer, version); case ADD_OFFSETS_TO_TXN: - return AddOffsetsToTxnResponse.parse(readable, version); + return AddOffsetsToTxnResponse.parse(responseBuffer, version); case END_TXN: - return EndTxnResponse.parse(readable, version); + return EndTxnResponse.parse(responseBuffer, version); case WRITE_TXN_MARKERS: - return WriteTxnMarkersResponse.parse(readable, version); + return WriteTxnMarkersResponse.parse(responseBuffer, version); case TXN_OFFSET_COMMIT: - return TxnOffsetCommitResponse.parse(readable, version); + return TxnOffsetCommitResponse.parse(responseBuffer, version); case DESCRIBE_ACLS: - return DescribeAclsResponse.parse(readable, version); + return DescribeAclsResponse.parse(responseBuffer, version); case CREATE_ACLS: - return CreateAclsResponse.parse(readable, version); + return CreateAclsResponse.parse(responseBuffer, version); case DELETE_ACLS: - return DeleteAclsResponse.parse(readable, version); + return DeleteAclsResponse.parse(responseBuffer, version); case DESCRIBE_CONFIGS: - return DescribeConfigsResponse.parse(readable, version); + return DescribeConfigsResponse.parse(responseBuffer, version); case ALTER_CONFIGS: - return AlterConfigsResponse.parse(readable, version); + return AlterConfigsResponse.parse(responseBuffer, version); case ALTER_REPLICA_LOG_DIRS: - return AlterReplicaLogDirsResponse.parse(readable, version); + return AlterReplicaLogDirsResponse.parse(responseBuffer, version); case DESCRIBE_LOG_DIRS: - return DescribeLogDirsResponse.parse(readable, version); + return DescribeLogDirsResponse.parse(responseBuffer, version); case SASL_AUTHENTICATE: - return SaslAuthenticateResponse.parse(readable, version); + return SaslAuthenticateResponse.parse(responseBuffer, version); case CREATE_PARTITIONS: - return CreatePartitionsResponse.parse(readable, version); + return CreatePartitionsResponse.parse(responseBuffer, version); case CREATE_DELEGATION_TOKEN: - return CreateDelegationTokenResponse.parse(readable, version); + return CreateDelegationTokenResponse.parse(responseBuffer, version); case RENEW_DELEGATION_TOKEN: - return RenewDelegationTokenResponse.parse(readable, version); + return RenewDelegationTokenResponse.parse(responseBuffer, version); case EXPIRE_DELEGATION_TOKEN: - return ExpireDelegationTokenResponse.parse(readable, version); + return ExpireDelegationTokenResponse.parse(responseBuffer, version); case DESCRIBE_DELEGATION_TOKEN: - return DescribeDelegationTokenResponse.parse(readable, version); + return DescribeDelegationTokenResponse.parse(responseBuffer, version); case DELETE_GROUPS: - return DeleteGroupsResponse.parse(readable, version); + return DeleteGroupsResponse.parse(responseBuffer, version); case ELECT_LEADERS: - return ElectLeadersResponse.parse(readable, version); + return ElectLeadersResponse.parse(responseBuffer, version); case INCREMENTAL_ALTER_CONFIGS: - return IncrementalAlterConfigsResponse.parse(readable, version); + return IncrementalAlterConfigsResponse.parse(responseBuffer, version); case ALTER_PARTITION_REASSIGNMENTS: - return AlterPartitionReassignmentsResponse.parse(readable, version); + return AlterPartitionReassignmentsResponse.parse(responseBuffer, version); case LIST_PARTITION_REASSIGNMENTS: - return ListPartitionReassignmentsResponse.parse(readable, version); + return ListPartitionReassignmentsResponse.parse(responseBuffer, version); case OFFSET_DELETE: - return OffsetDeleteResponse.parse(readable, version); + return OffsetDeleteResponse.parse(responseBuffer, version); case DESCRIBE_CLIENT_QUOTAS: - return DescribeClientQuotasResponse.parse(readable, version); + return DescribeClientQuotasResponse.parse(responseBuffer, version); case ALTER_CLIENT_QUOTAS: - return AlterClientQuotasResponse.parse(readable, version); + return AlterClientQuotasResponse.parse(responseBuffer, version); case DESCRIBE_USER_SCRAM_CREDENTIALS: - return DescribeUserScramCredentialsResponse.parse(readable, version); + return DescribeUserScramCredentialsResponse.parse(responseBuffer, version); case ALTER_USER_SCRAM_CREDENTIALS: - return AlterUserScramCredentialsResponse.parse(readable, version); + return AlterUserScramCredentialsResponse.parse(responseBuffer, version); case VOTE: - return VoteResponse.parse(readable, version); + return VoteResponse.parse(responseBuffer, version); case BEGIN_QUORUM_EPOCH: - return BeginQuorumEpochResponse.parse(readable, version); + return BeginQuorumEpochResponse.parse(responseBuffer, version); case END_QUORUM_EPOCH: - return EndQuorumEpochResponse.parse(readable, version); + return EndQuorumEpochResponse.parse(responseBuffer, version); case DESCRIBE_QUORUM: - return DescribeQuorumResponse.parse(readable, version); + return DescribeQuorumResponse.parse(responseBuffer, version); case ALTER_PARTITION: - return AlterPartitionResponse.parse(readable, version); + return AlterPartitionResponse.parse(responseBuffer, version); case UPDATE_FEATURES: - return UpdateFeaturesResponse.parse(readable, version); + return UpdateFeaturesResponse.parse(responseBuffer, version); case ENVELOPE: - return EnvelopeResponse.parse(readable, version); + return EnvelopeResponse.parse(responseBuffer, version); case FETCH_SNAPSHOT: - return FetchSnapshotResponse.parse(readable, version); + return FetchSnapshotResponse.parse(responseBuffer, version); case DESCRIBE_CLUSTER: - return DescribeClusterResponse.parse(readable, version); + return DescribeClusterResponse.parse(responseBuffer, version); case DESCRIBE_PRODUCERS: - return DescribeProducersResponse.parse(readable, version); + return DescribeProducersResponse.parse(responseBuffer, version); case BROKER_REGISTRATION: - return BrokerRegistrationResponse.parse(readable, version); + return BrokerRegistrationResponse.parse(responseBuffer, version); case BROKER_HEARTBEAT: - return BrokerHeartbeatResponse.parse(readable, version); + return BrokerHeartbeatResponse.parse(responseBuffer, version); case UNREGISTER_BROKER: - return UnregisterBrokerResponse.parse(readable, version); + return UnregisterBrokerResponse.parse(responseBuffer, version); case DESCRIBE_TRANSACTIONS: - return DescribeTransactionsResponse.parse(readable, version); + return DescribeTransactionsResponse.parse(responseBuffer, version); case LIST_TRANSACTIONS: - return ListTransactionsResponse.parse(readable, version); + return ListTransactionsResponse.parse(responseBuffer, version); case ALLOCATE_PRODUCER_IDS: - return AllocateProducerIdsResponse.parse(readable, version); + return AllocateProducerIdsResponse.parse(responseBuffer, version); case CONSUMER_GROUP_HEARTBEAT: - return ConsumerGroupHeartbeatResponse.parse(readable, version); + return ConsumerGroupHeartbeatResponse.parse(responseBuffer, version); case CONSUMER_GROUP_DESCRIBE: - return ConsumerGroupDescribeResponse.parse(readable, version); + return ConsumerGroupDescribeResponse.parse(responseBuffer, version); case CONTROLLER_REGISTRATION: - return ControllerRegistrationResponse.parse(readable, version); + return ControllerRegistrationResponse.parse(responseBuffer, version); case GET_TELEMETRY_SUBSCRIPTIONS: - return GetTelemetrySubscriptionsResponse.parse(readable, version); + return GetTelemetrySubscriptionsResponse.parse(responseBuffer, version); case PUSH_TELEMETRY: - return PushTelemetryResponse.parse(readable, version); + return PushTelemetryResponse.parse(responseBuffer, version); case ASSIGN_REPLICAS_TO_DIRS: - return AssignReplicasToDirsResponse.parse(readable, version); - case LIST_CONFIG_RESOURCES: - return ListConfigResourcesResponse.parse(readable, version); + return AssignReplicasToDirsResponse.parse(responseBuffer, version); + case LIST_CLIENT_METRICS_RESOURCES: + return ListClientMetricsResourcesResponse.parse(responseBuffer, version); case DESCRIBE_TOPIC_PARTITIONS: - return DescribeTopicPartitionsResponse.parse(readable, version); + return DescribeTopicPartitionsResponse.parse(responseBuffer, version); case SHARE_GROUP_HEARTBEAT: - return ShareGroupHeartbeatResponse.parse(readable, version); + return ShareGroupHeartbeatResponse.parse(responseBuffer, version); case SHARE_GROUP_DESCRIBE: - return ShareGroupDescribeResponse.parse(readable, version); + return ShareGroupDescribeResponse.parse(responseBuffer, version); case SHARE_FETCH: - return ShareFetchResponse.parse(readable, version); + return ShareFetchResponse.parse(responseBuffer, version); case SHARE_ACKNOWLEDGE: - return ShareAcknowledgeResponse.parse(readable, version); + return ShareAcknowledgeResponse.parse(responseBuffer, version); case ADD_RAFT_VOTER: - return AddRaftVoterResponse.parse(readable, version); + return AddRaftVoterResponse.parse(responseBuffer, version); case REMOVE_RAFT_VOTER: - return RemoveRaftVoterResponse.parse(readable, version); + return RemoveRaftVoterResponse.parse(responseBuffer, version); case UPDATE_RAFT_VOTER: - return UpdateRaftVoterResponse.parse(readable, version); + return UpdateRaftVoterResponse.parse(responseBuffer, version); case INITIALIZE_SHARE_GROUP_STATE: - return InitializeShareGroupStateResponse.parse(readable, version); + return InitializeShareGroupStateResponse.parse(responseBuffer, version); case READ_SHARE_GROUP_STATE: - return ReadShareGroupStateResponse.parse(readable, version); + return ReadShareGroupStateResponse.parse(responseBuffer, version); case WRITE_SHARE_GROUP_STATE: - return WriteShareGroupStateResponse.parse(readable, version); + return WriteShareGroupStateResponse.parse(responseBuffer, version); case DELETE_SHARE_GROUP_STATE: - return DeleteShareGroupStateResponse.parse(readable, version); + return DeleteShareGroupStateResponse.parse(responseBuffer, version); case READ_SHARE_GROUP_STATE_SUMMARY: - return ReadShareGroupStateSummaryResponse.parse(readable, version); - case STREAMS_GROUP_HEARTBEAT: - return StreamsGroupHeartbeatResponse.parse(readable, version); - case STREAMS_GROUP_DESCRIBE: - return StreamsGroupDescribeResponse.parse(readable, version); - case DESCRIBE_SHARE_GROUP_OFFSETS: - return DescribeShareGroupOffsetsResponse.parse(readable, version); - case ALTER_SHARE_GROUP_OFFSETS: - return AlterShareGroupOffsetsResponse.parse(readable, version); - case DELETE_SHARE_GROUP_OFFSETS: - return DeleteShareGroupOffsetsResponse.parse(readable, version); + return ReadShareGroupStateSummaryResponse.parse(responseBuffer, version); default: throw new AssertionError(String.format("ApiKey %s is not currently handled in `parseResponse`, the " + "code should be updated to do so.", apiKey)); diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnRequest.java index db4de2c860d9d..1e5f9862178bf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.AddOffsetsToTxnRequestData; import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class AddOffsetsToTxnRequest extends AbstractRequest { @@ -62,7 +64,7 @@ public AddOffsetsToTxnResponse getErrorResponse(int throttleTimeMs, Throwable e) .setThrottleTimeMs(throttleTimeMs)); } - public static AddOffsetsToTxnRequest parse(Readable readable, short version) { - return new AddOffsetsToTxnRequest(new AddOffsetsToTxnRequestData(readable, version), version); + public static AddOffsetsToTxnRequest parse(ByteBuffer buffer, short version) { + return new AddOffsetsToTxnRequest(new AddOffsetsToTxnRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java index ad3946b432b6f..d90afd04ddcde 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddOffsetsToTxnResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; /** @@ -65,8 +66,8 @@ public AddOffsetsToTxnResponseData data() { return data; } - public static AddOffsetsToTxnResponse parse(Readable readable, short version) { - return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData(readable, version)); + public static AddOffsetsToTxnResponse parse(ByteBuffer buffer, short version) { + return new AddOffsetsToTxnResponse(new AddOffsetsToTxnResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequest.java index ec8b848f4593e..1ae84c20c6145 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnRequest.java @@ -29,9 +29,10 @@ import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult; import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResultCollection; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -197,7 +198,7 @@ private AddPartitionsToTxnTopicResultCollection errorResponseForTopics(AddPartit return topicResults; } - public static AddPartitionsToTxnRequest parse(Readable readable, short version) { - return new AddPartitionsToTxnRequest(new AddPartitionsToTxnRequestData(readable, version), version); + public static AddPartitionsToTxnRequest parse(ByteBuffer buffer, short version) { + return new AddPartitionsToTxnRequest(new AddPartitionsToTxnRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java index 645fd667186b8..0abf85bf0239a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponse.java @@ -24,9 +24,10 @@ import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResult; import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnTopicResultCollection; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -149,8 +150,8 @@ public AddPartitionsToTxnResponseData data() { return data; } - public static AddPartitionsToTxnResponse parse(Readable readable, short version) { - return new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData(readable, version)); + public static AddPartitionsToTxnResponse parse(ByteBuffer buffer, short version) { + return new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java index b7c2dbd8e475d..2d385d861c4ac 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.AddRaftVoterRequestData; import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class AddRaftVoterRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -65,9 +67,9 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { setThrottleTimeMs(throttleTimeMs)); } - public static AddRaftVoterRequest parse(Readable readable, short version) { + public static AddRaftVoterRequest parse(ByteBuffer buffer, short version) { return new AddRaftVoterRequest( - new AddRaftVoterRequestData(readable, version), + new AddRaftVoterRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java index 52a0cb05feb76..ab0600b618406 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AddRaftVoterResponse.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -57,8 +58,8 @@ public Map errorCounts() { } } - public static AddRaftVoterResponse parse(Readable readable, short version) { + public static AddRaftVoterResponse parse(ByteBuffer buffer, short version) { return new AddRaftVoterResponse( - new AddRaftVoterResponseData(readable, version)); + new AddRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsRequest.java index cf58d2eaf6e0d..7938f92df56d9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.AllocateProducerIdsRequestData; import org.apache.kafka.common.message.AllocateProducerIdsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class AllocateProducerIdsRequest extends AbstractRequest { private final AllocateProducerIdsRequestData data; @@ -63,8 +65,8 @@ public String toString() { } } - public static AllocateProducerIdsRequest parse(Readable readable, short version) { + public static AllocateProducerIdsRequest parse(ByteBuffer buffer, short version) { return new AllocateProducerIdsRequest(new AllocateProducerIdsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java index 4c47651193188..2511e2b2db320 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AllocateProducerIdsResponse.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.AllocateProducerIdsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -64,8 +65,8 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static AllocateProducerIdsResponse parse(Readable readable, short version) { + public static AllocateProducerIdsResponse parse(ByteBuffer buffer, short version) { return new AllocateProducerIdsResponse(new AllocateProducerIdsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasRequest.java index 3df308f594498..3b06348a9fc66 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasRequest.java @@ -22,11 +22,12 @@ import org.apache.kafka.common.message.AlterClientQuotasRequestData.OpData; import org.apache.kafka.common.message.AlterClientQuotasResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.quota.ClientQuotaAlteration; import org.apache.kafka.common.quota.ClientQuotaEntity; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; @@ -137,7 +138,7 @@ public AlterClientQuotasResponse getErrorResponse(int throttleTimeMs, Throwable return new AlterClientQuotasResponse(responseData); } - public static AlterClientQuotasRequest parse(Readable readable, short version) { - return new AlterClientQuotasRequest(new AlterClientQuotasRequestData(readable, version), version); + public static AlterClientQuotasRequest parse(ByteBuffer buffer, short version) { + return new AlterClientQuotasRequest(new AlterClientQuotasRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java index 9c4990dd3c719..fc56db7e73658 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterClientQuotasResponse.java @@ -21,12 +21,12 @@ import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntityData; import org.apache.kafka.common.message.AlterClientQuotasResponseData.EntryData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.quota.ClientQuotaEntity; +import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -74,7 +74,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); data.entries().forEach(entry -> updateErrorCounts(counts, Errors.forCode(entry.errorCode())) ); @@ -96,8 +96,8 @@ private static List toEntityData(ClientQuotaEntity entity) { return entityData; } - public static AlterClientQuotasResponse parse(Readable readable, short version) { - return new AlterClientQuotasResponse(new AlterClientQuotasResponseData(readable, version)); + public static AlterClientQuotasResponse parse(ByteBuffer buffer, short version) { + return new AlterClientQuotasResponse(new AlterClientQuotasResponseData(new ByteBufferAccessor(buffer), version)); } public static AlterClientQuotasResponse fromQuotaEntities(Map result, int throttleTimeMs) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsRequest.java index f36fe3bf6fb2f..b4d35d52ae35f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsRequest.java @@ -21,8 +21,9 @@ import org.apache.kafka.common.message.AlterConfigsRequestData; import org.apache.kafka.common.message.AlterConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.Map; import java.util.Objects; @@ -131,7 +132,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { } - public static AlterConfigsRequest parse(Readable readable, short version) { - return new AlterConfigsRequest(new AlterConfigsRequestData(readable, version), version); + public static AlterConfigsRequest parse(ByteBuffer buffer, short version) { + return new AlterConfigsRequest(new AlterConfigsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java index 5f7b9421cb8e9..1668c2446bc77 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterConfigsResponse.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.AlterConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; import java.util.stream.Collectors; @@ -64,8 +65,8 @@ public AlterConfigsResponseData data() { return data; } - public static AlterConfigsResponse parse(Readable readable, short version) { - return new AlterConfigsResponse(new AlterConfigsResponseData(readable, version)); + public static AlterConfigsResponse parse(ByteBuffer buffer, short version) { + return new AlterConfigsResponse(new AlterConfigsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsRequest.java index af82771c0ca43..2d289cc1497e1 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsRequest.java @@ -17,15 +17,15 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData; import org.apache.kafka.common.message.AlterPartitionReassignmentsRequestData.ReassignableTopic; import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData; import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse; import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -42,11 +42,6 @@ public Builder(AlterPartitionReassignmentsRequestData data) { @Override public AlterPartitionReassignmentsRequest build(short version) { - if (!data.allowReplicationFactorChange() && version < 1) { - throw new UnsupportedVersionException("The broker does not support the AllowReplicationFactorChange " + - "option for the AlterPartitionReassignments API. Consider re-sending the request without the " + - "option or updating the server version"); - } return new AlterPartitionReassignmentsRequest(data, version); } @@ -63,9 +58,9 @@ private AlterPartitionReassignmentsRequest(AlterPartitionReassignmentsRequestDat this.data = data; } - public static AlterPartitionReassignmentsRequest parse(Readable readable, short version) { + public static AlterPartitionReassignmentsRequest parse(ByteBuffer buffer, short version) { return new AlterPartitionReassignmentsRequest(new AlterPartitionReassignmentsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } public AlterPartitionReassignmentsRequestData data() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java index 691a399761dec..7d6c340fd147b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionReassignmentsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class AlterPartitionReassignmentsResponse extends AbstractResponse { @@ -34,9 +35,9 @@ public AlterPartitionReassignmentsResponse(AlterPartitionReassignmentsResponseDa this.data = data; } - public static AlterPartitionReassignmentsResponse parse(Readable readable, short version) { + public static AlterPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) { return new AlterPartitionReassignmentsResponse( - new AlterPartitionReassignmentsResponseData(readable, version)); + new AlterPartitionReassignmentsResponseData(new ByteBufferAccessor(buffer), version)); } @Override @@ -61,7 +62,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach(topicResponse -> diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java index 2d181f4876629..8b5031f2a68a2 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionRequest.java @@ -21,9 +21,10 @@ import org.apache.kafka.common.message.AlterPartitionRequestData.BrokerState; import org.apache.kafka.common.message.AlterPartitionResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -53,8 +54,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(Errors.forException(e).code())); } - public static AlterPartitionRequest parse(Readable readable, short version) { - return new AlterPartitionRequest(new AlterPartitionRequestData(readable, version), version); + public static AlterPartitionRequest parse(ByteBuffer buffer, short version) { + return new AlterPartitionRequest(new AlterPartitionRequestData(new ByteBufferAccessor(buffer), version), version); } public static class Builder extends AbstractRequest.Builder { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java index 580be92b8999e..38b8eaf275bbd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterPartitionResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.AlterPartitionResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class AlterPartitionResponse extends AbstractResponse { @@ -41,7 +42,7 @@ public AlterPartitionResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.topics().forEach(topicResponse -> topicResponse.partitions().forEach(partitionResponse -> updateErrorCounts(counts, Errors.forCode(partitionResponse.errorCode())) @@ -59,7 +60,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static AlterPartitionResponse parse(Readable readable, short version) { - return new AlterPartitionResponse(new AlterPartitionResponseData(readable, version)); + public static AlterPartitionResponse parse(ByteBuffer buffer, short version) { + return new AlterPartitionResponse(new AlterPartitionResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java index 6e24f437af90c..6f909786e9ef9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsRequest.java @@ -23,9 +23,10 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; import java.util.stream.Collectors; @@ -86,7 +87,7 @@ public Map partitionDirs() { return result; } - public static AlterReplicaLogDirsRequest parse(Readable readable, short version) { - return new AlterReplicaLogDirsRequest(new AlterReplicaLogDirsRequestData(readable, version), version); + public static AlterReplicaLogDirsRequest parse(ByteBuffer buffer, short version) { + return new AlterReplicaLogDirsRequest(new AlterReplicaLogDirsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java index 755fc98038a26..0c38a83ee3d7e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterReplicaLogDirsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -59,15 +60,15 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.results().forEach(topicResult -> topicResult.partitions().forEach(partitionResult -> updateErrorCounts(errorCounts, Errors.forCode(partitionResult.errorCode())))); return errorCounts; } - public static AlterReplicaLogDirsResponse parse(Readable readable, short version) { - return new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData(readable, version)); + public static AlterReplicaLogDirsResponse parse(ByteBuffer buffer, short version) { + return new AlterReplicaLogDirsResponse(new AlterReplicaLogDirsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java index 7f2991cfad7f9..1ca7ea77aa422 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsRequest.java @@ -17,14 +17,11 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData; -import org.apache.kafka.common.message.AlterUserScramCredentialsRequestDataJsonConverter; import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.List; import java.util.Set; import java.util.stream.Collectors; @@ -58,8 +55,8 @@ private AlterUserScramCredentialsRequest(AlterUserScramCredentialsRequestData da this.data = data; } - public static AlterUserScramCredentialsRequest parse(Readable readable, short version) { - return new AlterUserScramCredentialsRequest(new AlterUserScramCredentialsRequestData(readable, version), version); + public static AlterUserScramCredentialsRequest parse(ByteBuffer buffer, short version) { + return new AlterUserScramCredentialsRequest(new AlterUserScramCredentialsRequestData(new ByteBufferAccessor(buffer), version), version); } @Override @@ -85,16 +82,4 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .collect(Collectors.toList()); return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData().setResults(results)); } - - // Do not print salt or saltedPassword - @Override - public String toString() { - JsonNode json = AlterUserScramCredentialsRequestDataJsonConverter.write(data, version()).deepCopy(); - - for (JsonNode upsertion : json.get("upsertions")) { - ((ObjectNode) upsertion).put("salt", ""); - ((ObjectNode) upsertion).put("saltedPassword", ""); - } - return AlterUserScramCredentialsRequestDataJsonConverter.read(json, version()).toString(); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java index bc448a9e1042f..86c9b006a2ce0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AlterUserScramCredentialsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class AlterUserScramCredentialsResponse extends AbstractResponse { @@ -57,7 +58,7 @@ public Map errorCounts() { return errorCounts(data.results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static AlterUserScramCredentialsResponse parse(Readable readable, short version) { - return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData(readable, version)); + public static AlterUserScramCredentialsResponse parse(ByteBuffer buffer, short version) { + return new AlterUserScramCredentialsResponse(new AlterUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java index 1bdb0903c7d7d..f7737750cd81c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsRequest.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.message.ApiVersionsResponseData; import org.apache.kafka.common.message.ApiVersionsResponseData.ApiVersionCollection; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.utils.AppInfoParser; +import java.nio.ByteBuffer; import java.util.regex.Pattern; public class ApiVersionsRequest extends AbstractRequest { @@ -127,8 +128,8 @@ public ApiVersionsResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new ApiVersionsResponse(data); } - public static ApiVersionsRequest parse(Readable readable, short version) { - return new ApiVersionsRequest(new ApiVersionsRequestData(readable, version), version); + public static ApiVersionsRequest parse(ByteBuffer buffer, short version) { + return new ApiVersionsRequest(new ApiVersionsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java index daaa7ba2fce7b..324e527984d08 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ApiVersionsResponse.java @@ -29,9 +29,10 @@ import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKey; import org.apache.kafka.common.message.ApiVersionsResponseData.SupportedFeatureKeyCollection; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; import java.util.Objects; import java.util.Optional; @@ -151,18 +152,19 @@ public boolean zkMigrationReady() { return data.zkMigrationReady(); } - public static ApiVersionsResponse parse(Readable readable, short version) { + public static ApiVersionsResponse parse(ByteBuffer buffer, short version) { // Fallback to version 0 for ApiVersions response. If a client sends an ApiVersionsRequest // using a version higher than that supported by the broker, a version 0 response is sent // to the client indicating UNSUPPORTED_VERSION. When the client receives the response, it // falls back while parsing it which means that the version received by this // method is not necessarily the real one. It may be version 0 as well. - Readable readableCopy = readable.slice(); + int prev = buffer.position(); try { - return new ApiVersionsResponse(new ApiVersionsResponseData(readable, version)); + return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), version)); } catch (RuntimeException e) { + buffer.position(prev); if (version != 0) - return new ApiVersionsResponse(new ApiVersionsResponseData(readableCopy, (short) 0)); + return new ApiVersionsResponse(new ApiVersionsResponseData(new ByteBufferAccessor(buffer), (short) 0)); else throw e; } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsRequest.java index 71200bca70198..5941181ed81dc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.AssignReplicasToDirsRequestData; import org.apache.kafka.common.message.AssignReplicasToDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class AssignReplicasToDirsRequest extends AbstractRequest { @@ -73,8 +75,8 @@ public AssignReplicasToDirsRequestData data() { return data; } - public static AssignReplicasToDirsRequest parse(Readable readable, short version) { + public static AssignReplicasToDirsRequest parse(ByteBuffer buffer, short version) { return new AssignReplicasToDirsRequest(new AssignReplicasToDirsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java index 84f86d058ec65..90912956029cc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/AssignReplicasToDirsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.AssignReplicasToDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -53,8 +54,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static AssignReplicasToDirsResponse parse(Readable readable, short version) { + public static AssignReplicasToDirsResponse parse(ByteBuffer buffer, short version) { return new AssignReplicasToDirsResponse(new AssignReplicasToDirsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochRequest.java index f3375da9c4171..58998933df8f3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.BeginQuorumEpochRequestData; import org.apache.kafka.common.message.BeginQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; public class BeginQuorumEpochRequest extends AbstractRequest { @@ -63,8 +64,8 @@ public BeginQuorumEpochResponse getErrorResponse(int throttleTimeMs, Throwable e .setErrorCode(Errors.forException(e).code())); } - public static BeginQuorumEpochRequest parse(Readable readable, short version) { - return new BeginQuorumEpochRequest(new BeginQuorumEpochRequestData(readable, version), version); + public static BeginQuorumEpochRequest parse(ByteBuffer buffer, short version) { + return new BeginQuorumEpochRequest(new BeginQuorumEpochRequestData(new ByteBufferAccessor(buffer), version), version); } public static BeginQuorumEpochRequestData singletonRequest( diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java index 7f77c10e93859..b96728351af96 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BeginQuorumEpochResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.BeginQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -48,7 +49,7 @@ public BeginQuorumEpochResponse(BeginQuorumEpochResponseData data) { @Override public Map errorCounts() { - Map errors = new EnumMap<>(Errors.class); + Map errors = new HashMap<>(); errors.put(Errors.forCode(data.errorCode()), 1); @@ -76,8 +77,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static BeginQuorumEpochResponse parse(Readable readable, short version) { - return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(readable, version)); + public static BeginQuorumEpochResponse parse(ByteBuffer buffer, short version) { + return new BeginQuorumEpochResponse(new BeginQuorumEpochResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatRequest.java index 61962a6ed57c3..3c3f35089941a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.BrokerHeartbeatRequestData; import org.apache.kafka.common.message.BrokerHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class BrokerHeartbeatRequest extends AbstractRequest { @@ -63,8 +65,8 @@ public BrokerHeartbeatResponse getErrorResponse(int throttleTimeMs, Throwable e) .setErrorCode(error.code())); } - public static BrokerHeartbeatRequest parse(Readable readable, short version) { - return new BrokerHeartbeatRequest(new BrokerHeartbeatRequestData(readable, version), + public static BrokerHeartbeatRequest parse(ByteBuffer buffer, short version) { + return new BrokerHeartbeatRequest(new BrokerHeartbeatRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java index f46e56ca50eef..4c8b3aafc4dd2 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerHeartbeatResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.BrokerHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class BrokerHeartbeatResponse extends AbstractResponse { @@ -50,13 +51,13 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); errorCounts.put(Errors.forCode(data.errorCode()), 1); return errorCounts; } - public static BrokerHeartbeatResponse parse(Readable readable, short version) { - return new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData(readable, version)); + public static BrokerHeartbeatResponse parse(ByteBuffer buffer, short version) { + return new BrokerHeartbeatResponse(new BrokerHeartbeatResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationRequest.java index c84bd429bce21..93dbf2ac97217 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.BrokerRegistrationRequestData; import org.apache.kafka.common.message.BrokerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class BrokerRegistrationRequest extends AbstractRequest { @@ -80,8 +82,8 @@ public BrokerRegistrationResponse getErrorResponse(int throttleTimeMs, Throwable .setErrorCode(error.code())); } - public static BrokerRegistrationRequest parse(Readable readable, short version) { - return new BrokerRegistrationRequest(new BrokerRegistrationRequestData(readable, version), + public static BrokerRegistrationRequest parse(ByteBuffer buffer, short version) { + return new BrokerRegistrationRequest(new BrokerRegistrationRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java index be8a2f1f50601..8b6121c376339 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/BrokerRegistrationResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.BrokerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class BrokerRegistrationResponse extends AbstractResponse { @@ -50,13 +51,13 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); errorCounts.put(Errors.forCode(data.errorCode()), 1); return errorCounts; } - public static BrokerRegistrationResponse parse(Readable readable, short version) { - return new BrokerRegistrationResponse(new BrokerRegistrationResponseData(readable, version)); + public static BrokerRegistrationResponse parse(ByteBuffer buffer, short version) { + return new BrokerRegistrationResponse(new BrokerRegistrationResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java index 2806e22102b67..48a03bd8bb680 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeRequest.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.ConsumerGroupDescribeRequestData; import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; import java.util.stream.Collectors; @@ -78,9 +79,9 @@ public ConsumerGroupDescribeRequestData data() { return data; } - public static ConsumerGroupDescribeRequest parse(Readable readable, short version) { + public static ConsumerGroupDescribeRequest parse(ByteBuffer buffer, short version) { return new ConsumerGroupDescribeRequest( - new ConsumerGroupDescribeRequestData(readable, version), + new ConsumerGroupDescribeRequestData(new ByteBufferAccessor(buffer), version), version ); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java index 25ec8b6fbe235..70456e7b0240b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupDescribeResponse.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -34,7 +35,6 @@ * - {@link Errors#INVALID_REQUEST} * - {@link Errors#INVALID_GROUP_ID} * - {@link Errors#GROUP_ID_NOT_FOUND} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} */ public class ConsumerGroupDescribeResponse extends AbstractResponse { @@ -52,7 +52,7 @@ public ConsumerGroupDescribeResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); data.groups().forEach( group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) ); @@ -69,9 +69,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ConsumerGroupDescribeResponse parse(Readable readable, short version) { + public static ConsumerGroupDescribeResponse parse(ByteBuffer buffer, short version) { return new ConsumerGroupDescribeResponse( - new ConsumerGroupDescribeResponseData(readable, version) + new ConsumerGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java index 654c07721319b..5b09131d49470 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.ConsumerGroupHeartbeatRequestData; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class ConsumerGroupHeartbeatRequest extends AbstractRequest { @@ -95,8 +97,8 @@ public ConsumerGroupHeartbeatRequestData data() { return data; } - public static ConsumerGroupHeartbeatRequest parse(Readable readable, short version) { + public static ConsumerGroupHeartbeatRequest parse(ByteBuffer buffer, short version) { return new ConsumerGroupHeartbeatRequest(new ConsumerGroupHeartbeatRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java index 1d6fb4e682d67..76f89ed4df824 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ConsumerGroupHeartbeatResponse.java @@ -16,18 +16,14 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ConsumerGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; /** * Possible error codes. @@ -43,7 +39,6 @@ * - {@link Errors#UNRELEASED_INSTANCE_ID} * - {@link Errors#GROUP_MAX_SIZE_REACHED} * - {@link Errors#INVALID_REGULAR_EXPRESSION} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} */ public class ConsumerGroupHeartbeatResponse extends AbstractResponse { private final ConsumerGroupHeartbeatResponseData data; @@ -73,21 +68,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ConsumerGroupHeartbeatResponse parse(Readable readable, short version) { + public static ConsumerGroupHeartbeatResponse parse(ByteBuffer buffer, short version) { return new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData( - readable, version)); - } - - public static ConsumerGroupHeartbeatResponseData.Assignment createAssignment( - Map> assignment - ) { - List topicPartitions = assignment.entrySet().stream() - .map(keyValue -> new ConsumerGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(keyValue.getKey()) - .setPartitions(new ArrayList<>(keyValue.getValue()))) - .collect(Collectors.toList()); - - return new ConsumerGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(topicPartitions); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java index 5eef63bdb179f..34cbef09294f8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.ControllerRegistrationRequestData; import org.apache.kafka.common.message.ControllerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class ControllerRegistrationRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -63,9 +65,9 @@ public ControllerRegistrationResponse getErrorResponse(int throttleTimeMs, Throw .setErrorMessage(error.message())); } - public static ControllerRegistrationRequest parse(Readable readable, short version) { + public static ControllerRegistrationRequest parse(ByteBuffer buffer, short version) { return new ControllerRegistrationRequest( - new ControllerRegistrationRequestData(readable, version), + new ControllerRegistrationRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java index 9cc53db15a9a4..d44e915b5fa5a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ControllerRegistrationResponse.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.ControllerRegistrationResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -53,8 +54,8 @@ public Map errorCounts() { return Collections.singletonMap(Errors.forCode(data.errorCode()), 1); } - public static ControllerRegistrationResponse parse(Readable readable, short version) { + public static ControllerRegistrationResponse parse(ByteBuffer buffer, short version) { return new ControllerRegistrationResponse( - new ControllerRegistrationResponseData(readable, version)); + new ControllerRegistrationResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsRequest.java index 5d437033208fd..29df8326bc5a6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsRequest.java @@ -27,11 +27,12 @@ import org.apache.kafka.common.message.CreateAclsResponseData; import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; @@ -82,8 +83,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable .setResults(results)); } - public static CreateAclsRequest parse(Readable readable, short version) { - return new CreateAclsRequest(new CreateAclsRequestData(readable, version), version); + public static CreateAclsRequest parse(ByteBuffer buffer, short version) { + return new CreateAclsRequest(new CreateAclsRequestData(new ByteBufferAccessor(buffer), version), version); } private void validate(CreateAclsRequestData data) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java index 2f2877ad510f7..cef7b73ac27e9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateAclsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.CreateAclsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -56,8 +57,8 @@ public Map errorCounts() { return errorCounts(results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static CreateAclsResponse parse(Readable readable, short version) { - return new CreateAclsResponse(new CreateAclsResponseData(readable, version)); + public static CreateAclsResponse parse(ByteBuffer buffer, short version) { + return new CreateAclsResponse(new CreateAclsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenRequest.java index 97fe5e2d175ac..b48f84f1fa6dd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenRequest.java @@ -18,10 +18,12 @@ import org.apache.kafka.common.message.CreateDelegationTokenRequestData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; +import java.nio.ByteBuffer; + public class CreateDelegationTokenRequest extends AbstractRequest { private final CreateDelegationTokenRequestData data; @@ -31,8 +33,8 @@ private CreateDelegationTokenRequest(CreateDelegationTokenRequestData data, shor this.data = data; } - public static CreateDelegationTokenRequest parse(Readable readable, short version) { - return new CreateDelegationTokenRequest(new CreateDelegationTokenRequestData(readable, version), + public static CreateDelegationTokenRequest parse(ByteBuffer buffer, short version) { + return new CreateDelegationTokenRequest(new CreateDelegationTokenRequestData(new ByteBufferAccessor(buffer), version), version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java index ce577d48d9779..0a9f9a8991bdc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateDelegationTokenResponse.java @@ -18,8 +18,8 @@ import org.apache.kafka.common.message.CreateDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; import java.nio.ByteBuffer; @@ -34,9 +34,9 @@ public CreateDelegationTokenResponse(CreateDelegationTokenResponseData data) { this.data = data; } - public static CreateDelegationTokenResponse parse(Readable readable, short version) { + public static CreateDelegationTokenResponse parse(ByteBuffer buffer, short version) { return new CreateDelegationTokenResponse( - new CreateDelegationTokenResponseData(readable, version)); + new CreateDelegationTokenResponseData(new ByteBufferAccessor(buffer), version)); } public static CreateDelegationTokenResponse prepareResponse(int version, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsRequest.java index d7b7adf344209..d371bbb216995 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsRequest.java @@ -22,7 +22,9 @@ import org.apache.kafka.common.message.CreatePartitionsResponseData; import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; + +import java.nio.ByteBuffer; public class CreatePartitionsRequest extends AbstractRequest { @@ -74,7 +76,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new CreatePartitionsResponse(response); } - public static CreatePartitionsRequest parse(Readable readable, short version) { - return new CreatePartitionsRequest(new CreatePartitionsRequestData(readable, version), version); + public static CreatePartitionsRequest parse(ByteBuffer buffer, short version) { + return new CreatePartitionsRequest(new CreatePartitionsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java index 86d8672e19b34..2dcd2b200cadd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreatePartitionsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.CreatePartitionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class CreatePartitionsResponse extends AbstractResponse { @@ -41,15 +42,15 @@ public CreatePartitionsResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); data.results().forEach(result -> updateErrorCounts(counts, Errors.forCode(result.errorCode())) ); return counts; } - public static CreatePartitionsResponse parse(Readable readable, short version) { - return new CreatePartitionsResponse(new CreatePartitionsResponseData(readable, version)); + public static CreatePartitionsResponse parse(ByteBuffer buffer, short version) { + return new CreatePartitionsResponse(new CreatePartitionsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsRequest.java index ca29ba59e36a5..6f713fcdaf3c2 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsRequest.java @@ -22,8 +22,9 @@ import org.apache.kafka.common.message.CreateTopicsResponseData; import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.List; import java.util.stream.Collectors; @@ -108,7 +109,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new CreateTopicsResponse(response); } - public static CreateTopicsRequest parse(Readable readable, short version) { - return new CreateTopicsRequest(new CreateTopicsRequestData(readable, version), version); + public static CreateTopicsRequest parse(ByteBuffer buffer, short version) { + return new CreateTopicsRequest(new CreateTopicsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java index be36ff7df5b77..da011e224ed08 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/CreateTopicsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.CreateTopicsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class CreateTopicsResponse extends AbstractResponse { @@ -66,15 +67,15 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); data.topics().forEach(result -> updateErrorCounts(counts, Errors.forCode(result.errorCode())) ); return counts; } - public static CreateTopicsResponse parse(Readable readable, short version) { - return new CreateTopicsResponse(new CreateTopicsResponseData(readable, version)); + public static CreateTopicsResponse parse(ByteBuffer buffer, short version) { + return new CreateTopicsResponse(new CreateTopicsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsRequest.java index bb7db5b78d892..fea08e38be8f0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsRequest.java @@ -26,11 +26,12 @@ import org.apache.kafka.common.message.DeleteAclsResponseData; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.resource.ResourceType; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -115,8 +116,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable .setFilterResults(filterResults), version()); } - public static DeleteAclsRequest parse(Readable readable, short version) { - return new DeleteAclsRequest(new DeleteAclsRequestData(readable, version), version); + public static DeleteAclsRequest parse(ByteBuffer buffer, short version) { + return new DeleteAclsRequest(new DeleteAclsRequestData(new ByteBufferAccessor(buffer), version), version); } public static DeleteAclsFilter deleteAclsFilter(AclBindingFilter filter) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java index 789c00148699c..6b759d7cee5f5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteAclsResponse.java @@ -25,8 +25,8 @@ import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; @@ -35,6 +35,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -74,8 +75,8 @@ public Map errorCounts() { return errorCounts(filterResults().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static DeleteAclsResponse parse(Readable readable, short version) { - return new DeleteAclsResponse(new DeleteAclsResponseData(readable, version), version); + public static DeleteAclsResponse parse(ByteBuffer buffer, short version) { + return new DeleteAclsResponse(new DeleteAclsResponseData(new ByteBufferAccessor(buffer), version), version); } public String toString() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsRequest.java index feea9c431293e..6bee4eb8937b4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsRequest.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.DeleteGroupsRequestData; import org.apache.kafka.common.message.DeleteGroupsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; public class DeleteGroupsRequest extends AbstractRequest { @@ -59,8 +60,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { ); } - public static DeleteGroupsRequest parse(Readable readable, short version) { - return new DeleteGroupsRequest(new DeleteGroupsRequestData(readable, version), version); + public static DeleteGroupsRequest parse(ByteBuffer buffer, short version) { + return new DeleteGroupsRequest(new DeleteGroupsRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java index d1939581a08e0..3bbb08d59fb9b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteGroupsResponse.java @@ -19,10 +19,10 @@ import org.apache.kafka.common.message.DeleteGroupsResponseData; import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -69,15 +69,15 @@ public Errors get(String group) throws IllegalArgumentException { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); data.results().forEach(result -> updateErrorCounts(counts, Errors.forCode(result.errorCode())) ); return counts; } - public static DeleteGroupsResponse parse(Readable readable, short version) { - return new DeleteGroupsResponse(new DeleteGroupsResponseData(readable, version)); + public static DeleteGroupsResponse parse(ByteBuffer buffer, short version) { + return new DeleteGroupsResponse(new DeleteGroupsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsRequest.java index 8b56f6963ae97..92d9c1bfc41fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsRequest.java @@ -22,8 +22,10 @@ import org.apache.kafka.common.message.DeleteRecordsResponseData; import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsTopicResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class DeleteRecordsRequest extends AbstractRequest { @@ -77,7 +79,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new DeleteRecordsResponse(result); } - public static DeleteRecordsRequest parse(Readable readable, short version) { - return new DeleteRecordsRequest(new DeleteRecordsRequestData(readable, version), version); + public static DeleteRecordsRequest parse(ByteBuffer buffer, short version) { + return new DeleteRecordsRequest(new DeleteRecordsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java index b440dec72dc4e..5084681f5373f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteRecordsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.DeleteRecordsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class DeleteRecordsResponse extends AbstractResponse { @@ -62,7 +63,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.topics().forEach(topicResponses -> topicResponses.partitions().forEach(response -> updateErrorCounts(errorCounts, Errors.forCode(response.errorCode())) @@ -71,8 +72,8 @@ public Map errorCounts() { return errorCounts; } - public static DeleteRecordsResponse parse(Readable readable, short version) { - return new DeleteRecordsResponse(new DeleteRecordsResponseData(readable, version)); + public static DeleteRecordsResponse parse(ByteBuffer buffer, short version) { + return new DeleteRecordsResponse(new DeleteRecordsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java index c15e76328e1eb..074ace1cfea35 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.DeleteShareGroupStateRequestData; import org.apache.kafka.common.message.DeleteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -33,7 +34,11 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code())) - .collect(Collectors.toList())))); + topicResult -> results.add(new DeleteShareGroupStateResponseData.DeleteStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new DeleteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code())) + .collect(Collectors.toList())))); return new DeleteShareGroupStateResponse(new DeleteShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -75,10 +80,10 @@ public DeleteShareGroupStateRequestData data() { return data; } - public static DeleteShareGroupStateRequest parse(Readable readable, short version) { + public static DeleteShareGroupStateRequest parse(ByteBuffer buffer, short version) { return new DeleteShareGroupStateRequest( - new DeleteShareGroupStateRequestData(readable, version), - version + new DeleteShareGroupStateRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java index e7da3e048c4a0..78c87a6398748 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteShareGroupStateResponse.java @@ -17,16 +17,13 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.DeleteShareGroupStateRequestData; import org.apache.kafka.common.message.DeleteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; +import java.nio.ByteBuffer; import java.util.HashMap; -import java.util.List; import java.util.Map; public class DeleteShareGroupStateResponse extends AbstractResponse { @@ -63,63 +60,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static DeleteShareGroupStateResponse parse(Readable readable, short version) { + public static DeleteShareGroupStateResponse parse(ByteBuffer buffer, short version) { return new DeleteShareGroupStateResponse( - new DeleteShareGroupStateResponseData(readable, version) + new DeleteShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) ); } - - public static DeleteShareGroupStateResponseData toResponseData(Uuid topicId, int partitionId) { - return new DeleteShareGroupStateResponseData() - .setResults(List.of( - new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(topicId) - .setPartitions(List.of( - new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId))))); - } - - public static DeleteShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult( - int partitionId, - Errors error, - String errorMessage - ) { - return new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static DeleteShareGroupStateResponseData.DeleteStateResult toResponseDeleteStateResult(Uuid topicId, List partitionResults) { - return new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); - } - - public static DeleteShareGroupStateResponseData.PartitionResult toResponsePartitionResult(int partitionId) { - return new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId); - } - - public static DeleteShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { - return new DeleteShareGroupStateResponseData().setResults( - List.of(new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(topicId) - .setPartitions(List.of(new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); - } - - public static DeleteShareGroupStateResponseData toGlobalErrorResponse(DeleteShareGroupStateRequestData request, Errors error) { - List deleteStateResults = new ArrayList<>(); - request.topics().forEach(topicData -> { - List partitionResults = new ArrayList<>(); - topicData.partitions().forEach(partitionData -> partitionResults.add( - toErrorResponsePartitionResult(partitionData.partition(), error, error.message())) - ); - deleteStateResults.add(toResponseDeleteStateResult(topicData.topicId(), partitionResults)); - }); - return new DeleteShareGroupStateResponseData().setResults(deleteStateResults); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java index b90f853211ddc..97b3a58843745 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsRequest.java @@ -22,8 +22,9 @@ import org.apache.kafka.common.message.DeleteTopicsResponseData; import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -113,8 +114,8 @@ public List topics() { return data.topicNames().stream().map(name -> new DeleteTopicState().setName(name)).collect(Collectors.toList()); } - public static DeleteTopicsRequest parse(Readable readable, short version) { - return new DeleteTopicsRequest(new DeleteTopicsRequestData(readable, version), version); + public static DeleteTopicsRequest parse(ByteBuffer buffer, short version) { + return new DeleteTopicsRequest(new DeleteTopicsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java index efc26026d8449..65a54481ba07e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DeleteTopicsResponse.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.DeleteTopicsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; @@ -61,15 +62,15 @@ public DeleteTopicsResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); data.responses().forEach(result -> updateErrorCounts(counts, Errors.forCode(result.errorCode())) ); return counts; } - public static DeleteTopicsResponse parse(Readable readable, short version) { - return new DeleteTopicsResponse(new DeleteTopicsResponseData(readable, version)); + public static DeleteTopicsResponse parse(ByteBuffer buffer, short version) { + return new DeleteTopicsResponse(new DeleteTopicsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsRequest.java index 8bbeeceabbfbb..1ddf5bf99fc89 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsRequest.java @@ -24,11 +24,13 @@ import org.apache.kafka.common.message.DescribeAclsRequestData; import org.apache.kafka.common.message.DescribeAclsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.resource.ResourceType; +import java.nio.ByteBuffer; + public class DescribeAclsRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -102,8 +104,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable throwable return new DescribeAclsResponse(response, version()); } - public static DescribeAclsRequest parse(Readable readable, short version) { - return new DescribeAclsRequest(new DescribeAclsRequestData(readable, version), version); + public static DescribeAclsRequest parse(ByteBuffer buffer, short version) { + return new DescribeAclsRequest(new DescribeAclsRequestData(new ByteBufferAccessor(buffer), version), version); } public AclBindingFilter filter() { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java index c387dbd4da3b3..424ff563a4c8a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeAclsResponse.java @@ -26,12 +26,13 @@ import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription; import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -88,8 +89,8 @@ public final List acls() { return data.resources(); } - public static DescribeAclsResponse parse(Readable readable, short version) { - return new DescribeAclsResponse(new DescribeAclsResponseData(readable, version), version); + public static DescribeAclsResponse parse(ByteBuffer buffer, short version) { + return new DescribeAclsResponse(new DescribeAclsResponseData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java index c33402a6bec33..3d95f420401d6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasRequest.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.message.DescribeClientQuotasRequestData.ComponentData; import org.apache.kafka.common.message.DescribeClientQuotasResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.quota.ClientQuotaFilter; import org.apache.kafka.common.quota.ClientQuotaFilterComponent; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -119,8 +120,8 @@ public DescribeClientQuotasResponse getErrorResponse(int throttleTimeMs, Throwab .setEntries(null)); } - public static DescribeClientQuotasRequest parse(Readable readable, short version) { - return new DescribeClientQuotasRequest(new DescribeClientQuotasRequestData(readable, version), + public static DescribeClientQuotasRequest parse(ByteBuffer buffer, short version) { + return new DescribeClientQuotasRequest(new DescribeClientQuotasRequestData(new ByteBufferAccessor(buffer), version), version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java index 5d9a980ea3c36..3a052c9fe8eba 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClientQuotasResponse.java @@ -22,10 +22,11 @@ import org.apache.kafka.common.message.DescribeClientQuotasResponseData.EntryData; import org.apache.kafka.common.message.DescribeClientQuotasResponseData.ValueData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.quota.ClientQuotaEntity; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -84,8 +85,8 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static DescribeClientQuotasResponse parse(Readable readable, short version) { - return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData(readable, version)); + public static DescribeClientQuotasResponse parse(ByteBuffer buffer, short version) { + return new DescribeClientQuotasResponse(new DescribeClientQuotasResponseData(new ByteBufferAccessor(buffer), version)); } public static DescribeClientQuotasResponse fromQuotaEntities(Map> entities, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterRequest.java index 32d5e6752ad64..9edd14cb45514 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterRequest.java @@ -20,7 +20,9 @@ import org.apache.kafka.common.message.DescribeClusterRequestData; import org.apache.kafka.common.message.DescribeClusterResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; + +import java.nio.ByteBuffer; public class DescribeClusterRequest extends AbstractRequest { @@ -69,7 +71,7 @@ public String toString(final boolean verbose) { return data.toString(); } - public static DescribeClusterRequest parse(Readable readable, short version) { - return new DescribeClusterRequest(new DescribeClusterRequestData(readable, version), version); + public static DescribeClusterRequest parse(ByteBuffer buffer, short version) { + return new DescribeClusterRequest(new DescribeClusterRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java index 0b8724a57602b..7c892874214e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeClusterResponse.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.message.DescribeClusterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; @@ -62,7 +63,7 @@ public DescribeClusterResponseData data() { return data; } - public static DescribeClusterResponse parse(Readable readable, short version) { - return new DescribeClusterResponse(new DescribeClusterResponseData(readable, version)); + public static DescribeClusterResponse parse(ByteBuffer buffer, short version) { + return new DescribeClusterResponse(new DescribeClusterResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java index 8b03cb6246faf..1bae21a9e9c91 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsRequest.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.DescribeConfigsRequestData; import org.apache.kafka.common.message.DescribeConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.stream.Collectors; public class DescribeConfigsRequest extends AbstractRequest { @@ -66,7 +67,7 @@ public DescribeConfigsResponse getErrorResponse(int throttleTimeMs, Throwable e) )); } - public static DescribeConfigsRequest parse(Readable readable, short version) { - return new DescribeConfigsRequest(new DescribeConfigsRequestData(readable, version), version); + public static DescribeConfigsRequest parse(ByteBuffer buffer, short version) { + return new DescribeConfigsRequest(new DescribeConfigsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java index f291af273b84a..fa4070622b37d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeConfigsResponse.java @@ -20,11 +20,12 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.DescribeConfigsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collection; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.function.Function; @@ -240,15 +241,15 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.results().forEach(response -> updateErrorCounts(errorCounts, Errors.forCode(response.errorCode())) ); return errorCounts; } - public static DescribeConfigsResponse parse(Readable readable, short version) { - return new DescribeConfigsResponse(new DescribeConfigsResponseData(readable, version)); + public static DescribeConfigsResponse parse(ByteBuffer buffer, short version) { + return new DescribeConfigsResponse(new DescribeConfigsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenRequest.java index 836fe7337e0f6..bd3b5fd57c002 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenRequest.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.DescribeDelegationTokenRequestData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; +import java.nio.ByteBuffer; import java.util.List; import java.util.stream.Collectors; @@ -73,8 +74,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new DescribeDelegationTokenResponse(version(), throttleTimeMs, Errors.forException(e)); } - public static DescribeDelegationTokenRequest parse(Readable readable, short version) { + public static DescribeDelegationTokenRequest parse(ByteBuffer buffer, short version) { return new DescribeDelegationTokenRequest(new DescribeDelegationTokenRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java index d0476a3772caf..a922f056a89aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeDelegationTokenResponse.java @@ -20,12 +20,13 @@ import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationToken; import org.apache.kafka.common.message.DescribeDelegationTokenResponseData.DescribedDelegationTokenRenewer; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.token.delegation.DelegationToken; import org.apache.kafka.common.security.token.delegation.TokenInformation; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Map; @@ -75,9 +76,9 @@ public DescribeDelegationTokenResponse(DescribeDelegationTokenResponseData data) this.data = data; } - public static DescribeDelegationTokenResponse parse(Readable readable, short version) { + public static DescribeDelegationTokenResponse parse(ByteBuffer buffer, short version) { return new DescribeDelegationTokenResponse(new DescribeDelegationTokenResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsRequest.java index 8a7e3028a6e97..3a4b9e38b02ee 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsRequest.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.DescribeGroupsRequestData; import org.apache.kafka.common.message.DescribeGroupsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; import java.util.stream.Collectors; @@ -73,8 +74,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new DescribeGroupsResponse(describeGroupsResponseData); } - public static DescribeGroupsRequest parse(Readable readable, short version) { - return new DescribeGroupsRequest(new DescribeGroupsRequestData(readable, version), version); + public static DescribeGroupsRequest parse(ByteBuffer buffer, short version) { + return new DescribeGroupsRequest(new DescribeGroupsRequestData(new ByteBufferAccessor(buffer), version), version); } public static List getErrorDescribedGroupList( diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java index 4d59aee8758ab..b02480553397b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeGroupsResponse.java @@ -20,12 +20,13 @@ import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroup; import org.apache.kafka.common.message.DescribeGroupsResponseData.DescribedGroupMember; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.utils.Utils; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -138,14 +139,14 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.groups().forEach(describedGroup -> updateErrorCounts(errorCounts, Errors.forCode(describedGroup.errorCode()))); return errorCounts; } - public static DescribeGroupsResponse parse(Readable readable, short version) { - return new DescribeGroupsResponse(new DescribeGroupsResponseData(readable, version)); + public static DescribeGroupsResponse parse(ByteBuffer buffer, short version) { + return new DescribeGroupsResponse(new DescribeGroupsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsRequest.java index c84b273209ceb..4b5f3517420aa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.DescribeLogDirsRequestData; import org.apache.kafka.common.message.DescribeLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class DescribeLogDirsRequest extends AbstractRequest { @@ -67,7 +69,7 @@ public boolean isAllTopicPartitions() { return data.topics() == null; } - public static DescribeLogDirsRequest parse(Readable readable, short version) { - return new DescribeLogDirsRequest(new DescribeLogDirsRequestData(readable, version), version); + public static DescribeLogDirsRequest parse(ByteBuffer buffer, short version) { + return new DescribeLogDirsRequest(new DescribeLogDirsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java index 37929c0b3d8fd..b2245d3edce95 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeLogDirsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.DescribeLogDirsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; @@ -55,7 +56,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); errorCounts.put(Errors.forCode(data.errorCode()), 1); data.results().forEach(result -> updateErrorCounts(errorCounts, Errors.forCode(result.errorCode())) @@ -63,8 +64,8 @@ public Map errorCounts() { return errorCounts; } - public static DescribeLogDirsResponse parse(Readable readable, short version) { - return new DescribeLogDirsResponse(new DescribeLogDirsResponseData(readable, version)); + public static DescribeLogDirsResponse parse(ByteBuffer buffer, short version) { + return new DescribeLogDirsResponse(new DescribeLogDirsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersRequest.java index 3c04e785a743e..39aab22a1c34c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersRequest.java @@ -22,8 +22,10 @@ import org.apache.kafka.common.message.DescribeProducersResponseData.PartitionResponse; import org.apache.kafka.common.message.DescribeProducersResponseData.TopicResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class DescribeProducersRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -83,9 +85,9 @@ public DescribeProducersResponse getErrorResponse(int throttleTimeMs, Throwable return new DescribeProducersResponse(response); } - public static DescribeProducersRequest parse(Readable readable, short version) { + public static DescribeProducersRequest parse(ByteBuffer buffer, short version) { return new DescribeProducersRequest(new DescribeProducersRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java index 1b30862c4dc5b..065a101bed6e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeProducersResponse.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.DescribeProducersResponseData.PartitionResponse; import org.apache.kafka.common.message.DescribeProducersResponseData.TopicResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -50,9 +51,9 @@ public Map errorCounts() { return errorCounts; } - public static DescribeProducersResponse parse(Readable readable, short version) { + public static DescribeProducersResponse parse(ByteBuffer buffer, short version) { return new DescribeProducersResponse(new DescribeProducersResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumRequest.java index e5f6b00f9fce4..5de7da403d438 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.DescribeQuorumRequestData; import org.apache.kafka.common.message.DescribeQuorumResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -55,8 +56,8 @@ private DescribeQuorumRequest(DescribeQuorumRequestData data, short version) { this.data = data; } - public static DescribeQuorumRequest parse(Readable readable, short version) { - return new DescribeQuorumRequest(new DescribeQuorumRequestData(readable, version), version); + public static DescribeQuorumRequest parse(ByteBuffer buffer, short version) { + return new DescribeQuorumRequest(new DescribeQuorumRequestData(new ByteBufferAccessor(buffer), version), version); } public static DescribeQuorumRequestData singletonRequest(TopicPartition topicPartition) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java index c3b33d48052cd..5ad51f4e1de98 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeQuorumResponse.java @@ -19,11 +19,12 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.DescribeQuorumResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Map; /** @@ -47,7 +48,7 @@ public DescribeQuorumResponse(DescribeQuorumResponseData data) { @Override public Map errorCounts() { - Map errors = new EnumMap<>(Errors.class); + Map errors = new HashMap<>(); errors.put(Errors.forCode(data.errorCode()), 1); @@ -105,7 +106,7 @@ public static DescribeQuorumResponseData singletonResponse( return res; } - public static DescribeQuorumResponse parse(Readable readable, short version) { - return new DescribeQuorumResponse(new DescribeQuorumResponseData(readable, version)); + public static DescribeQuorumResponse parse(ByteBuffer buffer, short version) { + return new DescribeQuorumResponse(new DescribeQuorumResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsRequest.java index f65ef91db5410..588c562f1ed71 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; @@ -90,9 +91,9 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new DescribeTopicPartitionsResponse(responseData); } - public static DescribeTopicPartitionsRequest parse(Readable readable, short version) { + public static DescribeTopicPartitionsRequest parse(ByteBuffer buffer, short version) { return new DescribeTopicPartitionsRequest( - new DescribeTopicPartitionsRequestData(readable, version), + new DescribeTopicPartitionsRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java index 04350480ac8ca..25d6851299260 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTopicPartitionsResponse.java @@ -20,11 +20,14 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartitionInfo; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; +import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; +import java.util.List; import java.util.Map; import java.util.stream.Collectors; @@ -58,7 +61,7 @@ public boolean shouldClientThrottle(short version) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.topics().forEach(topicResponse -> { topicResponse.partitions().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode()))); updateErrorCounts(errorCounts, Errors.forCode(topicResponse.errorCode())); @@ -66,9 +69,19 @@ public Map errorCounts() { return errorCounts; } - public static DescribeTopicPartitionsResponse parse(Readable readable, short version) { + public static DescribeTopicPartitionsResponse prepareResponse( + int throttleTimeMs, + List topics + ) { + DescribeTopicPartitionsResponseData responseData = new DescribeTopicPartitionsResponseData(); + responseData.setThrottleTimeMs(throttleTimeMs); + topics.forEach(topicResponse -> responseData.topics().add(topicResponse)); + return new DescribeTopicPartitionsResponse(responseData); + } + + public static DescribeTopicPartitionsResponse parse(ByteBuffer buffer, short version) { return new DescribeTopicPartitionsResponse( - new DescribeTopicPartitionsResponseData(readable, version)); + new DescribeTopicPartitionsResponseData(new ByteBufferAccessor(buffer), version)); } public static TopicPartitionInfo partitionToTopicPartitionInfo( diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsRequest.java index c708c89a0446f..a6e44fa5f6ddb 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.DescribeTransactionsRequestData; import org.apache.kafka.common.message.DescribeTransactionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class DescribeTransactionsRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -70,9 +72,9 @@ public DescribeTransactionsResponse getErrorResponse(int throttleTimeMs, Throwab return new DescribeTransactionsResponse(response); } - public static DescribeTransactionsRequest parse(Readable readable, short version) { + public static DescribeTransactionsRequest parse(ByteBuffer buffer, short version) { return new DescribeTransactionsRequest(new DescribeTransactionsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java index e69bcddd9ac88..8839778569a57 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeTransactionsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.DescribeTransactionsResponseData; import org.apache.kafka.common.message.DescribeTransactionsResponseData.TransactionState; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class DescribeTransactionsResponse extends AbstractResponse { @@ -40,7 +41,7 @@ public DescribeTransactionsResponseData data() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); for (TransactionState transactionState : data.transactionStates()) { Errors error = Errors.forCode(transactionState.errorCode()); updateErrorCounts(errorCounts, error); @@ -48,9 +49,9 @@ public Map errorCounts() { return errorCounts; } - public static DescribeTransactionsResponse parse(Readable readable, short version) { + public static DescribeTransactionsResponse parse(ByteBuffer buffer, short version) { return new DescribeTransactionsResponse(new DescribeTransactionsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java index a67bfcd362fa5..a4a389b4b591e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsRequest.java @@ -19,7 +19,9 @@ import org.apache.kafka.common.message.DescribeUserScramCredentialsRequestData; import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; + +import java.nio.ByteBuffer; public class DescribeUserScramCredentialsRequest extends AbstractRequest { @@ -49,9 +51,9 @@ private DescribeUserScramCredentialsRequest(DescribeUserScramCredentialsRequestD this.data = data; } - public static DescribeUserScramCredentialsRequest parse(Readable readable, short version) { + public static DescribeUserScramCredentialsRequest parse(ByteBuffer buffer, short version) { return new DescribeUserScramCredentialsRequest(new DescribeUserScramCredentialsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override @@ -66,12 +68,11 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setThrottleTimeMs(throttleTimeMs) .setErrorCode(apiError.error().code()) .setErrorMessage(apiError.message()); - - data.users().forEach(__ -> + for (DescribeUserScramCredentialsRequestData.UserName user : data.users()) { response.results().add(new DescribeUserScramCredentialsResponseData.DescribeUserScramCredentialsResult() - .setErrorCode(apiError.error().code()) - .setErrorMessage(apiError.message())) - ); + .setErrorCode(apiError.error().code()) + .setErrorMessage(apiError.message())); + } return new DescribeUserScramCredentialsResponse(response); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java index fad733fd44f5b..58ba4212949c6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/DescribeUserScramCredentialsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.DescribeUserScramCredentialsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class DescribeUserScramCredentialsResponse extends AbstractResponse { @@ -57,7 +58,7 @@ public Map errorCounts() { return errorCounts(data.results().stream().map(r -> Errors.forCode(r.errorCode()))); } - public static DescribeUserScramCredentialsResponse parse(Readable readable, short version) { - return new DescribeUserScramCredentialsResponse(new DescribeUserScramCredentialsResponseData(readable, version)); + public static DescribeUserScramCredentialsResponse parse(ByteBuffer buffer, short version) { + return new DescribeUserScramCredentialsResponse(new DescribeUserScramCredentialsResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersRequest.java index 8ed9cb676717f..dae4086569f24 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersRequest.java @@ -25,9 +25,10 @@ import org.apache.kafka.common.message.ElectLeadersResponseData.PartitionResult; import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; @@ -146,7 +147,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new ElectLeadersResponse(throttleTimeMs, apiError.error().code(), electionResults, version()); } - public static ElectLeadersRequest parse(Readable readable, short version) { - return new ElectLeadersRequest(new ElectLeadersRequestData(readable, version), version); + public static ElectLeadersRequest parse(ByteBuffer buffer, short version) { + return new ElectLeadersRequest(new ElectLeadersRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java index 1a1546980a8cf..4cb1c70ba7e7f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ElectLeadersResponse.java @@ -21,10 +21,10 @@ import org.apache.kafka.common.message.ElectLeadersResponseData; import org.apache.kafka.common.message.ElectLeadersResponseData.ReplicaElectionResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -69,7 +69,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.replicaElectionResults().forEach(result -> result.partitionResult().forEach(partitionResult -> @@ -79,8 +79,8 @@ public Map errorCounts() { return counts; } - public static ElectLeadersResponse parse(Readable readable, short version) { - return new ElectLeadersResponse(new ElectLeadersResponseData(readable, version)); + public static ElectLeadersResponse parse(ByteBuffer buffer, short version) { + return new ElectLeadersResponse(new ElectLeadersResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochRequest.java index de45ca457cb2b..659c4f7ef226f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochRequest.java @@ -21,9 +21,10 @@ import org.apache.kafka.common.message.EndQuorumEpochRequestData; import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; import java.util.stream.Collectors; @@ -66,8 +67,8 @@ public EndQuorumEpochResponse getErrorResponse(int throttleTimeMs, Throwable e) .setErrorCode(Errors.forException(e).code())); } - public static EndQuorumEpochRequest parse(Readable readable, short version) { - return new EndQuorumEpochRequest(new EndQuorumEpochRequestData(readable, version), version); + public static EndQuorumEpochRequest parse(ByteBuffer buffer, short version) { + return new EndQuorumEpochRequest(new EndQuorumEpochRequestData(new ByteBufferAccessor(buffer), version), version); } public static EndQuorumEpochRequestData singletonRequest(TopicPartition topicPartition, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java index fe39d80087e2d..37ca81deca63c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndQuorumEpochResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.EndQuorumEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -48,7 +49,7 @@ public EndQuorumEpochResponse(EndQuorumEpochResponseData data) { @Override public Map errorCounts() { - Map errors = new EnumMap<>(Errors.class); + Map errors = new HashMap<>(); errors.put(Errors.forCode(data.errorCode()), 1); @@ -75,7 +76,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static EndQuorumEpochResponse parse(Readable readable, short version) { - return new EndQuorumEpochResponse(new EndQuorumEpochResponseData(readable, version)); + public static EndQuorumEpochResponse parse(ByteBuffer buffer, short version) { + return new EndQuorumEpochResponse(new EndQuorumEpochResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnRequest.java index 1d064a5ddb1bf..cbf4553f3df7e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.EndTxnRequestData; import org.apache.kafka.common.message.EndTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class EndTxnRequest extends AbstractRequest { public static final short LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 = 4; @@ -79,7 +81,7 @@ public EndTxnResponse getErrorResponse(int throttleTimeMs, Throwable e) { ); } - public static EndTxnRequest parse(Readable readable, short version) { - return new EndTxnRequest(new EndTxnRequestData(readable, version), version); + public static EndTxnRequest parse(ByteBuffer buffer, short version) { + return new EndTxnRequest(new EndTxnRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java index 21f111e93956e..0ab01bb1a3d33 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EndTxnResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.EndTxnResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; /** @@ -68,8 +69,8 @@ public EndTxnResponseData data() { return data; } - public static EndTxnResponse parse(Readable readable, short version) { - return new EndTxnResponse(new EndTxnResponseData(readable, version)); + public static EndTxnResponse parse(ByteBuffer buffer, short version) { + return new EndTxnResponse(new EndTxnResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeRequest.java index f414e32b385f6..6e62aee75bd62 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeRequest.java @@ -19,8 +19,8 @@ import org.apache.kafka.common.message.EnvelopeRequestData; import org.apache.kafka.common.message.EnvelopeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import java.nio.ByteBuffer; @@ -76,8 +76,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(Errors.forException(e).code())); } - public static EnvelopeRequest parse(Readable readable, short version) { - return new EnvelopeRequest(new EnvelopeRequestData(readable, version), version); + public static EnvelopeRequest parse(ByteBuffer buffer, short version) { + return new EnvelopeRequest(new EnvelopeRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java index 9faccc417d546..4f534b6721f4e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/EnvelopeResponse.java @@ -18,8 +18,8 @@ import org.apache.kafka.common.message.EnvelopeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import java.nio.ByteBuffer; import java.util.Map; @@ -72,8 +72,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static EnvelopeResponse parse(Readable readable, short version) { - return new EnvelopeResponse(new EnvelopeResponseData(readable, version)); + public static EnvelopeResponse parse(ByteBuffer buffer, short version) { + return new EnvelopeResponse(new EnvelopeResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenRequest.java index 37f10f61f3dca..3660a45646059 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenRequest.java @@ -19,8 +19,8 @@ import org.apache.kafka.common.message.ExpireDelegationTokenRequestData; import org.apache.kafka.common.message.ExpireDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import java.nio.ByteBuffer; @@ -33,9 +33,9 @@ private ExpireDelegationTokenRequest(ExpireDelegationTokenRequestData data, shor this.data = data; } - public static ExpireDelegationTokenRequest parse(Readable readable, short version) { + public static ExpireDelegationTokenRequest parse(ByteBuffer buffer, short version) { return new ExpireDelegationTokenRequest( - new ExpireDelegationTokenRequestData(readable, version), version); + new ExpireDelegationTokenRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java index 9fe141565c239..945db7acdc09e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ExpireDelegationTokenResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.ExpireDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class ExpireDelegationTokenResponse extends AbstractResponse { @@ -32,8 +33,8 @@ public ExpireDelegationTokenResponse(ExpireDelegationTokenResponseData data) { this.data = data; } - public static ExpireDelegationTokenResponse parse(Readable readable, short version) { - return new ExpireDelegationTokenResponse(new ExpireDelegationTokenResponseData(readable, + public static ExpireDelegationTokenResponse parse(ByteBuffer buffer, short version) { + return new ExpireDelegationTokenResponse(new ExpireDelegationTokenResponseData(new ByteBufferAccessor(buffer), version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java index 0b478b759a88a..5d1fc9a996ed6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchRequest.java @@ -25,10 +25,11 @@ import org.apache.kafka.common.message.FetchRequestData.ReplicaState; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; @@ -360,7 +361,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setPartitions(partitionResponses)); }); } - return FetchResponse.of(new FetchResponseData() + return new FetchResponse(new FetchResponseData() .setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) .setSessionId(data.sessionId()) @@ -452,8 +453,8 @@ public String rackId() { return data.rackId(); } - public static FetchRequest parse(Readable readable, short version) { - return new FetchRequest(new FetchRequestData(readable, version), version); + public static FetchRequest parse(ByteBuffer buffer, short version) { + return new FetchRequest(new FetchRequestData(new ByteBufferAccessor(buffer), version), version); } // Broker ids are non-negative int. diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java index 5013468095c2a..91837bfaa557e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchResponse.java @@ -22,15 +22,16 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Records; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -86,7 +87,7 @@ public FetchResponseData data() { * We may also return INCONSISTENT_TOPIC_ID error as a partition-level error when a partition in the session has a topic ID * inconsistent with the log. */ - private FetchResponse(FetchResponseData fetchResponseData) { + public FetchResponse(FetchResponseData fetchResponseData) { super(ApiKeys.FETCH); this.data = fetchResponseData; } @@ -128,7 +129,7 @@ public int sessionId() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); updateErrorCounts(errorCounts, error()); data.responses().forEach(topicResponse -> topicResponse.partitions().forEach(partition -> @@ -137,15 +138,8 @@ public Map errorCounts() { return errorCounts; } - /** - * Creates a {@link org.apache.kafka.common.requests.FetchResponse} from the given byte buffer. - * Unlike {@link org.apache.kafka.common.requests.FetchResponse#of(FetchResponseData)}, this method doesn't convert - * null records to {@link org.apache.kafka.common.record.MemoryRecords#EMPTY}. - * - *

        This method should only be used in client-side.

        - */ - public static FetchResponse parse(Readable readable, short version) { - return new FetchResponse(new FetchResponseData(readable, version)); + public static FetchResponse parse(ByteBuffer buffer, short version) { + return new FetchResponse(new FetchResponseData(new ByteBufferAccessor(buffer), version)); } // Fetch versions 13 and above should have topic IDs for all topics. @@ -226,21 +220,12 @@ public static int recordsSize(FetchResponseData.PartitionData partition) { return partition.records() == null ? 0 : partition.records().sizeInBytes(); } - /** - * Creates a {@link org.apache.kafka.common.requests.FetchResponse} from the given data. - * This method converts null records to {@link org.apache.kafka.common.record.MemoryRecords#EMPTY} - * to ensure consistent record representation in the response. - * - *

        This method should only be used in server-side.

        - */ - public static FetchResponse of(FetchResponseData data) { - for (FetchResponseData.FetchableTopicResponse response : data.responses()) { - for (FetchResponseData.PartitionData partition : response.partitions()) { - if (partition.records() == null) - partition.setRecords(MemoryRecords.EMPTY); - } - } - return new FetchResponse(data); + // TODO: remove as a part of KAFKA-12410 + public static FetchResponse of(Errors error, + int throttleTimeMs, + int sessionId, + LinkedHashMap responseData) { + return new FetchResponse(toMessage(error, throttleTimeMs, sessionId, responseData.entrySet().iterator(), Collections.emptyList())); } // TODO: remove as a part of KAFKA-12410 @@ -273,11 +258,6 @@ private static FetchResponseData toMessage(Errors error, FetchResponseData.PartitionData partitionData = entry.getValue(); // Since PartitionData alone doesn't know the partition ID, we set it here partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); - // To protect the clients from failing due to null records, - // we always convert null records to MemoryRecords.EMPTY - // We will propose a KIP to change the schema definitions in the future - if (partitionData.records() == null) - partitionData.setRecords(MemoryRecords.EMPTY); // We have to keep the order of input topic-partition. Hence, we batch the partitions only if the last // batch is in the same topic group. FetchResponseData.FetchableTopicResponse previousTopic = topicResponseList.isEmpty() ? null diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotRequest.java index 47b654c4acc14..ffd8635d843fd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.FetchSnapshotRequestData; import org.apache.kafka.common.message.FetchSnapshotResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Optional; public final class FetchSnapshotRequest extends AbstractRequest { @@ -67,8 +68,8 @@ public static Optional forTopicParti .findAny(); } - public static FetchSnapshotRequest parse(Readable readable, short version) { - return new FetchSnapshotRequest(new FetchSnapshotRequestData(readable, version), version); + public static FetchSnapshotRequest parse(ByteBuffer buffer, short version) { + return new FetchSnapshotRequest(new FetchSnapshotRequestData(new ByteBufferAccessor(buffer), version), version); } public static class Builder extends AbstractRequest.Builder { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java index 77a1089abb477..ca79d396dc6fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FetchSnapshotResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.FetchSnapshotResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -36,7 +37,7 @@ public FetchSnapshotResponse(FetchSnapshotResponseData data) { @Override public Map errorCounts() { - Map errors = new EnumMap<>(Errors.class); + Map errors = new HashMap<>(); Errors topLevelError = Errors.forCode(data.errorCode()); if (topLevelError != Errors.NONE) { @@ -98,7 +99,7 @@ public static Optional forTopicPart .findAny(); } - public static FetchSnapshotResponse parse(Readable readable, short version) { - return new FetchSnapshotResponse(new FetchSnapshotResponseData(readable, version)); + public static FetchSnapshotResponse parse(ByteBuffer buffer, short version) { + return new FetchSnapshotResponse(new FetchSnapshotResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java index b14fae5cfac91..26cfc809d995a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorRequest.java @@ -22,9 +22,10 @@ import org.apache.kafka.common.message.FindCoordinatorRequestData; import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; public class FindCoordinatorRequest extends AbstractRequest { @@ -104,8 +105,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { } } - public static FindCoordinatorRequest parse(Readable readable, short version) { - return new FindCoordinatorRequest(new FindCoordinatorRequestData(readable, version), + public static FindCoordinatorRequest parse(ByteBuffer buffer, short version) { + return new FindCoordinatorRequest(new FindCoordinatorRequestData(new ByteBufferAccessor(buffer), version), version); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java index 5bd08934b62c7..967782d35f5c8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/FindCoordinatorResponse.java @@ -20,12 +20,13 @@ import org.apache.kafka.common.message.FindCoordinatorResponseData; import org.apache.kafka.common.message.FindCoordinatorResponseData.Coordinator; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -97,7 +98,7 @@ public Errors error() { @Override public Map errorCounts() { if (!data.coordinators().isEmpty()) { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); for (Coordinator coordinator : data.coordinators()) { updateErrorCounts(errorCounts, Errors.forCode(coordinator.errorCode())); } @@ -107,8 +108,8 @@ public Map errorCounts() { } } - public static FindCoordinatorResponse parse(Readable readable, short version) { - return new FindCoordinatorResponse(new FindCoordinatorResponseData(readable, version)); + public static FindCoordinatorResponse parse(ByteBuffer buffer, short version) { + return new FindCoordinatorResponse(new FindCoordinatorResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsRequest.java index c507d930e24d5..cf19794fa5b3e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.GetTelemetrySubscriptionsRequestData; import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class GetTelemetrySubscriptionsRequest extends AbstractRequest { @@ -69,8 +71,8 @@ public GetTelemetrySubscriptionsRequestData data() { return data; } - public static GetTelemetrySubscriptionsRequest parse(Readable readable, short version) { + public static GetTelemetrySubscriptionsRequest parse(ByteBuffer buffer, short version) { return new GetTelemetrySubscriptionsRequest(new GetTelemetrySubscriptionsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java index fa8cdf28a91df..01e9038457073 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/GetTelemetrySubscriptionsResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class GetTelemetrySubscriptionsResponse extends AbstractResponse { @@ -41,7 +42,7 @@ public GetTelemetrySubscriptionsResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); return counts; } @@ -64,8 +65,8 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static GetTelemetrySubscriptionsResponse parse(Readable readable, short version) { + public static GetTelemetrySubscriptionsResponse parse(ByteBuffer buffer, short version) { return new GetTelemetrySubscriptionsResponse(new GetTelemetrySubscriptionsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatRequest.java index 56c7ff564a34b..482e61a255a8e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.HeartbeatRequestData; import org.apache.kafka.common.message.HeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class HeartbeatRequest extends AbstractRequest { @@ -65,8 +67,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new HeartbeatResponse(responseData); } - public static HeartbeatRequest parse(Readable readable, short version) { - return new HeartbeatRequest(new HeartbeatRequestData(readable, version), version); + public static HeartbeatRequest parse(ByteBuffer buffer, short version) { + return new HeartbeatRequest(new HeartbeatRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java index cc9d81fefa120..aebb903e967e7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/HeartbeatResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.HeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class HeartbeatResponse extends AbstractResponse { @@ -66,8 +67,8 @@ public HeartbeatResponseData data() { return data; } - public static HeartbeatResponse parse(Readable readable, short version) { - return new HeartbeatResponse(new HeartbeatResponseData(readable, version)); + public static HeartbeatResponse parse(ByteBuffer buffer, short version) { + return new HeartbeatResponse(new HeartbeatResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java index 59cf8f2f138d8..222097502b246 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsRequest.java @@ -21,15 +21,12 @@ import org.apache.kafka.common.config.ConfigResource; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource; -import org.apache.kafka.common.message.IncrementalAlterConfigsRequestDataJsonConverter; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.node.ObjectNode; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.Map; @@ -87,9 +84,9 @@ public IncrementalAlterConfigsRequest(IncrementalAlterConfigsRequestData data, s this.data = data; } - public static IncrementalAlterConfigsRequest parse(Readable readable, short version) { + public static IncrementalAlterConfigsRequest parse(ByteBuffer buffer, short version) { return new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override @@ -110,16 +107,4 @@ public AbstractResponse getErrorResponse(final int throttleTimeMs, final Throwab } return new IncrementalAlterConfigsResponse(response); } - - // It is not safe to print all config values - @Override - public String toString() { - JsonNode json = IncrementalAlterConfigsRequestDataJsonConverter.write(data, version()).deepCopy(); - for (JsonNode resource : json.get("resources")) { - for (JsonNode config : resource.get("configs")) { - ((ObjectNode) config).put("value", "REDACTED"); - } - } - return IncrementalAlterConfigsRequestDataJsonConverter.read(json, version()).toString(); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java index 2f487ac420d46..826be30a8d3fc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/IncrementalAlterConfigsResponse.java @@ -21,11 +21,11 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData; import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -73,7 +73,7 @@ public IncrementalAlterConfigsResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); data.responses().forEach(response -> updateErrorCounts(counts, Errors.forCode(response.errorCode())) ); @@ -95,8 +95,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static IncrementalAlterConfigsResponse parse(Readable readable, short version) { + public static IncrementalAlterConfigsResponse parse(ByteBuffer buffer, short version) { return new IncrementalAlterConfigsResponse(new IncrementalAlterConfigsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java index 5051890b1829d..9d92f0e5351dd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdRequest.java @@ -19,10 +19,12 @@ import org.apache.kafka.common.message.InitProducerIdRequestData; import org.apache.kafka.common.message.InitProducerIdResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; +import java.nio.ByteBuffer; + public class InitProducerIdRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { public final InitProducerIdRequestData data; @@ -62,12 +64,12 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(Errors.forException(e).code()) .setProducerId(RecordBatch.NO_PRODUCER_ID) .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - .setThrottleTimeMs(throttleTimeMs); + .setThrottleTimeMs(0); return new InitProducerIdResponse(response); } - public static InitProducerIdRequest parse(Readable readable, short version) { - return new InitProducerIdRequest(new InitProducerIdRequestData(readable, version), version); + public static InitProducerIdRequest parse(ByteBuffer buffer, short version) { + return new InitProducerIdRequest(new InitProducerIdRequestData(new ByteBufferAccessor(buffer), version), version); } @Override @@ -75,11 +77,4 @@ public InitProducerIdRequestData data() { return data; } - public boolean enable2Pc() { - return data.enable2Pc(); - } - - public boolean keepPreparedTxn() { - return data.keepPreparedTxn(); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java index 6e864b9ec4292..96c7a4d400ced 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitProducerIdResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.InitProducerIdResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; /** @@ -62,8 +63,8 @@ public InitProducerIdResponseData data() { return data; } - public static InitProducerIdResponse parse(Readable readable, short version) { - return new InitProducerIdResponse(new InitProducerIdResponseData(readable, version)); + public static InitProducerIdResponse parse(ByteBuffer buffer, short version) { + return new InitProducerIdResponse(new InitProducerIdResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java index 2feaf55d8142b..fc9abc7161363 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.InitializeShareGroupStateRequestData; import org.apache.kafka.common.message.InitializeShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -33,7 +34,11 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new InitializeShareGroupStateResponseData.InitializeStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new InitializeShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code())) - .collect(Collectors.toList())))); + topicResult -> results.add(new InitializeShareGroupStateResponseData.InitializeStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new InitializeShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code())) + .collect(Collectors.toList())))); return new InitializeShareGroupStateResponse(new InitializeShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -75,10 +80,10 @@ public InitializeShareGroupStateRequestData data() { return data; } - public static InitializeShareGroupStateRequest parse(Readable readable, short version) { + public static InitializeShareGroupStateRequest parse(ByteBuffer buffer, short version) { return new InitializeShareGroupStateRequest( - new InitializeShareGroupStateRequestData(readable, version), - version + new InitializeShareGroupStateRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateResponse.java index 91924e7c6ed5a..44880c2cb86f7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/InitializeShareGroupStateResponse.java @@ -17,16 +17,13 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.InitializeShareGroupStateRequestData; import org.apache.kafka.common.message.InitializeShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; -import java.util.EnumMap; -import java.util.List; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class InitializeShareGroupStateResponse extends AbstractResponse { @@ -44,11 +41,11 @@ public InitializeShareGroupStateResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -63,67 +60,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static InitializeShareGroupStateResponse parse(Readable readable, short version) { + public static InitializeShareGroupStateResponse parse(ByteBuffer buffer, short version) { return new InitializeShareGroupStateResponse( - new InitializeShareGroupStateResponseData(readable, version) + new InitializeShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) ); } - - public static InitializeShareGroupStateResponseData toGlobalErrorResponse(InitializeShareGroupStateRequestData request, Errors error) { - List initStateResults = new ArrayList<>(); - request.topics().forEach(topicData -> { - List partitionResults = new ArrayList<>(); - topicData.partitions().forEach(partitionData -> partitionResults.add( - toErrorResponsePartitionResult(partitionData.partition(), error, error.message())) - ); - initStateResults.add(toResponseInitializeStateResult(topicData.topicId(), partitionResults)); - }); - return new InitializeShareGroupStateResponseData().setResults(initStateResults); - } - - public static InitializeShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult( - int partitionId, - Errors error, - String errorMessage - ) { - return new InitializeShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static InitializeShareGroupStateResponseData.InitializeStateResult toResponseInitializeStateResult( - Uuid topicId, - List partitionResults - ) { - return new InitializeShareGroupStateResponseData.InitializeStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); - } - - public static InitializeShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { - return new InitializeShareGroupStateResponseData().setResults(List.of( - new InitializeShareGroupStateResponseData.InitializeStateResult() - .setTopicId(topicId) - .setPartitions(List.of(new InitializeShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))) - )); - } - - public static InitializeShareGroupStateResponseData.PartitionResult toResponsePartitionResult(int partitionId) { - return new InitializeShareGroupStateResponseData.PartitionResult().setPartition(partitionId); - } - - public static InitializeShareGroupStateResponseData toResponseData(Uuid topicId, int partitionId) { - return new InitializeShareGroupStateResponseData().setResults(List.of( - new InitializeShareGroupStateResponseData.InitializeStateResult() - .setTopicId(topicId) - .setPartitions(List.of( - new InitializeShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - )) - )); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java index 9d75d383bab13..946f7849bf14c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupRequest.java @@ -22,9 +22,10 @@ import org.apache.kafka.common.message.JoinGroupRequestData; import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; public class JoinGroupRequest extends AbstractRequest { @@ -96,7 +97,7 @@ public static String maybeTruncateReason(final String reason) { * * @return whether a known member id is required or not. */ - public static boolean requiresKnownMemberId(int apiVersion) { + public static boolean requiresKnownMemberId(short apiVersion) { return apiVersion >= 4; } @@ -117,7 +118,7 @@ public static boolean requiresKnownMemberId(int apiVersion) { */ public static boolean requiresKnownMemberId( JoinGroupRequestData request, - int apiVersion + short apiVersion ) { return request.groupInstanceId() == null && request.memberId().equals(UNKNOWN_MEMBER_ID) @@ -150,7 +151,7 @@ public static boolean requiresKnownMemberId( * @return whether the version supports skipping assignment. */ - public static boolean supportsSkippingAssignment(int apiVersion) { + public static boolean supportsSkippingAssignment(short apiVersion) { return apiVersion >= 9; } @@ -207,7 +208,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new JoinGroupResponse(data, version()); } - public static JoinGroupRequest parse(Readable readable, short version) { - return new JoinGroupRequest(new JoinGroupRequestData(readable, version), version); + public static JoinGroupRequest parse(ByteBuffer buffer, short version) { + return new JoinGroupRequest(new JoinGroupRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupResponse.java index 553bbbb3ab34d..bf8083d910712 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/JoinGroupResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.JoinGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class JoinGroupResponse extends AbstractResponse { @@ -72,8 +73,8 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static JoinGroupResponse parse(Readable readable, short version) { - return new JoinGroupResponse(new JoinGroupResponseData(readable, version), version); + public static JoinGroupResponse parse(ByteBuffer buffer, short version) { + return new JoinGroupResponse(new JoinGroupResponseData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java new file mode 100644 index 0000000000000..90c0add47a9ab --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrRequest.java @@ -0,0 +1,403 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.Node; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.protocol.MessageUtil; +import org.apache.kafka.common.utils.FlattenedIterator; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.stream.Collectors; + +public final class LeaderAndIsrRequest { + + /** + * Indicates if a controller request is incremental, full, or unknown. + */ + public enum Type { + UNKNOWN(0), + INCREMENTAL(1), + FULL(2); + + private final byte type; + Type(int type) { + this.type = (byte) type; + } + + public byte toByte() { + return type; + } + } + + public static class Builder { + protected final int controllerId; + protected final int controllerEpoch; + protected final long brokerEpoch; + private final List partitionStates; + private final Map topicIds; + private final Collection liveLeaders; + private final Type updateType; + + public Builder(int controllerId, int controllerEpoch, long brokerEpoch, + List partitionStates, Map topicIds, + Collection liveLeaders) { + this(controllerId, controllerEpoch, brokerEpoch, partitionStates, topicIds, liveLeaders, Type.UNKNOWN); + } + + public Builder(int controllerId, int controllerEpoch, long brokerEpoch, + List partitionStates, Map topicIds, + Collection liveLeaders, Type updateType) { + this.controllerId = controllerId; + this.controllerEpoch = controllerEpoch; + this.brokerEpoch = brokerEpoch; + this.partitionStates = partitionStates; + this.topicIds = topicIds; + this.liveLeaders = liveLeaders; + this.updateType = updateType; + } + + public LeaderAndIsrRequest build() { + return new LeaderAndIsrRequest(this); + } + + @Override + public String toString() { + return "(type=LeaderAndIsRequest" + + ", controllerId=" + controllerId + + ", controllerEpoch=" + controllerEpoch + + ", brokerEpoch=" + brokerEpoch + + ", partitionStates=" + partitionStates + + ", topicIds=" + topicIds + + ", liveLeaders=(" + liveLeaders.stream().map(Node::toString).collect(Collectors.joining(", ")) + ")" + + ")"; + + } + } + + private final int controllerId; + private final int controllerEpoch; + private final long brokerEpoch; + private final List liveLeaders; + private final List topicStates; + private final Type requestType; + + public LeaderAndIsrRequest(Builder builder) { + this.controllerId = builder.controllerId; + this.controllerEpoch = builder.controllerEpoch; + this.brokerEpoch = builder.brokerEpoch; + this.requestType = builder.updateType; + this.liveLeaders = new ArrayList<>(builder.liveLeaders); + this.topicStates = new ArrayList<>(groupByTopic(builder.partitionStates, builder.topicIds).values()); + } + + private static Map groupByTopic(List partitionStates, Map topicIds) { + Map topicStates = new HashMap<>(); + for (PartitionState partition : partitionStates) { + TopicState topicState = topicStates.computeIfAbsent(partition.topicName(), t -> { + var topic = new TopicState(); + topic.topicName = partition.topicName(); + topic.topicId = topicIds.getOrDefault(partition.topicName(), Uuid.ZERO_UUID); + return topic; + }); + topicState.partitionStates().add(partition); + } + return topicStates; + } + + public int controllerId() { + return controllerId; + } + + public int controllerEpoch() { + return controllerEpoch; + } + + public long brokerEpoch() { + return brokerEpoch; + } + + public Iterable partitionStates() { + return () -> new FlattenedIterator<>(topicStates.iterator(), + topicState -> topicState.partitionStates().iterator()); + } + + public Map topicIds() { + return topicStates.stream() + .collect(Collectors.toMap(TopicState::topicName, TopicState::topicId)); + } + + public List liveLeaders() { + return Collections.unmodifiableList(liveLeaders); + } + + public Type requestType() { + return requestType; + } + + public LeaderAndIsrResponse getErrorResponse(Exception e) { + LinkedHashMap> errorsMap = new LinkedHashMap<>(); + Errors error = Errors.forException(e); + + for (TopicState topicState : topicStates) { + List partitions = new ArrayList<>(topicState.partitionStates().size()); + for (PartitionState partition : topicState.partitionStates()) { + partitions.add(new LeaderAndIsrResponse.PartitionError(partition.partitionIndex, error.code())); + } + errorsMap.put(topicState.topicId, partitions); + } + + return new LeaderAndIsrResponse(error, errorsMap); + + } + + public static class TopicState { + String topicName; + Uuid topicId; + List partitionStates; + + public TopicState() { + this.topicName = ""; + this.topicId = Uuid.ZERO_UUID; + this.partitionStates = new ArrayList<>(0); + } + + public String topicName() { + return this.topicName; + } + + public Uuid topicId() { + return this.topicId; + } + + public List partitionStates() { + return this.partitionStates; + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) + return false; + TopicState that = (TopicState) o; + return Objects.equals(topicName, that.topicName) && + Objects.equals(topicId, that.topicId) && + Objects.equals(partitionStates, that.partitionStates); + } + + @Override + public int hashCode() { + return Objects.hash(topicName, topicId, partitionStates); + } + + @Override + public String toString() { + return "LeaderAndIsrTopicState(" + + "topicName='" + topicName + "'" + + ", topicId=" + topicId + + ", partitionStates=" + MessageUtil.deepToString(partitionStates.iterator()) + + ")"; + } + } + + public static class PartitionState { + String topicName; + int partitionIndex; + int controllerEpoch; + int leader; + int leaderEpoch; + List isr; + int partitionEpoch; + List replicas; + List addingReplicas; + List removingReplicas; + boolean isNew; + byte leaderRecoveryState; + + public PartitionState() { + this.topicName = ""; + this.partitionIndex = 0; + this.controllerEpoch = 0; + this.leader = 0; + this.leaderEpoch = 0; + this.isr = new ArrayList<>(0); + this.partitionEpoch = 0; + this.replicas = new ArrayList<>(0); + this.addingReplicas = new ArrayList<>(0); + this.removingReplicas = new ArrayList<>(0); + this.isNew = false; + this.leaderRecoveryState = (byte) 0; + } + + @Override + public boolean equals(Object o) { + if (o == null || getClass() != o.getClass()) return false; + PartitionState that = (PartitionState) o; + return partitionIndex == that.partitionIndex && + controllerEpoch == that.controllerEpoch && + leader == that.leader && + leaderEpoch == that.leaderEpoch && + partitionEpoch == that.partitionEpoch && + isNew == that.isNew && + leaderRecoveryState == that.leaderRecoveryState && + Objects.equals(topicName, that.topicName) && + Objects.equals(isr, that.isr) && + Objects.equals(replicas, that.replicas) && + Objects.equals(addingReplicas, that.addingReplicas) && + Objects.equals(removingReplicas, that.removingReplicas); + } + + @Override + public int hashCode() { + return Objects.hash(topicName, partitionIndex, controllerEpoch, leader, leaderEpoch, isr, partitionEpoch, + replicas, addingReplicas, removingReplicas, isNew, leaderRecoveryState); + } + + @Override + public String toString() { + return "LeaderAndIsrPartitionState(" + + "topicName='" + topicName + "'" + + ", partitionIndex=" + partitionIndex + + ", controllerEpoch=" + controllerEpoch + + ", leader=" + leader + + ", leaderEpoch=" + leaderEpoch + + ", isr=" + MessageUtil.deepToString(isr.iterator()) + + ", partitionEpoch=" + partitionEpoch + + ", replicas=" + MessageUtil.deepToString(replicas.iterator()) + + ", addingReplicas=" + MessageUtil.deepToString(addingReplicas.iterator()) + + ", removingReplicas=" + MessageUtil.deepToString(removingReplicas.iterator()) + + ", isNew=" + (isNew ? "true" : "false") + + ", leaderRecoveryState=" + leaderRecoveryState + + ")"; + } + + public String topicName() { + return this.topicName; + } + + public int partitionIndex() { + return this.partitionIndex; + } + + public int controllerEpoch() { + return this.controllerEpoch; + } + + public int leader() { + return this.leader; + } + + public int leaderEpoch() { + return this.leaderEpoch; + } + + public List isr() { + return this.isr; + } + + public int partitionEpoch() { + return this.partitionEpoch; + } + + public List replicas() { + return this.replicas; + } + + public List addingReplicas() { + return this.addingReplicas; + } + + public List removingReplicas() { + return this.removingReplicas; + } + + public boolean isNew() { + return this.isNew; + } + + public byte leaderRecoveryState() { + return this.leaderRecoveryState; + } + + public PartitionState setTopicName(String v) { + this.topicName = v; + return this; + } + + public PartitionState setPartitionIndex(int v) { + this.partitionIndex = v; + return this; + } + + public PartitionState setControllerEpoch(int v) { + this.controllerEpoch = v; + return this; + } + + public PartitionState setLeader(int v) { + this.leader = v; + return this; + } + + public PartitionState setLeaderEpoch(int v) { + this.leaderEpoch = v; + return this; + } + + public PartitionState setIsr(List v) { + this.isr = v; + return this; + } + + public PartitionState setPartitionEpoch(int v) { + this.partitionEpoch = v; + return this; + } + + public PartitionState setReplicas(List v) { + this.replicas = v; + return this; + } + + public PartitionState setAddingReplicas(List v) { + this.addingReplicas = v; + return this; + } + + public PartitionState setRemovingReplicas(List v) { + this.removingReplicas = v; + return this; + } + + public PartitionState setIsNew(boolean v) { + this.isNew = v; + return this; + } + + public PartitionState setLeaderRecoveryState(byte v) { + this.leaderRecoveryState = v; + return this; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java new file mode 100644 index 0000000000000..bcdcf35b5d38d --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaderAndIsrResponse.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.protocol.Errors; + +import java.util.Collections; +import java.util.HashMap; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +public class LeaderAndIsrResponse { + + /** + * Possible error code: + * + * STALE_CONTROLLER_EPOCH (11) + * STALE_BROKER_EPOCH (77) + */ + private final Errors error; + private final LinkedHashMap> topicErrors; + + public LeaderAndIsrResponse(Errors error, LinkedHashMap> topicErrors) { + this.error = error; + this.topicErrors = topicErrors; + } + + public LinkedHashMap> topics() { + return topicErrors; + } + + public Errors error() { + return error; + } + + public Map errorCounts() { + Errors error = error(); + if (error != Errors.NONE) { + // Minor optimization since the top-level error applies to all partitions + return Collections.singletonMap(error, topics().values().stream().mapToInt(partitionErrors -> + partitionErrors.size()).sum() + 1); + } + Map errors = AbstractResponse.errorCounts(topics().values().stream().flatMap(partitionErrors -> + partitionErrors.stream()).map(p -> Errors.forCode(p.errorCode))); + AbstractResponse.updateErrorCounts(errors, Errors.NONE); + return errors; + } + + public Map partitionErrors(Map topicNames) { + Map errors = new HashMap<>(); + topics().forEach((topicId, partitionErrors) -> { + String topicName = topicNames.get(topicId); + if (topicName != null) { + partitionErrors.forEach(partition -> + errors.put(new TopicPartition(topicName, partition.partitionIndex), Errors.forCode(partition.errorCode))); + } + }); + return errors; + } + + @Override + public String toString() { + return "LeaderAndIsrResponse{" + + "error=" + error + + ", topicErrors=" + topicErrors + + '}'; + } + + public static class PartitionError { + public final int partitionIndex; + public final short errorCode; + + public PartitionError(int partitionIndex, short errorCode) { + this.partitionIndex = partitionIndex; + this.errorCode = errorCode; + } + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupRequest.java index 2dd69afab9812..60a95b9fa251a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupRequest.java @@ -21,10 +21,11 @@ import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.List; @@ -120,7 +121,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new LeaveGroupResponse(responseData); } - public static LeaveGroupRequest parse(Readable readable, short version) { - return new LeaveGroupRequest(new LeaveGroupRequestData(readable, version), version); + public static LeaveGroupRequest parse(ByteBuffer buffer, short version) { + return new LeaveGroupRequest(new LeaveGroupRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java index 7cae507d4c2a7..77a72d5532bc4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/LeaveGroupResponse.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -132,7 +133,7 @@ private static Errors getError(Errors topLevelError, List member @Override public Map errorCounts() { - Map combinedErrorCounts = new EnumMap<>(Errors.class); + Map combinedErrorCounts = new HashMap<>(); // Top level error. updateErrorCounts(combinedErrorCounts, Errors.forCode(data.errorCode())); @@ -148,8 +149,8 @@ public LeaveGroupResponseData data() { return data; } - public static LeaveGroupResponse parse(Readable readable, short version) { - return new LeaveGroupResponse(new LeaveGroupResponseData(readable, version)); + public static LeaveGroupResponse parse(ByteBuffer buffer, short version) { + return new LeaveGroupResponse(new LeaveGroupResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java new file mode 100644 index 0000000000000..ab396ff10c8b2 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesRequest.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; + +public class ListClientMetricsResourcesRequest extends AbstractRequest { + public static class Builder extends AbstractRequest.Builder { + public final ListClientMetricsResourcesRequestData data; + + public Builder(ListClientMetricsResourcesRequestData data) { + super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES); + this.data = data; + } + + @Override + public ListClientMetricsResourcesRequest build(short version) { + return new ListClientMetricsResourcesRequest(data, version); + } + + @Override + public String toString() { + return data.toString(); + } + } + + private final ListClientMetricsResourcesRequestData data; + + private ListClientMetricsResourcesRequest(ListClientMetricsResourcesRequestData data, short version) { + super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES, version); + this.data = data; + } + + public ListClientMetricsResourcesRequestData data() { + return data; + } + + @Override + public ListClientMetricsResourcesResponse getErrorResponse(int throttleTimeMs, Throwable e) { + Errors error = Errors.forException(e); + ListClientMetricsResourcesResponseData response = new ListClientMetricsResourcesResponseData() + .setErrorCode(error.code()) + .setThrottleTimeMs(throttleTimeMs); + return new ListClientMetricsResourcesResponse(response); + } + + public static ListClientMetricsResourcesRequest parse(ByteBuffer buffer, short version) { + return new ListClientMetricsResourcesRequest(new ListClientMetricsResourcesRequestData( + new ByteBufferAccessor(buffer), version), version); + } + + @Override + public String toString(boolean verbose) { + return data.toString(); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java new file mode 100644 index 0000000000000..87b25a0a90444 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListClientMetricsResourcesResponse.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.requests; + +import org.apache.kafka.clients.admin.ClientMetricsResourceListing; +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; +import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import org.apache.kafka.common.protocol.Errors; + +import java.nio.ByteBuffer; +import java.util.Collection; +import java.util.Map; +import java.util.stream.Collectors; + +public class ListClientMetricsResourcesResponse extends AbstractResponse { + private final ListClientMetricsResourcesResponseData data; + + public ListClientMetricsResourcesResponse(ListClientMetricsResourcesResponseData data) { + super(ApiKeys.LIST_CLIENT_METRICS_RESOURCES); + this.data = data; + } + + public ListClientMetricsResourcesResponseData data() { + return data; + } + + public ApiError error() { + return new ApiError(Errors.forCode(data.errorCode())); + } + + @Override + public Map errorCounts() { + return errorCounts(Errors.forCode(data.errorCode())); + } + + public static ListClientMetricsResourcesResponse parse(ByteBuffer buffer, short version) { + return new ListClientMetricsResourcesResponse(new ListClientMetricsResourcesResponseData( + new ByteBufferAccessor(buffer), version)); + } + + @Override + public String toString() { + return data.toString(); + } + + @Override + public int throttleTimeMs() { + return data.throttleTimeMs(); + } + + @Override + public void maybeSetThrottleTimeMs(int throttleTimeMs) { + data.setThrottleTimeMs(throttleTimeMs); + } + + public Collection clientMetricsResources() { + return data.clientMetricsResources() + .stream() + .map(entry -> new ClientMetricsResourceListing(entry.name())) + .collect(Collectors.toList()); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java index 84f7cc2a72d69..05ced8202d9d5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsRequest.java @@ -16,17 +16,15 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.GroupType; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.HashSet; -import java.util.List; /** * Possible error codes: @@ -53,19 +51,8 @@ public ListGroupsRequest build(short version) { "v" + version + ", but we need v4 or newer to request groups by states."); } if (!data.typesFilter().isEmpty() && version < 5) { - // Types filter is supported by brokers with version 3.8.0 or later. Older brokers only support - // classic groups, so listing consumer groups on an older broker does not need to use a types filter. - // If the types filter is only for consumer and classic, or just classic groups, it can be safely omitted. - // This allows a modern admin client to list consumer groups on older brokers in a straightforward way. - HashSet typesCopy = new HashSet<>(data.typesFilter()); - boolean containedClassic = typesCopy.remove(GroupType.CLASSIC.toString()); - boolean containedConsumer = typesCopy.remove(GroupType.CONSUMER.toString()); - if (!typesCopy.isEmpty() || (!containedClassic && containedConsumer)) { - throw new UnsupportedVersionException("The broker only supports ListGroups " + - "v" + version + ", but we need v5 or newer to request groups by type. " + - "Requested group types: [" + String.join(", ", data.typesFilter()) + "]."); - } - return new ListGroupsRequest(data.duplicate().setTypesFilter(List.of()), version); + throw new UnsupportedVersionException("The broker only supports ListGroups " + + "v" + version + ", but we need v5 or newer to request groups by type."); } return new ListGroupsRequest(data, version); } @@ -94,8 +81,8 @@ public ListGroupsResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new ListGroupsResponse(listGroupsResponseData); } - public static ListGroupsRequest parse(Readable readable, short version) { - return new ListGroupsRequest(new ListGroupsRequestData(readable, version), version); + public static ListGroupsRequest parse(ByteBuffer buffer, short version) { + return new ListGroupsRequest(new ListGroupsRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java index fa40a4f751067..a12f85341d6a4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListGroupsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class ListGroupsResponse extends AbstractResponse { @@ -52,8 +53,8 @@ public Map errorCounts() { return errorCounts(Errors.forCode(data.errorCode())); } - public static ListGroupsResponse parse(Readable readable, short version) { - return new ListGroupsResponse(new ListGroupsResponseData(readable, version)); + public static ListGroupsResponse parse(ByteBuffer buffer, short version) { + return new ListGroupsResponse(new ListGroupsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java index 5862ebdfafc67..7b61ca847ffe3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsRequest.java @@ -25,9 +25,10 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.HashSet; @@ -47,8 +48,6 @@ public class ListOffsetsRequest extends AbstractRequest { public static final long LATEST_TIERED_TIMESTAMP = -5L; - public static final long EARLIEST_PENDING_UPLOAD_TIMESTAMP = -6L; - public static final int CONSUMER_REPLICA_ID = -1; public static final int DEBUGGING_REPLICA_ID = -2; @@ -60,19 +59,16 @@ public static class Builder extends AbstractRequest.Builder public static Builder forConsumer(boolean requireTimestamp, IsolationLevel isolationLevel) { - return forConsumer(requireTimestamp, isolationLevel, false, false, false, false); + return forConsumer(requireTimestamp, isolationLevel, false, false, false); } public static Builder forConsumer(boolean requireTimestamp, IsolationLevel isolationLevel, boolean requireMaxTimestamp, boolean requireEarliestLocalTimestamp, - boolean requireTieredStorageTimestamp, - boolean requireEarliestPendingUploadTimestamp) { - short minVersion = ApiKeys.LIST_OFFSETS.oldestVersion(); - if (requireEarliestPendingUploadTimestamp) - minVersion = 11; - else if (requireTieredStorageTimestamp) + boolean requireTieredStorageTimestamp) { + short minVersion = 0; + if (requireTieredStorageTimestamp) minVersion = 9; else if (requireEarliestLocalTimestamp) minVersion = 8; @@ -86,7 +82,7 @@ else if (requireTimestamp) } public static Builder forReplica(short allowedVersion, int replicaId) { - return new Builder(ApiKeys.LIST_OFFSETS.oldestVersion(), allowedVersion, replicaId, IsolationLevel.READ_UNCOMMITTED); + return new Builder((short) 0, allowedVersion, replicaId, IsolationLevel.READ_UNCOMMITTED); } private Builder(short oldestAllowedVersion, @@ -188,8 +184,8 @@ public int timeoutMs() { return data.timeoutMs(); } - public static ListOffsetsRequest parse(Readable readable, short version) { - return new ListOffsetsRequest(new ListOffsetsRequestData(readable, version), version); + public static ListOffsetsRequest parse(ByteBuffer buffer, short version) { + return new ListOffsetsRequest(new ListOffsetsRequestData(new ByteBufferAccessor(buffer), version), version); } public static List toListOffsetsTopics(Map timestampsToSearch) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java index cadff02033958..c34a843ffbc3f 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListOffsetsResponse.java @@ -21,12 +21,13 @@ import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -79,7 +80,7 @@ public List topics() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); topics().forEach(topic -> topic.partitions().forEach(partition -> updateErrorCounts(errorCounts, Errors.forCode(partition.errorCode())) @@ -88,8 +89,8 @@ public Map errorCounts() { return errorCounts; } - public static ListOffsetsResponse parse(Readable readable, short version) { - return new ListOffsetsResponse(new ListOffsetsResponseData(readable, version)); + public static ListOffsetsResponse parse(ByteBuffer buffer, short version) { + return new ListOffsetsResponse(new ListOffsetsResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsRequest.java index 503e7cb87632e..596471badd6c7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsRequest.java @@ -21,8 +21,9 @@ import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment; import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -57,9 +58,9 @@ private ListPartitionReassignmentsRequest(ListPartitionReassignmentsRequestData this.data = data; } - public static ListPartitionReassignmentsRequest parse(Readable readable, short version) { + public static ListPartitionReassignmentsRequest parse(ByteBuffer buffer, short version) { return new ListPartitionReassignmentsRequest(new ListPartitionReassignmentsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java index cee49055598a9..cbf06d4c46624 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListPartitionReassignmentsResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class ListPartitionReassignmentsResponse extends AbstractResponse { @@ -32,9 +33,9 @@ public ListPartitionReassignmentsResponse(ListPartitionReassignmentsResponseData this.data = responseData; } - public static ListPartitionReassignmentsResponse parse(Readable readable, short version) { + public static ListPartitionReassignmentsResponse parse(ByteBuffer buffer, short version) { return new ListPartitionReassignmentsResponse(new ListPartitionReassignmentsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java index 34c39625972c9..a5fef3ee7b29a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.ListTransactionsRequestData; import org.apache.kafka.common.message.ListTransactionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class ListTransactionsRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -38,10 +40,6 @@ public ListTransactionsRequest build(short version) { throw new UnsupportedVersionException("Duration filter can be set only when using API version 1 or higher." + " If client is connected to an older broker, do not specify duration filter or set duration filter to -1."); } - if (data.transactionalIdPattern() != null && version < 2) { - throw new UnsupportedVersionException("Transactional ID pattern filter can be set only when using API version 2 or higher." + - " If client is connected to an older broker, do not specify the pattern filter."); - } return new ListTransactionsRequest(data, version); } @@ -71,9 +69,9 @@ public ListTransactionsResponse getErrorResponse(int throttleTimeMs, Throwable e return new ListTransactionsResponse(response); } - public static ListTransactionsRequest parse(Readable readable, short version) { + public static ListTransactionsRequest parse(ByteBuffer buffer, short version) { return new ListTransactionsRequest(new ListTransactionsRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java index e563b111984b1..f509543025b92 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ListTransactionsResponse.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.ListTransactionsResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class ListTransactionsResponse extends AbstractResponse { @@ -38,14 +39,14 @@ public ListTransactionsResponseData data() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); updateErrorCounts(errorCounts, Errors.forCode(data.errorCode())); return errorCounts; } - public static ListTransactionsResponse parse(Readable readable, short version) { + public static ListTransactionsResponse parse(ByteBuffer buffer, short version) { return new ListTransactionsResponse(new ListTransactionsResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java index d3dcabfb4f9c5..2e60e04b2aa06 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataRequest.java @@ -22,14 +22,13 @@ import org.apache.kafka.common.message.MetadataRequestData.MetadataRequestTopic; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; -import java.util.Set; import java.util.stream.Collectors; public class MetadataRequest extends AbstractRequest { @@ -51,33 +50,33 @@ public Builder(List topics, boolean allowAutoTopicCreation, short allowe public Builder(List topics, boolean allowAutoTopicCreation, short minVersion, short maxVersion) { super(ApiKeys.METADATA, minVersion, maxVersion); - this.data = requestTopicNamesOrAllTopics(topics, allowAutoTopicCreation); - } - - private MetadataRequestData requestTopicNamesOrAllTopics(List topics, boolean allowAutoTopicCreation) { MetadataRequestData data = new MetadataRequestData(); if (topics == null) data.setTopics(null); else { topics.forEach(topic -> data.topics().add(new MetadataRequestTopic().setName(topic))); } + data.setAllowAutoTopicCreation(allowAutoTopicCreation); - return data; + this.data = data; } - private static MetadataRequestData requestTopicIds(Set topicIds) { + public Builder(List topics, boolean allowAutoTopicCreation) { + this(topics, allowAutoTopicCreation, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); + } + + public Builder(List topicIds) { + super(ApiKeys.METADATA, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); MetadataRequestData data = new MetadataRequestData(); if (topicIds == null) data.setTopics(null); else { topicIds.forEach(topicId -> data.topics().add(new MetadataRequestTopic().setTopicId(topicId))); } - data.setAllowAutoTopicCreation(false); // can't auto-create without topic name - return data; - } - public Builder(List topics, boolean allowAutoTopicCreation) { - this(topics, allowAutoTopicCreation, ApiKeys.METADATA.oldestVersion(), ApiKeys.METADATA.latestVersion()); + // It's impossible to create topic with topicId + data.setAllowAutoTopicCreation(false); + this.data = data; } public static Builder allTopics() { @@ -86,20 +85,6 @@ public static Builder allTopics() { return new Builder(ALL_TOPICS_REQUEST_DATA); } - /** - * @return Builder for metadata request using topic names. - */ - public static Builder forTopicNames(List topicNames, boolean allowAutoTopicCreation) { - return new MetadataRequest.Builder(topicNames, allowAutoTopicCreation); - } - - /** - * @return Builder for metadata request using topic IDs. - */ - public static Builder forTopicIds(Set topicIds) { - return new MetadataRequest.Builder(requestTopicIds(new HashSet<>(topicIds))); - } - public boolean emptyTopicList() { return data.topics().isEmpty(); } @@ -108,13 +93,6 @@ public boolean isAllTopics() { return data.topics() == null; } - public List topicIds() { - return data.topics() - .stream() - .map(MetadataRequestTopic::topicId) - .collect(Collectors.toList()); - } - public List topics() { return data.topics() .stream() @@ -214,8 +192,8 @@ public boolean allowAutoTopicCreation() { return data.allowAutoTopicCreation(); } - public static MetadataRequest parse(Readable readable, short version) { - return new MetadataRequest(new MetadataRequestData(readable, version), version); + public static MetadataRequest parse(ByteBuffer buffer, short version) { + return new MetadataRequest(new MetadataRequestData(new ByteBufferAccessor(buffer), version), version); } public static List convertToMetadataRequestTopic(final Collection topics) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java index bbcebdd484618..3a7e4f276d9dc 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/MetadataResponse.java @@ -26,13 +26,13 @@ import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition; import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -126,7 +126,7 @@ public Map errorsByTopicId() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.topics().forEach(metadata -> { metadata.partitions().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode()))); updateErrorCounts(errorCounts, Errors.forCode(metadata.errorCode())); @@ -280,8 +280,8 @@ private static boolean hasReliableLeaderEpochs(short version) { return version >= 9; } - public static MetadataResponse parse(Readable readable, short version) { - return new MetadataResponse(new MetadataResponseData(readable, version), + public static MetadataResponse parse(ByteBuffer buffer, short version) { + return new MetadataResponse(new MetadataResponseData(new ByteBufferAccessor(buffer), version), hasReliableLeaderEpochs(version)); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java index 1bd9c41f66834..88111b1007717 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitRequest.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestTopic; @@ -25,9 +24,10 @@ import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.HashMap; import java.util.Map; @@ -46,39 +46,20 @@ public static class Builder extends AbstractRequest.Builder private final OffsetCommitRequestData data; - private Builder(OffsetCommitRequestData data, short oldestAllowedVersion, short latestAllowedVersion) { - super(ApiKeys.OFFSET_COMMIT, oldestAllowedVersion, latestAllowedVersion); + public Builder(OffsetCommitRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.OFFSET_COMMIT, enableUnstableLastVersion); this.data = data; } - public static Builder forTopicIdsOrNames(OffsetCommitRequestData data, boolean enableUnstableLastVersion) { - return new Builder(data, ApiKeys.OFFSET_COMMIT.oldestVersion(), ApiKeys.OFFSET_COMMIT.latestVersion(enableUnstableLastVersion)); - } - - public static Builder forTopicNames(OffsetCommitRequestData data) { - return new Builder(data, ApiKeys.OFFSET_COMMIT.oldestVersion(), (short) 9); + public Builder(OffsetCommitRequestData data) { + this(data, false); } @Override public OffsetCommitRequest build(short version) { if (data.groupInstanceId() != null && version < 7) { - throw new UnsupportedVersionException("The broker offset commit api version " + - version + " does not support usage of config group.instance.id."); - } - if (version >= 10) { - data.topics().forEach(topic -> { - if (topic.topicId() == null || topic.topicId().equals(Uuid.ZERO_UUID)) { - throw new UnsupportedVersionException("The broker offset commit api version " + - version + " does require usage of topic ids."); - } - }); - } else { - data.topics().forEach(topic -> { - if (topic.name() == null || topic.name().isEmpty()) { - throw new UnsupportedVersionException("The broker offset commit api version " + - version + " does require usage of topic names."); - } - }); + throw new UnsupportedVersionException("The broker offset commit protocol version " + + version + " does not support usage of config group.instance.id."); } return new OffsetCommitRequest(data, version); } @@ -117,7 +98,6 @@ public static OffsetCommitResponseData getErrorResponse( OffsetCommitResponseData response = new OffsetCommitResponseData(); request.topics().forEach(topic -> { OffsetCommitResponseTopic responseTopic = new OffsetCommitResponseTopic() - .setTopicId(topic.topicId()) .setName(topic.name()); response.topics().add(responseTopic); @@ -141,7 +121,7 @@ public OffsetCommitResponse getErrorResponse(Throwable e) { return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e); } - public static OffsetCommitRequest parse(Readable readable, short version) { - return new OffsetCommitRequest(new OffsetCommitRequestData(readable, version), version); + public static OffsetCommitRequest parse(ByteBuffer buffer, short version) { + return new OffsetCommitRequest(new OffsetCommitRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java index 521ffa1c2fdc3..2b6d00b1a47f6 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetCommitResponse.java @@ -17,14 +17,14 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponsePartition; import org.apache.kafka.common.message.OffsetCommitResponseData.OffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -99,8 +99,8 @@ public Map errorCounts() { Errors.forCode(partitionResult.errorCode())))); } - public static OffsetCommitResponse parse(Readable readable, short version) { - return new OffsetCommitResponse(new OffsetCommitResponseData(readable, version)); + public static OffsetCommitResponse parse(ByteBuffer buffer, short version) { + return new OffsetCommitResponse(new OffsetCommitResponseData(new ByteBufferAccessor(buffer), version)); } @Override @@ -123,56 +123,43 @@ public boolean shouldClientThrottle(short version) { return version >= 4; } - public static boolean useTopicIds(short version) { - return version >= 10; - } - - public static Builder newBuilder(boolean useTopicIds) { - if (useTopicIds) { - return new TopicIdBuilder(); - } else { - return new TopicNameBuilder(); - } - } + public static class Builder { + OffsetCommitResponseData data = new OffsetCommitResponseData(); + HashMap byTopicName = new HashMap<>(); - public abstract static class Builder { - protected OffsetCommitResponseData data = new OffsetCommitResponseData(); - - protected abstract void add( - OffsetCommitResponseTopic topic - ); - - protected abstract OffsetCommitResponseTopic get( - Uuid topicId, + private OffsetCommitResponseTopic getOrCreateTopic( String topicName - ); - - protected abstract OffsetCommitResponseTopic getOrCreate( - Uuid topicId, - String topicName - ); + ) { + OffsetCommitResponseTopic topic = byTopicName.get(topicName); + if (topic == null) { + topic = new OffsetCommitResponseTopic().setName(topicName); + data.topics().add(topic); + byTopicName.put(topicName, topic); + } + return topic; + } public Builder addPartition( - Uuid topicId, String topicName, int partitionIndex, Errors error ) { - final OffsetCommitResponseTopic topicResponse = getOrCreate(topicId, topicName); + final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); + topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partitionIndex) .setErrorCode(error.code())); + return this; } public

        Builder addPartitions( - Uuid topicId, String topicName, List

        partitions, Function partitionIndex, Errors error ) { - final OffsetCommitResponseTopic topicResponse = getOrCreate(topicId, topicName); + final OffsetCommitResponseTopic topicResponse = getOrCreateTopic(topicName); partitions.forEach(partition -> topicResponse.partitions().add(new OffsetCommitResponsePartition() .setPartitionIndex(partitionIndex.apply(partition)) @@ -190,10 +177,11 @@ public Builder merge( } else { // Otherwise, we have to merge them together. newData.topics().forEach(newTopic -> { - OffsetCommitResponseTopic existingTopic = get(newTopic.topicId(), newTopic.name()); + OffsetCommitResponseTopic existingTopic = byTopicName.get(newTopic.name()); if (existingTopic == null) { // If no topic exists, we can directly copy the new topic data. - add(newTopic); + data.topics().add(newTopic); + byTopicName.put(newTopic.name(), newTopic); } else { // Otherwise, we add the partitions to the existing one. Note we // expect non-overlapping partitions here as we don't verify @@ -202,6 +190,7 @@ public Builder merge( } }); } + return this; } @@ -209,78 +198,4 @@ public OffsetCommitResponse build() { return new OffsetCommitResponse(data); } } - - public static class TopicIdBuilder extends Builder { - private final HashMap byTopicId = new HashMap<>(); - - @Override - protected void add(OffsetCommitResponseTopic topic) { - throwIfTopicIdIsNull(topic.topicId()); - data.topics().add(topic); - byTopicId.put(topic.topicId(), topic); - } - - @Override - protected OffsetCommitResponseTopic get(Uuid topicId, String topicName) { - throwIfTopicIdIsNull(topicId); - return byTopicId.get(topicId); - } - - @Override - protected OffsetCommitResponseTopic getOrCreate(Uuid topicId, String topicName) { - throwIfTopicIdIsNull(topicId); - OffsetCommitResponseTopic topic = byTopicId.get(topicId); - if (topic == null) { - topic = new OffsetCommitResponseTopic() - .setName(topicName) - .setTopicId(topicId); - data.topics().add(topic); - byTopicId.put(topicId, topic); - } - return topic; - } - - private static void throwIfTopicIdIsNull(Uuid topicId) { - if (topicId == null) { - throw new IllegalArgumentException("TopicId cannot be null."); - } - } - } - - public static class TopicNameBuilder extends Builder { - private final HashMap byTopicName = new HashMap<>(); - - @Override - protected void add(OffsetCommitResponseTopic topic) { - throwIfTopicNameIsNull(topic.name()); - data.topics().add(topic); - byTopicName.put(topic.name(), topic); - } - - @Override - protected OffsetCommitResponseTopic get(Uuid topicId, String topicName) { - throwIfTopicNameIsNull(topicName); - return byTopicName.get(topicName); - } - - @Override - protected OffsetCommitResponseTopic getOrCreate(Uuid topicId, String topicName) { - throwIfTopicNameIsNull(topicName); - OffsetCommitResponseTopic topic = byTopicName.get(topicName); - if (topic == null) { - topic = new OffsetCommitResponseTopic() - .setName(topicName) - .setTopicId(topicId); - data.topics().add(topic); - byTopicName.put(topicName, topic); - } - return topic; - } - - private void throwIfTopicNameIsNull(String topicName) { - if (topicName == null) { - throw new IllegalArgumentException("TopicName cannot be null."); - } - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteRequest.java index 78bec9df0a92b..28b763d520f00 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.OffsetDeleteRequestData; import org.apache.kafka.common.message.OffsetDeleteResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class OffsetDeleteRequest extends AbstractRequest { @@ -64,8 +66,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return getErrorResponse(throttleTimeMs, Errors.forException(e)); } - public static OffsetDeleteRequest parse(Readable readable, short version) { - return new OffsetDeleteRequest(new OffsetDeleteRequestData(readable, version), version); + public static OffsetDeleteRequest parse(ByteBuffer buffer, short version) { + return new OffsetDeleteRequest(new OffsetDeleteRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java index 0f3655d62c67d..aa9b4bc4ffe66 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetDeleteResponse.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartition; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.function.Function; @@ -139,7 +140,7 @@ public OffsetDeleteResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.topics().forEach(topic -> topic.partitions().forEach(partition -> @@ -149,8 +150,8 @@ public Map errorCounts() { return counts; } - public static OffsetDeleteResponse parse(Readable readable, short version) { - return new OffsetDeleteResponse(new OffsetDeleteResponseData(readable, version)); + public static OffsetDeleteResponse parse(ByteBuffer buffer, short version) { + return new OffsetDeleteResponse(new OffsetDeleteResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java index 0d91788522916..7ece0700bfa96 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchRequest.java @@ -17,87 +17,131 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopic; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import org.apache.kafka.common.record.RecordBatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Map.Entry; +import java.util.Optional; import java.util.stream.Collectors; public class OffsetFetchRequest extends AbstractRequest { private static final Logger log = LoggerFactory.getLogger(OffsetFetchRequest.class); - public static final short TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION = 2; - public static final short REQUIRE_STABLE_OFFSET_MIN_VERSION = 7; - public static final short BATCH_MIN_VERSION = 8; - public static final short TOPIC_ID_MIN_VERSION = 10; + private static final List ALL_TOPIC_PARTITIONS = null; + private static final List ALL_TOPIC_PARTITIONS_BATCH = null; private final OffsetFetchRequestData data; public static class Builder extends AbstractRequest.Builder { - private final OffsetFetchRequestData data; + + public final OffsetFetchRequestData data; private final boolean throwOnFetchStableOffsetsUnsupported; - public static Builder forTopicIdsOrNames( - OffsetFetchRequestData data, - boolean throwOnFetchStableOffsetsUnsupported, - boolean enableUnstableLastVersion - ) { - return new Builder( - data, - throwOnFetchStableOffsetsUnsupported, - ApiKeys.OFFSET_FETCH.oldestVersion(), - ApiKeys.OFFSET_FETCH.latestVersion(enableUnstableLastVersion) + public Builder(String groupId, + boolean requireStable, + List partitions, + boolean throwOnFetchStableOffsetsUnsupported) { + this( + groupId, + null, + -1, + requireStable, + partitions, + throwOnFetchStableOffsetsUnsupported ); } - public static Builder forTopicNames( - OffsetFetchRequestData data, - boolean throwOnFetchStableOffsetsUnsupported - ) { - return new Builder( - data, - throwOnFetchStableOffsetsUnsupported, - ApiKeys.OFFSET_FETCH.oldestVersion(), - (short) (TOPIC_ID_MIN_VERSION - 1) - ); + public Builder(String groupId, + String memberId, + int memberEpoch, + boolean requireStable, + List partitions, + boolean throwOnFetchStableOffsetsUnsupported) { + super(ApiKeys.OFFSET_FETCH); + + OffsetFetchRequestData.OffsetFetchRequestGroup group = + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId(groupId) + .setMemberId(memberId) + .setMemberEpoch(memberEpoch); + + if (partitions != null) { + Map offsetFetchRequestTopicMap = new HashMap<>(); + for (TopicPartition topicPartition : partitions) { + String topicName = topicPartition.topic(); + OffsetFetchRequestTopics topic = offsetFetchRequestTopicMap.getOrDefault( + topicName, new OffsetFetchRequestTopics().setName(topicName)); + topic.partitionIndexes().add(topicPartition.partition()); + offsetFetchRequestTopicMap.put(topicName, topic); + } + group.setTopics(new ArrayList<>(offsetFetchRequestTopicMap.values())); + } else { + // If passed in partition list is null, it is requesting offsets for all topic partitions. + group.setTopics(ALL_TOPIC_PARTITIONS_BATCH); + } + + this.data = new OffsetFetchRequestData() + .setRequireStable(requireStable) + .setGroups(Collections.singletonList(group)); + this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported; } - private Builder( - OffsetFetchRequestData data, - boolean throwOnFetchStableOffsetsUnsupported, - short oldestAllowedVersion, - short latestAllowedVersion - ) { - super(ApiKeys.OFFSET_FETCH, oldestAllowedVersion, latestAllowedVersion); - this.data = data; + public Builder(Map> groupIdToTopicPartitionMap, + boolean requireStable, + boolean throwOnFetchStableOffsetsUnsupported) { + super(ApiKeys.OFFSET_FETCH); + + List groups = new ArrayList<>(); + for (Entry> entry : groupIdToTopicPartitionMap.entrySet()) { + String groupName = entry.getKey(); + List tpList = entry.getValue(); + final List topics; + if (tpList != null) { + Map offsetFetchRequestTopicMap = + new HashMap<>(); + for (TopicPartition topicPartition : tpList) { + String topicName = topicPartition.topic(); + OffsetFetchRequestTopics topic = offsetFetchRequestTopicMap.getOrDefault( + topicName, new OffsetFetchRequestTopics().setName(topicName)); + topic.partitionIndexes().add(topicPartition.partition()); + offsetFetchRequestTopicMap.put(topicName, topic); + } + topics = new ArrayList<>(offsetFetchRequestTopicMap.values()); + } else { + topics = ALL_TOPIC_PARTITIONS_BATCH; + } + groups.add(new OffsetFetchRequestGroup() + .setGroupId(groupName) + .setTopics(topics)); + } + this.data = new OffsetFetchRequestData() + .setGroups(groups) + .setRequireStable(requireStable); this.throwOnFetchStableOffsetsUnsupported = throwOnFetchStableOffsetsUnsupported; } - private void throwIfBatchingIsUnsupported(short version) { - if (data.groups().size() > 1 && version < BATCH_MIN_VERSION) { + @Override + public OffsetFetchRequest build(short version) { + if (data.groups().size() > 1 && version < 8) { throw new NoBatchedOffsetFetchRequestException("Broker does not support" + " batching groups for fetch offset request on version " + version); } - } - - private void throwIfStableOffsetsUnsupported(short version) { - if (data.requireStable() && version < REQUIRE_STABLE_OFFSET_MIN_VERSION) { + if (data.requireStable() && version < 7) { if (throwOnFetchStableOffsetsUnsupported) { throw new UnsupportedVersionException("Broker unexpectedly " + "doesn't support requireStable flag on version " + version); @@ -108,77 +152,37 @@ private void throwIfStableOffsetsUnsupported(short version) { data.setRequireStable(false); } } - } - - private void throwIfMissingRequiredTopicIdentifiers(short version) { - if (version < TOPIC_ID_MIN_VERSION) { - data.groups().forEach(group -> { - if (group.topics() != null) { - group.topics().forEach(topic -> { - if (topic.name() == null || topic.name().isEmpty()) { - throw new UnsupportedVersionException("The broker offset fetch api version " + - version + " does require usage of topic names."); - } - }); - } - }); - } else { - data.groups().forEach(group -> { - if (group.topics() != null) { - group.topics().forEach(topic -> { - if (topic.topicId() == null || topic.topicId().equals(Uuid.ZERO_UUID)) { - throw new UnsupportedVersionException("The broker offset fetch api version " + - version + " does require usage of topic ids."); - } - }); + // convert data to use the appropriate version since version 8 uses different format + if (version < 8) { + OffsetFetchRequestData normalizedData; + if (!data.groups().isEmpty()) { + OffsetFetchRequestGroup group = data.groups().get(0); + String groupName = group.groupId(); + List topics = group.topics(); + List oldFormatTopics = null; + if (topics != null) { + oldFormatTopics = topics + .stream() + .map(t -> + new OffsetFetchRequestTopic() + .setName(t.name()) + .setPartitionIndexes(t.partitionIndexes())) + .collect(Collectors.toList()); } - }); - } - } - - private void throwIfRequestingAllTopicsIsUnsupported(short version) { - if (version < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { - data.groups().forEach(group -> { - if (group.topics() == null) { - throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " + - "v" + version + ", but we need v2 or newer to request all topic partitions."); - } - }); - } - } - - private OffsetFetchRequestData maybeDowngrade(short version) { - // Convert data to use the appropriate version since version 8 - // uses different format. - if (version >= BATCH_MIN_VERSION || data.groups().isEmpty()) return data; - - OffsetFetchRequestGroup group = data.groups().get(0); - String groupName = group.groupId(); - List topics = group.topics(); - List oldFormatTopics = null; - - if (topics != null) { - oldFormatTopics = topics - .stream() - .map(t -> new OffsetFetchRequestTopic() - .setName(t.name()) - .setPartitionIndexes(t.partitionIndexes())) - .collect(Collectors.toList()); + normalizedData = new OffsetFetchRequestData() + .setGroupId(groupName) + .setTopics(oldFormatTopics) + .setRequireStable(data.requireStable()); + } else { + normalizedData = data; + } + if (normalizedData.topics() == null && version < 2) { + throw new UnsupportedVersionException("The broker only supports OffsetFetchRequest " + + "v" + version + ", but we need v2 or newer to request all topic partitions."); + } + return new OffsetFetchRequest(normalizedData, version); } - - return new OffsetFetchRequestData() - .setGroupId(groupName) - .setTopics(oldFormatTopics) - .setRequireStable(data.requireStable()); - } - - @Override - public OffsetFetchRequest build(short version) { - throwIfBatchingIsUnsupported(version); - throwIfStableOffsetsUnsupported(version); - throwIfMissingRequiredTopicIdentifiers(version); - throwIfRequestingAllTopicsIsUnsupported(version); - return new OffsetFetchRequest(maybeDowngrade(version), version); + return new OffsetFetchRequest(data, version); } @Override @@ -199,6 +203,19 @@ public NoBatchedOffsetFetchRequestException(String message) { } } + public List partitions() { + if (isAllPartitions()) { + return null; + } + List partitions = new ArrayList<>(); + for (OffsetFetchRequestTopic topic : data.topics()) { + for (Integer partitionIndex : topic.partitionIndexes()) { + partitions.add(new TopicPartition(topic.name(), partitionIndex)); + } + } + return partitions; + } + public String groupId() { return data.groupId(); } @@ -208,7 +225,7 @@ public boolean requireStable() { } public List groups() { - if (version() >= BATCH_MIN_VERSION) { + if (version() >= 8) { return data.groups(); } else { OffsetFetchRequestData.OffsetFetchRequestGroup group = @@ -237,7 +254,7 @@ public Map> groupIdsToPartitions() { Map> groupIdsToPartitions = new HashMap<>(); for (OffsetFetchRequestGroup group : data.groups()) { List tpList = null; - if (group.topics() != null) { + if (group.topics() != ALL_TOPIC_PARTITIONS_BATCH) { tpList = new ArrayList<>(); for (OffsetFetchRequestTopics topic : group.topics()) { for (Integer partitionIndex : topic.partitionIndexes()) { @@ -269,61 +286,65 @@ private OffsetFetchRequest(OffsetFetchRequestData data, short version) { this.data = data; } + public OffsetFetchResponse getErrorResponse(Errors error) { + return getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, error); + } + + public OffsetFetchResponse getErrorResponse(int throttleTimeMs, Errors error) { + Map responsePartitions = new HashMap<>(); + if (version() < 2) { + OffsetFetchResponse.PartitionData partitionError = new OffsetFetchResponse.PartitionData( + OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), + OffsetFetchResponse.NO_METADATA, + error); + + for (OffsetFetchRequestTopic topic : this.data.topics()) { + for (int partitionIndex : topic.partitionIndexes()) { + responsePartitions.put( + new TopicPartition(topic.name(), partitionIndex), partitionError); + } + } + return new OffsetFetchResponse(error, responsePartitions); + } + if (version() == 2) { + return new OffsetFetchResponse(error, responsePartitions); + } + if (version() >= 3 && version() < 8) { + return new OffsetFetchResponse(throttleTimeMs, error, responsePartitions); + } + List groupIds = groupIds(); + Map errorsMap = new HashMap<>(groupIds.size()); + Map> partitionMap = + new HashMap<>(groupIds.size()); + for (String g : groupIds) { + errorsMap.put(g, error); + partitionMap.put(g, responsePartitions); + } + return new OffsetFetchResponse(throttleTimeMs, errorsMap, partitionMap); + } + @Override public OffsetFetchResponse getErrorResponse(int throttleTimeMs, Throwable e) { - Errors error = Errors.forException(e); - - if (version() < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { - // The response does not support top level error so we return each - // partition with the error. - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setTopics(data.topics().stream().map(topic -> - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName(topic.name()) - .setPartitions(topic.partitionIndexes().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(partition) - .setErrorCode(error.code()) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())), - version() - ); - } else if (version() < BATCH_MIN_VERSION) { - // The response does not support multiple groups but it does support - // top level error. - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code()), - version() - ); - } else { - // The response does support multiple groups so we provide a top level - // error per group. - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setThrottleTimeMs(throttleTimeMs) - .setGroups(data.groups().stream().map(group -> - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId()) - .setErrorCode(error.code()) - ).collect(Collectors.toList())), - version() - ); - } + return getErrorResponse(throttleTimeMs, Errors.forException(e)); } - public static OffsetFetchRequest parse(Readable readable, short version) { - return new OffsetFetchRequest(new OffsetFetchRequestData(readable, version), version); + public static OffsetFetchRequest parse(ByteBuffer buffer, short version) { + return new OffsetFetchRequest(new OffsetFetchRequestData(new ByteBufferAccessor(buffer), version), version); } - public static boolean useTopicIds(short version) { - return version >= TOPIC_ID_MIN_VERSION; + public boolean isAllPartitions() { + return data.topics() == ALL_TOPIC_PARTITIONS; + } + + public boolean isAllPartitionsForGroup(String groupId) { + OffsetFetchRequestGroup group = data + .groups() + .stream() + .filter(g -> g.groupId().equals(groupId)) + .collect(Collectors.toList()) + .get(0); + return group.topics() == ALL_TOPIC_PARTITIONS_BATCH; } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java index 77297e96e6e6b..82b6cdb097951 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetFetchResponse.java @@ -16,26 +16,30 @@ */ package org.apache.kafka.common.requests; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseGroup; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartition; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartitions; import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopic; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopics; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.function.Function; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Optional; import java.util.stream.Collectors; import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH; -import static org.apache.kafka.common.requests.OffsetFetchRequest.BATCH_MIN_VERSION; -import static org.apache.kafka.common.requests.OffsetFetchRequest.TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION; /** * Possible error codes: @@ -56,112 +60,221 @@ public class OffsetFetchResponse extends AbstractResponse { public static final long INVALID_OFFSET = -1L; public static final String NO_METADATA = ""; - - // We only need to track the partition errors returned in version 1. This - // is used to identify group level errors when the response is normalized. + public static final PartitionData UNKNOWN_PARTITION = new PartitionData(INVALID_OFFSET, + Optional.empty(), + NO_METADATA, + Errors.UNKNOWN_TOPIC_OR_PARTITION); + public static final PartitionData UNAUTHORIZED_PARTITION = new PartitionData(INVALID_OFFSET, + Optional.empty(), + NO_METADATA, + Errors.TOPIC_AUTHORIZATION_FAILED); private static final List PARTITION_ERRORS = Arrays.asList( - Errors.UNKNOWN_TOPIC_OR_PARTITION, - Errors.TOPIC_AUTHORIZATION_FAILED - ); + Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.TOPIC_AUTHORIZATION_FAILED); - private final short version; private final OffsetFetchResponseData data; - // Lazily initialized when OffsetFetchResponse#group is called. - private Map groups = null; + private final Errors error; + private final Map groupLevelErrors = new HashMap<>(); - public static class Builder { - private final List groups; + public static final class PartitionData { + public final long offset; + public final String metadata; + public final Errors error; + public final Optional leaderEpoch; - public Builder(OffsetFetchResponseGroup group) { - this(List.of(group)); + public PartitionData(long offset, + Optional leaderEpoch, + String metadata, + Errors error) { + this.offset = offset; + this.leaderEpoch = leaderEpoch; + this.metadata = metadata; + this.error = error; } - public Builder(List groups) { - this.groups = groups; + public boolean hasError() { + return this.error != Errors.NONE; } - public OffsetFetchResponse build(short version) { - var data = new OffsetFetchResponseData(); + @Override + public boolean equals(Object other) { + if (!(other instanceof PartitionData)) + return false; + PartitionData otherPartition = (PartitionData) other; + return Objects.equals(this.offset, otherPartition.offset) + && Objects.equals(this.leaderEpoch, otherPartition.leaderEpoch) + && Objects.equals(this.metadata, otherPartition.metadata) + && Objects.equals(this.error, otherPartition.error); + } - if (version >= BATCH_MIN_VERSION) { - data.setGroups(groups); - } else { - if (groups.size() != 1) { - throw new UnsupportedVersionException( - "Version " + version + " of OffsetFetchResponse only supports one group." - ); - } + @Override + public String toString() { + return "PartitionData(" + + "offset=" + offset + + ", leaderEpoch=" + leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH) + + ", metadata=" + metadata + + ", error='" + error.toString() + + ")"; + } + + @Override + public int hashCode() { + return Objects.hash(offset, leaderEpoch, metadata, error); + } + } + + /** + * Constructor without throttle time. + * @param error Potential coordinator or group level error code (for api version 2 and later) + * @param responseData Fetched offset information grouped by topic-partition + */ + public OffsetFetchResponse(Errors error, Map responseData) { + this(DEFAULT_THROTTLE_TIME, error, responseData); + } + + /** + * Constructor with throttle time for version 0 to 7 + * @param throttleTimeMs The time in milliseconds that this response was throttled + * @param error Potential coordinator or group level error code (for api version 2 and later) + * @param responseData Fetched offset information grouped by topic-partition + */ + public OffsetFetchResponse(int throttleTimeMs, Errors error, Map responseData) { + super(ApiKeys.OFFSET_FETCH); + Map offsetFetchResponseTopicMap = new HashMap<>(); + for (Map.Entry entry : responseData.entrySet()) { + String topicName = entry.getKey().topic(); + OffsetFetchResponseTopic topic = offsetFetchResponseTopicMap.getOrDefault( + topicName, new OffsetFetchResponseTopic().setName(topicName)); + PartitionData partitionData = entry.getValue(); + topic.partitions().add(new OffsetFetchResponsePartition() + .setPartitionIndex(entry.getKey().partition()) + .setErrorCode(partitionData.error.code()) + .setCommittedOffset(partitionData.offset) + .setCommittedLeaderEpoch( + partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH)) + .setMetadata(partitionData.metadata) + ); + offsetFetchResponseTopicMap.put(topicName, topic); + } + + this.data = new OffsetFetchResponseData() + .setTopics(new ArrayList<>(offsetFetchResponseTopicMap.values())) + .setErrorCode(error.code()) + .setThrottleTimeMs(throttleTimeMs); + this.error = error; + } + + /** + * Constructor with throttle time for version 8 and above. + * @param throttleTimeMs The time in milliseconds that this response was throttled + * @param errors Potential coordinator or group level error code + * @param responseData Fetched offset information grouped by topic-partition and by group + */ + public OffsetFetchResponse(int throttleTimeMs, + Map errors, + Map> responseData) { + super(ApiKeys.OFFSET_FETCH); + List groupList = new ArrayList<>(); + for (Entry> entry : responseData.entrySet()) { + String groupName = entry.getKey(); + Map partitionDataMap = entry.getValue(); + Map offsetFetchResponseTopicsMap = new HashMap<>(); + for (Entry partitionEntry : partitionDataMap.entrySet()) { + String topicName = partitionEntry.getKey().topic(); + OffsetFetchResponseTopics topic = + offsetFetchResponseTopicsMap.getOrDefault(topicName, + new OffsetFetchResponseTopics().setName(topicName)); + PartitionData partitionData = partitionEntry.getValue(); + topic.partitions().add(new OffsetFetchResponsePartitions() + .setPartitionIndex(partitionEntry.getKey().partition()) + .setErrorCode(partitionData.error.code()) + .setCommittedOffset(partitionData.offset) + .setCommittedLeaderEpoch( + partitionData.leaderEpoch.orElse(NO_PARTITION_LEADER_EPOCH)) + .setMetadata(partitionData.metadata)); + offsetFetchResponseTopicsMap.put(topicName, topic); + } + groupList.add(new OffsetFetchResponseGroup() + .setGroupId(groupName) + .setTopics(new ArrayList<>(offsetFetchResponseTopicsMap.values())) + .setErrorCode(errors.get(groupName).code())); + groupLevelErrors.put(groupName, errors.get(groupName)); + } + this.data = new OffsetFetchResponseData() + .setGroups(groupList) + .setThrottleTimeMs(throttleTimeMs); + this.error = null; + } + + public OffsetFetchResponse(List groups, short version) { + super(ApiKeys.OFFSET_FETCH); + data = new OffsetFetchResponseData(); + + if (version >= 8) { + data.setGroups(groups); + error = null; + + for (OffsetFetchResponseGroup group : data.groups()) { + this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode())); + } + } else { + if (groups.size() != 1) { + throw new UnsupportedVersionException( + "Version " + version + " of OffsetFetchResponse only supports one group." + ); + } + + OffsetFetchResponseGroup group = groups.get(0); + data.setErrorCode(group.errorCode()); + error = Errors.forCode(group.errorCode()); - OffsetFetchResponseGroup group = groups.get(0); - data.setErrorCode(group.errorCode()); + group.topics().forEach(topic -> { + OffsetFetchResponseTopic newTopic = new OffsetFetchResponseTopic().setName(topic.name()); + data.topics().add(newTopic); - group.topics().forEach(topic -> { - OffsetFetchResponseTopic newTopic = new OffsetFetchResponseTopic().setName(topic.name()); - data.topics().add(newTopic); + topic.partitions().forEach(partition -> { + OffsetFetchResponsePartition newPartition; - topic.partitions().forEach(partition -> { - newTopic.partitions().add(new OffsetFetchResponsePartition() + if (version < 2 && group.errorCode() != Errors.NONE.code()) { + // Versions prior to version 2 do not support a top level error. Therefore, + // we put it at the partition level. + newPartition = new OffsetFetchResponsePartition() + .setPartitionIndex(partition.partitionIndex()) + .setErrorCode(group.errorCode()) + .setCommittedOffset(INVALID_OFFSET) + .setMetadata(NO_METADATA) + .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH); + } else { + newPartition = new OffsetFetchResponsePartition() .setPartitionIndex(partition.partitionIndex()) .setErrorCode(partition.errorCode()) .setCommittedOffset(partition.committedOffset()) .setMetadata(partition.metadata()) - .setCommittedLeaderEpoch(partition.committedLeaderEpoch())); - }); - }); - } + .setCommittedLeaderEpoch(partition.committedLeaderEpoch()); + } - return new OffsetFetchResponse(data, version); + newTopic.partitions().add(newPartition); + }); + }); } } public OffsetFetchResponse(OffsetFetchResponseData data, short version) { super(ApiKeys.OFFSET_FETCH); this.data = data; - this.version = version; - } - - public OffsetFetchResponseData.OffsetFetchResponseGroup group(String groupId) { - if (version < BATCH_MIN_VERSION) { - // for version 2 and later use the top-level error code from the response. - // for older versions there is no top-level error in the response and all errors are partition errors, - // so if there is a group or coordinator error at the partition level use that as the top-level error. - // this way clients can depend on the top-level error regardless of the offset fetch version. - // we return the error differently starting with version 8, so we will only populate the - // error field if we are between version 2 and 7. if we are in version 8 or greater, then - // we will populate the map of group id to error codes. - short topLevelError = version < TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION ? topLevelError(data).code() : data.errorCode(); - if (topLevelError != Errors.NONE.code()) { - return new OffsetFetchResponseGroup() - .setGroupId(groupId) - .setErrorCode(topLevelError); - } else { - return new OffsetFetchResponseGroup() - .setGroupId(groupId) - .setTopics(data.topics().stream().map(topic -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topic.name()) - .setPartitions(topic.partitions().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition.partitionIndex()) - .setErrorCode(partition.errorCode()) - .setCommittedOffset(partition.committedOffset()) - .setMetadata(partition.metadata()) - .setCommittedLeaderEpoch(partition.committedLeaderEpoch()) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())); - } + // for version 2 and later use the top-level error code (in ERROR_CODE_KEY_NAME) from the response. + // for older versions there is no top-level error in the response and all errors are partition errors, + // so if there is a group or coordinator error at the partition level use that as the top-level error. + // this way clients can depend on the top-level error regardless of the offset fetch version. + // we return the error differently starting with version 8, so we will only populate the + // error field if we are between version 2 and 7. if we are in version 8 or greater, then + // we will populate the map of group id to error codes. + if (version < 8) { + this.error = version >= 2 ? Errors.forCode(data.errorCode()) : topLevelError(data); } else { - if (groups == null) { - groups = data.groups().stream().collect(Collectors.toMap( - OffsetFetchResponseData.OffsetFetchResponseGroup::groupId, - Function.identity() - )); - } - var group = groups.get(groupId); - if (group == null) { - throw new IllegalArgumentException("Group " + groupId + " not found in the response"); + for (OffsetFetchResponseGroup group : data.groups()) { + this.groupLevelErrors.put(group.groupId(), Errors.forCode(group.errorCode())); } - return group; + this.error = null; } } @@ -187,33 +300,98 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } + public boolean hasError() { + return error != Errors.NONE; + } + + public boolean groupHasError(String groupId) { + Errors error = groupLevelErrors.get(groupId); + if (error == null) { + return this.error != null && this.error != Errors.NONE; + } + return error != Errors.NONE; + } + + public Errors error() { + return error; + } + + public Errors groupLevelError(String groupId) { + if (error != null) { + return error; + } + return groupLevelErrors.get(groupId); + } + @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); - if (version < BATCH_MIN_VERSION) { - if (version >= TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { - updateErrorCounts(counts, Errors.forCode(data.errorCode())); + Map counts = new HashMap<>(); + if (!groupLevelErrors.isEmpty()) { + // built response with v8 or above + for (Map.Entry entry : groupLevelErrors.entrySet()) { + updateErrorCounts(counts, entry.getValue()); } - data.topics().forEach(topic -> - topic.partitions().forEach(partition -> - updateErrorCounts(counts, Errors.forCode(partition.errorCode())) - ) - ); - } else { - data.groups().forEach(group -> { - updateErrorCounts(counts, Errors.forCode(group.errorCode())); + for (OffsetFetchResponseGroup group : data.groups()) { group.topics().forEach(topic -> topic.partitions().forEach(partition -> - updateErrorCounts(counts, Errors.forCode(partition.errorCode())) - ) - ); - }); + updateErrorCounts(counts, Errors.forCode(partition.errorCode())))); + } + } else { + // built response with v0-v7 + updateErrorCounts(counts, error); + data.topics().forEach(topic -> + topic.partitions().forEach(partition -> + updateErrorCounts(counts, Errors.forCode(partition.errorCode())))); } return counts; } - public static OffsetFetchResponse parse(Readable readable, short version) { - return new OffsetFetchResponse(new OffsetFetchResponseData(readable, version), version); + // package-private for testing purposes + Map responseDataV0ToV7() { + Map responseData = new HashMap<>(); + for (OffsetFetchResponseTopic topic : data.topics()) { + for (OffsetFetchResponsePartition partition : topic.partitions()) { + responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()), + new PartitionData(partition.committedOffset(), + RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), + partition.metadata(), + Errors.forCode(partition.errorCode())) + ); + } + } + return responseData; + } + + private Map buildResponseData(String groupId) { + Map responseData = new HashMap<>(); + OffsetFetchResponseGroup group = data + .groups() + .stream() + .filter(g -> g.groupId().equals(groupId)) + .collect(Collectors.toList()) + .get(0); + for (OffsetFetchResponseTopics topic : group.topics()) { + for (OffsetFetchResponsePartitions partition : topic.partitions()) { + responseData.put(new TopicPartition(topic.name(), partition.partitionIndex()), + new PartitionData(partition.committedOffset(), + RequestUtils.getLeaderEpoch(partition.committedLeaderEpoch()), + partition.metadata(), + Errors.forCode(partition.errorCode())) + ); + } + } + return responseData; + } + + public Map partitionDataMap(String groupId) { + if (groupLevelErrors.isEmpty()) { + return responseDataV0ToV7(); + } + return buildResponseData(groupId); + } + + public static OffsetFetchResponse parse(ByteBuffer buffer, short version) { + return new OffsetFetchResponse(new OffsetFetchResponseData(new ByteBufferAccessor(buffer), version), version); } @Override @@ -225,31 +403,4 @@ public OffsetFetchResponseData data() { public boolean shouldClientThrottle(short version) { return version >= 4; } - - public static OffsetFetchResponseData.OffsetFetchResponseGroup groupError( - OffsetFetchRequestData.OffsetFetchRequestGroup group, - Errors error, - int version - ) { - if (version >= TOP_LEVEL_ERROR_AND_NULL_TOPICS_MIN_VERSION) { - return new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId()) - .setErrorCode(error.code()); - } else { - return new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId()) - .setTopics(group.topics().stream().map(topic -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topic.name()) - .setPartitions(topic.partitionIndexes().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition) - .setErrorCode(error.code()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())); - } - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java index 78f7e3132c85d..aced8fc57ffa4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochRequest.java @@ -23,8 +23,10 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.OffsetForLeaderTopicResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET; @@ -36,6 +38,12 @@ public class OffsetsForLeaderEpochRequest extends AbstractRequest { */ public static final int CONSUMER_REPLICA_ID = -1; + /** + * Sentinel replica_id which indicates either a debug consumer or a replica which is using + * an old version of the protocol. + */ + public static final int DEBUGGING_REPLICA_ID = -2; + private final OffsetForLeaderEpochRequestData data; public static class Builder extends AbstractRequest.Builder { @@ -93,8 +101,8 @@ public int replicaId() { return data.replicaId(); } - public static OffsetsForLeaderEpochRequest parse(Readable readable, short version) { - return new OffsetsForLeaderEpochRequest(new OffsetForLeaderEpochRequestData(readable, version), version); + public static OffsetsForLeaderEpochRequest parse(ByteBuffer buffer, short version) { + return new OffsetsForLeaderEpochRequest(new OffsetForLeaderEpochRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java index ff879667b2655..10c257c0a37cf 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/OffsetsForLeaderEpochResponse.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH; @@ -56,7 +57,7 @@ public OffsetForLeaderEpochResponseData data() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.topics().forEach(topic -> topic.partitions().forEach(partition -> updateErrorCounts(errorCounts, Errors.forCode(partition.errorCode())))); @@ -72,8 +73,8 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static OffsetsForLeaderEpochResponse parse(Readable readable, short version) { - return new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData(readable, version)); + public static OffsetsForLeaderEpochResponse parse(ByteBuffer buffer, short version) { + return new OffsetsForLeaderEpochResponse(new OffsetForLeaderEpochResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java index a39ea157d72fa..a9f5205a308fe 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceRequest.java @@ -17,19 +17,20 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.InvalidRecordException; -import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.BaseRecords; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.Records; import org.apache.kafka.common.utils.Utils; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.HashMap; import java.util.Iterator; @@ -96,7 +97,7 @@ public String toString() { // Care should be taken in methods that use this field. private volatile ProduceRequestData data; // the partitionSizes is lazily initialized since it is used by server-side in production. - private volatile Map partitionSizes; + private volatile Map partitionSizes; public ProduceRequest(ProduceRequestData produceRequestData, short version) { super(ApiKeys.PRODUCE, version); @@ -107,20 +108,15 @@ public ProduceRequest(ProduceRequestData produceRequestData, short version) { } // visible for testing - Map partitionSizes() { + Map partitionSizes() { if (partitionSizes == null) { // this method may be called by different thread (see the comment on data) synchronized (this) { if (partitionSizes == null) { - Map tmpPartitionSizes = new HashMap<>(); + Map tmpPartitionSizes = new HashMap<>(); data.topicData().forEach(topicData -> topicData.partitionData().forEach(partitionData -> - // While topic id and name might not be populated at the same time in the request all the time; - // for example on server side they will never be populated together while in produce client they will be, - // to simplify initializing `TopicIdPartition` the code will use both topic name and id. - // TopicId will be Uuid.ZERO_UUID in versions < 13 and topic name will be used as main identifier of topic partition. - // TopicName will be empty string in versions >= 13 and topic id will be used as the main identifier. - tmpPartitionSizes.compute(new TopicIdPartition(topicData.topicId(), partitionData.index(), topicData.name()), + tmpPartitionSizes.compute(new TopicPartition(topicData.name(), partitionData.index()), (ignored, previousValue) -> partitionData.records().sizeInBytes() + (previousValue == null ? 0 : previousValue)) ) @@ -166,14 +162,14 @@ public ProduceResponse getErrorResponse(int throttleTimeMs, Throwable e) { if (acks == 0) return null; ApiError apiError = ApiError.fromThrowable(e); ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs); - partitionSizes().forEach((tpId, ignored) -> { - ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tpId.topic(), tpId.topicId()); + partitionSizes().forEach((tp, ignored) -> { + ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic()); if (tpr == null) { - tpr = new ProduceResponseData.TopicProduceResponse().setName(tpId.topic()).setTopicId(tpId.topicId()); + tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()); data.responses().add(tpr); } tpr.partitionResponses().add(new ProduceResponseData.PartitionProduceResponse() - .setIndex(tpId.partition()) + .setIndex(tp.partition()) .setRecordErrors(Collections.emptyList()) .setBaseOffset(INVALID_OFFSET) .setLogAppendTimeMs(RecordBatch.NO_TIMESTAMP) @@ -231,8 +227,8 @@ public static void validateRecords(short version, BaseRecords baseRecords) { } } - public static ProduceRequest parse(Readable readable, short version) { - return new ProduceRequest(new ProduceRequestData(readable, version), version); + public static ProduceRequest parse(ByteBuffer buffer, short version) { + return new ProduceRequest(new ProduceRequestData(new ByteBufferAccessor(buffer), version), version); } public static boolean isTransactionV2Requested(short version) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java index 673b91ac9ab20..99f7f475ba5bd 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ProduceResponse.java @@ -17,16 +17,17 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.message.ProduceResponseData.LeaderIdAndEpoch; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; +import java.nio.ByteBuffer; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; @@ -55,7 +56,6 @@ * {@link Errors#INVALID_TXN_STATE} * {@link Errors#INVALID_PRODUCER_ID_MAPPING} * {@link Errors#CONCURRENT_TRANSACTIONS} - * {@link Errors#UNKNOWN_TOPIC_ID} */ public class ProduceResponse extends AbstractResponse { public static final long INVALID_OFFSET = -1L; @@ -73,7 +73,7 @@ public ProduceResponse(ProduceResponseData produceResponseData) { * @param responses Produced data grouped by topic-partition */ @Deprecated - public ProduceResponse(Map responses) { + public ProduceResponse(Map responses) { this(responses, DEFAULT_THROTTLE_TIME, Collections.emptyList()); } @@ -84,7 +84,7 @@ public ProduceResponse(Map responses) { * @param throttleTimeMs Time in milliseconds the response was throttled */ @Deprecated - public ProduceResponse(Map responses, int throttleTimeMs) { + public ProduceResponse(Map responses, int throttleTimeMs) { this(toData(responses, throttleTimeMs, Collections.emptyList())); } @@ -97,16 +97,16 @@ public ProduceResponse(Map responses, int t * @param nodeEndpoints List of node endpoints */ @Deprecated - public ProduceResponse(Map responses, int throttleTimeMs, List nodeEndpoints) { + public ProduceResponse(Map responses, int throttleTimeMs, List nodeEndpoints) { this(toData(responses, throttleTimeMs, nodeEndpoints)); } - private static ProduceResponseData toData(Map responses, int throttleTimeMs, List nodeEndpoints) { + private static ProduceResponseData toData(Map responses, int throttleTimeMs, List nodeEndpoints) { ProduceResponseData data = new ProduceResponseData().setThrottleTimeMs(throttleTimeMs); responses.forEach((tp, response) -> { - ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic(), tp.topicId()); + ProduceResponseData.TopicProduceResponse tpr = data.responses().find(tp.topic()); if (tpr == null) { - tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()).setTopicId(tp.topicId()); + tpr = new ProduceResponseData.TopicProduceResponse().setName(tp.topic()); data.responses().add(tpr); } tpr.partitionResponses() @@ -151,7 +151,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); data.responses().forEach(t -> t.partitionResponses().forEach(p -> updateErrorCounts(errorCounts, Errors.forCode(p.errorCode())))); return errorCounts; } @@ -159,6 +159,7 @@ public Map errorCounts() { public static final class PartitionResponse { public Errors error; public long baseOffset; + public long lastOffset; public long logAppendTime; public long logStartOffset; public List recordErrors; @@ -182,12 +183,17 @@ public PartitionResponse(Errors error, long baseOffset, long logAppendTime, long } public PartitionResponse(Errors error, long baseOffset, long logAppendTime, long logStartOffset, List recordErrors, String errorMessage) { - this(error, baseOffset, logAppendTime, logStartOffset, recordErrors, errorMessage, new ProduceResponseData.LeaderIdAndEpoch()); + this(error, baseOffset, INVALID_OFFSET, logAppendTime, logStartOffset, recordErrors, errorMessage, new ProduceResponseData.LeaderIdAndEpoch()); + } + + public PartitionResponse(Errors error, long baseOffset, long lastOffset, long logAppendTime, long logStartOffset, List recordErrors, String errorMessage) { + this(error, baseOffset, lastOffset, logAppendTime, logStartOffset, recordErrors, errorMessage, new ProduceResponseData.LeaderIdAndEpoch()); } public PartitionResponse( Errors error, long baseOffset, + long lastOffset, long logAppendTime, long logStartOffset, List recordErrors, @@ -196,6 +202,7 @@ public PartitionResponse( ) { this.error = error; this.baseOffset = baseOffset; + this.lastOffset = lastOffset; this.logAppendTime = logAppendTime; this.logStartOffset = logStartOffset; this.recordErrors = recordErrors; @@ -209,6 +216,7 @@ public boolean equals(Object o) { if (o == null || getClass() != o.getClass()) return false; PartitionResponse that = (PartitionResponse) o; return baseOffset == that.baseOffset && + lastOffset == that.lastOffset && logAppendTime == that.logAppendTime && logStartOffset == that.logStartOffset && error == that.error && @@ -219,7 +227,7 @@ public boolean equals(Object o) { @Override public int hashCode() { - return Objects.hash(error, baseOffset, logAppendTime, logStartOffset, recordErrors, errorMessage, currentLeader); + return Objects.hash(error, baseOffset, lastOffset, logAppendTime, logStartOffset, recordErrors, errorMessage, currentLeader); } @Override @@ -230,6 +238,8 @@ public String toString() { b.append(error); b.append(",offset: "); b.append(baseOffset); + b.append(",lastOffset: "); + b.append(lastOffset); b.append(",logAppendTime: "); b.append(logAppendTime); b.append(", logStartOffset: "); @@ -286,8 +296,8 @@ public String toString() { } } - public static ProduceResponse parse(Readable readable, short version) { - return new ProduceResponse(new ProduceResponseData(readable, version)); + public static ProduceResponse parse(ByteBuffer buffer, short version) { + return new ProduceResponse(new ProduceResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryRequest.java index d58730e5b3f08..9264de0f59e94 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryRequest.java @@ -19,8 +19,8 @@ import org.apache.kafka.common.message.PushTelemetryRequestData; import org.apache.kafka.common.message.PushTelemetryResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.telemetry.internals.ClientTelemetryUtils; @@ -91,8 +91,8 @@ public ByteBuffer metricsData() { this.data.metrics() : ClientTelemetryUtils.decompress(this.data.metrics(), cType); } - public static PushTelemetryRequest parse(Readable readable, short version) { + public static PushTelemetryRequest parse(ByteBuffer buffer, short version) { return new PushTelemetryRequest(new PushTelemetryRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java index dcd12b42bb68c..230ea1d358488 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/PushTelemetryResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.PushTelemetryResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class PushTelemetryResponse extends AbstractResponse { @@ -41,7 +42,7 @@ public PushTelemetryResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); return counts; } @@ -64,8 +65,8 @@ public Errors error() { return Errors.forCode(data.errorCode()); } - public static PushTelemetryResponse parse(Readable readable, short version) { + public static PushTelemetryResponse parse(ByteBuffer buffer, short version) { return new PushTelemetryResponse(new PushTelemetryResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java index 3637da2ca1b49..a40598aec89c2 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -33,7 +34,11 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new ReadShareGroupStateResponse(new ReadShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -76,10 +81,10 @@ public ReadShareGroupStateRequestData data() { return data; } - public static ReadShareGroupStateRequest parse(Readable readable, short version) { + public static ReadShareGroupStateRequest parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateRequest( - new ReadShareGroupStateRequestData(readable, version), - version + new ReadShareGroupStateRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java index 2ab20e52e95c3..45559c76177df 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateResponse.java @@ -18,14 +18,14 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ReadShareGroupStateRequestData; import org.apache.kafka.common.message.ReadShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -44,11 +44,11 @@ public ReadShareGroupStateResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -63,65 +63,53 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static ReadShareGroupStateResponse parse(Readable readable, short version) { + public static ReadShareGroupStateResponse parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateResponse( - new ReadShareGroupStateResponseData(readable, version) + new ReadShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) ); } public static ReadShareGroupStateResponseData toResponseData( - Uuid topicId, - int partition, - long startOffset, - int stateEpoch, - List stateBatches + Uuid topicId, + int partition, + long startOffset, + int stateEpoch, + List stateBatches ) { return new ReadShareGroupStateResponseData() - .setResults(List.of( - new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(List.of( - new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partition) - .setStartOffset(startOffset) - .setStateEpoch(stateEpoch) - .setStateBatches(stateBatches) - )) - )); + .setResults(Collections.singletonList( + new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicId) + .setPartitions(Collections.singletonList( + new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partition) + .setStartOffset(startOffset) + .setStateEpoch(stateEpoch) + .setStateBatches(stateBatches) + )) + )); } public static ReadShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { return new ReadShareGroupStateResponseData().setResults( - List.of(new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(List.of(new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); + Collections.singletonList(new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(topicId) + .setPartitions(Collections.singletonList(new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage))))); } public static ReadShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult(int partitionId, Errors error, String errorMessage) { return new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static ReadShareGroupStateResponseData.ReadStateResult toResponseReadStateResult(Uuid topicId, List partitionResults) { return new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); - } - - public static ReadShareGroupStateResponseData toGlobalErrorResponse(ReadShareGroupStateRequestData request, Errors error) { - List readStateResults = new ArrayList<>(); - request.topics().forEach(topicData -> { - List partitionResults = new ArrayList<>(); - topicData.partitions().forEach(partitionData -> partitionResults.add( - toErrorResponsePartitionResult(partitionData.partition(), error, error.message())) - ); - readStateResults.add(toResponseReadStateResult(topicData.topicId(), partitionResults)); - }); - return new ReadShareGroupStateResponseData().setResults(readStateResults); + .setTopicId(topicId) + .setPartitions(partitionResults); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java index 803b63ac16835..27daa78967d35 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -33,7 +34,11 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new ReadShareGroupStateSummaryResponse(new ReadShareGroupStateSummaryResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -76,10 +81,10 @@ public ReadShareGroupStateSummaryRequestData data() { return data; } - public static ReadShareGroupStateSummaryRequest parse(Readable readable, short version) { + public static ReadShareGroupStateSummaryRequest parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateSummaryRequest( - new ReadShareGroupStateSummaryRequestData(readable, version), - version + new ReadShareGroupStateSummaryRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java index a2787ff82c96e..77c1dac65a1b8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ReadShareGroupStateSummaryResponse.java @@ -17,16 +17,13 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.ReadShareGroupStateSummaryRequestData; import org.apache.kafka.common.message.ReadShareGroupStateSummaryResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; +import java.nio.ByteBuffer; import java.util.HashMap; -import java.util.List; import java.util.Map; public class ReadShareGroupStateSummaryResponse extends AbstractResponse { @@ -46,9 +43,9 @@ public ReadShareGroupStateSummaryResponseData data() { public Map errorCounts() { Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -62,78 +59,9 @@ public int throttleTimeMs() { public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - - public static ReadShareGroupStateSummaryResponse parse(Readable readable, short version) { + public static ReadShareGroupStateSummaryResponse parse(ByteBuffer buffer, short version) { return new ReadShareGroupStateSummaryResponse( - new ReadShareGroupStateSummaryResponseData(readable, version) + new ReadShareGroupStateSummaryResponseData(new ByteBufferAccessor(buffer), version) ); } - - public static ReadShareGroupStateSummaryResponseData toErrorResponseData( - Uuid topicId, - int partitionId, - Errors error, - String errorMessage - ) { - return new ReadShareGroupStateSummaryResponseData().setResults( - List.of(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicId) - .setPartitions(List.of(new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); - } - - public static ReadShareGroupStateSummaryResponseData.PartitionResult toErrorResponsePartitionResult( - int partitionId, - Errors error, - String errorMessage - ) { - return new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); - } - - public static ReadShareGroupStateSummaryResponseData toResponseData( - Uuid topicId, - int partition, - long startOffset, - int leaderEpoch, - int stateEpoch - ) { - return new ReadShareGroupStateSummaryResponseData() - .setResults(List.of( - new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicId) - .setPartitions(List.of( - new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(partition) - .setStartOffset(startOffset) - .setLeaderEpoch(leaderEpoch) - .setStateEpoch(stateEpoch) - )) - )); - } - - public static ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult toResponseReadStateSummaryResult( - Uuid topicId, - List partitionResults - ) { - return new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicId) - .setPartitions(partitionResults); - } - - public static ReadShareGroupStateSummaryResponseData toGlobalErrorResponse(ReadShareGroupStateSummaryRequestData request, Errors error) { - List readStateSummaryResults = new ArrayList<>(); - request.topics().forEach(topicData -> { - List partitionResults = new ArrayList<>(); - topicData.partitions().forEach(partitionData -> partitionResults.add( - toErrorResponsePartitionResult(partitionData.partition(), error, error.message())) - ); - readStateSummaryResults.add(toResponseReadStateSummaryResult(topicData.topicId(), partitionResults)); - }); - return new ReadShareGroupStateSummaryResponseData().setResults(readStateSummaryResults); - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java index f16d974c8495f..cf5f1dc0ce20b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterRequest.java @@ -20,8 +20,10 @@ import org.apache.kafka.common.message.RemoveRaftVoterRequestData; import org.apache.kafka.common.message.RemoveRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class RemoveRaftVoterRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { @@ -65,9 +67,9 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { setThrottleTimeMs(throttleTimeMs)); } - public static RemoveRaftVoterRequest parse(Readable readable, short version) { + public static RemoveRaftVoterRequest parse(ByteBuffer buffer, short version) { return new RemoveRaftVoterRequest( - new RemoveRaftVoterRequestData(readable, version), + new RemoveRaftVoterRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java index 271cfde8cffa9..2c8387da927e5 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RemoveRaftVoterResponse.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.RemoveRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -57,8 +58,8 @@ public Map errorCounts() { } } - public static RemoveRaftVoterResponse parse(Readable readable, short version) { + public static RemoveRaftVoterResponse parse(ByteBuffer buffer, short version) { return new RemoveRaftVoterResponse( - new RemoveRaftVoterResponseData(readable, version)); + new RemoveRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenRequest.java index 17af0a3c2715f..963097093dc2d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.RenewDelegationTokenRequestData; import org.apache.kafka.common.message.RenewDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class RenewDelegationTokenRequest extends AbstractRequest { @@ -31,9 +33,9 @@ public RenewDelegationTokenRequest(RenewDelegationTokenRequestData data, short v this.data = data; } - public static RenewDelegationTokenRequest parse(Readable readable, short version) { + public static RenewDelegationTokenRequest parse(ByteBuffer buffer, short version) { return new RenewDelegationTokenRequest(new RenewDelegationTokenRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java index 6eb0dc0e0ed53..2baf992283e90 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RenewDelegationTokenResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.RenewDelegationTokenResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class RenewDelegationTokenResponse extends AbstractResponse { @@ -32,9 +33,9 @@ public RenewDelegationTokenResponse(RenewDelegationTokenResponseData data) { this.data = data; } - public static RenewDelegationTokenResponse parse(Readable readable, short version) { + public static RenewDelegationTokenResponse parse(ByteBuffer buffer, short version) { return new RenewDelegationTokenResponse(new RenewDelegationTokenResponseData( - readable, version)); + new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java b/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java index a13bcf103b922..c15fa960abf12 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RequestContext.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.network.Send; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.auth.KafkaPrincipalSerde; import org.apache.kafka.common.security.auth.SecurityProtocol; @@ -118,7 +117,7 @@ public RequestAndSize parseRequest(ByteBuffer buffer) { ApiKeys apiKey = header.apiKey(); try { short apiVersion = header.apiVersion(); - return AbstractRequest.parseRequest(apiKey, apiVersion, new ByteBufferAccessor(buffer)); + return AbstractRequest.parseRequest(apiKey, apiVersion, buffer); } catch (Throwable ex) { throw new InvalidRequestException("Error getting request for apiKey: " + apiKey + ", apiVersion: " + header.apiVersion() + diff --git a/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java b/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java index d434e6e7b185e..cc6e5a2303879 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/RequestUtils.java @@ -16,13 +16,6 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.errors.AuthenticationException; -import org.apache.kafka.common.errors.AuthorizationException; -import org.apache.kafka.common.errors.MismatchedEndpointTypeException; -import org.apache.kafka.common.errors.SecurityDisabledException; -import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; -import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ProduceRequestData; import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Message; @@ -84,14 +77,4 @@ public static ByteBuffer serialize( writable.flip(); return writable.buffer(); } - - public static boolean isFatalException(Throwable e) { - return e instanceof AuthenticationException || - e instanceof AuthorizationException || - e instanceof MismatchedEndpointTypeException || - e instanceof SecurityDisabledException || - e instanceof UnsupportedVersionException || - e instanceof UnsupportedEndpointTypeException || - e instanceof UnsupportedForMessageFormatException; - } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateRequest.java index 610b084533890..47dd5fd315769 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateRequest.java @@ -19,7 +19,9 @@ import org.apache.kafka.common.message.SaslAuthenticateRequestData; import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; + +import java.nio.ByteBuffer; /** @@ -72,8 +74,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new SaslAuthenticateResponse(response); } - public static SaslAuthenticateRequest parse(Readable readable, short version) { - return new SaslAuthenticateRequest(new SaslAuthenticateRequestData(readable, version), + public static SaslAuthenticateRequest parse(ByteBuffer buffer, short version) { + return new SaslAuthenticateRequest(new SaslAuthenticateRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java index ba0fc29a39180..d6ca8c170dc45 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslAuthenticateResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.SaslAuthenticateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; /** @@ -76,7 +77,7 @@ public SaslAuthenticateResponseData data() { return data; } - public static SaslAuthenticateResponse parse(Readable readable, short version) { - return new SaslAuthenticateResponse(new SaslAuthenticateResponseData(readable, version)); + public static SaslAuthenticateResponse parse(ByteBuffer buffer, short version) { + return new SaslAuthenticateResponse(new SaslAuthenticateResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeRequest.java index 710700257ab34..c623b2ee093e0 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeRequest.java @@ -20,7 +20,9 @@ import org.apache.kafka.common.message.SaslHandshakeRequestData; import org.apache.kafka.common.message.SaslHandshakeResponseData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; + +import java.nio.ByteBuffer; /** * Request from SASL client containing client SASL mechanism. @@ -71,7 +73,7 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { return new SaslHandshakeResponse(response); } - public static SaslHandshakeRequest parse(Readable readable, short version) { - return new SaslHandshakeRequest(new SaslHandshakeRequestData(readable, version), version); + public static SaslHandshakeRequest parse(ByteBuffer buffer, short version) { + return new SaslHandshakeRequest(new SaslHandshakeRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java index 40de2ceff30dd..5097711e73787 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SaslHandshakeResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.SaslHandshakeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; import java.util.Map; @@ -70,7 +71,7 @@ public List enabledMechanisms() { return data.mechanisms(); } - public static SaslHandshakeResponse parse(Readable readable, short version) { - return new SaslHandshakeResponse(new SaslHandshakeResponseData(readable, version)); + public static SaslHandshakeResponse parse(ByteBuffer buffer, short version) { + return new SaslHandshakeResponse(new SaslHandshakeResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java index c73df0b1d5656..74ceaa685c6e8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeRequest.java @@ -17,12 +17,16 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareAcknowledgeRequestData; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -33,7 +37,11 @@ public static class Builder extends AbstractRequest.Builder> ackMap = new HashMap<>(); + for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { TopicIdPartition tip = acknowledgeEntry.getKey(); - ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = ackTopics.find(tip.topicId()); - if (ackTopic == null) { - ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(tip.topicId()) - .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection()); - ackTopics.add(ackTopic); - } - ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = ackTopic.partitions().find(tip.partition()); + Map partMap = ackMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareAcknowledgeRequestData.AcknowledgePartition ackPartition = partMap.get(tip.partition()); if (ackPartition == null) { ackPartition = new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(tip.partition()); - ackTopic.partitions().add(ackPartition); + partMap.put(tip.partition(), ackPartition); } ackPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); } - data.setTopics(ackTopics); - return new ShareAcknowledgeRequest.Builder(data); + // Finally, build up the data to fetch + data.setTopics(new ArrayList<>()); + ackMap.forEach((topicId, partMap) -> { + ShareAcknowledgeRequestData.AcknowledgeTopic ackTopic = new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(topicId) + .setPartitions(new ArrayList<>()); + data.topics().add(ackTopic); + + partMap.forEach((index, ackPartition) -> ackTopic.partitions().add(ackPartition)); + }); + + return new ShareAcknowledgeRequest.Builder(data, true); } public ShareAcknowledgeRequestData data() { @@ -104,9 +118,9 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(error.code())); } - public static ShareAcknowledgeRequest parse(Readable readable, short version) { + public static ShareAcknowledgeRequest parse(ByteBuffer buffer, short version) { return new ShareAcknowledgeRequest( - new ShareAcknowledgeRequestData(readable, version), + new ShareAcknowledgeRequestData(new ByteBufferAccessor(buffer), version), version ); } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java index d303a852b795e..5cab233dccac8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareAcknowledgeResponse.java @@ -18,13 +18,15 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -62,7 +64,7 @@ public ShareAcknowledgeResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach( topic -> topic.partitions().forEach( @@ -82,9 +84,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ShareAcknowledgeResponse parse(Readable readable, short version) { + public static ShareAcknowledgeResponse parse(ByteBuffer buffer, short version) { return new ShareAcknowledgeResponse( - new ShareAcknowledgeResponseData(readable, version) + new ShareAcknowledgeResponseData(new ByteBufferAccessor(buffer), version) ); } @@ -114,21 +116,22 @@ public static ShareAcknowledgeResponse of(Errors error, public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleTimeMs, Iterator> partIterator, List nodeEndpoints) { - ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection topicResponses = new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection(); + Map topicResponseList = new LinkedHashMap<>(); while (partIterator.hasNext()) { Map.Entry entry = partIterator.next(); ShareAcknowledgeResponseData.PartitionData partitionData = entry.getValue(); // Since PartitionData alone doesn't know the partition ID, we set it here partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); // Checking if the topic is already present in the map - ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse topicResponse = topicResponses.find(entry.getKey().topicId()); - if (topicResponse == null) { - topicResponse = new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + if (topicResponseList.containsKey(entry.getKey().topicId())) { + topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); + } else { + List partitionResponses = new ArrayList<>(); + partitionResponses.add(partitionData); + topicResponseList.put(entry.getKey().topicId(), new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() .setTopicId(entry.getKey().topicId()) - .setPartitions(new ArrayList<>()); - topicResponses.add(topicResponse); + .setPartitions(partitionResponses)); } - topicResponse.partitions().add(partitionData); } ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list @@ -140,6 +143,6 @@ public static ShareAcknowledgeResponseData toMessage(Errors error, int throttleT .setRack(endpoint.rack()))); return data.setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) - .setResponses(topicResponses); + .setResponses(new ArrayList<>(topicResponseList.values())); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java index 5ede165c2eff0..7ed14b4bdb102 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchRequest.java @@ -20,15 +20,18 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchRequestData; +import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import java.util.Objects; public class ShareFetchRequest extends AbstractRequest { @@ -37,49 +40,45 @@ public static class Builder extends AbstractRequest.Builder { private final ShareFetchRequestData data; public Builder(ShareFetchRequestData data) { - super(ApiKeys.SHARE_FETCH); + this(data, false); + } + + public Builder(ShareFetchRequestData data, boolean enableUnstableLastVersion) { + super(ApiKeys.SHARE_FETCH, enableUnstableLastVersion); this.data = data; } public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, - int maxWait, int minBytes, int maxBytes, int maxRecords, - int batchSize, List send, List forget, + int maxWait, int minBytes, int maxBytes, int fetchSize, + List send, List forget, Map> acknowledgementsMap) { ShareFetchRequestData data = new ShareFetchRequestData(); data.setGroupId(groupId); + int ackOnlyPartitionMaxBytes = fetchSize; boolean isClosingShareSession = false; if (metadata != null) { data.setMemberId(metadata.memberId().toString()); data.setShareSessionEpoch(metadata.epoch()); if (metadata.isFinalEpoch()) { isClosingShareSession = true; + ackOnlyPartitionMaxBytes = 0; } } data.setMaxWaitMs(maxWait); data.setMinBytes(minBytes); data.setMaxBytes(maxBytes); - data.setMaxRecords(maxRecords); - data.setBatchSize(batchSize); // Build a map of topics to fetch keyed by topic ID, and within each a map of partitions keyed by index - ShareFetchRequestData.FetchTopicCollection fetchTopics = new ShareFetchRequestData.FetchTopicCollection(); + Map> fetchMap = new HashMap<>(); // First, start by adding the list of topic-partitions we are fetching if (!isClosingShareSession) { for (TopicIdPartition tip : send) { - ShareFetchRequestData.FetchTopic fetchTopic = fetchTopics.find(tip.topicId()); - if (fetchTopic == null) { - fetchTopic = new ShareFetchRequestData.FetchTopic() - .setTopicId(tip.topicId()) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection()); - fetchTopics.add(fetchTopic); - } - ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().find(tip.partition()); - if (fetchPartition == null) { - fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()); - fetchTopic.partitions().add(fetchPartition); - } + Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareFetchRequestData.FetchPartition fetchPartition = new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(tip.partition()) + .setPartitionMaxBytes(fetchSize); + partMap.put(tip.partition(), fetchPartition); } } @@ -87,26 +86,30 @@ public static Builder forConsumer(String groupId, ShareRequestMetadata metadata, // topic-partitions will be a subset, but if the assignment changes, there might be new entries to add for (Map.Entry> acknowledgeEntry : acknowledgementsMap.entrySet()) { TopicIdPartition tip = acknowledgeEntry.getKey(); - ShareFetchRequestData.FetchTopic fetchTopic = fetchTopics.find(tip.topicId()); - if (fetchTopic == null) { - fetchTopic = new ShareFetchRequestData.FetchTopic() - .setTopicId(tip.topicId()) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection()); - fetchTopics.add(fetchTopic); - } - ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().find(tip.partition()); + Map partMap = fetchMap.computeIfAbsent(tip.topicId(), k -> new HashMap<>()); + ShareFetchRequestData.FetchPartition fetchPartition = partMap.get(tip.partition()); if (fetchPartition == null) { fetchPartition = new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tip.partition()); - fetchTopic.partitions().add(fetchPartition); + .setPartitionIndex(tip.partition()) + .setPartitionMaxBytes(ackOnlyPartitionMaxBytes); + partMap.put(tip.partition(), fetchPartition); } fetchPartition.setAcknowledgementBatches(acknowledgeEntry.getValue()); } // Build up the data to fetch - data.setTopics(fetchTopics); + if (!fetchMap.isEmpty()) { + data.setTopics(new ArrayList<>()); + fetchMap.forEach((topicId, partMap) -> { + ShareFetchRequestData.FetchTopic fetchTopic = new ShareFetchRequestData.FetchTopic() + .setTopicId(topicId) + .setPartitions(new ArrayList<>()); + partMap.forEach((index, fetchPartition) -> fetchTopic.partitions().add(fetchPartition)); + data.topics().add(fetchTopic); + }); + } - Builder builder = new Builder(data); + Builder builder = new Builder(data, true); // And finally, forget the topic-partitions that are no longer in the session if (!forget.isEmpty()) { data.setForgottenTopicsData(new ArrayList<>()); @@ -147,7 +150,7 @@ public String toString() { } private final ShareFetchRequestData data; - private volatile List shareFetchData = null; + private volatile LinkedHashMap shareFetchData = null; private volatile List toForget = null; public ShareFetchRequest(ShareFetchRequestData data, short version) { @@ -163,16 +166,53 @@ public ShareFetchRequestData data() { @Override public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { Errors error = Errors.forException(e); - return ShareFetchResponse.of(error, throttleTimeMs, new LinkedHashMap<>(), List.of(), 0); + return new ShareFetchResponse(new ShareFetchResponseData() + .setThrottleTimeMs(throttleTimeMs) + .setErrorCode(error.code())); } - public static ShareFetchRequest parse(Readable readable, short version) { + public static ShareFetchRequest parse(ByteBuffer buffer, short version) { return new ShareFetchRequest( - new ShareFetchRequestData(readable, version), + new ShareFetchRequestData(new ByteBufferAccessor(buffer), version), version ); } + public static final class SharePartitionData { + public final Uuid topicId; + public final int maxBytes; + + public SharePartitionData( + Uuid topicId, + int maxBytes + ) { + this.topicId = topicId; + this.maxBytes = maxBytes; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ShareFetchRequest.SharePartitionData that = (ShareFetchRequest.SharePartitionData) o; + return Objects.equals(topicId, that.topicId) && + maxBytes == that.maxBytes; + } + + @Override + public int hashCode() { + return Objects.hash(topicId, maxBytes); + } + + @Override + public String toString() { + return "SharePartitionData(" + + "topicId=" + topicId + + ", maxBytes=" + maxBytes + + ')'; + } + } + public int minBytes() { return data.minBytes(); } @@ -185,18 +225,23 @@ public int maxWait() { return data.maxWaitMs(); } - public List shareFetchData(Map topicNames) { + public Map shareFetchData(Map topicNames) { if (shareFetchData == null) { synchronized (this) { if (shareFetchData == null) { // Assigning the lazy-initialized `shareFetchData` in the last step // to avoid other threads accessing a half-initialized object. - final List shareFetchDataTmp = new ArrayList<>(); + final LinkedHashMap shareFetchDataTmp = new LinkedHashMap<>(); data.topics().forEach(shareFetchTopic -> { String name = topicNames.get(shareFetchTopic.topicId()); shareFetchTopic.partitions().forEach(shareFetchPartition -> { // Topic name may be null here if the topic name was unable to be resolved using the topicNames map. - shareFetchDataTmp.add(new TopicIdPartition(shareFetchTopic.topicId(), shareFetchPartition.partitionIndex(), name)); + shareFetchDataTmp.put(new TopicIdPartition(shareFetchTopic.topicId(), new TopicPartition(name, shareFetchPartition.partitionIndex())), + new ShareFetchRequest.SharePartitionData( + shareFetchTopic.topicId(), + shareFetchPartition.partitionMaxBytes() + ) + ); }); }); shareFetchData = shareFetchDataTmp; diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java index 329c5430e7e66..619e740029dfa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareFetchResponse.java @@ -22,15 +22,16 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.Records; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.Iterator; import java.util.LinkedHashMap; import java.util.List; @@ -54,7 +55,9 @@ public class ShareFetchResponse extends AbstractResponse { private final ShareFetchResponseData data; - private ShareFetchResponse(ShareFetchResponseData data) { + private volatile LinkedHashMap responseData = null; + + public ShareFetchResponse(ShareFetchResponseData data) { super(ApiKeys.SHARE_FETCH); this.data = data; } @@ -70,7 +73,7 @@ public ShareFetchResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); updateErrorCounts(counts, Errors.forCode(data.errorCode())); data.responses().forEach( topic -> topic.partitions().forEach( @@ -81,14 +84,23 @@ public Map errorCounts() { } public LinkedHashMap responseData(Map topicNames) { - final LinkedHashMap responseData = new LinkedHashMap<>(); - data.responses().forEach(topicResponse -> { - String name = topicNames.get(topicResponse.topicId()); - if (name != null) { - topicResponse.partitions().forEach(partitionData -> responseData.put(new TopicIdPartition(topicResponse.topicId(), - new TopicPartition(name, partitionData.partitionIndex())), partitionData)); + if (responseData == null) { + synchronized (this) { + // Assigning the lazy-initialized `responseData` in the last step + // to avoid other threads accessing a half-initialized object. + if (responseData == null) { + final LinkedHashMap responseDataTmp = new LinkedHashMap<>(); + data.responses().forEach(topicResponse -> { + String name = topicNames.get(topicResponse.topicId()); + if (name != null) { + topicResponse.partitions().forEach(partitionData -> responseDataTmp.put(new TopicIdPartition(topicResponse.topicId(), + new TopicPartition(name, partitionData.partitionIndex())), partitionData)); + } + }); + responseData = responseDataTmp; + } } - }); + } return responseData; } @@ -102,16 +114,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - /** - * Creates a {@link org.apache.kafka.common.requests.ShareFetchResponse} from the given byte buffer. - * Unlike {@link org.apache.kafka.common.requests.ShareFetchResponse#of(Errors, int, LinkedHashMap, List, int)}, - * this method doesn't convert null records to {@link org.apache.kafka.common.record.MemoryRecords#EMPTY}. - * - *

        This method should only be used in client-side.

        - */ - public static ShareFetchResponse parse(Readable readable, short version) { + public static ShareFetchResponse parse(ByteBuffer buffer, short version) { return new ShareFetchResponse( - new ShareFetchResponseData(readable, version) + new ShareFetchResponseData(new ByteBufferAccessor(buffer), version) ); } @@ -139,7 +144,7 @@ public static int sizeOf(short version, Iterator> partIterator) { // Since the throttleTimeMs and metadata field sizes are constant and fixed, we can // use arbitrary values here without affecting the result. - ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList(), 0); + ShareFetchResponseData data = toMessage(Errors.NONE, 0, partIterator, Collections.emptyList()); ObjectSerializationCache cache = new ObjectSerializationCache(); return 4 + data.size(cache, version); } @@ -151,43 +156,32 @@ public static int recordsSize(ShareFetchResponseData.PartitionData partition) { return partition.records() == null ? 0 : partition.records().sizeInBytes(); } - /** - * Creates a {@link org.apache.kafka.common.requests.ShareFetchResponse} from the given data. - * This method converts null records to {@link org.apache.kafka.common.record.MemoryRecords#EMPTY} - * to ensure consistent record representation in the response. - * - *

        This method should only be used in server-side.

        - */ public static ShareFetchResponse of(Errors error, int throttleTimeMs, LinkedHashMap responseData, - List nodeEndpoints, int acquisitionLockTimeout) { - return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints, acquisitionLockTimeout)); + List nodeEndpoints) { + return new ShareFetchResponse(toMessage(error, throttleTimeMs, responseData.entrySet().iterator(), nodeEndpoints)); } - private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, + public static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs, Iterator> partIterator, - List nodeEndpoints, int acquisitionLockTimeout) { - ShareFetchResponseData.ShareFetchableTopicResponseCollection topicResponses = new ShareFetchResponseData.ShareFetchableTopicResponseCollection(); + List nodeEndpoints) { + Map topicResponseList = new LinkedHashMap<>(); while (partIterator.hasNext()) { Map.Entry entry = partIterator.next(); ShareFetchResponseData.PartitionData partitionData = entry.getValue(); // Since PartitionData alone doesn't know the partition ID, we set it here partitionData.setPartitionIndex(entry.getKey().topicPartition().partition()); - // To protect the clients from failing due to null records, - // we always convert null records to MemoryRecords.EMPTY - // We will propose a KIP to change the schema definitions in the future - if (partitionData.records() == null) - partitionData.setRecords(MemoryRecords.EMPTY); // Checking if the topic is already present in the map - ShareFetchResponseData.ShareFetchableTopicResponse topicResponse = topicResponses.find(entry.getKey().topicId()); - if (topicResponse == null) { - topicResponse = new ShareFetchResponseData.ShareFetchableTopicResponse() + if (topicResponseList.containsKey(entry.getKey().topicId())) { + topicResponseList.get(entry.getKey().topicId()).partitions().add(partitionData); + } else { + List partitionResponses = new ArrayList<>(); + partitionResponses.add(partitionData); + topicResponseList.put(entry.getKey().topicId(), new ShareFetchResponseData.ShareFetchableTopicResponse() .setTopicId(entry.getKey().topicId()) - .setPartitions(new ArrayList<>()); - topicResponses.add(topicResponse); + .setPartitions(partitionResponses)); } - topicResponse.partitions().add(partitionData); } ShareFetchResponseData data = new ShareFetchResponseData(); // KafkaApis should only pass in node endpoints on error, otherwise this should be an empty list @@ -199,18 +193,16 @@ private static ShareFetchResponseData toMessage(Errors error, int throttleTimeMs .setRack(endpoint.rack()))); return data.setThrottleTimeMs(throttleTimeMs) .setErrorCode(error.code()) - .setAcquisitionLockTimeoutMs(acquisitionLockTimeout) - .setResponses(topicResponses); + .setResponses(new ArrayList<>(topicResponseList.values())); } public static ShareFetchResponseData.PartitionData partitionResponse(TopicIdPartition topicIdPartition, Errors error) { return partitionResponse(topicIdPartition.topicPartition().partition(), error); } - private static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { + public static ShareFetchResponseData.PartitionData partitionResponse(int partition, Errors error) { return new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition) - .setErrorCode(error.code()) - .setRecords(MemoryRecords.EMPTY); + .setErrorCode(error.code()); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java index 1ad411f86010f..25c02e4a83c5e 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeRequest.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.ShareGroupDescribeRequestData; import org.apache.kafka.common.message.ShareGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.List; import java.util.stream.Collectors; @@ -32,7 +33,11 @@ public static class Builder extends AbstractRequest.Builder getErrorDescri .map(groupId -> new ShareGroupDescribeResponseData.DescribedGroup() .setGroupId(groupId) .setErrorCode(error.code()) - .setErrorMessage(error.message()) ).collect(Collectors.toList()); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java index 89b89822c4dae..95dd371eedfa7 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupDescribeResponse.java @@ -18,10 +18,11 @@ import org.apache.kafka.common.message.ShareGroupDescribeResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -34,7 +35,6 @@ * - {@link Errors#INVALID_REQUEST} * - {@link Errors#INVALID_GROUP_ID} * - {@link Errors#GROUP_ID_NOT_FOUND} - * - {@link Errors#TOPIC_AUTHORIZATION_FAILED} */ public class ShareGroupDescribeResponse extends AbstractResponse { @@ -52,7 +52,7 @@ public ShareGroupDescribeResponseData data() { @Override public Map errorCounts() { - Map counts = new EnumMap<>(Errors.class); + HashMap counts = new HashMap<>(); data.groups().forEach( group -> updateErrorCounts(counts, Errors.forCode(group.errorCode())) ); @@ -69,9 +69,9 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { data.setThrottleTimeMs(throttleTimeMs); } - public static ShareGroupDescribeResponse parse(Readable readable, short version) { + public static ShareGroupDescribeResponse parse(ByteBuffer buffer, short version) { return new ShareGroupDescribeResponse( - new ShareGroupDescribeResponseData(readable, version) + new ShareGroupDescribeResponseData(new ByteBufferAccessor(buffer), version) ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java index 5681a5a95af9f..500f8561a0209 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/ShareGroupHeartbeatRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class ShareGroupHeartbeatRequest extends AbstractRequest { /** @@ -37,7 +39,11 @@ public static class Builder extends AbstractRequest.Builder> assignment - ) { - List topicPartitions = assignment.entrySet().stream() - .map(keyValue -> new ShareGroupHeartbeatResponseData.TopicPartitions() - .setTopicId(keyValue.getKey()) - .setPartitions(new ArrayList<>(keyValue.getValue()))) - .collect(Collectors.toList()); - - return new ShareGroupHeartbeatResponseData.Assignment() - .setTopicPartitions(topicPartitions); + new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java index ceb0e5248c7ce..d2da48fb018ee 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupRequest.java @@ -20,8 +20,8 @@ import org.apache.kafka.common.message.SyncGroupRequestData; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import java.nio.ByteBuffer; import java.util.HashMap; @@ -87,8 +87,8 @@ public boolean areMandatoryProtocolTypeAndNamePresent() { return true; } - public static SyncGroupRequest parse(Readable readable, short version) { - return new SyncGroupRequest(new SyncGroupRequestData(readable, version), version); + public static SyncGroupRequest parse(ByteBuffer buffer, short version) { + return new SyncGroupRequest(new SyncGroupRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java index c31092bdbf78a..596110242902c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/SyncGroupResponse.java @@ -18,9 +18,10 @@ import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Map; public class SyncGroupResponse extends AbstractResponse { @@ -61,8 +62,8 @@ public String toString() { return data.toString(); } - public static SyncGroupResponse parse(Readable readable, short version) { - return new SyncGroupResponse(new SyncGroupResponseData(readable, version)); + public static SyncGroupResponse parse(ByteBuffer buffer, short version) { + return new SyncGroupResponse(new SyncGroupResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java index 3724c4ccc0920..14d1665a8dcd9 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitRequest.java @@ -25,10 +25,11 @@ import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition; import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.record.RecordBatch; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -216,9 +217,9 @@ public static TxnOffsetCommitResponseData getErrorResponse( return response; } - public static TxnOffsetCommitRequest parse(Readable readable, short version) { + public static TxnOffsetCommitRequest parse(ByteBuffer buffer, short version) { return new TxnOffsetCommitRequest(new TxnOffsetCommitRequestData( - readable, version), version); + new ByteBufferAccessor(buffer), version), version); } public static class CommittedOffset { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java index dea99cf2b0709..ce7dd9e7f1cbb 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/TxnOffsetCommitResponse.java @@ -21,9 +21,10 @@ import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition; import org.apache.kafka.common.message.TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -190,8 +191,8 @@ public Map errors() { return errorMap; } - public static TxnOffsetCommitResponse parse(Readable readable, short version) { - return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(readable, version)); + public static TxnOffsetCommitResponse parse(ByteBuffer buffer, short version) { + return new TxnOffsetCommitResponse(new TxnOffsetCommitResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerRequest.java index a7f2391e8aa2a..253499f85afb3 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerRequest.java @@ -19,8 +19,10 @@ import org.apache.kafka.common.message.UnregisterBrokerRequestData; import org.apache.kafka.common.message.UnregisterBrokerResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; + +import java.nio.ByteBuffer; public class UnregisterBrokerRequest extends AbstractRequest { @@ -55,12 +57,11 @@ public UnregisterBrokerResponse getErrorResponse(int throttleTimeMs, Throwable e Errors error = Errors.forException(e); return new UnregisterBrokerResponse(new UnregisterBrokerResponseData() .setThrottleTimeMs(throttleTimeMs) - .setErrorCode(error.code()) - .setErrorMessage(e.getMessage())); + .setErrorCode(error.code())); } - public static UnregisterBrokerRequest parse(Readable readable, short version) { - return new UnregisterBrokerRequest(new UnregisterBrokerRequestData(readable, version), + public static UnregisterBrokerRequest parse(ByteBuffer buffer, short version) { + return new UnregisterBrokerRequest(new UnregisterBrokerRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java index a0f71a7021f94..623e6f28076fa 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UnregisterBrokerResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.UnregisterBrokerResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; public class UnregisterBrokerResponse extends AbstractResponse { @@ -50,15 +51,15 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); if (data.errorCode() != 0) { errorCounts.put(Errors.forCode(data.errorCode()), 1); } return errorCounts; } - public static UnregisterBrokerResponse parse(Readable readable, short version) { - return new UnregisterBrokerResponse(new UnregisterBrokerResponseData(readable, version)); + public static UnregisterBrokerResponse parse(ByteBuffer buffer, short version) { + return new UnregisterBrokerResponse(new UnregisterBrokerResponseData(new ByteBufferAccessor(buffer), version)); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesRequest.java index 35b4cce20953e..b4acfa835495a 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesRequest.java @@ -19,8 +19,9 @@ import org.apache.kafka.clients.admin.FeatureUpdate; import org.apache.kafka.common.message.UpdateFeaturesRequestData; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.Readable; +import org.apache.kafka.common.protocol.ByteBufferAccessor; +import java.nio.ByteBuffer; import java.util.Collection; import java.util.Collections; import java.util.stream.Collectors; @@ -115,7 +116,7 @@ public UpdateFeaturesRequestData data() { return data; } - public static UpdateFeaturesRequest parse(Readable readable, short version) { - return new UpdateFeaturesRequest(new UpdateFeaturesRequestData(readable, version), version); + public static UpdateFeaturesRequest parse(ByteBuffer buffer, short version) { + return new UpdateFeaturesRequest(new UpdateFeaturesRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java index 895cfd0f85fb6..88869040d4ac4 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateFeaturesResponse.java @@ -20,10 +20,11 @@ import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResult; import org.apache.kafka.common.message.UpdateFeaturesResponseData.UpdatableFeatureResultCollection; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -50,7 +51,7 @@ public ApiError topLevelError() { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); updateErrorCounts(errorCounts, Errors.forCode(data.errorCode())); for (UpdatableFeatureResult result : data.results()) { updateErrorCounts(errorCounts, Errors.forCode(result.errorCode())); @@ -78,8 +79,8 @@ public UpdateFeaturesResponseData data() { return data; } - public static UpdateFeaturesResponse parse(Readable readable, short version) { - return new UpdateFeaturesResponse(new UpdateFeaturesResponseData(readable, version)); + public static UpdateFeaturesResponse parse(ByteBuffer buffer, short version) { + return new UpdateFeaturesResponse(new UpdateFeaturesResponseData(new ByteBufferAccessor(buffer), version)); } public static UpdateFeaturesResponse createWithErrors(ApiError topLevelError, Set updates, int throttleTimeMs) { diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterRequest.java index 337594b329272..eb02673e1da99 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterRequest.java @@ -20,10 +20,12 @@ import org.apache.kafka.common.message.UpdateRaftVoterRequestData; import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -public class UpdateRaftVoterRequest extends AbstractRequest { +import java.nio.ByteBuffer; + +public class UpdateRaftVoterRequest extends AbstractRequest { public static class Builder extends AbstractRequest.Builder { private final UpdateRaftVoterRequestData data; @@ -63,9 +65,9 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { setThrottleTimeMs(throttleTimeMs)); } - public static UpdateRaftVoterRequest parse(Readable readable, short version) { + public static UpdateRaftVoterRequest parse(ByteBuffer buffer, short version) { return new UpdateRaftVoterRequest( - new UpdateRaftVoterRequestData(readable, version), + new UpdateRaftVoterRequestData(new ByteBufferAccessor(buffer), version), version); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java index f52157234fa44..5c89caed2ef94 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/UpdateRaftVoterResponse.java @@ -19,9 +19,10 @@ import org.apache.kafka.common.message.UpdateRaftVoterResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; import java.util.Map; @@ -57,8 +58,8 @@ public Map errorCounts() { } } - public static UpdateRaftVoterResponse parse(Readable readable, short version) { + public static UpdateRaftVoterResponse parse(ByteBuffer buffer, short version) { return new UpdateRaftVoterResponse( - new UpdateRaftVoterResponseData(readable, version)); + new UpdateRaftVoterResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java index 732175583671c..619b5bd78e50c 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/VoteRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.VoteRequestData; import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.Collections; public class VoteRequest extends AbstractRequest { @@ -64,8 +65,8 @@ public AbstractResponse getErrorResponse(int throttleTimeMs, Throwable e) { .setErrorCode(Errors.forException(e).code())); } - public static VoteRequest parse(Readable readable, short version) { - return new VoteRequest(new VoteRequestData(readable, version), version); + public static VoteRequest parse(ByteBuffer buffer, short version) { + return new VoteRequest(new VoteRequestData(new ByteBufferAccessor(buffer), version), version); } public static VoteRequestData singletonRequest(TopicPartition topicPartition, diff --git a/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java index 9f3797dc3c017..c9a64743ccf8b 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/VoteResponse.java @@ -19,10 +19,11 @@ import org.apache.kafka.common.message.VoteResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.EnumMap; +import java.nio.ByteBuffer; +import java.util.HashMap; import java.util.Map; /** @@ -48,7 +49,7 @@ public VoteResponse(VoteResponseData data) { @Override public Map errorCounts() { - Map errors = new EnumMap<>(Errors.class); + Map errors = new HashMap<>(); errors.put(Errors.forCode(data.errorCode()), 1); @@ -75,7 +76,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // Not supported by the response schema } - public static VoteResponse parse(Readable readable, short version) { - return new VoteResponse(new VoteResponseData(readable, version)); + public static VoteResponse parse(ByteBuffer buffer, short version) { + return new VoteResponse(new VoteResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java index 35619791540d9..bcfaf07652b3d 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateRequest.java @@ -20,9 +20,10 @@ import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.stream.Collectors; @@ -33,7 +34,11 @@ public static class Builder extends AbstractRequest.Builder results = new ArrayList<>(); data.topics().forEach( - topicResult -> results.add(new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicResult.topicId()) - .setPartitions(topicResult.partitions().stream() - .map(partitionData -> new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionData.partition()) - .setErrorCode(Errors.forException(e).code()) - .setErrorMessage(Errors.forException(e).message())) - .collect(Collectors.toList())))); + topicResult -> results.add(new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicResult.topicId()) + .setPartitions(topicResult.partitions().stream() + .map(partitionData -> new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionData.partition()) + .setErrorCode(Errors.forException(e).code()) + .setErrorMessage(Errors.forException(e).message())) + .collect(Collectors.toList())))); return new WriteShareGroupStateResponse(new WriteShareGroupStateResponseData() - .setResults(results)); + .setResults(results)); } @Override @@ -76,10 +81,10 @@ public WriteShareGroupStateRequestData data() { return data; } - public static WriteShareGroupStateRequest parse(Readable readable, short version) { + public static WriteShareGroupStateRequest parse(ByteBuffer buffer, short version) { return new WriteShareGroupStateRequest( - new WriteShareGroupStateRequestData(readable, version), - version + new WriteShareGroupStateRequestData(new ByteBufferAccessor(buffer), version), + version ); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java index 799ec80d228e6..46ae81db646d8 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteShareGroupStateResponse.java @@ -18,13 +18,13 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.message.WriteShareGroupStateRequestData; import org.apache.kafka.common.message.WriteShareGroupStateResponseData; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; -import java.util.ArrayList; +import java.nio.ByteBuffer; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -46,9 +46,9 @@ public WriteShareGroupStateResponseData data() { public Map errorCounts() { Map counts = new HashMap<>(); data.results().forEach( - result -> result.partitions().forEach( - partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) - ) + result -> result.partitions().forEach( + partitionResult -> updateErrorCounts(counts, Errors.forCode(partitionResult.errorCode())) + ) ); return counts; } @@ -63,60 +63,48 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { // No op } - public static WriteShareGroupStateResponse parse(Readable readable, short version) { + public static WriteShareGroupStateResponse parse(ByteBuffer buffer, short version) { return new WriteShareGroupStateResponse( - new WriteShareGroupStateResponseData(readable, version) + new WriteShareGroupStateResponseData(new ByteBufferAccessor(buffer), version) ); } public static WriteShareGroupStateResponseData toResponseData(Uuid topicId, int partitionId) { return new WriteShareGroupStateResponseData() - .setResults(List.of( - new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(List.of( - new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId))))); + .setResults(Collections.singletonList( + new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicId) + .setPartitions(Collections.singletonList( + new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId))))); } public static WriteShareGroupStateResponseData toErrorResponseData(Uuid topicId, int partitionId, Errors error, String errorMessage) { WriteShareGroupStateResponseData responseData = new WriteShareGroupStateResponseData(); - responseData.setResults(List.of(new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(List.of(new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage))))); + responseData.setResults(Collections.singletonList(new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(topicId) + .setPartitions(Collections.singletonList(new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage))))); return responseData; } public static WriteShareGroupStateResponseData.PartitionResult toErrorResponsePartitionResult(int partitionId, Errors error, String errorMessage) { return new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId) - .setErrorCode(error.code()) - .setErrorMessage(errorMessage); + .setPartition(partitionId) + .setErrorCode(error.code()) + .setErrorMessage(errorMessage); } public static WriteShareGroupStateResponseData.WriteStateResult toResponseWriteStateResult(Uuid topicId, List partitionResults) { return new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(partitionResults); + .setTopicId(topicId) + .setPartitions(partitionResults); } public static WriteShareGroupStateResponseData.PartitionResult toResponsePartitionResult(int partitionId) { return new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(partitionId); - } - - public static WriteShareGroupStateResponseData toGlobalErrorResponse(WriteShareGroupStateRequestData request, Errors error) { - List writeStateResults = new ArrayList<>(); - request.topics().forEach(topicData -> { - List partitionResults = new ArrayList<>(); - topicData.partitions().forEach(partitionData -> partitionResults.add( - toErrorResponsePartitionResult(partitionData.partition(), error, error.message())) - ); - writeStateResults.add(toResponseWriteStateResult(topicData.topicId(), partitionResults)); - }); - return new WriteShareGroupStateResponseData().setResults(writeStateResults); + .setPartition(partitionId); } } diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java index d7b0f30eb39af..f68435fb4c550 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersRequest.java @@ -21,9 +21,10 @@ import org.apache.kafka.common.message.WriteTxnMarkersRequestData.WritableTxnMarker; import org.apache.kafka.common.message.WriteTxnMarkersRequestData.WritableTxnMarkerTopic; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -188,8 +189,8 @@ public List markers() { return markers; } - public static WriteTxnMarkersRequest parse(Readable readable, short version) { - return new WriteTxnMarkersRequest(new WriteTxnMarkersRequestData(readable, version), version); + public static WriteTxnMarkersRequest parse(ByteBuffer buffer, short version) { + return new WriteTxnMarkersRequest(new WriteTxnMarkersRequestData(new ByteBufferAccessor(buffer), version), version); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java index d4269ef81cfef..a7d22e4493e67 100644 --- a/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java +++ b/clients/src/main/java/org/apache/kafka/common/requests/WriteTxnMarkersResponse.java @@ -22,11 +22,11 @@ import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerResult; import org.apache.kafka.common.message.WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; +import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -115,7 +115,7 @@ public void maybeSetThrottleTimeMs(int throttleTimeMs) { @Override public Map errorCounts() { - Map errorCounts = new EnumMap<>(Errors.class); + Map errorCounts = new HashMap<>(); for (WritableTxnMarkerResult marker : data.markers()) { for (WritableTxnMarkerTopicResult topic : marker.topics()) { for (WritableTxnMarkerPartitionResult partitionResult : topic.partitions()) @@ -125,7 +125,7 @@ public Map errorCounts() { return errorCounts; } - public static WriteTxnMarkersResponse parse(Readable readable, short version) { - return new WriteTxnMarkersResponse(new WriteTxnMarkersResponseData(readable, version)); + public static WriteTxnMarkersResponse parse(ByteBuffer buffer, short version) { + return new WriteTxnMarkersResponse(new WriteTxnMarkersResponseData(new ByteBufferAccessor(buffer), version)); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java index 865762c5c364c..029b6881fdb58 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasContext.java @@ -33,7 +33,6 @@ import javax.security.auth.login.AppConfigurationEntry; import javax.security.auth.login.Configuration; -import static org.apache.kafka.common.security.JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_DEFAULT; @@ -104,37 +103,15 @@ else if (contextModules.length != 1) return defaultContext(contextType, listenerContextName, globalContextName); } - @SuppressWarnings("deprecation") - // Visible for testing - static void throwIfLoginModuleIsNotAllowed(AppConfigurationEntry appConfigurationEntry) { - String disallowedProperty = System.getProperty(DISALLOWED_LOGIN_MODULES_CONFIG); - if (disallowedProperty != null) { - LOG.warn("System property '{}' is deprecated and will be removed in a future release. Use '{}' instead.", - DISALLOWED_LOGIN_MODULES_CONFIG, ALLOWED_LOGIN_MODULES_CONFIG); - } - String loginModuleName = appConfigurationEntry.getLoginModuleName().trim(); - String allowedProperty = System.getProperty(ALLOWED_LOGIN_MODULES_CONFIG); - if (allowedProperty != null) { - Set allowedLoginModuleList = Arrays.stream(allowedProperty.split(",")) - .map(String::trim) - .collect(Collectors.toSet()); - if (!allowedLoginModuleList.contains(loginModuleName)) { - throw new IllegalArgumentException(loginModuleName + " is not allowed. Update System property '" - + ALLOWED_LOGIN_MODULES_CONFIG + "' to allow " + loginModuleName); - } - return; - } - if (disallowedProperty == null) { - disallowedProperty = DISALLOWED_LOGIN_MODULES_DEFAULT; - } - Set disallowedLoginModuleList = Arrays.stream(disallowedProperty.split(",")) + private static void throwIfLoginModuleIsNotAllowed(AppConfigurationEntry appConfigurationEntry) { + Set disallowedLoginModuleList = Arrays.stream( + System.getProperty(DISALLOWED_LOGIN_MODULES_CONFIG, DISALLOWED_LOGIN_MODULES_DEFAULT).split(",")) .map(String::trim) .collect(Collectors.toSet()); + String loginModuleName = appConfigurationEntry.getLoginModuleName().trim(); if (disallowedLoginModuleList.contains(loginModuleName)) { - throw new IllegalArgumentException(loginModuleName + " is not allowed. " - + "The system property '" + DISALLOWED_LOGIN_MODULES_CONFIG + "' is deprecated. " - + "Use the " + ALLOWED_LOGIN_MODULES_CONFIG + " to allow this module. e.g.," - + "-D" + ALLOWED_LOGIN_MODULES_CONFIG + "=" + loginModuleName); + throw new IllegalArgumentException(loginModuleName + " is not allowed. Update System property '" + + DISALLOWED_LOGIN_MODULES_CONFIG + "' to allow " + loginModuleName); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java index 16c25d06c1ac6..cfbca0c6d6185 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/JaasUtils.java @@ -18,10 +18,7 @@ public final class JaasUtils { public static final String JAVA_LOGIN_CONFIG_PARAM = "java.security.auth.login.config"; - @Deprecated(since = "4.2") public static final String DISALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.disallowed.login.modules"; - public static final String ALLOWED_LOGIN_MODULES_CONFIG = "org.apache.kafka.allowed.login.modules"; - @Deprecated(since = "4.2") public static final String DISALLOWED_LOGIN_MODULES_DEFAULT = "com.sun.security.auth.module.JndiLoginModule,com.sun.security.auth.module.LdapLoginModule"; public static final String SERVICE_NAME = "serviceName"; diff --git a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java index 92be58ea2dcf0..ec4317268d1d5 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/auth/KafkaPrincipalBuilder.java @@ -23,8 +23,12 @@ * Note that the {@link org.apache.kafka.common.Configurable} and {@link java.io.Closeable} * interfaces are respected if implemented. Additionally, implementations must provide a * default no-arg constructor. + * + * Note that custom implementations of {@link KafkaPrincipalBuilder} + * must also implement {@link KafkaPrincipalSerde}, otherwise brokers will not be able to + * forward requests to the controller. */ -public interface KafkaPrincipalBuilder extends KafkaPrincipalSerde { +public interface KafkaPrincipalBuilder { /** * Build a kafka principal from the authentication context. * @param context The authentication context (either {@link SslAuthenticationContext} or diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java index 5ba472263ddeb..fa654bcb9280d 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/DefaultKafkaPrincipalBuilder.java @@ -25,6 +25,7 @@ import org.apache.kafka.common.security.auth.AuthenticationContext; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.auth.KafkaPrincipalBuilder; +import org.apache.kafka.common.security.auth.KafkaPrincipalSerde; import org.apache.kafka.common.security.auth.PlaintextAuthenticationContext; import org.apache.kafka.common.security.auth.SaslAuthenticationContext; import org.apache.kafka.common.security.auth.SslAuthenticationContext; @@ -49,7 +50,7 @@ * * NOTE: This is an internal class and can change without notice. */ -public class DefaultKafkaPrincipalBuilder implements KafkaPrincipalBuilder { +public class DefaultKafkaPrincipalBuilder implements KafkaPrincipalBuilder, KafkaPrincipalSerde { private final KerberosShortNamer kerberosShortNamer; private final SslPrincipalMapper sslPrincipalMapper; diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java index 25653636b403d..addacd92722c8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslClientAuthenticator.java @@ -690,7 +690,7 @@ public void setAuthenticationEndAndSessionReauthenticationTimes(long nowNanos) { double pctToUse = pctWindowFactorToTakeNetworkLatencyAndClockDriftIntoAccount + RNG.nextDouble() * pctWindowJitterToAvoidReauthenticationStormAcrossManyChannelsSimultaneously; sessionLifetimeMsToUse = (long) (positiveSessionLifetimeMs * pctToUse); - clientSessionReauthenticationTimeNanos = Math.addExact(authenticationEndNanos, Utils.msToNs(sessionLifetimeMsToUse)); + clientSessionReauthenticationTimeNanos = authenticationEndNanos + 1000 * 1000 * sessionLifetimeMsToUse; log.debug( "Finished {} with session expiration in {} ms and session re-authentication on or after {} ms", authenticationOrReauthenticationText(), positiveSessionLifetimeMs, sessionLifetimeMsToUse); diff --git a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java index b84b5dc2abc94..e2ebaa31cd260 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java +++ b/clients/src/main/java/org/apache/kafka/common/security/authenticator/SaslServerAuthenticator.java @@ -318,7 +318,7 @@ public KafkaPrincipal principal() { @Override public Optional principalSerde() { - return Optional.of(principalBuilder); + return principalBuilder instanceof KafkaPrincipalSerde ? Optional.of((KafkaPrincipalSerde) principalBuilder) : Optional.empty(); } @Override @@ -681,7 +681,7 @@ else if (!maxReauthSet) else retvalSessionLifetimeMs = zeroIfNegative(Math.min(credentialExpirationMs - authenticationEndMs, connectionsMaxReauthMs)); - sessionExpirationTimeNanos = Math.addExact(authenticationEndNanos, Utils.msToNs(retvalSessionLifetimeMs)); + sessionExpirationTimeNanos = authenticationEndNanos + 1000 * 1000 * retvalSessionLifetimeMs; } if (credentialExpirationMs != null) { diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java index 6afd31df273f9..fc9e689611520 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandler.java @@ -17,15 +17,19 @@ package org.apache.kafka.common.security.oauthbearer; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.security.auth.SaslExtensions; import org.apache.kafka.common.security.auth.SaslExtensionsCallback; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetrieverFactory; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils; -import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -41,14 +45,13 @@ import javax.security.sasl.SaslException; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; -import static org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils.getConfiguredInstance; /** *

        * OAuthBearerLoginCallbackHandler is an {@link AuthenticateCallbackHandler} that * accepts {@link OAuthBearerTokenCallback} and {@link SaslExtensionsCallback} callbacks to * perform the steps to request a JWT from an OAuth/OIDC provider using the - * client_credentials. This grant type is commonly used for non-interactive + * clientcredentials. This grant type is commonly used for non-interactive * "service accounts" where there is no user available to interactively supply credentials. *

        * @@ -176,56 +179,55 @@ public class OAuthBearerLoginCallbackHandler implements AuthenticateCallbackHand private Map moduleOptions; - private JwtRetriever jwtRetriever; + private AccessTokenRetriever accessTokenRetriever; - private JwtValidator jwtValidator; + private AccessTokenValidator accessTokenValidator; + + private boolean isInitialized = false; @Override public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); - jwtRetriever = getConfiguredInstance( - configs, - saslMechanism, - jaasConfigEntries, - SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS, - JwtRetriever.class - ); - - jwtValidator = getConfiguredInstance( - configs, - saslMechanism, - jaasConfigEntries, - SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, - JwtValidator.class - ); + AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, saslMechanism, moduleOptions); + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism); + init(accessTokenRetriever, accessTokenValidator); + } + + public void init(AccessTokenRetriever accessTokenRetriever, AccessTokenValidator accessTokenValidator) { + this.accessTokenRetriever = accessTokenRetriever; + this.accessTokenValidator = accessTokenValidator; + + try { + this.accessTokenRetriever.init(); + } catch (IOException e) { + throw new KafkaException("The OAuth login configuration encountered an error when initializing the AccessTokenRetriever", e); + } + + isInitialized = true; } /* * Package-visible for testing. */ - void configure(Map configs, - String saslMechanism, - List jaasConfigEntries, - JwtRetriever jwtRetriever, - JwtValidator jwtValidator) { - this.moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); - - this.jwtRetriever = jwtRetriever; - this.jwtRetriever.configure(configs, saslMechanism, jaasConfigEntries); - - this.jwtValidator = jwtValidator; - this.jwtValidator.configure(configs, saslMechanism, jaasConfigEntries); + + AccessTokenRetriever getAccessTokenRetriever() { + return accessTokenRetriever; } @Override public void close() { - Utils.closeQuietly(jwtRetriever, "JWT retriever"); - Utils.closeQuietly(jwtValidator, "JWT validator"); + if (accessTokenRetriever != null) { + try { + this.accessTokenRetriever.close(); + } catch (IOException e) { + log.warn("The OAuth login configuration encountered an error when closing the AccessTokenRetriever", e); + } + } } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - checkConfigured(); + checkInitialized(); for (Callback callback : callbacks) { if (callback instanceof OAuthBearerTokenCallback) { @@ -239,20 +241,20 @@ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallback } private void handleTokenCallback(OAuthBearerTokenCallback callback) throws IOException { - checkConfigured(); - String accessToken = jwtRetriever.retrieve(); + checkInitialized(); + String accessToken = accessTokenRetriever.retrieve(); try { - OAuthBearerToken token = jwtValidator.validate(accessToken); + OAuthBearerToken token = accessTokenValidator.validate(accessToken); callback.token(token); - } catch (JwtValidatorException e) { + } catch (ValidateException e) { log.warn(e.getMessage(), e); callback.error("invalid_token", e.getMessage(), null); } } private void handleExtensionsCallback(SaslExtensionsCallback callback) { - checkConfigured(); + checkInitialized(); Map extensions = new HashMap<>(); @@ -284,9 +286,9 @@ private void handleExtensionsCallback(SaslExtensionsCallback callback) { callback.extensions(saslExtensions); } - private void checkConfigured() { - if (moduleOptions == null || jwtRetriever == null || jwtValidator == null) - throw new IllegalStateException(String.format("To use %s, first call the configure method", getClass().getSimpleName())); + private void checkInitialized() { + if (!isInitialized) + throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName())); } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java index 60fa8cdb6788a..f9422370db18b 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandler.java @@ -17,24 +17,34 @@ package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; -import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.JaasOptionsUtils; +import org.apache.kafka.common.security.oauthbearer.internals.secured.RefreshingHttpsJwksVerificationKeyResolver; +import org.apache.kafka.common.security.oauthbearer.internals.secured.ValidateException; +import org.apache.kafka.common.security.oauthbearer.internals.secured.VerificationKeyResolverFactory; +import org.jose4j.jws.JsonWebSignature; +import org.jose4j.jwx.JsonWebStructure; +import org.jose4j.lang.UnresolvableKeyException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.IOException; +import java.security.Key; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; import javax.security.auth.login.AppConfigurationEntry; -import static org.apache.kafka.common.security.oauthbearer.internals.secured.ConfigurationUtils.getConfiguredInstance; - /** *

        * OAuthBearerValidatorCallbackHandler is an {@link AuthenticateCallbackHandler} that @@ -98,45 +108,64 @@ public class OAuthBearerValidatorCallbackHandler implements AuthenticateCallback private static final Logger log = LoggerFactory.getLogger(OAuthBearerValidatorCallbackHandler.class); + /** + * Because a {@link CloseableVerificationKeyResolver} instance can spawn threads and issue + * HTTP(S) calls ({@link RefreshingHttpsJwksVerificationKeyResolver}), we only want to create + * a new instance for each particular set of configuration. Because each set of configuration + * may have multiple instances, we want to reuse the single instance. + */ + + private static final Map VERIFICATION_KEY_RESOLVER_CACHE = new HashMap<>(); + private CloseableVerificationKeyResolver verificationKeyResolver; - private JwtValidator jwtValidator; + private AccessTokenValidator accessTokenValidator; + + private boolean isInitialized = false; @Override public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { - jwtValidator = getConfiguredInstance( - configs, - saslMechanism, - jaasConfigEntries, - SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS, - JwtValidator.class - ); + Map moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); + CloseableVerificationKeyResolver verificationKeyResolver; + + // Here's the logic which keeps our VerificationKeyResolvers down to a single instance. + synchronized (VERIFICATION_KEY_RESOLVER_CACHE) { + VerificationKeyResolverKey key = new VerificationKeyResolverKey(configs, moduleOptions); + verificationKeyResolver = VERIFICATION_KEY_RESOLVER_CACHE.computeIfAbsent(key, k -> + new RefCountingVerificationKeyResolver(VerificationKeyResolverFactory.create(configs, saslMechanism, moduleOptions))); + } + + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs, saslMechanism, verificationKeyResolver); + init(verificationKeyResolver, accessTokenValidator); } - /* - * Package-visible for testing. - */ - void configure(Map configs, - String saslMechanism, - List jaasConfigEntries, - CloseableVerificationKeyResolver verificationKeyResolver, - JwtValidator jwtValidator) { + public void init(CloseableVerificationKeyResolver verificationKeyResolver, AccessTokenValidator accessTokenValidator) { this.verificationKeyResolver = verificationKeyResolver; - this.verificationKeyResolver.configure(configs, saslMechanism, jaasConfigEntries); + this.accessTokenValidator = accessTokenValidator; + + try { + verificationKeyResolver.init(); + } catch (Exception e) { + throw new KafkaException("The OAuth validator configuration encountered an error when initializing the VerificationKeyResolver", e); + } - this.jwtValidator = jwtValidator; - this.jwtValidator.configure(configs, saslMechanism, jaasConfigEntries); + isInitialized = true; } @Override public void close() { - Utils.closeQuietly(jwtValidator, "JWT validator"); - Utils.closeQuietly(verificationKeyResolver, "JWT verification key resolver"); + if (verificationKeyResolver != null) { + try { + verificationKeyResolver.close(); + } catch (Exception e) { + log.error(e.getMessage(), e); + } + } } @Override public void handle(Callback[] callbacks) throws IOException, UnsupportedCallbackException { - checkConfigured(); + checkInitialized(); for (Callback callback : callbacks) { if (callback instanceof OAuthBearerValidatorCallback) { @@ -150,27 +179,102 @@ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallback } private void handleValidatorCallback(OAuthBearerValidatorCallback callback) { - checkConfigured(); + checkInitialized(); OAuthBearerToken token; try { - token = jwtValidator.validate(callback.tokenValue()); + token = accessTokenValidator.validate(callback.tokenValue()); callback.token(token); - } catch (JwtValidatorException e) { + } catch (ValidateException e) { log.warn(e.getMessage(), e); callback.error("invalid_token", null, null); } } private void handleExtensionsValidatorCallback(OAuthBearerExtensionsValidatorCallback extensionsValidatorCallback) { - checkConfigured(); + checkInitialized(); extensionsValidatorCallback.inputExtensions().map().forEach((extensionName, v) -> extensionsValidatorCallback.valid(extensionName)); } - private void checkConfigured() { - if (jwtValidator == null) - throw new IllegalStateException(String.format("To use %s, first call the configure method", getClass().getSimpleName())); + private void checkInitialized() { + if (!isInitialized) + throw new IllegalStateException(String.format("To use %s, first call the configure or init method", getClass().getSimpleName())); } + + /** + * VkrKey is a simple structure which encapsulates the criteria for different + * sets of configuration. This will allow us to use this object as a key in a {@link Map} + * to keep a single instance per key. + */ + + private static class VerificationKeyResolverKey { + + private final Map configs; + + private final Map moduleOptions; + + public VerificationKeyResolverKey(Map configs, Map moduleOptions) { + this.configs = configs; + this.moduleOptions = moduleOptions; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + + if (o == null || getClass() != o.getClass()) { + return false; + } + + VerificationKeyResolverKey that = (VerificationKeyResolverKey) o; + return configs.equals(that.configs) && moduleOptions.equals(that.moduleOptions); + } + + @Override + public int hashCode() { + return Objects.hash(configs, moduleOptions); + } + + } + + /** + * RefCountingVerificationKeyResolver allows us to share a single + * {@link CloseableVerificationKeyResolver} instance between multiple + * {@link AuthenticateCallbackHandler} instances and perform the lifecycle methods the + * appropriate number of times. + */ + + private static class RefCountingVerificationKeyResolver implements CloseableVerificationKeyResolver { + + private final CloseableVerificationKeyResolver delegate; + + private final AtomicInteger count = new AtomicInteger(0); + + public RefCountingVerificationKeyResolver(CloseableVerificationKeyResolver delegate) { + this.delegate = delegate; + } + + @Override + public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { + return delegate.resolveKey(jws, nestingContext); + } + + @Override + public void init() throws IOException { + if (count.incrementAndGet() == 1) + delegate.init(); + } + + @Override + public void close() throws IOException { + if (count.decrementAndGet() == 0) + delegate.close(); + } + + } + } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java index 447678163b4e8..6561f12f503a9 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslClient.java @@ -29,6 +29,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -130,14 +131,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); + return Arrays.copyOfRange(incoming, offset, offset + len); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); + return Arrays.copyOfRange(outgoing, offset, offset + len); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java index a60f33d0ef156..bf5c4723ee1a3 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/OAuthBearerSaslServer.java @@ -31,6 +31,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; import java.util.Map; import java.util.Objects; @@ -133,14 +134,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); + return Arrays.copyOfRange(incoming, offset, offset + len); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("OAUTHBEARER supports neither integrity nor privacy"); + return Arrays.copyOfRange(outgoing, offset, offset + len); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java new file mode 100644 index 0000000000000..080ea4515b4dd --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetriever.java @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import java.io.Closeable; +import java.io.IOException; + +/** + * An AccessTokenRetriever is the internal API by which the login module will + * retrieve an access token for use in authorization by the broker. The implementation may + * involve authentication to a remote system, or it can be as simple as loading the contents + * of a file or configuration setting. + * + * Retrieval is a separate concern from validation, so it isn't necessary for + * the AccessTokenRetriever implementation to validate the integrity of the JWT + * access token. + * + * @see HttpAccessTokenRetriever + * @see FileTokenRetriever + */ + +public interface AccessTokenRetriever extends Initable, Closeable { + + /** + * Retrieves a JWT access token in its serialized three-part form. The implementation + * is free to determine how it should be retrieved but should not perform validation + * on the result. + * + * Note: This is a blocking function and callers should be aware that the + * implementation may be communicating over a network, with the file system, coordinating + * threads, etc. The facility in the {@link javax.security.auth.spi.LoginModule} from + * which this is ultimately called does not provide an asynchronous approach. + * + * @return Non-null JWT access token string + * + * @throws IOException Thrown on errors related to IO during retrieval + */ + + String retrieve() throws IOException; + + /** + * Lifecycle method to perform a clean shutdown of the retriever. This must + * be performed by the caller to ensure the correct state, freeing up and releasing any + * resources performed in {@link #init()}. + * + * @throws IOException Thrown on errors related to IO during closure + */ + + default void close() throws IOException { + // This method left intentionally blank. + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java new file mode 100644 index 0000000000000..0ed4a1a230349 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactory.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.config.SaslConfigs; + +import java.net.URL; +import java.util.Locale; +import java.util.Map; + +import javax.net.ssl.SSLSocketFactory; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_CONNECT_TIMEOUT_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; +import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.SCOPE_CONFIG; + +public class AccessTokenRetrieverFactory { + + /** + * Create an {@link AccessTokenRetriever} from the given SASL and JAAS configuration. + * + * Note: the returned AccessTokenRetriever is not initialized + * here and must be done by the caller prior to use. + * + * @param configs SASL configuration + * @param jaasConfig JAAS configuration + * + * @return Non-null {@link AccessTokenRetriever} + */ + + public static AccessTokenRetriever create(Map configs, Map jaasConfig) { + return create(configs, null, jaasConfig); + } + + public static AccessTokenRetriever create(Map configs, + String saslMechanism, + Map jaasConfig) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + URL tokenEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL); + + if (tokenEndpointUrl.getProtocol().toLowerCase(Locale.ROOT).equals("file")) { + return new FileTokenRetriever(cu.validateFile(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL)); + } else { + JaasOptionsUtils jou = new JaasOptionsUtils(jaasConfig); + String clientId = jou.validateString(CLIENT_ID_CONFIG); + String clientSecret = jou.validateString(CLIENT_SECRET_CONFIG); + String scope = jou.validateString(SCOPE_CONFIG, false); + + SSLSocketFactory sslSocketFactory = null; + + if (jou.shouldCreateSSLSocketFactory(tokenEndpointUrl)) + sslSocketFactory = jou.createSSLSocketFactory(); + + boolean urlencodeHeader = validateUrlencodeHeader(cu); + + return new HttpAccessTokenRetriever(clientId, + clientSecret, + scope, + sslSocketFactory, + tokenEndpointUrl.toString(), + cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MS), + cu.validateLong(SASL_LOGIN_RETRY_BACKOFF_MAX_MS), + cu.validateInteger(SASL_LOGIN_CONNECT_TIMEOUT_MS, false), + cu.validateInteger(SASL_LOGIN_READ_TIMEOUT_MS, false), + urlencodeHeader); + } + } + + /** + * In some cases, the incoming {@link Map} doesn't contain a value for + * {@link SaslConfigs#SASL_OAUTHBEARER_HEADER_URLENCODE}. Returning {@code null} from {@link Map#get(Object)} + * will cause a {@link NullPointerException} when it is later unboxed. + * + *

        + * + * This utility method ensures that we have a non-{@code null} value to use in the + * {@link HttpAccessTokenRetriever} constructor. + */ + static boolean validateUrlencodeHeader(ConfigurationUtils configurationUtils) { + Boolean urlencodeHeader = configurationUtils.validateBoolean(SASL_OAUTHBEARER_HEADER_URLENCODE, false); + + if (urlencodeHeader != null) + return urlencodeHeader; + else + return DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; + } + +} \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java new file mode 100644 index 0000000000000..0b107a09bc065 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidator.java @@ -0,0 +1,64 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; + +/** + * An instance of AccessTokenValidator acts as a function object that, given an access + * token in base-64 encoded JWT format, can parse the data, perform validation, and construct an + * {@link OAuthBearerToken} for use by the caller. + * + * The primary reason for this abstraction is that client and broker may have different libraries + * available to them to perform these operations. Additionally, the exact steps for validation may + * differ between implementations. To put this more concretely: the implementation in the Kafka + * client does not have bundled a robust library to perform this logic, and it is not the + * responsibility of the client to perform vigorous validation. However, the Kafka broker ships with + * a richer set of library dependencies that can perform more substantial validation and is also + * expected to perform a trust-but-verify test of the access token's signature. + * + * See: + * + *

        + * + * @see LoginAccessTokenValidator A basic AccessTokenValidator used by client-side login + * authentication + * @see ValidatorAccessTokenValidator A more robust AccessTokenValidator that is used on the broker + * to validate the token's contents and verify the signature + */ + +public interface AccessTokenValidator { + + /** + * Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an + * OAuthBearerToken. + * + * @param accessToken Non-null JWT access token + * + * @return {@link OAuthBearerToken} + * + * @throws ValidateException Thrown on errors performing validation of given token + */ + + OAuthBearerToken validate(String accessToken) throws ValidateException; + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java new file mode 100644 index 0000000000000..5fa4c620d902f --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactory.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.jose4j.keys.resolvers.VerificationKeyResolver; + +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME; + +public class AccessTokenValidatorFactory { + + public static AccessTokenValidator create(Map configs) { + return create(configs, (String) null); + } + + public static AccessTokenValidator create(Map configs, String saslMechanism) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + String scopeClaimName = cu.get(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME); + String subClaimName = cu.get(SASL_OAUTHBEARER_SUB_CLAIM_NAME); + return new LoginAccessTokenValidator(scopeClaimName, subClaimName); + } + + public static AccessTokenValidator create(Map configs, + VerificationKeyResolver verificationKeyResolver) { + return create(configs, null, verificationKeyResolver); + } + + public static AccessTokenValidator create(Map configs, + String saslMechanism, + VerificationKeyResolver verificationKeyResolver) { + ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); + Set expectedAudiences = null; + List l = cu.get(SASL_OAUTHBEARER_EXPECTED_AUDIENCE); + + if (l != null) + expectedAudiences = Collections.unmodifiableSet(new HashSet<>(l)); + + Integer clockSkew = cu.validateInteger(SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS, false); + String expectedIssuer = cu.validateString(SASL_OAUTHBEARER_EXPECTED_ISSUER, false); + String scopeClaimName = cu.validateString(SASL_OAUTHBEARER_SCOPE_CLAIM_NAME); + String subClaimName = cu.validateString(SASL_OAUTHBEARER_SUB_CLAIM_NAME); + + return new ValidatorAccessTokenValidator(clockSkew, + expectedAudiences, + expectedIssuer, + verificationKeyResolver, + scopeClaimName, + subClaimName); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java index 582b4e86f701b..5bf5ef068ed0f 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtils.java @@ -17,8 +17,6 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; -import org.apache.kafka.common.security.oauthbearer.JwtValidatorException; - import java.util.Collection; import java.util.Collections; import java.util.HashSet; @@ -49,14 +47,14 @@ public class ClaimValidationUtils { * @return Unmodifiable {@link Set} that includes the values of the original set, but with * each value trimmed * - * @throws JwtValidatorException Thrown if the value is null, contains duplicates, or + * @throws ValidateException Thrown if the value is null, contains duplicates, or * if any of the values in the set are null, empty, * or whitespace only */ - public static Set validateScopes(String scopeClaimName, Collection scopes) throws JwtValidatorException { + public static Set validateScopes(String scopeClaimName, Collection scopes) throws ValidateException { if (scopes == null) - throw new JwtValidatorException(String.format("%s value must be non-null", scopeClaimName)); + throw new ValidateException(String.format("%s value must be non-null", scopeClaimName)); Set copy = new HashSet<>(); @@ -64,7 +62,7 @@ public static Set validateScopes(String scopeClaimName, Collection validateScopes(String scopeClaimName, Collectionnull or negative + * @throws ValidateException Thrown if the value is null or negative */ - public static long validateExpiration(String claimName, Long claimValue) throws JwtValidatorException { + public static long validateExpiration(String claimName, Long claimValue) throws ValidateException { if (claimValue == null) - throw new JwtValidatorException(String.format("%s value must be non-null", claimName)); + throw new ValidateException(String.format("%s value must be non-null", claimName)); if (claimValue < 0) - throw new JwtValidatorException(String.format("%s value must be non-negative; value given was \"%s\"", claimName, claimValue)); + throw new ValidateException(String.format("%s value must be non-negative; value given was \"%s\"", claimName, claimValue)); return claimValue; } @@ -114,10 +112,10 @@ public static long validateExpiration(String claimName, Long claimValue) throws * * @return Trimmed version of the claimValue parameter * - * @throws JwtValidatorException Thrown if the value is null, empty, or whitespace only + * @throws ValidateException Thrown if the value is null, empty, or whitespace only */ - public static String validateSubject(String claimName, String claimValue) throws JwtValidatorException { + public static String validateSubject(String claimName, String claimValue) throws ValidateException { return validateString(claimName, claimValue); } @@ -134,12 +132,12 @@ public static String validateSubject(String claimName, String claimValue) throws * * @return Input parameter, as provided * - * @throws JwtValidatorException Thrown if the value is negative + * @throws ValidateException Thrown if the value is negative */ - public static Long validateIssuedAt(String claimName, Long claimValue) throws JwtValidatorException { + public static Long validateIssuedAt(String claimName, Long claimValue) throws ValidateException { if (claimValue != null && claimValue < 0) - throw new JwtValidatorException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue)); + throw new ValidateException(String.format("%s value must be null or non-negative; value given was \"%s\"", claimName, claimValue)); return claimValue; } @@ -159,24 +157,24 @@ public static Long validateIssuedAt(String claimName, Long claimValue) throws Jw * * @return Trimmed version of the value parameter * - * @throws JwtValidatorException Thrown if the value is null, empty, or whitespace only + * @throws ValidateException Thrown if the value is null, empty, or whitespace only */ - public static String validateClaimNameOverride(String name, String value) throws JwtValidatorException { + public static String validateClaimNameOverride(String name, String value) throws ValidateException { return validateString(name, value); } - private static String validateString(String name, String value) throws JwtValidatorException { + private static String validateString(String name, String value) throws ValidateException { if (value == null) - throw new JwtValidatorException(String.format("%s value must be non-null", name)); + throw new ValidateException(String.format("%s value must be non-null", name)); if (value.isEmpty()) - throw new JwtValidatorException(String.format("%s value must be non-empty", name)); + throw new ValidateException(String.format("%s value must be non-empty", name)); value = value.trim(); if (value.isEmpty()) - throw new JwtValidatorException(String.format("%s value must not contain only whitespace", name)); + throw new ValidateException(String.format("%s value must not contain only whitespace", name)); return value; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java index d38d0708e9446..bf8ca0cb82211 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/CloseableVerificationKeyResolver.java @@ -21,14 +21,33 @@ import org.jose4j.keys.resolvers.VerificationKeyResolver; +import java.io.Closeable; +import java.io.IOException; + /** * The {@link OAuthBearerValidatorCallbackHandler} uses a {@link VerificationKeyResolver} as * part of its validation of the incoming JWT. Some of the VerificationKeyResolver * implementations use resources like threads, connections, etc. that should be properly closed * when no longer needed. Since the VerificationKeyResolver interface itself doesn't * define a close method, we provide a means to do that here. + * + * @see OAuthBearerValidatorCallbackHandler + * @see VerificationKeyResolver + * @see Closeable */ -public interface CloseableVerificationKeyResolver extends OAuthBearerConfigurable, VerificationKeyResolver { +public interface CloseableVerificationKeyResolver extends Initable, Closeable, VerificationKeyResolver { + + /** + * Lifecycle method to perform a clean shutdown of the {@link VerificationKeyResolver}. + * This must be performed by the caller to ensure the correct state, freeing up + * and releasing any resources performed in {@link #init()}. + * + * @throws IOException Thrown on errors related to IO during closure + */ + + default void close() throws IOException { + // This method left intentionally blank. + } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java index 3eebecf8fde10..10f700826c8bd 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtils.java @@ -18,28 +18,19 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.network.ListenerName; -import org.apache.kafka.common.utils.Utils; - -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; import java.io.File; import java.net.MalformedURLException; import java.net.URISyntaxException; import java.net.URL; +import java.nio.file.Path; import java.util.Arrays; -import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Set; import java.util.stream.Collectors; -import javax.security.auth.login.AppConfigurationEntry; - -import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG; -import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT; @@ -50,8 +41,6 @@ public class ConfigurationUtils { - private static final Logger LOG = LoggerFactory.getLogger(ConfigurationUtils.class); - private final Map configs; private final String prefix; @@ -69,10 +58,6 @@ public ConfigurationUtils(Map configs, String saslMechanism) { this.prefix = null; } - public boolean containsKey(String name) { - return get(name) != null; - } - /** * Validates that, if a value is supplied, is a file that: * @@ -86,7 +71,7 @@ public boolean containsKey(String name) { * ignored. Any whitespace is trimmed off of the beginning and end. */ - public File validateFileUrl(String name) { + public Path validateFile(String name) { URL url = validateUrl(name); File file; @@ -96,35 +81,6 @@ public File validateFileUrl(String name) { throw new ConfigException(String.format("The OAuth configuration option %s contains a URL (%s) that is malformed: %s", name, url, e.getMessage())); } - return validateFile(name, file); - } - - /** - * Validates that the file: - * - *
      6. - *
          exists
        - *
          has read permission
        - *
          points to a file
        - *
      7. - */ - public File validateFile(String name) { - String s = validateString(name); - File file = validateFile(name, new File(s).getAbsoluteFile()); - throwIfFileIsNotAllowed(name, file.getAbsolutePath()); - return file; - } - - /** - * Validates that the file: - * - *
      8. - *
          exists
        - *
          has read permission
        - *
          points to a file
        - *
      9. - */ - private File validateFile(String name, File file) { if (!file.exists()) throw new ConfigException(String.format("The OAuth configuration option %s contains a file (%s) that doesn't exist", name, file)); @@ -134,7 +90,7 @@ private File validateFile(String name, File file) { if (file.isDirectory()) throw new ConfigException(String.format("The OAuth configuration option %s references a directory (%s), not a file", name, file)); - return file; + return file.toPath(); } /** @@ -154,7 +110,7 @@ public Integer validateInteger(String name, boolean isRequired) { if (value == null) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); + throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); else return null; } @@ -187,7 +143,7 @@ public Long validateLong(String name, boolean isRequired, Long min) { if (value == null) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); + throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); else return null; } @@ -231,42 +187,42 @@ public URL validateUrl(String name) { if (!(protocol.equals("http") || protocol.equals("https") || protocol.equals("file"))) throw new ConfigException(String.format("The OAuth configuration option %s contains a URL (%s) that contains an invalid protocol (%s); only \"http\", \"https\", and \"file\" protocol are supported", name, value, protocol)); - throwIfURLIsNotAllowed(name, value); + throwIfURLIsNotAllowed(value); return url; } - public String validatePassword(String name) { - Password value = get(name); - - if (value == null || Utils.isBlank(value.value())) - throw new ConfigException(String.format("The OAuth configuration option %s value is required", name)); - - return value.value().trim(); - } - - public String validateString(String name) { + public String validateString(String name) throws ValidateException { return validateString(name, true); } - public String validateString(String name, boolean isRequired) { + public String validateString(String name, boolean isRequired) throws ValidateException { String value = get(name); - if (Utils.isBlank(value)) { + if (value == null) { if (isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s value is required", name)); + throw new ConfigException(String.format("The OAuth configuration option %s value must be non-null", name)); else return null; } - return value.trim(); + value = value.trim(); + + if (value.isEmpty()) { + if (isRequired) + throw new ConfigException(String.format("The OAuth configuration option %s value must not contain only whitespace", name)); + else + return null; + } + + return value; } public Boolean validateBoolean(String name, boolean isRequired) { Boolean value = get(name); if (value == null && isRequired) - throw new ConfigException(String.format("The OAuth configuration option %s is required", name)); + throw new ConfigException(String.format("The OAuth configuration option %s must be non-null", name)); return value; } @@ -281,137 +237,16 @@ public T get(String name) { return (T) configs.get(name); } - public static T getConfiguredInstance(Map configs, - String saslMechanism, - List jaasConfigEntries, - String configName, - Class expectedClass) { - Object configValue = configs.get(configName); - Object o; - - if (configValue instanceof String) { - String implementationClassName = (String) configValue; - - try { - o = Utils.newInstance(implementationClassName, expectedClass); - } catch (Exception e) { - throw new ConfigException( - String.format( - "The class %s defined in the %s configuration could not be instantiated: %s", - implementationClassName, - configName, - e.getMessage() - ) - ); - } - } else if (configValue instanceof Class) { - Class implementationClass = (Class) configValue; - - try { - o = Utils.newInstance(implementationClass); - } catch (Exception e) { - throw new ConfigException( - String.format( - "The class %s defined in the %s configuration could not be instantiated: %s", - implementationClass.getName(), - configName, - e.getMessage() - ) - ); - } - } else if (configValue != null) { - throw new ConfigException( - String.format( - "The type for the %s configuration must be either %s or %s, but was %s", - configName, - String.class.getName(), - Class.class.getName(), - configValue.getClass().getName() - ) - ); - } else { - throw new ConfigException(String.format("The required configuration %s was null", configName)); - } - - if (!expectedClass.isInstance(o)) { - throw new ConfigException( - String.format( - "The configured class (%s) for the %s configuration is not an instance of %s, as is required", - o.getClass().getName(), - configName, - expectedClass.getName() - ) - ); - } - - if (o instanceof OAuthBearerConfigurable) { - try { - ((OAuthBearerConfigurable) o).configure(configs, saslMechanism, jaasConfigEntries); - } catch (Exception e) { - Utils.maybeCloseQuietly(o, "Instance of class " + o.getClass().getName() + " failed call to configure()"); - LOG.warn( - "The class {} defined in the {} configuration encountered an error on configure(): {}", - o.getClass().getName(), - configName, - e.getMessage(), - e - ); - throw new ConfigException( - String.format( - "The class %s defined in the %s configuration encountered an error on configure(): %s", - o.getClass().getName(), - configName, - e.getMessage() - ) - ); - } - } - - return expectedClass.cast(o); - } - // visible for testing // make sure the url is in the "org.apache.kafka.sasl.oauthbearer.allowed.urls" system property - void throwIfURLIsNotAllowed(String configName, String configValue) { - throwIfResourceIsNotAllowed( - "URL", - configName, - configValue, - ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, - ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT - ); - } - - // visible for testing - // make sure the file is in the "org.apache.kafka.sasl.oauthbearer.allowed.files" system property - void throwIfFileIsNotAllowed(String configName, String configValue) { - throwIfResourceIsNotAllowed( - "file", - configName, - configValue, - ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, - ALLOWED_SASL_OAUTHBEARER_FILES_DEFAULT - ); - } - - private void throwIfResourceIsNotAllowed(String resourceType, - String configName, - String configValue, - String propertyName, - String propertyDefault) { - String[] allowedArray = System.getProperty(propertyName, propertyDefault).split(","); - Set allowed = Arrays.stream(allowedArray) - .map(String::trim) - .collect(Collectors.toSet()); - - if (!allowed.contains(configValue)) { - String message = String.format( - "The %s cannot be accessed due to restrictions. Update the system property '%s' to allow the %s to be accessed.", - resourceType, - propertyName, - resourceType - ); - throw new ConfigException(configName, configValue, message); + void throwIfURLIsNotAllowed(String value) { + Set allowedUrls = Arrays.stream( + System.getProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, ALLOWED_SASL_OAUTHBEARER_URLS_DEFAULT).split(",")) + .map(String::trim) + .collect(Collectors.toSet()); + if (!allowedUrls.contains(value)) { + throw new ConfigException(value + " is not allowed. Update system property '" + + ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG + "' to allow " + value); } } } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java new file mode 100644 index 0000000000000..c145cf7596959 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/FileTokenRetriever.java @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.utils.Utils; + +import java.io.IOException; +import java.nio.file.Path; + +/** + * FileTokenRetriever is an {@link AccessTokenRetriever} that will load the contents, + * interpreting them as a JWT access key in the serialized form. + * + * @see AccessTokenRetriever + */ + +public class FileTokenRetriever implements AccessTokenRetriever { + + private final Path accessTokenFile; + + private String accessToken; + + public FileTokenRetriever(Path accessTokenFile) { + this.accessTokenFile = accessTokenFile; + } + + @Override + public void init() throws IOException { + this.accessToken = Utils.readFileAsString(accessTokenFile.toFile().getPath()); + // always non-null; to remove any newline chars or backend will report err + this.accessToken = this.accessToken.trim(); + } + + @Override + public String retrieve() throws IOException { + if (accessToken == null) + throw new IllegalStateException("Access token is null; please call init() first"); + + return accessToken; + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java new file mode 100644 index 0000000000000..fdc5707278a60 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetriever.java @@ -0,0 +1,400 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler; +import org.apache.kafka.common.utils.Utils; + +import com.fasterxml.jackson.databind.JsonNode; +import com.fasterxml.jackson.databind.ObjectMapper; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.net.URL; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.Base64; +import java.util.Collections; +import java.util.HashSet; +import java.util.Map; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import javax.net.ssl.HttpsURLConnection; +import javax.net.ssl.SSLSocketFactory; + +/** + * HttpAccessTokenRetriever is an {@link AccessTokenRetriever} that will + * communicate with an OAuth/OIDC provider directly via HTTP to post client credentials + * ({@link OAuthBearerLoginCallbackHandler#CLIENT_ID_CONFIG}/{@link OAuthBearerLoginCallbackHandler#CLIENT_SECRET_CONFIG}) + * to a publicized token endpoint URL + * ({@link SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL}). + * + * @see AccessTokenRetriever + * @see OAuthBearerLoginCallbackHandler#CLIENT_ID_CONFIG + * @see OAuthBearerLoginCallbackHandler#CLIENT_SECRET_CONFIG + * @see OAuthBearerLoginCallbackHandler#SCOPE_CONFIG + * @see SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL + */ + +public class HttpAccessTokenRetriever implements AccessTokenRetriever { + + private static final Logger log = LoggerFactory.getLogger(HttpAccessTokenRetriever.class); + + private static final Set UNRETRYABLE_HTTP_CODES; + + private static final int MAX_RESPONSE_BODY_LENGTH = 1000; + + public static final String AUTHORIZATION_HEADER = "Authorization"; + + static { + // This does not have to be an exhaustive list. There are other HTTP codes that + // are defined in different RFCs (e.g. https://datatracker.ietf.org/doc/html/rfc6585) + // that we won't worry about yet. The worst case if a status code is missing from + // this set is that the request will be retried. + UNRETRYABLE_HTTP_CODES = new HashSet<>(); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_BAD_REQUEST); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_UNAUTHORIZED); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_PAYMENT_REQUIRED); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_FORBIDDEN); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_NOT_FOUND); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_BAD_METHOD); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_NOT_ACCEPTABLE); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_PROXY_AUTH); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_CONFLICT); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_GONE); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_LENGTH_REQUIRED); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_PRECON_FAILED); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_ENTITY_TOO_LARGE); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_REQ_TOO_LONG); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_UNSUPPORTED_TYPE); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_NOT_IMPLEMENTED); + UNRETRYABLE_HTTP_CODES.add(HttpURLConnection.HTTP_VERSION); + } + + private final String clientId; + + private final String clientSecret; + + private final String scope; + + private final SSLSocketFactory sslSocketFactory; + + private final String tokenEndpointUrl; + + private final long loginRetryBackoffMs; + + private final long loginRetryBackoffMaxMs; + + private final Integer loginConnectTimeoutMs; + + private final Integer loginReadTimeoutMs; + + private final boolean urlencodeHeader; + + public HttpAccessTokenRetriever(String clientId, + String clientSecret, + String scope, + SSLSocketFactory sslSocketFactory, + String tokenEndpointUrl, + long loginRetryBackoffMs, + long loginRetryBackoffMaxMs, + Integer loginConnectTimeoutMs, + Integer loginReadTimeoutMs, + boolean urlencodeHeader) { + this.clientId = Objects.requireNonNull(clientId); + this.clientSecret = Objects.requireNonNull(clientSecret); + this.scope = scope; + this.sslSocketFactory = sslSocketFactory; + this.tokenEndpointUrl = Objects.requireNonNull(tokenEndpointUrl); + this.loginRetryBackoffMs = loginRetryBackoffMs; + this.loginRetryBackoffMaxMs = loginRetryBackoffMaxMs; + this.loginConnectTimeoutMs = loginConnectTimeoutMs; + this.loginReadTimeoutMs = loginReadTimeoutMs; + this.urlencodeHeader = urlencodeHeader; + } + + /** + * Retrieves a JWT access token in its serialized three-part form. The implementation + * is free to determine how it should be retrieved but should not perform validation + * on the result. + * + * Note: This is a blocking function and callers should be aware that the + * implementation communicates over a network. The facility in the + * {@link javax.security.auth.spi.LoginModule} from which this is ultimately called + * does not provide an asynchronous approach. + * + * @return Non-null JWT access token string + * + * @throws IOException Thrown on errors related to IO during retrieval + */ + + @Override + public String retrieve() throws IOException { + String authorizationHeader = formatAuthorizationHeader(clientId, clientSecret, urlencodeHeader); + String requestBody = formatRequestBody(scope); + Retry retry = new Retry<>(loginRetryBackoffMs, loginRetryBackoffMaxMs); + Map headers = Collections.singletonMap(AUTHORIZATION_HEADER, authorizationHeader); + + String responseBody; + + try { + responseBody = retry.execute(() -> { + HttpURLConnection con = null; + + try { + con = (HttpURLConnection) new URL(tokenEndpointUrl).openConnection(); + + if (sslSocketFactory != null && con instanceof HttpsURLConnection) + ((HttpsURLConnection) con).setSSLSocketFactory(sslSocketFactory); + + return post(con, headers, requestBody, loginConnectTimeoutMs, loginReadTimeoutMs); + } catch (IOException e) { + throw new ExecutionException(e); + } finally { + if (con != null) + con.disconnect(); + } + }); + } catch (ExecutionException e) { + if (e.getCause() instanceof IOException) + throw (IOException) e.getCause(); + else + throw new KafkaException(e.getCause()); + } + + return parseAccessToken(responseBody); + } + + public static String post(HttpURLConnection con, + Map headers, + String requestBody, + Integer connectTimeoutMs, + Integer readTimeoutMs) + throws IOException, UnretryableException { + handleInput(con, headers, requestBody, connectTimeoutMs, readTimeoutMs); + return handleOutput(con); + } + + private static void handleInput(HttpURLConnection con, + Map headers, + String requestBody, + Integer connectTimeoutMs, + Integer readTimeoutMs) + throws IOException, UnretryableException { + log.debug("handleInput - starting post for {}", con.getURL()); + con.setRequestMethod("POST"); + con.setRequestProperty("Accept", "application/json"); + + if (headers != null) { + for (Map.Entry header : headers.entrySet()) + con.setRequestProperty(header.getKey(), header.getValue()); + } + + con.setRequestProperty("Cache-Control", "no-cache"); + + if (requestBody != null) { + con.setRequestProperty("Content-Length", String.valueOf(requestBody.length())); + con.setDoOutput(true); + } + + con.setUseCaches(false); + + if (connectTimeoutMs != null) + con.setConnectTimeout(connectTimeoutMs); + + if (readTimeoutMs != null) + con.setReadTimeout(readTimeoutMs); + + log.debug("handleInput - preparing to connect to {}", con.getURL()); + con.connect(); + + if (requestBody != null) { + try (OutputStream os = con.getOutputStream()) { + ByteArrayInputStream is = new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8)); + log.debug("handleInput - preparing to write request body to {}", con.getURL()); + copy(is, os); + } + } + } + + static String handleOutput(final HttpURLConnection con) throws IOException { + int responseCode = con.getResponseCode(); + log.debug("handleOutput - responseCode: {}", responseCode); + + // NOTE: the contents of the response should not be logged so that we don't leak any + // sensitive data. + String responseBody = null; + + // NOTE: It is OK to log the error response body and/or its formatted version as + // per the OAuth spec, it doesn't include sensitive information. + // See https://www.ietf.org/rfc/rfc6749.txt, section 5.2 + String errorResponseBody = null; + + try (InputStream is = con.getInputStream()) { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + log.debug("handleOutput - preparing to read response body from {}", con.getURL()); + copy(is, os); + responseBody = os.toString(StandardCharsets.UTF_8); + } catch (Exception e) { + // there still can be useful error response from the servers, lets get it + try (InputStream is = con.getErrorStream()) { + ByteArrayOutputStream os = new ByteArrayOutputStream(); + log.debug("handleOutput - preparing to read error response body from {}", con.getURL()); + copy(is, os); + errorResponseBody = os.toString(StandardCharsets.UTF_8); + } catch (Exception e2) { + log.warn("handleOutput - error retrieving error information", e2); + } + log.warn("handleOutput - error retrieving data", e); + } + + if (responseCode == HttpURLConnection.HTTP_OK || responseCode == HttpURLConnection.HTTP_CREATED) { + log.debug("handleOutput - responseCode: {}, error response: {}", responseCode, + errorResponseBody); + + if (responseBody == null || responseBody.isEmpty()) + throw new IOException(String.format("The token endpoint response was unexpectedly empty despite response code %d from %s and error message %s", + responseCode, con.getURL(), formatErrorMessage(errorResponseBody))); + + return responseBody; + } else { + log.warn("handleOutput - error response code: {}, error response body: {}", responseCode, + formatErrorMessage(errorResponseBody)); + + if (UNRETRYABLE_HTTP_CODES.contains(responseCode)) { + // We know that this is a non-transient error, so let's not keep retrying the + // request unnecessarily. + throw new UnretryableException(new IOException(String.format("The response code %s and error response %s was encountered reading the token endpoint response; will not attempt further retries", + responseCode, formatErrorMessage(errorResponseBody)))); + } else { + // We don't know if this is a transient (retryable) error or not, so let's assume + // it is. + throw new IOException(String.format("The unexpected response code %s and error message %s was encountered reading the token endpoint response", + responseCode, formatErrorMessage(errorResponseBody))); + } + } + } + + static void copy(InputStream is, OutputStream os) throws IOException { + byte[] buf = new byte[4096]; + int b; + + while ((b = is.read(buf)) != -1) + os.write(buf, 0, b); + } + + static String formatErrorMessage(String errorResponseBody) { + // See https://www.ietf.org/rfc/rfc6749.txt, section 5.2 for the format + // of this error message. + if (errorResponseBody == null || errorResponseBody.trim().isEmpty()) { + return "{}"; + } + ObjectMapper mapper = new ObjectMapper(); + try { + JsonNode rootNode = mapper.readTree(errorResponseBody); + if (!rootNode.at("/error").isMissingNode()) { + return String.format("{%s - %s}", rootNode.at("/error"), rootNode.at("/error_description")); + } else if (!rootNode.at("/errorCode").isMissingNode()) { + return String.format("{%s - %s}", rootNode.at("/errorCode"), rootNode.at("/errorSummary")); + } else { + return errorResponseBody; + } + } catch (Exception e) { + log.warn("Error parsing error response", e); + } + return String.format("{%s}", errorResponseBody); + } + + static String parseAccessToken(String responseBody) throws IOException { + ObjectMapper mapper = new ObjectMapper(); + JsonNode rootNode = mapper.readTree(responseBody); + JsonNode accessTokenNode = rootNode.at("/access_token"); + + if (accessTokenNode == null) { + // Only grab the first N characters so that if the response body is huge, we don't + // blow up. + String snippet = responseBody; + + if (snippet.length() > MAX_RESPONSE_BODY_LENGTH) { + int actualLength = responseBody.length(); + String s = responseBody.substring(0, MAX_RESPONSE_BODY_LENGTH); + snippet = String.format("%s (trimmed to first %d characters out of %d total)", s, MAX_RESPONSE_BODY_LENGTH, actualLength); + } + + throw new IOException(String.format("The token endpoint response did not contain an access_token value. Response: (%s)", snippet)); + } + + return sanitizeString("the token endpoint response's access_token JSON attribute", accessTokenNode.textValue()); + } + + static String formatAuthorizationHeader(String clientId, String clientSecret, boolean urlencode) { + clientId = sanitizeString("the token endpoint request client ID parameter", clientId); + clientSecret = sanitizeString("the token endpoint request client secret parameter", clientSecret); + + // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 + if (urlencode) { + clientId = URLEncoder.encode(clientId, StandardCharsets.UTF_8); + clientSecret = URLEncoder.encode(clientSecret, StandardCharsets.UTF_8); + } + + String s = String.format("%s:%s", clientId, clientSecret); + // Per RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. + String encoded = Base64.getEncoder().encodeToString(Utils.utf8(s)); + return String.format("Basic %s", encoded); + } + + static String formatRequestBody(String scope) { + StringBuilder requestParameters = new StringBuilder(); + requestParameters.append("grant_type=client_credentials"); + + if (scope != null && !scope.trim().isEmpty()) { + scope = scope.trim(); + String encodedScope = URLEncoder.encode(scope, StandardCharsets.UTF_8); + requestParameters.append("&scope=").append(encodedScope); + } + + return requestParameters.toString(); + } + + private static String sanitizeString(String name, String value) { + if (value == null) + throw new IllegalArgumentException(String.format("The value for %s must be non-null", name)); + + if (value.isEmpty()) + throw new IllegalArgumentException(String.format("The value for %s must be non-empty", name)); + + value = value.trim(); + + if (value.isEmpty()) + throw new IllegalArgumentException(String.format("The value for %s must not contain only whitespace", name)); + + return value; + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/Initable.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/Initable.java new file mode 100644 index 0000000000000..0a38f2b5094d5 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/Initable.java @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import java.io.IOException; + +public interface Initable { + + /** + * Lifecycle method to perform any one-time initialization of the retriever. This must + * be performed by the caller to ensure the correct state before methods are invoked. + * + * @throws IOException Thrown on errors related to IO during initialization + */ + + default void init() throws IOException { + // This method left intentionally blank. + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java index ec6d3daafe8e2..3e49595dbc1b2 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/JaasOptionsUtils.java @@ -20,12 +20,10 @@ import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.config.types.Password; import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; import org.apache.kafka.common.security.ssl.DefaultSslEngineFactory; import org.apache.kafka.common.security.ssl.SslFactory; -import org.apache.kafka.common.utils.Utils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -54,10 +52,6 @@ public JaasOptionsUtils(Map options) { this.options = options; } - public JaasOptionsUtils(String saslMechanism, List jaasConfigEntries) { - this.options = getOptions(saslMechanism, jaasConfigEntries); - } - public static Map getOptions(String saslMechanism, List jaasConfigEntries) { if (!OAuthBearerLoginModule.OAUTHBEARER_MECHANISM.equals(saslMechanism)) throw new IllegalArgumentException(String.format("Unexpected SASL mechanism: %s", saslMechanism)); @@ -68,10 +62,6 @@ public static Map getOptions(String saslMechanism, ListJwksFileVerificationKeyResolver is a {@link VerificationKeyResolver} implementation @@ -83,46 +79,41 @@ * @see org.apache.kafka.common.config.SaslConfigs#SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL * @see VerificationKeyResolver */ + public class JwksFileVerificationKeyResolver implements CloseableVerificationKeyResolver { private static final Logger log = LoggerFactory.getLogger(JwksFileVerificationKeyResolver.class); - private CachedFile delegate; + private final Path jwksFile; - @Override - public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { - ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); - File file = cu.validateFileUrl(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); - delegate = new CachedFile<>(file, new VerificationKeyResolverTransformer(), lastModifiedPolicy()); + private VerificationKeyResolver delegate; + + public JwksFileVerificationKeyResolver(Path jwksFile) { + this.jwksFile = jwksFile; } @Override - public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { - if (delegate == null) - throw new UnresolvableKeyException("VerificationKeyResolver delegate is null; please call configure() first"); - - return delegate.transformed().resolveKey(jws, nestingContext); - } + public void init() throws IOException { + log.debug("Starting creation of new VerificationKeyResolver from {}", jwksFile); + String json = Utils.readFileAsString(jwksFile.toFile().getPath()); - /** - * "Transforms" the raw file contents into a {@link VerificationKeyResolver} that can be used to resolve - * the keys provided in the JWT. - */ - private static class VerificationKeyResolverTransformer implements CachedFile.Transformer { + JsonWebKeySet jwks; - @Override - public VerificationKeyResolver transform(File file, String contents) { - log.debug("Starting creation of new VerificationKeyResolver from {}", file.getPath()); + try { + jwks = new JsonWebKeySet(json); + } catch (JoseException e) { + throw new IOException(e); + } - JsonWebKeySet jwks; + delegate = new JwksVerificationKeyResolver(jwks.getJsonWebKeys()); + } - try { - jwks = new JsonWebKeySet(contents); - } catch (Exception e) { - throw new ConfigException(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file.getPath(), e.getMessage()); - } + @Override + public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { + if (delegate == null) + throw new UnresolvableKeyException("VerificationKeyResolver delegate is null; please call init() first"); - return new JwksVerificationKeyResolver(jwks.getJsonWebKeys()); - } + return delegate.resolveKey(jws, nestingContext); } + } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java new file mode 100644 index 0000000000000..773311ff0ab18 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidator.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; +import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerIllegalTokenException; +import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredJws; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.Set; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME; +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME; + +/** + * LoginAccessTokenValidator is an implementation of {@link AccessTokenValidator} that is used + * by the client to perform some rudimentary validation of the JWT access token that is received + * as part of the response from posting the client credentials to the OAuth/OIDC provider's + * token endpoint. + * + * The validation steps performed are: + * + *
          + *
        1. + * Basic structural validation of the b64token value as defined in + * RFC 6750 Section 2.1 + *
        2. + *
        3. Basic conversion of the token into an in-memory map
        4. + *
        5. Presence of scope, exp, subject, and iat claims
        6. + *
        + */ + +public class LoginAccessTokenValidator implements AccessTokenValidator { + + private static final Logger log = LoggerFactory.getLogger(LoginAccessTokenValidator.class); + + public static final String EXPIRATION_CLAIM_NAME = "exp"; + + public static final String ISSUED_AT_CLAIM_NAME = "iat"; + + private final String scopeClaimName; + + private final String subClaimName; + + /** + * Creates a new LoginAccessTokenValidator that will be used by the client for lightweight + * validation of the JWT. + * + * @param scopeClaimName Name of the scope claim to use; must be non-null + * @param subClaimName Name of the subject claim to use; must be non-null + */ + + public LoginAccessTokenValidator(String scopeClaimName, String subClaimName) { + this.scopeClaimName = ClaimValidationUtils.validateClaimNameOverride(DEFAULT_SASL_OAUTHBEARER_SCOPE_CLAIM_NAME, scopeClaimName); + this.subClaimName = ClaimValidationUtils.validateClaimNameOverride(DEFAULT_SASL_OAUTHBEARER_SUB_CLAIM_NAME, subClaimName); + } + + /** + * Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an + * OAuthBearerToken. + * + * @param accessToken Non-null JWT access token + * @return {@link OAuthBearerToken} + * @throws ValidateException Thrown on errors performing validation of given token + */ + + @SuppressWarnings("unchecked") + public OAuthBearerToken validate(String accessToken) throws ValidateException { + SerializedJwt serializedJwt = new SerializedJwt(accessToken); + Map payload; + + try { + payload = OAuthBearerUnsecuredJws.toMap(serializedJwt.getPayload()); + } catch (OAuthBearerIllegalTokenException e) { + throw new ValidateException(String.format("Could not validate the access token: %s", e.getMessage()), e); + } + + Object scopeRaw = getClaim(payload, scopeClaimName); + Collection scopeRawCollection; + + if (scopeRaw instanceof String) + scopeRawCollection = Collections.singletonList((String) scopeRaw); + else if (scopeRaw instanceof Collection) + scopeRawCollection = (Collection) scopeRaw; + else + scopeRawCollection = Collections.emptySet(); + + Number expirationRaw = (Number) getClaim(payload, EXPIRATION_CLAIM_NAME); + String subRaw = (String) getClaim(payload, subClaimName); + Number issuedAtRaw = (Number) getClaim(payload, ISSUED_AT_CLAIM_NAME); + + Set scopes = ClaimValidationUtils.validateScopes(scopeClaimName, scopeRawCollection); + long expiration = ClaimValidationUtils.validateExpiration(EXPIRATION_CLAIM_NAME, + expirationRaw != null ? expirationRaw.longValue() * 1000L : null); + String subject = ClaimValidationUtils.validateSubject(subClaimName, subRaw); + Long issuedAt = ClaimValidationUtils.validateIssuedAt(ISSUED_AT_CLAIM_NAME, + issuedAtRaw != null ? issuedAtRaw.longValue() * 1000L : null); + + return new BasicOAuthBearerToken(accessToken, + scopes, + expiration, + subject, + issuedAt); + } + + private Object getClaim(Map payload, String claimName) { + Object value = payload.get(claimName); + log.debug("getClaim - {}: {}", claimName, value); + return value; + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java index d8014010a7d7a..62261fed58df8 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwks.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; -import org.apache.kafka.common.security.oauthbearer.BrokerJwtValidator; import org.apache.kafka.common.utils.Time; import org.jose4j.jwk.HttpsJwks; @@ -26,6 +25,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.Closeable; import java.io.IOException; import java.util.Collections; import java.util.LinkedHashMap; @@ -49,14 +49,15 @@ * This instance is created and provided to the * {@link org.jose4j.keys.resolvers.HttpsJwksVerificationKeyResolver} that is used when using * an HTTP-/HTTPS-based {@link org.jose4j.keys.resolvers.VerificationKeyResolver}, which is then - * provided to the {@link BrokerJwtValidator} to use in validating the signature of + * provided to the {@link ValidatorAccessTokenValidator} to use in validating the signature of * a JWT. * * @see org.jose4j.keys.resolvers.HttpsJwksVerificationKeyResolver * @see org.jose4j.keys.resolvers.VerificationKeyResolver - * @see BrokerJwtValidator + * @see ValidatorAccessTokenValidator */ -public final class RefreshingHttpsJwks implements OAuthBearerConfigurable { + +public final class RefreshingHttpsJwks implements Initable, Closeable { private static final Logger log = LoggerFactory.getLogger(RefreshingHttpsJwks.class); @@ -170,6 +171,7 @@ public RefreshingHttpsJwks(Time time, this(time, httpsJwks, refreshMs, refreshRetryBackoffMs, refreshRetryBackoffMaxMs, Executors.newSingleThreadScheduledExecutor()); } + @Override public void init() throws IOException { try { log.debug("init started"); @@ -373,4 +375,5 @@ public boolean maybeExpediteRefresh(String keyId) { } } } + } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java index d6f6a01089419..52d0c6c39785f 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/RefreshingHttpsJwksVerificationKeyResolver.java @@ -17,8 +17,6 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; -import org.apache.kafka.common.KafkaException; - import org.jose4j.jwk.HttpsJwks; import org.jose4j.jwk.JsonWebKey; import org.jose4j.jwk.VerificationJwkSelector; @@ -33,9 +31,6 @@ import java.io.IOException; import java.security.Key; import java.util.List; -import java.util.Map; - -import javax.security.auth.login.AppConfigurationEntry; /** * RefreshingHttpsJwksVerificationKeyResolver is a @@ -85,6 +80,7 @@ * @see RefreshingHttpsJwks * @see HttpsJwks */ + public class RefreshingHttpsJwksVerificationKeyResolver implements CloseableVerificationKeyResolver { private static final Logger log = LoggerFactory.getLogger(RefreshingHttpsJwksVerificationKeyResolver.class); @@ -101,14 +97,15 @@ public RefreshingHttpsJwksVerificationKeyResolver(RefreshingHttpsJwks refreshing } @Override - public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { + public void init() throws IOException { try { - log.debug("configure started"); + log.debug("init started"); + refreshingHttpsJwks.init(); - } catch (IOException e) { - throw new KafkaException(e); } finally { isInitialized = true; + + log.debug("init completed"); } } @@ -126,7 +123,7 @@ public void close() { @Override public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { if (!isInitialized) - throw new IllegalStateException("Please call configure() first"); + throw new IllegalStateException("Please call init() first"); try { List jwks = refreshingHttpsJwks.getJsonWebKeys(); @@ -151,4 +148,5 @@ public Key resolveKey(JsonWebSignature jws, List nestingContex throw new UnresolvableKeyException(sb, e); } } + } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java index b9a500410964b..f45865fa63848 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/SerializedJwt.java @@ -17,8 +17,6 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; -import org.apache.kafka.common.security.oauthbearer.JwtValidatorException; - /** * SerializedJwt provides a modicum of structure and validation around a JWT's serialized form by * splitting and making the three sections (header, payload, and signature) available to the user. @@ -41,12 +39,12 @@ public SerializedJwt(String token) { token = token.trim(); if (token.isEmpty()) - throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); String[] splits = token.split("\\."); if (splits.length != 3) - throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); this.token = token.trim(); this.header = validateSection(splits[0]); @@ -94,11 +92,11 @@ public String getSignature() { return signature; } - private String validateSection(String section) throws JwtValidatorException { + private String validateSection(String section) throws ValidateException { section = section.trim(); if (section.isEmpty()) - throw new JwtValidatorException("Malformed JWT provided; expected three sections (header, payload, and signature)"); + throw new ValidateException("Malformed JWT provided; expected three sections (header, payload, and signature)"); return section; } diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java new file mode 100644 index 0000000000000..430b9007830cb --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidateException.java @@ -0,0 +1,47 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.KafkaException; + +import javax.security.auth.callback.Callback; + +/** + * ValidateException is thrown in cases where a JWT access token cannot be determined to be + * valid for one reason or another. It is intended to be used when errors arise within the + * processing of a {@link javax.security.auth.callback.CallbackHandler#handle(Callback[])}. + * This error, however, is not thrown from that method directly. + * + * @see AccessTokenValidator#validate(String) + */ + +public class ValidateException extends KafkaException { + + public ValidateException(String message) { + super(message); + } + + public ValidateException(Throwable cause) { + super(cause); + } + + public ValidateException(String message, Throwable cause) { + super(message, cause); + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java new file mode 100644 index 0000000000000..c7ae8edae9d93 --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidator.java @@ -0,0 +1,209 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; + +import org.jose4j.jwt.JwtClaims; +import org.jose4j.jwt.MalformedClaimException; +import org.jose4j.jwt.NumericDate; +import org.jose4j.jwt.ReservedClaimNames; +import org.jose4j.jwt.consumer.InvalidJwtException; +import org.jose4j.jwt.consumer.JwtConsumer; +import org.jose4j.jwt.consumer.JwtConsumerBuilder; +import org.jose4j.jwt.consumer.JwtContext; +import org.jose4j.keys.resolvers.VerificationKeyResolver; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collection; +import java.util.Collections; +import java.util.Set; + +import static org.jose4j.jwa.AlgorithmConstraints.DISALLOW_NONE; + +/** + * ValidatorAccessTokenValidator is an implementation of {@link AccessTokenValidator} that is used + * by the broker to perform more extensive validation of the JWT access token that is received + * from the client, but ultimately from posting the client credentials to the OAuth/OIDC provider's + * token endpoint. + * + * The validation steps performed (primary by the jose4j library) are: + * + *
          + *
        1. + * Basic structural validation of the b64token value as defined in + * RFC 6750 Section 2.1 + *
        2. + *
        3. Basic conversion of the token into an in-memory data structure
        4. + *
        5. + * Presence of scope, exp, subject, iss, and + * iat claims + *
        6. + *
        7. + * Signature matching validation against the kid and those provided by + * the OAuth/OIDC provider's JWKS + *
        8. + *
        + */ + +public class ValidatorAccessTokenValidator implements AccessTokenValidator { + + private static final Logger log = LoggerFactory.getLogger(ValidatorAccessTokenValidator.class); + + private final JwtConsumer jwtConsumer; + + private final String scopeClaimName; + + private final String subClaimName; + + /** + * Creates a new ValidatorAccessTokenValidator that will be used by the broker for more + * thorough validation of the JWT. + * + * @param clockSkew The optional value (in seconds) to allow for differences + * between the time of the OAuth/OIDC identity provider and + * the broker. If null is provided, the broker + * and the OAUth/OIDC identity provider are assumed to have + * very close clock settings. + * @param expectedAudiences The (optional) set the broker will use to verify that + * the JWT was issued for one of the expected audiences. + * The JWT will be inspected for the standard OAuth + * aud claim and if this value is set, the + * broker will match the value from JWT's aud + * claim to see if there is an exact match. If there is no + * match, the broker will reject the JWT and authentication + * will fail. May be null to not perform any + * check to verify the JWT's aud claim matches any + * fixed set of known/expected audiences. + * @param expectedIssuer The (optional) value for the broker to use to verify that + * the JWT was created by the expected issuer. The JWT will + * be inspected for the standard OAuth iss claim + * and if this value is set, the broker will match it + * exactly against what is in the JWT's iss + * claim. If there is no match, the broker will reject the JWT + * and authentication will fail. May be null to not + * perform any check to verify the JWT's iss claim + * matches a specific issuer. + * @param verificationKeyResolver jose4j-based {@link VerificationKeyResolver} that is used + * to validate the signature matches the contents of the header + * and payload + * @param scopeClaimName Name of the scope claim to use; must be non-null + * @param subClaimName Name of the subject claim to use; must be + * non-null + * + * @see JwtConsumerBuilder + * @see JwtConsumer + * @see VerificationKeyResolver + */ + + public ValidatorAccessTokenValidator(Integer clockSkew, + Set expectedAudiences, + String expectedIssuer, + VerificationKeyResolver verificationKeyResolver, + String scopeClaimName, + String subClaimName) { + final JwtConsumerBuilder jwtConsumerBuilder = new JwtConsumerBuilder(); + + if (clockSkew != null) + jwtConsumerBuilder.setAllowedClockSkewInSeconds(clockSkew); + + if (expectedAudiences != null && !expectedAudiences.isEmpty()) + jwtConsumerBuilder.setExpectedAudience(expectedAudiences.toArray(new String[0])); + + if (expectedIssuer != null) + jwtConsumerBuilder.setExpectedIssuer(expectedIssuer); + + this.jwtConsumer = jwtConsumerBuilder + .setJwsAlgorithmConstraints(DISALLOW_NONE) + .setRequireExpirationTime() + .setRequireIssuedAt() + .setVerificationKeyResolver(verificationKeyResolver) + .build(); + this.scopeClaimName = scopeClaimName; + this.subClaimName = subClaimName; + } + + /** + * Accepts an OAuth JWT access token in base-64 encoded format, validates, and returns an + * OAuthBearerToken. + * + * @param accessToken Non-null JWT access token + * @return {@link OAuthBearerToken} + * @throws ValidateException Thrown on errors performing validation of given token + */ + + @SuppressWarnings("unchecked") + public OAuthBearerToken validate(String accessToken) throws ValidateException { + SerializedJwt serializedJwt = new SerializedJwt(accessToken); + + JwtContext jwt; + + try { + jwt = jwtConsumer.process(serializedJwt.getToken()); + } catch (InvalidJwtException e) { + throw new ValidateException(String.format("Could not validate the access token: %s", e.getMessage()), e); + } + + JwtClaims claims = jwt.getJwtClaims(); + + Object scopeRaw = getClaim(() -> claims.getClaimValue(scopeClaimName), scopeClaimName); + Collection scopeRawCollection; + + if (scopeRaw instanceof String) + scopeRawCollection = Collections.singletonList((String) scopeRaw); + else if (scopeRaw instanceof Collection) + scopeRawCollection = (Collection) scopeRaw; + else + scopeRawCollection = Collections.emptySet(); + + NumericDate expirationRaw = getClaim(claims::getExpirationTime, ReservedClaimNames.EXPIRATION_TIME); + String subRaw = getClaim(() -> claims.getStringClaimValue(subClaimName), subClaimName); + NumericDate issuedAtRaw = getClaim(claims::getIssuedAt, ReservedClaimNames.ISSUED_AT); + + Set scopes = ClaimValidationUtils.validateScopes(scopeClaimName, scopeRawCollection); + long expiration = ClaimValidationUtils.validateExpiration(ReservedClaimNames.EXPIRATION_TIME, + expirationRaw != null ? expirationRaw.getValueInMillis() : null); + String sub = ClaimValidationUtils.validateSubject(subClaimName, subRaw); + Long issuedAt = ClaimValidationUtils.validateIssuedAt(ReservedClaimNames.ISSUED_AT, + issuedAtRaw != null ? issuedAtRaw.getValueInMillis() : null); + + return new BasicOAuthBearerToken(accessToken, + scopes, + expiration, + sub, + issuedAt); + } + + private T getClaim(ClaimSupplier supplier, String claimName) throws ValidateException { + try { + T value = supplier.get(); + log.debug("getClaim - {}: {}", claimName, value); + return value; + } catch (MalformedClaimException e) { + throw new ValidateException(String.format("Could not extract the '%s' claim from the access token", claimName), e); + } + } + + public interface ClaimSupplier { + + T get() throws MalformedClaimException; + + } + +} diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java index 85ad53246beda..0422045fc029d 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactory.java @@ -17,71 +17,55 @@ package org.apache.kafka.common.security.oauthbearer.internals.secured; -import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; import org.apache.kafka.common.utils.Time; import org.jose4j.http.Get; import org.jose4j.jwk.HttpsJwks; -import org.jose4j.jws.JsonWebSignature; -import org.jose4j.jwx.JsonWebStructure; -import org.jose4j.lang.UnresolvableKeyException; -import java.io.IOException; import java.net.URL; -import java.security.Key; -import java.util.HashMap; -import java.util.List; +import java.nio.file.Path; import java.util.Locale; import java.util.Map; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; import javax.net.ssl.SSLSocketFactory; -import javax.security.auth.login.AppConfigurationEntry; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL; -/** - * Because a {@link CloseableVerificationKeyResolver} instance can spawn threads and issue - * HTTP(S) calls ({@link RefreshingHttpsJwksVerificationKeyResolver}), we only want to create - * a new instance for each particular set of configuration. Because each set of configuration - * may have multiple instances, we want to reuse the single instance. - */ public class VerificationKeyResolverFactory { - private static final Map CACHE = new HashMap<>(); - - public static synchronized CloseableVerificationKeyResolver get(Map configs, - String saslMechanism, - List jaasConfigEntries) { - VerificationKeyResolverKey key = new VerificationKeyResolverKey(configs, saslMechanism, jaasConfigEntries); - - return CACHE.computeIfAbsent(key, k -> - new RefCountingVerificationKeyResolver( - create( - configs, - saslMechanism, - jaasConfigEntries - ) - ) - ); + /** + * Create an {@link AccessTokenRetriever} from the given + * {@link org.apache.kafka.common.config.SaslConfigs}. + * + * Note: the returned CloseableVerificationKeyResolver is not + * initialized here and must be done by the caller. + * + * Primarily exposed here for unit testing. + * + * @param configs SASL configuration + * + * @return Non-null {@link CloseableVerificationKeyResolver} + */ + public static CloseableVerificationKeyResolver create(Map configs, + Map jaasConfig) { + return create(configs, null, jaasConfig); } - static CloseableVerificationKeyResolver create(Map configs, - String saslMechanism, - List jaasConfigEntries) { + public static CloseableVerificationKeyResolver create(Map configs, + String saslMechanism, + Map jaasConfig) { ConfigurationUtils cu = new ConfigurationUtils(configs, saslMechanism); URL jwksEndpointUrl = cu.validateUrl(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); - CloseableVerificationKeyResolver resolver; if (jwksEndpointUrl.getProtocol().toLowerCase(Locale.ROOT).equals("file")) { - resolver = new JwksFileVerificationKeyResolver(); + Path p = cu.validateFile(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL); + return new JwksFileVerificationKeyResolver(p); } else { long refreshIntervalMs = cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS, true, 0L); - JaasOptionsUtils jou = new JaasOptionsUtils(saslMechanism, jaasConfigEntries); + JaasOptionsUtils jou = new JaasOptionsUtils(jaasConfig); SSLSocketFactory sslSocketFactory = null; if (jou.shouldCreateSSLSocketFactory(jwksEndpointUrl)) @@ -101,87 +85,8 @@ static CloseableVerificationKeyResolver create(Map configs, refreshIntervalMs, cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS), cu.validateLong(SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS)); - resolver = new RefreshingHttpsJwksVerificationKeyResolver(refreshingHttpsJwks); - } - - resolver.configure(configs, saslMechanism, jaasConfigEntries); - return resolver; - } - - /** - * VkrKey is a simple structure which encapsulates the criteria for different - * sets of configuration. This will allow us to use this object as a key in a {@link Map} - * to keep a single instance per key. - */ - - private static class VerificationKeyResolverKey { - - private final Map configs; - - private final String saslMechanism; - - private final Map moduleOptions; - - public VerificationKeyResolverKey(Map configs, - String saslMechanism, - List jaasConfigEntries) { - this.configs = configs; - this.saslMechanism = saslMechanism; - this.moduleOptions = JaasOptionsUtils.getOptions(saslMechanism, jaasConfigEntries); - } - - @Override - public boolean equals(Object o) { - if (this == o) { - return true; - } - - if (o == null || getClass() != o.getClass()) { - return false; - } - - VerificationKeyResolverKey that = (VerificationKeyResolverKey) o; - return configs.equals(that.configs) && saslMechanism.equals(that.saslMechanism) && moduleOptions.equals(that.moduleOptions); - } - - @Override - public int hashCode() { - return Objects.hash(configs, saslMechanism, moduleOptions); + return new RefreshingHttpsJwksVerificationKeyResolver(refreshingHttpsJwks); } } - /** - * RefCountingVerificationKeyResolver allows us to share a single - * {@link CloseableVerificationKeyResolver} instance between multiple - * {@link AuthenticateCallbackHandler} instances and perform the lifecycle methods the - * appropriate number of times. - */ - - private static class RefCountingVerificationKeyResolver implements CloseableVerificationKeyResolver { - - private final CloseableVerificationKeyResolver delegate; - - private final AtomicInteger count = new AtomicInteger(0); - - public RefCountingVerificationKeyResolver(CloseableVerificationKeyResolver delegate) { - this.delegate = delegate; - } - - @Override - public Key resolveKey(JsonWebSignature jws, List nestingContext) throws UnresolvableKeyException { - return delegate.resolveKey(jws, nestingContext); - } - - @Override - public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { - if (count.incrementAndGet() == 1) - delegate.configure(configs, saslMechanism, jaasConfigEntries); - } - - @Override - public void close() throws IOException { - if (count.decrementAndGet() == 0) - delegate.close(); - } - } } \ No newline at end of file diff --git a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java index bea463a8d145a..6b1148e291b4c 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java +++ b/clients/src/main/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredJws.java @@ -30,6 +30,7 @@ import java.util.Collections; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Map.Entry; @@ -294,11 +295,12 @@ public String subject() throws OAuthBearerIllegalTokenException { public static Map toMap(String split) throws OAuthBearerIllegalTokenException { Map retval = new HashMap<>(); try { - byte[] decode = Base64.getUrlDecoder().decode(split); + byte[] decode = Base64.getDecoder().decode(split); JsonNode jsonNode = new ObjectMapper().readTree(decode); if (jsonNode == null) throw new OAuthBearerIllegalTokenException(OAuthBearerValidationResult.newFailure("malformed JSON")); - for (Entry entry : jsonNode.properties()) { + for (Iterator> iterator = jsonNode.fields(); iterator.hasNext();) { + Entry entry = iterator.next(); retval.put(entry.getKey(), convert(entry.getValue())); } return Collections.unmodifiableMap(retval); diff --git a/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java index 999862160f57f..6dcb6d62b1621 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/plain/internals/PlainSaslServer.java @@ -21,6 +21,7 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.List; import java.util.Map; @@ -161,14 +162,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("PLAIN supports neither integrity nor privacy"); + return Arrays.copyOfRange(incoming, offset, offset + len); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!complete) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("PLAIN supports neither integrity nor privacy"); + return Arrays.copyOfRange(outgoing, offset, offset + len); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java index 9afcd6c07e37b..852875b9e5fe7 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java +++ b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslClient.java @@ -162,14 +162,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); + return Arrays.copyOfRange(incoming, offset, offset + len); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); + return Arrays.copyOfRange(outgoing, offset, offset + len); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java index e8576e03798de..2be4c4e24b6f4 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java +++ b/clients/src/main/java/org/apache/kafka/common/security/scram/internals/ScramSaslServer.java @@ -35,6 +35,7 @@ import java.security.InvalidKeyException; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.Set; @@ -204,14 +205,14 @@ public boolean isComplete() { public byte[] unwrap(byte[] incoming, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); + return Arrays.copyOfRange(incoming, offset, offset + len); } @Override public byte[] wrap(byte[] outgoing, int offset, int len) { if (!isComplete()) throw new IllegalStateException("Authentication exchange has not completed"); - throw new IllegalStateException("SCRAM supports neither integrity nor privacy"); + return Arrays.copyOfRange(outgoing, offset, offset + len); } @Override diff --git a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java index a5068dc83abce..0a3d587df9060 100644 --- a/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java +++ b/clients/src/main/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactory.java @@ -137,14 +137,14 @@ public void configure(Map configs) { SecurityUtils.addConfiguredSecurityProviders(this.configs); List cipherSuitesList = (List) configs.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG); - if (!cipherSuitesList.isEmpty()) { + if (cipherSuitesList != null && !cipherSuitesList.isEmpty()) { this.cipherSuites = cipherSuitesList.toArray(new String[0]); } else { this.cipherSuites = null; } List enabledProtocolsList = (List) configs.get(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG); - if (!enabledProtocolsList.isEmpty()) { + if (enabledProtocolsList != null && !enabledProtocolsList.isEmpty()) { this.enabledProtocols = enabledProtocolsList.toArray(new String[0]); } else { this.enabledProtocols = null; diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java index d48574b54fabf..ad8d2bfe4d4f6 100644 --- a/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java +++ b/clients/src/main/java/org/apache/kafka/common/serialization/Deserializer.java @@ -25,13 +25,10 @@ /** * An interface for converting bytes to objects. - * A class that implements this interface is expected to have a constructor with no parameters. * - *

        This interface can be combined with {@link org.apache.kafka.common.ClusterResourceListener ClusterResourceListener} - * to receive cluster metadata once it's available, as well as {@link org.apache.kafka.common.metrics.Monitorable Monitorable} - * to enable the deserializer to register metrics. For the latter, the following tags are automatically added to - * all metrics registered: {@code config} set to either {@code key.deserializer} or {@code value.deserializer}, - * and {@code class} set to the deserializer class name. + * A class that implements this interface is expected to have a constructor with no parameters. + *

        + * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. * * @param Type to be deserialized into. */ @@ -39,11 +36,8 @@ public interface Deserializer extends Closeable { /** * Configure this class. - * - * @param configs - * configs in key/value pairs - * @param isKey - * whether the deserializer is used for the key or the value + * @param configs configs in key/value pairs + * @param isKey whether is for key or value */ default void configure(Map configs, boolean isKey) { // intentionally left blank @@ -51,35 +45,18 @@ default void configure(Map configs, boolean isKey) { /** * Deserialize a record value from a byte array into a value or object. - * - *

        It is recommended to deserialize a {@code null} byte array to a {@code null} object. - * - * @param topic - * topic associated with the data - * @param data - * serialized bytes; may be {@code null} - * - * @return deserialized typed data; may be {@code null} + * @param topic topic associated with the data + * @param data serialized bytes; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. + * @return deserialized typed data; may be null */ T deserialize(String topic, byte[] data); /** * Deserialize a record value from a byte array into a value or object. - * - *

        It is recommended to deserialize a {@code null} byte array to a {@code null} object. - * - *

        Note that the passed in {@link Headers} may be empty, but never {@code null}. - * The implementation is allowed to modify the passed in headers, as a side effect of deserialization. - * It is considered best practice to not delete or modify existing headers, but rather only add new ones. - * - * @param topic - * topic associated with the data - * @param headers - * headers associated with the record - * @param data - * serialized bytes; may be {@code null} - * - * @return deserialized typed data; may be {@code null} + * @param topic topic associated with the data + * @param headers headers associated with the record; may be empty. + * @param data serialized bytes; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. + * @return deserialized typed data; may be null */ default T deserialize(String topic, Headers headers, byte[] data) { return deserialize(topic, data); @@ -95,20 +72,10 @@ default T deserialize(String topic, Headers headers, byte[] data) { *

        Similarly, if this method is overridden, the implementation cannot make any assumptions about the * passed in {@link ByteBuffer} either. * - *

        It is recommended to deserialize a {@code null} {@link ByteBuffer} to a {@code null} object. - * - *

        Note that the passed in {@link Headers} may be empty, but never {@code null}. - * The implementation is allowed to modify the passed in headers, as a side effect of deserialization. - * It is considered best practice to not delete or modify existing headers, but rather only add new ones. - * - * @param topic - * topic associated with the data - * @param headers - * headers associated with the record - * @param data - * serialized ByteBuffer; may be {@code null} - * - * @return deserialized typed data; may be {@code null} + * @param topic topic associated with the data + * @param headers headers associated with the record; may be empty. + * @param data serialized ByteBuffer; may be null; implementations are recommended to handle null by returning a value or null rather than throwing an exception. + * @return deserialized typed data; may be null */ default T deserialize(String topic, Headers headers, ByteBuffer data) { return deserialize(topic, headers, Utils.toNullableArray(data)); @@ -116,8 +83,8 @@ default T deserialize(String topic, Headers headers, ByteBuffer data) { /** * Close this deserializer. - * - *

        This method must be idempotent as it may be called multiple times. + *

        + * This method must be idempotent as it may be called multiple times. */ @Override default void close() { diff --git a/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java b/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java index 0730b71bcade1..144b5ab945ebf 100644 --- a/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java +++ b/clients/src/main/java/org/apache/kafka/common/serialization/Serializer.java @@ -23,13 +23,10 @@ /** * An interface for converting objects to bytes. - * A class that implements this interface is expected to have a constructor with no parameter. * - *

        This interface can be combined with {@link org.apache.kafka.common.ClusterResourceListener ClusterResourceListener} - * to receive cluster metadata once it's available, as well as {@link org.apache.kafka.common.metrics.Monitorable Monitorable} - * to enable the serializer to register metrics. For the latter, the following tags are automatically added to all - * metrics registered: {@code config} set to either {@code key.serializer} or {@code value.serializer}, - * and {@code class} set to the serializer class name. + * A class that implements this interface is expected to have a constructor with no parameter. + *

        + * Implement {@link org.apache.kafka.common.ClusterResourceListener} to receive cluster metadata once it's available. Please see the class documentation for ClusterResourceListener for more information. * * @param Type to be serialized from. */ @@ -37,11 +34,8 @@ public interface Serializer extends Closeable { /** * Configure this class. - * - * @param configs - * configs in key/value pairs - * @param isKey - * whether the serializer is used for the key or the value + * @param configs configs in key/value pairs + * @param isKey whether is for key or value */ default void configure(Map configs, boolean isKey) { // intentionally left blank @@ -50,34 +44,19 @@ default void configure(Map configs, boolean isKey) { /** * Convert {@code data} into a byte array. * - *

        It is recommended to serialize {@code null} data to the {@code null} byte array. - * - * @param topic - * topic associated with data - * @param data - * typed data; may be {@code null} - * - * @return serialized bytes; may be {@code null} + * @param topic topic associated with data + * @param data typed data + * @return serialized bytes */ byte[] serialize(String topic, T data); /** * Convert {@code data} into a byte array. * - *

        It is recommended to serialize {@code null} data to the {@code null} byte array. - * - *

        Note that the passed in {@link Headers} may be empty, but never {@code null}. - * The implementation is allowed to modify the passed in headers, as a side effect of serialization. - * It is considered best practice to not delete or modify existing headers, but rather only add new ones. - * - * @param topic - * topic associated with data - * @param headers - * headers associated with the record - * @param data - * typed data; may be {@code null} - * - * @return serialized bytes; may be {@code null} + * @param topic topic associated with data + * @param headers headers associated with the record + * @param data typed data + * @return serialized bytes */ default byte[] serialize(String topic, Headers headers, T data) { return serialize(topic, data); @@ -85,8 +64,8 @@ default byte[] serialize(String topic, Headers headers, T data) { /** * Close this serializer. - * - *

        This method must be idempotent as it may be called multiple times. + *

        + * This method must be idempotent as it may be called multiple times. */ @Override default void close() { diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java index bef65977be4c6..705aafaaa70db 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporter.java @@ -41,6 +41,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.io.IOException; import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collections; @@ -50,7 +51,6 @@ import java.util.Optional; import java.util.Set; import java.util.StringJoiner; -import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.locks.Condition; @@ -270,7 +270,6 @@ class DefaultClientTelemetrySender implements ClientTelemetrySender { private static final double INITIAL_PUSH_JITTER_LOWER = 0.5; private static final double INITIAL_PUSH_JITTER_UPPER = 1.5; - private final Set unsupportedCompressionTypes = ConcurrentHashMap.newKeySet(); private final ReadWriteLock lock = new ReentrantReadWriteLock(); private final Condition subscriptionLoaded = lock.writeLock().newCondition(); /* @@ -715,26 +714,12 @@ private Optional> createPushRequest(ClientTelemetrySubscription local return Optional.empty(); } - CompressionType compressionType = ClientTelemetryUtils.preferredCompressionType(localSubscription.acceptedCompressionTypes(), unsupportedCompressionTypes); + CompressionType compressionType = ClientTelemetryUtils.preferredCompressionType(localSubscription.acceptedCompressionTypes()); ByteBuffer compressedPayload; try { compressedPayload = ClientTelemetryUtils.compress(payload, compressionType); - } catch (Throwable e) { - // Distinguish between recoverable errors (NoClassDefFoundError for missing compression libs) - // and fatal errors (OutOfMemoryError, etc.) that should terminate telemetry. - if (e instanceof Error && !(e instanceof NoClassDefFoundError) && !(e.getCause() instanceof NoClassDefFoundError)) { - lock.writeLock().lock(); - try { - state = ClientTelemetryState.TERMINATED; - } finally { - lock.writeLock().unlock(); - } - log.error("Unexpected error occurred while compressing telemetry payload for compression: {}, stopping client telemetry", compressionType, e); - throw new KafkaException("Unexpected compression error", e); - } - - log.debug("Failed to compress telemetry payload for compression: {}, sending uncompressed data", compressionType, e); - unsupportedCompressionTypes.add(compressionType); + } catch (IOException e) { + log.info("Failed to compress telemetry payload for compression: {}, sending uncompressed data", compressionType); compressedPayload = ByteBuffer.wrap(payload.toByteArray()); compressionType = CompressionType.NONE; } diff --git a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java index 111b041946c6a..3c555afb3b05d 100644 --- a/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtils.java @@ -39,7 +39,6 @@ import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.function.Predicate; import io.opentelemetry.proto.metrics.v1.MetricsData; @@ -182,23 +181,13 @@ public static boolean validateRequiredResourceLabels(Map metadat return validateResourceLabel(metadata, MetricsContext.NAMESPACE); } - /** - * Determines the preferred compression type from broker-accepted types, avoiding unsupported ones. - * - * @param acceptedCompressionTypes the list of compression types accepted by the broker in order - * of preference (must not be null, use empty list if no compression is accepted) - * @param unsupportedCompressionTypes the set of compression types that should be avoided due to - * missing libraries or previous failures (must not be null) - * @return the preferred compression type to use, or {@link CompressionType#NONE} if no acceptable - * compression type is available - */ - public static CompressionType preferredCompressionType(List acceptedCompressionTypes, Set unsupportedCompressionTypes) { - // Broker is providing the compression types in order of preference. Grab the - // first one that's supported. - return acceptedCompressionTypes.stream() - .filter(t -> !unsupportedCompressionTypes.contains(t)) - .findFirst() - .orElse(CompressionType.NONE); + public static CompressionType preferredCompressionType(List acceptedCompressionTypes) { + if (acceptedCompressionTypes != null && !acceptedCompressionTypes.isEmpty()) { + // Broker is providing the compression types in order of preference. Grab the + // first one. + return acceptedCompressionTypes.get(0); + } + return CompressionType.NONE; } public static ByteBuffer compress(MetricsData metrics, CompressionType compressionType) throws IOException { diff --git a/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java b/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java index cc2d7b75f1e6d..f9ebd82ea11cf 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/AppInfoParser.java @@ -18,6 +18,7 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Gauge; +import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.slf4j.Logger; @@ -25,7 +26,6 @@ import java.io.InputStream; import java.lang.management.ManagementFactory; -import java.util.Map; import java.util.Properties; import javax.management.JMException; @@ -69,7 +69,7 @@ public static synchronized void registerAppInfo(String prefix, String id, Metric AppInfo mBean = new AppInfo(nowMs); server.registerMBean(mBean, name); - registerMetrics(metrics, mBean, id); // prefix will be added later by JmxReporter + registerMetrics(metrics, mBean); // prefix will be added later by JmxReporter } catch (JMException e) { log.warn("Error registering AppInfo mbean", e); } @@ -82,7 +82,7 @@ public static synchronized void unregisterAppInfo(String prefix, String id, Metr if (server.isRegistered(name)) server.unregisterMBean(name); - unregisterMetrics(metrics, id); + unregisterMetrics(metrics); } catch (JMException e) { log.warn("Error unregistering AppInfo mbean", e); } finally { @@ -90,36 +90,23 @@ public static synchronized void unregisterAppInfo(String prefix, String id, Metr } } - private static MetricName metricName(Metrics metrics, String name, Map tags) { - return metrics.metricName(name, "app-info", "Metric indicating " + name, tags); + private static MetricName metricName(Metrics metrics, String name) { + return metrics.metricName(name, "app-info", "Metric indicating " + name); } - private static void registerMetrics(Metrics metrics, AppInfo appInfo, String clientId) { - if (metrics == null) return; - // Most Kafka clients (producer/consumer/admin) set the client-id tag in the metrics config. - // Although we don’t explicitly parse client-id here, these metrics are automatically tagged with client-id. - metrics.addMetric(metricName(metrics, "version", Map.of()), (Gauge) (config, now) -> appInfo.getVersion()); - metrics.addMetric(metricName(metrics, "commit-id", Map.of()), (Gauge) (config, now) -> appInfo.getCommitId()); - metrics.addMetric(metricName(metrics, "start-time-ms", Map.of()), (Gauge) (config, now) -> appInfo.getStartTimeMs()); - // MirrorMaker/Worker doesn't set client-id tag into the metrics config, so we need to set it here. - if (!metrics.config().tags().containsKey("client-id") && clientId != null) { - metrics.addMetric(metricName(metrics, "version", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getVersion()); - metrics.addMetric(metricName(metrics, "commit-id", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getCommitId()); - metrics.addMetric(metricName(metrics, "start-time-ms", Map.of("client-id", clientId)), (Gauge) (config, now) -> appInfo.getStartTimeMs()); + private static void registerMetrics(Metrics metrics, AppInfo appInfo) { + if (metrics != null) { + metrics.addMetric(metricName(metrics, "version"), new ImmutableValue<>(appInfo.getVersion())); + metrics.addMetric(metricName(metrics, "commit-id"), new ImmutableValue<>(appInfo.getCommitId())); + metrics.addMetric(metricName(metrics, "start-time-ms"), new ImmutableValue<>(appInfo.getStartTimeMs())); } } - private static void unregisterMetrics(Metrics metrics, String clientId) { - if (metrics == null) return; - - metrics.removeMetric(metricName(metrics, "version", Map.of())); - metrics.removeMetric(metricName(metrics, "commit-id", Map.of())); - metrics.removeMetric(metricName(metrics, "start-time-ms", Map.of())); - - if (!metrics.config().tags().containsKey("client-id") && clientId != null) { - metrics.removeMetric(metricName(metrics, "version", Map.of("client-id", clientId))); - metrics.removeMetric(metricName(metrics, "commit-id", Map.of("client-id", clientId))); - metrics.removeMetric(metricName(metrics, "start-time-ms", Map.of("client-id", clientId))); + private static void unregisterMetrics(Metrics metrics) { + if (metrics != null) { + metrics.removeMetric(metricName(metrics, "version")); + metrics.removeMetric(metricName(metrics, "commit-id")); + metrics.removeMetric(metricName(metrics, "start-time-ms")); } } @@ -156,4 +143,17 @@ public Long getStartTimeMs() { } } + + static class ImmutableValue implements Gauge { + private final T value; + + public ImmutableValue(T value) { + this.value = value; + } + + @Override + public T value(MetricConfig config, long now) { + return value; + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java index ba51a8dd49114..a760f817b815f 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ByteBufferUnmapper.java @@ -40,7 +40,7 @@ public final class ByteBufferUnmapper { private static final RuntimeException UNMAP_NOT_SUPPORTED_EXCEPTION; static { - MethodHandle unmap = null; + Object unmap = null; RuntimeException exception = null; try { unmap = lookupUnmapMethodHandle(); @@ -48,7 +48,7 @@ public final class ByteBufferUnmapper { exception = e; } if (unmap != null) { - UNMAP = unmap; + UNMAP = (MethodHandle) unmap; UNMAP_NOT_SUPPORTED_EXCEPTION = null; } else { UNMAP = null; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java b/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java index 546a2fdac3322..f3ebfddb1a31d 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Checksums.java @@ -16,15 +16,34 @@ */ package org.apache.kafka.common.utils; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.nio.ByteBuffer; import java.util.zip.Checksum; /** * Utility methods for `Checksum` instances. * + * Implementation note: we can add methods to our implementations of CRC32 and CRC32C, but we cannot do the same for + * the Java implementations (we prefer the Java 9 implementation of CRC32C if available). A utility class is the + * simplest way to add methods that are useful for all Checksum implementations. + * * NOTE: This class is intended for INTERNAL usage only within Kafka. */ public final class Checksums { + private static final MethodHandle BYTE_BUFFER_UPDATE; + + static { + MethodHandle byteBufferUpdate = null; + try { + byteBufferUpdate = MethodHandles.publicLookup().findVirtual(Checksum.class, "update", + MethodType.methodType(void.class, ByteBuffer.class)); + } catch (Throwable t) { + handleUpdateThrowable(t); + } + BYTE_BUFFER_UPDATE = byteBufferUpdate; + } private Checksums() { } @@ -44,7 +63,7 @@ public static void update(Checksum checksum, ByteBuffer buffer, int length) { public static void update(Checksum checksum, ByteBuffer buffer, int offset, int length) { if (buffer.hasArray()) { checksum.update(buffer.array(), buffer.position() + buffer.arrayOffset() + offset, length); - } else if (buffer.isDirect()) { + } else if (BYTE_BUFFER_UPDATE != null && buffer.isDirect()) { final int oldPosition = buffer.position(); final int oldLimit = buffer.limit(); try { @@ -52,7 +71,9 @@ public static void update(Checksum checksum, ByteBuffer buffer, int offset, int final int start = oldPosition + offset; buffer.limit(start + length); buffer.position(start); - checksum.update(buffer); + BYTE_BUFFER_UPDATE.invokeExact(checksum, buffer); + } catch (Throwable t) { + handleUpdateThrowable(t); } finally { // reset buffer's offsets buffer.limit(oldLimit); @@ -66,6 +87,16 @@ public static void update(Checksum checksum, ByteBuffer buffer, int offset, int } } } + + private static void handleUpdateThrowable(Throwable t) { + if (t instanceof RuntimeException) { + throw (RuntimeException) t; + } + if (t instanceof Error) { + throw (Error) t; + } + throw new IllegalStateException(t); + } public static void updateInt(Checksum checksum, int input) { checksum.update((byte) (input >> 24)); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java index ff422bbe53826..9a891e0846384 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ChildFirstClassLoader.java @@ -48,7 +48,7 @@ public ChildFirstClassLoader(String classPath, ClassLoader parent) { private static URL[] classpathToURLs(String classPath) { ArrayList urls = new ArrayList<>(); for (String path : classPath.split(File.pathSeparator)) { - if (path.trim().isEmpty()) + if (path == null || path.trim().isEmpty()) continue; File file = new File(path); diff --git a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java index 2c3702af13167..397ab623b527b 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/ConfigUtils.java @@ -75,8 +75,7 @@ public static boolean getBoolean(final Map configs, final String } else if (value instanceof String) { return Boolean.parseBoolean((String) value); } else { - log.error("Invalid value ({}) on configuration '{}'. The default value '{}' will be used instead. Please specify a true/false value.", - value, key, defaultValue); + log.error("Invalid value (" + value + ") on configuration '" + key + "'. The default value '" + defaultValue + "' will be used instead. Please specify a true/false value."); return defaultValue; } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java index 09c0e518bbf70..49ad34d19648a 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Crc32C.java @@ -17,17 +17,35 @@ package org.apache.kafka.common.utils; +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodType; import java.nio.ByteBuffer; -import java.util.zip.CRC32C; import java.util.zip.Checksum; /** * A class that can be used to compute the CRC32C (Castagnoli) of a ByteBuffer or array of bytes. * + * We use java.util.zip.CRC32C (introduced in Java 9). + * java.util.zip.CRC32C is significantly faster on reasonably modern CPUs as it uses the CRC32 instruction introduced + * in SSE4.2. + * * NOTE: This class is intended for INTERNAL usage only within Kafka. */ public final class Crc32C { + private static final MethodHandle CRC32C_CONSTRUCTOR; + + static { + try { + Class cls = Class.forName("java.util.zip.CRC32C"); + CRC32C_CONSTRUCTOR = MethodHandles.publicLookup().findConstructor(cls, MethodType.methodType(void.class)); + } catch (ReflectiveOperationException e) { + // Should never happen + throw new RuntimeException(e); + } + } + private Crc32C() {} /** @@ -39,7 +57,7 @@ private Crc32C() {} * @return The CRC32C */ public static long compute(byte[] bytes, int offset, int size) { - Checksum crc = new CRC32C(); + Checksum crc = create(); crc.update(bytes, offset, size); return crc.getValue(); } @@ -53,8 +71,17 @@ public static long compute(byte[] bytes, int offset, int size) { * @return The CRC32C */ public static long compute(ByteBuffer buffer, int offset, int size) { - Checksum crc = new CRC32C(); + Checksum crc = create(); Checksums.update(crc, buffer, offset, size); return crc.getValue(); } + + public static Checksum create() { + try { + return (Checksum) CRC32C_CONSTRUCTOR.invoke(); + } catch (Throwable throwable) { + // Should never happen + throw new RuntimeException(throwable); + } + } } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java b/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java new file mode 100644 index 0000000000000..4e28bb35c669c --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/utils/FlattenedIterator.java @@ -0,0 +1,45 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.utils; + +import java.util.Iterator; +import java.util.function.Function; + +/** + * Provides a flattened iterator over the inner elements of an outer iterator. + */ +public final class FlattenedIterator extends AbstractIterator { + private final Iterator outerIterator; + private final Function> innerIteratorFunction; + private Iterator innerIterator; + + public FlattenedIterator(Iterator outerIterator, Function> innerIteratorFunction) { + this.outerIterator = outerIterator; + this.innerIteratorFunction = innerIteratorFunction; + } + + @Override + protected I makeNext() { + while (innerIterator == null || !innerIterator.hasNext()) { + if (outerIterator.hasNext()) + innerIterator = innerIteratorFunction.apply(outerIterator.next()); + else + return allDone(); + } + return innerIterator.next(); + } +} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/LogContext.java b/clients/src/main/java/org/apache/kafka/common/utils/LogContext.java index 0992cd2dfdc3b..10acf32ccd47f 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/LogContext.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/LogContext.java @@ -43,14 +43,7 @@ public LogContext() { } public Logger logger(Class clazz) { - return logger(LoggerFactory.getLogger(clazz)); - } - - public Logger logger(String clazz) { - return logger(LoggerFactory.getLogger(clazz)); - } - - private Logger logger(Logger logger) { + Logger logger = LoggerFactory.getLogger(clazz); if (logger instanceof LocationAwareLogger) { return new LocationAwareKafkaLogger(logPrefix, (LocationAwareLogger) logger); } else { diff --git a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java index e0a8f89e84819..824a1c4ddb0d3 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/LoggingSignalHandler.java @@ -69,7 +69,7 @@ public void register() throws ReflectiveOperationException { for (String signal : SIGNALS) { register(signal, jvmSignalHandlers); } - log.info("Registered signal handlers for {}", String.join(", ", SIGNALS)); + log.info("Registered signal handlers for " + String.join(", ", SIGNALS)); } private Object createSignalHandler(final Map jvmSignalHandlers) { @@ -97,7 +97,7 @@ public Object invoke(Object proxy, Method method, Object[] args) throws Throwabl return null; } }; - return Proxy.newProxyInstance(Utils.getContextOrKafkaClassLoader(), new Class[] {signalHandlerClass}, + return Proxy.newProxyInstance(Utils.getContextOrKafkaClassLoader(), new Class[] {signalHandlerClass}, invocationHandler); } diff --git a/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java new file mode 100644 index 0000000000000..fb96901b309de --- /dev/null +++ b/clients/src/main/java/org/apache/kafka/common/utils/PureJavaCrc32C.java @@ -0,0 +1,645 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +/* + * Some portions of this file Copyright (c) 2004-2006 Intel Corporation and + * licensed under the BSD license. + */ +package org.apache.kafka.common.utils; + +import java.util.zip.Checksum; + +/** + * This class was taken from Hadoop: org.apache.hadoop.util.PureJavaCrc32C. + * + * A pure-java implementation of the CRC32 checksum that uses + * the CRC32-C polynomial, the same polynomial used by iSCSI + * and implemented on many Intel chipsets supporting SSE4.2. + * + * NOTE: This class is intended for INTERNAL usage only within Kafka. + */ +// The exact version that was retrieved from Hadoop: +// https://github.com/apache/hadoop/blob/224de4f92c222a7b915e9c5d6bdd1a4a3fcbcf31/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PureJavaCrc32C.java +public class PureJavaCrc32C implements Checksum { + + /** the current CRC value, bit-flipped */ + private int crc; + + public PureJavaCrc32C() { + reset(); + } + + @Override + public long getValue() { + long ret = crc; + return (~ret) & 0xffffffffL; + } + + @Override + public final void reset() { + crc = 0xffffffff; + } + + @SuppressWarnings("fallthrough") + @Override + public void update(byte[] b, int off, int len) { + int localCrc = crc; + + while (len > 7) { + final int c0 = (b[off + 0] ^ localCrc) & 0xff; + final int c1 = (b[off + 1] ^ (localCrc >>>= 8)) & 0xff; + final int c2 = (b[off + 2] ^ (localCrc >>>= 8)) & 0xff; + final int c3 = (b[off + 3] ^ (localCrc >>>= 8)) & 0xff; + localCrc = (T[T8_7_START + c0] ^ T[T8_6_START + c1]) + ^ (T[T8_5_START + c2] ^ T[T8_4_START + c3]); + + final int c4 = b[off + 4] & 0xff; + final int c5 = b[off + 5] & 0xff; + final int c6 = b[off + 6] & 0xff; + final int c7 = b[off + 7] & 0xff; + + localCrc ^= (T[T8_3_START + c4] ^ T[T8_2_START + c5]) + ^ (T[T8_1_START + c6] ^ T[T8_0_START + c7]); + + off += 8; + len -= 8; + } + + /* loop unroll - duff's device style */ + switch (len) { + case 7: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 6: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 5: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 4: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 3: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 2: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + case 1: + localCrc = (localCrc >>> 8) ^ T[T8_0_START + ((localCrc ^ b[off++]) & 0xff)]; + default: + /* nothing */ + } + + // Publish crc out to object + crc = localCrc; + } + + @Override + public final void update(int b) { + crc = (crc >>> 8) ^ T[T8_0_START + ((crc ^ b) & 0xff)]; + } + + // CRC polynomial tables generated by: + // java -cp build/test/classes/:build/classes/ \ + // org.apache.hadoop.util.TestPureJavaCrc32\$Table 82F63B78 + + private static final int T8_0_START = 0 * 256; + private static final int T8_1_START = 1 * 256; + private static final int T8_2_START = 2 * 256; + private static final int T8_3_START = 3 * 256; + private static final int T8_4_START = 4 * 256; + private static final int T8_5_START = 5 * 256; + private static final int T8_6_START = 6 * 256; + private static final int T8_7_START = 7 * 256; + + private static final int[] T = new int[]{ + /* T8_0 */ + 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, + 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, + 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, + 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, + 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, + 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, + 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, + 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, + 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, + 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, + 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, + 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, + 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, + 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, + 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, + 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, + 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, + 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, + 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, + 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, + 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, + 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, + 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, + 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, + 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, + 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, + 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, + 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, + 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, + 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, + 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, + 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, + 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, + 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, + 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, + 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, + 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, + 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, + 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, + 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, + 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, + 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, + 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, + 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, + 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, + 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, + 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, + 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, + 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, + 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, + 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, + 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, + 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, + 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, + 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, + 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, + 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, + 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, + 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, + 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, + 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, + 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, + 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, + 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, + /* T8_1 */ + 0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899, + 0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945, + 0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21, + 0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD, + 0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918, + 0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4, + 0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0, + 0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C, + 0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B, + 0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47, + 0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823, + 0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF, + 0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A, + 0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6, + 0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2, + 0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E, + 0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D, + 0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41, + 0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25, + 0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9, + 0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C, + 0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0, + 0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4, + 0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78, + 0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F, + 0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43, + 0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27, + 0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB, + 0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E, + 0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2, + 0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6, + 0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A, + 0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260, + 0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC, + 0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8, + 0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004, + 0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1, + 0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D, + 0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059, + 0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185, + 0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162, + 0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE, + 0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA, + 0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306, + 0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3, + 0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F, + 0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B, + 0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287, + 0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464, + 0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8, + 0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC, + 0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600, + 0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5, + 0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439, + 0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D, + 0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781, + 0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766, + 0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA, + 0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE, + 0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502, + 0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7, + 0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B, + 0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F, + 0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483, + /* T8_2 */ + 0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073, + 0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469, + 0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6, + 0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC, + 0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9, + 0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3, + 0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C, + 0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726, + 0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67, + 0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D, + 0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2, + 0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8, + 0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED, + 0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7, + 0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828, + 0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32, + 0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA, + 0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0, + 0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F, + 0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75, + 0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20, + 0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A, + 0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5, + 0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF, + 0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE, + 0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4, + 0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B, + 0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161, + 0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634, + 0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E, + 0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1, + 0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB, + 0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730, + 0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A, + 0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5, + 0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF, + 0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA, + 0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0, + 0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F, + 0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065, + 0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24, + 0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E, + 0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1, + 0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB, + 0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE, + 0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4, + 0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B, + 0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71, + 0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9, + 0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3, + 0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C, + 0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36, + 0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63, + 0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79, + 0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6, + 0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC, + 0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD, + 0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7, + 0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238, + 0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622, + 0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177, + 0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D, + 0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2, + 0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8, + /* T8_3 */ + 0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939, + 0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA, + 0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF, + 0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C, + 0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804, + 0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7, + 0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2, + 0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11, + 0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2, + 0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41, + 0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54, + 0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7, + 0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F, + 0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C, + 0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69, + 0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A, + 0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE, + 0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D, + 0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538, + 0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB, + 0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3, + 0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610, + 0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405, + 0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6, + 0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255, + 0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6, + 0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3, + 0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040, + 0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368, + 0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B, + 0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E, + 0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D, + 0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006, + 0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5, + 0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0, + 0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213, + 0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B, + 0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8, + 0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD, + 0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E, + 0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D, + 0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E, + 0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B, + 0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698, + 0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0, + 0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443, + 0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656, + 0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5, + 0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1, + 0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12, + 0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07, + 0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4, + 0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC, + 0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F, + 0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A, + 0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9, + 0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A, + 0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99, + 0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C, + 0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F, + 0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57, + 0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4, + 0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1, + 0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842, + /* T8_4 */ + 0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4, + 0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44, + 0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65, + 0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5, + 0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127, + 0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97, + 0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6, + 0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406, + 0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3, + 0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13, + 0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32, + 0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082, + 0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470, + 0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0, + 0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1, + 0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151, + 0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A, + 0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA, + 0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB, + 0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B, + 0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89, + 0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539, + 0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018, + 0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8, + 0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D, + 0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD, + 0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C, + 0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C, + 0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE, + 0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E, + 0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F, + 0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF, + 0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8, + 0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18, + 0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39, + 0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089, + 0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B, + 0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB, + 0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA, + 0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A, + 0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF, + 0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F, + 0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E, + 0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE, + 0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C, + 0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C, + 0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD, + 0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D, + 0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06, + 0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6, + 0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497, + 0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27, + 0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5, + 0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065, + 0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544, + 0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4, + 0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51, + 0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1, + 0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0, + 0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70, + 0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82, + 0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532, + 0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013, + 0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3, + /* T8_5 */ + 0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA, + 0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD, + 0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5, + 0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2, + 0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4, + 0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93, + 0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB, + 0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C, + 0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57, + 0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20, + 0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548, + 0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F, + 0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69, + 0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E, + 0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576, + 0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201, + 0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031, + 0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746, + 0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E, + 0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59, + 0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F, + 0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778, + 0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810, + 0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67, + 0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC, + 0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB, + 0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3, + 0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4, + 0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682, + 0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5, + 0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D, + 0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA, + 0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C, + 0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B, + 0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413, + 0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364, + 0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32, + 0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45, + 0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D, + 0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A, + 0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81, + 0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6, + 0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E, + 0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9, + 0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF, + 0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8, + 0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0, + 0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7, + 0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7, + 0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090, + 0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8, + 0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F, + 0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9, + 0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE, + 0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6, + 0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1, + 0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A, + 0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D, + 0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975, + 0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02, + 0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154, + 0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623, + 0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B, + 0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C, + /* T8_6 */ + 0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558, + 0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089, + 0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B, + 0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA, + 0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE, + 0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F, + 0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD, + 0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C, + 0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5, + 0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334, + 0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6, + 0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67, + 0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43, + 0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992, + 0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110, + 0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1, + 0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222, + 0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3, + 0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71, + 0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0, + 0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884, + 0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55, + 0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7, + 0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006, + 0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F, + 0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E, + 0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC, + 0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D, + 0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39, + 0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8, + 0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A, + 0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB, + 0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC, + 0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D, + 0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF, + 0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E, + 0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A, + 0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB, + 0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59, + 0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988, + 0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811, + 0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0, + 0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542, + 0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093, + 0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7, + 0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766, + 0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4, + 0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35, + 0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6, + 0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907, + 0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185, + 0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454, + 0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670, + 0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1, + 0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23, + 0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2, + 0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B, + 0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA, + 0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238, + 0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9, + 0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD, + 0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C, + 0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E, + 0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F, + /* T8_7 */ + 0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769, + 0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504, + 0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3, + 0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE, + 0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD, + 0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0, + 0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07, + 0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A, + 0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0, + 0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D, + 0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A, + 0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447, + 0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44, + 0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929, + 0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E, + 0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3, + 0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B, + 0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36, + 0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881, + 0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC, + 0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF, + 0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782, + 0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135, + 0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358, + 0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2, + 0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF, + 0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18, + 0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75, + 0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076, + 0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B, + 0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC, + 0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1, + 0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D, + 0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360, + 0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7, + 0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA, + 0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9, + 0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4, + 0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63, + 0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E, + 0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494, + 0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9, + 0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E, + 0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223, + 0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20, + 0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D, + 0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA, + 0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97, + 0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F, + 0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852, + 0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5, + 0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88, + 0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B, + 0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6, + 0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751, + 0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C, + 0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6, + 0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB, + 0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C, + 0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911, + 0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612, + 0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F, + 0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8, + 0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5 + }; +} diff --git a/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java b/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java index 0c8876ebd6a6a..47a280fee5808 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/SecurityUtils.java @@ -33,7 +33,7 @@ public class SecurityUtils { - private static final Logger LOGGER = LoggerFactory.getLogger(SecurityUtils.class); + private static final Logger LOGGER = LoggerFactory.getLogger(SecurityConfig.class); private static final Map NAME_TO_RESOURCE_TYPES; private static final Map NAME_TO_OPERATIONS; diff --git a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java index dc7b0e7625a90..c826f90558483 100644 --- a/clients/src/main/java/org/apache/kafka/common/utils/Utils.java +++ b/clients/src/main/java/org/apache/kafka/common/utils/Utils.java @@ -64,7 +64,6 @@ import java.util.Collections; import java.util.Date; import java.util.EnumSet; -import java.util.Enumeration; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -101,7 +100,7 @@ private Utils() {} private static final Pattern VALID_HOST_CHARACTERS = Pattern.compile("([0-9a-zA-Z\\-%._:]*)"); - // Prints up to 2 decimal digits. Used for human-readable printing + // Prints up to 2 decimal digits. Used for human readable printing private static final DecimalFormat TWO_DIGIT_FORMAT = new DecimalFormat("0.##", DecimalFormatSymbols.getInstance(Locale.ENGLISH)); @@ -347,7 +346,7 @@ public static byte[] copyArray(byte[] src) { * Compares two character arrays for equality using a constant-time algorithm, which is needed * for comparing passwords. Two arrays are equal if they have the same length and all * characters at corresponding positions are equal. - *

        + * * All characters in the first array are examined to determine equality. * The calculation time depends only on the length of this first character array; it does not * depend on the length of the second character array or the contents of either array. @@ -573,12 +572,9 @@ public static String formatAddress(String host, Integer port) { } /** - * Formats a byte value into a human-readable string with an appropriate unit - * (e.g., "3.2 KB", "1.5 MB", "2.1 GB"). The format includes two decimal places. - * - * @param bytes the size in bytes - * @return a string representing the size with the appropriate unit (e.g., "3.2 KB", "1.5 MB"). - * If the value is negative or too large, the input is returned as a string (e.g., "-500", "999999999999999"). + * Formats a byte number as a human-readable String ("3.2 MB") + * @param bytes some size in bytes + * @return */ public static String formatBytes(long bytes) { if (bytes < 0) { @@ -619,7 +615,7 @@ public static String mkString(Map map, String begin, String end, /** * Converts an extensions string into a {@code Map}. - *

        + * * Example: * {@code parseMap("key=hey,keyTwo=hi,keyThree=hello", "=", ",") => { key: "hey", keyTwo: "hi", keyThree: "hello" }} * @@ -857,7 +853,7 @@ public static Properties mkObjectProperties(final Map properties public static void delete(final File rootFile) throws IOException { if (rootFile == null) return; - Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor<>() { + Files.walkFileTree(rootFile.toPath(), new SimpleFileVisitor() { @Override public FileVisitResult visitFileFailed(Path path, IOException exc) throws IOException { if (exc instanceof NoSuchFileException) { @@ -892,12 +888,9 @@ public FileVisitResult postVisitDirectory(Path path, IOException exc) throws IOE } /** - * Returns an empty list if the provided list is null, otherwise returns the list itself. - *

        - * This method is useful for avoiding {@code NullPointerException} when working with potentially null lists. - * - * @param other the list to check for null - * @return an empty list if the provided list is null, otherwise the original list + * Returns an empty collection if this list is null + * @param other + * @return */ public static List safe(List other) { return other == null ? Collections.emptyList() : other; @@ -913,7 +906,7 @@ public static ClassLoader getKafkaClassLoader() { /** * Get the Context ClassLoader on this thread or, if not present, the ClassLoader that * loaded Kafka. - *

        + * * This should be used whenever passing a ClassLoader to Class.forName */ public static ClassLoader getContextOrKafkaClassLoader() { @@ -964,7 +957,7 @@ public static void atomicMoveWithFallback(Path source, Path target, boolean need /** * Flushes dirty directories to guarantee crash consistency. - *

        + * * Note: We don't fsync directories on Windows OS because otherwise it'll throw AccessDeniedException (KAFKA-13391) * * @throws IOException if flushing the directory fails. @@ -1067,7 +1060,7 @@ public static void swallow(final Logger log, final Level level, final String wha /** * An {@link AutoCloseable} interface without a throws clause in the signature - *

        + * * This is used with lambda expressions in try-with-resources clauses * to avoid casting un-checked exceptions to checked exceptions unnecessarily. */ @@ -1156,7 +1149,7 @@ public static void closeAllQuietly(AtomicReference firstException, St /** * Invokes every function in `all` even if one or more functions throws an exception. - *

        + * * If any of the functions throws an exception, the first one will be rethrown at the end with subsequent exceptions * added as suppressed exceptions. */ @@ -1183,7 +1176,7 @@ public static void tryAll(List> all) throws Throwable { * positive, the original value is returned. When the input number is negative, the returned * positive value is the original value bit AND against 0x7fffffff which is not its absolute * value. - *

        + * * Note: changing this method in the future will possibly cause partition selection not to be * compatible with the existing messages already placed on a partition since it is used * in producer's partition selection logic {@link org.apache.kafka.clients.producer.KafkaProducer} @@ -1403,7 +1396,7 @@ public static Set from32BitField(final int intValue) { * @return new Collector, M, M> */ public static > Collector, M, M> entriesToMap(final Supplier mapSupplier) { - return new Collector<>() { + return new Collector, M, M>() { @Override public Supplier supplier() { return mapSupplier; @@ -1470,24 +1463,7 @@ public static Map filterMap(final Map map, final Predicate propsToMap(Properties properties) { - // This try catch block is to handle the case when the Properties object has non-String keys - // when calling the propertyNames() method. This is a workaround for the lack of a method that - // returns all properties including defaults and does not attempt to convert all keys to Strings. - Enumeration enumeration; - try { - enumeration = properties.propertyNames(); - } catch (ClassCastException e) { - throw new ConfigException("One or more keys is not a string."); - } - Map map = new HashMap<>(); - while (enumeration.hasMoreElements()) { - String key = (String) enumeration.nextElement(); - // properties.get(key) returns null for defaults, but properties.getProperty(key) returns null for - // non-string values. A combination of the two methods is used to cover all cases - Object value = (properties.get(key) != null) ? properties.get(key) : properties.getProperty(key); - map.put(key, value); - } - return map; + return castToStringObjectMap(properties); } /** @@ -1497,9 +1473,6 @@ public static Map propsToMap(Properties properties) { * @throws ConfigException if any key is not a String */ public static Map castToStringObjectMap(Map inputMap) { - if (inputMap instanceof Properties) { - return propsToMap((Properties) inputMap); - } Map map = new HashMap<>(inputMap.size()); for (Map.Entry entry : inputMap.entrySet()) { if (entry.getKey() instanceof String) { @@ -1711,7 +1684,6 @@ public static ConfigDef mergeConfigs(List configDefs) { configDefs.forEach(configDef -> configDef.configKeys().values().forEach(all::define)); return all; } - /** * A runnable that can throw checked exception. */ @@ -1719,17 +1691,4 @@ public static ConfigDef mergeConfigs(List configDefs) { public interface ThrowingRunnable { void run() throws Exception; } - - /** - * convert millisecond to nanosecond, or throw exception if overflow - * @param timeMs the time in millisecond - * @return the converted nanosecond - */ - public static long msToNs(long timeMs) { - try { - return Math.multiplyExact(1000 * 1000, timeMs); - } catch (ArithmeticException e) { - throw new IllegalArgumentException("Cannot convert " + timeMs + " millisecond to nanosecond due to arithmetic overflow", e); - } - } } diff --git a/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java b/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java index 68f1aaf678d1d..8bbb8662f4600 100644 --- a/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java +++ b/clients/src/main/java/org/apache/kafka/server/authorizer/Authorizer.java @@ -59,10 +59,6 @@ * * Authorizer implementation class may optionally implement @{@link org.apache.kafka.common.Reconfigurable} * to enable dynamic reconfiguration without restarting the broker. - *

        Authorizer implementation class may also optionally implement {@link org.apache.kafka.common.metrics.Monitorable} - * to enable the authorizer to register metrics. The following tags are automatically added to all metrics registered: - * config set to authorizer.class.name, class set to the Authorizer class name, - * and role set to either broker or controller. *

        * Threading model: *

          diff --git a/clients/src/main/java/org/apache/kafka/server/quota/ClientQuotaCallback.java b/clients/src/main/java/org/apache/kafka/server/quota/ClientQuotaCallback.java index 01a8181d86100..a9cb2bfb2af24 100644 --- a/clients/src/main/java/org/apache/kafka/server/quota/ClientQuotaCallback.java +++ b/clients/src/main/java/org/apache/kafka/server/quota/ClientQuotaCallback.java @@ -24,13 +24,6 @@ /** * Quota callback interface for brokers and controllers that enables customization of client quota computation. - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the callback to register metrics. - * The following tags are automatically added to all metrics registered: - *
            - *
          • config set to client.quota.callback.class
          • - *
          • class set to the ClientQuotaCallback class name
          • - *
          • role set to broker/controller, which indicates the role of the server
          • - *
          */ public interface ClientQuotaCallback extends Configurable { diff --git a/clients/src/main/resources/common/message/AddRaftVoterRequest.json b/clients/src/main/resources/common/message/AddRaftVoterRequest.json index 1a6e58fbbd4f6..74b7638ea2463 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterRequest.json +++ b/clients/src/main/resources/common/message/AddRaftVoterRequest.json @@ -18,8 +18,7 @@ "type": "request", "listeners": ["controller", "broker"], "name": "AddRaftVoterRequest", - // Version 1 adds the AckWhenCommitted field. - "validVersions": "0-1", + "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "0+", "nullableVersions": "0+", @@ -38,8 +37,6 @@ "about": "The hostname." }, { "name": "Port", "type": "uint16", "versions": "0+", "about": "The port." } - ]}, - { "name": "AckWhenCommitted", "type": "bool", "versions": "1+", "default": "true", - "about": "When true, return a response after the new voter set is committed. Otherwise, return after the leader writes the changes locally." } + ]} ] } diff --git a/clients/src/main/resources/common/message/AddRaftVoterResponse.json b/clients/src/main/resources/common/message/AddRaftVoterResponse.json index d2ae5b1ddcfab..c48f9cdda4e85 100644 --- a/clients/src/main/resources/common/message/AddRaftVoterResponse.json +++ b/clients/src/main/resources/common/message/AddRaftVoterResponse.json @@ -17,8 +17,7 @@ "apiKey": 80, "type": "response", "name": "AddRaftVoterResponse", - // Version 1 is the same as version 0 - "validVersions": "0-1", + "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/AlterPartitionReassignmentsRequest.json b/clients/src/main/resources/common/message/AlterPartitionReassignmentsRequest.json index d0ccc1c088ea8..f3047feb0a3ad 100644 --- a/clients/src/main/resources/common/message/AlterPartitionReassignmentsRequest.json +++ b/clients/src/main/resources/common/message/AlterPartitionReassignmentsRequest.json @@ -18,14 +18,11 @@ "type": "request", "listeners": ["broker", "controller"], "name": "AlterPartitionReassignmentsRequest", - // Version 1 adds the ability to allow/disallow changing the replication factor as part of the request. - "validVersions": "0-1", + "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "TimeoutMs", "type": "int32", "versions": "0+", "default": "60000", "about": "The time in ms to wait for the request to complete." }, - { "name": "AllowReplicationFactorChange", "type": "bool", "versions": "1+", "default": "true", - "about": "The option indicating whether changing the replication factor of any given partition as part of this request is a valid move." }, { "name": "Topics", "type": "[]ReassignableTopic", "versions": "0+", "about": "The topics to reassign.", "fields": [ { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", diff --git a/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json b/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json index 36ce87968ecc8..0b8f60b0baba8 100644 --- a/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json +++ b/clients/src/main/resources/common/message/AlterPartitionReassignmentsResponse.json @@ -17,14 +17,11 @@ "apiKey": 45, "type": "response", "name": "AlterPartitionReassignmentsResponse", - // Version 1 adds the ability to allow/disallow changing the replication factor as part of the request. - "validVersions": "0-1", + "validVersions": "0", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "AllowReplicationFactorChange", "type": "bool", "versions": "1+", "default": "true", "ignorable": true, - "about": "The option indicating whether changing the replication factor of any given partition as part of the request was allowed." }, { "name": "ErrorCode", "type": "int16", "versions": "0+", "about": "The top-level error code, or 0 if there was no error." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", diff --git a/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json b/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json index 8f574b41fc458..9ebdc3707d140 100644 --- a/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/BrokerHeartbeatRequest.json @@ -31,7 +31,7 @@ "about": "True if the broker wants to be fenced, false otherwise." }, { "name": "WantShutDown", "type": "bool", "versions": "0+", "about": "True if the broker wants to be shut down, false otherwise." }, - { "name": "OfflineLogDirs", "type": "[]uuid", "versions": "1+", "taggedVersions": "1+", "tag": 0, + { "name": "OfflineLogDirs", "type": "[]uuid", "versions": "1+", "taggedVersions": "1+", "tag": "0", "about": "Log directories that failed and went offline." } ] } diff --git a/clients/src/main/resources/common/message/DeleteAclsRequest.json b/clients/src/main/resources/common/message/DeleteAclsRequest.json index d794295af567e..db605305ae28b 100644 --- a/clients/src/main/resources/common/message/DeleteAclsRequest.json +++ b/clients/src/main/resources/common/message/DeleteAclsRequest.json @@ -30,7 +30,7 @@ { "name": "ResourceTypeFilter", "type": "int8", "versions": "0+", "about": "The resource type." }, { "name": "ResourceNameFilter", "type": "string", "versions": "0+", "nullableVersions": "0+", - "about": "The resource name, or null to match any resource name." }, + "about": "The resource name." }, { "name": "PatternTypeFilter", "type": "int8", "versions": "1+", "default": "3", "ignorable": false, "about": "The pattern type." }, { "name": "PrincipalFilter", "type": "string", "versions": "0+", "nullableVersions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteRecordsRequest.json b/clients/src/main/resources/common/message/DeleteRecordsRequest.json index 969efd63e957d..fc697944a02bb 100644 --- a/clients/src/main/resources/common/message/DeleteRecordsRequest.json +++ b/clients/src/main/resources/common/message/DeleteRecordsRequest.json @@ -33,7 +33,7 @@ { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "Offset", "type": "int64", "versions": "0+", - "about": "The deletion offset. -1 means that records should be truncated to the high watermark." } + "about": "The deletion offset." } ]} ]}, { "name": "TimeoutMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json b/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json index ff5ad99735845..37672b92f6561 100644 --- a/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/DeleteShareGroupStateRequest.json @@ -20,15 +20,16 @@ "name": "DeleteShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about": "The group identifier." }, + "about":"The group identifier." }, { "name": "Topics", "type": "[]DeleteStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." } ]} diff --git a/clients/src/main/resources/common/message/DescribeClusterResponse.json b/clients/src/main/resources/common/message/DescribeClusterResponse.json index 1911b1ec33d6f..a17e427c8c3e2 100644 --- a/clients/src/main/resources/common/message/DescribeClusterResponse.json +++ b/clients/src/main/resources/common/message/DescribeClusterResponse.json @@ -36,7 +36,7 @@ { "name": "ClusterId", "type": "string", "versions": "0+", "about": "The cluster ID that responding broker belongs to." }, { "name": "ControllerId", "type": "int32", "versions": "0+", "default": "-1", "entityType": "brokerId", - "about": "The ID of the controller. When handled by a controller, returns the current voter leader ID. When handled by a broker, returns a random alive broker ID as a fallback." }, + "about": "The ID of the controller broker." }, { "name": "Brokers", "type": "[]DescribeClusterBroker", "versions": "0+", "about": "Each broker in the response.", "fields": [ { "name": "BrokerId", "type": "int32", "versions": "0+", "mapKey": true, "entityType": "brokerId", diff --git a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json index 725d1ad337b56..d05785fe8d862 100644 --- a/clients/src/main/resources/common/message/DescribeLogDirsResponse.json +++ b/clients/src/main/resources/common/message/DescribeLogDirsResponse.json @@ -51,11 +51,11 @@ "about": "True if this log is created by AlterReplicaLogDirsRequest and will replace the current log of the replica in the future." }]} ]}, { "name": "TotalBytes", "type": "int64", "versions": "4+", "ignorable": true, "default": "-1", - "about": "The total size in bytes of the volume the log directory is in. This value does not include the size of data stored in remote storage." + "about": "The total size in bytes of the volume the log directory is in." }, { "name": "UsableBytes", "type": "int64", "versions": "4+", "ignorable": true, "default": "-1", - "about": "The usable size in bytes of the volume the log directory is in. This value does not include the size of data stored in remote storage." + "about": "The usable size in bytes of the volume the log directory is in." } ]} ] -} \ No newline at end of file +} diff --git a/clients/src/main/resources/common/message/FetchRequest.json b/clients/src/main/resources/common/message/FetchRequest.json index 9ebf86ac424c9..b7ad185f60b39 100644 --- a/clients/src/main/resources/common/message/FetchRequest.json +++ b/clients/src/main/resources/common/message/FetchRequest.json @@ -27,7 +27,7 @@ // the request is now relevant. Partitions will be processed in the order // they appear in the request. // - // Version 4 adds IsolationLevel. Starting in version 4, the requestor must be + // Version 4 adds IsolationLevel. Starting in version 4, the reqestor must be // able to handle Kafka log message format version 2. // // Version 5 adds LogStartOffset to indicate the earliest available offset of @@ -56,9 +56,7 @@ // Version 16 is the same as version 15 (KIP-951). // // Version 17 adds directory id support from KIP-853 - // - // Version 18 adds high-watermark from KIP-1166 - "validVersions": "4-18", + "validVersions": "4-17", "flexibleVersions": "12+", "fields": [ { "name": "ClusterId", "type": "string", "versions": "12+", "nullableVersions": "12+", "default": "null", @@ -105,10 +103,7 @@ { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", "about": "The maximum bytes to fetch from this partition. See KIP-74 for cases where this limit may not be honored." }, { "name": "ReplicaDirectoryId", "type": "uuid", "versions": "17+", "taggedVersions": "17+", "tag": 0, "ignorable": true, - "about": "The directory id of the follower fetching." }, - { "name": "HighWatermark", "type": "int64", "versions": "18+", "default": "9223372036854775807", "taggedVersions": "18+", - "tag": 1, "ignorable": true, - "about": "The high-watermark known by the replica. -1 if the high-watermark is not known and 9223372036854775807 if the feature is not supported." } + "about": "The directory id of the follower fetching." } ]} ]}, { "name": "ForgottenTopicsData", "type": "[]ForgottenTopic", "versions": "7+", "ignorable": false, diff --git a/clients/src/main/resources/common/message/FetchResponse.json b/clients/src/main/resources/common/message/FetchResponse.json index 36dc05ff60ca4..dc8d35175661f 100644 --- a/clients/src/main/resources/common/message/FetchResponse.json +++ b/clients/src/main/resources/common/message/FetchResponse.json @@ -48,9 +48,7 @@ // Version 16 adds the 'NodeEndpoints' field (KIP-951). // // Version 17 no changes to the response (KIP-853). - // - // Version 18 no changes to the response (KIP-1166) - "validVersions": "4-18", + "validVersions": "4-17", "flexibleVersions": "12+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "1+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/InitProducerIdRequest.json b/clients/src/main/resources/common/message/InitProducerIdRequest.json index 5a056db520fbc..f1700f3a3d287 100644 --- a/clients/src/main/resources/common/message/InitProducerIdRequest.json +++ b/clients/src/main/resources/common/message/InitProducerIdRequest.json @@ -27,10 +27,7 @@ // Version 4 adds the support for new error code PRODUCER_FENCED. // // Verison 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for 2PC (KIP-939). - "latestVersionUnstable": true, - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "2+", "fields": [ { "name": "TransactionalId", "type": "string", "versions": "0+", "nullableVersions": "0+", "entityType": "transactionalId", @@ -40,11 +37,6 @@ { "name": "ProducerId", "type": "int64", "versions": "3+", "default": "-1", "entityType": "producerId", "about": "The producer id. This is used to disambiguate requests if a transactional id is reused following its expiration." }, { "name": "ProducerEpoch", "type": "int16", "versions": "3+", "default": "-1", - "about": "The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match." }, - { "name": "Enable2Pc", "type": "bool", "versions": "6+", "default": "false", - "about": "True if the client wants to enable two-phase commit (2PC) protocol for transactions." }, - { "name": "KeepPreparedTxn", "type": "bool", "versions": "6+", "default": "false", - "about": "True if the client wants to keep the currently ongoing transaction instead of aborting it." } - + "about": "The producer's current epoch. This will be checked against the producer epoch on the broker, and the request will return an error if they do not match." } ] } diff --git a/clients/src/main/resources/common/message/InitProducerIdResponse.json b/clients/src/main/resources/common/message/InitProducerIdResponse.json index c070c92f4e0ed..c5dfec6e321cb 100644 --- a/clients/src/main/resources/common/message/InitProducerIdResponse.json +++ b/clients/src/main/resources/common/message/InitProducerIdResponse.json @@ -26,9 +26,7 @@ // Version 4 adds the support for new error code PRODUCER_FENCED. // // Version 5 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). - // - // Version 6 adds support for 2PC (KIP-939). - "validVersions": "0-6", + "validVersions": "0-5", "flexibleVersions": "2+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, @@ -38,10 +36,6 @@ { "name": "ProducerId", "type": "int64", "versions": "0+", "entityType": "producerId", "default": -1, "about": "The current producer id." }, { "name": "ProducerEpoch", "type": "int16", "versions": "0+", - "about": "The current epoch associated with the producer id." }, - { "name": "OngoingTxnProducerId", "type": "int64", "versions": "6+", "entityType": "producerId", - "default": -1, "about": "The producer id for ongoing transaction when KeepPreparedTxn is used, -1 if there is no transaction ongoing." }, - { "name": "OngoingTxnProducerEpoch", "type": "int16", "default": -1, "versions": "6+", - "about": "The epoch associated with the producer id for ongoing transaction when KeepPreparedTxn is used, -1 if there is no transaction ongoing." } + "about": "The current epoch associated with the producer id." } ] } diff --git a/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json b/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json index 7512ce6e51335..76de19b48fb0d 100644 --- a/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/InitializeShareGroupStateRequest.json @@ -20,6 +20,7 @@ "name": "InitializeShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "about": "The group identifier." }, @@ -28,7 +29,7 @@ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/JoinGroupRequest.json b/clients/src/main/resources/common/message/JoinGroupRequest.json index 31afdb1a32ae8..41d7c1acbaefe 100644 --- a/clients/src/main/resources/common/message/JoinGroupRequest.json +++ b/clients/src/main/resources/common/message/JoinGroupRequest.json @@ -18,6 +18,8 @@ "type": "request", "listeners": ["broker"], "name": "JoinGroupRequest", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 adds RebalanceTimeoutMs. Version 2 and 3 are the same as version 1. // // Starting from version 4, the client needs to issue a second request to join group @@ -32,7 +34,7 @@ // Version 8 adds the Reason field (KIP-800). // // Version 9 is the same as version 8. - "validVersions": "0-9", + "validVersions": "2-9", "flexibleVersions": "6+", "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", diff --git a/clients/src/main/resources/common/message/JoinGroupResponse.json b/clients/src/main/resources/common/message/JoinGroupResponse.json index d2f016f62f66c..364309596eb95 100644 --- a/clients/src/main/resources/common/message/JoinGroupResponse.json +++ b/clients/src/main/resources/common/message/JoinGroupResponse.json @@ -17,6 +17,8 @@ "apiKey": 11, "type": "response", "name": "JoinGroupResponse", + // Versions 0-1 were removed in Apache Kafka 4.0, Version 2 is the new baseline. + // // Version 1 is the same as version 0. // // Version 2 adds throttle time. @@ -35,7 +37,7 @@ // Version 8 is the same as version 7. // // Version 9 adds the SkipAssignment field. - "validVersions": "0-9", + "validVersions": "2-9", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json b/clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json new file mode 100644 index 0000000000000..b54dce6b7c749 --- /dev/null +++ b/clients/src/main/resources/common/message/ListClientMetricsResourcesRequest.json @@ -0,0 +1,26 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 74, + "type": "request", + "listeners": ["broker"], + "name": "ListClientMetricsResourcesRequest", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + ] +} + \ No newline at end of file diff --git a/clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json b/clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json new file mode 100644 index 0000000000000..281781c762733 --- /dev/null +++ b/clients/src/main/resources/common/message/ListClientMetricsResourcesResponse.json @@ -0,0 +1,33 @@ +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +{ + "apiKey": 74, + "type": "response", + "name": "ListClientMetricsResourcesResponse", + "validVersions": "0", + "flexibleVersions": "0+", + "fields": [ + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, + { "name": "ErrorCode", "type": "int16", "versions": "0+", + "about": "The error code, or 0 if there was no error." }, + { "name": "ClientMetricsResources", "type": "[]ClientMetricsResource", "versions": "0+", + "about": "Each client metrics resource in the response.", "fields": [ + { "name": "Name", "type": "string", "versions": "0+", + "about": "The resource name." } + ]} + ] +} diff --git a/clients/src/main/resources/common/message/ListOffsetsRequest.json b/clients/src/main/resources/common/message/ListOffsetsRequest.json index 1a2de6ca30a2f..6f8ff7d6cf935 100644 --- a/clients/src/main/resources/common/message/ListOffsetsRequest.json +++ b/clients/src/main/resources/common/message/ListOffsetsRequest.json @@ -40,9 +40,7 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - // - // Version 11 enables listing offsets by earliest pending upload offset (KIP-1023) - "validVersions": "1-11", + "validVersions": "1-10", "flexibleVersions": "6+", "latestVersionUnstable": false, "fields": [ diff --git a/clients/src/main/resources/common/message/ListOffsetsResponse.json b/clients/src/main/resources/common/message/ListOffsetsResponse.json index 1407273bf4d8c..7f9588847b9a0 100644 --- a/clients/src/main/resources/common/message/ListOffsetsResponse.json +++ b/clients/src/main/resources/common/message/ListOffsetsResponse.json @@ -40,9 +40,7 @@ // Version 9 enables listing offsets by last tiered offset (KIP-1005). // // Version 10 enables async remote list offsets support (KIP-1075) - // - // Version 11 enables listing offsets by earliest pending upload offset (KIP-1023) - "validVersions": "1-11", + "validVersions": "1-10", "flexibleVersions": "6+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "2+", "ignorable": true, diff --git a/clients/src/main/resources/common/message/ListTransactionsRequest.json b/clients/src/main/resources/common/message/ListTransactionsRequest.json index 57d42e6b99c41..5d7c688da2213 100644 --- a/clients/src/main/resources/common/message/ListTransactionsRequest.json +++ b/clients/src/main/resources/common/message/ListTransactionsRequest.json @@ -19,9 +19,7 @@ "listeners": ["broker"], "name": "ListTransactionsRequest", // Version 1: adds DurationFilter to list transactions older than specified duration - - // Version 2: adds TransactionalIdPattern to list transactions with the same pattern(KIP-1152) - "validVersions": "0-2", + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "StateFilters", "type": "[]string", "versions": "0+", @@ -32,9 +30,6 @@ }, { "name": "DurationFilter", "type": "int64", "versions": "1+", "default": -1, "about": "Duration (in millis) to filter by: if < 0, all transactions will be returned; otherwise, only transactions running longer than this duration will be returned." - }, - { "name": "TransactionalIdPattern", "type": "string", "versions": "2+", "nullableVersions": "2+", "default": "null", - "about": "The transactional ID regular expression pattern to filter by: if it is empty or null, all transactions are returned; Otherwise then only the transactions matching the given regular expression will be returned." } ] } diff --git a/clients/src/main/resources/common/message/ListTransactionsResponse.json b/clients/src/main/resources/common/message/ListTransactionsResponse.json index 0af1be699b435..3872cf24a3075 100644 --- a/clients/src/main/resources/common/message/ListTransactionsResponse.json +++ b/clients/src/main/resources/common/message/ListTransactionsResponse.json @@ -18,9 +18,7 @@ "type": "response", "name": "ListTransactionsResponse", // Version 1 is the same as version 0 (KIP-994). - - // This API can return InvalidRegularExpression (KIP-1152). - "validVersions": "0-2", + "validVersions": "0-1", "flexibleVersions": "0+", "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetCommitRequest.json b/clients/src/main/resources/common/message/OffsetCommitRequest.json index ba3c12f0e2b47..348ed2b90c5c8 100644 --- a/clients/src/main/resources/common/message/OffsetCommitRequest.json +++ b/clients/src/main/resources/common/message/OffsetCommitRequest.json @@ -36,11 +36,8 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The // request is the same as version 8. - // - // Version 10 adds support for topic ids and removes support for topic names (KIP-848). - "validVersions": "2-10", + "validVersions": "2-9", "flexibleVersions": "8+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The unique group identifier." }, @@ -55,10 +52,8 @@ "about": "The time period in ms to retain the offset." }, { "name": "Topics", "type": "[]OffsetCommitRequestTopic", "versions": "0+", "about": "The topics to commit offsets for.", "fields": [ - { "name": "Name", "type": "string", "versions": "0-9", "entityType": "topicName", "ignorable": true, + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, - "about": "The topic ID." }, { "name": "Partitions", "type": "[]OffsetCommitRequestPartition", "versions": "0+", "about": "Each partition to commit offsets for.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetCommitResponse.json b/clients/src/main/resources/common/message/OffsetCommitResponse.json index 0228733ce6bb0..0cccd64816c47 100644 --- a/clients/src/main/resources/common/message/OffsetCommitResponse.json +++ b/clients/src/main/resources/common/message/OffsetCommitResponse.json @@ -34,9 +34,7 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH when the new consumer group protocol is used and // GROUP_ID_NOT_FOUND when the group does not exist for both protocols. - // - // Version 10 adds support for topic ids and removes support for topic names (KIP-848). - "validVersions": "2-10", + "validVersions": "2-9", "flexibleVersions": "8+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -49,16 +47,13 @@ // - FENCED_MEMBER_EPOCH (version 7+) // - GROUP_ID_NOT_FOUND (version 9+) // - STALE_MEMBER_EPOCH (version 9+) - // - UNKNOWN_TOPIC_ID (version 10+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, { "name": "Topics", "type": "[]OffsetCommitResponseTopic", "versions": "0+", "about": "The responses for each topic.", "fields": [ - { "name": "Name", "type": "string", "versions": "0-9", "entityType": "topicName", "ignorable": true, + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, - "about": "The topic ID." }, { "name": "Partitions", "type": "[]OffsetCommitResponsePartition", "versions": "0+", "about": "The responses for each partition in the topic.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/OffsetFetchRequest.json b/clients/src/main/resources/common/message/OffsetFetchRequest.json index df831eba756d9..88f5b568d724c 100644 --- a/clients/src/main/resources/common/message/OffsetFetchRequest.json +++ b/clients/src/main/resources/common/message/OffsetFetchRequest.json @@ -38,11 +38,8 @@ // // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). It adds // the MemberId and MemberEpoch fields. Those are filled in and validated when the new consumer protocol is used. - // - // Version 10 adds support for topic ids and removes support for topic names (KIP-848). - "validVersions": "1-10", + "validVersions": "1-9", "flexibleVersions": "6+", - "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0-7", "entityType": "groupId", "about": "The group to fetch offsets for." }, @@ -63,10 +60,8 @@ "about": "The member epoch if using the new consumer protocol (KIP-848)." }, { "name": "Topics", "type": "[]OffsetFetchRequestTopics", "versions": "8+", "nullableVersions": "8+", "about": "Each topic we would like to fetch offsets for, or null to fetch offsets for all topics.", "fields": [ - { "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true, + { "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName", "about": "The topic name."}, - { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, - "about": "The topic ID." }, { "name": "PartitionIndexes", "type": "[]int32", "versions": "8+", "about": "The partition indexes we would like to fetch offsets for." } ]} diff --git a/clients/src/main/resources/common/message/OffsetFetchResponse.json b/clients/src/main/resources/common/message/OffsetFetchResponse.json index e92590e38e10c..9f0a5157cc424 100644 --- a/clients/src/main/resources/common/message/OffsetFetchResponse.json +++ b/clients/src/main/resources/common/message/OffsetFetchResponse.json @@ -38,9 +38,7 @@ // Version 9 is the first version that can be used with the new consumer group protocol (KIP-848). The response is // the same as version 8 but can return STALE_MEMBER_EPOCH and UNKNOWN_MEMBER_ID errors when the new consumer group // protocol is used. - // - // Version 10 adds support for topic ids and removes support for topic names (KIP-848). - "validVersions": "1-10", + "validVersions": "1-9", "flexibleVersions": "6+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -51,7 +49,6 @@ // - UNSTABLE_OFFSET_COMMIT (version 7+) // - UNKNOWN_MEMBER_ID (version 9+) // - STALE_MEMBER_EPOCH (version 9+) - // - UNKNOWN_TOPIC_ID (version 10+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "3+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, @@ -81,10 +78,8 @@ "about": "The group ID." }, { "name": "Topics", "type": "[]OffsetFetchResponseTopics", "versions": "8+", "about": "The responses per topic.", "fields": [ - { "name": "Name", "type": "string", "versions": "8-9", "entityType": "topicName", "ignorable": true, + { "name": "Name", "type": "string", "versions": "8+", "entityType": "topicName", "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "10+", "ignorable": true, - "about": "The topic ID." }, { "name": "Partitions", "type": "[]OffsetFetchResponsePartitions", "versions": "8+", "about": "The responses per partition.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "8+", diff --git a/clients/src/main/resources/common/message/ProduceRequest.json b/clients/src/main/resources/common/message/ProduceRequest.json index 3b46a1ff5f468..0bb29f92378dd 100644 --- a/clients/src/main/resources/common/message/ProduceRequest.json +++ b/clients/src/main/resources/common/message/ProduceRequest.json @@ -46,8 +46,7 @@ // transaction V2 (KIP_890 part 2) is enabled, the produce request will also include the function for a // AddPartitionsToTxn call. If V2 is disabled, the client can't use produce request version higher than 11 within // a transaction. - // Version 13 replaces topic names with topic IDs (KIP-516). May return UNKNOWN_TOPIC_ID error code. - "validVersions": "3-13", + "validVersions": "3-12", "flexibleVersions": "9+", "fields": [ { "name": "TransactionalId", "type": "string", "versions": "3+", "nullableVersions": "3+", "default": "null", "entityType": "transactionalId", @@ -58,9 +57,8 @@ "about": "The timeout to await a response in milliseconds." }, { "name": "TopicData", "type": "[]TopicProduceData", "versions": "0+", "about": "Each topic to produce to.", "fields": [ - { "name": "Name", "type": "string", "versions": "0-12", "entityType": "topicName", "mapKey": true, "ignorable": true, + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "13+", "mapKey": true, "ignorable": true, "about": "The unique topic ID" }, { "name": "PartitionData", "type": "[]PartitionProduceData", "versions": "0+", "about": "Each partition to produce to.", "fields": [ { "name": "Index", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ProduceResponse.json b/clients/src/main/resources/common/message/ProduceResponse.json index adf08e94a35d4..fafcd86401d40 100644 --- a/clients/src/main/resources/common/message/ProduceResponse.json +++ b/clients/src/main/resources/common/message/ProduceResponse.json @@ -40,15 +40,13 @@ // Version 11 adds support for new error code TRANSACTION_ABORTABLE (KIP-890). // // Version 12 is the same as version 10 (KIP-890). - // Version 13 replaces topic names with topic IDs (KIP-516). May return UNKNOWN_TOPIC_ID error code. - "validVersions": "3-13", + "validVersions": "3-12", "flexibleVersions": "9+", "fields": [ { "name": "Responses", "type": "[]TopicProduceResponse", "versions": "0+", "about": "Each produce response.", "fields": [ - { "name": "Name", "type": "string", "versions": "0-12", "entityType": "topicName", "mapKey": true, "ignorable": true, + { "name": "Name", "type": "string", "versions": "0+", "entityType": "topicName", "mapKey": true, "about": "The topic name." }, - { "name": "TopicId", "type": "uuid", "versions": "13+", "mapKey": true, "ignorable": true, "about": "The unique topic ID" }, { "name": "PartitionResponses", "type": "[]PartitionProduceResponse", "versions": "0+", "about": "Each partition that we produced to within the topic.", "fields": [ { "name": "Index", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/README.md b/clients/src/main/resources/common/message/README.md index 435635214f25e..8c92fd6bc9475 100644 --- a/clients/src/main/resources/common/message/README.md +++ b/clients/src/main/resources/common/message/README.md @@ -34,7 +34,9 @@ specifies the versions of the protocol that our code understands. For example, specifying "0-2" indicates that we understand versions 0, 1, and 2. You must always specify the highest message version which is supported. -Dropping support for old message versions is no longer allowed without a KIP. +The only old message versions that are no longer supported are version 0 of +MetadataRequest and MetadataResponse. In general, since we adopted KIP-97, +dropping support for old message versions is no longer allowed without a KIP. Therefore, please be careful not to increase the lower end of the version support interval for any message. diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json b/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json index a19913dfde847..d32b99e0f59f0 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateRequest.json @@ -20,15 +20,16 @@ "name": "ReadShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about": "The group identifier." }, + "about":"The group identifier." }, { "name": "Topics", "type": "[]ReadStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json index 734a0fefdc4d3..7815f7b50c7d1 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateResponse.json @@ -24,7 +24,6 @@ // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - FENCED_LEADER_EPOCH (version 0+) // - INVALID_REQUEST (version 0+) "fields": [ { "name": "Results", "type": "[]ReadStateResult", "versions": "0+", @@ -40,17 +39,17 @@ { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The error message, or null if there was no error." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", - "about": "The state epoch of the share-partition." }, + "about": "The state epoch for this share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset, which can be -1 if it is not yet initialized." }, { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "about": "The state batches for this share-partition.", "fields":[ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "The first offset of this state batch." }, + "about": "The base offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0+", - "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, + "about": "The state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count." } ]} diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json index cdbad63bfa22b..870f01f3fd494 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryRequest.json @@ -20,15 +20,16 @@ "name": "ReadShareGroupStateSummaryRequest", "validVersions": "0", "flexibleVersions": "0+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about": "The group identifier." }, + "about":"The group identifier." }, { "name": "Topics", "type": "[]ReadStateSummaryData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json index 81e3edc554ece..ddf9d7044a6a3 100644 --- a/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json +++ b/clients/src/main/resources/common/message/ReadShareGroupStateSummaryResponse.json @@ -41,8 +41,6 @@ "about": "The error message, or null if there was no error." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", "about": "The state epoch of the share-partition." }, - { "name": "LeaderEpoch", "type": "int32", "versions": "0+", - "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", "about": "The share-partition start offset." } ]} diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json index 561f4a84d2f6d..db534cb4c1c13 100644 --- a/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json +++ b/clients/src/main/resources/common/message/ShareAcknowledgeRequest.json @@ -18,11 +18,12 @@ "type": "request", "listeners": ["broker"], "name": "ShareAcknowledgeRequest", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", + // The ShareAcknowledgeRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", "about": "The group identifier." }, @@ -32,19 +33,19 @@ "about": "The current share session epoch: 0 to open a share session; -1 to close it; otherwise increments for consecutive requests." }, { "name": "Topics", "type": "[]AcknowledgeTopic", "versions": "0+", "about": "The topics containing records to acknowledge.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID.", "mapKey": true }, + { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]AcknowledgePartition", "versions": "0+", "about": "The partitions containing records to acknowledge.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", "mapKey": true, + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "First offset of batch of records to acknowledge." }, + "about": "First offset of batch of records to acknowledge."}, { "name": "LastOffset", "type": "int64", "versions": "0+", - "about": "Last offset (inclusive) of batch of records to acknowledge." }, + "about": "Last offset (inclusive) of batch of records to acknowledge."}, { "name": "AcknowledgeTypes", "type": "[]int8", "versions": "0+", - "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject." } + "about": "Array of acknowledge types - 0:Gap,1:Accept,2:Release,3:Reject."} ]} ]} ]} diff --git a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json index 65d0875698331..1f726a0c7d6a4 100644 --- a/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json +++ b/clients/src/main/resources/common/message/ShareAcknowledgeResponse.json @@ -17,10 +17,7 @@ "apiKey": 79, "type": "response", "name": "ShareAcknowledgeResponse", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -35,16 +32,16 @@ // - INVALID_REQUEST (version 0+) // - UNKNOWN_SERVER_ERROR (version 0+) "fields": [ - { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", + { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "ignorable": true, "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, - { "name": "ErrorCode", "type": "int16", "versions": "0+", + { "name": "ErrorCode", "type": "int16", "versions": "0+", "ignorable": true, "about": "The top level response error code." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, { "name": "Responses", "type": "[]ShareAcknowledgeTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "mapKey": true, - "about": "The unique topic ID." }, + { "name": "TopicId", "type": "uuid", "versions": "0+", "ignorable": true, + "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", diff --git a/clients/src/main/resources/common/message/ShareFetchRequest.json b/clients/src/main/resources/common/message/ShareFetchRequest.json index d7a4abf1fbb34..6af767979961f 100644 --- a/clients/src/main/resources/common/message/ShareFetchRequest.json +++ b/clients/src/main/resources/common/message/ShareFetchRequest.json @@ -18,11 +18,12 @@ "type": "request", "listeners": ["broker"], "name": "ShareFetchRequest", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", + // The ShareFetchRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "groupId", "about": "The group identifier." }, @@ -35,19 +36,15 @@ { "name": "MinBytes", "type": "int32", "versions": "0+", "about": "The minimum bytes to accumulate in the response." }, { "name": "MaxBytes", "type": "int32", "versions": "0+", "default": "0x7fffffff", - "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, - { "name": "MaxRecords", "type": "int32", "versions": "1+", - "about": "The maximum number of records to fetch. This limit can be exceeded for alignment of batch boundaries." }, - { "name": "BatchSize", "type": "int32", "versions": "1+", - "about": "The optimal number of records for batches of acquired records and acknowledgements." }, + "about": "The maximum bytes to fetch. See KIP-74 for cases where this limit may not be honored." }, { "name": "Topics", "type": "[]FetchTopic", "versions": "0+", "about": "The topics to fetch.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID.", "mapKey": true }, + { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]FetchPartition", "versions": "0+", "about": "The partitions to fetch.", "fields": [ - { "name": "PartitionIndex", "type": "int32", "versions": "0+", "mapKey": true, + { "name": "PartitionIndex", "type": "int32", "versions": "0+", "about": "The partition index." }, - { "name": "PartitionMaxBytes", "type": "int32", "versions": "0", + { "name": "PartitionMaxBytes", "type": "int32", "versions": "0+", "about": "The maximum bytes to fetch from this partition. 0 when only acknowledgement with no fetching is required. See KIP-74 for cases where this limit may not be honored." }, { "name": "AcknowledgementBatches", "type": "[]AcknowledgementBatch", "versions": "0+", "about": "Record batches to acknowledge.", "fields": [ diff --git a/clients/src/main/resources/common/message/ShareFetchResponse.json b/clients/src/main/resources/common/message/ShareFetchResponse.json index 5d4ede78da3d3..858b0bdd46fca 100644 --- a/clients/src/main/resources/common/message/ShareFetchResponse.json +++ b/clients/src/main/resources/common/message/ShareFetchResponse.json @@ -17,10 +17,7 @@ "apiKey": 78, "type": "response", "name": "ShareFetchResponse", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", // Supported errors for ErrorCode and AcknowledgeErrorCode: // - GROUP_AUTHORIZATION_FAILED (version 0+) @@ -42,12 +39,10 @@ "about": "The top-level response error code." }, { "name": "ErrorMessage", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "The top-level error message, or null if there was no error." }, - { "name": "AcquisitionLockTimeoutMs", "type": "int32", "versions": "1+", - "about": "The time in milliseconds for which the acquired records are locked." }, { "name": "Responses", "type": "[]ShareFetchableTopicResponse", "versions": "0+", "about": "The response topics.", "fields": [ - { "name": "TopicId", "type": "uuid", "versions": "0+", "mapKey": true, - "about": "The unique topic ID." }, + { "name": "TopicId", "type": "uuid", "versions": "0+", + "about": "The unique topic ID."}, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", "about": "The topic partitions.", "fields": [ { "name": "PartitionIndex", "type": "int32", "versions": "0+", @@ -67,11 +62,11 @@ { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "about": "The latest known leader epoch." } ]}, - { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0", "about": "The record data." }, + { "name": "Records", "type": "records", "versions": "0+", "nullableVersions": "0+", "about": "The record data."}, { "name": "AcquiredRecords", "type": "[]AcquiredRecords", "versions": "0+", "about": "The acquired records.", "fields": [ - { "name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records." }, - { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records." }, - { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records." } + {"name": "FirstOffset", "type": "int64", "versions": "0+", "about": "The earliest offset in this batch of acquired records."}, + {"name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this batch of acquired records."}, + {"name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count of this batch of acquired records."} ]} ]} ]}, diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json index 897b8bc7b2a94..5efd435939db1 100644 --- a/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupDescribeRequest.json @@ -18,11 +18,12 @@ "type": "request", "listeners": ["broker"], "name": "ShareGroupDescribeRequest", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", + // The ShareGroupDescribeRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, "fields": [ { "name": "GroupIds", "type": "[]string", "versions": "0+", "entityType": "groupId", "about": "The ids of the groups to describe." }, diff --git a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json index 57595c1b51c9b..c093b788bfc2f 100644 --- a/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json +++ b/clients/src/main/resources/common/message/ShareGroupDescribeResponse.json @@ -17,20 +17,16 @@ "apiKey": 77, "type": "response", "name": "ShareGroupDescribeResponse", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 1+) // - NOT_COORDINATOR (version 0+) // - COORDINATOR_NOT_AVAILABLE (version 0+) // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) // - INVALID_GROUP_ID (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) - // - INVALID_REQUEST (version 0+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json index 37d5d04efb2a4..523150a92476c 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatRequest.json @@ -18,19 +18,20 @@ "type": "request", "listeners": ["broker"], "name": "ShareGroupHeartbeatRequest", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", + // The ShareGroupHeartbeatRequest API is added as part of KIP-932 and is still under + // development. Hence, the API is not exposed by default by brokers unless + // explicitly enabled. + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", "entityType": "groupId", "about": "The group identifier." }, { "name": "MemberId", "type": "string", "versions": "0+", - "about": "The member id generated by the consumer. The member id must be kept during the entire lifetime of the consumer process." }, + "about": "The member id." }, { "name": "MemberEpoch", "type": "int32", "versions": "0+", "about": "The current member epoch; 0 to join the group; -1 to leave the group." }, - { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", + { "name": "RackId", "type": "string", "versions": "0+", "nullableVersions": "0+", "default": "null", "about": "null if not provided or if it didn't change since the last heartbeat; the rack ID of consumer otherwise." }, { "name": "SubscribedTopicNames", "type": "[]string", "versions": "0+", "nullableVersions": "0+", "default": "null", "entityType": "topicName", "about": "null if it didn't change since the last heartbeat; the subscribed topic names otherwise." } diff --git a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json index c12eb3dca2021..e0ff5a93d54ee 100644 --- a/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json +++ b/clients/src/main/resources/common/message/ShareGroupHeartbeatResponse.json @@ -17,20 +17,16 @@ "apiKey": 76, "type": "response", "name": "ShareGroupHeartbeatResponse", - // Version 0 was used for early access of KIP-932 in Apache Kafka 4.0 but removed in Apacke Kafka 4.1. - // - // Version 1 is the initial stable version (KIP-932). - "validVersions": "1", + "validVersions": "0", "flexibleVersions": "0+", // Supported errors: // - GROUP_AUTHORIZATION_FAILED (version 0+) - // - TOPIC_AUTHORIZATION_FAILED (version 1+) - // - NOT_COORDINATOR (version 0+) + // - NOT_COORDINATOR (version 0+) // - COORDINATOR_NOT_AVAILABLE (version 0+) // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) + // - INVALID_REQUEST (version 0+) // - UNKNOWN_MEMBER_ID (version 0+) // - GROUP_MAX_SIZE_REACHED (version 0+) - // - INVALID_REQUEST (version 0+) "fields": [ { "name": "ThrottleTimeMs", "type": "int32", "versions": "0+", "about": "The duration in milliseconds for which the request was throttled due to a quota violation, or zero if the request did not violate any quota." }, diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json index 9ebe169c8d6ac..c0584542739ea 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateRequest.json @@ -20,19 +20,20 @@ "name": "WriteShareGroupStateRequest", "validVersions": "0", "flexibleVersions": "0+", + "latestVersionUnstable": true, "fields": [ { "name": "GroupId", "type": "string", "versions": "0+", - "about": "The group identifier." }, + "about":"The group identifier." }, { "name": "Topics", "type": "[]WriteStateData", "versions": "0+", "about": "The data for the topics.", "fields": [ { "name": "TopicId", "type": "uuid", "versions": "0+", "about": "The topic identifier." }, { "name": "Partitions", "type": "[]PartitionData", "versions": "0+", - "about": "The data for the partitions.", "fields": [ + "about": "The data for the partitions.", "fields": [ { "name": "Partition", "type": "int32", "versions": "0+", "about": "The partition index." }, { "name": "StateEpoch", "type": "int32", "versions": "0+", - "about": "The state epoch of the share-partition." }, + "about": "The state epoch for this share-partition." }, { "name": "LeaderEpoch", "type": "int32", "versions": "0+", "about": "The leader epoch of the share-partition." }, { "name": "StartOffset", "type": "int64", "versions": "0+", @@ -40,11 +41,11 @@ { "name": "StateBatches", "type": "[]StateBatch", "versions": "0+", "about": "The state batches for the share-partition.", "fields": [ { "name": "FirstOffset", "type": "int64", "versions": "0+", - "about": "The first offset of this state batch." }, + "about": "The base offset of this state batch." }, { "name": "LastOffset", "type": "int64", "versions": "0+", "about": "The last offset of this state batch." }, { "name": "DeliveryState", "type": "int8", "versions": "0+", - "about": "The delivery state - 0:Available,2:Acked,4:Archived." }, + "about": "The state - 0:Available,2:Acked,4:Archived." }, { "name": "DeliveryCount", "type": "int16", "versions": "0+", "about": "The delivery count." } ]} diff --git a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json index 8d4050476519c..e529126c44b77 100644 --- a/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json +++ b/clients/src/main/resources/common/message/WriteShareGroupStateResponse.json @@ -24,7 +24,6 @@ // - COORDINATOR_LOAD_IN_PROGRESS (version 0+) // - GROUP_ID_NOT_FOUND (version 0+) // - UNKNOWN_TOPIC_OR_PARTITION (version 0+) - // - FENCED_LEADER_EPOCH (version 0+) // - FENCED_STATE_EPOCH (version 0+) // - INVALID_REQUEST (version 0+) "fields": [ diff --git a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java index 9812f490ddd7f..f647d95445f00 100644 --- a/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/ClusterConnectionStatesTest.java @@ -186,7 +186,7 @@ public void testAuthorizationFailed() { connectionStates.authenticationFailed(nodeId1, time.milliseconds(), new AuthenticationException("No path to CA for certificate!")); time.sleep(1000); - assertEquals(ConnectionState.AUTHENTICATION_FAILED, connectionStates.connectionState(nodeId1)); + assertEquals(connectionStates.connectionState(nodeId1), ConnectionState.AUTHENTICATION_FAILED); assertNotNull(connectionStates.authenticationException(nodeId1)); assertFalse(connectionStates.hasReadyNodes(time.milliseconds())); assertFalse(connectionStates.canConnect(nodeId1, time.milliseconds())); @@ -210,7 +210,7 @@ public void testRemoveNode() { connectionStates.remove(nodeId1); assertTrue(connectionStates.canConnect(nodeId1, time.milliseconds())); assertFalse(connectionStates.isBlackedOut(nodeId1, time.milliseconds())); - assertEquals(0L, connectionStates.connectionDelay(nodeId1, time.milliseconds())); + assertEquals(connectionStates.connectionDelay(nodeId1, time.milliseconds()), 0L); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java index 1c50666c6afd0..3166206f0a41f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/FetchSessionHandlerTest.java @@ -217,8 +217,7 @@ public void testSessionless() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 0, 0), - new RespEntry("foo", 1, fooId, 0, 0)), - List.of()); + new RespEntry("foo", 1, fooId, 0, 0))); handler.handleResponse(resp, version); FetchSessionHandler.Builder builder2 = handler.newBuilder(); @@ -259,8 +258,7 @@ public void testIncrementals() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), - new RespEntry("foo", 1, fooId, 10, 20)), - List.of()); + new RespEntry("foo", 1, fooId, 10, 20))); handler.handleResponse(resp, version); // Test an incremental fetch request which adds one partition and modifies another. @@ -282,15 +280,13 @@ public void testIncrementals() { data2.toSend()); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 1, fooId, 20, 20)), - List.of()); + respMap(new RespEntry("foo", 1, fooId, 20, 20))); handler.handleResponse(resp2, version); // Skip building a new request. Test that handling an invalid fetch session epoch response results // in a request which closes the session. FetchResponse resp3 = FetchResponse.of(Errors.INVALID_FETCH_SESSION_EPOCH, 0, INVALID_SESSION_ID, - respMap(), - List.of()); + respMap()); handler.handleResponse(resp3, version); FetchSessionHandler.Builder builder4 = handler.newBuilder(); @@ -350,8 +346,7 @@ public void testIncrementalPartitionRemoval() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20)), - List.of()); + new RespEntry("bar", 0, barId, 10, 20))); handler.handleResponse(resp, version); // Test an incremental fetch request which removes two partitions. @@ -371,9 +366,8 @@ public void testIncrementalPartitionRemoval() { // A FETCH_SESSION_ID_NOT_FOUND response triggers us to close the session. // The next request is a session establishing FULL request. - FetchResponse resp2 = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, - respMap(), - List.of()); + FetchResponse resp2 = FetchResponse.of(Errors.FETCH_SESSION_ID_NOT_FOUND, 0, INVALID_SESSION_ID, + respMap()); handler.handleResponse(resp2, version); FetchSessionHandler.Builder builder3 = handler.newBuilder(); @@ -405,8 +399,7 @@ public void testTopicIdUsageGrantedOnIdUpgrade() { assertFalse(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, Uuid.ZERO_UUID, 10, 20)), - List.of()); + respMap(new RespEntry("foo", 0, Uuid.ZERO_UUID, 10, 20))); handler.handleResponse(resp, (short) 12); // Try to add a topic ID to an already existing topic partition (0) or a new partition (1) in the session. @@ -443,8 +436,7 @@ public void testIdUsageRevokedOnIdDowngrade() { assertTrue(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, fooId, 10, 20)), - List.of()); + respMap(new RespEntry("foo", 0, fooId, 10, 20))); handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Try to remove a topic ID from an existing topic partition (0) or add a new topic partition (1) without an ID. @@ -483,7 +475,7 @@ public void testTopicIdReplaced(boolean startsWithTopicIds, boolean endsWithTopi assertTrue(data.metadata().isFull()); assertEquals(startsWithTopicIds, data.canUseTopicIds()); - FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicId1, 10, 20)), List.of()); + FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicId1, 10, 20))); short version = startsWithTopicIds ? ApiKeys.FETCH.latestVersion() : 12; handler.handleResponse(resp, version); @@ -556,7 +548,7 @@ public void testSessionEpochWhenMixedUsageOfTopicIDs(boolean startsWithTopicIds) assertEquals(startsWithTopicIds, data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, fooId, 10, 20)), List.of()); + respMap(new RespEntry("foo", 0, fooId, 10, 20))); handler.handleResponse(resp, responseVersion); // Re-add the first partition. Then add a partition with opposite ID usage. @@ -591,8 +583,7 @@ public void testIdUsageWithAllForgottenPartitions(boolean useTopicIds) { assertEquals(useTopicIds, data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, topicId, 10, 20)), - List.of()); + respMap(new RespEntry("foo", 0, topicId, 10, 20))); handler.handleResponse(resp, responseVersion); // Remove the topic from the session @@ -619,8 +610,7 @@ public void testOkToAddNewIdAfterTopicRemovedFromSession() { assertTrue(data.canUseTopicIds()); FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, - respMap(new RespEntry("foo", 0, topicId, 10, 20)), - List.of()); + respMap(new RespEntry("foo", 0, topicId, 10, 20))); handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Remove the partition from the session. Return a session ID as though the session is still open. @@ -629,8 +619,7 @@ public void testOkToAddNewIdAfterTopicRemovedFromSession() { assertMapsEqual(new LinkedHashMap<>(), data2.toSend(), data2.sessionPartitions()); FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, - new LinkedHashMap<>(), - List.of()); + new LinkedHashMap<>()); handler.handleResponse(resp2, ApiKeys.FETCH.latestVersion()); // After the topic is removed, add a recreated topic with a new ID. @@ -662,8 +651,7 @@ public void testVerifyFullFetchResponsePartitions() { FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20)), - List.of()); + new RespEntry("bar", 0, barId, 10, 20))); String issue = handler.verifyFullFetchResponsePartitions(resp1.responseData(topicNames, version).keySet(), resp1.topicIds(), version); assertTrue(issue.contains("extraPartitions=")); @@ -676,15 +664,13 @@ public void testVerifyFullFetchResponsePartitions() { FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), new RespEntry("foo", 1, fooId, 10, 20), - new RespEntry("bar", 0, barId, 10, 20)), - List.of()); + new RespEntry("bar", 0, barId, 10, 20))); String issue2 = handler.verifyFullFetchResponsePartitions(resp2.responseData(topicNames, version).keySet(), resp2.topicIds(), version); assertNull(issue2); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, fooId, 10, 20), - new RespEntry("foo", 1, fooId, 10, 20)), - List.of()); + new RespEntry("foo", 1, fooId, 10, 20))); String issue3 = handler.verifyFullFetchResponsePartitions(resp3.responseData(topicNames, version).keySet(), resp3.topicIds(), version); assertFalse(issue3.contains("extraPartitions=")); @@ -703,8 +689,7 @@ public void testVerifyFullFetchResponsePartitionsWithTopicIds() { FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), new RespEntry("extra2", 1, topicIds.get("extra2"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), - List.of()); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); String issue = handler.verifyFullFetchResponsePartitions(resp1.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp1.topicIds(), ApiKeys.FETCH.latestVersion()); assertTrue(issue.contains("extraPartitions=")); @@ -718,16 +703,14 @@ public void testVerifyFullFetchResponsePartitionsWithTopicIds() { FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), new RespEntry("extra2", 1, topicIds.get("extra2"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), - List.of()); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); String issue2 = handler.verifyFullFetchResponsePartitions(resp2.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp2.topicIds(), ApiKeys.FETCH.latestVersion()); assertTrue(issue2.contains("extraPartitions=")); assertFalse(issue2.contains("omittedPartitions=")); FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), - new RespEntry("bar", 0, topicIds.get("bar"), 10, 20)), - List.of()); + new RespEntry("bar", 0, topicIds.get("bar"), 10, 20))); String issue3 = handler.verifyFullFetchResponsePartitions(resp3.responseData(topicNames, ApiKeys.FETCH.latestVersion()).keySet(), resp3.topicIds(), ApiKeys.FETCH.latestVersion()); assertNull(issue3); @@ -751,8 +734,7 @@ public void testTopLevelErrorResetsMetadata() { FetchResponse resp = FetchResponse.of(Errors.NONE, 0, 123, respMap(new RespEntry("foo", 0, topicIds.get("foo"), 10, 20), - new RespEntry("foo", 1, topicIds.get("foo"), 10, 20)), - List.of()); + new RespEntry("foo", 1, topicIds.get("foo"), 10, 20))); handler.handleResponse(resp, ApiKeys.FETCH.latestVersion()); // Test an incremental fetch request which adds an ID unknown to the broker. @@ -767,8 +749,7 @@ public void testTopLevelErrorResetsMetadata() { // Return and handle a response with a top level error FetchResponse resp2 = FetchResponse.of(Errors.UNKNOWN_TOPIC_ID, 0, 123, - respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID)), - List.of()); + respMap(new RespEntry("unknown", 0, Uuid.randomUuid(), Errors.UNKNOWN_TOPIC_ID))); assertFalse(handler.handleResponse(resp2, ApiKeys.FETCH.latestVersion())); // Ensure we start with a new epoch. This will close the session in the next request. diff --git a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java index b60efe8950ff8..006de9d06d987 100644 --- a/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/InFlightRequestsTest.java @@ -102,7 +102,7 @@ public void testCompleteNext() { } @Test - public void testCompleteNextThrowsIfNoInFlights() { + public void testCompleteNextThrowsIfNoInflights() { assertThrows(IllegalStateException.class, () -> inFlightRequests.completeNext(dest)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java index 9ac7519100465..fcbdb2e7b6466 100644 --- a/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/MetadataTest.java @@ -32,7 +32,6 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.requests.MetadataRequest; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; @@ -44,6 +43,7 @@ import org.junit.jupiter.api.Test; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashMap; @@ -209,8 +209,8 @@ public void testIgnoreLeaderEpochInOlderMetadataResponse() { .setBrokers(new MetadataResponseBrokerCollection()); for (short version = ApiKeys.METADATA.oldestVersion(); version < 9; version++) { - Readable readable = MessageUtil.toByteBufferAccessor(data, version); - MetadataResponse response = MetadataResponse.parse(readable, version); + ByteBuffer buffer = MessageUtil.toByteBuffer(data, version); + MetadataResponse response = MetadataResponse.parse(buffer, version); assertFalse(response.hasReliableLeaderEpochs()); metadata.updateWithCurrentRequestVersion(response, false, 100); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); @@ -219,8 +219,8 @@ public void testIgnoreLeaderEpochInOlderMetadataResponse() { } for (short version = 9; version <= ApiKeys.METADATA.latestVersion(); version++) { - Readable readable = MessageUtil.toByteBufferAccessor(data, version); - MetadataResponse response = MetadataResponse.parse(readable, version); + ByteBuffer buffer = MessageUtil.toByteBuffer(data, version); + MetadataResponse response = MetadataResponse.parse(buffer, version); assertTrue(response.hasReliableLeaderEpochs()); metadata.updateWithCurrentRequestVersion(response, false, 100); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); @@ -361,28 +361,28 @@ public void testUpdateLastEpoch() { // Metadata with newer epoch is handled metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 10); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 1L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); // Don't update to an older one assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 1)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); // Don't cause update if it's the same one assertFalse(metadata.updateLastSeenEpochIfNewer(tp, 10)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(10, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 10)); // Update if we see newer epoch assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 12)); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 12); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 2L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); // Don't overwrite metadata with older epoch metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.singletonMap("topic-1", 1), _tp -> 11); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 3L); - assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(12, leaderAndEpoch.intValue())); + assertOptional(metadata.lastSeenLeaderEpoch(tp), leaderAndEpoch -> assertEquals(leaderAndEpoch.intValue(), 12)); } @Test @@ -465,7 +465,7 @@ public void testRejectOldMetadata() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNotNull(metadata.fetch().partition(tp)); assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent()); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); } // Fake an empty ISR, but with an older epoch, should reject it @@ -475,8 +475,8 @@ public void testRejectOldMetadata() { new MetadataResponse.PartitionMetadata(error, partition, leader, leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); - assertEquals(1, metadata.fetch().partition(tp).inSyncReplicas().length); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 1); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); } // Fake an empty ISR, with same epoch, accept it @@ -486,8 +486,8 @@ public void testRejectOldMetadata() { new MetadataResponse.PartitionMetadata(error, partition, leader, leaderEpoch, replicas, Collections.emptyList(), offlineReplicas), ApiKeys.METADATA.latestVersion(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); - assertEquals(0, metadata.fetch().partition(tp).inSyncReplicas().length); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.fetch().partition(tp).inSyncReplicas().length, 0); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); } // Empty metadata response, should not keep old partition but should keep the last-seen epoch @@ -495,7 +495,7 @@ public void testRejectOldMetadata() { MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), Collections.emptyMap()); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); assertNull(metadata.fetch().partition(tp)); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); } // Back in the metadata, with old epoch, should not get added @@ -503,7 +503,7 @@ public void testRejectOldMetadata() { MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 99); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNull(metadata.fetch().partition(tp)); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); } } @@ -522,31 +522,31 @@ public void testOutOfBandEpochUpdate() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 10L); assertNotNull(metadata.fetch().partition(tp)); assertTrue(metadata.lastSeenLeaderEpoch(tp).isPresent()); - assertEquals(100, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 100); // Simulate a leader epoch from another response, like a fetch response or list offsets assertTrue(metadata.updateLastSeenEpochIfNewer(tp, 101)); // Cache of partition stays, but current partition info is not available since it's stale assertNotNull(metadata.fetch().partition(tp)); - assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); + assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); // Metadata with older epoch is rejected, metadata state is unchanged metadata.updateWithCurrentRequestVersion(metadataResponse, false, 20L); assertNotNull(metadata.fetch().partition(tp)); - assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); + assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); assertFalse(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); // Metadata with equal or newer epoch is accepted metadataResponse = RequestTestUtils.metadataUpdateWith("dummy", 1, Collections.emptyMap(), partitionCounts, _tp -> 101); metadata.updateWithCurrentRequestVersion(metadataResponse, false, 30L); assertNotNull(metadata.fetch().partition(tp)); - assertEquals(5, Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue()); + assertEquals(Objects.requireNonNull(metadata.fetch().partitionCountForTopic("topic-1")).longValue(), 5); assertTrue(metadata.partitionMetadataIfCurrent(tp).isPresent()); - assertEquals(101, metadata.lastSeenLeaderEpoch(tp).get().longValue()); + assertEquals(metadata.lastSeenLeaderEpoch(tp).get().longValue(), 101); } @Test @@ -585,18 +585,18 @@ public void testClusterCopy() { metadata.updateWithCurrentRequestVersion(metadataResponse, false, 0L); Cluster cluster = metadata.fetch(); - assertEquals("dummy", cluster.clusterResource().clusterId()); - assertEquals(4, cluster.nodes().size()); + assertEquals(cluster.clusterResource().clusterId(), "dummy"); + assertEquals(cluster.nodes().size(), 4); // topic counts assertEquals(cluster.invalidTopics(), Collections.singleton("topic3")); assertEquals(cluster.unauthorizedTopics(), Collections.singleton("topic4")); - assertEquals(3, cluster.topics().size()); + assertEquals(cluster.topics().size(), 3); assertEquals(cluster.internalTopics(), Collections.singleton(Topic.GROUP_METADATA_TOPIC_NAME)); // partition counts - assertEquals(2, cluster.partitionsForTopic("topic1").size()); - assertEquals(3, cluster.partitionsForTopic("topic2").size()); + assertEquals(cluster.partitionsForTopic("topic1").size(), 2); + assertEquals(cluster.partitionsForTopic("topic2").size(), 3); // Sentinel instances InetSocketAddress address = InetSocketAddress.createUnresolved("localhost", 0); @@ -798,10 +798,10 @@ public void testNodeIfOffline() { TopicPartition tp = new TopicPartition("topic-1", 0); - assertOptional(metadata.fetch().nodeIfOnline(tp, 0), node -> assertEquals(0, node.id())); + assertOptional(metadata.fetch().nodeIfOnline(tp, 0), node -> assertEquals(node.id(), 0)); assertFalse(metadata.fetch().nodeIfOnline(tp, 1).isPresent()); - assertEquals(0, metadata.fetch().nodeById(0).id()); - assertEquals(1, metadata.fetch().nodeById(1).id()); + assertEquals(metadata.fetch().nodeById(0).id(), 0); + assertEquals(metadata.fetch().nodeById(1).id(), 1); } @Test @@ -831,7 +831,7 @@ public void testNodeIfOnlineNonExistentTopicPartition() { TopicPartition tp = new TopicPartition("topic-1", 0); - assertEquals(0, metadata.fetch().nodeById(0).id()); + assertEquals(metadata.fetch().nodeById(0).id(), 0); assertNull(metadata.fetch().partition(tp)); assertEquals(metadata.fetch().nodeIfOnline(tp, 0), Optional.empty()); } @@ -955,13 +955,13 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { // Update the metadata to add a new topic variant, "new", which will be retained with "keep". Note this // means that all of the "old" topics should be dropped. Cluster cluster = metadata.fetch(); - assertEquals(oldClusterId, cluster.clusterResource().clusterId()); - assertEquals(oldNodes, cluster.nodes().size()); - assertEquals(cluster.invalidTopics(), Set.of("oldInvalidTopic", "keepInvalidTopic")); - assertEquals(cluster.unauthorizedTopics(), Set.of("oldUnauthorizedTopic", "keepUnauthorizedTopic")); - assertEquals(cluster.topics(), Set.of("oldValidTopic", "keepValidTopic")); - assertEquals(2, cluster.partitionsForTopic("oldValidTopic").size()); - assertEquals(3, cluster.partitionsForTopic("keepValidTopic").size()); + assertEquals(cluster.clusterResource().clusterId(), oldClusterId); + assertEquals(cluster.nodes().size(), oldNodes); + assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("oldInvalidTopic", "keepInvalidTopic"))); + assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("oldUnauthorizedTopic", "keepUnauthorizedTopic"))); + assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("oldValidTopic", "keepValidTopic"))); + assertEquals(cluster.partitionsForTopic("oldValidTopic").size(), 2); + assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 3); assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values())); String newClusterId = "newClusterId"; @@ -990,13 +990,13 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { assertNull(metadataTopicIds2.get("oldValidTopic")); cluster = metadata.fetch(); - assertEquals(newClusterId, cluster.clusterResource().clusterId()); + assertEquals(cluster.clusterResource().clusterId(), newClusterId); assertEquals(cluster.nodes().size(), newNodes); - assertEquals(cluster.invalidTopics(), Set.of("keepInvalidTopic", "newInvalidTopic")); - assertEquals(cluster.unauthorizedTopics(), Set.of("keepUnauthorizedTopic", "newUnauthorizedTopic")); - assertEquals(cluster.topics(), Set.of("keepValidTopic", "newValidTopic")); - assertEquals(2, cluster.partitionsForTopic("keepValidTopic").size()); - assertEquals(4, cluster.partitionsForTopic("newValidTopic").size()); + assertEquals(cluster.invalidTopics(), new HashSet<>(Arrays.asList("keepInvalidTopic", "newInvalidTopic"))); + assertEquals(cluster.unauthorizedTopics(), new HashSet<>(Arrays.asList("keepUnauthorizedTopic", "newUnauthorizedTopic"))); + assertEquals(cluster.topics(), new HashSet<>(Arrays.asList("keepValidTopic", "newValidTopic"))); + assertEquals(cluster.partitionsForTopic("keepValidTopic").size(), 2); + assertEquals(cluster.partitionsForTopic("newValidTopic").size(), 4); assertEquals(new HashSet<>(cluster.topicIds()), new HashSet<>(topicIds.values())); // Perform another metadata update, but this time all topic metadata should be cleared. @@ -1008,7 +1008,7 @@ protected boolean retainTopic(String topic, boolean isInternal, long nowMs) { topicIds.forEach((topicName, topicId) -> assertNull(metadataTopicIds3.get(topicName))); cluster = metadata.fetch(); - assertEquals(newClusterId, cluster.clusterResource().clusterId()); + assertEquals(cluster.clusterResource().clusterId(), newClusterId); assertEquals(cluster.nodes().size(), newNodes); assertEquals(cluster.invalidTopics(), Collections.emptySet()); assertEquals(cluster.unauthorizedTopics(), Collections.emptySet()); diff --git a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java index e8dcf5843dcb8..cef48b65bb69f 100644 --- a/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/NetworkClientTest.java @@ -748,6 +748,24 @@ public void testConnectionThrottling() { assertEquals(0, client.throttleDelayMs(node, time.milliseconds())); } + // Creates expected ApiVersionsResponse from the specified node, where the max protocol version for the specified + // key is set to the specified version. + private ApiVersionsResponse createExpectedApiVersionsResponse(ApiKeys key, short maxVersion) { + ApiVersionCollection versionList = new ApiVersionCollection(); + for (ApiKeys apiKey : ApiKeys.values()) { + if (apiKey == key) { + versionList.add(new ApiVersion() + .setApiKey(apiKey.id) + .setMinVersion((short) 0) + .setMaxVersion(maxVersion)); + } else versionList.add(ApiVersionsResponse.toApiVersion(apiKey)); + } + return new ApiVersionsResponse(new ApiVersionsResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setApiKeys(versionList)); + } + private int sendEmptyProduceRequest() { return sendEmptyProduceRequest(client, node.idString()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java index 02c094433703f..1a549a6fdec7c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/AdminClientTestUtils.java @@ -31,8 +31,6 @@ import java.util.Collection; import java.util.Collections; import java.util.Map; -import java.util.Optional; -import java.util.Set; import java.util.stream.Collectors; public class AdminClientTestUtils { @@ -165,50 +163,17 @@ public static ListConsumerGroupOffsetsResult listConsumerGroupOffsetsResult(Stri return new ListConsumerGroupOffsetsResult(Collections.singletonMap(CoordinatorKey.byGroupId(group), future)); } - public static ListConfigResourcesResult listConfigResourcesResult(Map> resourceNames) { - Collection resources = resourceNames.entrySet().stream() - .flatMap(entry -> entry.getValue().stream() - .map(name -> new ConfigResource(entry.getKey(), name))) - .collect(Collectors.toList()); - return new ListConfigResourcesResult(KafkaFuture.completedFuture(resources)); + public static ListClientMetricsResourcesResult listClientMetricsResourcesResult(String... names) { + return new ListClientMetricsResourcesResult( + KafkaFuture.completedFuture(Arrays.stream(names) + .map(ClientMetricsResourceListing::new) + .collect(Collectors.toList()))); } - public static ListConfigResourcesResult listConfigResourcesResult(String... names) { - return new ListConfigResourcesResult( - KafkaFuture.completedFuture(Arrays.stream(names) - .map(name -> new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name)) - .collect(Collectors.toList()))); - } - - public static ListConfigResourcesResult listConfigResourcesResult(KafkaException exception) { - final KafkaFutureImpl> future = new KafkaFutureImpl<>(); + public static ListClientMetricsResourcesResult listClientMetricsResourcesResult(KafkaException exception) { + final KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.completeExceptionally(exception); - return new ListConfigResourcesResult(future); - } - - public static ListShareGroupOffsetsResult createListShareGroupOffsetsResult(Map>> groupOffsets) { - Map>> coordinatorFutures = groupOffsets.entrySet().stream() - .collect(Collectors.toMap( - entry -> CoordinatorKey.byGroupId(entry.getKey()), - Map.Entry::getValue - )); - return new ListShareGroupOffsetsResult(coordinatorFutures); - } - - public static ListOffsetsResult createListOffsetsResult(Map partitionOffsets) { - Map> futures = - partitionOffsets.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - entry -> KafkaFuture.completedFuture( - new ListOffsetsResult.ListOffsetsResultInfo( - entry.getValue().offset(), - System.currentTimeMillis(), - Optional.of(1) - ) - ) - )); - return new ListOffsetsResult(futures); + return new ListClientMetricsResourcesResult(future); } /** diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java index f3b1e73d72ef4..d09cca7ad665b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ConfigTest.java @@ -64,7 +64,7 @@ public void shouldImplementEqualsProperly() { assertEquals(config, config); assertEquals(config, new Config(config.entries())); assertNotEquals(new Config(Collections.singletonList(E1)), config); - assertNotEquals("this", config); + assertNotEquals(config, "this"); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java index 1e577b8319a42..b7dcab344bc82 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/DeleteConsumerGroupOffsetsResultTest.java @@ -59,11 +59,11 @@ public void setUp() { } @Test - public void testTopLevelErrorConstructor() { + public void testTopLevelErrorConstructor() throws InterruptedException { partitionFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); DeleteConsumerGroupOffsetsResult topLevelErrorResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, topLevelErrorResult.all()); + TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class); } @Test @@ -79,9 +79,9 @@ public void testPartitionMissingInResponseErrorConstructor() throws InterruptedE DeleteConsumerGroupOffsetsResult missingPartitionResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingPartitionResult.all()); + TestUtils.assertFutureError(missingPartitionResult.all(), IllegalArgumentException.class); assertNull(missingPartitionResult.partitionResult(tpZero).get()); - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingPartitionResult.partitionResult(tpOne)); + TestUtils.assertFutureError(missingPartitionResult.partitionResult(tpOne), IllegalArgumentException.class); } @Test @@ -110,9 +110,9 @@ private DeleteConsumerGroupOffsetsResult createAndVerifyPartitionLevelError() th DeleteConsumerGroupOffsetsResult partitionLevelErrorResult = new DeleteConsumerGroupOffsetsResult(partitionFutures, partitions); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, partitionLevelErrorResult.all()); + TestUtils.assertFutureError(partitionLevelErrorResult.all(), UnknownTopicOrPartitionException.class); assertNull(partitionLevelErrorResult.partitionResult(tpZero).get()); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, partitionLevelErrorResult.partitionResult(tpOne)); + TestUtils.assertFutureError(partitionLevelErrorResult.partitionResult(tpOne), UnknownTopicOrPartitionException.class); return partitionLevelErrorResult; } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java index 9084a25836efc..9f53182bca39d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/KafkaAdminClientTest.java @@ -53,12 +53,10 @@ import org.apache.kafka.common.errors.ApiException; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.ClusterAuthorizationException; -import org.apache.kafka.common.errors.DuplicateVoterException; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.GroupSubscribedToTopicException; import org.apache.kafka.common.errors.InvalidConfigurationException; -import org.apache.kafka.common.errors.InvalidReplicaAssignmentException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.LogDirNotFoundException; @@ -71,24 +69,19 @@ import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TopicDeletionDisabledException; import org.apache.kafka.common.errors.TopicExistsException; -import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnknownTopicIdException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.errors.VoterNotFoundException; import org.apache.kafka.common.feature.Features; import org.apache.kafka.common.internals.Topic; import org.apache.kafka.common.message.AddRaftVoterRequestData; import org.apache.kafka.common.message.AddRaftVoterResponseData; import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData; -import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse; -import org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirPartitionResult; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData.AlterReplicaLogDirTopicResult; -import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData; import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.message.ApiMessageType; import org.apache.kafka.common.message.ApiVersionsResponseData; @@ -105,8 +98,6 @@ import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult; import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection; import org.apache.kafka.common.message.DeleteRecordsResponseData; -import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData; import org.apache.kafka.common.message.DeleteTopicsResponseData; import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResult; import org.apache.kafka.common.message.DeleteTopicsResponseData.DeletableTopicResultCollection; @@ -120,8 +111,6 @@ import org.apache.kafka.common.message.DescribeLogDirsResponseData.DescribeLogDirsTopic; import org.apache.kafka.common.message.DescribeProducersResponseData; import org.apache.kafka.common.message.DescribeQuorumResponseData; -import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData; import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData.DescribeTopicPartitionsResponsePartition; @@ -140,14 +129,11 @@ import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse; -import org.apache.kafka.common.message.ListConfigResourcesResponseData; +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; import org.apache.kafka.common.message.ListGroupsResponseData; -import org.apache.kafka.common.message.ListGroupsResponseData.ListedGroup; import org.apache.kafka.common.message.ListOffsetsResponseData; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData; -import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment; -import org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment; import org.apache.kafka.common.message.ListTransactionsResponseData; import org.apache.kafka.common.message.MetadataResponseData; import org.apache.kafka.common.message.MetadataResponseData.MetadataResponsePartition; @@ -160,11 +146,9 @@ import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.RemoveRaftVoterRequestData; import org.apache.kafka.common.message.RemoveRaftVoterResponseData; import org.apache.kafka.common.message.ShareGroupDescribeResponseData; -import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; import org.apache.kafka.common.message.UnregisterBrokerResponseData; import org.apache.kafka.common.message.WriteTxnMarkersResponseData; import org.apache.kafka.common.protocol.ApiKeys; @@ -178,7 +162,6 @@ import org.apache.kafka.common.requests.AlterClientQuotasResponse; import org.apache.kafka.common.requests.AlterPartitionReassignmentsResponse; import org.apache.kafka.common.requests.AlterReplicaLogDirsResponse; -import org.apache.kafka.common.requests.AlterShareGroupOffsetsResponse; import org.apache.kafka.common.requests.AlterUserScramCredentialsResponse; import org.apache.kafka.common.requests.ApiError; import org.apache.kafka.common.requests.ApiVersionsRequest; @@ -193,8 +176,6 @@ import org.apache.kafka.common.requests.DeleteAclsResponse; import org.apache.kafka.common.requests.DeleteGroupsResponse; import org.apache.kafka.common.requests.DeleteRecordsResponse; -import org.apache.kafka.common.requests.DeleteShareGroupOffsetsRequest; -import org.apache.kafka.common.requests.DeleteShareGroupOffsetsResponse; import org.apache.kafka.common.requests.DeleteTopicsRequest; import org.apache.kafka.common.requests.DeleteTopicsResponse; import org.apache.kafka.common.requests.DescribeAclsResponse; @@ -209,8 +190,6 @@ import org.apache.kafka.common.requests.DescribeProducersResponse; import org.apache.kafka.common.requests.DescribeQuorumRequest; import org.apache.kafka.common.requests.DescribeQuorumResponse; -import org.apache.kafka.common.requests.DescribeShareGroupOffsetsRequest; -import org.apache.kafka.common.requests.DescribeShareGroupOffsetsResponse; import org.apache.kafka.common.requests.DescribeTopicPartitionsResponse; import org.apache.kafka.common.requests.DescribeTransactionsRequest; import org.apache.kafka.common.requests.DescribeTransactionsResponse; @@ -224,8 +203,8 @@ import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.LeaveGroupRequest; import org.apache.kafka.common.requests.LeaveGroupResponse; -import org.apache.kafka.common.requests.ListConfigResourcesRequest; -import org.apache.kafka.common.requests.ListConfigResourcesResponse; +import org.apache.kafka.common.requests.ListClientMetricsResourcesRequest; +import org.apache.kafka.common.requests.ListClientMetricsResourcesResponse; import org.apache.kafka.common.requests.ListGroupsRequest; import org.apache.kafka.common.requests.ListGroupsResponse; import org.apache.kafka.common.requests.ListOffsetsRequest; @@ -239,11 +218,11 @@ import org.apache.kafka.common.requests.OffsetDeleteResponse; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.requests.RemoveRaftVoterRequest; import org.apache.kafka.common.requests.RemoveRaftVoterResponse; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.ShareGroupDescribeResponse; -import org.apache.kafka.common.requests.StreamsGroupDescribeResponse; import org.apache.kafka.common.requests.UnregisterBrokerResponse; import org.apache.kafka.common.requests.UpdateFeaturesRequest; import org.apache.kafka.common.requests.UpdateFeaturesResponse; @@ -309,6 +288,10 @@ import static java.util.Collections.singletonList; import static java.util.Collections.singletonMap; import static org.apache.kafka.clients.admin.KafkaAdminClient.DEFAULT_LEAVE_GROUP_REASON; +import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignablePartitionResponse; +import static org.apache.kafka.common.message.AlterPartitionReassignmentsResponseData.ReassignableTopicResponse; +import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingPartitionReassignment; +import static org.apache.kafka.common.message.ListPartitionReassignmentsResponseData.OngoingTopicReassignment; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -336,6 +319,7 @@ public class KafkaAdminClientTest { private static final Logger log = LoggerFactory.getLogger(KafkaAdminClientTest.class); private static final String GROUP_ID = "group-0"; + private static final int THROTTLE = 10; public static final Uuid REPLICA_DIRECTORY_ID = Uuid.randomUuid(); @Test @@ -389,10 +373,10 @@ public void testParseSuccessfulDescribeClusterResponse(boolean includeController assertNull(cluster.controller()); } assertEquals("Ek8tjqq1QBWfnaoyHFZqDg", cluster.clusterResource().clusterId()); - assertEquals(Set.of( + assertEquals(new HashSet<>(asList( new Node(0, "controller0.com", 9092), new Node(1, "controller1.com", 9092), - new Node(2, "controller2.com", 9092)), new HashSet<>(cluster.nodes())); + new Node(2, "controller2.com", 9092))), new HashSet<>(cluster.nodes())); } @Test @@ -501,7 +485,7 @@ public void testExplicitlyEnableTelemetryReporter() { .map(r -> (ClientTelemetryReporter) r) .collect(Collectors.toList()); - assertEquals(1, telemetryReporterList.size()); + assertEquals(telemetryReporterList.size(), 1); } } @@ -551,8 +535,7 @@ public void testCloseAdminClient() { * Test if admin client can be closed in the callback invoked when * an api call completes. If calling {@link Admin#close()} in callback, AdminClient thread hangs */ - @Test - @Timeout(10) + @Test @Timeout(10) public void testCloseAdminClientInCallback() throws InterruptedException { MockTime time = new MockTime(); AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, mockCluster(3, 0)); @@ -869,7 +852,7 @@ public void testTimeoutWithoutMetadata() throws Exception { KafkaFuture future = env.adminClient().createTopics( singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(1000)).all(); - TestUtils.assertFutureThrows(TimeoutException.class, future); + TestUtils.assertFutureError(future, TimeoutException.class); } } @@ -952,7 +935,7 @@ public void testPropagatedMetadataFetchException() throws Exception { KafkaFuture future = env.adminClient().createTopics( singleton(new NewTopic("myTopic", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(1000)).all(); - TestUtils.assertFutureThrows(SaslAuthenticationException.class, future); + TestUtils.assertFutureError(future, SaslAuthenticationException.class); } } @@ -982,7 +965,7 @@ public void testCreateTopicsPartialResponse() throws Exception { new NewTopic("myTopic2", Collections.singletonMap(0, asList(0, 1, 2)))), new CreateTopicsOptions().timeoutMs(10000)); topicsResult.values().get("myTopic").get(); - TestUtils.assertFutureThrows(ApiException.class, topicsResult.values().get("myTopic2")); + TestUtils.assertFutureThrows(topicsResult.values().get("myTopic2"), ApiException.class); } } @@ -1086,7 +1069,7 @@ public void testCreateTopicsRetryThrottlingExceptionWhenEnabled() throws Excepti assertNull(result.values().get("topic1").get()); assertNull(result.values().get("topic2").get()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -1131,9 +1114,10 @@ public void testCreateTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeO time.sleep(defaultApiTimeout + 1); assertNull(result.values().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.values().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(0, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -1157,9 +1141,10 @@ public void testCreateTopicsDontRetryThrottlingExceptionWhenDisabled() throws Ex new CreateTopicsOptions().retryOnQuotaViolation(false)); assertNull(result.values().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.values().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -1194,14 +1179,14 @@ public void testDeleteTopics() throws Exception { prepareDeleteTopicsResponse("myTopic", Errors.TOPIC_DELETION_DISABLED)); future = env.adminClient().deleteTopics(singletonList("myTopic"), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureThrows(TopicDeletionDisabledException.class, future); + TestUtils.assertFutureError(future, TopicDeletionDisabledException.class); env.kafkaClient().prepareResponse( expectDeleteTopicsRequestWithTopics("myTopic"), prepareDeleteTopicsResponse("myTopic", Errors.UNKNOWN_TOPIC_OR_PARTITION)); future = env.adminClient().deleteTopics(singletonList("myTopic"), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, future); + TestUtils.assertFutureError(future, UnknownTopicOrPartitionException.class); // With topic IDs Uuid topicId = Uuid.randomUuid(); @@ -1218,14 +1203,14 @@ public void testDeleteTopics() throws Exception { prepareDeleteTopicsResponseWithTopicId(topicId, Errors.TOPIC_DELETION_DISABLED)); future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureThrows(TopicDeletionDisabledException.class, future); + TestUtils.assertFutureError(future, TopicDeletionDisabledException.class); env.kafkaClient().prepareResponse( expectDeleteTopicsRequestWithTopicIds(topicId), prepareDeleteTopicsResponseWithTopicId(topicId, Errors.UNKNOWN_TOPIC_ID)); future = env.adminClient().deleteTopics(TopicCollection.ofTopicIds(singletonList(topicId)), new DeleteTopicsOptions()).all(); - TestUtils.assertFutureThrows(UnknownTopicIdException.class, future); + TestUtils.assertFutureError(future, UnknownTopicIdException.class); } } @@ -1244,7 +1229,7 @@ public void testDeleteTopicsPartialResponse() throws Exception { asList("myTopic", "myOtherTopic"), new DeleteTopicsOptions()); result.topicNameValues().get("myTopic").get(); - TestUtils.assertFutureThrows(ApiException.class, result.topicNameValues().get("myOtherTopic")); + TestUtils.assertFutureThrows(result.topicNameValues().get("myOtherTopic"), ApiException.class); // With topic IDs Uuid topicId1 = Uuid.randomUuid(); @@ -1258,7 +1243,7 @@ public void testDeleteTopicsPartialResponse() throws Exception { TopicCollection.ofTopicIds(asList(topicId1, topicId2)), new DeleteTopicsOptions()); resultIds.topicIdValues().get(topicId1).get(); - TestUtils.assertFutureThrows(ApiException.class, resultIds.topicIdValues().get(topicId2)); + TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), ApiException.class); } } @@ -1290,7 +1275,7 @@ public void testDeleteTopicsRetryThrottlingExceptionWhenEnabled() throws Excepti assertNull(result.topicNameValues().get("topic1").get()); assertNull(result.topicNameValues().get("topic2").get()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.topicNameValues().get("topic3")); + TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class); // With topic IDs Uuid topicId1 = Uuid.randomUuid(); @@ -1320,7 +1305,7 @@ public void testDeleteTopicsRetryThrottlingExceptionWhenEnabled() throws Excepti assertNull(resultIds.topicIdValues().get(topicId1).get()); assertNull(resultIds.topicIdValues().get(topicId2).get()); - TestUtils.assertFutureThrows(UnknownTopicIdException.class, resultIds.topicIdValues().get(topicId3)); + TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class); } } @@ -1362,9 +1347,10 @@ public void testDeleteTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeO time.sleep(defaultApiTimeout + 1); assertNull(result.topicNameValues().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.topicNameValues().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.topicNameValues().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(0, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.topicNameValues().get("topic3")); + TestUtils.assertFutureThrows(result.topicNameValues().get("topic3"), TopicExistsException.class); // With topic IDs Uuid topicId1 = Uuid.randomUuid(); @@ -1398,9 +1384,10 @@ public void testDeleteTopicsRetryThrottlingExceptionWhenEnabledUntilRequestTimeO time.sleep(defaultApiTimeout + 1); assertNull(resultIds.topicIdValues().get(topicId1).get()); - e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, resultIds.topicIdValues().get(topicId2)); + e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), + ThrottlingQuotaExceededException.class); assertEquals(0, e.throttleTimeMs()); - TestUtils.assertFutureThrows(UnknownTopicIdException.class, resultIds.topicIdValues().get(topicId3)); + TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class); } } @@ -1421,9 +1408,10 @@ public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Ex new DeleteTopicsOptions().retryOnQuotaViolation(false)); assertNull(result.topicNameValues().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.topicNameValues().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.topicNameValues().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.topicNameValues().get("topic3")); + TestUtils.assertFutureError(result.topicNameValues().get("topic3"), TopicExistsException.class); // With topic IDs Uuid topicId1 = Uuid.randomUuid(); @@ -1441,9 +1429,10 @@ public void testDeleteTopicsDontRetryThrottlingExceptionWhenDisabled() throws Ex new DeleteTopicsOptions().retryOnQuotaViolation(false)); assertNull(resultIds.topicIdValues().get(topicId1).get()); - e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, resultIds.topicIdValues().get(topicId2)); + e = TestUtils.assertFutureThrows(resultIds.topicIdValues().get(topicId2), + ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureThrows(UnknownTopicIdException.class, resultIds.topicIdValues().get(topicId3)); + TestUtils.assertFutureError(resultIds.topicIdValues().get(topicId3), UnknownTopicIdException.class); } } @@ -1475,14 +1464,14 @@ public void testInvalidTopicNames() throws Exception { List sillyTopicNames = asList("", null); Map> deleteFutures = env.adminClient().deleteTopics(sillyTopicNames).topicNameValues(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureThrows(InvalidTopicException.class, deleteFutures.get(sillyTopicName)); + TestUtils.assertFutureError(deleteFutures.get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); Map> describeFutures = env.adminClient().describeTopics(sillyTopicNames).topicNameValues(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureThrows(InvalidTopicException.class, describeFutures.get(sillyTopicName)); + TestUtils.assertFutureError(describeFutures.get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); @@ -1493,7 +1482,7 @@ public void testInvalidTopicNames() throws Exception { Map> createFutures = env.adminClient().createTopics(newTopics).values(); for (String sillyTopicName : sillyTopicNames) { - TestUtils.assertFutureThrows(InvalidTopicException.class, createFutures.get(sillyTopicName)); + TestUtils.assertFutureError(createFutures .get(sillyTopicName), InvalidTopicException.class); } assertEquals(0, env.kafkaClient().inFlightRequestCount()); } @@ -1592,7 +1581,7 @@ public void testDescribeTopicPartitionsApiWithAuthorizedOps() throws ExecutionEx Map topicDescriptions = result.allTopicNames().get(); TopicDescription topicDescription = topicDescriptions.get(topicName0); - assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER), + assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)), topicDescription.authorizedOperations()); } } @@ -1789,7 +1778,7 @@ public void testDescribeTopicsWithDescribeTopicPartitionsApiErrorHandling() thro asList(topicName1, topicName0), new DescribeTopicsOptions() ); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.allTopicNames()); + TestUtils.assertFutureError(result.allTopicNames(), TopicAuthorizationException.class); } } @@ -1888,10 +1877,11 @@ public void testDescribeAcls() throws Exception { env.kafkaClient().prepareResponse(new DescribeAclsResponse(new DescribeAclsResponseData() .setErrorCode(Errors.SECURITY_DISABLED.code()) .setErrorMessage("Security is disabled"), ApiKeys.DESCRIBE_ACLS.latestVersion())); - TestUtils.assertFutureThrows(SecurityDisabledException.class, env.adminClient().describeAcls(FILTER2).values()); + TestUtils.assertFutureError(env.adminClient().describeAcls(FILTER2).values(), SecurityDisabledException.class); // Test a call where we supply an invalid filter. - TestUtils.assertFutureThrows(InvalidRequestException.class, env.adminClient().describeAcls(UNKNOWN_FILTER).values()); + TestUtils.assertFutureError(env.adminClient().describeAcls(UNKNOWN_FILTER).values(), + InvalidRequestException.class); } } @@ -1983,9 +1973,9 @@ public void testCreateAcls() throws Exception { new CreateAclsResponseData.AclCreationResult())))); results = env.adminClient().createAcls(asList(ACL1, ACL2)); assertCollectionIs(results.values().keySet(), ACL1, ACL2); - TestUtils.assertFutureThrows(SecurityDisabledException.class, results.values().get(ACL1)); + TestUtils.assertFutureError(results.values().get(ACL1), SecurityDisabledException.class); results.values().get(ACL2).get(); - TestUtils.assertFutureThrows(SecurityDisabledException.class, results.all()); + TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); } } @@ -2013,8 +2003,8 @@ public void testDeleteAcls() throws Exception { assertEquals(ACL1, filter1Results.values().get(0).binding()); assertNull(filter1Results.values().get(1).exception()); assertEquals(ACL2, filter1Results.values().get(1).binding()); - TestUtils.assertFutureThrows(SecurityDisabledException.class, filterResults.get(FILTER2)); - TestUtils.assertFutureThrows(SecurityDisabledException.class, results.all()); + TestUtils.assertFutureError(filterResults.get(FILTER2), SecurityDisabledException.class); + TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); // Test a call where one deletion result has an error. env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData() @@ -2034,7 +2024,7 @@ public void testDeleteAcls() throws Exception { ApiKeys.DELETE_ACLS.latestVersion())); results = env.adminClient().deleteAcls(asList(FILTER1, FILTER2)); assertTrue(results.values().get(FILTER2).get().values().isEmpty()); - TestUtils.assertFutureThrows(SecurityDisabledException.class, results.all()); + TestUtils.assertFutureError(results.all(), SecurityDisabledException.class); // Test a call where there are no errors. env.kafkaClient().prepareResponse(new DeleteAclsResponse(new DeleteAclsResponseData() @@ -2084,8 +2074,8 @@ public void testElectLeaders() throws Exception { electionResults, ApiKeys.ELECT_LEADERS.latestVersion())); ElectLeadersResult results = env.adminClient().electLeaders( electionType, - Set.of(topic1, topic2)); - assertEquals(ClusterAuthorizationException.class, results.partitions().get().get(topic2).get().getClass()); + new HashSet<>(asList(topic1, topic2))); + assertEquals(results.partitions().get().get(topic2).get().getClass(), ClusterAuthorizationException.class); // Test a call where there are no errors. By mutating the internal of election results partition1Result.setErrorCode(ApiError.NONE.error().code()); @@ -2096,16 +2086,16 @@ public void testElectLeaders() throws Exception { env.kafkaClient().prepareResponse(new ElectLeadersResponse(0, Errors.NONE.code(), electionResults, ApiKeys.ELECT_LEADERS.latestVersion())); - results = env.adminClient().electLeaders(electionType, Set.of(topic1, topic2)); + results = env.adminClient().electLeaders(electionType, new HashSet<>(asList(topic1, topic2))); assertFalse(results.partitions().get().get(topic1).isPresent()); assertFalse(results.partitions().get().get(topic2).isPresent()); // Now try a timeout results = env.adminClient().electLeaders( electionType, - Set.of(topic1, topic2), + new HashSet<>(asList(topic1, topic2)), new ElectLeadersOptions().timeoutMs(100)); - TestUtils.assertFutureThrows(TimeoutException.class, results.partitions()); + TestUtils.assertFutureError(results.partitions(), TimeoutException.class); } } } @@ -2127,7 +2117,7 @@ public void testDescribeBrokerConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( broker0Resource, broker1Resource)).values(); - assertEquals(Set.of(broker0Resource, broker1Resource), result.keySet()); + assertEquals(new HashSet<>(asList(broker0Resource, broker1Resource)), result.keySet()); result.get(broker0Resource).get(); result.get(broker1Resource).get(); } @@ -2149,7 +2139,7 @@ public void testDescribeBrokerAndLogConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( brokerResource, brokerLoggerResource)).values(); - assertEquals(Set.of(brokerResource, brokerLoggerResource), result.keySet()); + assertEquals(new HashSet<>(asList(brokerResource, brokerLoggerResource)), result.keySet()); result.get(brokerResource).get(); result.get(brokerLoggerResource).get(); } @@ -2168,9 +2158,9 @@ public void testDescribeConfigsPartialResponse() { Map> result = env.adminClient().describeConfigs(asList( topic, topic2)).values(); - assertEquals(Set.of(topic, topic2), result.keySet()); + assertEquals(new HashSet<>(asList(topic, topic2)), result.keySet()); result.get(topic); - TestUtils.assertFutureThrows(ApiException.class, result.get(topic2)); + TestUtils.assertFutureThrows(result.get(topic2), ApiException.class); } } @@ -2189,7 +2179,7 @@ public void testDescribeConfigsUnrequested() throws Exception { .setConfigs(emptyList()))))); Map> result = env.adminClient().describeConfigs(singletonList( topic)).values(); - assertEquals(Set.of(topic), result.keySet()); + assertEquals(new HashSet<>(singletonList(topic)), result.keySet()); assertNotNull(result.get(topic).get()); assertNull(result.get(unrequested)); } @@ -2212,7 +2202,7 @@ public void testDescribeClientMetricsConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( resource, resource1)).values(); - assertEquals(Set.of(resource, resource1), result.keySet()); + assertEquals(new HashSet<>(asList(resource, resource1)), result.keySet()); assertNotNull(result.get(resource).get()); assertNotNull(result.get(resource1).get()); } @@ -2239,7 +2229,7 @@ public void testDescribeConsumerGroupConfigs() throws Exception { Map> result = env.adminClient().describeConfigs(asList( resource1, resource2)).values(); - assertEquals(Set.of(resource1, resource2), result.keySet()); + assertEquals(new HashSet<>(asList(resource1, resource2)), result.keySet()); assertNotNull(result.get(resource1).get()); assertNotNull(result.get(resource2).get()); } @@ -2291,7 +2281,7 @@ private static DescribeLogDirsResponse prepareDescribeLogDirsResponse(Errors err private static DescribeLogDirsResponse prepareEmptyDescribeLogDirsResponse(Optional error) { DescribeLogDirsResponseData data = new DescribeLogDirsResponseData(); - error.ifPresent(e -> data.setErrorCode(e.code())); + if (error.isPresent()) data.setErrorCode(error.get().code()); return new DescribeLogDirsResponse(data); } @@ -2578,7 +2568,7 @@ public void testCreatePartitionsRetryThrottlingExceptionWhenEnabled() throws Exc assertNull(result.values().get("topic1").get()); assertNull(result.values().get("topic2").get()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -2624,9 +2614,10 @@ public void testCreatePartitionsRetryThrottlingExceptionWhenEnabledUntilRequestT time.sleep(defaultApiTimeout + 1); assertNull(result.values().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.values().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(0, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -2651,9 +2642,10 @@ public void testCreatePartitionsDontRetryThrottlingExceptionWhenDisabled() throw counts, new CreatePartitionsOptions().retryOnQuotaViolation(false)); assertNull(result.values().get("topic1").get()); - ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(ThrottlingQuotaExceededException.class, result.values().get("topic2")); + ThrottlingQuotaExceededException e = TestUtils.assertFutureThrows(result.values().get("topic2"), + ThrottlingQuotaExceededException.class); assertEquals(1000, e.throttleTimeMs()); - TestUtils.assertFutureThrows(TopicExistsException.class, result.values().get("topic3")); + TestUtils.assertFutureThrows(result.values().get("topic3"), TopicExistsException.class); } } @@ -2688,7 +2680,7 @@ public void testDeleteRecordsTopicAuthorizationError() { recordsToDelete.put(partition, RecordsToDelete.beforeOffset(10L)); DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, results.lowWatermarks().get(partition)); + TestUtils.assertFutureThrows(results.lowWatermarks().get(partition), TopicAuthorizationException.class); } } @@ -2734,7 +2726,7 @@ public void testDeleteRecordsMultipleSends() throws Exception { DeleteRecordsResult results = env.adminClient().deleteRecords(recordsToDelete); assertEquals(3L, results.lowWatermarks().get(tp0).get().lowWatermark()); - TestUtils.assertFutureThrows(SaslAuthenticationException.class, results.lowWatermarks().get(tp1)); + TestUtils.assertFutureThrows(results.lowWatermarks().get(tp1), AuthenticationException.class); } } @@ -2856,13 +2848,13 @@ public void testDescribeTopicsByIds() throws ExecutionException, InterruptedExce DescribeTopicsResult result1 = env.adminClient().describeTopics( TopicCollection.ofTopicIds(singletonList(nonExistID))); - TestUtils.assertFutureThrows(UnknownTopicIdException.class, result1.allTopicIds()); + TestUtils.assertFutureError(result1.allTopicIds(), UnknownTopicIdException.class); Exception e = assertThrows(Exception.class, () -> result1.allTopicIds().get(), "describe with non-exist topic ID should throw exception"); assertEquals(String.format("org.apache.kafka.common.errors.UnknownTopicIdException: TopicId %s not found.", nonExistID), e.getMessage()); DescribeTopicsResult result2 = env.adminClient().describeTopics( TopicCollection.ofTopicIds(singletonList(Uuid.ZERO_UUID))); - TestUtils.assertFutureThrows(InvalidTopicException.class, result2.allTopicIds()); + TestUtils.assertFutureError(result2.allTopicIds(), InvalidTopicException.class); e = assertThrows(Exception.class, () -> result2.allTopicIds().get(), "describe with non-exist topic ID should throw exception"); assertEquals("The given topic id 'AAAAAAAAAAAAAAAAAAAAAA' cannot be represented in a request.", e.getCause().getMessage()); @@ -2905,7 +2897,7 @@ public void testDescribeCluster() throws Exception { assertEquals(env.cluster().clusterResource().clusterId(), result2.clusterId().get()); assertEquals(new HashSet<>(env.cluster().nodes()), new HashSet<>(result2.nodes().get())); assertEquals(3, result2.controller().get().id()); - assertEquals(Set.of(AclOperation.DESCRIBE, AclOperation.ALTER), + assertEquals(new HashSet<>(asList(AclOperation.DESCRIBE, AclOperation.ALTER)), result2.authorizedOperations().get()); } } @@ -2924,10 +2916,14 @@ public void testDescribeClusterHandleError() { .setErrorMessage(errorMessage))); final DescribeClusterResult result = env.adminClient().describeCluster(); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.clusterId(), errorMessage); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.controller(), errorMessage); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.nodes(), errorMessage); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.authorizedOperations(), errorMessage); + TestUtils.assertFutureThrows(result.clusterId(), + InvalidRequestException.class, errorMessage); + TestUtils.assertFutureThrows(result.controller(), + InvalidRequestException.class, errorMessage); + TestUtils.assertFutureThrows(result.nodes(), + InvalidRequestException.class, errorMessage); + TestUtils.assertFutureThrows(result.authorizedOperations(), + InvalidRequestException.class, errorMessage); } } @@ -3088,7 +3084,7 @@ public void testListGroups() throws Exception { env.cluster().nodeById(3)); final ListGroupsResult result = env.adminClient().listGroups(); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + TestUtils.assertFutureError(result.all(), UnknownServerException.class); Collection listings = result.valid().get(); assertEquals(6, listings.size()); @@ -3122,7 +3118,7 @@ public void testListGroupsMetadataFailure() throws Exception { Collections.emptyList())); final ListGroupsResult result = env.adminClient().listGroups(); - TestUtils.assertFutureThrows(KafkaException.class, result.all()); + TestUtils.assertFutureError(result.all(), KafkaException.class); } } @@ -3189,42 +3185,6 @@ public void testListGroupsEmptyGroupType() throws Exception { } } - @Test - public void testListGroupsWithProtocolTypes() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Test with list group options. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of()), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable") - .setGroupType(GroupType.CONSUMER.toString()), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setGroupState("Empty") - .setGroupType(GroupType.CONSUMER.toString())))), - env.cluster().nodeById(0)); - - final ListGroupsOptions options = new ListGroupsOptions().withProtocolTypes(Set.of("")); - final ListGroupsResult result = env.adminClient().listGroups(options); - Collection listing = result.valid().get(); - - assertEquals(1, listing.size()); - List expected = new ArrayList<>(); - expected.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY))); - assertEquals(expected, listing); - assertEquals(0, result.errors().get().size()); - } - } - @Test public void testListGroupsWithTypes() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { @@ -3263,58 +3223,25 @@ public void testListGroupsWithTypes() throws Exception { } @Test - public void testListGroupsWithTypesOlderBrokerVersion() throws Exception { + public void testListGroupsWithTypesOlderBrokerVersion() { ApiVersion listGroupV4 = new ApiVersion() .setApiKey(ApiKeys.LIST_GROUPS.id) .setMinVersion((short) 0) .setMaxVersion((short) 4); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); - - // Check that we cannot set a type filter with an older broker. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - env.kafkaClient().prepareUnsupportedVersionResponse(request -> - request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() - ); - - ListGroupsOptions options = new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE)); - ListGroupsResult result = env.adminClient().listGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4))); - // But a type filter which is just classic groups is permitted with an older broker, because they - // only know about classic groups so the types filter can be omitted. env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of()), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState(GroupState.STABLE.toString())))), - env.cluster().nodeById(0)); - - options = new ListGroupsOptions().withTypes(Set.of(GroupType.CLASSIC)); - result = env.adminClient().listGroups(options); - - Collection listing = result.all().get(); - assertEquals(1, listing.size()); - List expected = List.of( - new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)) - ); - assertEquals(expected, listing); - - // But a type filter which is just consumer groups without classic groups is not permitted with an older broker. + // Check that we cannot set a type filter with an older broker. env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() ); - options = new ListGroupsOptions().withTypes(Set.of(GroupType.CONSUMER)); - result = env.adminClient().listGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + ListGroupsOptions options = new ListGroupsOptions().withTypes(singleton(GroupType.CLASSIC)); + ListGroupsResult result = env.adminClient().listGroups(options); + TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } @@ -3331,7 +3258,7 @@ public void testDescribeClusterHandleUnsupportedVersionForIncludingFencedBrokers request -> request instanceof DescribeClusterRequest); final DescribeClusterResult result = env.adminClient().describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.nodes()); + TestUtils.assertFutureThrows(result.nodes(), UnsupportedVersionException.class); } } @@ -3343,100 +3270,100 @@ public void testListConsumerGroups() throws Exception { // Empty metadata response should be retried env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - List.of(), - env.cluster().clusterResource().clusterId(), - -1, - List.of())); + RequestTestUtils.metadataResponse( + Collections.emptyList(), + env.cluster().clusterResource().clusterId(), + -1, + Collections.emptyList())); env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - env.cluster().nodes(), - env.cluster().clusterResource().clusterId(), - env.cluster().controller().id(), - List.of())); + RequestTestUtils.metadataResponse( + env.cluster().nodes(), + env.cluster().clusterResource().clusterId(), + env.cluster().controller().id(), + Collections.emptyList())); env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-1") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(0)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(asList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-1") + .setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(0)); // handle retriable errors env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setGroups(Collections.emptyList()) - ), - env.cluster().nodeById(1)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + .setGroups(Collections.emptyList()) + ), + env.cluster().nodeById(1)); env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) - .setGroups(Collections.emptyList()) - ), - env.cluster().nodeById(1)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) + .setGroups(Collections.emptyList()) + ), + env.cluster().nodeById(1)); env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(asList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-2") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(1)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(asList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-2") + .setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(1)); env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-3") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-3") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(2)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(asList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-3") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-connect-3") + .setProtocolType("connector") + .setGroupState("Stable") + ))), + env.cluster().nodeById(2)); // fatal error env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - .setGroups(Collections.emptyList())), - env.cluster().nodeById(3)); + new ListGroupsResponse( + new ListGroupsResponseData() + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) + .setGroups(Collections.emptyList())), + env.cluster().nodeById(3)); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forConsumerGroups()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); + TestUtils.assertFutureError(result.all(), UnknownServerException.class); - Collection listings = result.valid().get(); + Collection listings = result.valid().get(); assertEquals(3, listings.size()); Set groupIds = new HashSet<>(); - for (GroupListing listing : listings) { + for (ConsumerGroupListing listing : listings) { groupIds.add(listing.groupId()); - assertTrue(listing.groupState().isPresent()); + assertTrue(listing.state().isPresent()); } assertEquals(Set.of("group-1", "group-2", "group-3"), groupIds); @@ -3456,14 +3383,14 @@ public void testListConsumerGroupsMetadataFailure() throws Exception { // Empty metadata causes the request to fail since we have no list of brokers // to send the ListGroups requests to env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - List.of(), - env.cluster().clusterResource().clusterId(), - -1, - List.of())); + RequestTestUtils.metadataResponse( + Collections.emptyList(), + env.cluster().clusterResource().clusterId(), + -1, + Collections.emptyList())); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forConsumerGroups()); - TestUtils.assertFutureThrows(KafkaException.class, result.all()); + final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); + TestUtils.assertFutureError(result.all(), KafkaException.class); } } @@ -3477,60 +3404,24 @@ public void testListConsumerGroupsWithStates() throws Exception { env.kafkaClient().prepareResponseFrom( new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setGroupState("Empty")))), + .setGroups(asList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) + .setGroupState("Stable"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-2") + .setGroupState("Empty")))), env.cluster().nodeById(0)); - final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups(); - final ListGroupsResult result = env.adminClient().listGroups(options); - Collection listings = result.valid().get(); + final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); + final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); + Collection listings = result.valid().get(); assertEquals(2, listings.size()); - List expected = new ArrayList<>(); - expected.add(new GroupListing("group-2", Optional.empty(), "", Optional.of(GroupState.EMPTY))); - expected.add(new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); - assertEquals(expected, listings); - assertEquals(0, result.errors().get().size()); - } - } - - @Test - public void testListConsumerGroupsWithProtocolTypes() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Test with a specific protocol type filter in list consumer group options. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString(), GroupType.CLASSIC.toString())), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable") - .setGroupType(GroupType.CONSUMER.toString()), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setGroupState("Empty") - .setGroupType(GroupType.CONSUMER.toString())))), - env.cluster().nodeById(0)); - - final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().withProtocolTypes(Set.of(ConsumerProtocol.PROTOCOL_TYPE)); - final ListGroupsResult result = env.adminClient().listGroups(options); - Collection listings = result.valid().get(); - - assertEquals(1, listings.size()); - List expected = new ArrayList<>(); - expected.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + List expected = new ArrayList<>(); + expected.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), true)); + expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false)); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); } @@ -3545,10 +3436,10 @@ public void testListConsumerGroupsWithTypes() throws Exception { env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of(GroupType.CONSUMER.toString(), GroupType.CLASSIC.toString())), + expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( + .setGroups(singletonList( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) @@ -3556,13 +3447,13 @@ public void testListConsumerGroupsWithTypes() throws Exception { .setGroupType(GroupType.CLASSIC.toString())))), env.cluster().nodeById(0)); - final ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); - final ListGroupsResult result = env.adminClient().listGroups(options); - Collection listings = result.valid().get(); + final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); + Collection listings = result.valid().get(); assertEquals(1, listings.size()); - List expected = new ArrayList<>(); - expected.add(new GroupListing("group-1", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + List expected = new ArrayList<>(); + expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CLASSIC), false)); assertEquals(expected, listings); assertEquals(0, result.errors().get().size()); @@ -3570,10 +3461,10 @@ public void testListConsumerGroupsWithTypes() throws Exception { env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString())), + expectListGroupsRequestWithFilters(Collections.emptySet(), singleton(GroupType.CONSUMER.toString())), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( + .setGroups(asList( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) @@ -3585,14 +3476,14 @@ public void testListConsumerGroupsWithTypes() throws Exception { .setGroupType(GroupType.CONSUMER.toString())))), env.cluster().nodeById(0)); - final ListGroupsOptions options2 = ListGroupsOptions.forConsumerGroups().withTypes(Set.of(GroupType.CONSUMER)); - final ListGroupsResult result2 = env.adminClient().listGroups(options2); - Collection listings2 = result2.valid().get(); + final ListConsumerGroupsOptions options2 = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CONSUMER)); + final ListConsumerGroupsResult result2 = env.adminClient().listConsumerGroups(options2); + Collection listings2 = result2.valid().get(); assertEquals(2, listings2.size()); - List expected2 = new ArrayList<>(); - expected2.add(new GroupListing("group-2", Optional.of(GroupType.CONSUMER), "", Optional.of(GroupState.EMPTY))); - expected2.add(new GroupListing("group-1", Optional.of(GroupType.CONSUMER), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE))); + List expected2 = new ArrayList<>(); + expected2.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), Optional.of(GroupType.CONSUMER), true)); + expected2.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CONSUMER), false)); assertEquals(expected2, listings2); assertEquals(0, result.errors().get().size()); } @@ -3605,38 +3496,34 @@ public void testListConsumerGroupsWithStatesOlderBrokerVersion() throws Exceptio .setMinVersion((short) 0) .setMaxVersion((short) 3); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV3))); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV3))); env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - // Check we can list groups v3 with older broker if we don't specify states, and use just consumer group types which can be omitted. + // Check we can list groups with older broker if we don't specify states env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), - env.cluster().nodeById(0)); - - ListGroupsOptions options = ListGroupsOptions.forConsumerGroups(); - ListGroupsResult result = env.adminClient().listGroups(options); - Collection listing = result.all().get(); + new ListGroupsResponse(new ListGroupsResponseData() + .setErrorCode(Errors.NONE.code()) + .setGroups(Collections.singletonList( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group-1") + .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), + env.cluster().nodeById(0)); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); + ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); + Collection listing = result.all().get(); assertEquals(1, listing.size()); - List expected = List.of(new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty())); + List expected = Collections.singletonList(new ConsumerGroupListing("group-1", false)); assertEquals(expected, listing); // But we cannot set a state filter with older broker env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + env.kafkaClient().prepareUnsupportedVersionResponse( + body -> body instanceof ListGroupsRequest); - env.kafkaClient().prepareUnsupportedVersionResponse(request -> - request instanceof ListGroupsRequest && - !((ListGroupsRequest) request).data().statesFilter().isEmpty() - ); - - options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); - result = env.adminClient().listGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + result = env.adminClient().listConsumerGroups(options); + TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } @@ -3647,158 +3534,83 @@ public void testListConsumerGroupsWithTypesOlderBrokerVersion() throws Exception .setMinVersion((short) 0) .setMaxVersion((short) 4); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4))); env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - // Check if we can list groups v4 with older broker if we specify states and don't specify types. + // Check if we can list groups with older broker if we specify states and don't specify types. env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), + expectListGroupsRequestWithFilters(singleton(GroupState.STABLE.toString()), Collections.emptySet()), new ListGroupsResponse(new ListGroupsResponseData() .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( + .setGroups(Collections.singletonList( new ListGroupsResponseData.ListedGroup() .setGroupId("group-1") .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) .setGroupState(GroupState.STABLE.toString())))), env.cluster().nodeById(0)); - ListGroupsOptions options = ListGroupsOptions.forConsumerGroups().inGroupStates(Set.of(GroupState.STABLE)); - ListGroupsResult result = env.adminClient().listGroups(options); + ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(singleton(GroupState.STABLE)); + ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); - Collection listing = result.all().get(); + Collection listing = result.all().get(); assertEquals(1, listing.size()); - List expected = List.of( - new GroupListing("group-1", Optional.empty(), ConsumerProtocol.PROTOCOL_TYPE, Optional.of(GroupState.STABLE)) + List expected = Collections.singletonList( + new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false) ); assertEquals(expected, listing); // Check that we cannot set a type filter with an older broker. env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - // First attempt to build request will require v5 (type filter), but the broker only supports v4 env.kafkaClient().prepareUnsupportedVersionResponse(request -> request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() ); - options = ListGroupsOptions.forConsumerGroups().withTypes(Set.of(GroupType.SHARE)); - result = env.adminClient().listGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + options = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CLASSIC)); + result = env.adminClient().listConsumerGroups(options); + TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } - @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecated() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), - AdminClientConfig.RETRIES_CONFIG, "2")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Empty metadata response should be retried - env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - List.of(), - env.cluster().clusterResource().clusterId(), - -1, - List.of())); - - env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - env.cluster().nodes(), - env.cluster().clusterResource().clusterId(), - env.cluster().controller().id(), - List.of())); - - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-1") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(0)); - - // handle retriable errors - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setGroups(List.of()) - ), - env.cluster().nodeById(1)); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) - .setGroups(List.of()) - ), - env.cluster().nodeById(1)); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-2") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(1)); + private MockClient.RequestMatcher expectListGroupsRequestWithFilters( + Set expectedStates, + Set expectedTypes + ) { + return body -> { + if (body instanceof ListGroupsRequest) { + ListGroupsRequest request = (ListGroupsRequest) body; + return Objects.equals(new HashSet<>(request.data().statesFilter()), expectedStates) + && Objects.equals(new HashSet<>(request.data().typesFilter()), expectedTypes); + } + return false; + }; + } - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-3") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-connect-3") - .setProtocolType("connector") - .setGroupState("Stable") - ))), - env.cluster().nodeById(2)); + @Test + public void testOffsetCommitNumRetries() throws Exception { + final Cluster cluster = mockCluster(3, 0); + final Time time = new MockTime(); - // fatal error - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - .setGroups(List.of())), - env.cluster().nodeById(3)); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, + AdminClientConfig.RETRIES_CONFIG, "0")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + final TopicPartition tp1 = new TopicPartition("foo", 0); - Collection listings = result.valid().get(); - assertEquals(3, listings.size()); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - Set groupIds = new HashSet<>(); - for (ConsumerGroupListing listing : listings) { - groupIds.add(listing.groupId()); - assertTrue(listing.state().isPresent()); - } + Map offsets = new HashMap<>(); + offsets.put(tp1, new OffsetAndMetadata(123L)); + final AlterConsumerGroupOffsetsResult result = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets); - assertEquals(Set.of("group-1", "group-2", "group-3"), groupIds); - assertEquals(1, result.errors().get().size()); + TestUtils.assertFutureError(result.all(), TimeoutException.class); } } @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecatedMetadataFailure() throws Exception { + public void testOffsetCommitWithMultipleErrors() throws Exception { final Cluster cluster = mockCluster(3, 0); final Time time = new MockTime(); @@ -3806,353 +3618,111 @@ public void testListConsumerGroupsDeprecatedMetadataFailure() throws Exception { AdminClientConfig.RETRIES_CONFIG, "0")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - // Empty metadata causes the request to fail since we have no list of brokers - // to send the ListGroups requests to + final TopicPartition foo0 = new TopicPartition("foo", 0); + final TopicPartition foo1 = new TopicPartition("foo", 1); + env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - List.of(), - env.cluster().clusterResource().clusterId(), - -1, - List.of())); + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(); - TestUtils.assertFutureThrows(KafkaException.class, result.all()); + Map responseData = new HashMap<>(); + responseData.put(foo0, Errors.NONE); + responseData.put(foo1, Errors.UNKNOWN_TOPIC_OR_PARTITION); + env.kafkaClient().prepareResponse(new OffsetCommitResponse(0, responseData)); + + Map offsets = new HashMap<>(); + offsets.put(foo0, new OffsetAndMetadata(123L)); + offsets.put(foo1, new OffsetAndMetadata(456L)); + final AlterConsumerGroupOffsetsResult result = env.adminClient() + .alterConsumerGroupOffsets(GROUP_ID, offsets); + + assertNull(result.partitionResult(foo0).get()); + TestUtils.assertFutureError(result.partitionResult(foo1), UnknownTopicOrPartitionException.class); + + TestUtils.assertFutureError(result.all(), UnknownTopicOrPartitionException.class); } } @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecatedWithStates() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + public void testOffsetCommitRetryBackoff() throws Exception { + MockTime time = new MockTime(); + int retryBackoff = 100; - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, + mockCluster(3, 0), + newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { + MockClient mockClient = env.kafkaClient(); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setGroupState("Empty")))), - env.cluster().nodeById(0)); + mockClient.setNodeApiVersions(NodeApiVersions.create()); - final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); - final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); - Collection listings = result.valid().get(); + AtomicLong firstAttemptTime = new AtomicLong(0); + AtomicLong secondAttemptTime = new AtomicLong(0); - assertEquals(2, listings.size()); - List expected = new ArrayList<>(); - expected.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), true)); - expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false)); - assertEquals(expected, listings); - assertEquals(0, result.errors().get().size()); - } - } + final TopicPartition tp1 = new TopicPartition("foo", 0); - @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecatedWithTypes() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(body -> { + firstAttemptTime.set(time.milliseconds()); + return true; + }, prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - // Test with a specific state filter but no type filter in list consumer group options. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable") - .setGroupType(GroupType.CLASSIC.toString())))), - env.cluster().nodeById(0)); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(body -> { + secondAttemptTime.set(time.milliseconds()); + return true; + }, prepareOffsetCommitResponse(tp1, Errors.NONE)); - final ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); - final ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); - Collection listings = result.valid().get(); - assertEquals(1, listings.size()); - List expected = new ArrayList<>(); - expected.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CLASSIC), false)); - assertEquals(expected, listings); - assertEquals(0, result.errors().get().size()); + Map offsets = new HashMap<>(); + offsets.put(tp1, new OffsetAndMetadata(123L)); + final KafkaFuture future = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets).all(); - // Test with list consumer group options. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting CommitOffsets first request failure"); + TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry CommitOffsets call on first failure"); - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of(GroupType.CONSUMER.toString())), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState("Stable") - .setGroupType(GroupType.CONSUMER.toString()), - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-2") - .setGroupState("Empty") - .setGroupType(GroupType.CONSUMER.toString())))), - env.cluster().nodeById(0)); + long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); + long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); + time.sleep(upperBoundBackoffMs); - final ListConsumerGroupsOptions options2 = new ListConsumerGroupsOptions().withTypes(singleton(GroupType.CONSUMER)); - final ListConsumerGroupsResult result2 = env.adminClient().listConsumerGroups(options2); - Collection listings2 = result2.valid().get(); + future.get(); - assertEquals(2, listings2.size()); - List expected2 = new ArrayList<>(); - expected2.add(new ConsumerGroupListing("group-2", Optional.of(GroupState.EMPTY), Optional.of(GroupType.CONSUMER), true)); - expected2.add(new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), Optional.of(GroupType.CONSUMER), false)); - assertEquals(expected2, listings2); - assertEquals(0, result.errors().get().size()); + long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); + assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "CommitOffsets retry did not await expected backoff"); } } @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecatedWithStatesOlderBrokerVersion() throws Exception { - ApiVersion listGroupV3 = new ApiVersion() - .setApiKey(ApiKeys.LIST_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV3))); + public void testDescribeConsumerGroupNumRetries() throws Exception { + final Cluster cluster = mockCluster(3, 0); + final Time time = new MockTime(); - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, + AdminClientConfig.RETRIES_CONFIG, "0")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - // Check we can list groups with older broker if we don't specify states - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE)))), - env.cluster().nodeById(0)); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - ListConsumerGroupsOptions options = new ListConsumerGroupsOptions(); - ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); - Collection listing = result.all().get(); - assertEquals(1, listing.size()); - List expected = List.of(new ConsumerGroupListing("group-1", false)); - assertEquals(expected, listing); - - // But we cannot set a state filter with older broker - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - env.kafkaClient().prepareUnsupportedVersionResponse( - body -> body instanceof ListGroupsRequest); - - options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); - result = env.adminClient().listConsumerGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); - } - } - - @Test - @SuppressWarnings("removal") - public void testListConsumerGroupsDeprecatedWithTypesOlderBrokerVersion() throws Exception { - ApiVersion listGroupV4 = new ApiVersion() - .setApiKey(ApiKeys.LIST_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 4); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(List.of(listGroupV4))); - - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - - // Check if we can list groups with older broker if we specify states and don't specify types. - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(GroupState.STABLE.toString()), Set.of()), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState(GroupState.STABLE.toString())))), - env.cluster().nodeById(0)); - - ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inGroupStates(Set.of(GroupState.STABLE)); - ListConsumerGroupsResult result = env.adminClient().listConsumerGroups(options); - - Collection listing = result.all().get(); - assertEquals(1, listing.size()); - List expected = List.of( - new ConsumerGroupListing("group-1", Optional.of(GroupState.STABLE), false) - ); - assertEquals(expected, listing); - - // Check that we cannot set a type filter with an older broker. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - env.kafkaClient().prepareUnsupportedVersionResponse(request -> - request instanceof ListGroupsRequest && !((ListGroupsRequest) request).data().typesFilter().isEmpty() - ); - - options = new ListConsumerGroupsOptions().withTypes(Set.of(GroupType.SHARE)); - result = env.adminClient().listConsumerGroups(options); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); - - // But a type filter which is just classic groups is permitted with an older broker, because they - // only know about classic groups so the types filter can be omitted. - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - - env.kafkaClient().prepareResponseFrom( - expectListGroupsRequestWithFilters(Set.of(), Set.of()), - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(List.of( - new ListGroupsResponseData.ListedGroup() - .setGroupId("group-1") - .setProtocolType(ConsumerProtocol.PROTOCOL_TYPE) - .setGroupState(GroupState.STABLE.toString())))), - env.cluster().nodeById(0)); - - options = new ListConsumerGroupsOptions().withTypes(Set.of(GroupType.CLASSIC)); - result = env.adminClient().listConsumerGroups(options); - - listing = result.all().get(); - assertEquals(1, listing.size()); - assertEquals(expected, listing); - } - } - - private MockClient.RequestMatcher expectListGroupsRequestWithFilters( - Set expectedStates, - Set expectedTypes - ) { - return body -> { - if (body instanceof ListGroupsRequest) { - ListGroupsRequest request = (ListGroupsRequest) body; - return Objects.equals(new HashSet<>(request.data().statesFilter()), expectedStates) - && Objects.equals(new HashSet<>(request.data().typesFilter()), expectedTypes); - } - return false; - }; - } - - @Test - public void testOffsetCommitNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - final TopicPartition tp1 = new TopicPartition("foo", 0); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final AlterConsumerGroupOffsetsResult result = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets); - - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); - } - } - - @Test - public void testStreamsOffsetCommitNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - final TopicPartition tp1 = new TopicPartition("foo", 0); + DescribeGroupsResponseData data = new DescribeGroupsResponseData(); + data.groups().add(DescribeGroupsResponse.groupMetadata( + GROUP_ID, + Errors.NOT_COORDINATOR, + "", + "", + "", + Collections.emptyList(), + Collections.emptySet())); + env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final AlterStreamsGroupOffsetsResult result = env.adminClient().alterStreamsGroupOffsets(GROUP_ID, offsets); - - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); - } - } - - @Test - public void testOffsetCommitWithMultipleErrors() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - final TopicPartition foo0 = new TopicPartition("foo", 0); - final TopicPartition foo1 = new TopicPartition("foo", 1); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Map responseData = new HashMap<>(); - responseData.put(foo0, Errors.NONE); - responseData.put(foo1, Errors.UNKNOWN_TOPIC_OR_PARTITION); - env.kafkaClient().prepareResponse(new OffsetCommitResponse(0, responseData)); - - Map offsets = new HashMap<>(); - offsets.put(foo0, new OffsetAndMetadata(123L)); - offsets.put(foo1, new OffsetAndMetadata(456L)); - final AlterConsumerGroupOffsetsResult result = env.adminClient() - .alterConsumerGroupOffsets(GROUP_ID, offsets); - - assertNull(result.partitionResult(foo0).get()); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, result.partitionResult(foo1)); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, result.all()); - } - } - - @Test - public void testStreamsOffsetCommitWithMultipleErrors() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - final TopicPartition foo0 = new TopicPartition("foo", 0); - final TopicPartition foo1 = new TopicPartition("foo", 1); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Map responseData = new HashMap<>(); - responseData.put(foo0, Errors.NONE); - responseData.put(foo1, Errors.UNKNOWN_TOPIC_OR_PARTITION); - env.kafkaClient().prepareResponse(new OffsetCommitResponse(0, responseData)); - - Map offsets = new HashMap<>(); - offsets.put(foo0, new OffsetAndMetadata(123L)); - offsets.put(foo1, new OffsetAndMetadata(456L)); - final AlterStreamsGroupOffsetsResult result = env.adminClient() - .alterStreamsGroupOffsets(GROUP_ID, offsets); - - assertNull(result.partitionResult(foo0).get()); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, result.partitionResult(foo1)); + final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, result.all()); + TestUtils.assertFutureError(result.all(), TimeoutException.class); } } @Test - public void testOffsetCommitRetryBackoff() throws Exception { + public void testDescribeConsumerGroupRetryBackoff() throws Exception { MockTime time = new MockTime(); int retryBackoff = 100; @@ -4166,28 +3736,45 @@ public void testOffsetCommitRetryBackoff() throws Exception { AtomicLong firstAttemptTime = new AtomicLong(0); AtomicLong secondAttemptTime = new AtomicLong(0); - final TopicPartition tp1 = new TopicPartition("foo", 0); - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + DescribeGroupsResponseData data = new DescribeGroupsResponseData(); + data.groups().add(DescribeGroupsResponse.groupMetadata( + GROUP_ID, + Errors.NOT_COORDINATOR, + "", + "", + "", + Collections.emptyList(), + Collections.emptySet())); + mockClient.prepareResponse(body -> { firstAttemptTime.set(time.milliseconds()); return true; - }, prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - + }, new DescribeGroupsResponse(data)); mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + data = new DescribeGroupsResponseData(); + data.groups().add(DescribeGroupsResponse.groupMetadata( + GROUP_ID, + Errors.NONE, + "", + ConsumerProtocol.PROTOCOL_TYPE, + "", + Collections.emptyList(), + Collections.emptySet())); + mockClient.prepareResponse(body -> { secondAttemptTime.set(time.milliseconds()); return true; - }, prepareOffsetCommitResponse(tp1, Errors.NONE)); - + }, new DescribeGroupsResponse(data)); - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final KafkaFuture future = env.adminClient().alterConsumerGroupOffsets(GROUP_ID, offsets).all(); + final KafkaFuture> future = + env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)).all(); - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting CommitOffsets first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry CommitOffsets call on first failure"); + TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DescribeConsumerGroup first request failure"); + TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DescribeConsumerGroup call on first failure"); long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); @@ -4196,172 +3783,27 @@ public void testOffsetCommitRetryBackoff() throws Exception { future.get(); long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "CommitOffsets retry did not await expected backoff"); + assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DescribeConsumerGroup retry did not await expected backoff!"); } } @Test - public void testStreamsOffsetCommitRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; + public void testDescribeConsumerGroups() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); + // Retriable FindCoordinatorResponse errors should be retried + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - mockClient.setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); + // The first request sent will be a ConsumerGroupDescribe request. Let's + // fail it in order to fail back to using the classic version. + env.kafkaClient().prepareUnsupportedVersionResponse( + request -> request instanceof ConsumerGroupDescribeRequest); - final TopicPartition tp1 = new TopicPartition("foo", 0); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, prepareOffsetCommitResponse(tp1, Errors.NONE)); - - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final KafkaFuture future = env.adminClient().alterStreamsGroupOffsets(GROUP_ID, offsets).all(); - - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting CommitOffsets first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry CommitOffsets call on first failure"); - - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); - - future.get(); - - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "CommitOffsets retry did not await expected backoff"); - } - } - - @Test - public void testDescribeConsumerGroupNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DescribeGroupsResponseData data = new DescribeGroupsResponseData(); - - data.groups().add(DescribeGroupsResponse.groupMetadata( - GROUP_ID, - Errors.NOT_COORDINATOR, - "", - "", - "", - Collections.emptyList(), - Collections.emptySet())); - env.kafkaClient().prepareResponse(new DescribeGroupsResponse(data)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); - } - } - - @Test - public void testDescribeConsumerGroupRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; - - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); - - mockClient.setNodeApiVersions(NodeApiVersions.create()); - - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DescribeGroupsResponseData data = new DescribeGroupsResponseData(); - data.groups().add(DescribeGroupsResponse.groupMetadata( - GROUP_ID, - Errors.NOT_COORDINATOR, - "", - "", - "", - Collections.emptyList(), - Collections.emptySet())); - - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, new DescribeGroupsResponse(data)); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - data = new DescribeGroupsResponseData(); - data.groups().add(DescribeGroupsResponse.groupMetadata( - GROUP_ID, - Errors.NONE, - "", - ConsumerProtocol.PROTOCOL_TYPE, - "", - Collections.emptyList(), - Collections.emptySet())); - - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, new DescribeGroupsResponse(data)); - - final KafkaFuture> future = - env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)).all(); - - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DescribeConsumerGroup first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DescribeConsumerGroup call on first failure"); - - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); - - future.get(); - - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DescribeConsumerGroup retry did not await expected backoff!"); - } - } - - @Test - public void testDescribeConsumerGroups() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Retriable FindCoordinatorResponse errors should be retried - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - // The first request sent will be a ConsumerGroupDescribe request. Let's - // fail it in order to fail back to using the classic version. - env.kafkaClient().prepareUnsupportedVersionResponse( - request -> request instanceof ConsumerGroupDescribeRequest); - - DescribeGroupsResponseData data = new DescribeGroupsResponseData(); + DescribeGroupsResponseData data = new DescribeGroupsResponseData(); // Retriable errors should be retried data.groups().add(DescribeGroupsResponse.groupMetadata( @@ -4639,7 +4081,7 @@ public void testDescribeNonConsumerGroups() throws Exception { final DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureThrows(IllegalArgumentException.class, result.describedGroups().get(GROUP_ID)); + TestUtils.assertFutureError(result.describedGroups().get(GROUP_ID), IllegalArgumentException.class); } } @@ -4660,7 +4102,7 @@ public void testDescribeGroupsWithBothUnsupportedApis() throws InterruptedExcept request -> request instanceof DescribeGroupsRequest); DescribeConsumerGroupsResult result = env.adminClient().describeConsumerGroups(singletonList(GROUP_ID)); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.describedGroups().get(GROUP_ID)); + TestUtils.assertFutureError(result.describedGroups().get(GROUP_ID), UnsupportedVersionException.class); } } @@ -4734,1096 +4176,142 @@ public void testDescribeOldAndNewConsumerGroups() throws Exception { null, "clientId0", "clientHost", - ConsumerProtocol.serializeAssignment( - new ConsumerPartitionAssignor.Assignment( - Collections.singletonList(new TopicPartition("bar", 0)) - ) - ).array(), - null - ) - ), - Collections.emptySet() - ) - )) - )); - - DescribeConsumerGroupsResult result = env.adminClient() - .describeConsumerGroups(asList("grp1", "grp2")); - - Map expectedResult = new HashMap<>(); - expectedResult.put("grp1", new ConsumerGroupDescription( - "grp1", - false, - Collections.singletonList( - new MemberDescription( - "memberId", - Optional.of("instanceId"), - "clientId", - "host", - new MemberAssignment( - Collections.singleton(new TopicPartition("foo", 0)) - ), - Optional.of(new MemberAssignment( - Collections.singleton(new TopicPartition("foo", 1)) - )), - Optional.of(10), - Optional.of(true) - ) - ), - "range", - GroupType.CONSUMER, - GroupState.STABLE, - env.cluster().controller(), - Collections.emptySet(), - Optional.of(10), - Optional.of(10) - )); - expectedResult.put("grp2", new ConsumerGroupDescription( - "grp2", - false, - Collections.singletonList( - new MemberDescription( - "0", - Optional.empty(), - "clientId0", - "clientHost", - new MemberAssignment( - Collections.singleton(new TopicPartition("bar", 0)) - ), - Optional.empty(), - Optional.empty(), - Optional.empty() - ) - ), - "range", - GroupType.CLASSIC, - GroupState.STABLE, - env.cluster().controller(), - Collections.emptySet(), - Optional.empty(), - Optional.empty() - )); - - assertEquals(expectedResult, result.all().get()); - } - } - - @Test - public void testListConsumerGroupOffsetsOptionsWithBatchedApi() throws Exception { - verifyListConsumerGroupOffsetsOptions(); - } - - private void verifyListConsumerGroupOffsetsOptions() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final List partitions = Collections.singletonList(new TopicPartition("A", 0)); - final ListConsumerGroupOffsetsOptions options = new ListConsumerGroupOffsetsOptions() - .requireStable(true) - .timeoutMs(300); - - final ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec() - .topicPartitions(partitions); - env.adminClient().listConsumerGroupOffsets(Collections.singletonMap(GROUP_ID, groupSpec), options); - - final MockClient mockClient = env.kafkaClient(); - waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); - - ClientRequest clientRequest = mockClient.requests().peek(); - assertNotNull(clientRequest); - assertEquals(300, clientRequest.requestTimeoutMs()); - OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); - assertTrue(data.requireStable()); - assertEquals(Collections.singletonList(GROUP_ID), - data.groups().stream().map(OffsetFetchRequestGroup::groupId).collect(Collectors.toList())); - assertEquals(Collections.singletonList("A"), - data.groups().get(0).topics().stream().map(OffsetFetchRequestTopics::name).collect(Collectors.toList())); - assertEquals(Collections.singletonList(0), - data.groups().get(0).topics().get(0).partitionIndexes()); - } - } - - @Test - public void testListConsumerGroupOffsetsNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - - TestUtils.assertFutureThrows(TimeoutException.class, result.partitionsToOffsetAndMetadata()); - } - } - - @Test - public void testListConsumerGroupOffsetsRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; - - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); - - mockClient.setNodeApiVersions(NodeApiVersions.create()); - - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, offsetFetchResponse(Errors.NOT_COORDINATOR)); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, offsetFetchResponse(Errors.NONE)); - - final KafkaFuture> future = env.adminClient().listConsumerGroupOffsets(GROUP_ID).partitionsToOffsetAndMetadata(); - - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting ListConsumerGroupOffsets first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry ListConsumerGroupOffsets call on first failure"); - - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); - - future.get(); - - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "ListConsumerGroupOffsets retry did not await expected backoff!"); - } - } - - @Test - public void testListConsumerGroupOffsetsRetriableErrors() throws Exception { - // Retriable errors should be retried - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); - - /* - * We need to return two responses here, one for NOT_COORDINATOR call when calling list consumer offsets - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.NOT_COORDINATOR)); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - offsetFetchResponse(Errors.NONE)); - - final ListConsumerGroupOffsetsResult errorResult1 = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - - assertEquals(Collections.emptyMap(), errorResult1.partitionsToOffsetAndMetadata().get()); - } - } - - @Test - public void testListConsumerGroupOffsetsNonRetriableErrors() throws Exception { - // Non-retriable errors throw an exception - final List nonRetriableErrors = asList( - Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND, - Errors.UNKNOWN_MEMBER_ID, Errors.STALE_MEMBER_EPOCH); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - for (Errors error : nonRetriableErrors) { - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(offsetFetchResponse(error)); - - ListConsumerGroupOffsetsResult errorResult = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.partitionsToOffsetAndMetadata()); - } - } - } - - @Test - public void testListConsumerGroupOffsets() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Retriable FindCoordinatorResponse errors should be retried - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - // Retriable errors should be retried - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); - - /* - * We need to return two responses here, one for NOT_COORDINATOR error when calling list consumer group offsets - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); - TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); - TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); - TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); - - final OffsetFetchResponseData response = new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(GROUP_ID) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("my_topic") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(myTopicPartition0.partition()) - .setCommittedOffset(10), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(myTopicPartition1.partition()) - .setCommittedOffset(0), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(myTopicPartition2.partition()) - .setCommittedOffset(20), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(myTopicPartition3.partition()) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - )) - )) - )); - - env.kafkaClient().prepareResponse(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); - - final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata().get(); - - assertEquals(4, partitionToOffsetAndMetadata.size()); - assertEquals(10, partitionToOffsetAndMetadata.get(myTopicPartition0).offset()); - assertEquals(0, partitionToOffsetAndMetadata.get(myTopicPartition1).offset()); - assertEquals(20, partitionToOffsetAndMetadata.get(myTopicPartition2).offset()); - assertTrue(partitionToOffsetAndMetadata.containsKey(myTopicPartition3)); - assertNull(partitionToOffsetAndMetadata.get(myTopicPartition3)); - } - } - - @Test - public void testBatchedListConsumerGroupOffsets() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); - - ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs, new ListConsumerGroupOffsetsOptions()); - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, true, Errors.NONE); - - verifyListOffsetsForMultipleGroups(groupSpecs, result); - } - } - - @Test - public void testBatchedListStreamsGroupOffsets() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListStreamsGroupOffsetsSpec(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); - - ListStreamsGroupOffsetsResult result = env.adminClient().listStreamsGroupOffsets(groupSpecs, new ListStreamsGroupOffsetsOptions()); - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, true, Errors.NONE); - - verifyListStreamsOffsetsForMultipleGroups(groupSpecs, result); - } - } - - @Test - public void testBatchedListConsumerGroupOffsetsWithNoFindCoordinatorBatching() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion offsetFetchV7 = new ApiVersion() - .setApiKey(ApiKeys.OFFSET_FETCH.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 7); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, offsetFetchV7))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); - - // Fail the first request in order to ensure that the group is not batched when retried. - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.COORDINATOR_LOAD_IN_PROGRESS); - - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - - verifyListOffsetsForMultipleGroups(groupSpecs, result); - } - } - - @Test - public void testBatchedListStreamsGroupOffsetsWithNoFindCoordinatorBatching() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListStreamsGroupOffsetsSpec(); - - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion offsetFetchV7 = new ApiVersion() - .setApiKey(ApiKeys.OFFSET_FETCH.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 7); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, offsetFetchV7))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - ListStreamsGroupOffsetsResult result = env.adminClient().listStreamsGroupOffsets(groupSpecs); - - // Fail the first request in order to ensure that the group is not batched when retried. - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.COORDINATOR_LOAD_IN_PROGRESS); - - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - - verifyListStreamsOffsetsForMultipleGroups(groupSpecs, result); - } - } - - @Test - public void testBatchedListConsumerGroupOffsetsWithNoOffsetFetchBatching() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - - ApiVersion offsetFetchV7 = new ApiVersion() - .setApiKey(ApiKeys.OFFSET_FETCH.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 7); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singleton(offsetFetchV7))); - env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); - // Prepare a response to force client to attempt batched request creation that throws - // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); - - ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); - - // The request handler attempts both FindCoordinator and OffsetFetch requests. This seems - // ok since we expect this scenario only during upgrades from versions < 3.0.0 where - // some upgraded brokers could handle batched FindCoordinator while non-upgraded coordinators - // rejected batched OffsetFetch requests. - sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); - sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - - verifyListOffsetsForMultipleGroups(groupSpecs, result); - } - } - - @Test - public void testBatchedListStreamsGroupOffsetsWithNoOffsetFetchBatching() throws Exception { - Cluster cluster = mockCluster(1, 0); - Time time = new MockTime(); - Map groupSpecs = batchedListStreamsGroupOffsetsSpec(); - - ApiVersion offsetFetchV7 = new ApiVersion() - .setApiKey(ApiKeys.OFFSET_FETCH.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 7); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singleton(offsetFetchV7))); - env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); - // Prepare a response to force client to attempt batched request creation that throws - // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. - env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE)); - - ListStreamsGroupOffsetsResult result = env.adminClient().listStreamsGroupOffsets(groupSpecs); - - // The request handler attempts both FindCoordinator and OffsetFetch requests. This seems - // ok since we expect this scenario only during upgrades from versions < 3.0.0 where - // some upgraded brokers could handle batched FindCoordinator while non-upgraded coordinators - // rejected batched OffsetFetch requests. - sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); - sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - sendStreamsOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - - verifyListStreamsOffsetsForMultipleGroups(groupSpecs, result); - } - } - - private Map batchedListConsumerGroupOffsetsSpec() { - Set groupAPartitions = Collections.singleton(new TopicPartition("A", 1)); - Set groupBPartitions = Collections.singleton(new TopicPartition("B", 2)); - - ListConsumerGroupOffsetsSpec groupASpec = new ListConsumerGroupOffsetsSpec().topicPartitions(groupAPartitions); - ListConsumerGroupOffsetsSpec groupBSpec = new ListConsumerGroupOffsetsSpec().topicPartitions(groupBPartitions); - return Utils.mkMap(Utils.mkEntry("groupA", groupASpec), Utils.mkEntry("groupB", groupBSpec)); - } - - private Map batchedListStreamsGroupOffsetsSpec() { - Set groupAPartitions = Collections.singleton(new TopicPartition("A", 1)); - Set groupBPartitions = Collections.singleton(new TopicPartition("B", 2)); - - ListStreamsGroupOffsetsSpec groupASpec = new ListStreamsGroupOffsetsSpec().topicPartitions(groupAPartitions); - ListStreamsGroupOffsetsSpec groupBSpec = new ListStreamsGroupOffsetsSpec().topicPartitions(groupBPartitions); - return Utils.mkMap(Utils.mkEntry("groupA", groupASpec), Utils.mkEntry("groupB", groupBSpec)); - } - - private void waitForRequest(MockClient mockClient, ApiKeys apiKeys) throws Exception { - TestUtils.waitForCondition(() -> { - ClientRequest clientRequest = mockClient.requests().peek(); - return clientRequest != null && clientRequest.apiKey() == apiKeys; - }, "Failed awaiting " + apiKeys + " request"); - } - - private void sendFindCoordinatorResponse(MockClient mockClient, Node coordinator) throws Exception { - waitForRequest(mockClient, ApiKeys.FIND_COORDINATOR); - - ClientRequest clientRequest = mockClient.requests().peek(); - FindCoordinatorRequestData data = ((FindCoordinatorRequest.Builder) clientRequest.requestBuilder()).data(); - mockClient.respond(prepareFindCoordinatorResponse(Errors.NONE, data.key(), coordinator)); - } - - private void sendOffsetFetchResponse(MockClient mockClient, Map groupSpecs, boolean batched, Errors error) throws Exception { - waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); - - ClientRequest clientRequest = mockClient.requests().peek(); - OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); - - if (!batched) { - assertEquals(1, data.groups().size()); - } - - OffsetFetchResponseData response = new OffsetFetchResponseData() - .setGroups(data.groups().stream().map(group -> - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId()) - .setErrorCode(error.code()) - .setTopics(groupSpecs.get(group.groupId()).topicPartitions().stream() - .collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(entry.getKey()) - .setPartitions(entry.getValue().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition.partition()) - .setCommittedOffset(10) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())); - - mockClient.respond(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); - } - - private void sendStreamsOffsetFetchResponse(MockClient mockClient, Map groupSpecs, boolean batched, Errors error) throws Exception { - waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); - - ClientRequest clientRequest = mockClient.requests().peek(); - OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).build().data(); - - if (!batched) { - assertEquals(1, data.groups().size()); - } - - OffsetFetchResponseData response = new OffsetFetchResponseData() - .setGroups(data.groups().stream().map(group -> - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId()) - .setErrorCode(error.code()) - .setTopics(groupSpecs.get(group.groupId()).topicPartitions().stream() - .collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(entry.getKey()) - .setPartitions(entry.getValue().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition.partition()) - .setCommittedOffset(10) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())); - - mockClient.respond(new OffsetFetchResponse(response, ApiKeys.OFFSET_FETCH.latestVersion())); - } - - private void verifyListOffsetsForMultipleGroups(Map groupSpecs, - ListConsumerGroupOffsetsResult result) throws Exception { - assertEquals(groupSpecs.size(), result.all().get(10, TimeUnit.SECONDS).size()); - for (Map.Entry entry : groupSpecs.entrySet()) { - assertEquals(entry.getValue().topicPartitions(), - result.partitionsToOffsetAndMetadata(entry.getKey()).get().keySet()); - } - } - - private void verifyListStreamsOffsetsForMultipleGroups(Map groupSpecs, - ListStreamsGroupOffsetsResult result) throws Exception { - assertEquals(groupSpecs.size(), result.all().get(10, TimeUnit.SECONDS).size()); - for (Map.Entry entry : groupSpecs.entrySet()) { - assertEquals(entry.getValue().topicPartitions(), - result.partitionsToOffsetAndMetadata(entry.getKey()).get().keySet()); - } - } - - @Test - public void testDeleteConsumerGroupsNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - final List groupIds = singletonList("groupId"); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NOT_COORDINATOR.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) - )); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); - - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); - } - } - - @Test - public void testDeleteStreamsGroupsNumRetries() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - final List groupIds = singletonList("groupId"); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NOT_COORDINATOR.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) - )); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeleteStreamsGroupsResult result = env.adminClient().deleteStreamsGroups(groupIds); - - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); - } - } - - @Test - public void testDeleteConsumerGroupsRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; - final List groupIds = singletonList(GROUP_ID); - - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); - - mockClient.setNodeApiVersions(NodeApiVersions.create()); - - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.NOT_COORDINATOR.code())); - - - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.NONE.code())); - - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); - - final KafkaFuture future = env.adminClient().deleteConsumerGroups(groupIds).all(); - - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroups first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroups call on first failure"); - - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); - - future.get(); - - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteConsumerGroups retry did not await expected backoff!"); - } - } - - @Test - public void testDeleteStreamsGroupsRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; - final List groupIds = singletonList(GROUP_ID); - - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); - - mockClient.setNodeApiVersions(NodeApiVersions.create()); - - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.NOT_COORDINATOR.code())); - - - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); - - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.NONE.code())); - - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); - - final KafkaFuture future = env.adminClient().deleteStreamsGroups(groupIds).all(); - - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteStreamsGroups first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteStreamsGroups call on first failure"); - - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); - - future.get(); - - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteConsumerGroups retry did not await expected backoff!"); - } - } - - @Test - public void testDeleteConsumerGroupsWithOlderBroker() throws Exception { - final List groupIds = singletonList("groupId"); - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion describeGroups = new ApiVersion() - .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); - - // Retriable FindCoordinatorResponse errors should be retried - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NONE.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) - )); - - final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); - - final KafkaFuture results = result.deletedGroups().get("groupId"); - assertNull(results.get()); - - // should throw error for non-retriable errors - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); - - DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.deletedGroups().get("groupId")); - - // Retriable errors should be retried - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection(); - errorResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) - ); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(errorResponse))); - - /* - * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - - DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection(); - coordinatorMoved.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NOT_COORDINATOR.code()) - ); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(coordinatorMoved))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - coordinatorMoved = new DeletableGroupResultCollection(); - coordinatorMoved.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - ); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(coordinatorMoved))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse))); - - errorResult = env.adminClient().deleteConsumerGroups(groupIds); - - final KafkaFuture errorResults = errorResult.deletedGroups().get("groupId"); - assertNull(errorResults.get()); - } - } - - @Test - public void testDeleteStreamsGroupsWithOlderBroker() throws Exception { - final List groupIds = singletonList("groupId"); - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion describeGroups = new ApiVersion() - .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); - - // Retriable FindCoordinatorResponse errors should be retried - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NONE.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) - )); - - final DeleteStreamsGroupsResult result = env.adminClient().deleteStreamsGroups(groupIds); - - final KafkaFuture results = result.deletedGroups().get("groupId"); - assertNull(results.get()); - - // should throw error for non-retriable errors - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); - - DeleteStreamsGroupsResult errorResult = env.adminClient().deleteStreamsGroups(groupIds); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.deletedGroups().get("groupId")); - - // Retriable errors should be retried - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection(); - errorResponse.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) - ); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(errorResponse))); - - /* - * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - - DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection(); - coordinatorMoved.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.NOT_COORDINATOR.code()) - ); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(coordinatorMoved))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - coordinatorMoved = new DeletableGroupResultCollection(); - coordinatorMoved.add(new DeletableGroupResult() - .setGroupId("groupId") - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - ); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(coordinatorMoved))); - env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse))); - - errorResult = env.adminClient().deleteStreamsGroups(groupIds); - - final KafkaFuture errorResults = errorResult.deletedGroups().get("groupId"); - assertNull(errorResults.get()); - } - } - - @Test - public void testDeleteMultipleConsumerGroupsWithOlderBroker() throws Exception { - final List groupIds = asList("group1", "group2"); - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion describeGroups = new ApiVersion() - .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions( - NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); - - // Dummy response for MockClient to handle the UnsupportedVersionException correctly to switch from batched to un-batched - env.kafkaClient().prepareResponse(null); - // Retriable FindCoordinatorResponse errors should be retried - for (int i = 0; i < groupIds.size(); i++) { - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - } - for (int i = 0; i < groupIds.size(); i++) { - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - } - - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("group1") - .setErrorCode(Errors.NONE.code())); - validResponse.add(new DeletableGroupResult() - .setGroupId("group2") - .setErrorCode(Errors.NONE.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) - )); - - final DeleteConsumerGroupsResult result = env.adminClient() - .deleteConsumerGroups(groupIds); - - final KafkaFuture results = result.deletedGroups().get("group1"); - assertNull(results.get(5, TimeUnit.SECONDS)); - } - } - - @Test - public void testDeleteMultipleStreamsGroupsWithOlderBroker() throws Exception { - final List groupIds = asList("group1", "group2"); - ApiVersion findCoordinatorV3 = new ApiVersion() - .setApiKey(ApiKeys.FIND_COORDINATOR.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 3); - ApiVersion describeGroups = new ApiVersion() - .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions( - NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); + ConsumerProtocol.serializeAssignment( + new ConsumerPartitionAssignor.Assignment( + Collections.singletonList(new TopicPartition("bar", 0)) + ) + ).array(), + null + ) + ), + Collections.emptySet() + ) + )) + )); - // Dummy response for MockClient to handle the UnsupportedVersionException correctly to switch from batched to un-batched - env.kafkaClient().prepareResponse(null); - // Retriable FindCoordinatorResponse errors should be retried - for (int i = 0; i < groupIds.size(); i++) { - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - } - for (int i = 0; i < groupIds.size(); i++) { - env.kafkaClient().prepareResponse( - prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - } + DescribeConsumerGroupsResult result = env.adminClient() + .describeConsumerGroups(asList("grp1", "grp2")); - final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); - validResponse.add(new DeletableGroupResult() - .setGroupId("group1") - .setErrorCode(Errors.NONE.code())); - validResponse.add(new DeletableGroupResult() - .setGroupId("group2") - .setErrorCode(Errors.NONE.code())); - env.kafkaClient().prepareResponse(new DeleteGroupsResponse( - new DeleteGroupsResponseData() - .setResults(validResponse) + Map expectedResult = new HashMap<>(); + expectedResult.put("grp1", new ConsumerGroupDescription( + "grp1", + false, + Collections.singletonList( + new MemberDescription( + "memberId", + Optional.of("instanceId"), + "clientId", + "host", + new MemberAssignment( + Collections.singleton(new TopicPartition("foo", 0)) + ), + Optional.of(new MemberAssignment( + Collections.singleton(new TopicPartition("foo", 1)) + )), + Optional.of(10), + Optional.of(true) + ) + ), + "range", + GroupType.CONSUMER, + GroupState.STABLE, + env.cluster().controller(), + Collections.emptySet(), + Optional.of(10), + Optional.of(10) + )); + expectedResult.put("grp2", new ConsumerGroupDescription( + "grp2", + false, + Collections.singletonList( + new MemberDescription( + "0", + Optional.empty(), + "clientId0", + "clientHost", + new MemberAssignment( + Collections.singleton(new TopicPartition("bar", 0)) + ), + Optional.empty(), + Optional.empty(), + Optional.empty() + ) + ), + "range", + GroupType.CLASSIC, + GroupState.STABLE, + env.cluster().controller(), + Collections.emptySet(), + Optional.empty(), + Optional.empty() )); - final DeleteStreamsGroupsResult result = env.adminClient() - .deleteStreamsGroups(groupIds); - - final KafkaFuture results = result.deletedGroups().get("group1"); - assertNull(results.get(5, TimeUnit.SECONDS)); + assertEquals(expectedResult, result.all().get()); } } @Test - public void testDeleteConsumerGroupOffsetsNumRetries() throws Exception { + public void testListConsumerGroupOffsetsOptionsWithBatchedApi() throws Exception { + verifyListConsumerGroupOffsetsOptions(); + } + + private void verifyListConsumerGroupOffsetsOptions() throws Exception { final Cluster cluster = mockCluster(3, 0); final Time time = new MockTime(); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - final TopicPartition tp1 = new TopicPartition("foo", 0); - + AdminClientConfig.RETRIES_CONFIG, "0")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final DeleteConsumerGroupOffsetsResult result = env.adminClient() - .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + final List partitions = Collections.singletonList(new TopicPartition("A", 0)); + final ListConsumerGroupOffsetsOptions options = new ListConsumerGroupOffsetsOptions() + .requireStable(true) + .timeoutMs(300); + + final ListConsumerGroupOffsetsSpec groupSpec = new ListConsumerGroupOffsetsSpec() + .topicPartitions(partitions); + env.adminClient().listConsumerGroupOffsets(Collections.singletonMap(GROUP_ID, groupSpec), options); + + final MockClient mockClient = env.kafkaClient(); + waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + ClientRequest clientRequest = mockClient.requests().peek(); + assertNotNull(clientRequest); + assertEquals(300, clientRequest.requestTimeoutMs()); + OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).data; + assertTrue(data.requireStable()); + assertEquals(Collections.singletonList(GROUP_ID), + data.groups().stream().map(OffsetFetchRequestGroup::groupId).collect(Collectors.toList())); + assertEquals(Collections.singletonList("A"), + data.groups().get(0).topics().stream().map(OffsetFetchRequestTopics::name).collect(Collectors.toList())); + assertEquals(Collections.singletonList(0), + data.groups().get(0).topics().get(0).partitionIndexes()); } } @Test - public void testDeleteStreamsGroupOffsetsNumRetries() throws Exception { + public void testListConsumerGroupOffsetsNumRetries() throws Exception { final Cluster cluster = mockCluster(3, 0); final Time time = new MockTime(); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) { - final TopicPartition tp1 = new TopicPartition("foo", 0); - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse(prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final DeleteStreamsGroupOffsetsResult result = env.adminClient() - .deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + TestUtils.assertFutureError(result.partitionsToOffsetAndMetadata(), TimeoutException.class); } } @Test - public void testDeleteConsumerGroupOffsetsRetryBackoff() throws Exception { + public void testListConsumerGroupOffsetsRetryBackoff() throws Exception { MockTime time = new MockTime(); int retryBackoff = 100; @@ -5837,27 +4325,23 @@ public void testDeleteConsumerGroupOffsetsRetryBackoff() throws Exception { AtomicLong firstAttemptTime = new AtomicLong(0); AtomicLong secondAttemptTime = new AtomicLong(0); - final TopicPartition tp1 = new TopicPartition("foo", 0); - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(body -> { firstAttemptTime.set(time.milliseconds()); return true; - }, prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); - + }, offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); mockClient.prepareResponse(body -> { secondAttemptTime.set(time.milliseconds()); return true; - }, prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + }, offsetFetchResponse(Errors.NONE, Collections.emptyMap())); - final KafkaFuture future = env.adminClient().deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())).all(); + final KafkaFuture> future = env.adminClient().listConsumerGroupOffsets(GROUP_ID).partitionsToOffsetAndMetadata(); - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroupOffsets first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroupOffsets call on first failure"); + TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting ListConsumerGroupOffsets first request failure"); + TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry ListConsumerGroupOffsets call on first failure"); long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); @@ -5866,799 +4350,729 @@ public void testDeleteConsumerGroupOffsetsRetryBackoff() throws Exception { future.get(); long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteConsumerGroupOffsets retry did not await expected backoff!"); + assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "ListConsumerGroupOffsets retry did not await expected backoff!"); } } @Test - public void testDeleteStreamsGroupOffsetsRetryBackoff() throws Exception { - MockTime time = new MockTime(); - int retryBackoff = 100; + public void testListConsumerGroupOffsetsRetriableErrors() throws Exception { + // Retriable errors should be retried - try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, - mockCluster(3, 0), - newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { - MockClient mockClient = env.kafkaClient(); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - mockClient.setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - AtomicLong firstAttemptTime = new AtomicLong(0); - AtomicLong secondAttemptTime = new AtomicLong(0); + env.kafkaClient().prepareResponse( + offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Collections.emptyMap())); - final TopicPartition tp1 = new TopicPartition("foo", 0); + /* + * We need to return two responses here, one for NOT_COORDINATOR call when calling list consumer offsets + * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a + * FindCoordinatorResponse. + * + * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response + */ + env.kafkaClient().prepareResponse( + offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(body -> { - firstAttemptTime.set(time.milliseconds()); - return true; - }, prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); + env.kafkaClient().prepareResponse( + offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse( + offsetFetchResponse(Errors.NONE, Collections.emptyMap())); - mockClient.prepareResponse(body -> { - secondAttemptTime.set(time.milliseconds()); - return true; - }, prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + final ListConsumerGroupOffsetsResult errorResult1 = env.adminClient().listConsumerGroupOffsets(GROUP_ID); + + assertEquals(Collections.emptyMap(), errorResult1.partitionsToOffsetAndMetadata().get()); + } + } + + @Test + public void testListConsumerGroupOffsetsNonRetriableErrors() throws Exception { + // Non-retriable errors throw an exception + final List nonRetriableErrors = asList( + Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND, + Errors.UNKNOWN_MEMBER_ID, Errors.STALE_MEMBER_EPOCH); - final KafkaFuture future = env.adminClient().deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())).all(); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteStreamsGroupOffsets first request failure"); - TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteStreamsGroupOffsets call on first failure"); + for (Errors error : nonRetriableErrors) { + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); - long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); - time.sleep(upperBoundBackoffMs); + env.kafkaClient().prepareResponse(offsetFetchResponse(error, Collections.emptyMap())); - future.get(); + ListConsumerGroupOffsetsResult errorResult = env.adminClient().listConsumerGroupOffsets(GROUP_ID); - long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); - assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteStreamsGroupOffsets retry did not await expected backoff!"); + TestUtils.assertFutureError(errorResult.partitionsToOffsetAndMetadata(), error.exception().getClass()); + } } } @Test - public void testDeleteConsumerGroupOffsets() throws Exception { - // Happy path + public void testListConsumerGroupOffsets() throws Exception { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - final TopicPartition tp1 = new TopicPartition("foo", 0); - final TopicPartition tp2 = new TopicPartition("bar", 0); - final TopicPartition tp3 = new TopicPartition("foobar", 0); + // Retriable FindCoordinatorResponse errors should be retried + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + // Retriable errors should be retried + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Collections.emptyMap())); + + /* + * We need to return two responses here, one for NOT_COORDINATOR error when calling list consumer group offsets + * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a + * FindCoordinatorResponse. + * + * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response + */ + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NOT_COORDINATOR, Collections.emptyMap())); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); + TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); + TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); + TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); + + final Map responseData = new HashMap<>(); + responseData.put(myTopicPartition0, new OffsetFetchResponse.PartitionData(10, + Optional.empty(), "", Errors.NONE)); + responseData.put(myTopicPartition1, new OffsetFetchResponse.PartitionData(0, + Optional.empty(), "", Errors.NONE)); + responseData.put(myTopicPartition2, new OffsetFetchResponse.PartitionData(20, + Optional.empty(), "", Errors.NONE)); + responseData.put(myTopicPartition3, new OffsetFetchResponse.PartitionData(OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), "", Errors.NONE)); + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.NONE, responseData)); + + final ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(GROUP_ID); + final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata().get(); + + assertEquals(4, partitionToOffsetAndMetadata.size()); + assertEquals(10, partitionToOffsetAndMetadata.get(myTopicPartition0).offset()); + assertEquals(0, partitionToOffsetAndMetadata.get(myTopicPartition1).offset()); + assertEquals(20, partitionToOffsetAndMetadata.get(myTopicPartition2).offset()); + assertTrue(partitionToOffsetAndMetadata.containsKey(myTopicPartition3)); + assertNull(partitionToOffsetAndMetadata.get(myTopicPartition3)); + } + } + + @Test + public void testBatchedListConsumerGroupOffsets() throws Exception { + Cluster cluster = mockCluster(1, 0); + Time time = new MockTime(); + Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRIES_CONFIG, "0")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(new OffsetDeleteResponse( - new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(Stream.of( - new OffsetDeleteResponseTopic() - .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( - new OffsetDeleteResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - ).iterator())), - new OffsetDeleteResponseTopic() - .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( - new OffsetDeleteResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.GROUP_SUBSCRIBED_TO_TOPIC.code()) - ).iterator())) - ).collect(Collectors.toList()).iterator())) - ) - ); - - final DeleteConsumerGroupOffsetsResult errorResult = env.adminClient().deleteConsumerGroupOffsets( - GROUP_ID, Stream.of(tp1, tp2).collect(Collectors.toSet())); + ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs, new ListConsumerGroupOffsetsOptions()); + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, true, Errors.NONE); - assertNull(errorResult.partitionResult(tp1).get()); - TestUtils.assertFutureThrows(GroupSubscribedToTopicException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupSubscribedToTopicException.class, errorResult.partitionResult(tp2)); - assertThrows(IllegalArgumentException.class, () -> errorResult.partitionResult(tp3)); + verifyListOffsetsForMultipleGroups(groupSpecs, result); } } @Test - public void testDeleteStreamsGroupOffsets() throws Exception { - // Happy path + public void testBatchedListConsumerGroupOffsetsWithNoFindCoordinatorBatching() throws Exception { + Cluster cluster = mockCluster(1, 0); + Time time = new MockTime(); + Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - final TopicPartition tp1 = new TopicPartition("foo", 0); - final TopicPartition tp2 = new TopicPartition("bar", 0); - final TopicPartition tp3 = new TopicPartition("foobar", 0); + ApiVersion findCoordinatorV3 = new ApiVersion() + .setApiKey(ApiKeys.FIND_COORDINATOR.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3); + ApiVersion offsetFetchV7 = new ApiVersion() + .setApiKey(ApiKeys.OFFSET_FETCH.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 7); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, offsetFetchV7))); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); - env.kafkaClient().prepareResponse(new OffsetDeleteResponse( - new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(Stream.of( - new OffsetDeleteResponseTopic() - .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( - new OffsetDeleteResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - ).iterator())), - new OffsetDeleteResponseTopic() - .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( - new OffsetDeleteResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.GROUP_SUBSCRIBED_TO_TOPIC.code()) - ).iterator())) - ).collect(Collectors.toList()).iterator())) - ) - ); + // Fail the first request in order to ensure that the group is not batched when retried. + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.COORDINATOR_LOAD_IN_PROGRESS); - final DeleteStreamsGroupOffsetsResult errorResult = env.adminClient().deleteStreamsGroupOffsets( - GROUP_ID, Stream.of(tp1, tp2).collect(Collectors.toSet())); + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - assertNull(errorResult.partitionResult(tp1).get()); - TestUtils.assertFutureThrows(GroupSubscribedToTopicException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupSubscribedToTopicException.class, errorResult.partitionResult(tp2)); - assertThrows(IllegalArgumentException.class, () -> errorResult.partitionResult(tp3)); + verifyListOffsetsForMultipleGroups(groupSpecs, result); } } @Test - public void testDeleteConsumerGroupOffsetsRetriableErrors() throws Exception { - // Retriable errors should be retried + public void testBatchedListConsumerGroupOffsetsWithNoOffsetFetchBatching() throws Exception { + Cluster cluster = mockCluster(1, 0); + Time time = new MockTime(); + Map groupSpecs = batchedListConsumerGroupOffsetsSpec(); - final TopicPartition tp1 = new TopicPartition("foo", 0); + ApiVersion offsetFetchV7 = new ApiVersion() + .setApiKey(ApiKeys.OFFSET_FETCH.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 7); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "0")) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singleton(offsetFetchV7))); + env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), groupSpecs.keySet())); + // Prepare a response to force client to attempt batched request creation that throws + // NoBatchedOffsetFetchRequestException. This triggers creation of non-batched requests. + env.kafkaClient().prepareResponse(offsetFetchResponse(Errors.COORDINATOR_NOT_AVAILABLE, Collections.emptyMap())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + ListConsumerGroupOffsetsResult result = env.adminClient().listConsumerGroupOffsets(groupSpecs); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); + // The request handler attempts both FindCoordinator and OffsetFetch requests. This seems + // ok since we expect this scenario only during upgrades from versions < 3.0.0 where + // some upgraded brokers could handle batched FindCoordinator while non-upgraded coordinators + // rejected batched OffsetFetch requests. + sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); + sendFindCoordinatorResponse(env.kafkaClient(), env.cluster().controller()); + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); + sendOffsetFetchResponse(env.kafkaClient(), groupSpecs, false, Errors.NONE); - /* - * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); + verifyListOffsetsForMultipleGroups(groupSpecs, result); + } + } - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + private Map batchedListConsumerGroupOffsetsSpec() { + Set groupAPartitions = Collections.singleton(new TopicPartition("A", 1)); + Set groupBPartitions = Collections.singleton(new TopicPartition("B", 2)); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.COORDINATOR_NOT_AVAILABLE)); + ListConsumerGroupOffsetsSpec groupASpec = new ListConsumerGroupOffsetsSpec().topicPartitions(groupAPartitions); + ListConsumerGroupOffsetsSpec groupBSpec = new ListConsumerGroupOffsetsSpec().topicPartitions(groupBPartitions); + return Utils.mkMap(Utils.mkEntry("groupA", groupASpec), Utils.mkEntry("groupB", groupBSpec)); + } - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + private void waitForRequest(MockClient mockClient, ApiKeys apiKeys) throws Exception { + TestUtils.waitForCondition(() -> { + ClientRequest clientRequest = mockClient.requests().peek(); + return clientRequest != null && clientRequest.apiKey() == apiKeys; + }, "Failed awaiting " + apiKeys + " request"); + } - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + private void sendFindCoordinatorResponse(MockClient mockClient, Node coordinator) throws Exception { + waitForRequest(mockClient, ApiKeys.FIND_COORDINATOR); - final DeleteConsumerGroupOffsetsResult errorResult1 = env.adminClient() - .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + ClientRequest clientRequest = mockClient.requests().peek(); + FindCoordinatorRequestData data = ((FindCoordinatorRequest.Builder) clientRequest.requestBuilder()).data(); + mockClient.respond(prepareFindCoordinatorResponse(Errors.NONE, data.key(), coordinator)); + } - assertNull(errorResult1.all().get()); - assertNull(errorResult1.partitionResult(tp1).get()); + private void sendOffsetFetchResponse(MockClient mockClient, Map groupSpecs, boolean batched, Errors error) throws Exception { + waitForRequest(mockClient, ApiKeys.OFFSET_FETCH); + + ClientRequest clientRequest = mockClient.requests().peek(); + OffsetFetchRequestData data = ((OffsetFetchRequest.Builder) clientRequest.requestBuilder()).data; + Map> results = new HashMap<>(); + Map errors = new HashMap<>(); + data.groups().forEach(group -> { + Map partitionResults = new HashMap<>(); + for (TopicPartition tp : groupSpecs.get(group.groupId()).topicPartitions()) { + partitionResults.put(tp, new PartitionData(10, Optional.empty(), "", Errors.NONE)); + } + results.put(group.groupId(), partitionResults); + errors.put(group.groupId(), error); + }); + if (!batched) { + assertEquals(1, data.groups().size()); + mockClient.respond(new OffsetFetchResponse(THROTTLE, error, results.values().iterator().next())); + } else + mockClient.respond(new OffsetFetchResponse(THROTTLE, errors, results)); + } + + private void verifyListOffsetsForMultipleGroups(Map groupSpecs, + ListConsumerGroupOffsetsResult result) throws Exception { + assertEquals(groupSpecs.size(), result.all().get(10, TimeUnit.SECONDS).size()); + for (Map.Entry entry : groupSpecs.entrySet()) { + assertEquals(entry.getValue().topicPartitions(), + result.partitionsToOffsetAndMetadata(entry.getKey()).get().keySet()); } } @Test - public void testDeleteStreamsGroupOffsetsRetriableErrors() throws Exception { - // Retriable errors should be retried - - final TopicPartition tp1 = new TopicPartition("foo", 0); + public void testDeleteConsumerGroupsNumRetries() throws Exception { + final Cluster cluster = mockCluster(3, 0); + final Time time = new MockTime(); + final List groupIds = singletonList("groupId"); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, + AdminClientConfig.RETRIES_CONFIG, "0")) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); + validResponse.add(new DeletableGroupResult() + .setGroupId("groupId") + .setErrorCode(Errors.NOT_COORDINATOR.code())); + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(validResponse) + )); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); + final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); - /* - * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group - * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - * FindCoordinatorResponse. - * - * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response - */ - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); + TestUtils.assertFutureError(result.all(), TimeoutException.class); + } + } - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + @Test + public void testDeleteConsumerGroupsRetryBackoff() throws Exception { + MockTime time = new MockTime(); + int retryBackoff = 100; + final List groupIds = singletonList(GROUP_ID); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(Errors.COORDINATOR_NOT_AVAILABLE)); + try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, + mockCluster(3, 0), + newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { + MockClient mockClient = env.kafkaClient(); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + AtomicLong firstAttemptTime = new AtomicLong(0); + AtomicLong secondAttemptTime = new AtomicLong(0); - final DeleteStreamsGroupOffsetsResult errorResult1 = env.adminClient() - .deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - assertNull(errorResult1.all().get()); - assertNull(errorResult1.partitionResult(tp1).get()); - } - } + DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); + validResponse.add(new DeletableGroupResult() + .setGroupId(GROUP_ID) + .setErrorCode(Errors.NOT_COORDINATOR.code())); - @Test - public void testDeleteConsumerGroupOffsetsNonRetriableErrors() throws Exception { - // Non-retriable errors throw an exception + mockClient.prepareResponse(body -> { + firstAttemptTime.set(time.milliseconds()); + return true; + }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); - final TopicPartition tp1 = new TopicPartition("foo", 0); - final List nonRetriableErrors = asList( - Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + validResponse = new DeletableGroupResultCollection(); + validResponse.add(new DeletableGroupResult() + .setGroupId(GROUP_ID) + .setErrorCode(Errors.NONE.code())); - for (Errors error : nonRetriableErrors) { - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(body -> { + secondAttemptTime.set(time.milliseconds()); + return true; + }, new DeleteGroupsResponse(new DeleteGroupsResponseData().setResults(validResponse))); + + final KafkaFuture future = env.adminClient().deleteConsumerGroups(groupIds).all(); + + TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroups first request failure"); + TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroups call on first failure"); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(error)); + long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); + long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); + time.sleep(upperBoundBackoffMs); - DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() - .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + future.get(); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.all()); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.partitionResult(tp1)); - } + long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); + assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteConsumerGroups retry did not await expected backoff!"); } } @Test - public void testDeleteStreamsGroupOffsetsNonRetriableErrors() throws Exception { - // Non-retriable errors throw an exception - - final TopicPartition tp1 = new TopicPartition("foo", 0); - final List nonRetriableErrors = asList( - Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND); + public void testDeleteConsumerGroupsWithOlderBroker() throws Exception { + final List groupIds = singletonList("groupId"); + ApiVersion findCoordinatorV3 = new ApiVersion() + .setApiKey(ApiKeys.FIND_COORDINATOR.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3); + ApiVersion describeGroups = new ApiVersion() + .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) + .setMinVersion((short) 0) + .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); - for (Errors error : nonRetriableErrors) { - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + // Retriable FindCoordinatorResponse errors should be retried + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse(error)); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - DeleteStreamsGroupOffsetsResult errorResult = env.adminClient() - .deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); + validResponse.add(new DeletableGroupResult() + .setGroupId("groupId") + .setErrorCode(Errors.NONE.code())); + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(validResponse) + )); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.all()); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.partitionResult(tp1)); - } - } - } + final DeleteConsumerGroupsResult result = env.adminClient().deleteConsumerGroups(groupIds); - @Test - public void testDeleteConsumerGroupOffsetsFindCoordinatorRetriableErrors() throws Exception { - // Retriable FindCoordinatorResponse errors should be retried + final KafkaFuture results = result.deletedGroups().get("groupId"); + assertNull(results.get()); - final TopicPartition tp1 = new TopicPartition("foo", 0); + // should throw error for non-retriable errors + env.kafkaClient().prepareResponse( + prepareOldFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + DeleteConsumerGroupsResult errorResult = env.adminClient().deleteConsumerGroups(groupIds); + TestUtils.assertFutureError(errorResult.deletedGroups().get("groupId"), GroupAuthorizationException.class); + // Retriable errors should be retried env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); + prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + final DeletableGroupResultCollection errorResponse = new DeletableGroupResultCollection(); + errorResponse.add(new DeletableGroupResult() + .setGroupId("groupId") + .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) + ); + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(errorResponse))); - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + /* + * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group + * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a + * FindCoordinatorResponse. + * + * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response + */ - final DeleteConsumerGroupOffsetsResult result = env.adminClient() - .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + DeletableGroupResultCollection coordinatorMoved = new DeletableGroupResultCollection(); + coordinatorMoved.add(new DeletableGroupResult() + .setGroupId("groupId") + .setErrorCode(Errors.NOT_COORDINATOR.code()) + ); - assertNull(result.all().get()); - assertNull(result.partitionResult(tp1).get()); + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(coordinatorMoved))); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + coordinatorMoved = new DeletableGroupResultCollection(); + coordinatorMoved.add(new DeletableGroupResult() + .setGroupId("groupId") + .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) + ); + + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(coordinatorMoved))); + env.kafkaClient().prepareResponse(prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(validResponse))); + + errorResult = env.adminClient().deleteConsumerGroups(groupIds); + + final KafkaFuture errorResults = errorResult.deletedGroups().get("groupId"); + assertNull(errorResults.get()); } } @Test - public void testDeleteStreamsGroupOffsetsFindCoordinatorRetriableErrors() throws Exception { - // Retriable FindCoordinatorResponse errors should be retried - - final TopicPartition tp1 = new TopicPartition("foo", 0); + public void testDeleteMultipleConsumerGroupsWithOlderBroker() throws Exception { + final List groupIds = asList("group1", "group2"); + ApiVersion findCoordinatorV3 = new ApiVersion() + .setApiKey(ApiKeys.FIND_COORDINATOR.id) + .setMinVersion((short) 0) + .setMaxVersion((short) 3); + ApiVersion describeGroups = new ApiVersion() + .setApiKey(ApiKeys.DESCRIBE_GROUPS.id) + .setMinVersion((short) 0) + .setMaxVersion(ApiKeys.DELETE_GROUPS.latestVersion()); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); + env.kafkaClient().setNodeApiVersions( + NodeApiVersions.create(asList(findCoordinatorV3, describeGroups))); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + // Dummy response for MockClient to handle the UnsupportedVersionException correctly to switch from batched to un-batched + env.kafkaClient().prepareResponse(null); + // Retriable FindCoordinatorResponse errors should be retried + for (int i = 0; i < groupIds.size(); i++) { + env.kafkaClient().prepareResponse( + prepareOldFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + } + for (int i = 0; i < groupIds.size(); i++) { + env.kafkaClient().prepareResponse( + prepareOldFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + } - env.kafkaClient().prepareResponse( - prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); + final DeletableGroupResultCollection validResponse = new DeletableGroupResultCollection(); + validResponse.add(new DeletableGroupResult() + .setGroupId("group1") + .setErrorCode(Errors.NONE.code())); + validResponse.add(new DeletableGroupResult() + .setGroupId("group2") + .setErrorCode(Errors.NONE.code())); + env.kafkaClient().prepareResponse(new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(validResponse) + )); - final DeleteStreamsGroupOffsetsResult result = env.adminClient() - .deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + final DeleteConsumerGroupsResult result = env.adminClient() + .deleteConsumerGroups(groupIds); - assertNull(result.all().get()); - assertNull(result.partitionResult(tp1).get()); + final KafkaFuture results = result.deletedGroups().get("group1"); + assertNull(results.get(5, TimeUnit.SECONDS)); } } @Test - public void testDeleteConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() throws Exception { - // Non-retriable FindCoordinatorResponse errors throw an exception + public void testDeleteConsumerGroupOffsetsNumRetries() throws Exception { + final Cluster cluster = mockCluster(3, 0); + final Time time = new MockTime(); - final TopicPartition tp1 = new TopicPartition("foo", 0); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, + AdminClientConfig.RETRIES_CONFIG, "0")) { + final TopicPartition tp1 = new TopicPartition("foo", 0); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + env.kafkaClient().prepareResponse(prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); + env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() + final DeleteConsumerGroupOffsetsResult result = env.adminClient() .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.partitionResult(tp1)); + TestUtils.assertFutureError(result.all(), TimeoutException.class); } } @Test - public void testDeleteStreamsGroupOffsetsFindCoordinatorNonRetriableErrors() throws Exception { - // Non-retriable FindCoordinatorResponse errors throw an exception - - final TopicPartition tp1 = new TopicPartition("foo", 0); + public void testDeleteConsumerGroupOffsetsRetryBackoff() throws Exception { + MockTime time = new MockTime(); + int retryBackoff = 100; - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + try (final AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, + mockCluster(3, 0), + newStrMap(AdminClientConfig.RETRY_BACKOFF_MS_CONFIG, "" + retryBackoff))) { + MockClient mockClient = env.kafkaClient(); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); + mockClient.setNodeApiVersions(NodeApiVersions.create()); - final DeleteStreamsGroupOffsetsResult errorResult = env.adminClient() - .deleteStreamsGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + AtomicLong firstAttemptTime = new AtomicLong(0); + AtomicLong secondAttemptTime = new AtomicLong(0); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.partitionResult(tp1)); - } - } + final TopicPartition tp1 = new TopicPartition("foo", 0); - @Test - public void testDescribeStreamsGroups() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - // Retriable FindCoordinatorResponse errors should be retried - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(body -> { + firstAttemptTime.set(time.milliseconds()); + return true; + }, prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); - StreamsGroupDescribeResponseData data = new StreamsGroupDescribeResponseData(); - // Retriable errors should be retried - data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code())); - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(data)); - - // We need to return two responses here, one with NOT_COORDINATOR error when calling describe streams group - // api using coordinator that has moved. This will retry whole operation. So we need to again respond with a - // FindCoordinatorResponse. - // - // And the same reason for COORDINATOR_NOT_AVAILABLE error response - data = new StreamsGroupDescribeResponseData(); - data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.NOT_COORDINATOR.code())); - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(data)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - data = new StreamsGroupDescribeResponseData(); - data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(GROUP_ID) - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code())); - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(data)); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + mockClient.prepareResponse(body -> { + secondAttemptTime.set(time.milliseconds()); + return true; + }, prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); - data = makeFullStreamsGroupDescribeResponse(); - - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(data)); - - final DescribeStreamsGroupsResult result = env.adminClient().describeStreamsGroups(singletonList(GROUP_ID)); - final StreamsGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get(); - - final String subtopologyId = "my_subtopology"; - StreamsGroupMemberAssignment.TaskIds expectedActiveTasks1 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(0, 1, 2)); - StreamsGroupMemberAssignment.TaskIds expectedStandbyTasks1 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(3, 4, 5)); - StreamsGroupMemberAssignment.TaskIds expectedWarmupTasks1 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(6, 7, 8)); - StreamsGroupMemberAssignment.TaskIds expectedActiveTasks2 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(3, 4, 5)); - StreamsGroupMemberAssignment.TaskIds expectedStandbyTasks2 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(6, 7, 8)); - StreamsGroupMemberAssignment.TaskIds expectedWarmupTasks2 = - new StreamsGroupMemberAssignment.TaskIds(subtopologyId, asList(0, 1, 2)); - StreamsGroupMemberAssignment expectedMemberAssignment = new StreamsGroupMemberAssignment( - singletonList(expectedActiveTasks1), - singletonList(expectedStandbyTasks1), - singletonList(expectedWarmupTasks1) - ); - StreamsGroupMemberAssignment expectedTargetAssignment = new StreamsGroupMemberAssignment( - singletonList(expectedActiveTasks2), - singletonList(expectedStandbyTasks2), - singletonList(expectedWarmupTasks2) - ); - final String instanceId = "instance-id"; - final String rackId = "rack-id"; - StreamsGroupMemberDescription expectedMemberOne = new StreamsGroupMemberDescription( - "0", - 1, - Optional.of(instanceId), - Optional.of(rackId), - "clientId0", - "clientHost", - 0, - "processId", - Optional.of(new StreamsGroupMemberDescription.Endpoint("localhost", 8080)), - Collections.singletonMap("key", "value"), - Collections.singletonList(new StreamsGroupMemberDescription.TaskOffset(subtopologyId, 0, 0)), - Collections.singletonList(new StreamsGroupMemberDescription.TaskOffset(subtopologyId, 0, 1)), - expectedMemberAssignment, - expectedTargetAssignment, - true - ); + final KafkaFuture future = env.adminClient().deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())).all(); - StreamsGroupMemberDescription expectedMemberTwo = new StreamsGroupMemberDescription( - "1", - 2, - Optional.empty(), - Optional.empty(), - "clientId1", - "clientHost", - 1, - "processId2", - Optional.empty(), - Collections.emptyMap(), - Collections.emptyList(), - Collections.emptyList(), - new StreamsGroupMemberAssignment(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()), - new StreamsGroupMemberAssignment(Collections.emptyList(), Collections.emptyList(), Collections.emptyList()), - false - ); + TestUtils.waitForCondition(() -> mockClient.numAwaitingResponses() == 1, "Failed awaiting DeleteConsumerGroupOffsets first request failure"); + TestUtils.waitForCondition(() -> ((KafkaAdminClient) env.adminClient()).numPendingCalls() == 1, "Failed to add retry DeleteConsumerGroupOffsets call on first failure"); - StreamsGroupSubtopologyDescription expectedSubtopologyDescription = new StreamsGroupSubtopologyDescription( - subtopologyId, - Collections.singletonList("my_source_topic"), - Collections.singletonList("my_repartition_sink_topic"), - Collections.singletonMap( - "my_changelog_topic", - new StreamsGroupSubtopologyDescription.TopicInfo( - 0, - (short) 3, - Collections.singletonMap("key1", "value1") - ) - ), - Collections.singletonMap( - "my_repartition_topic", - new StreamsGroupSubtopologyDescription.TopicInfo( - 99, - (short) 0, - Collections.emptyMap() - ) - ) - ); + long lowerBoundBackoffMs = (long) (retryBackoff * (1 - CommonClientConfigs.RETRY_BACKOFF_JITTER)); + long upperBoundBackoffMs = (long) (retryBackoff * CommonClientConfigs.RETRY_BACKOFF_EXP_BASE * (1 + CommonClientConfigs.RETRY_BACKOFF_JITTER)); + time.sleep(upperBoundBackoffMs); - assertEquals(1, result.describedGroups().size()); - assertEquals(GROUP_ID, groupDescription.groupId()); - assertEquals(2, groupDescription.members().size()); - Iterator members = groupDescription.members().iterator(); - assertEquals(expectedMemberOne, members.next()); - assertEquals(expectedMemberTwo, members.next()); - assertEquals(1, groupDescription.subtopologies().size()); - assertEquals(expectedSubtopologyDescription, groupDescription.subtopologies().iterator().next()); - assertEquals(2, groupDescription.groupEpoch()); - assertEquals(1, groupDescription.targetAssignmentEpoch()); + future.get(); + long actualRetryBackoff = secondAttemptTime.get() - firstAttemptTime.get(); + assertEquals(retryBackoff, actualRetryBackoff, upperBoundBackoffMs - lowerBoundBackoffMs, "DeleteConsumerGroupOffsets retry did not await expected backoff!"); } } @Test - public void testDescribeStreamsGroupsWithAuthorizedOperationsOmitted() throws Exception { + public void testDeleteConsumerGroupOffsets() throws Exception { + // Happy path + + final TopicPartition tp1 = new TopicPartition("foo", 0); + final TopicPartition tp2 = new TopicPartition("bar", 0); + final TopicPartition tp3 = new TopicPartition("foobar", 0); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); env.kafkaClient().prepareResponse( prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - StreamsGroupDescribeResponseData data = makeFullStreamsGroupDescribeResponse(); - - data.groups().iterator().next() - .setAuthorizedOperations(MetadataResponse.AUTHORIZED_OPERATIONS_OMITTED); - - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(data)); + env.kafkaClient().prepareResponse(new OffsetDeleteResponse( + new OffsetDeleteResponseData() + .setTopics(new OffsetDeleteResponseTopicCollection(Stream.of( + new OffsetDeleteResponseTopic() + .setName("foo") + .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( + new OffsetDeleteResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code()) + ).iterator())), + new OffsetDeleteResponseTopic() + .setName("bar") + .setPartitions(new OffsetDeleteResponsePartitionCollection(Collections.singletonList( + new OffsetDeleteResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.GROUP_SUBSCRIBED_TO_TOPIC.code()) + ).iterator())) + ).collect(Collectors.toList()).iterator())) + ) + ); - final DescribeStreamsGroupsResult result = env.adminClient().describeStreamsGroups(singletonList(GROUP_ID)); - final StreamsGroupDescription groupDescription = result.describedGroups().get(GROUP_ID).get(); + final DeleteConsumerGroupOffsetsResult errorResult = env.adminClient().deleteConsumerGroupOffsets( + GROUP_ID, Stream.of(tp1, tp2).collect(Collectors.toSet())); - assertNull(groupDescription.authorizedOperations()); + assertNull(errorResult.partitionResult(tp1).get()); + TestUtils.assertFutureError(errorResult.all(), GroupSubscribedToTopicException.class); + TestUtils.assertFutureError(errorResult.partitionResult(tp2), GroupSubscribedToTopicException.class); + assertThrows(IllegalArgumentException.class, () -> errorResult.partitionResult(tp3)); } } @Test - public void testDescribeMultipleStreamsGroups() { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - StreamsGroupDescribeResponseData.TaskIds activeTasks = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(0, 1, 2)); - StreamsGroupDescribeResponseData.TaskIds standbyTasks = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(3, 4, 5)); - StreamsGroupDescribeResponseData.TaskIds warmupTasks = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(6, 7, 8)); - final StreamsGroupDescribeResponseData.Assignment memberAssignment = new StreamsGroupDescribeResponseData.Assignment() - .setActiveTasks(singletonList(activeTasks)) - .setStandbyTasks(singletonList(standbyTasks)) - .setWarmupTasks(singletonList(warmupTasks)); - StreamsGroupDescribeResponseData group0Data = new StreamsGroupDescribeResponseData(); - group0Data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(GROUP_ID) - .setGroupState(GroupState.STABLE.toString()) - .setMembers(asList( - new StreamsGroupDescribeResponseData.Member() - .setMemberId("0") - .setClientId("clientId0") - .setClientHost("clientHost") - .setAssignment(memberAssignment), - new StreamsGroupDescribeResponseData.Member() - .setMemberId("1") - .setClientId("clientId1") - .setClientHost("clientHost") - .setAssignment(memberAssignment)))); - - StreamsGroupDescribeResponseData group1Data = new StreamsGroupDescribeResponseData(); - group1Data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId("group-1") - .setGroupState(GroupState.STABLE.toString()) - .setMembers(asList( - new StreamsGroupDescribeResponseData.Member() - .setMemberId("0") - .setClientId("clientId0") - .setClientHost("clientHost") - .setAssignment(memberAssignment), - new StreamsGroupDescribeResponseData.Member() - .setMemberId("1") - .setClientId("clientId1") - .setClientHost("clientHost") - .setAssignment(memberAssignment)))); - - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(group0Data)); - env.kafkaClient().prepareResponse(new StreamsGroupDescribeResponse(group1Data)); + public void testDeleteConsumerGroupOffsetsRetriableErrors() throws Exception { + // Retriable errors should be retried - Collection groups = new HashSet<>(); - groups.add(GROUP_ID); - groups.add("group-1"); - final DescribeStreamsGroupsResult result = env.adminClient().describeStreamsGroups(groups); - assertEquals(2, result.describedGroups().size()); - assertEquals(groups, result.describedGroups().keySet()); - } - } + final TopicPartition tp1 = new TopicPartition("foo", 0); - @Test - public void testListStreamsGroups() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(4, 0), - AdminClientConfig.RETRIES_CONFIG, "2")) { + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - // Empty metadata response should be retried env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - Collections.emptyList(), - env.cluster().clusterResource().clusterId(), - -1, - Collections.emptyList())); + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - env.cluster().nodes(), - env.cluster().clusterResource().clusterId(), - env.cluster().controller().id(), - Collections.emptyList())); - - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(singletonList( - new ListedGroup() - .setGroupId("streams-group-1") - .setGroupType(GroupType.STREAMS.toString()) - .setGroupState("Stable") - ))), - env.cluster().nodeById(0)); + prepareOffsetDeleteResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS)); - // handle retriable errors - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code()) - .setGroups(Collections.emptyList()) - ), - env.cluster().nodeById(1)); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()) - .setGroups(Collections.emptyList()) - ), - env.cluster().nodeById(1)); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(Arrays.asList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("streams-group-2") - .setGroupType(GroupType.STREAMS.toString()) - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("streams-group-3") - .setGroupType(GroupType.STREAMS.toString()) - .setGroupState("Stable") - ))), - env.cluster().nodeById(1)); + /* + * We need to return two responses here, one for NOT_COORDINATOR call when calling delete a consumer group + * api using coordinator that has moved. This will retry whole operation. So we need to again respond with a + * FindCoordinatorResponse. + * + * And the same reason for the following COORDINATOR_NOT_AVAILABLE error response + */ + env.kafkaClient().prepareResponse( + prepareOffsetDeleteResponse(Errors.NOT_COORDINATOR)); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(singletonList( - new ListedGroup() - .setGroupId("streams-group-4") - .setGroupType(GroupType.STREAMS.toString()) - .setGroupState("Stable") - ))), - env.cluster().nodeById(2)); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - // fatal error - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse( - new ListGroupsResponseData() - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - .setGroups(Collections.emptyList())), - env.cluster().nodeById(3)); + env.kafkaClient().prepareResponse( + prepareOffsetDeleteResponse(Errors.COORDINATOR_NOT_AVAILABLE)); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - Collection listings = result.valid().get(); - assertEquals(4, listings.size()); + env.kafkaClient().prepareResponse( + prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); - Set groupIds = new HashSet<>(); - for (GroupListing listing : listings) { - groupIds.add(listing.groupId()); - assertTrue(listing.groupState().isPresent()); - } + final DeleteConsumerGroupOffsetsResult errorResult1 = env.adminClient() + .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - assertEquals(Set.of("streams-group-1", "streams-group-2", "streams-group-3", "streams-group-4"), groupIds); - assertEquals(1, result.errors().get().size()); + assertNull(errorResult1.all().get()); + assertNull(errorResult1.partitionResult(tp1).get()); } } @Test - public void testListStreamsGroupsMetadataFailure() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); + public void testDeleteConsumerGroupOffsetsNonRetriableErrors() throws Exception { + // Non-retriable errors throw an exception - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { + final TopicPartition tp1 = new TopicPartition("foo", 0); + final List nonRetriableErrors = asList( + Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND); + + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - // Empty metadata causes the request to fail since we have no list of brokers - // to send the ListGroups requests to - env.kafkaClient().prepareResponse( - RequestTestUtils.metadataResponse( - Collections.emptyList(), - env.cluster().clusterResource().clusterId(), - -1, - Collections.emptyList())); + for (Errors error : nonRetriableErrors) { + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); + + env.kafkaClient().prepareResponse( + prepareOffsetDeleteResponse(error)); + + DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() + .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); - TestUtils.assertFutureThrows(KafkaException.class, result.all()); + TestUtils.assertFutureError(errorResult.all(), error.exception().getClass()); + TestUtils.assertFutureError(errorResult.partitionResult(tp1), error.exception().getClass()); + } } } @Test - public void testListStreamsGroupsWithStates() throws Exception { + public void testDeleteConsumerGroupOffsetsFindCoordinatorRetriableErrors() throws Exception { + // Retriable FindCoordinatorResponse errors should be retried + + final TopicPartition tp1 = new TopicPartition("foo", 0); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(Arrays.asList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("streams-group-1") - .setGroupType(GroupType.STREAMS.toString()) - .setProtocolType("streams") - .setGroupState("Stable"), - new ListGroupsResponseData.ListedGroup() - .setGroupId("streams-group-2") - .setGroupType(GroupType.STREAMS.toString()) - .setProtocolType("streams") - .setGroupState("NotReady")))), - env.cluster().nodeById(0)); + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); - Collection listings = result.valid().get(); + env.kafkaClient().prepareResponse( + prepareOffsetDeleteResponse("foo", 0, Errors.NONE)); - assertEquals(2, listings.size()); - List expected = new ArrayList<>(); - expected.add(new GroupListing("streams-group-1", Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE))); - expected.add(new GroupListing("streams-group-2", Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.NOT_READY))); - assertEquals(expected, listings); - assertEquals(0, result.errors().get().size()); + final DeleteConsumerGroupOffsetsResult result = env.adminClient() + .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + + assertNull(result.all().get()); + assertNull(result.partitionResult(tp1).get()); } } @Test - public void testListStreamsGroupsWithStatesOlderBrokerVersion() { - ApiVersion listGroupV4 = new ApiVersion() - .setApiKey(ApiKeys.LIST_GROUPS.id) - .setMinVersion((short) 0) - .setMaxVersion((short) 4); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create(Collections.singletonList(listGroupV4))); + public void testDeleteConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() throws Exception { + // Non-retriable FindCoordinatorResponse errors throw an exception - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); + final TopicPartition tp1 = new TopicPartition("foo", 0); - // Check we should not be able to list streams groups with broker having version < 5 - env.kafkaClient().prepareResponseFrom( - new ListGroupsResponse(new ListGroupsResponseData() - .setErrorCode(Errors.NONE.code()) - .setGroups(Collections.singletonList( - new ListGroupsResponseData.ListedGroup() - .setGroupId("streams-group-1")))), - env.cluster().nodeById(0)); - ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forStreamsGroups()); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { + env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); + + env.kafkaClient().prepareResponse( + prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); + + final DeleteConsumerGroupOffsetsResult errorResult = env.adminClient() + .deleteConsumerGroupOffsets(GROUP_ID, Stream.of(tp1).collect(Collectors.toSet())); + + TestUtils.assertFutureError(errorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureError(errorResult.partitionResult(tp1), GroupAuthorizationException.class); } } - + @Test public void testDescribeShareGroups() throws Exception { try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { @@ -6970,8 +5384,8 @@ public void testListShareGroups() throws Exception { .setGroups(Collections.emptyList())), env.cluster().nodeById(3)); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + TestUtils.assertFutureError(result.all(), UnknownServerException.class); Collection listings = result.valid().get(); assertEquals(4, listings.size()); @@ -7005,8 +5419,8 @@ public void testListShareGroupsMetadataFailure() throws Exception { -1, Collections.emptyList())); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); - TestUtils.assertFutureThrows(KafkaException.class, result.all()); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + TestUtils.assertFutureError(result.all(), KafkaException.class); } } @@ -7033,7 +5447,7 @@ public void testListShareGroupsWithStates() throws Exception { .setGroupState("Empty")))), env.cluster().nodeById(0)); - final ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); + final ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); Collection listings = result.valid().get(); assertEquals(2, listings.size()); @@ -7064,8 +5478,8 @@ public void testListShareGroupsWithStatesOlderBrokerVersion() { new ListGroupsResponseData.ListedGroup() .setGroupId("share-group-1")))), env.cluster().nodeById(0)); - ListGroupsResult result = env.adminClient().listGroups(ListGroupsOptions.forShareGroups()); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + ListGroupsResult result = env.adminClient().listGroups(new ListGroupsOptions().withTypes(Set.of(GroupType.SHARE))); + TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } @@ -7301,10 +5715,10 @@ public void testIncrementalAlterConfigs() throws Exception { configs.put(groupResource, singletonList(alterConfigOp4)); AlterConfigsResult result = env.adminClient().incrementalAlterConfigs(configs); - TestUtils.assertFutureThrows(ClusterAuthorizationException.class, result.values().get(brokerResource)); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.values().get(topicResource)); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.values().get(metricResource)); - TestUtils.assertFutureThrows(InvalidConfigurationException.class, result.values().get(groupResource)); + TestUtils.assertFutureError(result.values().get(brokerResource), ClusterAuthorizationException.class); + TestUtils.assertFutureError(result.values().get(topicResource), InvalidRequestException.class); + TestUtils.assertFutureError(result.values().get(metricResource), InvalidRequestException.class); + TestUtils.assertFutureError(result.values().get(groupResource), InvalidConfigurationException.class); // Test a call where there are no errors. responseData = new IncrementalAlterConfigsResponseData(); @@ -7398,7 +5812,7 @@ public void testRemoveMembersFromGroupNumRetries() throws Exception { final RemoveMembersFromConsumerGroupResult result = env.adminClient().removeMembersFromConsumerGroup( GROUP_ID, new RemoveMembersFromConsumerGroupOptions(membersToRemove)); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + TestUtils.assertFutureError(result.all(), TimeoutException.class); } } @@ -7534,8 +5948,8 @@ public void testRemoveMembersFromGroupNonRetriableErrors() throws Exception { final RemoveMembersFromConsumerGroupResult result = env.adminClient().removeMembersFromConsumerGroup( GROUP_ID, new RemoveMembersFromConsumerGroupOptions(membersToRemove)); - TestUtils.assertFutureThrows(error.exception().getClass(), result.all()); - TestUtils.assertFutureThrows(error.exception().getClass(), result.memberResult(memberToRemove)); + TestUtils.assertFutureError(result.all(), error.exception().getClass()); + TestUtils.assertFutureError(result.memberResult(memberToRemove), error.exception().getClass()); } } } @@ -7570,8 +5984,8 @@ public void testRemoveMembersFromGroup() throws Exception { MemberToRemove memberOne = new MemberToRemove(instanceOne); MemberToRemove memberTwo = new MemberToRemove(instanceTwo); - TestUtils.assertFutureThrows(UnknownServerException.class, unknownErrorResult.memberResult(memberOne)); - TestUtils.assertFutureThrows(UnknownServerException.class, unknownErrorResult.memberResult(memberTwo)); + TestUtils.assertFutureError(unknownErrorResult.memberResult(memberOne), UnknownServerException.class); + TestUtils.assertFutureError(unknownErrorResult.memberResult(memberTwo), UnknownServerException.class); MemberResponse responseOne = new MemberResponse() .setGroupInstanceId(instanceOne) @@ -7592,8 +6006,8 @@ public void testRemoveMembersFromGroup() throws Exception { new RemoveMembersFromConsumerGroupOptions(membersToRemove) ); - TestUtils.assertFutureThrows(UnknownMemberIdException.class, memberLevelErrorResult.all()); - TestUtils.assertFutureThrows(UnknownMemberIdException.class, memberLevelErrorResult.memberResult(memberOne)); + TestUtils.assertFutureError(memberLevelErrorResult.all(), UnknownMemberIdException.class); + TestUtils.assertFutureError(memberLevelErrorResult.memberResult(memberOne), UnknownMemberIdException.class); assertNull(memberLevelErrorResult.memberResult(memberTwo).get()); // Return with missing member. @@ -7607,9 +6021,9 @@ public void testRemoveMembersFromGroup() throws Exception { new RemoveMembersFromConsumerGroupOptions(membersToRemove) ); - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingMemberResult.all()); + TestUtils.assertFutureError(missingMemberResult.all(), IllegalArgumentException.class); // The memberOne was not included in the response. - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingMemberResult.memberResult(memberOne)); + TestUtils.assertFutureError(missingMemberResult.memberResult(memberOne), IllegalArgumentException.class); assertNull(missingMemberResult.memberResult(memberTwo).get()); @@ -7761,8 +6175,8 @@ public void testAlterPartitionReassignments() throws Exception { AlterPartitionReassignmentsResult result1 = env.adminClient().alterPartitionReassignments(reassignments); Future future1 = result1.all(); Future future2 = result1.values().get(tp1); - TestUtils.assertFutureThrows(UnknownServerException.class, future1); - TestUtils.assertFutureThrows(UnknownServerException.class, future2); + TestUtils.assertFutureError(future1, UnknownServerException.class); + TestUtils.assertFutureError(future2, UnknownServerException.class); // 2. NOT_CONTROLLER error handling AlterPartitionReassignmentsResponseData controllerErrResponseData = @@ -7813,7 +6227,7 @@ public void testAlterPartitionReassignments() throws Exception { ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(partitionLevelErrData)); AlterPartitionReassignmentsResult partitionLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments); - TestUtils.assertFutureThrows(InvalidReplicaAssignmentException.class, partitionLevelErrResult.values().get(tp1)); + TestUtils.assertFutureError(partitionLevelErrResult.values().get(tp1), Errors.INVALID_REPLICA_ASSIGNMENT.exception().getClass()); partitionLevelErrResult.values().get(tp2).get(); // 4. top-level error @@ -7832,9 +6246,9 @@ public void testAlterPartitionReassignments() throws Exception { ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(topLevelErrResponseData)); AlterPartitionReassignmentsResult topLevelErrResult = env.adminClient().alterPartitionReassignments(reassignments); - assertEquals(errorMessage, TestUtils.assertFutureThrows(ClusterAuthorizationException.class, topLevelErrResult.all()).getMessage()); - assertEquals(errorMessage, TestUtils.assertFutureThrows(ClusterAuthorizationException.class, topLevelErrResult.values().get(tp1)).getMessage()); - assertEquals(errorMessage, TestUtils.assertFutureThrows(ClusterAuthorizationException.class, topLevelErrResult.values().get(tp2)).getMessage()); + assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.all(), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); + assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp1), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); + assertEquals(errorMessage, TestUtils.assertFutureThrows(topLevelErrResult.values().get(tp2), Errors.CLUSTER_AUTHORIZATION_FAILED.exception().getClass()).getMessage()); // 5. unrepresentable topic name error TopicPartition invalidTopicTP = new TopicPartition("", 0); @@ -7853,8 +6267,8 @@ public void testAlterPartitionReassignments() throws Exception { ); env.kafkaClient().prepareResponse(new AlterPartitionReassignmentsResponse(singlePartResponseData)); AlterPartitionReassignmentsResult unrepresentableTopicResult = env.adminClient().alterPartitionReassignments(invalidTopicReassignments); - TestUtils.assertFutureThrows(InvalidTopicException.class, unrepresentableTopicResult.values().get(invalidTopicTP)); - TestUtils.assertFutureThrows(InvalidTopicException.class, unrepresentableTopicResult.values().get(invalidPartitionTP)); + TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidTopicTP), InvalidTopicException.class); + TestUtils.assertFutureError(unrepresentableTopicResult.values().get(invalidPartitionTP), InvalidTopicException.class); unrepresentableTopicResult.values().get(tp1).get(); // Test success scenario @@ -7922,8 +6336,8 @@ public void testListPartitionReassignments() throws Exception { .setErrorMessage(Errors.UNKNOWN_TOPIC_OR_PARTITION.message()); env.kafkaClient().prepareResponse(new ListPartitionReassignmentsResponse(unknownTpData)); - ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(Set.of(tp1, tp2)); - TestUtils.assertFutureThrows(UnknownTopicOrPartitionException.class, unknownTpResult.reassignments()); + ListPartitionReassignmentsResult unknownTpResult = env.adminClient().listPartitionReassignments(new HashSet<>(asList(tp1, tp2))); + TestUtils.assertFutureError(unknownTpResult.reassignments(), UnknownTopicOrPartitionException.class); // 3. Success ListPartitionReassignmentsResponseData responseData = new ListPartitionReassignmentsResponseData() @@ -7974,39 +6388,7 @@ public void testAlterConsumerGroupOffsets() throws Exception { assertNull(result.all().get()); assertNull(result.partitionResult(tp1).get()); assertNull(result.partitionResult(tp2).get()); - TestUtils.assertFutureThrows(IllegalArgumentException.class, result.partitionResult(tp3)); - } - } - - @Test - public void testAlterStreamsGroupOffsets() throws Exception { - // Happy path - - final TopicPartition tp1 = new TopicPartition("foo", 0); - final TopicPartition tp2 = new TopicPartition("bar", 0); - final TopicPartition tp3 = new TopicPartition("foobar", 0); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Map responseData = new HashMap<>(); - responseData.put(tp1, Errors.NONE); - responseData.put(tp2, Errors.NONE); - env.kafkaClient().prepareResponse(new OffsetCommitResponse(0, responseData)); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - offsets.put(tp2, new OffsetAndMetadata(456L)); - final AlterStreamsGroupOffsetsResult result = env.adminClient().alterStreamsGroupOffsets( - GROUP_ID, offsets); - - assertNull(result.all().get()); - assertNull(result.partitionResult(tp1).get()); - assertNull(result.partitionResult(tp2).get()); - TestUtils.assertFutureThrows(IllegalArgumentException.class, result.partitionResult(tp3)); + TestUtils.assertFutureError(result.partitionResult(tp3), IllegalArgumentException.class); } } @@ -8053,49 +6435,6 @@ public void testAlterConsumerGroupOffsetsRetriableErrors() throws Exception { } } - @Test - public void testAlterStreamsGroupOffsetsRetriableErrors() throws Exception { - // Retriable errors should be retried - - final TopicPartition tp1 = new TopicPartition("foo", 0); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.COORDINATOR_NOT_AVAILABLE)); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.COORDINATOR_LOAD_IN_PROGRESS)); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.NOT_COORDINATOR)); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.REBALANCE_IN_PROGRESS)); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.NONE)); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final AlterStreamsGroupOffsetsResult result1 = env.adminClient() - .alterStreamsGroupOffsets(GROUP_ID, offsets); - - assertNull(result1.all().get()); - assertNull(result1.partitionResult(tp1).get()); - } - } - @Test public void testAlterConsumerGroupOffsetsNonRetriableErrors() throws Exception { // Non-retriable errors throw an exception @@ -8118,36 +6457,8 @@ public void testAlterConsumerGroupOffsetsNonRetriableErrors() throws Exception { AlterConsumerGroupOffsetsResult errorResult = env.adminClient() .alterConsumerGroupOffsets(GROUP_ID, offsets); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.all()); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.partitionResult(tp1)); - } - } - } - - @Test - public void testAlterStreamsGroupOffsetsNonRetriableErrors() throws Exception { - // Non-retriable errors throw an exception - - final TopicPartition tp1 = new TopicPartition("foo", 0); - final List nonRetriableErrors = asList( - Errors.GROUP_AUTHORIZATION_FAILED, Errors.INVALID_GROUP_ID, Errors.GROUP_ID_NOT_FOUND, Errors.STALE_MEMBER_EPOCH); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - for (Errors error : nonRetriableErrors) { - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse(prepareOffsetCommitResponse(tp1, error)); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - AlterStreamsGroupOffsetsResult errorResult = env.adminClient() - .alterStreamsGroupOffsets(GROUP_ID, offsets); - - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.all()); - TestUtils.assertFutureThrows(error.exception().getClass(), errorResult.partitionResult(tp1)); + TestUtils.assertFutureError(errorResult.all(), error.exception().getClass()); + TestUtils.assertFutureError(errorResult.partitionResult(tp1), error.exception().getClass()); } } } @@ -8182,36 +6493,6 @@ public void testAlterConsumerGroupOffsetsFindCoordinatorRetriableErrors() throws } } - @Test - public void testAlterStreamsGroupOffsetsFindCoordinatorRetriableErrors() throws Exception { - // Retriable FindCoordinatorResponse errors should be retried - - final TopicPartition tp1 = new TopicPartition("foo", 0); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode())); - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.COORDINATOR_LOAD_IN_PROGRESS, Node.noNode())); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - env.kafkaClient().prepareResponse( - prepareOffsetCommitResponse(tp1, Errors.NONE)); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final AlterStreamsGroupOffsetsResult result = env.adminClient() - .alterStreamsGroupOffsets(GROUP_ID, offsets); - - assertNull(result.all().get()); - assertNull(result.partitionResult(tp1).get()); - } - } - @Test public void testAlterConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() throws Exception { // Non-retriable FindCoordinatorResponse errors throw an exception @@ -8229,30 +6510,8 @@ public void testAlterConsumerGroupOffsetsFindCoordinatorNonRetriableErrors() thr final AlterConsumerGroupOffsetsResult errorResult = env.adminClient() .alterConsumerGroupOffsets(GROUP_ID, offsets); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.partitionResult(tp1)); - } - } - - @Test - public void testAlterStreamsGroupOffsetsFindCoordinatorNonRetriableErrors() throws Exception { - // Non-retriable FindCoordinatorResponse errors throw an exception - - final TopicPartition tp1 = new TopicPartition("foo", 0); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse( - prepareFindCoordinatorResponse(Errors.GROUP_AUTHORIZATION_FAILED, Node.noNode())); - - Map offsets = new HashMap<>(); - offsets.put(tp1, new OffsetAndMetadata(123L)); - final AlterStreamsGroupOffsetsResult errorResult = env.adminClient() - .alterStreamsGroupOffsets(GROUP_ID, offsets); - - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.all()); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, errorResult.partitionResult(tp1)); + TestUtils.assertFutureError(errorResult.all(), GroupAuthorizationException.class); + TestUtils.assertFutureError(errorResult.partitionResult(tp1), GroupAuthorizationException.class); } } @@ -8426,7 +6685,7 @@ public void testListOffsetsNonRetriableErrors() throws Exception { partitions.put(tp0, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.all()); + TestUtils.assertFutureError(result.all(), TopicAuthorizationException.class); } } @@ -8454,7 +6713,7 @@ public void testListOffsetsMaxTimestampUnsupportedSingleOffsetSpec() { ListOffsetsResult result = env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.maxTimestamp())); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), UnsupportedVersionException.class); } } @@ -8502,7 +6761,7 @@ public void testListOffsetsMaxTimestampUnsupportedMultipleOffsetSpec() throws Ex put(tp1, OffsetSpec.latest()); }}); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.partitionResult(tp0)); + TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class); ListOffsetsResultInfo tp1Offset = result.partitionResult(tp1).get(); assertEquals(345L, tp1Offset.offset()); @@ -8564,7 +6823,7 @@ public void testListOffsetsHandlesFulfillmentTimeouts() throws Exception { put(tp1, OffsetSpec.latest()); } }); - TestUtils.assertFutureThrows(TimeoutException.class, result.partitionResult(tp0)); + TestUtils.assertFutureThrows(result.partitionResult(tp0), TimeoutException.class); ListOffsetsResultInfo tp1Result = result.partitionResult(tp1).get(); assertEquals(345L, tp1Result.offset()); assertEquals(543, tp1Result.leaderEpoch().get().intValue()); @@ -8627,7 +6886,7 @@ public void testListOffsetsUnsupportedNonMaxTimestamp() { ListOffsetsResult result = env.adminClient().listOffsets( Collections.singletonMap(tp0, OffsetSpec.latest())); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.partitionResult(tp0)); + TestUtils.assertFutureThrows(result.partitionResult(tp0), UnsupportedVersionException.class); } } @@ -8730,34 +6989,6 @@ public void testListOffsetsLatestTierSpecSpecMinVersion() throws Exception { } } - @Test - public void testListOffsetsEarliestPendingUploadSpecSpecMinVersion() throws Exception { - Node node = new Node(0, "localhost", 8120); - List nodes = Collections.singletonList(node); - List pInfos = new ArrayList<>(); - pInfos.add(new PartitionInfo("foo", 0, node, new Node[]{node}, new Node[]{node})); - final Cluster cluster = new Cluster( - "mockClusterId", - nodes, - pInfos, - Collections.emptySet(), - Collections.emptySet(), - node); - final TopicPartition tp0 = new TopicPartition("foo", 0); - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(cluster, - AdminClientConfig.RETRIES_CONFIG, "2")) { - - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareMetadataResponse(env.cluster(), Errors.NONE)); - - env.adminClient().listOffsets(Collections.singletonMap(tp0, OffsetSpec.earliestPendingUpload())); - - TestUtils.waitForCondition(() -> env.kafkaClient().requests().stream().anyMatch(request -> - request.requestBuilder().apiKey().messageType == ApiMessageType.LIST_OFFSETS && request.requestBuilder().oldestAllowedVersion() == 11 - ), "no listOffsets request has the expected oldestAllowedVersion"); - } - } - private Map makeTestFeatureUpdates() { return Utils.mkMap( Utils.mkEntry("test_feature_1", new FeatureUpdate((short) 2, FeatureUpdate.UpgradeType.UPGRADE)), @@ -8890,7 +7121,7 @@ public void testDescribeFeaturesFailure() { options.timeoutMs(10000); final KafkaFuture future = env.adminClient().describeFeatures(options).featureMetadata(); final ExecutionException e = assertThrows(ExecutionException.class, future::get); - assertEquals(Errors.INVALID_REQUEST.exception().getClass(), e.getCause().getClass()); + assertEquals(e.getCause().getClass(), Errors.INVALID_REQUEST.exception().getClass()); } } @@ -8954,56 +7185,56 @@ public void testDescribeMetadataQuorumFailure() { body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.INVALID_REQUEST, Errors.NONE, false, false, false, false, false)); KafkaFuture future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(InvalidRequestException.class, future); + TestUtils.assertFutureThrows(future, InvalidRequestException.class); // Test incorrect topic count env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, true, false, false, false, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); // Test incorrect topic name env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, true, false, false, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); // Test incorrect partition count env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, false, true, false, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); // Test incorrect partition index env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, false, false, false, true, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); // Test partition level error env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.INVALID_REQUEST, false, false, false, false, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(InvalidRequestException.class, future); + TestUtils.assertFutureThrows(future, InvalidRequestException.class); // Test all incorrect and no errors env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.NONE, Errors.NONE, true, true, true, true, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); // Test all incorrect and both errors env.kafkaClient().prepareResponse( body -> body instanceof DescribeQuorumRequest, prepareDescribeQuorumResponse(Errors.INVALID_REQUEST, Errors.INVALID_REQUEST, true, true, true, true, false)); future = env.adminClient().describeMetadataQuorum().quorumInfo(); - TestUtils.assertFutureThrows(InvalidRequestException.class, future); + TestUtils.assertFutureThrows(future, Errors.INVALID_REQUEST.exception().getClass()); } } @@ -9218,7 +7449,7 @@ public void testListOffsetsMetadataNonRetriableErrors( partitions.put(tp1, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); - TestUtils.assertFutureThrows(expectedFailure, result.all()); + TestUtils.assertFutureError(result.all(), expectedFailure); } } @@ -9286,8 +7517,8 @@ public void testListOffsetsPartialResponse() throws Exception { partitions.put(tp1, OffsetSpec.latest()); ListOffsetsResult result = env.adminClient().listOffsets(partitions); assertNotNull(result.partitionResult(tp0).get()); - TestUtils.assertFutureThrows(ApiException.class, result.partitionResult(tp1)); - TestUtils.assertFutureThrows(ApiException.class, result.all()); + TestUtils.assertFutureThrows(result.partitionResult(tp1), ApiException.class); + TestUtils.assertFutureThrows(result.all(), ApiException.class); } } @@ -9406,7 +7637,7 @@ private void testApiTimeout(int requestTimeoutMs, }, "Timed out waiting for Metadata request to be sent"); time.sleep(requestTimeoutMs + 1); - TestUtils.assertFutureThrows(TimeoutException.class, result.future); + TestUtils.assertFutureThrows(result.future, TimeoutException.class); } } @@ -9446,7 +7677,7 @@ public void testRequestTimeoutExceedingDefaultApiTimeout() throws Exception { // Now sleep the remaining time for the request timeout to expire time.sleep(60000); - TestUtils.assertFutureThrows(TimeoutException.class, result.future); + TestUtils.assertFutureThrows(result.future, TimeoutException.class); } } @@ -9479,15 +7710,15 @@ public void testDescribeClientQuotas() throws Exception { DescribeClientQuotasResult result = env.adminClient().describeClientQuotas(filter); Map> resultData = result.entities().get(); - assertEquals(2, resultData.size()); + assertEquals(resultData.size(), 2); assertTrue(resultData.containsKey(entity1)); Map config1 = resultData.get(entity1); - assertEquals(1, config1.size()); - assertEquals(10000.0, config1.get("consumer_byte_rate"), 1e-6); + assertEquals(config1.size(), 1); + assertEquals(config1.get("consumer_byte_rate"), 10000.0, 1e-6); assertTrue(resultData.containsKey(entity2)); Map config2 = resultData.get(entity2); - assertEquals(1, config2.size()); - assertEquals(20000.0, config2.get("producer_byte_rate"), 1e-6); + assertEquals(config2.size(), 1); + assertEquals(config2.get("producer_byte_rate"), 20000.0, 1e-6); } } @@ -9536,8 +7767,8 @@ public void testAlterClientQuotas() throws Exception { AlterClientQuotasResult result = env.adminClient().alterClientQuotas(entries); result.values().get(goodEntity); - TestUtils.assertFutureThrows(ClusterAuthorizationException.class, result.values().get(unauthorizedEntity)); - TestUtils.assertFutureThrows(InvalidRequestException.class, result.values().get(invalidEntity)); + TestUtils.assertFutureError(result.values().get(unauthorizedEntity), ClusterAuthorizationException.class); + TestUtils.assertFutureError(result.values().get(invalidEntity), InvalidRequestException.class); // ensure immutable assertThrows(UnsupportedOperationException.class, () -> result.values().put(newClientQuotaEntity(ClientQuotaEntity.USER, "user-3"), null)); @@ -9576,7 +7807,7 @@ public void testAlterReplicaLogDirsLogDirNotFound() throws Exception { logDirs.put(tpr1, "/data1"); AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs); assertNull(result.values().get(tpr0).get()); - TestUtils.assertFutureThrows(LogDirNotFoundException.class, result.values().get(tpr1)); + TestUtils.assertFutureError(result.values().get(tpr1), LogDirNotFoundException.class); } } @@ -9607,7 +7838,7 @@ public void testAlterReplicaLogDirsPartialResponse() throws Exception { logDirs.put(tpr2, "/data1"); AlterReplicaLogDirsResult result = env.adminClient().alterReplicaLogDirs(logDirs); assertNull(result.values().get(tpr1).get()); - TestUtils.assertFutureThrows(ApiException.class, result.values().get(tpr2)); + TestUtils.assertFutureThrows(result.values().get(tpr2), ApiException.class); } } @@ -9643,7 +7874,7 @@ public void testAlterReplicaLogDirsPartialFailure() throws Exception { // Advance time past the default api timeout to time out the inflight request time.sleep(defaultApiTimeout + 1); - TestUtils.assertFutureThrows(TimeoutException.class, result.values().get(tpr1)); + TestUtils.assertFutureThrows(result.values().get(tpr1), ApiException.class); assertNull(result.values().get(tpr2).get()); } } @@ -9831,7 +8062,7 @@ public void testDescribeLogDirsPartialFailure() throws Exception { // Advance time past the default api timeout to time out the inflight request time.sleep(defaultApiTimeout + 1); - TestUtils.assertFutureThrows(TimeoutException.class, result.descriptions().get(0)); + TestUtils.assertFutureThrows(result.descriptions().get(0), ApiException.class); assertNotNull(result.descriptions().get(1).get()); } } @@ -9890,7 +8121,7 @@ public void testUnregisterBrokerFailure() { UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId); // Validate response assertNotNull(result.all()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.UNKNOWN_SERVER_ERROR.exception().getClass()); } } @@ -9924,7 +8155,7 @@ public void testUnregisterBrokerTimeoutAndFailureRetry() { // Validate response assertNotNull(result.all()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.UNKNOWN_SERVER_ERROR.exception().getClass()); } } @@ -9941,7 +8172,7 @@ public void testUnregisterBrokerTimeoutMaxRetry() { // Validate response assertNotNull(result.all()); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.REQUEST_TIMED_OUT.exception().getClass()); } } @@ -9952,11 +8183,13 @@ public void testUnregisterBrokerTimeoutMaxWait() { env.kafkaClient().setNodeApiVersions( NodeApiVersions.create(ApiKeys.UNREGISTER_BROKER.id, (short) 0, (short) 0)); - UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId, new UnregisterBrokerOptions().timeoutMs(10)); + UnregisterBrokerOptions options = new UnregisterBrokerOptions(); + options.timeoutMs = 10; + UnregisterBrokerResult result = env.adminClient().unregisterBroker(nodeId, options); // Validate response assertNotNull(result.all()); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.REQUEST_TIMED_OUT.exception().getClass()); } } @@ -10016,7 +8249,7 @@ public void testDescribeProducersTimeout(boolean timeoutInMetadataLookup) throws "Future failed to timeout after expiration of timeout"); assertTrue(result.all().isCompletedExceptionally()); - TestUtils.assertFutureThrows(TimeoutException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), TimeoutException.class); assertFalse(env.kafkaClient().hasInFlightRequests()); } } @@ -10256,92 +8489,6 @@ public void testAbortTransactionFindLeaderAfterDisconnect() throws Exception { } } - @Test - public void testForceTerminateTransaction() throws Exception { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - String transactionalId = "testForceTerminate"; - Node transactionCoordinator = env.cluster().nodes().iterator().next(); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse( - Errors.NONE, - transactionalId, - transactionCoordinator - )); - - // Complete the init PID request successfully - InitProducerIdResponseData initProducerIdResponseData = new InitProducerIdResponseData() - .setProducerId(5678) - .setProducerEpoch((short) 123); - - env.kafkaClient().prepareResponseFrom(request -> - request instanceof InitProducerIdRequest, - new InitProducerIdResponse(initProducerIdResponseData), - transactionCoordinator - ); - - // Call force terminate and verify results - TerminateTransactionResult result = env.adminClient().forceTerminateTransaction(transactionalId); - assertNull(result.result().get()); - } - } - - @Test - public void testForceTerminateTransactionWithError() throws Exception { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - String transactionalId = "testForceTerminateError"; - Node transactionCoordinator = env.cluster().nodes().iterator().next(); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse( - Errors.NONE, - transactionalId, - transactionCoordinator - )); - - // Return an error from the InitProducerId request - env.kafkaClient().prepareResponseFrom(request -> - request instanceof InitProducerIdRequest, - new InitProducerIdResponse(new InitProducerIdResponseData() - .setErrorCode(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.code())), - transactionCoordinator - ); - - // Call force terminate and verify error is propagated - TerminateTransactionResult result = env.adminClient().forceTerminateTransaction(transactionalId); - ExecutionException exception = assertThrows(ExecutionException.class, () -> result.result().get()); - assertTrue(exception.getCause() instanceof TransactionalIdAuthorizationException); - } - } - - @Test - public void testForceTerminateTransactionWithCustomTimeout() throws Exception { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - String transactionalId = "testForceTerminateTimeout"; - Node transactionCoordinator = env.cluster().nodes().iterator().next(); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse( - Errors.NONE, - transactionalId, - transactionCoordinator - )); - - // Complete the init PID request - InitProducerIdResponseData initProducerIdResponseData = new InitProducerIdResponseData() - .setProducerId(9012) - .setProducerEpoch((short) 456); - - env.kafkaClient().prepareResponseFrom(request -> - request instanceof InitProducerIdRequest, - new InitProducerIdResponse(initProducerIdResponseData), - transactionCoordinator - ); - - // Use custom timeout - TerminateTransactionOptions options = new TerminateTransactionOptions().timeoutMs(10000); - TerminateTransactionResult result = env.adminClient().forceTerminateTransaction(transactionalId, options); - assertNull(result.result().get()); - } - } - @Test public void testListTransactions() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { @@ -10657,16 +8804,10 @@ private DescribeLogDirsResponse prepareDescribeLogDirsResponse(Errors error, Str .setLogDir(logDir)))); } - private static OffsetFetchResponse offsetFetchResponse(Errors error) { - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(GROUP_ID) - .setErrorCode(error.code()) - )), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + private OffsetFetchResponse offsetFetchResponse(Errors error, Map responseData) { + return new OffsetFetchResponse(THROTTLE, + Collections.singletonMap(GROUP_ID, error), + Collections.singletonMap(GROUP_ID, responseData)); } private static MemberDescription convertToMemberDescriptions(DescribedGroupMember member, @@ -10686,139 +8827,63 @@ private static ShareMemberDescription convertToShareMemberDescriptions(ShareGrou return new ShareMemberDescription(member.memberId(), member.clientId(), member.clientHost(), - assignment, - member.memberEpoch()); + assignment); } - @SuppressWarnings({"deprecation", "removal"}) @Test public void testListClientMetricsResources() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { - List expected = asList( - new ClientMetricsResourceListing("one"), - new ClientMetricsResourceListing("two") - ); - - ListConfigResourcesResponseData responseData = - new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); - - responseData.configResources() - .add(new ListConfigResourcesResponseData - .ConfigResource() - .setResourceName("one") - .setResourceType(ConfigResource.Type.CLIENT_METRICS.id()) - ); - responseData.configResources() - .add(new ListConfigResourcesResponseData - .ConfigResource() - .setResourceName("two") - .setResourceType(ConfigResource.Type.CLIENT_METRICS.id()) - ); - - env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - new ListConfigResourcesResponse(responseData)); - - ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); - assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); - } - } - - @SuppressWarnings({"deprecation", "removal"}) - @Test - public void testListClientMetricsResourcesEmpty() throws Exception { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - List expected = Collections.emptyList(); - - ListConfigResourcesResponseData responseData = - new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); - - env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - new ListConfigResourcesResponse(responseData)); - - ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); - assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); - } - } - - @SuppressWarnings({"deprecation", "removal"}) - @Test - public void testListClientMetricsResourcesNotSupported() { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - prepareListClientMetricsResourcesResponse(Errors.UNSUPPORTED_VERSION)); - - ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); - - // Validate response - assertNotNull(result.all()); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); - } - } - - @Test - public void testListConfigResources() throws Exception { - try (AdminClientUnitTestEnv env = mockClientEnv()) { - List expected = List.of( - new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "client-metrics"), - new ConfigResource(ConfigResource.Type.BROKER, "1"), - new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "1"), - new ConfigResource(ConfigResource.Type.TOPIC, "topic"), - new ConfigResource(ConfigResource.Type.GROUP, "group") + List expected = asList( + new ClientMetricsResourceListing("one"), + new ClientMetricsResourceListing("two") ); - ListConfigResourcesResponseData responseData = - new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); + ListClientMetricsResourcesResponseData responseData = + new ListClientMetricsResourcesResponseData().setErrorCode(Errors.NONE.code()); - expected.forEach(c -> - responseData.configResources() - .add(new ListConfigResourcesResponseData - .ConfigResource() - .setResourceName(c.name()) - .setResourceType(c.type().id()) - ) - ); + responseData.clientMetricsResources() + .add(new ListClientMetricsResourcesResponseData.ClientMetricsResource().setName("one")); + responseData.clientMetricsResources() + .add((new ListClientMetricsResourcesResponseData.ClientMetricsResource()).setName("two")); env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - new ListConfigResourcesResponse(responseData)); + request -> request instanceof ListClientMetricsResourcesRequest, + new ListClientMetricsResourcesResponse(responseData)); - ListConfigResourcesResult result = env.adminClient().listConfigResources(); - assertEquals(expected.size(), result.all().get().size()); + ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); } } @Test - public void testListConfigResourcesEmpty() throws Exception { + public void testListClientMetricsResourcesEmpty() throws Exception { try (AdminClientUnitTestEnv env = mockClientEnv()) { - ListConfigResourcesResponseData responseData = - new ListConfigResourcesResponseData().setErrorCode(Errors.NONE.code()); + List expected = Collections.emptyList(); + + ListClientMetricsResourcesResponseData responseData = + new ListClientMetricsResourcesResponseData().setErrorCode(Errors.NONE.code()); env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - new ListConfigResourcesResponse(responseData)); + request -> request instanceof ListClientMetricsResourcesRequest, + new ListClientMetricsResourcesResponse(responseData)); - ListConfigResourcesResult result = env.adminClient().listConfigResources(); - assertTrue(result.all().get().isEmpty()); + ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); + assertEquals(new HashSet<>(expected), new HashSet<>(result.all().get())); } } @Test - public void testListConfigResourcesNotSupported() { + public void testListClientMetricsResourcesNotSupported() { try (AdminClientUnitTestEnv env = mockClientEnv()) { env.kafkaClient().prepareResponse( - request -> request instanceof ListConfigResourcesRequest, - new ListConfigResourcesResponse(new ListConfigResourcesResponseData() - .setErrorCode(Errors.UNSUPPORTED_VERSION.code()))); + request -> request instanceof ListClientMetricsResourcesRequest, + prepareListClientMetricsResourcesResponse(Errors.UNSUPPORTED_VERSION)); - ListConfigResourcesResult result = env.adminClient().listConfigResources( - Set.of(ConfigResource.Type.UNKNOWN), new ListConfigResourcesOptions()); + ListClientMetricsResourcesResult result = env.adminClient().listClientMetricsResources(); + // Validate response assertNotNull(result.all()); - TestUtils.assertFutureThrows(UnsupportedVersionException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.UNSUPPORTED_VERSION.exception().getClass()); } } @@ -10877,8 +8942,8 @@ public void update(Time time, MockClient.MetadataUpdate update) { } } - private static ListConfigResourcesResponse prepareListClientMetricsResourcesResponse(Errors error) { - return new ListConfigResourcesResponse(new ListConfigResourcesResponseData() + private static ListClientMetricsResourcesResponse prepareListClientMetricsResourcesResponse(Errors error) { + return new ListClientMetricsResourcesResponse(new ListClientMetricsResourcesResponseData() .setErrorCode(error.code())); } @@ -10964,7 +9029,7 @@ public void testAddRaftVoterRequest(boolean fail, boolean sendClusterId) throws options); assertNotNull(result.all()); if (fail) { - TestUtils.assertFutureThrows(DuplicateVoterException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.DUPLICATE_VOTER.exception().getClass()); } else { result.all().get(); } @@ -11049,7 +9114,7 @@ public void testRemoveRaftVoterRequest(boolean fail, boolean sendClusterId) thro options); assertNotNull(result.all()); if (fail) { - TestUtils.assertFutureThrows(VoterNotFoundException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), Errors.VOTER_NOT_FOUND.exception().getClass()); } else { result.all().get(); } @@ -11100,622 +9165,4 @@ public void testRemoveRaftVoterRequest(boolean fail, boolean sendClusterId) thro } } } - - @Test - public void testListShareGroupOffsetsOptionsWithBatchedApi() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final List partitions = Collections.singletonList(new TopicPartition("A", 0)); - final ListShareGroupOffsetsOptions options = new ListShareGroupOffsetsOptions(); - - final ListShareGroupOffsetsSpec groupSpec = new ListShareGroupOffsetsSpec() - .topicPartitions(partitions); - Map groupSpecs = new HashMap<>(); - groupSpecs.put(GROUP_ID, groupSpec); - - env.adminClient().listShareGroupOffsets(groupSpecs, options); - - final MockClient mockClient = env.kafkaClient(); - waitForRequest(mockClient, ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS); - - ClientRequest clientRequest = mockClient.requests().peek(); - assertNotNull(clientRequest); - DescribeShareGroupOffsetsRequestData data = ((DescribeShareGroupOffsetsRequest.Builder) clientRequest.requestBuilder()).build().data(); - assertEquals(1, data.groups().size()); - assertEquals(GROUP_ID, data.groups().get(0).groupId()); - assertEquals(Collections.singletonList("A"), - data.groups().get(0).topics().stream().map(DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestTopic::topicName).collect(Collectors.toList())); - } - } - - @Test - public void testListShareGroupOffsets() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); - TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); - TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); - TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); - TopicPartition myTopicPartition4 = new TopicPartition("my_topic_1", 4); - TopicPartition myTopicPartition5 = new TopicPartition("my_topic_2", 6); - - ListShareGroupOffsetsSpec groupSpec = new ListShareGroupOffsetsSpec(); - Map groupSpecs = new HashMap<>(); - groupSpecs.put(GROUP_ID, groupSpec); - - DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData().setGroups( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50).setLeaderEpoch(1) - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100).setLeaderEpoch(2) - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(3) - ) - ) - ) - ) - ) - ); - env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); - - final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); - - assertEquals(6, partitionToOffsetAndMetadata.size()); - assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition0)); - assertEquals(new OffsetAndMetadata(11, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition1)); - assertEquals(new OffsetAndMetadata(40, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition2)); - assertEquals(new OffsetAndMetadata(50, Optional.of(1), ""), partitionToOffsetAndMetadata.get(myTopicPartition3)); - assertEquals(new OffsetAndMetadata(100, Optional.of(2), ""), partitionToOffsetAndMetadata.get(myTopicPartition4)); - assertEquals(new OffsetAndMetadata(500, Optional.of(3), ""), partitionToOffsetAndMetadata.get(myTopicPartition5)); - } - } - - @Test - public void testListShareGroupOffsetsMultipleGroups() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareBatchedFindCoordinatorResponse(Errors.NONE, env.cluster().controller(), Set.of(GROUP_ID, "group-1"))); - - TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); - TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); - TopicPartition myTopicPartition2 = new TopicPartition("my_topic", 2); - TopicPartition myTopicPartition3 = new TopicPartition("my_topic", 3); - TopicPartition myTopicPartition4 = new TopicPartition("my_topic_1", 4); - TopicPartition myTopicPartition5 = new TopicPartition("my_topic_2", 6); - - ListShareGroupOffsetsSpec group0Specs = new ListShareGroupOffsetsSpec().topicPartitions( - List.of(myTopicPartition0, myTopicPartition1, myTopicPartition2, myTopicPartition3) - ); - ListShareGroupOffsetsSpec group1Specs = new ListShareGroupOffsetsSpec().topicPartitions( - List.of(myTopicPartition4, myTopicPartition5) - ); - Map groupSpecs = new HashMap<>(); - groupSpecs.put(GROUP_ID, group0Specs); - groupSpecs.put("group-1", group1Specs); - - DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData().setGroups( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(2).setStartOffset(40).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(3).setStartOffset(50).setLeaderEpoch(1) - ) - ) - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId("group-1").setTopics( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setStartOffset(100).setLeaderEpoch(2) - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(2) - ) - ) - ) - ) - ) - ); - env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); - - final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - assertEquals(2, result.all().get().size()); - - final Map partitionToOffsetAndMetadataGroup0 = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); - assertEquals(4, partitionToOffsetAndMetadataGroup0.size()); - assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition0)); - assertEquals(new OffsetAndMetadata(11, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition1)); - assertEquals(new OffsetAndMetadata(40, Optional.of(0), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition2)); - assertEquals(new OffsetAndMetadata(50, Optional.of(1), ""), partitionToOffsetAndMetadataGroup0.get(myTopicPartition3)); - - final Map partitionToOffsetAndMetadataGroup1 = result.partitionsToOffsetAndMetadata("group-1").get(); - assertEquals(2, partitionToOffsetAndMetadataGroup1.size()); - assertEquals(new OffsetAndMetadata(100, Optional.of(2), ""), partitionToOffsetAndMetadataGroup1.get(myTopicPartition4)); - assertEquals(new OffsetAndMetadata(500, Optional.of(2), ""), partitionToOffsetAndMetadataGroup1.get(myTopicPartition5)); - } - } - - @Test - public void testListShareGroupOffsetsEmpty() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - ListShareGroupOffsetsSpec groupSpec = new ListShareGroupOffsetsSpec(); - Map groupSpecs = new HashMap<>(); - groupSpecs.put(GROUP_ID, groupSpec); - - DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData().setGroups( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID) - ) - ); - env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); - - final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); - - assertEquals(0, partitionToOffsetAndMetadata.size()); - } - } - - @Test - public void testListShareGroupOffsetsWithErrorInOnePartition() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - TopicPartition myTopicPartition0 = new TopicPartition("my_topic", 0); - TopicPartition myTopicPartition1 = new TopicPartition("my_topic", 1); - TopicPartition myTopicPartition2 = new TopicPartition("my_topic_1", 4); - TopicPartition myTopicPartition3 = new TopicPartition("my_topic_2", 6); - - - ListShareGroupOffsetsSpec groupSpec = new ListShareGroupOffsetsSpec().topicPartitions( - List.of(myTopicPartition0, myTopicPartition1, myTopicPartition2, myTopicPartition3) - ); - Map groupSpecs = new HashMap<>(); - groupSpecs.put(GROUP_ID, groupSpec); - - DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData().setGroups( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup().setGroupId(GROUP_ID).setTopics( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(0).setStartOffset(10).setLeaderEpoch(0), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(1).setStartOffset(11).setLeaderEpoch(1) - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_1").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(4).setErrorCode(Errors.NOT_COORDINATOR.code()).setErrorMessage("Not a Coordinator") - ) - ), - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic().setTopicName("my_topic_2").setPartitions( - List.of( - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition().setPartitionIndex(6).setStartOffset(500).setLeaderEpoch(2) - ) - ) - ) - ) - ) - ); - env.kafkaClient().prepareResponse(new DescribeShareGroupOffsetsResponse(data)); - - final ListShareGroupOffsetsResult result = env.adminClient().listShareGroupOffsets(groupSpecs); - final Map partitionToOffsetAndMetadata = result.partitionsToOffsetAndMetadata(GROUP_ID).get(); - - // For myTopicPartition2 we have set an error as the response. Thus, it should be skipped from the final result - assertEquals(3, partitionToOffsetAndMetadata.size()); - assertEquals(new OffsetAndMetadata(10, Optional.of(0), ""), partitionToOffsetAndMetadata.get(myTopicPartition0)); - assertEquals(new OffsetAndMetadata(11, Optional.of(1), ""), partitionToOffsetAndMetadata.get(myTopicPartition1)); - assertEquals(new OffsetAndMetadata(500, Optional.of(2), ""), partitionToOffsetAndMetadata.get(myTopicPartition3)); - } - } - - @Test - public void testAlterShareGroupOffsets() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setResponses( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0), new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(1))), - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0))) - ).iterator()) - ); - - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barPartition0 = new TopicPartition("bar", 0); - TopicPartition zooTopicPartition0 = new TopicPartition("zoo", 0); - - env.kafkaClient().prepareResponse(new AlterShareGroupOffsetsResponse(data)); - final AlterShareGroupOffsetsResult result = env.adminClient().alterShareGroupOffsets(GROUP_ID, Map.of(fooTopicPartition0, 1L, fooTopicPartition1, 2L, barPartition0, 1L)); - - assertNull(result.all().get()); - assertNull(result.partitionResult(fooTopicPartition0).get()); - assertNull(result.partitionResult(fooTopicPartition1).get()); - assertNull(result.partitionResult(barPartition0).get()); - TestUtils.assertFutureThrows(IllegalArgumentException.class, result.partitionResult(zooTopicPartition0)); - } - } - - @Test - public void testAlterShareGroupOffsetsWithTopLevelError() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()).setErrorMessage("Group authorization failed."); - - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barPartition0 = new TopicPartition("bar", 0); - TopicPartition zooTopicPartition0 = new TopicPartition("zoo", 0); - - env.kafkaClient().prepareResponse(new AlterShareGroupOffsetsResponse(data)); - final AlterShareGroupOffsetsResult result = env.adminClient().alterShareGroupOffsets(GROUP_ID, Map.of(fooTopicPartition0, 1L, fooTopicPartition1, 2L, barPartition0, 1L)); - - TestUtils.assertFutureThrows(GroupAuthorizationException.class, result.all()); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, result.partitionResult(fooTopicPartition1)); - TestUtils.assertFutureThrows(IllegalArgumentException.class, result.partitionResult(zooTopicPartition0)); - } - } - - @Test - public void testAlterShareGroupOffsetsWithErrorInOnePartition() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData().setResponses( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("foo").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0), - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(1).setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()).setErrorMessage("Topic authorization failed."))), - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic().setTopicName("bar").setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition().setPartitionIndex(0))) - ).iterator()) - ); - - TopicPartition fooTopicPartition0 = new TopicPartition("foo", 0); - TopicPartition fooTopicPartition1 = new TopicPartition("foo", 1); - TopicPartition barPartition0 = new TopicPartition("bar", 0); - - env.kafkaClient().prepareResponse(new AlterShareGroupOffsetsResponse(data)); - final AlterShareGroupOffsetsResult result = env.adminClient().alterShareGroupOffsets(GROUP_ID, Map.of(fooTopicPartition0, 1L, fooTopicPartition1, 2L, barPartition0, 1L)); - - TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.all()); - assertNull(result.partitionResult(fooTopicPartition0).get()); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, result.partitionResult(fooTopicPartition1)); - assertNull(result.partitionResult(barPartition0).get()); - } - } - - @Test - public void testDeleteShareGroupOffsetsOptionsWithBatchedApi() throws Exception { - final Cluster cluster = mockCluster(3, 0); - final Time time = new MockTime(); - - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(time, cluster, - AdminClientConfig.RETRIES_CONFIG, "0")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - final Set topics = Collections.singleton("A"); - final DeleteShareGroupOffsetsOptions options = new DeleteShareGroupOffsetsOptions(); - - env.adminClient().deleteShareGroupOffsets(GROUP_ID, topics, options); - - final MockClient mockClient = env.kafkaClient(); - waitForRequest(mockClient, ApiKeys.DELETE_SHARE_GROUP_OFFSETS); - - ClientRequest clientRequest = mockClient.requests().peek(); - assertNotNull(clientRequest); - DeleteShareGroupOffsetsRequestData data = ((DeleteShareGroupOffsetsRequest.Builder) clientRequest.requestBuilder()).build().data(); - assertEquals(GROUP_ID, data.groupId()); - assertEquals(1, data.topics().size()); - assertEquals(Collections.singletonList("A"), - data.topics().stream().map(DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic::topicName).collect(Collectors.toList())); - } - } - - @Test - public void testDeleteShareGroupOffsets() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Uuid fooId = Uuid.randomUuid(); - String fooName = "foo"; - Uuid barId = Uuid.randomUuid(); - String barName = "bar"; - - String zooName = "zoo"; - - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( - List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName(fooName).setTopicId(fooId), - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic().setTopicName(barName).setTopicId(barId) - ) - ); - - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); - - assertNull(result.all().get()); - assertNull(result.topicResult(fooName).get()); - assertNull(result.topicResult(barName).get()); - assertThrows(IllegalArgumentException.class, () -> result.topicResult(zooName)); - } - } - - @Test - public void testDeleteShareGroupOffsetsEmpty() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( - List.of() - ); - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Collections.emptySet()); - assertDoesNotThrow(() -> result.all().get()); - } - } - - @Test - public void testDeleteShareGroupOffsetsWithErrorInGroup() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData() - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()) - .setErrorMessage(Errors.GROUP_AUTHORIZATION_FAILED.message()); - - String fooName = "foo"; - String barName = "bar"; - - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); - - TestUtils.assertFutureThrows(Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass(), result.all()); - } - } - - @Test - public void testDeleteShareGroupOffsetsWithErrorInOneTopic() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Uuid fooId = Uuid.randomUuid(); - String fooName = "foo"; - Uuid barId = Uuid.randomUuid(); - String barName = "bar"; - - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( - List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() - .setTopicName(fooName) - .setTopicId(fooId) - .setErrorCode(Errors.KAFKA_STORAGE_ERROR.code()) - .setErrorMessage(Errors.KAFKA_STORAGE_ERROR.message()), - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() - .setTopicName(barName) - .setTopicId(barId) - ) - ); - - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName, barName)); - - TestUtils.assertFutureThrows(Errors.KAFKA_STORAGE_ERROR.exception().getClass(), result.all()); - TestUtils.assertFutureThrows(Errors.KAFKA_STORAGE_ERROR.exception().getClass(), result.topicResult(fooName)); - assertNull(result.topicResult(barName).get()); - } - } - - @Test - public void testDeleteShareGroupOffsetsWithPartitionNotPresentInResult() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(mockCluster(1, 0))) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - env.kafkaClient().prepareResponse(prepareFindCoordinatorResponse(Errors.NONE, env.cluster().controller())); - - Uuid fooId = Uuid.randomUuid(); - String fooName = "foo"; - - String barName = "bar"; - - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData().setResponses( - List.of( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() - .setTopicName(fooName) - .setTopicId(fooId) - ) - ); - - env.kafkaClient().prepareResponse(new DeleteShareGroupOffsetsResponse(data)); - final DeleteShareGroupOffsetsResult result = env.adminClient().deleteShareGroupOffsets(GROUP_ID, Set.of(fooName)); - - assertDoesNotThrow(() -> result.all().get()); - assertThrows(IllegalArgumentException.class, () -> result.topicResult(barName)); - assertNull(result.topicResult(fooName).get()); - } - } - - private static StreamsGroupDescribeResponseData makeFullStreamsGroupDescribeResponse() { - StreamsGroupDescribeResponseData data; - StreamsGroupDescribeResponseData.TaskIds activeTasks1 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(0, 1, 2)); - StreamsGroupDescribeResponseData.TaskIds standbyTasks1 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(3, 4, 5)); - StreamsGroupDescribeResponseData.TaskIds warmupTasks1 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(6, 7, 8)); - StreamsGroupDescribeResponseData.TaskIds activeTasks2 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(3, 4, 5)); - StreamsGroupDescribeResponseData.TaskIds standbyTasks2 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(6, 7, 8)); - StreamsGroupDescribeResponseData.TaskIds warmupTasks2 = new StreamsGroupDescribeResponseData.TaskIds() - .setSubtopologyId("my_subtopology") - .setPartitions(asList(0, 1, 2)); - StreamsGroupDescribeResponseData.Assignment memberAssignment = new StreamsGroupDescribeResponseData.Assignment() - .setActiveTasks(singletonList(activeTasks1)) - .setStandbyTasks(singletonList(standbyTasks1)) - .setWarmupTasks(singletonList(warmupTasks1)); - StreamsGroupDescribeResponseData.Assignment targetAssignment = new StreamsGroupDescribeResponseData.Assignment() - .setActiveTasks(singletonList(activeTasks2)) - .setStandbyTasks(singletonList(standbyTasks2)) - .setWarmupTasks(singletonList(warmupTasks2)); - StreamsGroupDescribeResponseData.Member memberOne = new StreamsGroupDescribeResponseData.Member() - .setMemberId("0") - .setMemberEpoch(1) - .setInstanceId("instance-id") - .setRackId("rack-id") - .setClientId("clientId0") - .setClientHost("clientHost") - .setTopologyEpoch(0) - .setProcessId("processId") - .setUserEndpoint(new StreamsGroupDescribeResponseData.Endpoint() - .setHost("localhost") - .setPort(8080) - ) - .setClientTags(Collections.singletonList(new StreamsGroupDescribeResponseData.KeyValue() - .setKey("key") - .setValue("value") - )) - .setTaskOffsets(Collections.singletonList(new StreamsGroupDescribeResponseData.TaskOffset() - .setSubtopologyId("my_subtopology") - .setPartition(0) - .setOffset(0) - )) - .setTaskEndOffsets(Collections.singletonList(new StreamsGroupDescribeResponseData.TaskOffset() - .setSubtopologyId("my_subtopology") - .setPartition(0) - .setOffset(1) - )) - .setAssignment(memberAssignment) - .setTargetAssignment(targetAssignment) - .setIsClassic(true); - - StreamsGroupDescribeResponseData.Member memberTwo = new StreamsGroupDescribeResponseData.Member() - .setMemberId("1") - .setMemberEpoch(2) - .setInstanceId(null) - .setRackId(null) - .setClientId("clientId1") - .setClientHost("clientHost") - .setTopologyEpoch(1) - .setProcessId("processId2") - .setUserEndpoint(null) - .setClientTags(Collections.emptyList()) - .setTaskOffsets(Collections.emptyList()) - .setTaskEndOffsets(Collections.emptyList()) - .setAssignment(new StreamsGroupDescribeResponseData.Assignment()) - .setTargetAssignment(new StreamsGroupDescribeResponseData.Assignment()) - .setIsClassic(false); - - StreamsGroupDescribeResponseData.Subtopology subtopologyDescription = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("my_subtopology") - .setSourceTopics(Collections.singletonList("my_source_topic")) - .setRepartitionSinkTopics(Collections.singletonList("my_repartition_sink_topic")) - .setStateChangelogTopics(Collections.singletonList( - new StreamsGroupDescribeResponseData.TopicInfo() - .setName("my_changelog_topic") - .setPartitions(0) - .setReplicationFactor((short) 3) - .setTopicConfigs(Collections.singletonList(new StreamsGroupDescribeResponseData.KeyValue() - .setKey("key1") - .setValue("value1") - )) - )) - .setRepartitionSourceTopics(Collections.singletonList( - new StreamsGroupDescribeResponseData.TopicInfo() - .setName("my_repartition_topic") - .setPartitions(99) - .setReplicationFactor((short) 0) - .setTopicConfigs(Collections.emptyList()) - )); - - data = new StreamsGroupDescribeResponseData(); - data.groups().add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(GROUP_ID) - .setGroupState(GroupState.STABLE.toString()) - .setMembers(asList(memberOne, memberTwo)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setEpoch(1) - .setSubtopologies(Collections.singletonList(subtopologyDescription)) - ) - .setGroupEpoch(2) - .setAssignmentEpoch(1)); - return data; - } - - @Test - @Timeout(30) - public void testDescribeTopicsTimeoutWhenNoBrokerResponds() throws Exception { - try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv( - mockCluster(1, 0), - AdminClientConfig.RETRIES_CONFIG, "0", - AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "30000")) { - env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); - - // Not using prepareResponse is equivalent to "no brokers respond". - long start = System.currentTimeMillis(); - DescribeTopicsResult result = env.adminClient().describeTopics(List.of("test-topic"), new DescribeTopicsOptions().timeoutMs(200)); - Map> topicDescriptionMap = result.topicNameValues(); - KafkaFuture topicDescription = topicDescriptionMap.get("test-topic"); - ExecutionException exception = assertThrows(ExecutionException.class, topicDescription::get); - // Duration should be greater than or equal to 200 ms but less than 30000 ms. - long duration = System.currentTimeMillis() - start; - - assertInstanceOf(TimeoutException.class, exception.getCause()); - assertTrue(duration >= 150L && duration < 30000); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java index f20d6e56c9595..75d6c1c88c537 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListConsumerGroupsOptionsTest.java @@ -20,6 +20,8 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.HashSet; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -27,7 +29,7 @@ public class ListConsumerGroupsOptionsTest { @Test public void testState() { - Set consumerGroupStates = Set.of(ConsumerGroupState.values()); + Set consumerGroupStates = new HashSet<>(Arrays.asList(ConsumerGroupState.values())); ListConsumerGroupsOptions options = new ListConsumerGroupsOptions().inStates(consumerGroupStates); assertEquals(consumerGroupStates, options.states()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/ListTransactionsResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/ListTransactionsResultTest.java index 01556391ad313..853602b24f4d2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/ListTransactionsResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/ListTransactionsResultTest.java @@ -42,9 +42,9 @@ public class ListTransactionsResultTest { @Test public void testAllFuturesFailIfLookupFails() { future.completeExceptionally(new KafkaException()); - assertFutureThrows(KafkaException.class, result.all()); - assertFutureThrows(KafkaException.class, result.allByBrokerId()); - assertFutureThrows(KafkaException.class, result.byBrokerId()); + assertFutureThrows(result.all(), KafkaException.class); + assertFutureThrows(result.allByBrokerId(), KafkaException.class); + assertFutureThrows(result.byBrokerId(), KafkaException.class); } @Test @@ -111,9 +111,9 @@ public void testPartialFailure() throws Exception { assertEquals(broker1Listings, resultBrokerFutures.get(1).get()); // Everything else should fail - assertFutureThrows(KafkaException.class, result.all()); - assertFutureThrows(KafkaException.class, result.allByBrokerId()); - assertFutureThrows(KafkaException.class, resultBrokerFutures.get(2)); + assertFutureThrows(result.all(), KafkaException.class); + assertFutureThrows(result.allByBrokerId(), KafkaException.class); + assertFutureThrows(resultBrokerFutures.get(2), KafkaException.class); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java index 7c3e928b3a636..16ce11d7361e5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MemberDescriptionTest.java @@ -41,31 +41,20 @@ public class MemberDescriptionTest { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); } @Test public void testEqualsWithoutGroupInstanceId() { MemberDescription dynamicMemberDescription = new MemberDescription(MEMBER_ID, - Optional.empty(), CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); MemberDescription identityDescription = new MemberDescription(MEMBER_ID, - Optional.empty(), CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); assertNotEquals(STATIC_MEMBER_DESCRIPTION, dynamicMemberDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), dynamicMemberDescription.hashCode()); @@ -85,10 +74,7 @@ public void testEqualsWithGroupInstanceId() { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); assertEquals(STATIC_MEMBER_DESCRIPTION, identityDescription); assertEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), identityDescription.hashCode()); @@ -100,10 +86,7 @@ public void testNonEqual() { INSTANCE_ID, CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); assertNotEquals(STATIC_MEMBER_DESCRIPTION, newMemberDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newMemberDescription.hashCode()); @@ -112,10 +95,7 @@ public void testNonEqual() { Optional.of("new_instance"), CLIENT_ID, HOST, - ASSIGNMENT, - Optional.empty(), - Optional.empty(), - Optional.empty()); + ASSIGNMENT); assertNotEquals(STATIC_MEMBER_DESCRIPTION, newInstanceDescription); assertNotEquals(STATIC_MEMBER_DESCRIPTION.hashCode(), newInstanceDescription.hashCode()); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java index 48874f1a1b2db..3be5dc7b3e8d6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/MockAdminClient.java @@ -66,10 +66,8 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Objects; import java.util.Optional; import java.util.Set; import java.util.stream.Collectors; @@ -462,14 +460,14 @@ public synchronized ListTopicsResult listTopics(ListTopicsOptions options) { @Override public synchronized DescribeTopicsResult describeTopics(TopicCollection topics, DescribeTopicsOptions options) { if (topics instanceof TopicIdCollection) - return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds()))); + return DescribeTopicsResult.ofTopicIds(new HashMap<>(handleDescribeTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options))); else if (topics instanceof TopicNameCollection) - return DescribeTopicsResult.ofTopicNames(new HashMap<>(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames()))); + return DescribeTopicsResult.ofTopicNames(new HashMap<>(handleDescribeTopicsByNames(((TopicNameCollection) topics).topicNames(), options))); else throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for describeTopics."); } - private Map> handleDescribeTopicsByNames(Collection topicNames) { + private Map> handleDescribeTopicsByNames(Collection topicNames, DescribeTopicsOptions options) { Map> topicDescriptions = new HashMap<>(); if (timeoutNextRequests > 0) { @@ -509,7 +507,7 @@ private Map> handleDescribeTopicsByNames(C return topicDescriptions; } - public synchronized Map> handleDescribeTopicsUsingIds(Collection topicIds) { + public synchronized Map> handleDescribeTopicsUsingIds(Collection topicIds, DescribeTopicsOptions options) { Map> topicDescriptions = new HashMap<>(); @@ -555,15 +553,15 @@ public synchronized Map> handleDescribeTopic public synchronized DeleteTopicsResult deleteTopics(TopicCollection topics, DeleteTopicsOptions options) { DeleteTopicsResult result; if (topics instanceof TopicIdCollection) - result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds()))); + result = DeleteTopicsResult.ofTopicIds(new HashMap<>(handleDeleteTopicsUsingIds(((TopicIdCollection) topics).topicIds(), options))); else if (topics instanceof TopicNameCollection) - result = DeleteTopicsResult.ofTopicNames(new HashMap<>(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames()))); + result = DeleteTopicsResult.ofTopicNames(new HashMap<>(handleDeleteTopicsUsingNames(((TopicNameCollection) topics).topicNames(), options))); else throw new IllegalArgumentException("The TopicCollection provided did not match any supported classes for deleteTopics."); return result; } - private Map> handleDeleteTopicsUsingNames(Collection topicNameCollection) { + private Map> handleDeleteTopicsUsingNames(Collection topicNameCollection, DeleteTopicsOptions options) { Map> deleteTopicsResult = new HashMap<>(); Collection topicNames = new ArrayList<>(topicNameCollection); @@ -592,7 +590,7 @@ private Map> handleDeleteTopicsUsingNames(Collection> handleDeleteTopicsUsingIds(Collection topicIdCollection) { + private Map> handleDeleteTopicsUsingIds(Collection topicIdCollection, DeleteTopicsOptions options) { Map> deleteTopicsResult = new HashMap<>(); Collection topicIds = new ArrayList<>(topicIdCollection); @@ -737,7 +735,6 @@ public synchronized DescribeConsumerGroupsResult describeConsumerGroups(Collecti } @Override - @SuppressWarnings("removal") public synchronized ListConsumerGroupsResult listConsumerGroups(ListConsumerGroupsOptions options) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.complete(groupConfigs.keySet().stream().map(g -> new ConsumerGroupListing(g, false)).collect(Collectors.toList())); @@ -759,36 +756,16 @@ public synchronized ListConsumerGroupOffsetsResult listConsumerGroupOffsets(Map< return new ListConsumerGroupOffsetsResult(Collections.singletonMap(CoordinatorKey.byGroupId(group), future)); } - @Override - public synchronized ListStreamsGroupOffsetsResult listStreamsGroupOffsets(Map groupSpecs, ListStreamsGroupOffsetsOptions options) { - Map consumerGroupSpecs = groupSpecs.entrySet().stream() - .collect(Collectors.toMap( - Map.Entry::getKey, - entry -> new ListConsumerGroupOffsetsSpec().topicPartitions(entry.getValue().topicPartitions()) - )); - return new ListStreamsGroupOffsetsResult(listConsumerGroupOffsets(consumerGroupSpecs, new ListConsumerGroupOffsetsOptions())); - } - @Override public synchronized DeleteConsumerGroupsResult deleteConsumerGroups(Collection groupIds, DeleteConsumerGroupsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); } - @Override - public synchronized DeleteStreamsGroupsResult deleteStreamsGroups(Collection groupIds, DeleteStreamsGroupsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override public synchronized DeleteConsumerGroupOffsetsResult deleteConsumerGroupOffsets(String groupId, Set partitions, DeleteConsumerGroupOffsetsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); } - @Override - public synchronized DeleteStreamsGroupOffsetsResult deleteStreamsGroupOffsets(String groupId, Set partitions, DeleteStreamsGroupOffsetsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override public synchronized ElectLeadersResult electLeaders( ElectionType electionType, @@ -1120,7 +1097,11 @@ public synchronized DescribeReplicaLogDirsResult describeReplicaLogDirs( DescribeLogDirsResponse.INVALID_OFFSET_LAG)); } else { ReplicaLogDirInfo info = replicaMoves.get(replica); - future.complete(Objects.requireNonNullElseGet(info, () -> new ReplicaLogDirInfo(currentLogDir, 0, null, 0))); + if (info == null) { + future.complete(new ReplicaLogDirInfo(currentLogDir, 0, null, 0)); + } else { + future.complete(info); + } } } } @@ -1170,7 +1151,8 @@ public synchronized ListPartitionReassignmentsResult listPartitionReassignments( Optional> partitions, ListPartitionReassignmentsOptions options) { Map map = new HashMap<>(); - for (TopicPartition partition : partitions.orElseGet(reassignments::keySet)) { + for (TopicPartition partition : partitions.isPresent() ? + partitions.get() : reassignments.keySet()) { PartitionReassignment reassignment = findPartitionReassignment(partition); if (reassignment != null) { map.put(partition, reassignment); @@ -1212,11 +1194,6 @@ public synchronized AlterConsumerGroupOffsetsResult alterConsumerGroupOffsets(St throw new UnsupportedOperationException("Not implement yet"); } - @Override - public synchronized AlterStreamsGroupOffsetsResult alterStreamsGroupOffsets(String groupId, Map offsets, AlterStreamsGroupOffsetsOptions options) { - throw new UnsupportedOperationException("Not implement yet"); - } - @Override public synchronized ListOffsetsResult listOffsets(Map topicPartitionOffsets, ListOffsetsOptions options) { Map> futures = new HashMap<>(); @@ -1380,11 +1357,6 @@ public AbortTransactionResult abortTransaction(AbortTransactionSpec spec, AbortT throw new UnsupportedOperationException("Not implemented yet"); } - @Override - public TerminateTransactionResult forceTerminateTransaction(String transactionalId, TerminateTransactionOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override public ListTransactionsResult listTransactions(ListTransactionsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); @@ -1396,38 +1368,6 @@ public FenceProducersResult fenceProducers(Collection transactionalIds, } @Override - public ListConfigResourcesResult listConfigResources(Set configResourceTypes, ListConfigResourcesOptions options) { - KafkaFutureImpl> future = new KafkaFutureImpl<>(); - Set configResources = new HashSet<>(); - if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.TOPIC)) { - allTopics.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.TOPIC, name))); - } - - if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.BROKER)) { - for (int i = 0; i < brokers.size(); i++) { - configResources.add(new ConfigResource(ConfigResource.Type.BROKER, String.valueOf(i))); - } - } - - if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.BROKER_LOGGER)) { - for (int i = 0; i < brokers.size(); i++) { - configResources.add(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, String.valueOf(i))); - } - } - - if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.CLIENT_METRICS)) { - clientMetricsConfigs.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name))); - } - - if (configResourceTypes.isEmpty() || configResourceTypes.contains(ConfigResource.Type.GROUP)) { - groupConfigs.keySet().forEach(name -> configResources.add(new ConfigResource(ConfigResource.Type.GROUP, name))); - } - future.complete(configResources); - return new ListConfigResourcesResult(future); - } - - @Override - @SuppressWarnings({"deprecation", "removal"}) public ListClientMetricsResourcesResult listClientMetricsResources(ListClientMetricsResourcesOptions options) { KafkaFutureImpl> future = new KafkaFutureImpl<>(); future.complete(clientMetricsConfigs.keySet().stream().map(ClientMetricsResourceListing::new).collect(Collectors.toList())); @@ -1449,31 +1389,6 @@ public synchronized DescribeShareGroupsResult describeShareGroups(Collection offsets, AlterShareGroupOffsetsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - - @Override - public synchronized ListShareGroupOffsetsResult listShareGroupOffsets(Map groupSpecs, ListShareGroupOffsetsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - - @Override - public synchronized DeleteShareGroupOffsetsResult deleteShareGroupOffsets(String groupId, Set topics, DeleteShareGroupOffsetsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - - @Override - public synchronized DeleteShareGroupsResult deleteShareGroups(Collection groupIds, DeleteShareGroupsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - - @Override - public synchronized DescribeStreamsGroupsResult describeStreamsGroups(Collection groupIds, DescribeStreamsGroupsOptions options) { - throw new UnsupportedOperationException("Not implemented yet"); - } - @Override public synchronized DescribeClassicGroupsResult describeClassicGroups(Collection groupIds, DescribeClassicGroupsOptions options) { throw new UnsupportedOperationException("Not implemented yet"); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java index ceb188a41759b..40f7f5ff49969 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/RemoveMembersFromConsumerGroupResultTest.java @@ -58,11 +58,11 @@ public void setUp() { } @Test - public void testTopLevelErrorConstructor() { + public void testTopLevelErrorConstructor() throws InterruptedException { memberFutures.completeExceptionally(Errors.GROUP_AUTHORIZATION_FAILED.exception()); RemoveMembersFromConsumerGroupResult topLevelErrorResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureThrows(GroupAuthorizationException.class, topLevelErrorResult.all()); + TestUtils.assertFutureError(topLevelErrorResult.all(), GroupAuthorizationException.class); } @Test @@ -78,9 +78,9 @@ public void testMemberMissingErrorInRequestConstructor() throws InterruptedExcep RemoveMembersFromConsumerGroupResult missingMemberResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingMemberResult.all()); + TestUtils.assertFutureError(missingMemberResult.all(), IllegalArgumentException.class); assertNull(missingMemberResult.memberResult(instanceOne).get()); - TestUtils.assertFutureThrows(IllegalArgumentException.class, missingMemberResult.memberResult(instanceTwo)); + TestUtils.assertFutureError(missingMemberResult.memberResult(instanceTwo), IllegalArgumentException.class); } @Test @@ -111,9 +111,9 @@ private RemoveMembersFromConsumerGroupResult createAndVerifyMemberLevelError() t RemoveMembersFromConsumerGroupResult memberLevelErrorResult = new RemoveMembersFromConsumerGroupResult(memberFutures, membersToRemove); - TestUtils.assertFutureThrows(FencedInstanceIdException.class, memberLevelErrorResult.all()); + TestUtils.assertFutureError(memberLevelErrorResult.all(), FencedInstanceIdException.class); assertNull(memberLevelErrorResult.memberResult(instanceOne).get()); - TestUtils.assertFutureThrows(FencedInstanceIdException.class, memberLevelErrorResult.memberResult(instanceTwo)); + TestUtils.assertFutureError(memberLevelErrorResult.memberResult(instanceTwo), FencedInstanceIdException.class); return memberLevelErrorResult; } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java index c4ffc657914cd..7c87f21c64380 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminApiDriverTest.java @@ -40,6 +40,7 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Objects; @@ -396,7 +397,7 @@ public void testRetryLookupAfterDisconnect() { public void testRetryLookupAndDisableBatchAfterNoBatchedFindCoordinatorsException() { MockTime time = new MockTime(); LogContext lc = new LogContext(); - Set groupIds = Set.of("g1", "g2"); + Set groupIds = new HashSet<>(Arrays.asList("g1", "g2")); DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(lc); AdminApiFuture future = AdminApiFuture.forKeys( groupIds.stream().map(CoordinatorKey::byGroupId).collect(Collectors.toSet())); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java index 0581d672fb8a0..a61a7bdfda5f1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminBootstrapAddressesTest.java @@ -32,13 +32,17 @@ import static org.junit.jupiter.api.Assertions.assertThrows; public class AdminBootstrapAddressesTest { - - @Test - public void testNoBootstrapSet() { - Map map = Map.of( - AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "", - AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, "" - ); + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testNoBootstrapSet(boolean nullValue) { + Map map = new HashMap<>(); + if (nullValue) { + map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, null); + map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, null); + } else { + map.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, ""); + map.put(AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG, ""); + } AdminClientConfig config = new AdminClientConfig(map); assertEquals("You must set either bootstrap.servers or bootstrap.controllers", assertThrows(ConfigException.class, () -> AdminBootstrapAddresses.fromConfig(config)). diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java index 1e6823ea8ee64..5620dd06a5e71 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AdminMetadataManagerTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.common.Cluster; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; -import org.apache.kafka.common.errors.AuthorizationException; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -99,16 +98,6 @@ public void testAuthenticationFailure() { assertTrue(mgr.isReady()); } - @Test - public void testAuthorizationFailure() { - mgr.transitionToUpdatePending(time.milliseconds()); - mgr.updateFailed(new AuthorizationException("Authorization failed")); - assertEquals(refreshBackoffMs, mgr.metadataFetchDelayMs(time.milliseconds())); - assertThrows(AuthorizationException.class, mgr::isReady); - mgr.update(mockCluster(), time.milliseconds()); - assertTrue(mgr.isReady()); - } - @Test public void testNeedsRebootstrap() { long rebootstrapTriggerMs = 1000; diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyIntegrationTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyIntegrationTest.java index af69717375d24..25ba77d1b4dae 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyIntegrationTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/AllBrokersStrategyIntegrationTest.java @@ -77,7 +77,7 @@ public void testFatalLookupError() { driver.onFailure(time.milliseconds(), spec, new UnknownServerException()); assertTrue(result.all().isDone()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all()); + TestUtils.assertFutureThrows(result.all(), UnknownServerException.class); assertEquals(Collections.emptyList(), driver.poll()); } @@ -200,7 +200,7 @@ public void testFatalFulfillmentError() throws Exception { driver.onFailure(time.milliseconds(), requestSpec, new UnknownServerException()); assertTrue(future.isDone()); - TestUtils.assertFutureThrows(UnknownServerException.class, future); + TestUtils.assertFutureThrows(future, UnknownServerException.class); assertEquals(Collections.emptyList(), driver.poll()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java index cbbbe93e2d4c4..8cd9545107f23 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/CoordinatorStrategyTest.java @@ -55,9 +55,9 @@ public void testBuildOldLookupRequest() { @Test public void testBuildLookupRequest() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); - FindCoordinatorRequest.Builder request = strategy.buildRequest(Set.of( + FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( CoordinatorKey.byGroupId("foo"), - CoordinatorKey.byGroupId("bar"))); + CoordinatorKey.byGroupId("bar")))); assertEquals("", request.data().key()); assertEquals(2, request.data().coordinatorKeys().size()); assertEquals(CoordinatorType.GROUP, CoordinatorType.forId(request.data().keyType())); @@ -67,8 +67,8 @@ public void testBuildLookupRequest() { public void testBuildLookupRequestNonRepresentable() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); FindCoordinatorRequest.Builder request = strategy.buildRequest(new HashSet<>(Arrays.asList( - CoordinatorKey.byGroupId("foo"), - null))); + CoordinatorKey.byGroupId("foo"), + null))); assertEquals("", request.data().key()); assertEquals(1, request.data().coordinatorKeys().size()); } @@ -90,7 +90,7 @@ public void testBuildOldLookupRequestRequiresAtLeastOneKey() { strategy.disableBatch(); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - Set.of(CoordinatorKey.byTransactionalId("txnid")))); + new HashSet<>(Collections.singletonList(CoordinatorKey.byTransactionalId("txnid"))))); } @Test @@ -105,9 +105,9 @@ public void testBuildLookupRequestRequiresKeySameType() { CoordinatorStrategy strategy = new CoordinatorStrategy(CoordinatorType.GROUP, new LogContext()); assertThrows(IllegalArgumentException.class, () -> strategy.buildRequest( - Set.of( - CoordinatorKey.byGroupId("group"), - CoordinatorKey.byTransactionalId("txnid")))); + new HashSet<>(Arrays.asList( + CoordinatorKey.byGroupId("group"), + CoordinatorKey.byTransactionalId("txnid"))))); } @Test @@ -161,7 +161,7 @@ public void testSuccessfulCoordinatorLookup() { .setPort(9092) .setNodeId(2))); - AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData); + AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData); Map expectedResult = new HashMap<>(); expectedResult.put(group1, 1); expectedResult.put(group2, 2); @@ -204,7 +204,7 @@ private void testRetriableCoordinatorLookup(Errors error) { .setHost("localhost") .setPort(9092) .setNodeId(2))); - AdminApiLookupStrategy.LookupResult result = runLookup(Set.of(group1, group2), responseData); + AdminApiLookupStrategy.LookupResult result = runLookup(new HashSet<>(Arrays.asList(group1, group2)), responseData); assertEquals(emptyMap(), result.failedKeys); assertEquals(singletonMap(group2, 2), result.mappedKeys); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java index 5d14529915a92..e975b2acbaed6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupOffsetsHandlerTest.java @@ -34,8 +34,10 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -53,7 +55,7 @@ public class DeleteConsumerGroupOffsetsHandlerTest { private final TopicPartition t0p0 = new TopicPartition("t0", 0); private final TopicPartition t0p1 = new TopicPartition("t0", 1); private final TopicPartition t1p0 = new TopicPartition("t1", 0); - private final Set tps = Set.of(t0p0, t0p1, t1p0); + private final Set tps = new HashSet<>(Arrays.asList(t0p0, t0p1, t1p0)); @Test public void testBuildRequest() { diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java index 773708aa2f63f..8c4d9eb0eff9a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DeleteConsumerGroupsHandlerTest.java @@ -14,13 +14,117 @@ * See the License for the specific language governing permissions and * limitations under the License. */ - package org.apache.kafka.clients.admin.internals; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.errors.GroupAuthorizationException; +import org.apache.kafka.common.errors.GroupIdNotFoundException; +import org.apache.kafka.common.errors.GroupNotEmptyException; +import org.apache.kafka.common.errors.InvalidGroupIdException; +import org.apache.kafka.common.message.DeleteGroupsResponseData; +import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResult; +import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.DeleteGroupsRequest; +import org.apache.kafka.common.requests.DeleteGroupsResponse; import org.apache.kafka.common.utils.LogContext; -public class DeleteConsumerGroupsHandlerTest extends DeleteGroupsHandlerTest { - protected DeleteGroupsHandler handler() { - return new DeleteConsumerGroupsHandler(new LogContext()); +import org.junit.jupiter.api.Test; + +import static java.util.Collections.emptyList; +import static java.util.Collections.emptySet; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; + +public class DeleteConsumerGroupsHandlerTest { + + private final LogContext logContext = new LogContext(); + private final String groupId1 = "group-id1"; + + @Test + public void testBuildRequest() { + DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(logContext); + DeleteGroupsRequest request = handler.buildBatchedRequest(1, singleton(CoordinatorKey.byGroupId(groupId1))).build(); + assertEquals(1, request.data().groupsNames().size()); + assertEquals(groupId1, request.data().groupsNames().get(0)); + } + + @Test + public void testSuccessfulHandleResponse() { + assertCompleted(handleWithError(Errors.NONE)); + } + + @Test + public void testUnmappedHandleResponse() { + assertUnmapped(handleWithError(Errors.NOT_COORDINATOR)); + assertUnmapped(handleWithError(Errors.COORDINATOR_NOT_AVAILABLE)); + } + + @Test + public void testRetriableHandleResponse() { + assertRetriable(handleWithError(Errors.COORDINATOR_LOAD_IN_PROGRESS)); + } + + @Test + public void testFailedHandleResponse() { + assertFailed(GroupAuthorizationException.class, handleWithError(Errors.GROUP_AUTHORIZATION_FAILED)); + assertFailed(GroupIdNotFoundException.class, handleWithError(Errors.GROUP_ID_NOT_FOUND)); + assertFailed(InvalidGroupIdException.class, handleWithError(Errors.INVALID_GROUP_ID)); + assertFailed(GroupNotEmptyException.class, handleWithError(Errors.NON_EMPTY_GROUP)); + } + + private DeleteGroupsResponse buildResponse(Errors error) { + return new DeleteGroupsResponse( + new DeleteGroupsResponseData() + .setResults(new DeletableGroupResultCollection(singletonList( + new DeletableGroupResult() + .setErrorCode(error.code()) + .setGroupId(groupId1)).iterator()))); + } + + private AdminApiHandler.ApiResult handleWithError( + Errors error + ) { + DeleteConsumerGroupsHandler handler = new DeleteConsumerGroupsHandler(logContext); + DeleteGroupsResponse response = buildResponse(error); + return handler.handleResponse(new Node(1, "host", 1234), singleton(CoordinatorKey.byGroupId(groupId1)), response); + } + + private void assertUnmapped( + AdminApiHandler.ApiResult result + ) { + assertEquals(emptySet(), result.completedKeys.keySet()); + assertEquals(emptySet(), result.failedKeys.keySet()); + assertEquals(singletonList(CoordinatorKey.byGroupId(groupId1)), result.unmappedKeys); + } + + private void assertRetriable( + AdminApiHandler.ApiResult result + ) { + assertEquals(emptySet(), result.completedKeys.keySet()); + assertEquals(emptySet(), result.failedKeys.keySet()); + assertEquals(emptyList(), result.unmappedKeys); + } + + private void assertCompleted( + AdminApiHandler.ApiResult result + ) { + CoordinatorKey key = CoordinatorKey.byGroupId(groupId1); + assertEquals(emptySet(), result.failedKeys.keySet()); + assertEquals(emptyList(), result.unmappedKeys); + assertEquals(singleton(key), result.completedKeys.keySet()); + } + + private void assertFailed( + Class expectedExceptionType, + AdminApiHandler.ApiResult result + ) { + CoordinatorKey key = CoordinatorKey.byGroupId(groupId1); + assertEquals(emptySet(), result.completedKeys.keySet()); + assertEquals(emptyList(), result.unmappedKeys); + assertEquals(singleton(key), result.failedKeys.keySet()); + assertInstanceOf(expectedExceptionType, result.failedKeys.get(key)); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java index eb3e99dc62167..444795b3680bc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/DescribeConsumerGroupsHandlerTest.java @@ -53,6 +53,7 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; +import java.util.HashSet; import java.util.Iterator; import java.util.LinkedHashSet; import java.util.List; @@ -81,10 +82,10 @@ public class DescribeConsumerGroupsHandlerTest { CoordinatorKey.byGroupId(groupId2) )); private final Node coordinator = new Node(1, "host", 1234); - private final Set tps = Set.of( + private final Set tps = new HashSet<>(Arrays.asList( new TopicPartition("foo", 0), new TopicPartition("bar", 1) - ); + )); @ParameterizedTest @ValueSource(booleans = {true, false}) diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java index 19c614d3c60c8..e3bb56347a8ae 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListConsumerGroupOffsetsHandlerTest.java @@ -24,14 +24,12 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.message.OffsetFetchRequestData; import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestGroup; -import org.apache.kafka.common.message.OffsetFetchResponseData; -import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.requests.OffsetFetchRequest; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.utils.LogContext; import org.junit.jupiter.api.Test; @@ -43,6 +41,7 @@ import java.util.HashSet; import java.util.List; import java.util.Map; +import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -59,11 +58,11 @@ public class ListConsumerGroupOffsetsHandlerTest { private final LogContext logContext = new LogContext(); - private final String group0 = "group0"; - private final String group1 = "group1"; - private final String group2 = "group2"; - private final String group3 = "group3"; - private final List groups = List.of(group0, group1, group2); + private final int throttleMs = 10; + private final String groupZero = "group0"; + private final String groupOne = "group1"; + private final String groupTwo = "group2"; + private final List groups = Arrays.asList(groupZero, groupOne, groupTwo); private final TopicPartition t0p0 = new TopicPartition("t0", 0); private final TopicPartition t0p1 = new TopicPartition("t0", 1); private final TopicPartition t1p0 = new TopicPartition("t1", 0); @@ -71,129 +70,84 @@ public class ListConsumerGroupOffsetsHandlerTest { private final TopicPartition t2p0 = new TopicPartition("t2", 0); private final TopicPartition t2p1 = new TopicPartition("t2", 1); private final TopicPartition t2p2 = new TopicPartition("t2", 2); - private final TopicPartition t3p0 = new TopicPartition("t3", 0); - private final TopicPartition t3p1 = new TopicPartition("t3", 1); - - private final Map singleGroupSpec = Map.of( - group0, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t0p1, t1p0, t1p1)) - ); - private final Map multiGroupSpecs = Map.of( - group0, new ListConsumerGroupOffsetsSpec().topicPartitions(singletonList(t0p0)), - group1, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1)), - group2, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1, t2p0, t2p1, t2p2)) - ); + private final Map singleRequestMap = Collections.singletonMap(groupZero, + new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t0p1, t1p0, t1p1))); + private final Map batchedRequestMap = + new HashMap() {{ + put(groupZero, new ListConsumerGroupOffsetsSpec().topicPartitions(singletonList(t0p0))); + put(groupOne, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1))); + put(groupTwo, new ListConsumerGroupOffsetsSpec().topicPartitions(Arrays.asList(t0p0, t1p0, t1p1, t2p0, t2p1, t2p2))); + }}; @Test public void testBuildRequest() { - var handler = new ListConsumerGroupOffsetsHandler( - singleGroupSpec, - false, - logContext - ); - - assertEquals( - new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group0) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t0p0.topic()) - .setPartitionIndexes(List.of(t0p0.partition(), t0p1.partition())), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t1p0.topic()) - .setPartitionIndexes(List.of(t1p0.partition(), t1p1.partition())) - )) - )), - handler.buildBatchedRequest(coordinatorKeys(group0)).build().data() - ); + ListConsumerGroupOffsetsHandler handler = + new ListConsumerGroupOffsetsHandler(singleRequestMap, false, logContext); + OffsetFetchRequest request = handler.buildBatchedRequest(coordinatorKeys(groupZero)).build(); + assertEquals(groupZero, request.data().groups().get(0).groupId()); + assertEquals(2, request.data().groups().get(0).topics().size()); + assertEquals(2, request.data().groups().get(0).topics().get(0).partitionIndexes().size()); + assertEquals(2, request.data().groups().get(0).topics().get(1).partitionIndexes().size()); } @Test public void testBuildRequestWithMultipleGroups() { - var groupSpecs = new HashMap<>(multiGroupSpecs); - groupSpecs.put( - group3, - new ListConsumerGroupOffsetsSpec().topicPartitions(List.of(t3p0, t3p1)) - ); - - var handler = new ListConsumerGroupOffsetsHandler( - groupSpecs, - false, - logContext - ); - - var request1 = handler.buildBatchedRequest(coordinatorKeys(group0, group1, group2)).build(); - - assertEquals( - Set.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group0) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t0p0.topic()) - .setPartitionIndexes(List.of(t0p0.partition())) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group1) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t0p0.topic()) - .setPartitionIndexes(List.of(t0p0.partition())), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t1p0.topic()) - .setPartitionIndexes(List.of(t1p0.partition(), t1p1.partition())) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group2) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t0p0.topic()) - .setPartitionIndexes(List.of(t0p0.partition())), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t1p0.topic()) - .setPartitionIndexes(List.of(t1p0.partition(), t1p1.partition())), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t2p0.topic()) - .setPartitionIndexes(List.of(t2p0.partition(), t2p1.partition(), t2p2.partition())) - )) - ), - Set.copyOf(request1.data().groups()) - ); - - var request2 = handler.buildBatchedRequest(coordinatorKeys(group3)).build(); - - assertEquals( - Set.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group3) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(t3p0.topic()) - .setPartitionIndexes(List.of(t3p0.partition(), t3p1.partition())) - )) - ), - Set.copyOf(request2.data().groups()) - ); + Map requestMap = new HashMap<>(this.batchedRequestMap); + String groupThree = "group3"; + requestMap.put(groupThree, new ListConsumerGroupOffsetsSpec() + .topicPartitions(Arrays.asList(new TopicPartition("t3", 0), new TopicPartition("t3", 1)))); + + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(requestMap, false, logContext); + OffsetFetchRequest request1 = handler.buildBatchedRequest(coordinatorKeys(groupZero, groupOne, groupTwo)).build(); + assertEquals(Set.of(groupZero, groupOne, groupTwo), requestGroups(request1)); + + OffsetFetchRequest request2 = handler.buildBatchedRequest(coordinatorKeys(groupThree)).build(); + assertEquals(Set.of(groupThree), requestGroups(request2)); + + Map builtRequests = new HashMap<>(); + request1.groupIdsToPartitions().forEach((group, partitions) -> + builtRequests.put(group, new ListConsumerGroupOffsetsSpec().topicPartitions(partitions))); + request2.groupIdsToPartitions().forEach((group, partitions) -> + builtRequests.put(group, new ListConsumerGroupOffsetsSpec().topicPartitions(partitions))); + + assertEquals(requestMap, builtRequests); + Map> groupIdsToTopics = request1.groupIdsToTopics(); + + assertEquals(3, groupIdsToTopics.size()); + assertEquals(1, groupIdsToTopics.get(groupZero).size()); + assertEquals(2, groupIdsToTopics.get(groupOne).size()); + assertEquals(3, groupIdsToTopics.get(groupTwo).size()); + + assertEquals(1, groupIdsToTopics.get(groupZero).get(0).partitionIndexes().size()); + assertEquals(1, groupIdsToTopics.get(groupOne).get(0).partitionIndexes().size()); + assertEquals(2, groupIdsToTopics.get(groupOne).get(1).partitionIndexes().size()); + assertEquals(1, groupIdsToTopics.get(groupTwo).get(0).partitionIndexes().size()); + assertEquals(2, groupIdsToTopics.get(groupTwo).get(1).partitionIndexes().size()); + assertEquals(3, groupIdsToTopics.get(groupTwo).get(2).partitionIndexes().size()); + + groupIdsToTopics = request2.groupIdsToTopics(); + assertEquals(1, groupIdsToTopics.size()); + assertEquals(1, groupIdsToTopics.get(groupThree).size()); + assertEquals(2, groupIdsToTopics.get(groupThree).get(0).partitionIndexes().size()); } @Test public void testBuildRequestBatchGroups() { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(multiGroupSpecs, false, logContext); - Collection> requests = handler.buildRequest(1, coordinatorKeys(group0, group1, group2)); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(batchedRequestMap, false, logContext); + Collection> requests = handler.buildRequest(1, coordinatorKeys(groupZero, groupOne, groupTwo)); assertEquals(1, requests.size()); - assertEquals(Set.of(group0, group1, group2), requestGroups((OffsetFetchRequest) requests.iterator().next().request.build())); + assertEquals(Set.of(groupZero, groupOne, groupTwo), requestGroups((OffsetFetchRequest) requests.iterator().next().request.build())); } @Test public void testBuildRequestDoesNotBatchGroup() { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(multiGroupSpecs, false, logContext); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(batchedRequestMap, false, logContext); // Disable batching. ((CoordinatorStrategy) handler.lookupStrategy()).disableBatch(); - Collection> requests = handler.buildRequest(1, coordinatorKeys(group0, group1, group2)); + Collection> requests = handler.buildRequest(1, coordinatorKeys(groupZero, groupOne, groupTwo)); assertEquals(3, requests.size()); assertEquals( - Set.of(Set.of(group0), Set.of(group1), Set.of(group2)), + Set.of(Set.of(groupZero), Set.of(groupOne), Set.of(groupTwo)), requests.stream().map(requestAndKey -> requestGroups((OffsetFetchRequest) requestAndKey.request.build())).collect(Collectors.toSet()) ); } @@ -216,31 +170,32 @@ public void testSuccessfulHandleResponseWithOnePartitionError() { @Test public void testSuccessfulHandleResponseWithOnePartitionErrorWithMultipleGroups() { - var expectedResult = Map.of( - group0, Map.of(t0p0, new OffsetAndMetadata(10L)), - group1, Map.of(t1p1, new OffsetAndMetadata(10L)), - group2, Map.of(t2p2, new OffsetAndMetadata(10L)) - ); + Map offsetAndMetadataMapZero = + Collections.singletonMap(t0p0, new OffsetAndMetadata(10L)); + Map offsetAndMetadataMapOne = + Collections.singletonMap(t1p1, new OffsetAndMetadata(10L)); + Map offsetAndMetadataMapTwo = + Collections.singletonMap(t2p2, new OffsetAndMetadata(10L)); + Map> expectedResult = + new HashMap<>() {{ + put(groupZero, offsetAndMetadataMapZero); + put(groupOne, offsetAndMetadataMapOne); + put(groupTwo, offsetAndMetadataMapTwo); + }}; assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.UNKNOWN_TOPIC_OR_PARTITION), - expectedResult - ); + handleWithPartitionErrorMultipleGroups(Errors.UNKNOWN_TOPIC_OR_PARTITION), expectedResult); assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.TOPIC_AUTHORIZATION_FAILED), - expectedResult - ); + handleWithPartitionErrorMultipleGroups(Errors.TOPIC_AUTHORIZATION_FAILED), expectedResult); assertCompletedForMultipleGroups( - handleWithPartitionErrorMultipleGroups(Errors.UNSTABLE_OFFSET_COMMIT), - expectedResult - ); + handleWithPartitionErrorMultipleGroups(Errors.UNSTABLE_OFFSET_COMMIT), expectedResult); } @Test public void testSuccessfulHandleResponseWithMultipleGroups() { Map> expected = new HashMap<>(); Map errorMap = errorMap(groups, Errors.NONE); - assertCompletedForMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs), expected); + assertCompletedForMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap), expected); } @Test @@ -251,12 +206,11 @@ public void testUnmappedHandleResponse() { @Test public void testUnmappedHandleResponseWithMultipleGroups() { - var errorMap = Map.of( - group0, Errors.NOT_COORDINATOR, - group1, Errors.COORDINATOR_NOT_AVAILABLE, - group2, Errors.NOT_COORDINATOR - ); - assertUnmappedWithMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs)); + Map errorMap = new HashMap<>(); + errorMap.put(groupZero, Errors.NOT_COORDINATOR); + errorMap.put(groupOne, Errors.COORDINATOR_NOT_AVAILABLE); + errorMap.put(groupTwo, Errors.NOT_COORDINATOR); + assertUnmappedWithMultipleGroups(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); } @Test @@ -267,7 +221,7 @@ public void testRetriableHandleResponse() { @Test public void testRetriableHandleResponseWithMultipleGroups() { Map errorMap = errorMap(groups, Errors.COORDINATOR_LOAD_IN_PROGRESS); - assertRetriable(handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs)); + assertRetriable(handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); } @Test @@ -279,226 +233,117 @@ public void testFailedHandleResponse() { @Test public void testFailedHandleResponseWithMultipleGroups() { - var errorMap = Map.of( - group0, Errors.GROUP_AUTHORIZATION_FAILED, - group1, Errors.GROUP_ID_NOT_FOUND, - group2, Errors.INVALID_GROUP_ID - ); - var groupToExceptionMap = Map.of( - group0, (Class) GroupAuthorizationException.class, - group1, (Class) GroupIdNotFoundException.class, - group2, (Class) InvalidGroupIdException.class - ); - assertFailedForMultipleGroups( - groupToExceptionMap, - handleWithErrorWithMultipleGroups(errorMap, multiGroupSpecs) - ); + Map errorMap = new HashMap<>(); + errorMap.put(groupZero, Errors.GROUP_AUTHORIZATION_FAILED); + errorMap.put(groupOne, Errors.GROUP_ID_NOT_FOUND); + errorMap.put(groupTwo, Errors.INVALID_GROUP_ID); + Map> groupToExceptionMap = new HashMap<>(); + groupToExceptionMap.put(groupZero, GroupAuthorizationException.class); + groupToExceptionMap.put(groupOne, GroupIdNotFoundException.class); + groupToExceptionMap.put(groupTwo, InvalidGroupIdException.class); + assertFailedForMultipleGroups(groupToExceptionMap, + handleWithErrorWithMultipleGroups(errorMap, batchedRequestMap)); } private OffsetFetchResponse buildResponse(Errors error) { return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group0) - .setErrorCode(error.code()) - )), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + throttleMs, + Collections.singletonMap(groupZero, error), + Collections.singletonMap(groupZero, new HashMap<>())); + } + + private OffsetFetchResponse buildResponseWithMultipleGroups( + Map errorMap, + Map> responseData + ) { + return new OffsetFetchResponse(throttleMs, errorMap, responseData); } private AdminApiHandler.ApiResult> handleWithErrorWithMultipleGroups( Map errorMap, Map groupSpecs ) { - var handler = new ListConsumerGroupOffsetsHandler( - groupSpecs, - false, - logContext - ); - var response = new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(errorMap.entrySet().stream().map(entry -> - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(entry.getKey()) - .setErrorCode(entry.getValue().code()) - ).collect(Collectors.toList())), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(groupSpecs, false, logContext); + Map> responseData = new HashMap<>(); + for (String group : errorMap.keySet()) { + responseData.put(group, new HashMap<>()); + } + OffsetFetchResponse response = buildResponseWithMultipleGroups(errorMap, responseData); return handler.handleResponse(new Node(1, "host", 1234), - errorMap.keySet() - .stream() - .map(CoordinatorKey::byGroupId) - .collect(Collectors.toSet()), - response - ); + errorMap.keySet() + .stream() + .map(CoordinatorKey::byGroupId) + .collect(Collectors.toSet()), + response); } private OffsetFetchResponse buildResponseWithPartitionError(Errors error) { - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group0) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t0p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t0p0.partition()) - .setCommittedOffset(10), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t0p1.partition()) - .setCommittedOffset(10) - .setErrorCode(error.code()) - )) - )) - )), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + + Map responseData = new HashMap<>(); + responseData.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); + responseData.put(t0p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + + return new OffsetFetchResponse(Errors.NONE, responseData); } private OffsetFetchResponse buildResponseWithPartitionErrorWithMultipleGroups(Errors error) { - var data = new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group0) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t0p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t0p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(Errors.NONE.code()) - )) - )), - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group1) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t0p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t0p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()) - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t1p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p1.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(Errors.NONE.code()) - )) - )), - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group2) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t0p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t0p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()) - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t1p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p1.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()) - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t2p0.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t2p0.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t2p1.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(error.code()), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t2p2.partition()) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setErrorCode(Errors.NONE.code()) - )) - )) - )); - - return new OffsetFetchResponse(data, ApiKeys.OFFSET_FETCH.latestVersion()); + Map responseDataZero = new HashMap<>(); + responseDataZero.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); + + Map responseDataOne = new HashMap<>(); + responseDataOne.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataOne.put(t1p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataOne.put(t1p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); + + Map responseDataTwo = new HashMap<>(); + responseDataTwo.put(t0p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataTwo.put(t1p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataTwo.put(t1p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataTwo.put(t2p0, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataTwo.put(t2p1, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", error)); + responseDataTwo.put(t2p2, new OffsetFetchResponse.PartitionData(10, Optional.empty(), "", Errors.NONE)); + + Map> responseData = + new HashMap<>() {{ + put(groupZero, responseDataZero); + put(groupOne, responseDataOne); + put(groupTwo, responseDataTwo); + }}; + + Map errorMap = errorMap(groups, Errors.NONE); + return new OffsetFetchResponse(0, errorMap, responseData); } private AdminApiHandler.ApiResult> handleWithPartitionError( Errors error ) { - ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( - singleGroupSpec, - false, - logContext - ); + ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler(singleRequestMap, + false, logContext); OffsetFetchResponse response = buildResponseWithPartitionError(error); return handler.handleResponse(new Node(1, "host", 1234), - singleton(CoordinatorKey.byGroupId(group0)), response); + singleton(CoordinatorKey.byGroupId(groupZero)), response); } private AdminApiHandler.ApiResult> handleWithPartitionErrorMultipleGroups( Errors error ) { ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( - multiGroupSpecs, - false, - logContext - ); + batchedRequestMap, false, logContext); OffsetFetchResponse response = buildResponseWithPartitionErrorWithMultipleGroups(error); return handler.handleResponse( new Node(1, "host", 1234), - coordinatorKeys(group0, group1, group2), - response - ); + coordinatorKeys(groupZero, groupOne, groupTwo), + response); } private AdminApiHandler.ApiResult> handleWithError( Errors error ) { ListConsumerGroupOffsetsHandler handler = new ListConsumerGroupOffsetsHandler( - singleGroupSpec, false, logContext); + singleRequestMap, false, logContext); OffsetFetchResponse response = buildResponse(error); return handler.handleResponse(new Node(1, "host", 1234), - singleton(CoordinatorKey.byGroupId(group0)), + singleton(CoordinatorKey.byGroupId(groupZero)), response); } @@ -507,7 +352,7 @@ private void assertUnmapped( ) { assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptySet(), result.failedKeys.keySet()); - assertEquals(singletonList(CoordinatorKey.byGroupId(group0)), result.unmappedKeys); + assertEquals(singletonList(CoordinatorKey.byGroupId(groupZero)), result.unmappedKeys); } private void assertUnmappedWithMultipleGroups( @@ -515,7 +360,7 @@ private void assertUnmappedWithMultipleGroups( ) { assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptySet(), result.failedKeys.keySet()); - assertEquals(coordinatorKeys(group0, group1, group2), new HashSet<>(result.unmappedKeys)); + assertEquals(coordinatorKeys(groupZero, groupOne, groupTwo), new HashSet<>(result.unmappedKeys)); } private void assertRetriable( @@ -530,7 +375,7 @@ private void assertCompleted( AdminApiHandler.ApiResult> result, Map expected ) { - CoordinatorKey key = CoordinatorKey.byGroupId(group0); + CoordinatorKey key = CoordinatorKey.byGroupId(groupZero); assertEquals(emptySet(), result.failedKeys.keySet()); assertEquals(emptyList(), result.unmappedKeys); assertEquals(singleton(key), result.completedKeys.keySet()); @@ -554,7 +399,7 @@ private void assertFailed( Class expectedExceptionType, AdminApiHandler.ApiResult> result ) { - CoordinatorKey key = CoordinatorKey.byGroupId(group0); + CoordinatorKey key = CoordinatorKey.byGroupId(groupZero); assertEquals(emptySet(), result.completedKeys.keySet()); assertEquals(emptyList(), result.unmappedKeys); assertEquals(singleton(key), result.failedKeys.keySet()); diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java index 2c2529a0664c1..52073551e3d79 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/ListTransactionsHandlerTest.java @@ -36,7 +36,6 @@ import java.util.List; import java.util.Map; import java.util.OptionalInt; -import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -44,7 +43,6 @@ import static java.util.Collections.singleton; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; public class ListTransactionsHandlerTest { @@ -88,42 +86,6 @@ public void testBuildRequestWithFilteredState() { assertEquals(Collections.emptyList(), request.data().producerIdFilters()); } - - @Test - public void testBuildRequestWithFilteredTransactionalIdPattern() { - int brokerId = 1; - BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); - String filteredTransactionalIdPattern = "^special-.*"; - ListTransactionsOptions options = new ListTransactionsOptions() - .filterOnTransactionalIdPattern(filteredTransactionalIdPattern); - ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); - ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build(); - assertEquals(filteredTransactionalIdPattern, request.data().transactionalIdPattern()); - assertEquals(List.of(), request.data().stateFilters()); - } - - @Test - public void testBuildRequestWithNullFilteredTransactionalIdPattern() { - int brokerId = 1; - BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); - ListTransactionsOptions options = new ListTransactionsOptions() - .filterOnTransactionalIdPattern(null); - ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); - ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build(); - assertNull(request.data().transactionalIdPattern()); - } - - @Test - public void testBuildRequestWithEmptyFilteredTransactionalIdPattern() { - int brokerId = 1; - BrokerKey brokerKey = new BrokerKey(OptionalInt.of(brokerId)); - ListTransactionsOptions options = new ListTransactionsOptions() - .filterOnTransactionalIdPattern(""); - ListTransactionsHandler handler = new ListTransactionsHandler(options, logContext); - ListTransactionsRequest request = handler.buildBatchedRequest(brokerId, Set.of(brokerKey)).build(); - assertNull(request.data().transactionalIdPattern()); - } - @Test public void testBuildRequestWithDurationFilter() { int brokerId = 1; diff --git a/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java index 778502505fb80..4e03ae7d952f6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/admin/internals/PartitionLeaderStrategyIntegrationTest.java @@ -367,7 +367,7 @@ public void testFatalLookupError() { driver.onFailure(time.milliseconds(), spec, new UnknownServerException()); assertTrue(result.all().get(tp0).isDone()); - TestUtils.assertFutureThrows(UnknownServerException.class, result.all().get(tp0)); + TestUtils.assertFutureThrows(result.all().get(tp0), UnknownServerException.class); assertEquals(Collections.emptyList(), driver.poll()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java index bed84d67befbd..2fa5515fb4073 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerConfigTest.java @@ -30,8 +30,6 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.CsvSource; -import java.io.FileInputStream; -import java.io.InputStream; import java.util.Arrays; import java.util.HashMap; import java.util.Locale; @@ -43,7 +41,6 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; public class ConsumerConfigTest { @@ -59,7 +56,6 @@ public class ConsumerConfigTest { public void setUp() { properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClassName); properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClassName); - properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); } @Test @@ -148,7 +144,6 @@ public void testInvalidGroupInstanceId() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, ""); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(ce.getMessage().contains(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG)); } @@ -159,7 +154,6 @@ public void testInvalidSecurityProtocol() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc"); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -170,7 +164,6 @@ public void testCaseInsensitiveSecurityProtocol() { final Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(saslSslLowerCase, consumerConfig.originals().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); @@ -181,7 +174,6 @@ public void testDefaultConsumerGroupConfig() { final Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals("classic", consumerConfig.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); assertNull(consumerConfig.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); @@ -196,7 +188,6 @@ public void testRemoteAssignorConfig() { configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, remoteAssignorName); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, protocol); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(protocol, consumerConfig.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); assertEquals(remoteAssignorName, consumerConfig.getString(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG)); @@ -209,7 +200,6 @@ public void testRemoteAssignorWithClassicGroupProtocol() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, remoteAssignorName); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException exception = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(exception.getMessage().contains(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG + " cannot be set when " + ConsumerConfig.GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CLASSIC.name())); } @@ -219,7 +209,6 @@ public void testDefaultMetadataRecoveryStrategy() { Map configs = new HashMap<>(); configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ConsumerConfig consumerConfig = new ConsumerConfig(configs); assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, consumerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -230,7 +219,6 @@ public void testInvalidMetadataRecoveryStrategy() { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "abc"); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -242,7 +230,6 @@ public void testProtocolConfigValidation(String protocol, boolean isValid) { configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass); configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, protocol); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); if (isValid) { ConsumerConfig config = new ConsumerConfig(configs); assertEquals(protocol, config.getString(ConsumerConfig.GROUP_PROTOCOL_CONFIG)); @@ -263,33 +250,10 @@ private void testUnsupportedConfigsWithConsumerGroupProtocol(String configName, ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, keyDeserializerClass, ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializerClass, ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name(), - ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092", configName, value ); ConfigException exception = assertThrows(ConfigException.class, () -> new ConsumerConfig(configs)); - assertEquals(configName + " cannot be set when " + + assertEquals(configName + " cannot be set when " + ConsumerConfig.GROUP_PROTOCOL_CONFIG + "=" + GroupProtocol.CONSUMER.name(), exception.getMessage()); } - - /** - * Validates config/consumer.properties file to avoid getting out of sync with ConsumerConfig. - */ - @Test - public void testValidateConfigPropertiesFile() { - Properties props = new Properties(); - - try (InputStream inputStream = new FileInputStream(System.getProperty("user.dir") + "/../config/consumer.properties")) { - props.load(inputStream); - } catch (Exception e) { - fail("Failed to load config/consumer.properties file: " + e.getMessage()); - } - - ConsumerConfig config = new ConsumerConfig(props); - - for (String key : config.originals().keySet()) { - if (!ConsumerConfig.configDef().configKeys().containsKey(key)) { - fail("Invalid configuration key: " + key); - } - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java index c08c7766ec1c7..b4f649de579ae 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/ConsumerPartitionAssignorTest.java @@ -151,7 +151,6 @@ private ConsumerConfig initConsumerConfigWithClassTypes(List classTypes) props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, classTypes); props.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name()); - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new ConsumerConfig(props); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java index 6a6aa919be149..b85d000e167b9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/CooperativeStickyAssignorTest.java @@ -81,7 +81,7 @@ public void testEncodeAndDecodeGeneration() { Optional encodedGeneration = ((CooperativeStickyAssignor) assignor).memberData(subscription).generation; assertTrue(encodedGeneration.isPresent()); - assertEquals(DEFAULT_GENERATION, encodedGeneration.get()); + assertEquals(encodedGeneration.get(), DEFAULT_GENERATION); int generation = 10; assignor.onAssignment(null, new ConsumerGroupMetadata("dummy-group-id", generation, "dummy-member-id", Optional.empty())); @@ -90,7 +90,7 @@ public void testEncodeAndDecodeGeneration() { encodedGeneration = ((CooperativeStickyAssignor) assignor).memberData(subscription).generation; assertTrue(encodedGeneration.isPresent()); - assertEquals(generation, encodedGeneration.get()); + assertEquals(encodedGeneration.get(), generation); } @Test diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java index 78ff15cee5f8e..2749563df2742 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaConsumerTest.java @@ -60,15 +60,12 @@ import org.apache.kafka.common.message.ListOffsetsResponseData; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse; import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.JmxReporter; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.metrics.Monitorable; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.network.Selectable; @@ -104,7 +101,6 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.MockConsumerInterceptor; -import org.apache.kafka.test.MockDeserializer; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.TestUtils; @@ -153,10 +149,10 @@ import javax.management.MBeanServer; import javax.management.ObjectName; +import static java.util.Collections.singleton; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.apache.kafka.clients.consumer.internals.ClassicKafkaConsumer.DEFAULT_REASON; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID; import static org.apache.kafka.common.utils.Utils.propsToMap; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; @@ -227,7 +223,7 @@ public class KafkaConsumerTest { private final String partitionAssigned = "Hit partition assign "; private final String partitionLost = "Hit partition lost "; - private final Collection singleTopicPartition = Set.of(new TopicPartition(topic, 0)); + private final Collection singleTopicPartition = Collections.singleton(new TopicPartition(topic, 0)); private final Time time = new MockTime(); private final SubscriptionState subscription = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); private final ConsumerPartitionAssignor assignor = new RoundRobinAssignor(); @@ -237,7 +233,7 @@ public class KafkaConsumerTest { @AfterEach public void cleanup() { if (consumer != null) { - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); } } @@ -273,35 +269,6 @@ public void testSubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics( } } - @ParameterizedTest - @EnumSource(GroupProtocol.class) - public void testAssignedPartitionsMetrics(GroupProtocol groupProtocol) throws InterruptedException { - consumer = newConsumer(groupProtocol, time, mock(KafkaClient.class), subscription, - mock(ConsumerMetadata.class), assignor, false, groupInstanceId); - Metrics metrics = consumer.metricsRegistry(); - - // This metric is added in the background thread for the AsyncConsumer, so waiting on it to avoid flakiness. - TestUtils.waitForCondition(() -> getMetric(metrics, "assigned-partitions") != null, - "Consumer should register the assigned-partitions metric"); - assertNotNull(getMetric(metrics, "assigned-partitions")); - assertEquals(0.0d, getMetric(metrics, "assigned-partitions").metricValue()); - - subscription.assignFromUser(Set.of(tp0)); - assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue()); - - subscription.assignFromUser(Set.of(tp0, tp1)); - assertEquals(2.0d, getMetric(metrics, "assigned-partitions").metricValue()); - - subscription.unsubscribe(); - subscription.subscribe(Set.of(topic), Optional.empty()); - subscription.assignFromSubscribed(Set.of(tp0)); - assertEquals(1.0d, getMetric(metrics, "assigned-partitions").metricValue()); - } - - private KafkaMetric getMetric(Metrics metrics, String name) { - return metrics.metrics().get(metrics.metricName(name, CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX)); - } - @ParameterizedTest @EnumSource(GroupProtocol.class) public void testUnsubscribingCustomMetricsWithSameNameDoesntAffectConsumerMetrics(GroupProtocol groupProtocol) { @@ -334,7 +301,7 @@ public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingConsumer KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); consumer.registerMetricForSubscription(existingMetric); - // This test would fail without the check as the existing metric is registered in the consumer on startup + // This test would fail without the check as the exising metric is registered in the consumer on startup Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); } } @@ -383,10 +350,8 @@ public void testMetricsReporterAutoGeneratedClientId(GroupProtocol groupProtocol assertEquals(2, consumer.metricsRegistry().reporters().size()); MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) consumer.metricsRegistry().reporters().stream() - .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().orElseThrow(); + .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().get(); assertEquals(consumer.clientId(), mockMetricsReporter.clientId); - - consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -436,11 +401,11 @@ public void testPollReturnsRecords(GroupProtocol groupProtocol) { ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); - assertEquals(5, records.count()); - assertEquals(Set.of(tp0), records.partitions()); - assertEquals(5, records.records(tp0).size()); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(5), records.nextOffsets().get(tp0)); + assertEquals(records.count(), 5); + assertEquals(records.partitions(), Collections.singleton(tp0)); + assertEquals(records.records(tp0).size(), 5); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(5, Optional.empty(), "")); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -457,17 +422,17 @@ public void testSecondPollWithDeserializationErrorThrowsRecordDeserializationExc ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); assertEquals(invalidRecordNumber - 1, records.count()); - assertEquals(Set.of(tp0), records.partitions()); + assertEquals(Collections.singleton(tp0), records.partitions()); assertEquals(invalidRecordNumber - 1, records.records(tp0).size()); long lastOffset = records.records(tp0).get(records.records(tp0).size() - 1).offset(); assertEquals(invalidRecordNumber - 2, lastOffset); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(lastOffset + 1), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(lastOffset + 1, Optional.empty(), "")); RecordDeserializationException rde = assertThrows(RecordDeserializationException.class, () -> consumer.poll(Duration.ZERO)); assertEquals(invalidRecordOffset, rde.offset()); assertEquals(tp0, rde.topicPartition()); - assertEquals(consumer.position(tp0), rde.offset()); + assertEquals(rde.offset(), consumer.position(tp0)); } /* @@ -514,11 +479,11 @@ public String deserialize(String topic, Headers headers, ByteBuffer data) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, Optional.of(deserializer), false); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, List.of(tp), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, singletonList(tp), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); client.prepareResponseFrom(fetchResponse(tp, 0, recordCount), node); return consumer; @@ -610,17 +575,17 @@ public void shouldIgnoreGroupInstanceIdForEmptyGroupId(GroupProtocol groupProtoc public void testSubscription(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); - consumer.subscribe(List.of(topic)); - assertEquals(Set.of(topic), consumer.subscription()); + consumer.subscribe(singletonList(topic)); + assertEquals(singleton(topic), consumer.subscription()); assertTrue(consumer.assignment().isEmpty()); consumer.subscribe(Collections.emptyList()); assertTrue(consumer.subscription().isEmpty()); assertTrue(consumer.assignment().isEmpty()); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); assertTrue(consumer.subscription().isEmpty()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(singleton(tp0), consumer.assignment()); consumer.unsubscribe(); assertTrue(consumer.subscription().isEmpty()); @@ -646,7 +611,7 @@ public void testSubscriptionOnNullTopic(GroupProtocol groupProtocol) { public void testSubscriptionOnEmptyTopic(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); String emptyTopic = " "; - assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(List.of(emptyTopic))); + assertThrows(IllegalArgumentException.class, () -> consumer.subscribe(singletonList(emptyTopic))); } @ParameterizedTest @@ -675,14 +640,14 @@ public void testSubscriptionWithEmptyPartitionAssignment(GroupProtocol groupProt props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, groupId); consumer = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertThrows(IllegalStateException.class, - () -> consumer.subscribe(List.of(topic))); + () -> consumer.subscribe(singletonList(topic))); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testSeekNegative(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - consumer.assign(Set.of(new TopicPartition("nonExistTopic", 0))); + consumer.assign(singleton(new TopicPartition("nonExistTopic", 0))); assertThrows(IllegalArgumentException.class, () -> consumer.seek(new TopicPartition("nonExistTopic", 0), -1)); } @@ -707,14 +672,14 @@ public void testAssignOnEmptyTopicPartition(GroupProtocol groupProtocol) { @EnumSource(GroupProtocol.class) public void testAssignOnNullTopicInPartition(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - assertThrows(IllegalArgumentException.class, () -> consumer.assign(Set.of(new TopicPartition(null, 0)))); + assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(null, 0)))); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testAssignOnEmptyTopicInPartition(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - assertThrows(IllegalArgumentException.class, () -> consumer.assign(Set.of(new TopicPartition(" ", 0)))); + assertThrows(IllegalArgumentException.class, () -> consumer.assign(singleton(new TopicPartition(" ", 0)))); } @ParameterizedTest @@ -732,7 +697,7 @@ public void testInterceptorConstructorClose(GroupProtocol groupProtocol) { assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(0, MockConsumerInterceptor.CLOSE_COUNT.get()); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); assertEquals(1, MockConsumerInterceptor.INIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); // Cluster metadata will only be updated on calling poll. @@ -747,27 +712,26 @@ public void testInterceptorConstructorClose(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(GroupProtocol.class) public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemainingInstances(GroupProtocol groupProtocol) { - final int targetInterceptor = 1; + final int targetInterceptor = 3; try { Properties props = new Properties(); props.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()); props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, - CloseInterceptor.class.getName() + "," + MockConsumerInterceptor.class.getName()); + props.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MockConsumerInterceptor.class.getName() + ", " + + MockConsumerInterceptor.class.getName() + ", " + + MockConsumerInterceptor.class.getName()); MockConsumerInterceptor.setThrowOnConfigExceptionThreshold(targetInterceptor); assertThrows(KafkaException.class, () -> newConsumer( props, new StringDeserializer(), new StringDeserializer())); - assertEquals(1, MockConsumerInterceptor.CONFIG_COUNT.get()); - assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); + assertEquals(3, MockConsumerInterceptor.CONFIG_COUNT.get()); + assertEquals(3, MockConsumerInterceptor.CLOSE_COUNT.get()); - assertEquals(1, CloseInterceptor.CLOSE_COUNT.get()); } finally { MockConsumerInterceptor.resetCounters(); - CloseInterceptor.resetCounters(); } } @@ -776,14 +740,14 @@ public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemai public void testPause(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, groupId); - consumer.assign(List.of(tp0)); - assertEquals(Set.of(tp0), consumer.assignment()); + consumer.assign(singletonList(tp0)); + assertEquals(singleton(tp0), consumer.assignment()); assertTrue(consumer.paused().isEmpty()); - consumer.pause(Set.of(tp0)); - assertEquals(Set.of(tp0), consumer.paused()); + consumer.pause(singleton(tp0)); + assertEquals(singleton(tp0), consumer.paused()); - consumer.resume(Set.of(tp0)); + consumer.resume(singleton(tp0)); assertTrue(consumer.paused().isEmpty()); consumer.unsubscribe(); @@ -851,19 +815,19 @@ public void verifyHeartbeatSent(GroupProtocol groupProtocol) throws Exception { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); // initial fetch client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(singleton(tp0), consumer.assignment()); AtomicBoolean heartbeatReceived = prepareHeartbeatResponse(client, coordinator, Errors.NONE); @@ -884,12 +848,12 @@ public void verifyHeartbeatSentWhenFetchedDataReady(GroupProtocol groupProtocol) ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -915,12 +879,12 @@ public void verifyPollTimesOutDuringMetadataUpdate(GroupProtocol groupProtocol) final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); // Since we would enable the heartbeat thread after received join-response which could // send the sync-group on behalf of the consumer if it is enqueued, we may still complete // the rebalance and send out the fetch; in order to avoid it we do not prepare sync response here. @@ -940,22 +904,22 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithSeek(GroupProtocol g ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, null, groupInstanceId, false); - consumer.assign(Set.of(tp0)); - consumer.seekToBeginning(Set.of(tp0)); + consumer.assign(singleton(tp0)); + consumer.seekToBeginning(singleton(tp0)); - // there shouldn't be any need to look up the coordinator or fetch committed offsets. - // we just look up the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Map.of(tp0, 50L))); + // there shouldn't be any need to lookup the coordinator or fetch committed offsets. + // we just lookup the starting position and send the record fetch. + client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); client.prepareResponse(fetchResponse(tp0, 50L, 5)); ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -966,12 +930,12 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit(GroupPr ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); // create a consumer with groupID with manual assignment consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(Set.of(tp0)); + consumer.assign(singleton(tp0)); // 1st coordinator error should cause coordinator unknown client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.COORDINATOR_NOT_AVAILABLE, groupId, node), node); @@ -980,23 +944,23 @@ public void verifyNoCoordinatorLookupForManualAssignmentWithOffsetCommit(GroupPr // 2nd coordinator error should find the correct coordinator and clear the findCoordinatorFuture client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); - client.prepareResponse(offsetResponse(Map.of(tp0, 50L), Errors.NONE)); + client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 50L), Errors.NONE)); client.prepareResponse(fetchResponse(tp0, 50L, 5)); @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(0)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); // after coordinator found, consumer should be able to commit the offset successfully - client.prepareResponse(offsetCommitResponse(Map.of(tp0, Errors.NONE))); - consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(55L))); + client.prepareResponse(offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE))); + consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(55L))); // verify the offset is committed - client.prepareResponse(offsetResponse(Map.of(tp0, 55L), Errors.NONE)); - assertEquals(55, consumer.committed(Set.of(tp0), Duration.ZERO).get(tp0).offset()); + client.prepareResponse(offsetResponse(Collections.singletonMap(tp0, 55L), Errors.NONE)); + assertEquals(55, consumer.committed(Collections.singleton(tp0), Duration.ZERO).get(tp0).offset()); } @ParameterizedTest @@ -1007,7 +971,7 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); if (groupProtocol == GroupProtocol.CONSUMER) { Node node = metadata.fetch().nodes().get(0); @@ -1016,8 +980,8 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro consumer = newConsumerNoAutoCommit(groupProtocol, time, client, subscription, metadata); consumer.assign(Arrays.asList(tp0, tp1)); - consumer.seekToEnd(Set.of(tp0)); - consumer.seekToBeginning(Set.of(tp1)); + consumer.seekToEnd(singleton(tp0)); + consumer.seekToBeginning(singleton(tp1)); client.prepareResponse(body -> { ListOffsetsRequest request = (ListOffsetsRequest) body; @@ -1034,13 +998,13 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro .setPartitionIndex(tp1.partition()) .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP); return partitions.contains(expectedTp0) && partitions.contains(expectedTp1); - }, listOffsetsResponse(Map.of(tp0, 50L), Map.of(tp1, Errors.NOT_LEADER_OR_FOLLOWER))); + }, listOffsetsResponse(Collections.singletonMap(tp0, 50L), Collections.singletonMap(tp1, Errors.NOT_LEADER_OR_FOLLOWER))); client.prepareResponse( body -> { FetchRequest request = (FetchRequest) body; Map fetchData = request.fetchData(topicNames); TopicIdPartition tidp0 = new TopicIdPartition(topicIds.get(tp0.topic()), tp0); - return fetchData.keySet().equals(Set.of(tidp0)) && + return fetchData.keySet().equals(singleton(tidp0)) && fetchData.get(tidp0).fetchOffset == 50L; }, fetchResponse(tp0, 50L, 5)); @@ -1048,9 +1012,9 @@ public void testFetchProgressWithMissingPartitionPosition(GroupProtocol groupPro @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); - assertEquals(Set.of(tp0), records.partitions()); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(records.records(tp0).get(records.count() - 1).offset() + 1), records.nextOffsets().get(tp0)); + assertEquals(singleton(tp0), records.partitions()); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(records.records(tp0).get(records.count() - 1).offset() + 1, Optional.empty(), "")); } private void initMetadata(MockClient mockClient, Map partitionCounts) { @@ -1070,16 +1034,16 @@ public void testMissingOffsetNoResetPolicy(GroupProtocol groupProtocol) throws I ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Map.of(tp0, -1L), Errors.NONE), coordinator); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the offset fetch event added by a call to poll, to be processed @@ -1099,17 +1063,17 @@ public void testResetToCommittedOffset(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Map.of(tp0, 539L), Errors.NONE), coordinator); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 539L), Errors.NONE), coordinator); consumer.poll(Duration.ZERO); assertEquals(539L, consumer.position(tp0)); @@ -1135,18 +1099,18 @@ private void setUpConsumerWithAutoResetPolicy(GroupProtocol groupProtocol, AutoO ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, false); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - client.prepareResponseFrom(offsetResponse(Map.of(tp0, -1L), Errors.NONE), coordinator); - client.prepareResponse(listOffsetsResponse(Map.of(tp0, 50L))); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, -1L), Errors.NONE), coordinator); + client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 50L))); consumer.poll(Duration.ZERO); } @@ -1158,14 +1122,14 @@ public void testOffsetIsValidAfterSeek(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupId, Optional.empty(), false); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); consumer.seek(tp0, 20L); consumer.poll(Duration.ZERO); - assertEquals(20L, subscription.validPosition(tp0).offset); + assertEquals(subscription.validPosition(tp0).offset, 20L); } @ParameterizedTest @@ -1177,19 +1141,19 @@ public void testCommitsFetchedDuringAssign(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); // lookup coordinator Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // fetch offset for one topic - client.prepareResponseFrom(offsetResponse(Map.of(tp0, offset1), Errors.NONE), coordinator); - assertEquals(offset1, consumer.committed(Set.of(tp0)).get(tp0).offset()); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); + assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); consumer.assign(Arrays.asList(tp0, tp1)); @@ -1197,22 +1161,20 @@ public void testCommitsFetchedDuringAssign(GroupProtocol groupProtocol) { Map offsets = new HashMap<>(); offsets.put(tp0, offset1); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(offset1, consumer.committed(Set.of(tp0)).get(tp0).offset()); + assertEquals(offset1, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); offsets.remove(tp0); offsets.put(tp1, offset2); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(offset2, consumer.committed(Set.of(tp1)).get(tp1).offset()); + assertEquals(offset2, consumer.committed(Collections.singleton(tp1)).get(tp1).offset()); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInCommitted(GroupProtocol groupProtocol) { - assertThrows(UnsupportedVersionException.class, () -> setupThrowableConsumer(groupProtocol).committed(Set.of(tp0))); + assertThrows(UnsupportedVersionException.class, () -> setupThrowableConsumer(groupProtocol).committed(Collections.singleton(tp0))); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInPoll(GroupProtocol groupProtocol) throws InterruptedException { @@ -1229,7 +1191,6 @@ public void testFetchStableOffsetThrowInPoll(GroupProtocol groupProtocol) throws }, "Failed to throw UnsupportedVersionException in poll"); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { @@ -1242,7 +1203,7 @@ public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); client.setNodeApiVersions(NodeApiVersions.create(ApiKeys.OFFSET_FETCH.id, (short) 0, (short) 6)); Node node = metadata.fetch().nodes().get(0); @@ -1250,12 +1211,12 @@ public void testFetchStableOffsetThrowInPosition(GroupProtocol groupProtocol) { client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer( groupProtocol, time, client, subscription, metadata, assignor, true, groupId, groupInstanceId, true); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(offsetResponse( - Map.of(tp0, offset1), Errors.NONE), coordinator); + Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); return consumer; } @@ -1267,7 +1228,7 @@ public void testNoCommittedOffsets(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); @@ -1293,12 +1254,12 @@ public void testAutoCommitSentBeforePositionUpdate(GroupProtocol groupProtocol) ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1336,7 +1297,7 @@ public void testRegexSubscription(GroupProtocol groupProtocol) { Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - prepareRebalance(client, node, Set.of(topic), assignor, List.of(tp0), null); + prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null); consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer)); @@ -1344,8 +1305,8 @@ public void testRegexSubscription(GroupProtocol groupProtocol) { consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); - assertEquals(Set.of(topic), consumer.subscription()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(singleton(topic), consumer.subscription()); + assertEquals(singleton(tp0), consumer.assignment()); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -1368,21 +1329,21 @@ public void testChangingRegexSubscription(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - Node coordinator = prepareRebalance(client, node, Set.of(topic), assignor, List.of(tp0), null); + Node coordinator = prepareRebalance(client, node, singleton(topic), assignor, singletonList(tp0), null); consumer.subscribe(Pattern.compile(topic), getConsumerRebalanceListener(consumer)); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); - assertEquals(Set.of(topic), consumer.subscription()); + assertEquals(singleton(topic), consumer.subscription()); consumer.subscribe(Pattern.compile(otherTopic), getConsumerRebalanceListener(consumer)); client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, partitionCounts, topicIds)); - prepareRebalance(client, node, Set.of(otherTopic), assignor, List.of(otherTopicPartition), coordinator); + prepareRebalance(client, node, singleton(otherTopic), assignor, singletonList(otherTopicPartition), coordinator); consumer.poll(Duration.ZERO); - assertEquals(Set.of(otherTopic), consumer.subscription()); + assertEquals(singleton(otherTopic), consumer.subscription()); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -1393,12 +1354,12 @@ public void testWakeupWithFetchDataAvailable(GroupProtocol groupProtocol) throws ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1418,8 +1379,8 @@ public void testWakeupWithFetchDataAvailable(GroupProtocol groupProtocol) throws @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); assertEquals(5, records.count()); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(5), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(5, Optional.empty(), "")); // Increment time asynchronously to clear timeouts in closing the consumer final ScheduledExecutorService exec = Executors.newSingleThreadScheduledExecutor(); exec.scheduleAtFixedRate(() -> time.sleep(sessionTimeoutMs), 0L, 10L, TimeUnit.MILLISECONDS); @@ -1434,12 +1395,12 @@ public void testPollThrowsInterruptExceptionIfInterrupted(GroupProtocol groupPro final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); @@ -1461,13 +1422,13 @@ public void fetchResponseWithUnexpectedPartitionIsIgnored(GroupProtocol groupPro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(List.of(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(singletonList(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, List.of(tp0), null); + prepareRebalance(client, node, assignor, singletonList(tp0), null); Map fetches1 = new HashMap<>(); fetches1.put(tp0, new FetchInfo(0, 1)); @@ -1479,7 +1440,7 @@ public void fetchResponseWithUnexpectedPartitionIsIgnored(GroupProtocol groupPro @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ZERO); assertEquals(0, records.count()); - assertEquals(0, records.nextOffsets().size()); + assertEquals(records.nextOffsets().size(), 0); } /** @@ -1579,9 +1540,9 @@ public void testSubscriptionChangesWithAutoCommitEnabled(GroupProtocol groupProt assertEquals(101, records.count()); assertEquals(2L, consumer.position(tp0)); assertEquals(100L, consumer.position(t3p0)); - assertEquals(2, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(2), records.nextOffsets().get(tp0)); - assertEquals(new OffsetAndMetadata(100), records.nextOffsets().get(t3p0)); + assertEquals(records.nextOffsets().size(), 2); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(2, Optional.empty(), "")); + assertEquals(records.nextOffsets().get(t3p0), new OffsetAndMetadata(100, Optional.empty(), "")); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -1629,23 +1590,23 @@ public void testSubscriptionChangesWithAutoCommitDisabled(GroupProtocol groupPro initializeSubscriptionWithSingleTopic(consumer, getConsumerRebalanceListener(consumer)); // mock rebalance responses - prepareRebalance(client, node, assignor, List.of(tp0), null); + prepareRebalance(client, node, assignor, singletonList(tp0), null); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); consumer.poll(Duration.ZERO); // verify that subscription is still the same, and now assignment has caught up - assertEquals(Set.of(topic), consumer.subscription()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(singleton(topic), consumer.subscription()); + assertEquals(singleton(tp0), consumer.assignment()); consumer.poll(Duration.ZERO); // subscription change - consumer.subscribe(Set.of(topic2), getConsumerRebalanceListener(consumer)); + consumer.subscribe(singleton(topic2), getConsumerRebalanceListener(consumer)); // verify that subscription has changed but assignment is still unchanged - assertEquals(Set.of(topic2), consumer.subscription()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(singleton(topic2), consumer.subscription()); + assertEquals(singleton(tp0), consumer.assignment()); // the auto commit is disabled, so no offset commit request should be sent for (ClientRequest req: client.requests()) @@ -1673,7 +1634,7 @@ public void testUnsubscribeShouldTriggerPartitionsRevokedWithValidGeneration(Gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); CooperativeStickyAssignor assignor = new CooperativeStickyAssignor(); @@ -1681,7 +1642,7 @@ public void testUnsubscribeShouldTriggerPartitionsRevokedWithValidGeneration(Gro initializeSubscriptionWithSingleTopic(consumer, getExceptionConsumerRebalanceListener()); - prepareRebalance(client, node, assignor, List.of(tp0), null); + prepareRebalance(client, node, assignor, singletonList(tp0), null); RuntimeException assignmentException = assertThrows(RuntimeException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); @@ -1699,14 +1660,14 @@ public void testUnsubscribeShouldTriggerPartitionsLostWithNoGeneration(GroupProt ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); CooperativeStickyAssignor assignor = new CooperativeStickyAssignor(); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); initializeSubscriptionWithSingleTopic(consumer, getExceptionConsumerRebalanceListener()); - Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); + Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); RuntimeException assignException = assertThrows(RuntimeException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); @@ -1723,9 +1684,9 @@ public void testUnsubscribeShouldTriggerPartitionsLostWithNoGeneration(GroupProt private void initializeSubscriptionWithSingleTopic(KafkaConsumer consumer, ConsumerRebalanceListener consumerRebalanceListener) { - consumer.subscribe(Set.of(topic), consumerRebalanceListener); + consumer.subscribe(singleton(topic), consumerRebalanceListener); // verify that subscription has changed but assignment is still unchanged - assertEquals(Set.of(topic), consumer.subscription()); + assertEquals(singleton(topic), consumer.subscription()); assertEquals(Collections.emptySet(), consumer.assignment()); } @@ -1751,36 +1712,36 @@ public void testManualAssignmentChangeWithAutoCommitEnabled(GroupProtocol groupP Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // manual assignment - consumer.assign(Set.of(tp0)); - consumer.seekToBeginning(Set.of(tp0)); + consumer.assign(singleton(tp0)); + consumer.seekToBeginning(singleton(tp0)); // fetch offset for one topic - client.prepareResponseFrom(offsetResponse(Map.of(tp0, 0L), Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator); + assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); // verify that assignment immediately changes - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(consumer.assignment(), singleton(tp0)); - // there shouldn't be any need to look up the coordinator or fetch committed offsets. - // we just look up the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Map.of(tp0, 10L))); + // there shouldn't be any need to lookup the coordinator or fetch committed offsets. + // we just lookup the starting position and send the record fetch. + client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L))); client.prepareResponse(fetchResponse(tp0, 10L, 1)); ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(100)); assertEquals(1, records.count()); assertEquals(11L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(11L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(11L, Optional.empty(), "")); // mock the offset commit response for to be revoked partitions AtomicBoolean commitReceived = prepareOffsetCommitResponse(client, coordinator, tp0, 11); // new manual assignment - consumer.assign(Set.of(t2p0)); + consumer.assign(singleton(t2p0)); // verify that assignment immediately changes - assertEquals(Set.of(t2p0), consumer.assignment()); + assertEquals(consumer.assignment(), singleton(t2p0)); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -1808,39 +1769,39 @@ public void testManualAssignmentChangeWithAutoCommitDisabled(GroupProtocol group Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); // manual assignment - consumer.assign(Set.of(tp0)); - consumer.seekToBeginning(Set.of(tp0)); + consumer.assign(singleton(tp0)); + consumer.seekToBeginning(singleton(tp0)); // fetch offset for one topic client.prepareResponseFrom( - offsetResponse(Map.of(tp0, 0L), Errors.NONE), + offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); + assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); // verify that assignment immediately changes - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(consumer.assignment(), singleton(tp0)); - // there shouldn't be any need to look up the coordinator or fetch committed offsets. - // we just look up the starting position and send the record fetch. - client.prepareResponse(listOffsetsResponse(Map.of(tp0, 10L))); + // there shouldn't be any need to lookup the coordinator or fetch committed offsets. + // we just lookup the starting position and send the record fetch. + client.prepareResponse(listOffsetsResponse(Collections.singletonMap(tp0, 10L))); client.prepareResponse(fetchResponse(tp0, 10L, 1)); @SuppressWarnings("unchecked") ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(1, records.count()); assertEquals(11L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(11L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(11L, Optional.empty(), "")); // new manual assignment - consumer.assign(Set.of(t2p0)); + consumer.assign(singleton(t2p0)); // verify that assignment immediately changes - assertEquals(Set.of(t2p0), consumer.assignment()); + assertEquals(consumer.assignment(), singleton(t2p0)); // the auto commit is disabled, so no offset commit request should be sent for (ClientRequest req : client.requests()) - assertNotSame(ApiKeys.OFFSET_COMMIT, req.requestBuilder().apiKey()); + assertNotSame(req.requestBuilder().apiKey(), ApiKeys.OFFSET_COMMIT); client.requests().clear(); } @@ -1851,7 +1812,7 @@ public void testOffsetOfPausedPartitions(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -1877,12 +1838,12 @@ public void testOffsetOfPausedPartitions(GroupProtocol groupProtocol) { offsets.put(tp1, 0L); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Set.of(tp0)).get(tp0).offset()); + assertEquals(0, consumer.committed(Collections.singleton(tp0)).get(tp0).offset()); offsets.remove(tp0); offsets.put(tp1, 0L); client.prepareResponseFrom(offsetResponse(offsets, Errors.NONE), coordinator); - assertEquals(0, consumer.committed(Set.of(tp1)).get(tp1).offset()); + assertEquals(0, consumer.committed(Collections.singleton(tp1)).get(tp1).offset()); // fetch and verify consumer's position in the two partitions final Map offsetResponse = new HashMap<>(); @@ -1928,7 +1889,7 @@ public void testGracefulClose(GroupProtocol groupProtocol) throws Exception { response.put(tp0, Errors.NONE); OffsetCommitResponse commitResponse = offsetCommitResponse(response); LeaveGroupResponse leaveGroupResponse = new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code())); - FetchResponse closeResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(), List.of()); + FetchResponse closeResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>()); consumerCloseTest(groupProtocol, 5000, Arrays.asList(commitResponse, leaveGroupResponse, closeResponse), 0, false); } @@ -1967,7 +1928,7 @@ public void testLeaveGroupTimeout(GroupProtocol groupProtocol) throws Exception Map response = new HashMap<>(); response.put(tp0, Errors.NONE); OffsetCommitResponse commitResponse = offsetCommitResponse(response); - consumerCloseTest(groupProtocol, 5000, List.of(commitResponse), 5000, false); + consumerCloseTest(groupProtocol, 5000, singletonList(commitResponse), 5000, false); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -1991,31 +1952,32 @@ public void testCloseInterrupt(GroupProtocol groupProtocol) throws Exception { public void testCloseShouldBeIdempotent(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = spy(new MockClient(time, metadata)); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.close(CloseOptions.timeout(Duration.ZERO)); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); + consumer.close(Duration.ZERO); // verify that the call is idempotent by checking that the network client is only closed once. verify(client).close(); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol groupProtocol) { - assertThrows(InvalidConfigurationException.class, - () -> newConsumer(groupProtocol, null, Optional.of(true)), - "Expected an InvalidConfigurationException"); + try (KafkaConsumer consumer = newConsumer(groupProtocol, null, Optional.of(Boolean.TRUE))) { + fail("Expected an InvalidConfigurationException"); + } catch (InvalidConfigurationException e) { + // OK, expected + } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(Set.of(topic))); + assertThrows(InvalidGroupIdException.class, () -> consumer.subscribe(Collections.singleton(topic))); } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); + assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); } try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { @@ -2031,9 +1993,9 @@ public void testOperationsBySubscribingConsumerWithDefaultGroupId(GroupProtocol @EnumSource(GroupProtocol.class) public void testOperationsByAssigningConsumerWithDefaultGroupId(GroupProtocol groupProtocol) { try (KafkaConsumer consumer = newConsumer(groupProtocol, null)) { - consumer.assign(Set.of(tp0)); + consumer.assign(singleton(tp0)); - assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); + assertThrows(InvalidGroupIdException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); assertThrows(InvalidGroupIdException.class, consumer::commitAsync); assertThrows(InvalidGroupIdException.class, consumer::commitSync); } @@ -2047,12 +2009,12 @@ public void testMetricConfigRecordingLevelInfo(GroupProtocol groupProtocol) { props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); KafkaConsumer consumer = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertEquals(Sensor.RecordingLevel.INFO, consumer.metricsRegistry().config().recordLevel()); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); props.put(ConsumerConfig.METRICS_RECORDING_LEVEL_CONFIG, "DEBUG"); KafkaConsumer consumer2 = newConsumer(props, new ByteArrayDeserializer(), new ByteArrayDeserializer()); assertEquals(Sensor.RecordingLevel.DEBUG, consumer2.metricsRegistry().config().recordLevel()); - consumer2.close(CloseOptions.timeout(Duration.ZERO)); + consumer2.close(Duration.ZERO); } // TODO: this test references RPCs to be sent that are not part of the CONSUMER group protocol. @@ -2064,17 +2026,17 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); client.prepareResponseFrom(fetchResponse(tp0, 0, 1), node); client.prepareResponseFrom(fetchResponse(tp0, 1, 0), node); @@ -2087,7 +2049,7 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro new HeartbeatResponseData().setErrorCode(Errors.REBALANCE_IN_PROGRESS.code())), coordinator); // join group - final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(List.of(topic))); + final ByteBuffer byteBuffer = ConsumerProtocol.serializeSubscription(new ConsumerPartitionAssignor.Subscription(singletonList(topic))); // This member becomes the leader final JoinGroupResponse leaderResponse = new JoinGroupResponse( @@ -2095,7 +2057,7 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro .setErrorCode(Errors.NONE.code()) .setGenerationId(1).setProtocolName(assignor.name()) .setLeader(memberId).setMemberId(memberId) - .setMembers(List.of( + .setMembers(Collections.singletonList( new JoinGroupResponseData.JoinGroupResponseMember() .setMemberId(memberId) .setMetadata(byteBuffer.array()) @@ -2107,16 +2069,16 @@ public void testShouldAttemptToRejoinGroupAfterSyncGroupFailed(GroupProtocol gro client.prepareResponseFrom(leaderResponse, coordinator); // sync group fails due to disconnect - client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator, true); + client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator, true); // should try and find the new coordinator client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); // rejoin group client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); - client.prepareResponseFrom(body -> body instanceof FetchRequest + client.prepareResponseFrom(body -> body instanceof FetchRequest && ((FetchRequest) body).fetchData(topicNames).containsKey(new TopicIdPartition(topicId, tp0)), fetchResponse(tp0, 1, 1), node); time.sleep(heartbeatIntervalMs); Thread.sleep(heartbeatIntervalMs); @@ -2134,14 +2096,14 @@ private void consumerCloseTest(GroupProtocol groupProtocol, ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, Optional.empty()); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - Node coordinator = prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + Node coordinator = prepareRebalance(client, node, assignor, singletonList(tp0), null); - client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, Map.of(topic, 1), topicIds)); + client.prepareMetadataUpdate(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(topic, 1), topicIds)); consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE)); @@ -2159,7 +2121,7 @@ private void consumerCloseTest(GroupProtocol groupProtocol, Future future = executor.submit(() -> { consumer.commitAsync(); try { - consumer.close(CloseOptions.timeout(Duration.ofMillis(closeTimeoutMs))); + consumer.close(Duration.ofMillis(closeTimeoutMs)); } catch (Exception e) { closeException.set(e); } @@ -2171,7 +2133,7 @@ private void consumerCloseTest(GroupProtocol groupProtocol, future.get(100, TimeUnit.MILLISECONDS); if (closeTimeoutMs != 0) fail("Close completed without waiting for commit or leave response"); - } catch (TimeoutException swallow) { + } catch (TimeoutException e) { // Expected exception } @@ -2226,7 +2188,7 @@ public void testPartitionsForNonExistingTopic(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Cluster cluster = metadata.fetch(); MetadataResponse updateResponse = RequestTestUtils.metadataResponse(cluster.nodes(), @@ -2244,9 +2206,8 @@ public void testPartitionsForNonExistingTopic(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testPartitionsForAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - assertThrows(AuthenticationException.class, () -> consumer.partitionsFor("some other topic")); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + assertThrows(AuthenticationException.class, () -> consumer.partitionsFor("some other topic")); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2254,9 +2215,8 @@ public void testPartitionsForAuthenticationFailure(GroupProtocol groupProtocol) @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testBeginningOffsetsAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - assertThrows(AuthenticationException.class, () -> consumer.beginningOffsets(Set.of(tp0))); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + assertThrows(AuthenticationException.class, () -> consumer.beginningOffsets(Collections.singleton(tp0))); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2264,16 +2224,15 @@ public void testBeginningOffsetsAuthenticationFailure(GroupProtocol groupProtoco @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testEndOffsetsAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - assertThrows(AuthenticationException.class, () -> consumer.endOffsets(Set.of(tp0))); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + assertThrows(AuthenticationException.class, () -> consumer.endOffsets(Collections.singleton(tp0))); } @ParameterizedTest @EnumSource(GroupProtocol.class) public void testPollAuthenticationFailure(GroupProtocol groupProtocol) throws InterruptedException { final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); - consumer.subscribe(Set.of(topic)); + consumer.subscribe(singleton(topic)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the event added by a call to poll, to be processed @@ -2291,9 +2250,8 @@ public void testPollAuthenticationFailure(GroupProtocol groupProtocol) throws In @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testOffsetsForTimesAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - assertThrows(AuthenticationException.class, () -> consumer.offsetsForTimes(Map.of(tp0, 0L))); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + assertThrows(AuthenticationException.class, () -> consumer.offsetsForTimes(singletonMap(tp0, 0L))); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2301,10 +2259,10 @@ public void testOffsetsForTimesAuthenticationFailure(GroupProtocol groupProtocol @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testCommitSyncAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - Map offsets = Map.of(tp0, new OffsetAndMetadata(10L)); - assertThrows(AuthenticationException.class, () -> consumer.commitSync(offsets)); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + Map offsets = new HashMap<>(); + offsets.put(tp0, new OffsetAndMetadata(10L)); + assertThrows(AuthenticationException.class, () -> consumer.commitSync(offsets)); } // TODO: this test triggers a bug with the CONSUMER group protocol implementation. @@ -2312,27 +2270,24 @@ public void testCommitSyncAuthenticationFailure(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testCommittedAuthenticationFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol)) { - assertThrows(AuthenticationException.class, () -> consumer.committed(Set.of(tp0)).get(tp0)); - } + final KafkaConsumer consumer = consumerWithPendingAuthenticationError(groupProtocol); + assertThrows(AuthenticationException.class, () -> consumer.committed(Collections.singleton(tp0)).get(tp0)); } @ParameterizedTest @EnumSource(value = GroupProtocol.class) public void testMeasureCommitSyncDurationOnFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer - = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis()))) { - - try { - consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(10L))); - } catch (final RuntimeException swallow) { - // swallow - } + final KafkaConsumer consumer + = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis())); - final Metric metric = consumer.metrics() - .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); - assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + try { + consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(10L))); + } catch (final RuntimeException e) { } + + final Metric metric = consumer.metrics() + .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); + assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); } @ParameterizedTest @@ -2343,21 +2298,21 @@ public void testMeasureCommitSyncDuration(GroupProtocol groupProtocol) { AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); client.prepareResponseFrom( FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom( - offsetCommitResponse(Map.of(tp0, Errors.NONE)), + offsetCommitResponse(Collections.singletonMap(tp0, Errors.NONE)), coordinator ); - consumer.commitSync(Map.of(tp0, new OffsetAndMetadata(10L))); + consumer.commitSync(Collections.singletonMap(tp0, new OffsetAndMetadata(10L))); final Metric metric = consumer.metrics() .get(consumer.metricsRegistry().metricName("commit-sync-time-ns-total", "consumer-metrics")); @@ -2367,19 +2322,17 @@ public void testMeasureCommitSyncDuration(GroupProtocol groupProtocol) { @ParameterizedTest @EnumSource(value = GroupProtocol.class) public void testMeasureCommittedDurationOnFailure(GroupProtocol groupProtocol) { - try (final KafkaConsumer consumer - = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis()))) { + final KafkaConsumer consumer + = consumerWithPendingError(groupProtocol, new MockTime(Duration.ofSeconds(1).toMillis())); - try { - consumer.committed(Set.of(tp0)); - } catch (final RuntimeException swallow) { - // swallow - } - - final Metric metric = consumer.metrics() - .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); - assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); + try { + consumer.committed(Collections.singleton(tp0)); + } catch (final RuntimeException e) { } + + final Metric metric = consumer.metrics() + .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); + assertTrue((Double) metric.metricValue() >= Duration.ofMillis(999).toNanos()); } @ParameterizedTest @@ -2391,11 +2344,11 @@ public void testMeasureCommittedDuration(GroupProtocol groupProtocol) { AutoOffsetResetStrategy.EARLIEST); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 2)); + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(List.of(tp0)); + consumer.assign(singletonList(tp0)); // lookup coordinator client.prepareResponseFrom( @@ -2404,9 +2357,9 @@ public void testMeasureCommittedDuration(GroupProtocol groupProtocol) { // fetch offset for one topic client.prepareResponseFrom( - offsetResponse(Map.of(tp0, offset1), Errors.NONE), coordinator); + offsetResponse(Collections.singletonMap(tp0, offset1), Errors.NONE), coordinator); - consumer.committed(Set.of(tp0)).get(tp0).offset(); + consumer.committed(Collections.singleton(tp0)).get(tp0).offset(); final Metric metric = consumer.metrics() .get(consumer.metricsRegistry().metricName("committed-time-ns-total", "consumer-metrics")); @@ -2421,30 +2374,30 @@ public void testRebalanceException(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(topic), getExceptionConsumerRebalanceListener()); + consumer.subscribe(singleton(topic), getExceptionConsumerRebalanceListener()); Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); client.prepareResponseFrom(joinGroupFollowerResponse(assignor, 1, memberId, leaderId, Errors.NONE), coordinator); - client.prepareResponseFrom(syncGroupResponse(List.of(tp0), Errors.NONE), coordinator); + client.prepareResponseFrom(syncGroupResponse(singletonList(tp0), Errors.NONE), coordinator); // assign throws KafkaException exc = assertThrows(KafkaException.class, () -> consumer.updateAssignmentMetadataIfNeeded(time.timer(Long.MAX_VALUE))); assertEquals(partitionAssigned + singleTopicPartition, exc.getCause().getMessage()); // the assignment is still updated regardless of the exception - assertEquals(Set.of(tp0), subscription.assignedPartitions()); + assertEquals(singleton(tp0), subscription.assignedPartitions()); // close's revoke throws - exc = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ofMillis(0)))); + exc = assertThrows(KafkaException.class, () -> consumer.close(Duration.ofMillis(0))); assertEquals(partitionRevoked + singleTopicPartition, exc.getCause().getCause().getMessage()); - consumer.close(CloseOptions.timeout(Duration.ofMillis(0))); + consumer.close(Duration.ofMillis(0)); // the assignment is still updated regardless of the exception assertTrue(subscription.assignedPartitions().isEmpty()); @@ -2489,9 +2442,9 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(11, records.count()); assertEquals(1L, consumer.position(tp0)); assertEquals(10L, consumer.position(t2p0)); - assertEquals(2, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(1L), records.nextOffsets().get(tp0)); - assertEquals(new OffsetAndMetadata(10L), records.nextOffsets().get(t2p0)); + assertEquals(records.nextOffsets().size(), 2); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(1L, Optional.empty(), "")); + assertEquals(records.nextOffsets().get(t2p0), new OffsetAndMetadata(10L, Optional.empty(), "")); // prepare the next response of the prefetch fetches1.clear(); @@ -2521,11 +2474,11 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws // verify that the fetch still occurred as expected assertEquals(Set.of(topic, topic3), consumer.subscription()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(Collections.singleton(tp0), consumer.assignment()); assertEquals(1, records.count()); assertEquals(2L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(2L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(2L, Optional.empty(), "")); // verify that the offset commits occurred as expected assertTrue(commitReceived.get()); @@ -2539,11 +2492,11 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws // should not finish the response yet assertEquals(Set.of(topic, topic3), consumer.subscription()); - assertEquals(Set.of(tp0), consumer.assignment()); + assertEquals(Collections.singleton(tp0), consumer.assignment()); assertEquals(1, records.count()); assertEquals(3L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(3L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(3L, Optional.empty(), "")); fetches1.clear(); fetches1.put(tp0, new FetchInfo(3, 1)); @@ -2565,8 +2518,8 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(Set.of(tp0, t3p0), consumer.assignment()); assertEquals(4L, consumer.position(tp0)); assertEquals(0L, consumer.position(t3p0)); - assertEquals(1, recs1.get().nextOffsets().size()); - assertEquals(new OffsetAndMetadata(4L), recs1.get().nextOffsets().get(tp0)); + assertEquals(recs1.get().nextOffsets().size(), 1); + assertEquals(recs1.get().nextOffsets().get(tp0), new OffsetAndMetadata(4L, Optional.empty(), "")); fetches1.clear(); fetches1.put(tp0, new FetchInfo(4, 1)); @@ -2583,13 +2536,13 @@ public void testReturnRecordsDuringRebalance(GroupProtocol groupProtocol) throws assertEquals(5L, consumer.position(tp0)); assertEquals(100L, consumer.position(t3p0)); - assertEquals(2, recs2.get().nextOffsets().size()); - assertEquals(new OffsetAndMetadata(5L), recs2.get().nextOffsets().get(tp0)); - assertEquals(new OffsetAndMetadata(100L), recs2.get().nextOffsets().get(t3p0)); + assertEquals(recs2.get().nextOffsets().size(), 2); + assertEquals(recs2.get().nextOffsets().get(tp0), new OffsetAndMetadata(5L, Optional.empty(), "")); + assertEquals(recs2.get().nextOffsets().get(t3p0), new OffsetAndMetadata(100L, Optional.empty(), "")); client.requests().clear(); consumer.unsubscribe(); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); } // TODO: this test requires rebalance logic which is not yet implemented in the CONSUMER group protocol. @@ -2600,7 +2553,7 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); final Node node = metadata.fetch().nodes().get(0); final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); @@ -2611,8 +2564,8 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { assertEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, groupMetadataOnStart.generationId()); assertEquals(groupInstanceId, groupMetadataOnStart.groupInstanceId()); - consumer.subscribe(Set.of(topic), getConsumerRebalanceListener(consumer)); - prepareRebalance(client, node, assignor, List.of(tp0), null); + consumer.subscribe(singleton(topic), getConsumerRebalanceListener(consumer)); + prepareRebalance(client, node, assignor, singletonList(tp0), null); // initial fetch client.prepareResponseFrom(fetchResponse(tp0, 0, 0), node); @@ -2630,10 +2583,10 @@ public void testGetGroupMetadata(GroupProtocol groupProtocol) { public void testInvalidGroupMetadata(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(List.of(topic)); + consumer.subscribe(singletonList(topic)); // concurrent access is illegal client.enableBlockingUntilWakeup(1); ExecutorService service = Executors.newSingleThreadExecutor(); @@ -2649,7 +2602,7 @@ public void testInvalidGroupMetadata(GroupProtocol groupProtocol) throws Interru } // accessing closed consumer is illegal - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); assertThrows(IllegalStateException.class, consumer::groupMetadata); } @@ -2660,14 +2613,14 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); // throws for unassigned partition assertThrows(IllegalStateException.class, () -> consumer.currentLag(tp0)); - consumer.assign(Set.of(tp0)); + consumer.assign(singleton(tp0)); // poll once to update with the current metadata consumer.poll(Duration.ofMillis(0)); @@ -2699,7 +2652,7 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept // poll once again, which should return the list-offset response // and hence next call would return correct lag result ClientRequest listOffsetRequest = findRequest(client, ApiKeys.LIST_OFFSETS); - client.respondToRequest(listOffsetRequest, listOffsetsResponse(Map.of(tp0, 90L))); + client.respondToRequest(listOffsetRequest, listOffsetsResponse(singletonMap(tp0, 90L))); consumer.poll(Duration.ofMillis(0)); // For AsyncKafkaConsumer, subscription state is updated in background, so the result will eventually be updated. @@ -2713,13 +2666,13 @@ public void testCurrentLag(GroupProtocol groupProtocol) throws InterruptedExcept // one successful fetch should update the log end offset and the position ClientRequest fetchRequest = findRequest(client, ApiKeys.FETCH); final FetchInfo fetchInfo = new FetchInfo(1L, 99L, 50L, 5); - client.respondToRequest(fetchRequest, fetchResponse(Map.of(tp0, fetchInfo))); + client.respondToRequest(fetchRequest, fetchResponse(singletonMap(tp0, fetchInfo))); final ConsumerRecords records = (ConsumerRecords) consumer.poll(Duration.ofMillis(1)); assertEquals(5, records.count()); assertEquals(55L, consumer.position(tp0)); - assertEquals(1, records.nextOffsets().size()); - assertEquals(new OffsetAndMetadata(55L), records.nextOffsets().get(tp0)); + assertEquals(records.nextOffsets().size(), 1); + assertEquals(records.nextOffsets().get(tp0), new OffsetAndMetadata(55L, Optional.empty(), "")); // correct lag result assertEquals(OptionalLong.of(45L), consumer.currentLag(tp0)); @@ -2731,15 +2684,15 @@ public void testListOffsetShouldUpdateSubscriptions(GroupProtocol groupProtocol) final ConsumerMetadata metadata = createMetadata(subscription); final MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, singletonMap(topic, 1)); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, null, groupInstanceId, false); - consumer.assign(Set.of(tp0)); + consumer.assign(singleton(tp0)); consumer.seek(tp0, 50L); - client.prepareResponse(request -> request instanceof ListOffsetsRequest, listOffsetsResponse(Map.of(tp0, 90L))); - assertEquals(Map.of(tp0, 90L), consumer.endOffsets(Set.of(tp0))); + client.prepareResponse(request -> request instanceof ListOffsetsRequest, listOffsetsResponse(singletonMap(tp0, 90L))); + assertEquals(singletonMap(tp0, 90L), consumer.endOffsets(Collections.singleton(tp0))); // correct lag result should be returned as well assertEquals(OptionalLong.of(40L), consumer.currentLag(tp0)); } @@ -2755,7 +2708,7 @@ private KafkaConsumer consumerWithPendingAuthenticationError(Gro ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -2884,7 +2837,7 @@ private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordi } private AtomicBoolean prepareOffsetCommitResponse(MockClient client, Node coordinator, final TopicPartition partition, final long offset) { - return prepareOffsetCommitResponse(client, coordinator, Map.of(partition, offset)); + return prepareOffsetCommitResponse(client, coordinator, Collections.singletonMap(partition, offset)); } private OffsetCommitResponse offsetCommitResponse(Map responseData) { @@ -2914,26 +2867,16 @@ private SyncGroupResponse syncGroupResponse(List partitions, Err } private OffsetFetchResponse offsetResponse(Map offsets, Errors error) { - var grouped = offsets.entrySet().stream().collect(Collectors.groupingBy(e -> e.getKey().topic())); - + Map partitionData = new HashMap<>(); + for (Map.Entry entry : offsets.entrySet()) { + partitionData.put(entry.getKey(), new OffsetFetchResponse.PartitionData(entry.getValue(), + Optional.empty(), "", error)); + } + int throttleMs = 10; return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(groupId) - .setTopics(grouped.entrySet().stream().map(entry -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(entry.getKey()) - .setPartitions(entry.getValue().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition.getKey().partition()) - .setErrorCode(error.code()) - .setCommittedOffset(partition.getValue()) - ).collect(Collectors.toList())) - ).collect(Collectors.toList())) - )), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + throttleMs, + Collections.singletonMap(groupId, Errors.NONE), + Collections.singletonMap(groupId, partitionData)); } private ListOffsetsResponse listOffsetsResponse(Map offsets) { @@ -2993,12 +2936,12 @@ private FetchResponse fetchResponse(Map fetches) { .setLogStartOffset(logStartOffset) .setRecords(records)); } - return FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpResponses, List.of()); + return FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpResponses); } private FetchResponse fetchResponse(TopicPartition partition, long fetchOffset, int count) { FetchInfo fetchInfo = new FetchInfo(fetchOffset, count); - return fetchResponse(Map.of(partition, fetchInfo)); + return fetchResponse(Collections.singletonMap(partition, fetchInfo)); } private KafkaConsumer newConsumer(GroupProtocol groupProtocol, @@ -3081,7 +3024,7 @@ private KafkaConsumer newConsumer(GroupProtocol groupProtocol, Deserializer keyDeserializer = new StringDeserializer(); Deserializer valueDeserializer = valueDeserializerOpt.orElse(new StringDeserializer()); LogContext logContext = new LogContext(); - List assignors = List.of(assignor); + List assignors = singletonList(assignor); ConsumerConfig config = newConsumerConfig( groupProtocol, autoCommitEnabled, @@ -3147,7 +3090,6 @@ private ConsumerConfig newConsumerConfig(GroupProtocol groupProtocol, configs.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoffMs); configs.put(ConsumerConfig.THROW_ON_FETCH_STABLE_OFFSET_UNSUPPORTED, throwOnStableOffsetNotSupported); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); groupInstanceId.ifPresent(gi -> configs.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, gi)); return new ConsumerConfig(configs); @@ -3177,7 +3119,7 @@ public void testSubscriptionOnInvalidTopic(GroupProtocol groupProtocol) throws I ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Cluster cluster = metadata.fetch(); String invalidTopicName = "topic abc"; // Invalid topic name due to space @@ -3192,7 +3134,7 @@ public void testSubscriptionOnInvalidTopic(GroupProtocol groupProtocol) throws I client.prepareMetadataUpdate(updateResponse); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(Set.of(invalidTopicName), getConsumerRebalanceListener(consumer)); + consumer.subscribe(singleton(invalidTopicName), getConsumerRebalanceListener(consumer)); if (groupProtocol == GroupProtocol.CONSUMER) { // New consumer poll(ZERO) needs to wait for the event added by a call to poll, to be processed @@ -3222,10 +3164,10 @@ private static void assertPollEventuallyThrows(KafkaConsum public void testPollTimeMetrics(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(List.of(topic)); + consumer.subscribe(singletonList(topic)); // MetricName objects to check Metrics metrics = consumer.metricsRegistry(); MetricName lastPollSecondsAgoName = metrics.metricName("last-poll-seconds-ago", "consumer-metrics"); @@ -3268,7 +3210,7 @@ public void testPollTimeMetrics(GroupProtocol groupProtocol) { public void testPollIdleRatio(GroupProtocol groupProtocol) { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); // MetricName object to check @@ -3312,14 +3254,14 @@ private static boolean consumerMetricPresent(KafkaConsumer consu @ParameterizedTest @EnumSource(GroupProtocol.class) - public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupProtocol) { +public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupProtocol) { Time time = new MockTime(1L); ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(List.of(topic)); + consumer.subscribe(singletonList(topic)); assertTrue(consumerMetricPresent(consumer, "last-poll-seconds-ago")); assertTrue(consumerMetricPresent(consumer, "time-between-poll-avg")); assertTrue(consumerMetricPresent(consumer, "time-between-poll-max")); @@ -3334,7 +3276,7 @@ public void testClosingConsumerUnregistersConsumerMetrics(GroupProtocol groupPro @EnumSource(value = GroupProtocol.class, names = "CLASSIC") public void testEnforceRebalanceWithManualAssignment(GroupProtocol groupProtocol) { consumer = newConsumer(groupProtocol, null); - consumer.assign(Set.of(new TopicPartition("topic", 0))); + consumer.assign(singleton(new TopicPartition("topic", 0))); assertThrows(IllegalStateException.class, consumer::enforceRebalance); } @@ -3358,15 +3300,15 @@ public void testEnforceRebalanceTriggersRebalanceOnNextPoll(GroupProtocol groupP consumer.poll(Duration.ZERO); // onPartitionsRevoked is not invoked when first joining the group - assertEquals(0, countingRebalanceListener.revokedCount); - assertEquals(1, countingRebalanceListener.assignedCount); + assertEquals(countingRebalanceListener.revokedCount, 0); + assertEquals(countingRebalanceListener.assignedCount, 1); consumer.enforceRebalance(); // the next poll should trigger a rebalance consumer.poll(Duration.ZERO); - assertEquals(1, countingRebalanceListener.revokedCount); + assertEquals(countingRebalanceListener.revokedCount, 1); } // NOTE: this test uses the enforceRebalance API which is not implemented in the CONSUMER group protocol. @@ -3390,7 +3332,7 @@ public void testEnforceRebalanceReason(GroupProtocol groupProtocol) { true, groupInstanceId ); - consumer.subscribe(List.of(topic)); + consumer.subscribe(Collections.singletonList(topic)); // Lookup coordinator. client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); @@ -3486,9 +3428,8 @@ public void testOffsetsForTimesTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.offsetsForTimes(Map.of(tp0, 0L))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.offsetsForTimes(singletonMap(tp0, 0L))).getMessage() ); - consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3497,9 +3438,8 @@ public void testBeginningOffsetsTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.beginningOffsets(List.of(tp0))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.beginningOffsets(singletonList(tp0))).getMessage() ); - consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3508,9 +3448,8 @@ public void testEndOffsetsTimeout(GroupProtocol groupProtocol) { final KafkaConsumer consumer = consumerForCheckingTimeoutException(groupProtocol); assertEquals( "Failed to get offsets by times in 60000ms", - assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.endOffsets(List.of(tp0))).getMessage() + assertThrows(org.apache.kafka.common.errors.TimeoutException.class, () -> consumer.endOffsets(singletonList(tp0))).getMessage() ); - consumer.close(CloseOptions.timeout(Duration.ZERO)); } @ParameterizedTest @@ -3563,7 +3502,6 @@ public void testClientInstanceIdNoTelemetryReporterRegistered(GroupProtocol grou assertEquals("Telemetry is not enabled. Set config `enable.metrics.push` to `true`.", exception.getMessage()); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testEmptyGroupId(GroupProtocol groupProtocol) { @@ -3572,7 +3510,6 @@ public void testEmptyGroupId(GroupProtocol groupProtocol) { assertEquals("The configured group.id should not be an empty string or whitespace.", e.getCause().getMessage()); } - @SuppressWarnings("resource") @ParameterizedTest @EnumSource(GroupProtocol.class) public void testGroupIdWithWhitespace(GroupProtocol groupProtocol) { @@ -3585,7 +3522,7 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, singletonMap(topic, 1)); ConsumerPartitionAssignor assignor = new RangeAssignor(); @@ -3593,9 +3530,9 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); } - + final KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, false, groupInstanceId); - + int maxPreparedResponses = GroupProtocol.CLASSIC.equals(groupProtocol) ? 10 : 1; for (int i = 0; i < maxPreparedResponses; i++) { client.prepareResponse( @@ -3605,7 +3542,7 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP }, listOffsetsResponse( Collections.emptyMap(), - Map.of(tp0, Errors.UNKNOWN_TOPIC_OR_PARTITION) + Collections.singletonMap(tp0, Errors.UNKNOWN_TOPIC_OR_PARTITION) )); } @@ -3616,24 +3553,24 @@ private KafkaConsumer consumerForCheckingTimeoutException(GroupP @EnumSource(GroupProtocol.class) public void testCommittedThrowsTimeoutExceptionForNoResponse(GroupProtocol groupProtocol) { Time time = new MockTime(Duration.ofSeconds(1).toMillis()); - + ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - - initMetadata(client, Map.of(topic, 2)); + + initMetadata(client, Collections.singletonMap(topic, 2)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.assign(List.of(tp0)); - + consumer.assign(singletonList(tp0)); + // lookup coordinator Node coordinator = new Node(Integer.MAX_VALUE - node.id(), node.host(), node.port()); - + // try to get committed offsets for one topic-partition - but it is disconnected so there's no response and it will time out - client.prepareResponseFrom(offsetResponse(Map.of(tp0, 0L), Errors.NONE), coordinator, true); + client.prepareResponseFrom(offsetResponse(Collections.singletonMap(tp0, 0L), Errors.NONE), coordinator, true); org.apache.kafka.common.errors.TimeoutException timeoutException = assertThrows(org.apache.kafka.common.errors.TimeoutException.class, - () -> consumer.committed(Set.of(tp0), Duration.ofMillis(1000L))); + () -> consumer.committed(Collections.singleton(tp0), Duration.ofMillis(1000L))); assertEquals("Timeout of 1000ms expired before the last committed offset for partitions [test-0] could be determined. " + "Try tuning default.api.timeout.ms larger to relax the threshold.", timeoutException.getMessage()); } @@ -3643,10 +3580,10 @@ public void testCommittedThrowsTimeoutExceptionForNoResponse(GroupProtocol group public void testPreventMultiThread(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, new RoundRobinAssignor(), true, groupInstanceId); - consumer.subscribe(List.of(topic)); + consumer.subscribe(singletonList(topic)); client.enableBlockingUntilWakeup(1); @@ -3668,13 +3605,13 @@ public void testPreventMultiThread(GroupProtocol groupProtocol) throws Interrupt public void testPollSendsRequestToJoin(GroupProtocol groupProtocol) throws InterruptedException { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); - initMetadata(client, Map.of(topic, 1)); + initMetadata(client, Collections.singletonMap(topic, 1)); Node node = metadata.fetch().nodes().get(0); client.prepareResponseFrom(FindCoordinatorResponse.prepareResponse(Errors.NONE, groupId, node), node); KafkaConsumer consumer = newConsumer(groupProtocol, time, client, subscription, metadata, assignor, true, groupInstanceId); - consumer.subscribe(List.of(topic)); + consumer.subscribe(singletonList(topic)); assertFalse(groupProtocol == GroupProtocol.CLASSIC ? requestGenerated(client, ApiKeys.JOIN_GROUP) : requestGenerated(client, ApiKeys.CONSUMER_GROUP_HEARTBEAT), @@ -3748,116 +3685,4 @@ public void configure(Map configs) { CLIENT_IDS.add(configs.get(ConsumerConfig.CLIENT_ID_CONFIG).toString()); } } - - @ParameterizedTest - @EnumSource(value = GroupProtocol.class) - void testMonitorablePlugins(GroupProtocol groupProtocol) { - try { - String clientId = "testMonitorablePlugins"; - Map configs = new HashMap<>(); - configs.put(ConsumerConfig.CLIENT_ID_CONFIG, clientId); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - configs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); - configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, MonitorableDeserializer.class.getName()); - configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name); - configs.put(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, MonitorableInterceptor.class.getName()); - - KafkaConsumer consumer = new KafkaConsumer<>(configs); - Map metrics = consumer.metrics(); - - MetricName expectedKeyDeserializerMetric = expectedMetricName( - clientId, - ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, - MonitorableDeserializer.class); - assertTrue(metrics.containsKey(expectedKeyDeserializerMetric)); - assertEquals(VALUE, metrics.get(expectedKeyDeserializerMetric).metricValue()); - - MetricName expectedValueDeserializerMetric = expectedMetricName( - clientId, - ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, - MonitorableDeserializer.class); - assertTrue(metrics.containsKey(expectedValueDeserializerMetric)); - assertEquals(VALUE, metrics.get(expectedValueDeserializerMetric).metricValue()); - - MetricName expectedInterceptorMetric = expectedMetricName( - clientId, - ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, - MonitorableInterceptor.class); - assertTrue(metrics.containsKey(expectedInterceptorMetric)); - assertEquals(VALUE, metrics.get(expectedInterceptorMetric).metricValue()); - - consumer.close(CloseOptions.timeout(Duration.ZERO)); - metrics = consumer.metrics(); - assertFalse(metrics.containsKey(expectedKeyDeserializerMetric)); - assertFalse(metrics.containsKey(expectedValueDeserializerMetric)); - assertFalse(metrics.containsKey(expectedInterceptorMetric)); - } finally { - MockConsumerInterceptor.resetCounters(); - } - } - - private MetricName expectedMetricName(String clientId, String config, Class clazz) { - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("client-id", clientId); - expectedTags.put("config", config); - expectedTags.put("class", clazz.getSimpleName()); - expectedTags.putAll(TAGS); - return new MetricName(NAME, "plugins", DESCRIPTION, expectedTags); - } - - private static final String NAME = "name"; - private static final String DESCRIPTION = "description"; - private static final LinkedHashMap TAGS = new LinkedHashMap<>(); - private static final double VALUE = 123.0; - - static { - TAGS.put("t1", "v1"); - } - - public static class MonitorableDeserializer extends MockDeserializer implements Monitorable { - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); - metrics.addMetric(name, (Measurable) (config, now) -> VALUE); - } - } - - public static class MonitorableInterceptor extends MockConsumerInterceptor implements Monitorable { - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); - metrics.addMetric(name, (Measurable) (config, now) -> VALUE); - } - } - - public static class CloseInterceptor implements ConsumerInterceptor { - - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - - @Override - public ConsumerRecords onConsume(ConsumerRecords records) { - return null; - } - - @Override - public void onCommit(Map offsets) { - // no-op - } - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs) { - // no-op - } - - public static void resetCounters() { - CLOSE_COUNT.set(0); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java index a5417c3e00fd1..b69064905eca3 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/KafkaShareConsumerMetricsTest.java @@ -21,31 +21,20 @@ import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.consumer.internals.AutoOffsetResetStrategy; import org.apache.kafka.clients.consumer.internals.ConsumerMetadata; -import org.apache.kafka.clients.consumer.internals.ShareConsumerImpl; import org.apache.kafka.clients.consumer.internals.SubscriptionState; -import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.internals.ClusterResourceListeners; -import org.apache.kafka.common.metrics.KafkaMetric; -import org.apache.kafka.common.metrics.Measurable; -import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.serialization.Deserializer; import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; -import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; -import org.mockito.MockedStatic; -import org.mockito.Mockito; -import org.mockito.internal.stubbing.answers.CallsRealMethods; import java.time.Duration; import java.util.AbstractMap; @@ -57,16 +46,9 @@ import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyString; -import static org.mockito.Mockito.atMostOnce; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.mockStatic; -import static org.mockito.Mockito.never; public class KafkaShareConsumerMetricsTest { private final String topic = "test"; @@ -175,7 +157,6 @@ public void testClosingConsumerUnregistersConsumerMetrics() { ConsumerMetadata metadata = createMetadata(subscription); MockClient client = new MockClient(time, metadata); initMetadata(client, Collections.singletonMap(topic, 1)); - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); consumer.subscribe(Collections.singletonList(topic)); assertTrue(consumerMetricPresent(consumer, "last-poll-seconds-ago")); @@ -187,110 +168,6 @@ public void testClosingConsumerUnregistersConsumerMetrics() { assertFalse(consumerMetricPresent(consumer, "time-between-poll-max")); } - @Test - public void testRegisteringCustomMetricsDoesntAffectConsumerMetrics() { - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - Map customMetrics = customMetrics(); - customMetrics.forEach((name, metric) -> consumer.registerMetricForSubscription(metric)); - - Map consumerMetrics = consumer.metrics(); - customMetrics.forEach((name, metric) -> assertFalse(consumerMetrics.containsKey(name))); - } - - @Test - public void testRegisteringCustomMetricsWithSameNameDoesntAffectConsumerMetrics() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - appender.setClassLogger(ShareConsumerImpl.class, Level.DEBUG); - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - KafkaMetric existingMetricToAdd = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); - consumer.registerMetricForSubscription(existingMetricToAdd); - final String expectedMessage = String.format("Skipping registration for metric %s. Existing consumer metrics cannot be overwritten.", existingMetricToAdd.metricName()); - assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); - } - } - - @Test - public void testUnregisteringCustomMetricsWithSameNameDoesntAffectConsumerMetrics() { - try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - appender.setClassLogger(ShareConsumerImpl.class, Level.DEBUG); - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - KafkaMetric existingMetricToRemove = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); - consumer.unregisterMetricFromSubscription(existingMetricToRemove); - final String expectedMessage = String.format("Skipping unregistration for metric %s. Existing consumer metrics cannot be removed.", existingMetricToRemove.metricName()); - assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); - } - } - - @Test - public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingConsumerMetric() { - try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { - ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); - clientTelemetryReporter.configure(any()); - mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); - - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - - KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); - consumer.registerMetricForSubscription(existingMetric); - // This test would fail without the check as the existing metric is registered in the consumer on startup - Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); - } - } - - @Test - public void testShouldNotCallMetricReporterMetricRemovalWithExistingConsumerMetric() { - try (MockedStatic mockedCommonClientConfigs = mockStatic(CommonClientConfigs.class, new CallsRealMethods())) { - ClientTelemetryReporter clientTelemetryReporter = mock(ClientTelemetryReporter.class); - clientTelemetryReporter.configure(any()); - mockedCommonClientConfigs.when(() -> CommonClientConfigs.telemetryReporter(anyString(), any())).thenReturn(Optional.of(clientTelemetryReporter)); - - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - - KafkaMetric existingMetric = (KafkaMetric) consumer.metrics().entrySet().iterator().next().getValue(); - consumer.unregisterMetricFromSubscription(existingMetric); - Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric); - } - } - - @Test - public void testUnregisteringNonexistingMetricsDoesntCauseError() { - Time time = new MockTime(1L); - ConsumerMetadata metadata = createMetadata(subscription); - MockClient client = new MockClient(time, metadata); - initMetadata(client, Collections.singletonMap(topic, 1)); - - KafkaShareConsumer consumer = newShareConsumer(time, client, subscription, metadata); - - Map customMetrics = customMetrics(); - // Metrics never registered but removed should not cause an error - customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> consumer.unregisterMetricFromSubscription(metric))); - } - private ConsumerMetadata createMetadata(SubscriptionState subscription) { return new ConsumerMetadata(0, 0, Long.MAX_VALUE, false, false, subscription, new LogContext(), new ClusterResourceListeners()); @@ -320,7 +197,7 @@ private KafkaShareConsumer newShareConsumer(Time time, Deserializer keyDeserializer = new StringDeserializer(); Deserializer valueDeserializer = valueDeserializerOpt.orElse(new StringDeserializer()); LogContext logContext = new LogContext(); - ShareConsumerConfig config = newConsumerConfig(groupId, valueDeserializer); + ConsumerConfig config = newConsumerConfig(groupId, valueDeserializer); return new KafkaShareConsumer<>( logContext, clientId, @@ -335,7 +212,7 @@ private KafkaShareConsumer newShareConsumer(Time time, ); } - private ShareConsumerConfig newConsumerConfig(String groupId, + private ConsumerConfig newConsumerConfig(String groupId, Deserializer valueDeserializer) { String clientId = "mock-consumer"; long retryBackoffMs = 100; @@ -367,9 +244,8 @@ private ShareConsumerConfig newConsumerConfig(String groupId, configs.put(ConsumerConfig.RETRY_BACKOFF_MAX_MS_CONFIG, retryBackoffMaxMs); configs.put(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, retryBackoffMs); configs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, valueDeserializer.getClass()); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); - return new ShareConsumerConfig(configs); + return new ConsumerConfig(configs); } private void initMetadata(MockClient mockClient, Map partitionCounts) { Map metadataIds = new HashMap<>(); @@ -380,15 +256,4 @@ private void initMetadata(MockClient mockClient, Map partitionC mockClient.updateMetadata(initialMetadata); } - - private Map customMetrics() { - MetricConfig metricConfig = new MetricConfig(); - Object lock = new Object(); - MetricName metricNameOne = new MetricName("metricOne", "stream-metrics", "description for metric one", new HashMap<>()); - MetricName metricNameTwo = new MetricName("metricTwo", "stream-metrics", "description for metric two", new HashMap<>()); - - KafkaMetric streamClientMetricOne = new KafkaMetric(lock, metricNameOne, (Measurable) (m, now) -> 1.0, metricConfig, Time.SYSTEM); - KafkaMetric streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM); - return Map.of(metricNameOne, streamClientMetricOne, metricNameTwo, streamClientMetricTwo); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java index 6968b45a57b66..21cee3183bc69 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/MockConsumerTest.java @@ -32,7 +32,6 @@ import java.util.Iterator; import java.util.List; import java.util.Optional; -import java.util.stream.IntStream; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -203,31 +202,4 @@ public void testRe2JPatternSubscription() { assertThrows(IllegalStateException.class, () -> consumer.subscribe(List.of("topic1"))); } - @Test - public void shouldReturnMaxPollRecords() { - TopicPartition partition = new TopicPartition("test", 0); - consumer.assign(Collections.singleton(partition)); - consumer.updateBeginningOffsets(Collections.singletonMap(partition, 0L)); - - IntStream.range(0, 10).forEach(offset -> consumer.addRecord(new ConsumerRecord<>("test", 0, offset, null, null))); - - consumer.setMaxPollRecords(2L); - - ConsumerRecords records; - - records = consumer.poll(Duration.ofMillis(1)); - assertEquals(2, records.count()); - - records = consumer.poll(Duration.ofMillis(1)); - assertEquals(2, records.count()); - - consumer.setMaxPollRecords(Long.MAX_VALUE); - - records = consumer.poll(Duration.ofMillis(1)); - assertEquals(6, records.count()); - - records = consumer.poll(Duration.ofMillis(1)); - assertTrue(records.isEmpty()); - } - } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java index c1a13c054eea4..3035703ff37ab 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/OffsetAndMetadataTest.java @@ -65,19 +65,4 @@ public void testDeserializationCompatibilityWithLeaderEpoch() throws IOException assertEquals(new OffsetAndMetadata(10, Optional.of(235), "test commit metadata"), deserializedObject); } - @Test - public void testEqualsWithNullAndNegativeLeaderEpoch() { - OffsetAndMetadata metadataWithNullEpoch = new OffsetAndMetadata(100L, Optional.empty(), "metadata"); - OffsetAndMetadata metadataWithNegativeEpoch = new OffsetAndMetadata(100L, Optional.of(-1), "metadata"); - assertEquals(metadataWithNullEpoch, metadataWithNegativeEpoch); - assertEquals(metadataWithNullEpoch.hashCode(), metadataWithNegativeEpoch.hashCode()); - } - - @Test - public void testEqualsWithNullAndEmptyMetadata() { - OffsetAndMetadata metadataWithNullMetadata = new OffsetAndMetadata(100L, Optional.of(1), null); - OffsetAndMetadata metadataWithEmptyMetadata = new OffsetAndMetadata(100L, Optional.of(1), ""); - assertEquals(metadataWithNullMetadata, metadataWithEmptyMetadata); - assertEquals(metadataWithNullMetadata.hashCode(), metadataWithEmptyMetadata.hashCode()); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/StickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/StickyAssignorTest.java index 74180a4edcfa0..65ea13100eb76 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/StickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/StickyAssignorTest.java @@ -24,16 +24,13 @@ import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.protocol.types.Struct; -import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.common.utils.CollectionUtils; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.Timeout; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.EnumSource; import org.junit.jupiter.params.provider.MethodSource; -import org.junit.jupiter.params.provider.ValueSource; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -48,7 +45,6 @@ import static java.util.Collections.emptyList; import static org.apache.kafka.clients.consumer.StickyAssignor.serializeTopicPartitionAssignment; -import static org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignorTest.TEST_NAME_WITH_CONSUMER_RACK; import static org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignorTest.TEST_NAME_WITH_RACK_CONFIG; import static org.apache.kafka.clients.consumer.internals.AbstractStickyAssignor.DEFAULT_GENERATION; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -85,14 +81,6 @@ public ByteBuffer generateUserData(List topics, List par return serializeTopicPartitionAssignment(new MemberData(partitions, Optional.of(generation))); } - @Timeout(30) - @ParameterizedTest(name = TEST_NAME_WITH_CONSUMER_RACK) - @ValueSource(booleans = {false, true}) - @Flaky(value = "KAFKA-18797", comment = "Remove this override once the flakiness has been resolved.") - public void testLargeAssignmentAndGroupWithUniformSubscription(boolean hasConsumerRack) { - super.testLargeAssignmentAndGroupWithUniformSubscription(hasConsumerRack); - } - @ParameterizedTest(name = TEST_NAME_WITH_RACK_CONFIG) @EnumSource(RackConfig.class) public void testAllConsumersHaveOwnedPartitionInvalidatedWhenClaimedByMultipleConsumersInSameGenerationWithEqualPartitionsPerConsumer(RackConfig rackConfig) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java index 7ef7bb11380b8..1eb9a77a9f1c3 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractCoordinatorTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.MockClient; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.common.Node; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; @@ -52,6 +51,7 @@ import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.SyncGroupRequest; import org.apache.kafka.common.requests.SyncGroupResponse; +import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -60,10 +60,8 @@ import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.MethodSource; import java.nio.ByteBuffer; import java.util.Arrays; @@ -78,8 +76,6 @@ import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Supplier; -import java.util.stream.Stream; import static java.util.Collections.emptyMap; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -91,8 +87,6 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.verify; public class AbstractCoordinatorTest { private static final ByteBuffer EMPTY_DATA = ByteBuffer.wrap(new byte[0]); @@ -127,20 +121,15 @@ public void closeCoordinator() { private void setupCoordinator() { setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, - Optional.empty(), Optional.empty()); + Optional.empty()); } private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs) { setupCoordinator(retryBackoffMs, retryBackoffMaxMs, REBALANCE_TIMEOUT_MS, - Optional.empty(), Optional.empty()); + Optional.empty()); } - - private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional groupInstanceId, Optional> heartbeatThreadSupplier) { - setupCoordinator(retryBackoffMs, retryBackoffMaxMs, rebalanceTimeoutMs, groupInstanceId, heartbeatThreadSupplier, groupInstanceId.isEmpty()); - } - - private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional groupInstanceId, Optional> heartbeatThreadSupplier, boolean leaveOnClose) { + private void setupCoordinator(int retryBackoffMs, int retryBackoffMaxMs, int rebalanceTimeoutMs, Optional groupInstanceId) { LogContext logContext = new LogContext(); this.mockTime = new MockTime(); ConsumerMetadata metadata = new ConsumerMetadata(retryBackoffMs, retryBackoffMaxMs, 60 * 60 * 1000L, @@ -166,15 +155,13 @@ false, false, new SubscriptionState(logContext, AutoOffsetResetStrategy.EARLIEST HEARTBEAT_INTERVAL_MS, GROUP_ID, groupInstanceId, - null, retryBackoffMs, retryBackoffMaxMs, - leaveOnClose); + groupInstanceId.isEmpty()); this.coordinator = new DummyCoordinator(rebalanceConfig, consumerClient, metrics, - mockTime, - heartbeatThreadSupplier); + mockTime); } private void joinGroup() { @@ -363,7 +350,8 @@ public void testGroupMaxSizeExceptionIsFatal() { @Test public void testJoinGroupRequestTimeout() { - setupCoordinator(); + setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, + Optional.empty()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(mockTime.timer(0)); @@ -380,7 +368,7 @@ public void testJoinGroupRequestTimeout() { @Test public void testJoinGroupRequestTimeoutLowerBoundedByDefaultRequestTimeout() { int rebalanceTimeoutMs = REQUEST_TIMEOUT_MS - 10000; - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, rebalanceTimeoutMs, Optional.empty(), Optional.empty()); + setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, rebalanceTimeoutMs, Optional.empty()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(mockTime.timer(0)); @@ -400,7 +388,7 @@ public void testJoinGroupRequestMaxTimeout() { // Ensure we can handle the maximum allowed rebalance timeout setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, - Optional.empty(), Optional.empty()); + Optional.empty()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(mockTime.timer(0)); @@ -1106,29 +1094,8 @@ public void testLeaveGroupSentWithGroupInstanceIdUnSet() { checkLeaveGroupRequestSent(Optional.of("groupInstanceId")); } - @ParameterizedTest - @MethodSource("groupInstanceIdAndMembershipOperationMatrix") - public void testLeaveGroupSentWithGroupInstanceIdUnSetAndDifferentGroupMembershipOperation(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation) { - checkLeaveGroupRequestSent(groupInstanceId, operation, Optional.empty(), true); - } - - private static Stream groupInstanceIdAndMembershipOperationMatrix() { - return Stream.of( - Arguments.of(Optional.empty(), CloseOptions.GroupMembershipOperation.DEFAULT), - Arguments.of(Optional.empty(), CloseOptions.GroupMembershipOperation.LEAVE_GROUP), - Arguments.of(Optional.empty(), CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP), - Arguments.of(Optional.of("groupInstanceId"), CloseOptions.GroupMembershipOperation.DEFAULT), - Arguments.of(Optional.of("groupInstanceId"), CloseOptions.GroupMembershipOperation.LEAVE_GROUP), - Arguments.of(Optional.of("groupInstanceId"), CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP) - ); - } - - private void checkLeaveGroupRequestSent(Optional groupInstanceId) { - checkLeaveGroupRequestSent(groupInstanceId, CloseOptions.GroupMembershipOperation.DEFAULT, Optional.empty(), groupInstanceId.isEmpty()); - } - - private void checkLeaveGroupRequestSent(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation, Optional> heartbeatThreadSupplier, boolean leaveOnClose) { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId, heartbeatThreadSupplier, leaveOnClose); + private void checkLeaveGroupRequestSent(Optional groupInstanceId) { + setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, groupInstanceId); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1145,13 +1112,12 @@ private void checkLeaveGroupRequestSent(Optional groupInstanceId, CloseO try { coordinator.ensureActiveGroup(); - coordinator.close(new MockTime().timer(0), operation); - if (CloseOptions.GroupMembershipOperation.LEAVE_GROUP == operation || - (CloseOptions.GroupMembershipOperation.DEFAULT == operation && coordinator.isDynamicMember())) { + coordinator.close(); + if (coordinator.isDynamicMember()) { fail("Expected leavegroup to raise an error."); } } catch (RuntimeException exception) { - if (CloseOptions.GroupMembershipOperation.LEAVE_GROUP == operation || coordinator.isDynamicMember()) { + if (coordinator.isDynamicMember()) { assertEquals(exception, e); } else { fail("Coordinator with group.instance.id set shouldn't send leave group request."); @@ -1224,7 +1190,7 @@ private RequestFuture setupLeaveGroup(LeaveGroupResponse leaveGroupRespons private RequestFuture setupLeaveGroup(LeaveGroupResponse leaveGroupResponse, String leaveReason, String expectedLeaveReason) { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, Optional.empty(), Optional.empty()); + setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, Integer.MAX_VALUE, Optional.empty()); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1239,7 +1205,7 @@ private RequestFuture setupLeaveGroup(LeaveGroupResponse leaveGroupRespons }, leaveGroupResponse); coordinator.ensureActiveGroup(); - return coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, leaveReason); + return coordinator.maybeLeaveGroup(leaveReason); } @Test @@ -1470,10 +1436,10 @@ public void testWakeupAfterJoinGroupReceivedExternalCompletion() throws Exceptio awaitFirstHeartbeat(heartbeatReceived); } + @Flaky("KAFKA-18310") @Test public void testWakeupAfterSyncGroupSentExternalCompletion() throws Exception { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, - Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class))); + setupCoordinator(); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1489,13 +1455,13 @@ public boolean matches(AbstractRequest body) { return isSyncGroupRequest; } }, syncGroupResponse(Errors.NONE)); + AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()"); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); - assertNotNull(coordinator.heartbeatThread()); - verify(coordinator.heartbeatThread()).enable(); + assertFalse(heartbeatReceived.get()); // the join group completes in this poll() consumerClient.poll(mockTime.timer(0)); @@ -1503,12 +1469,14 @@ public boolean matches(AbstractRequest body) { assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); + + awaitFirstHeartbeat(heartbeatReceived); } + @Flaky("KAFKA-18310") @Test - public void testWakeupAfterSyncGroupReceived() { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, - Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class))); + public void testWakeupAfterSyncGroupReceived() throws Exception { + setupCoordinator(); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1519,6 +1487,7 @@ public void testWakeupAfterSyncGroupReceived() { consumerClient.wakeup(); return isSyncGroupRequest; }, syncGroupResponse(Errors.NONE)); + AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); try { coordinator.ensureActiveGroup(); @@ -1528,19 +1497,20 @@ public void testWakeupAfterSyncGroupReceived() { assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); - assertNotNull(coordinator.heartbeatThread()); - verify(coordinator.heartbeatThread()).enable(); + assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); + + awaitFirstHeartbeat(heartbeatReceived); } @Test - public void testWakeupAfterSyncGroupReceivedExternalCompletion() { - setupCoordinator(RETRY_BACKOFF_MS, RETRY_BACKOFF_MAX_MS, REBALANCE_TIMEOUT_MS, - Optional.empty(), Optional.of(() -> mock(BaseHeartbeatThread.class))); + @Disabled("KAFKA-15474") + public void testWakeupAfterSyncGroupReceivedExternalCompletion() throws Exception { + setupCoordinator(); mockClient.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); mockClient.prepareResponse(joinGroupFollowerResponse(1, memberId, leaderId, Errors.NONE)); @@ -1551,18 +1521,20 @@ public void testWakeupAfterSyncGroupReceivedExternalCompletion() { consumerClient.wakeup(); return isSyncGroupRequest; }, syncGroupResponse(Errors.NONE)); + AtomicBoolean heartbeatReceived = prepareFirstHeartbeat(); assertThrows(WakeupException.class, () -> coordinator.ensureActiveGroup(), "Should have woken up from ensureActiveGroup()"); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(0, coordinator.onJoinCompleteInvokes); - assertNotNull(coordinator.heartbeatThread()); - verify(coordinator.heartbeatThread()).enable(); + assertFalse(heartbeatReceived.get()); coordinator.ensureActiveGroup(); assertEquals(1, coordinator.onJoinPrepareInvokes); assertEquals(1, coordinator.onJoinCompleteInvokes); + + awaitFirstHeartbeat(heartbeatReceived); } @Test @@ -1602,9 +1574,12 @@ public void testAuthenticationErrorInEnsureCoordinatorReady() { mockClient.createPendingAuthenticationError(node, 300); - assertThrows(AuthenticationException.class, - () -> coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE)), - "Expected an authentication error."); + try { + coordinator.ensureCoordinatorReady(mockTime.timer(Long.MAX_VALUE)); + fail("Expected an authentication error."); + } catch (AuthenticationException e) { + // OK + } } @Test @@ -1733,9 +1708,8 @@ public static class DummyCoordinator extends AbstractCoordinator { DummyCoordinator(GroupRebalanceConfig rebalanceConfig, ConsumerNetworkClient client, Metrics metrics, - Time time, - Optional> heartbeatThreadSupplier) { - super(rebalanceConfig, new LogContext(), client, metrics, METRIC_GROUP_PREFIX, time, Optional.empty(), heartbeatThreadSupplier); + Time time) { + super(rebalanceConfig, new LogContext(), client, metrics, METRIC_GROUP_PREFIX, time); } @Override @@ -1777,4 +1751,5 @@ protected void onJoinComplete(int generation, String memberId, String protocol, onJoinCompleteInvokes++; } } + } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java index 4e9525264a01d..31334b7b19c52 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AbstractStickyAssignorTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.clients.consumer.internals.AbstractPartitionAssignorTest.RackConfig; import org.apache.kafka.common.PartitionInfo; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.common.utils.CollectionUtils; import org.apache.kafka.common.utils.Utils; @@ -742,7 +741,6 @@ public void testLargeAssignmentAndGroupWithUniformSubscription(boolean hasConsum assignor.assignPartitions(partitionsPerTopic, subscriptions); } - @Flaky("KAFKA-13514") @Timeout(90) @ParameterizedTest(name = TEST_NAME_WITH_CONSUMER_RACK) @ValueSource(booleans = {false, true}) @@ -1025,7 +1023,7 @@ public void testAssignmentUpdatedForDeletedTopic(RackConfig rackConfig) { Map> assignment = assignor.assignPartitions(partitionsPerTopic, subscriptions); assertTrue(assignor.partitionsTransferringOwnership.isEmpty()); - assertEquals(1 + 100, assignment.values().stream().mapToInt(List::size).sum()); + assertEquals(assignment.values().stream().mapToInt(List::size).sum(), 1 + 100); assertEquals(Collections.singleton(consumerId), assignment.keySet()); assertTrue(isFullyBalanced(assignment)); } @@ -1043,7 +1041,7 @@ public void testNoExceptionThrownWhenOnlySubscribedTopicDeleted(RackConfig rackC assignment = assignor.assign(Collections.emptyMap(), subscriptions); assertTrue(assignor.partitionsTransferringOwnership.isEmpty()); - assertEquals(1, assignment.size()); + assertEquals(assignment.size(), 1); assertTrue(assignment.get(consumerId).isEmpty()); } @@ -1428,8 +1426,14 @@ private String getCanonicalName(String str, int i, int maxNum) { } private String pad(int num, int digits) { + StringBuilder sb = new StringBuilder(); int iDigits = Integer.toString(num).length(); - return "0".repeat(Math.max(0, digits - iDigits)) + num; + + for (int i = 1; i <= digits - iDigits; ++i) + sb.append("0"); + + sb.append(num); + return sb.toString(); } protected static List topics(String... topics) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandlerTest.java index c6e10040d32f5..9a28b49ed7ed3 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementCommitCallbackHandlerTest.java @@ -85,7 +85,7 @@ public void testInvalidRecord() throws Exception { Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(0L, AcknowledgeType.ACCEPT); acknowledgements.add(1L, AcknowledgeType.REJECT); - acknowledgements.complete(Errors.INVALID_RECORD_STATE.exception()); + acknowledgements.setAcknowledgeErrorCode(Errors.INVALID_RECORD_STATE); acknowledgementsMap.put(tip0, acknowledgements); acknowledgementCommitCallbackHandler.onComplete(Collections.singletonList(acknowledgementsMap)); @@ -101,7 +101,7 @@ public void testUnauthorizedTopic() throws Exception { Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(0L, AcknowledgeType.ACCEPT); acknowledgements.add(1L, AcknowledgeType.REJECT); - acknowledgements.complete(Errors.TOPIC_AUTHORIZATION_FAILED.exception()); + acknowledgements.setAcknowledgeErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED); acknowledgementsMap.put(tip0, acknowledgements); acknowledgementCommitCallbackHandler.onComplete(Collections.singletonList(acknowledgementsMap)); @@ -116,12 +116,12 @@ public void testMultiplePartitions() throws Exception { Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(0L, AcknowledgeType.ACCEPT); acknowledgements.add(1L, AcknowledgeType.REJECT); - acknowledgements.complete(Errors.TOPIC_AUTHORIZATION_FAILED.exception()); + acknowledgements.setAcknowledgeErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED); acknowledgementsMap.put(tip0, acknowledgements); Acknowledgements acknowledgements1 = Acknowledgements.empty(); acknowledgements1.add(0L, AcknowledgeType.RELEASE); - acknowledgements1.complete(Errors.INVALID_RECORD_STATE.exception()); + acknowledgements1.setAcknowledgeErrorCode(Errors.INVALID_RECORD_STATE); acknowledgementsMap.put(tip1, acknowledgements1); Map acknowledgementsMap2 = new HashMap<>(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java index b6818ab51b5cf..779df4fb43c4a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AcknowledgementsTest.java @@ -83,7 +83,7 @@ public void testSingleStateMultiRecord() { } @Test - public void testSingleAcknowledgeTypeExceedingLimit() { + public void testSingleAcknowledgementTypeExceedingLimit() { int i = 0; for (; i < maxRecordsWithSameAcknowledgeType; i++) { acks.add(i, AcknowledgeType.ACCEPT); @@ -119,7 +119,7 @@ public void testSingleAcknowledgeTypeExceedingLimit() { } @Test - public void testSingleAcknowledgeTypeWithGap() { + public void testSingleAcknowledgementTypeWithGap() { for (int i = 0; i < maxRecordsWithSameAcknowledgeType; i++) { acks.add(i, null); } @@ -186,7 +186,7 @@ public void testOptimiseBatches() { } @Test - public void testSingleAcknowledgeTypeWithinLimit() { + public void testSingleAcknowledgementTypeWithinLimit() { acks.add(0L, AcknowledgeType.ACCEPT); acks.add(1L, AcknowledgeType.ACCEPT); acks.add(2L, AcknowledgeType.ACCEPT); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java index 402697227ee80..3430719b16ee6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ApplicationEventHandlerTest.java @@ -27,8 +27,7 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.api.Test; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; @@ -45,11 +44,10 @@ public class ApplicationEventHandlerTest { private final RequestManagers requestManagers = mock(RequestManagers.class); private final CompletableEventReaper applicationEventReaper = mock(CompletableEventReaper.class); - @ParameterizedTest - @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") - public void testRecordApplicationEventQueueSize(String groupName) { + @Test + public void testRecordApplicationEventQueueSize() { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = spy(new AsyncConsumerMetrics(metrics, groupName)); + AsyncConsumerMetrics asyncConsumerMetrics = spy(new AsyncConsumerMetrics(metrics)); ApplicationEventHandler applicationEventHandler = new ApplicationEventHandler( new LogContext(), time, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java index 8e44b3fcc25d5..cb03d585ba6a5 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/AsyncKafkaConsumerTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.Metadata.LeaderAndEpoch; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NodeApiVersions; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; @@ -30,6 +29,7 @@ import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.clients.consumer.OffsetAndTimestamp; import org.apache.kafka.clients.consumer.OffsetCommitCallback; +import org.apache.kafka.clients.consumer.RetriableCommitFailedException; import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.clients.consumer.internals.events.ApplicationEvent; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; @@ -68,6 +68,7 @@ import org.apache.kafka.common.errors.GroupAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidGroupIdException; +import org.apache.kafka.common.errors.RetriableException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.errors.WakeupException; @@ -83,7 +84,6 @@ import org.apache.kafka.common.requests.MetadataResponse; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -117,7 +117,6 @@ import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.UUID; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; @@ -170,7 +169,6 @@ public class AsyncKafkaConsumerTest { private AsyncKafkaConsumer consumer = null; private Time time = new MockTime(0); - private final Metrics metrics = new Metrics(); private final FetchCollector fetchCollector = mock(FetchCollector.class); private final ApplicationEventHandler applicationEventHandler = mock(ApplicationEventHandler.class); private final ConsumerMetadata metadata = mock(ConsumerMetadata.class); @@ -182,9 +180,9 @@ public void resetAll() { backgroundEventQueue.clear(); if (consumer != null) { try { - consumer.close(CloseOptions.timeout(Duration.ZERO)); - } catch (Exception swallow) { - // best effort to clean up after each test, but may throw (ex. if callbacks were + consumer.close(Duration.ZERO); + } catch (Exception e) { + // best effort to clean up after each test, but may throw (ex. if callbacks where // throwing errors) } } @@ -206,13 +204,6 @@ private AsyncKafkaConsumer newConsumerWithoutGroupId() { } private AsyncKafkaConsumer newConsumer(Properties props) { - return newConsumerWithStreamRebalanceData(props, null); - } - - private AsyncKafkaConsumer newConsumerWithStreamRebalanceData( - Properties props, - StreamsRebalanceData streamsRebalanceData - ) { // disable auto-commit by default, so we don't need to handle SyncCommitEvent for each case if (!props.containsKey(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG)) { props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false); @@ -223,12 +214,11 @@ private AsyncKafkaConsumer newConsumerWithStreamRebalanceData( new StringDeserializer(), new StringDeserializer(), time, - (logContext, time, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler, - logContext -> backgroundEventReaper, - (logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector, - (consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata, - backgroundEventQueue, - Optional.ofNullable(streamsRebalanceData) + (a, b, c, d, e, f, g, h) -> applicationEventHandler, + a -> backgroundEventReaper, + (a, b, c, d, e, f, g) -> fetchCollector, + (a, b, c, d) -> metadata, + backgroundEventQueue ); } @@ -238,12 +228,11 @@ private AsyncKafkaConsumer newConsumer(ConsumerConfig config) { new StringDeserializer(), new StringDeserializer(), time, - (logContext, time, applicationEventBlockingQueue, completableEventReaper, applicationEventProcessorSupplier, networkClientDelegateSupplier, requestManagersSupplier, asyncConsumerMetrics) -> applicationEventHandler, - logContext -> backgroundEventReaper, - (logContext, consumerMetadata, subscriptionState, fetchConfig, deserializers, fetchMetricsManager, time) -> fetchCollector, - (consumerConfig, subscriptionState, logContext, clusterResourceListeners) -> metadata, - backgroundEventQueue, - Optional.empty() + (a, b, c, d, e, f, g, h) -> applicationEventHandler, + a -> backgroundEventReaper, + (a, b, c, d, e, f, g) -> fetchCollector, + (a, b, c, d) -> metadata, + backgroundEventQueue ); } @@ -251,14 +240,17 @@ private AsyncKafkaConsumer newConsumer( FetchBuffer fetchBuffer, ConsumerInterceptors interceptors, ConsumerRebalanceListenerInvoker rebalanceListenerInvoker, - SubscriptionState subscriptions) { + SubscriptionState subscriptions, + String groupId, + String clientId, + boolean autoCommitEnabled) { long retryBackoffMs = 100L; int requestTimeoutMs = 30000; int defaultApiTimeoutMs = 1000; return new AsyncKafkaConsumer<>( new LogContext(), - "client-id", - new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics), + clientId, + new Deserializers<>(new StringDeserializer(), new StringDeserializer()), fetchBuffer, fetchCollector, interceptors, @@ -267,14 +259,14 @@ private AsyncKafkaConsumer newConsumer( backgroundEventQueue, backgroundEventReaper, rebalanceListenerInvoker, - metrics, + new Metrics(), subscriptions, metadata, retryBackoffMs, requestTimeoutMs, defaultApiTimeoutMs, - "group-id", - false); + groupId, + autoCommitEnabled); } @Test @@ -317,7 +309,7 @@ public void testCommitAsyncWithNullCallback() { // Clean-up. Close the consumer here as we know it will cause a TimeoutException to be thrown. // If we get an error *other* than the TimeoutException, we'll fail the test. try { - Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); + Exception e = assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); assertInstanceOf(TimeoutException.class, e.getCause()); } finally { consumer = null; @@ -336,7 +328,7 @@ public void testCommitAsyncUserSuppliedCallbackNoException() { assertDoesNotThrow(() -> consumer.commitAsync(offsets, callback)); forceCommitCallbackInvocation(); - assertEquals(1, callback.invoked); + assertEquals(callback.invoked, 1); assertNull(callback.exception); } @@ -356,26 +348,6 @@ public void testCommitAsyncUserSuppliedCallbackWithException(Exception exception assertSame(exception.getClass(), callback.exception.getClass()); } - @Test - public void testCommitAsyncShouldCopyOffsets() { - consumer = newConsumer(); - - TopicPartition tp = new TopicPartition("t0", 2); - Map offsets = new HashMap<>(); - offsets.put(tp, new OffsetAndMetadata(10L)); - - markOffsetsReadyForCommitEvent(); - consumer.commitAsync(offsets, null); - - final ArgumentCaptor commitEventCaptor = ArgumentCaptor.forClass(AsyncCommitEvent.class); - verify(applicationEventHandler).add(commitEventCaptor.capture()); - final AsyncCommitEvent commitEvent = commitEventCaptor.getValue(); - assertTrue(commitEvent.offsets().isPresent()); - assertTrue(commitEvent.offsets().get().containsKey(tp)); - offsets.remove(tp); - assertTrue(commitEvent.offsets().get().containsKey(tp)); - } - private static Stream commitExceptionSupplier() { return Stream.of( new KafkaException("Test exception"), @@ -609,26 +581,6 @@ public void testCommitSyncAwaitsCommitAsyncButDoesNotFail() { assertDoesNotThrow(() -> consumer.commitSync(Collections.singletonMap(tp, new OffsetAndMetadata(20)), Duration.ofMillis(100))); } - @Test - public void testCommitSyncShouldCopyOffsets() { - consumer = newConsumer(); - - TopicPartition tp = new TopicPartition("t0", 2); - Map offsets = new HashMap<>(); - offsets.put(tp, new OffsetAndMetadata(10L)); - - completeCommitSyncApplicationEventSuccessfully(); - consumer.commitSync(offsets); - - final ArgumentCaptor commitEventCaptor = ArgumentCaptor.forClass(SyncCommitEvent.class); - verify(applicationEventHandler).add(commitEventCaptor.capture()); - final SyncCommitEvent commitEvent = commitEventCaptor.getValue(); - assertTrue(commitEvent.offsets().isPresent()); - assertTrue(commitEvent.offsets().get().containsKey(tp)); - offsets.remove(tp); - assertTrue(commitEvent.offsets().get().containsKey(tp)); - } - private CompletableFuture setUpConsumerWithIncompleteAsyncCommit(TopicPartition tp) { time = new MockTime(1); consumer = newConsumer(); @@ -674,7 +626,9 @@ public void testEnsurePollExecutedCommitAsyncCallbacks() { consumer.assign(Collections.singleton(new TopicPartition("foo", 0))); assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback)); markReconcileAndAutoCommitCompleteForPollEvent(); - assertMockCommitCallbackInvoked(() -> consumer.poll(Duration.ZERO), callback); + assertMockCommitCallbackInvoked(() -> consumer.poll(Duration.ZERO), + callback, + null); } @Test @@ -684,7 +638,9 @@ public void testEnsureShutdownExecutedCommitAsyncCallbacks() { MockCommitCallback callback = new MockCommitCallback(); completeCommitAsyncApplicationEventSuccessfully(); assertDoesNotThrow(() -> consumer.commitAsync(new HashMap<>(), callback)); - assertMockCommitCallbackInvoked(() -> consumer.close(), callback); + assertMockCommitCallbackInvoked(() -> consumer.close(), + callback, + null); } @Test @@ -705,8 +661,11 @@ public void testCloseLeavesGroup(long timeoutMs) { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions)); - consumer.close(CloseOptions.timeout(Duration.ofMillis(timeoutMs))); + subscriptions, + "group-id", + "client-id", + false)); + consumer.close(Duration.ofMillis(timeoutMs)); verify(applicationEventHandler).addAndGet(any(LeaveGroupOnCloseEvent.class)); } @@ -723,12 +682,15 @@ public void testCloseLeavesGroupDespiteOnPartitionsLostError() { consumer = spy(newConsumer( mock(FetchBuffer.class), - new ConsumerInterceptors<>(Collections.emptyList(), metrics), + new ConsumerInterceptors<>(Collections.emptyList()), invoker, - subscriptions)); + subscriptions, + "group-id", + "client-id", + false)); consumer.setGroupAssignmentSnapshot(partitions); - Throwable t = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); + Throwable t = assertThrows(KafkaException.class, () -> consumer.close(Duration.ZERO)); assertNotNull(t.getCause()); assertEquals(rootError, t.getCause()); @@ -746,12 +708,15 @@ public void testCloseLeavesGroupDespiteInterrupt(long timeoutMs) { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions)); + subscriptions, + "group-id", + "client-id", + false)); Duration timeout = Duration.ofMillis(timeoutMs); try { - assertThrows(InterruptException.class, () -> consumer.close(CloseOptions.timeout(timeout))); + assertThrows(InterruptException.class, () -> consumer.close(timeout)); } finally { Thread.interrupted(); } @@ -767,7 +732,10 @@ public void testCommitSyncAllConsumed() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions); + subscriptions, + "group-id", + "client-id", + false); completeTopicSubscriptionChangeEventSuccessfully(); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0))); @@ -789,7 +757,10 @@ public void testAutoCommitSyncDisabled() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions); + subscriptions, + "group-id", + "client-id", + false); completeTopicSubscriptionChangeEventSuccessfully(); consumer.subscribe(singleton("topic"), mock(ConsumerRebalanceListener.class)); subscriptions.assignFromSubscribed(singleton(new TopicPartition("topic", 0))); @@ -800,10 +771,15 @@ public void testAutoCommitSyncDisabled() { verify(applicationEventHandler, never()).add(any(SyncCommitEvent.class)); } - private void assertMockCommitCallbackInvoked(final Executable task, final MockCommitCallback callback) { + private void assertMockCommitCallbackInvoked(final Executable task, + final MockCommitCallback callback, + final Errors errors) { assertDoesNotThrow(task); assertEquals(1, callback.invoked); - assertNull(callback.exception); + if (errors == null) + assertNull(callback.exception); + else if (errors.exception() instanceof RetriableException) + assertInstanceOf(RetriableCommitFailedException.class, callback.exception); } private static class MockCommitCallback implements OffsetCommitCallback { @@ -1006,8 +982,9 @@ public void testBeginningOffsetsWithZeroTimeout() { TopicPartition tp = new TopicPartition("topic1", 0); Map result = assertDoesNotThrow(() -> consumer.beginningOffsets(Collections.singletonList(tp), Duration.ZERO)); - assertNotNull(result); - assertEquals(0, result.size()); + // The result should be {tp=null} + assertTrue(result.containsKey(tp)); + assertNull(result.get(tp)); verify(applicationEventHandler).add(ArgumentMatchers.isA(ListOffsetsEvent.class)); } @@ -1064,7 +1041,7 @@ public void testNoWakeupInCloseCommit() { return null; }).when(applicationEventHandler).add(any()); completeUnsubscribeApplicationEventSuccessfully(); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); // A commit was triggered and not completed exceptionally by the wakeup assertNotNull(capturedEvent.get()); @@ -1087,7 +1064,7 @@ public void testCloseAwaitPendingAsyncCommitIncomplete() { markOffsetsReadyForCommitEvent(); consumer.commitAsync(); - Exception e = assertThrows(KafkaException.class, () -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10)))); + Exception e = assertThrows(KafkaException.class, () -> consumer.close(Duration.ofMillis(10))); assertInstanceOf(TimeoutException.class, e.getCause()); } @@ -1108,7 +1085,7 @@ public void testCloseAwaitPendingAsyncCommitComplete() { consumer.commitAsync(cb); completeUnsubscribeApplicationEventSuccessfully(); - assertDoesNotThrow(() -> consumer.close(CloseOptions.timeout(Duration.ofMillis(10)))); + assertDoesNotThrow(() -> consumer.close(Duration.ofMillis(10))); assertEquals(1, cb.invoked); } @@ -1124,7 +1101,7 @@ public void testInterceptorAutoCommitOnClose() { completeCommitSyncApplicationEventSuccessfully(); completeUnsubscribeApplicationEventSuccessfully(); - consumer.close(CloseOptions.timeout(Duration.ZERO)); + consumer.close(Duration.ZERO); assertEquals(1, MockConsumerInterceptor.ON_COMMIT_COUNT.get()); assertEquals(1, MockConsumerInterceptor.CLOSE_COUNT.get()); @@ -1198,14 +1175,14 @@ public void testNoInterceptorCommitAsyncFailed() { @Test public void testRefreshCommittedOffsetsShouldNotResetIfFailedWithTimeout() { consumer = newConsumer(); - testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(); + testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(true); } @Test public void testRefreshCommittedOffsetsNotCalledIfNoGroupId() { // Create consumer without group id so committed offsets are not used for updating positions consumer = newConsumerWithoutGroupId(); - testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(); + testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(false); } @Test @@ -1337,8 +1314,7 @@ private MemberStateListener captureGroupMetadataUpdateListener(final MockedStati any(), any(), any(), - applicationThreadMemberStateListener.capture(), - any() + applicationThreadMemberStateListener.capture() )); return applicationThreadMemberStateListener.getValue(); } @@ -1391,51 +1367,6 @@ public void testGroupMetadataIsResetAfterUnsubscribe() { assertEquals(groupMetadataAfterUnsubscribe, consumer.groupMetadata()); } - private Optional captureStreamRebalanceData(final MockedStatic requestManagers) { - ArgumentCaptor> streamRebalanceData = ArgumentCaptor.forClass(Optional.class); - requestManagers.verify(() -> RequestManagers.supplier( - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - any(), - streamRebalanceData.capture() - )); - return streamRebalanceData.getValue(); - } - - @Test - public void testEmptyStreamRebalanceData() { - final String groupId = "consumerGroupA"; - try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { - consumer = newConsumer(requiredConsumerConfigAndGroupId(groupId)); - final Optional groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers); - assertTrue(groupMetadataUpdateListener.isEmpty()); - } - } - - @Test - public void testStreamRebalanceData() { - final String groupId = "consumerGroupA"; - try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { - StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); - consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); - final Optional groupMetadataUpdateListener = captureStreamRebalanceData(requestManagers); - assertTrue(groupMetadataUpdateListener.isPresent()); - assertEquals(streamsRebalanceData, groupMetadataUpdateListener.get()); - } - } - /** * Tests that the consumer correctly invokes the callbacks for {@link ConsumerRebalanceListener} that was * specified. We don't go through the full effort to emulate heartbeats and correct group management here. We're @@ -1632,9 +1563,12 @@ public void testEnsurePollEventSentOnConsumerPoll() { SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); consumer = newConsumer( mock(FetchBuffer.class), - new ConsumerInterceptors<>(Collections.emptyList(), metrics), + new ConsumerInterceptors<>(Collections.emptyList()), mock(ConsumerRebalanceListenerInvoker.class), - subscriptions); + subscriptions, + "group-id", + "client-id", + false); final TopicPartition tp = new TopicPartition("topic", 0); final List> records = singletonList( new ConsumerRecord<>("topic", 0, 2, "key1", "value1")); @@ -1657,7 +1591,7 @@ private Properties requiredConsumerConfigAndGroupId(final String groupId) { return props; } - private void testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout() { + private void testUpdateFetchPositionsWithFetchCommittedOffsetsTimeout(boolean committedOffsetsEnabled) { completeFetchedCommittedOffsetApplicationEventExceptionally(new TimeoutException()); doReturn(Fetch.empty()).when(fetchCollector).collectFetch(any(FetchBuffer.class)); when(applicationEventHandler.addAndGet(any(CheckAndUpdatePositionsEvent.class))).thenReturn(true); @@ -2014,14 +1948,17 @@ public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() { mock(FetchBuffer.class), mock(ConsumerInterceptors.class), mock(ConsumerRebalanceListenerInvoker.class), - mock(SubscriptionState.class)); + mock(SubscriptionState.class), + "group-id", + "client-id", + false); Metrics metrics = consumer.metricsRegistry(); - AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics(); + AsyncConsumerMetrics kafkaConsumerMetrics = consumer.kafkaConsumerMetrics(); ConsumerRebalanceListenerCallbackNeededEvent event = new ConsumerRebalanceListenerCallbackNeededEvent(ON_PARTITIONS_REVOKED, Collections.emptySortedSet()); event.setEnqueuedMs(time.milliseconds()); backgroundEventQueue.add(event); - asyncConsumerMetrics.recordBackgroundEventQueueSize(1); + kafkaConsumerMetrics.recordBackgroundEventQueueSize(1); time.sleep(10); consumer.processBackgroundEvents(); @@ -2030,28 +1967,6 @@ public void testRecordBackgroundEventQueueSizeAndBackgroundEventQueueTime() { assertEquals(10, (double) metrics.metric(metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP)).metricValue()); } - @Test - public void testFailConstructor() { - final Properties props = requiredConsumerConfig(); - props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id"); - props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class"); - final ConsumerConfig config = new ConsumerConfig(props); - - try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - KafkaException ce = assertThrows( - KafkaException.class, - () -> newConsumer(config)); - assertTrue(ce.getMessage().contains("Failed to construct kafka consumer"), "Unexpected exception message: " + ce.getMessage()); - assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); - - boolean npeLogged = appender.getEvents().stream() - .flatMap(event -> event.getThrowableInfo().stream()) - .anyMatch(str -> str.contains("NullPointerException")); - - assertFalse(npeLogged, "Unexpected NullPointerException during consumer construction"); - } - } - private Map mockTopicPartitionOffset() { final TopicPartition t0 = new TopicPartition("t0", 2); final TopicPartition t1 = new TopicPartition("t0", 3); @@ -2210,71 +2125,6 @@ private void markOffsetsReadyForCommitEvent() { }).when(applicationEventHandler).add(ArgumentMatchers.isA(CommitEvent.class)); } - @Test - public void testCloseInvokesStreamsRebalanceListenerOnTasksRevokedWhenMemberEpochPositive() { - final String groupId = "streamsGroup"; - final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); - - try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { - consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); - StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); - consumer.subscribe(singletonList("topic"), mockStreamsListener); - final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); - final int memberEpoch = 42; - final String memberId = "memberId"; - groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); - - consumer.close(CloseOptions.timeout(Duration.ZERO)); - - verify(mockStreamsListener).onTasksRevoked(any()); - } - } - - @Test - public void testCloseInvokesStreamsRebalanceListenerOnAllTasksLostWhenMemberEpochZeroOrNegative() { - final String groupId = "streamsGroup"; - final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); - - try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { - consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); - StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); - consumer.subscribe(singletonList("topic"), mockStreamsListener); - final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); - final int memberEpoch = 0; - final String memberId = "memberId"; - groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); - - consumer.close(CloseOptions.timeout(Duration.ZERO)); - - verify(mockStreamsListener).onAllTasksLost(); - } - } - - @Test - public void testCloseWrapsStreamsRebalanceListenerException() { - final String groupId = "streamsGroup"; - final StreamsRebalanceData streamsRebalanceData = new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of()); - - try (final MockedStatic requestManagers = mockStatic(RequestManagers.class)) { - consumer = newConsumerWithStreamRebalanceData(requiredConsumerConfigAndGroupId(groupId), streamsRebalanceData); - StreamsRebalanceListener mockStreamsListener = mock(StreamsRebalanceListener.class); - RuntimeException testException = new RuntimeException("Test streams listener exception"); - doThrow(testException).when(mockStreamsListener).onTasksRevoked(any()); - consumer.subscribe(singletonList("topic"), mockStreamsListener); - final MemberStateListener groupMetadataUpdateListener = captureGroupMetadataUpdateListener(requestManagers); - final int memberEpoch = 1; - final String memberId = "memberId"; - groupMetadataUpdateListener.onMemberEpochUpdated(Optional.of(memberEpoch), memberId); - - KafkaException thrownException = assertThrows(KafkaException.class, - () -> consumer.close(CloseOptions.timeout(Duration.ZERO))); - - assertInstanceOf(RuntimeException.class, thrownException.getCause()); - assertTrue(thrownException.getCause().getMessage().contains("Test streams listener exception")); - verify(mockStreamsListener).onTasksRevoked(any()); - } - } - private void markReconcileAndAutoCommitCompleteForPollEvent() { doAnswer(invocation -> { PollEvent event = invocation.getArgument(0); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java index 7a999e51163aa..63269b6f5542d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/BackgroundEventHandlerTest.java @@ -23,23 +23,22 @@ import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.MockTime; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.api.Test; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics.BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME; import static org.junit.jupiter.api.Assertions.assertEquals; public class BackgroundEventHandlerTest { private final BlockingQueue backgroundEventsQueue = new LinkedBlockingQueue<>(); - @ParameterizedTest - @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") - public void testRecordBackgroundEventQueueSize(String groupName) { + @Test + public void testRecordBackgroundEventQueueSize() { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName)) { + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics)) { BackgroundEventHandler backgroundEventHandler = new BackgroundEventHandler( backgroundEventsQueue, new MockTime(0), @@ -49,7 +48,7 @@ public void testRecordBackgroundEventQueueSize(String groupName) { assertEquals( 1, (double) metrics.metric( - metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName) + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) ).metricValue() ); @@ -58,7 +57,7 @@ public void testRecordBackgroundEventQueueSize(String groupName) { assertEquals( 0, (double) metrics.metric( - metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, groupName) + metrics.metricName(BACKGROUND_EVENT_QUEUE_SIZE_SENSOR_NAME, CONSUMER_METRIC_GROUP) ).metricValue() ); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java index 26d39715d27ad..f44b285136abb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CommitRequestManagerTest.java @@ -26,17 +26,11 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.GroupAuthorizationException; -import org.apache.kafka.common.errors.InvalidCommitOffsetSizeException; -import org.apache.kafka.common.errors.OffsetMetadataTooLarge; import org.apache.kafka.common.errors.RetriableException; -import org.apache.kafka.common.errors.StaleMemberEpochException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.TopicAuthorizationException; -import org.apache.kafka.common.errors.UnknownMemberIdException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; import org.apache.kafka.common.message.OffsetFetchRequestData; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.ApiKeys; @@ -430,6 +424,16 @@ public void testCommitSyncRetriedAfterExpectedRetriableException(Errors error) { assertExceptionHandling(commitRequestManager, error, true); } + private static Stream commitSyncExpectedExceptions() { + return Stream.of( + Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, Errors.OFFSET_METADATA_TOO_LARGE.exception().getClass()), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE.exception().getClass()), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, Errors.GROUP_AUTHORIZATION_FAILED.exception().getClass()), + Arguments.of(Errors.CORRUPT_MESSAGE, KafkaException.class), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); + } + @Test public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { CommitRequestManager commitRequestManager = create(false, 100); @@ -445,7 +449,7 @@ public void testCommitSyncFailsWithCommitFailedExceptionIfUnknownMemberId() { NetworkClientDelegate.PollResult res = commitRequestManager.poll(time.milliseconds()); assertEquals(0, res.unsentRequests.size()); assertTrue(commitResult.isDone()); - assertFutureThrows(CommitFailedException.class, commitResult); + assertFutureThrows(commitResult, CommitFailedException.class); } @Test @@ -466,7 +470,7 @@ public void testCommitSyncFailsWithCommitFailedExceptionOnStaleMemberEpoch() { // Commit should fail with CommitFailedException assertTrue(commitResult.isDone()); - assertFutureThrows(CommitFailedException.class, commitResult); + assertFutureThrows(commitResult, CommitFailedException.class); } /** @@ -520,7 +524,7 @@ public void testCommitAsyncFailsWithRetriableOnCoordinatorDisconnected() { // Commit should mark the coordinator unknown and fail with RetriableCommitFailedException. assertTrue(commitResult.isDone()); - assertFutureThrows(RetriableCommitFailedException.class, commitResult); + assertFutureThrows(commitResult, RetriableCommitFailedException.class); assertCoordinatorDisconnectHandling(); } @@ -718,8 +722,7 @@ public void testOffsetFetchRequestErroredRequests(final Errors error) { @ParameterizedTest @MethodSource("offsetFetchExceptionSupplier") - public void testOffsetFetchRequestTimeoutRequests(final Errors error, - final Class expectedExceptionClass) { + public void testOffsetFetchRequestTimeoutRequests(final Errors error) { CommitRequestManager commitRequestManager = create(true, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -740,10 +743,10 @@ public void testOffsetFetchRequestTimeoutRequests(final Errors error, assertFalse(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); NetworkClientDelegate.PollResult poll = commitRequestManager.poll(time.milliseconds()); mimicResponse(error, poll); - futures.forEach(f -> assertFutureThrows(expectedExceptionClass, f)); + futures.forEach(f -> assertFutureThrows(f, TimeoutException.class)); assertTrue(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); } else { - futures.forEach(f -> assertFutureThrows(expectedExceptionClass, f)); + futures.forEach(f -> assertFutureThrows(f, KafkaException.class)); assertEmptyPendingRequests(commitRequestManager); } } @@ -753,10 +756,10 @@ public void testSuccessfulOffsetFetch() { CommitRequestManager commitManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); - TopicPartition tp = new TopicPartition("topic1", 0); long deadlineMs = time.milliseconds() + defaultApiTimeoutMs; CompletableFuture> fetchResult = - commitManager.fetchOffsets(Collections.singleton(tp), deadlineMs); + commitManager.fetchOffsets(Collections.singleton(new TopicPartition("test", 0)), + deadlineMs); // Send fetch request NetworkClientDelegate.PollResult result = commitManager.poll(time.milliseconds()); @@ -765,23 +768,14 @@ public void testSuccessfulOffsetFetch() { assertFalse(fetchResult.isDone()); // Complete request with a response + TopicPartition tp = new TopicPartition("topic1", 0); long expectedOffset = 100; - String expectedMetadata = "metadata"; NetworkClientDelegate.UnsentRequest req = result.unsentRequests.get(0); - OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(DEFAULT_GROUP_ID) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(tp.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(tp.partition()) - .setCommittedOffset(expectedOffset) - .setCommittedLeaderEpoch(1) - .setMetadata(expectedMetadata) - )) - )); - req.handler().onComplete(buildOffsetFetchClientResponse(req, groupResponse, false)); + Map topicPartitionData = + Collections.singletonMap( + tp, + new OffsetFetchResponse.PartitionData(expectedOffset, Optional.of(1), "", Errors.NONE)); + req.handler().onComplete(buildOffsetFetchClientResponse(req, topicPartitionData, Errors.NONE, false)); // Validate request future completes with the response received assertTrue(fetchResult.isDone()); @@ -796,7 +790,6 @@ public void testSuccessfulOffsetFetch() { assertEquals(1, offsetsAndMetadata.size()); assertTrue(offsetsAndMetadata.containsKey(tp)); assertEquals(expectedOffset, offsetsAndMetadata.get(tp).offset()); - assertEquals(expectedMetadata, offsetsAndMetadata.get(tp).metadata()); assertEquals(0, commitManager.pendingRequests.inflightOffsetFetches.size(), "Inflight " + "request should be removed from the queue when a response is received."); } @@ -875,7 +868,7 @@ public void testOffsetCommitRequestErroredRequestsNotRetriedForAsyncCommit(final assertTrue(commitResult.isDone()); assertTrue(commitResult.isCompletedExceptionally()); if (error.exception() instanceof RetriableException) { - assertFutureThrows(RetriableCommitFailedException.class, commitResult); + assertFutureThrows(commitResult, RetriableCommitFailedException.class); } // We expect that the request should not have been retried on this async commit. @@ -916,9 +909,7 @@ public void testOffsetCommitSyncTimeoutNotReturnedOnPollAndFails() { */ @ParameterizedTest @MethodSource("offsetCommitExceptionSupplier") - public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires( - final Errors error, - final Class expectedExceptionClass) { + public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExpires(final Errors error) { CommitRequestManager commitRequestManager = create(false, 100); when(coordinatorRequestManager.coordinator()).thenReturn(Optional.of(mockedNode)); @@ -938,7 +929,10 @@ public void testOffsetCommitSyncFailedWithRetriableThrowsTimeoutWhenRetryTimeExp assertEquals(0, res.unsentRequests.size()); assertTrue(commitResult.isDone()); - assertFutureThrows(expectedExceptionClass, commitResult); + if (error.exception() instanceof RetriableException) + assertFutureThrows(commitResult, TimeoutException.class); + else + assertFutureThrows(commitResult, KafkaException.class); } /** @@ -966,7 +960,7 @@ public void testOffsetCommitAsyncFailedWithRetriableThrowsRetriableCommitExcepti assertExceptionHandling(commitRequestManager, retriableError, false); // Request should complete with a RetriableCommitException - assertFutureThrows(RetriableCommitFailedException.class, commitResult); + assertFutureThrows(commitResult, RetriableCommitFailedException.class); } @ParameterizedTest @@ -1316,7 +1310,7 @@ private void testRetriable(final CommitRequestManager commitRequestManager, mimicResponse(error, poll); futures.forEach(f -> { assertTrue(f.isCompletedExceptionally()); - assertFutureThrows(TimeoutException.class, f); + assertFutureThrows(f, TimeoutException.class); }); } @@ -1333,23 +1327,18 @@ private void testNonRetriable(final List offsetCommitExceptionSupplier() { return Stream.of( - // Retriable errors should result in TimeoutException when retry time expires - Arguments.of(Errors.NOT_COORDINATOR, TimeoutException.class), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, TimeoutException.class), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, TimeoutException.class), - Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), - - // Non-retriable errors should result in their specific exceptions - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, OffsetMetadataTooLarge.class), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, InvalidCommitOffsetSizeException.class), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, TopicAuthorizationException.class), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, CommitFailedException.class), - Arguments.of(Errors.STALE_MEMBER_EPOCH, CommitFailedException.class), - - // Generic errors should result in KafkaException - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); + Arguments.of(Errors.NOT_COORDINATOR), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), + Arguments.of(Errors.REQUEST_TIMED_OUT), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), + Arguments.of(Errors.STALE_MEMBER_EPOCH), + Arguments.of(Errors.UNKNOWN_MEMBER_ID)); } /** @@ -1357,27 +1346,21 @@ private static Stream offsetCommitExceptionSupplier() { */ private static Stream offsetFetchExceptionSupplier() { return Stream.of( - // Retriable errors should result in TimeoutException when retry time expires - Arguments.of(Errors.NOT_COORDINATOR, TimeoutException.class), - Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS, TimeoutException.class), - Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE, TimeoutException.class), - Arguments.of(Errors.REQUEST_TIMED_OUT, TimeoutException.class), - Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT, TimeoutException.class), - Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION, TimeoutException.class), - - // Non-retriable errors should result in their specific exceptions - Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED, GroupAuthorizationException.class), - Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE, KafkaException.class), - Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE, KafkaException.class), - - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, KafkaException.class), - Arguments.of(Errors.UNKNOWN_MEMBER_ID, UnknownMemberIdException.class), + Arguments.of(Errors.NOT_COORDINATOR), + Arguments.of(Errors.COORDINATOR_LOAD_IN_PROGRESS), + Arguments.of(Errors.UNKNOWN_SERVER_ERROR), + Arguments.of(Errors.GROUP_AUTHORIZATION_FAILED), + Arguments.of(Errors.OFFSET_METADATA_TOO_LARGE), + Arguments.of(Errors.INVALID_COMMIT_OFFSET_SIZE), + Arguments.of(Errors.UNKNOWN_TOPIC_OR_PARTITION), + Arguments.of(Errors.COORDINATOR_NOT_AVAILABLE), + Arguments.of(Errors.REQUEST_TIMED_OUT), + Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED), + Arguments.of(Errors.UNKNOWN_MEMBER_ID), // Adding STALE_MEMBER_EPOCH as non-retriable here because it is only retried if a new // member epoch is received. Tested separately. - Arguments.of(Errors.STALE_MEMBER_EPOCH, StaleMemberEpochException.class), - - // Generic errors should result in KafkaException - Arguments.of(Errors.UNKNOWN_SERVER_ERROR, KafkaException.class)); + Arguments.of(Errors.STALE_MEMBER_EPOCH), + Arguments.of(Errors.UNSTABLE_OFFSET_COMMIT)); } /** @@ -1411,43 +1394,15 @@ public void testOffsetFetchRequestPartitionDataError(final Errors error, final b assertEquals(1, res.unsentRequests.size()); // Setting 1 partition with error - OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(DEFAULT_GROUP_ID) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(tp1.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(tp1.partition()) - .setCommittedOffset(100L) - .setCommittedLeaderEpoch(1) - .setMetadata("metadata") - .setErrorCode(error.code()) - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(tp2.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(tp2.partition()) - .setCommittedOffset(100L) - .setCommittedLeaderEpoch(1) - .setMetadata("metadata") - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(tp3.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(tp3.partition()) - .setCommittedOffset(100L) - .setCommittedLeaderEpoch(1) - .setMetadata("metadata") - .setErrorCode(error.code()) - )) - )); + HashMap topicPartitionData = new HashMap<>(); + topicPartitionData.put(tp1, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", error)); + topicPartitionData.put(tp2, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", Errors.NONE)); + topicPartitionData.put(tp3, new OffsetFetchResponse.PartitionData(100L, Optional.of(1), "metadata", error)); res.unsentRequests.get(0).handler().onComplete(buildOffsetFetchClientResponse( res.unsentRequests.get(0), - groupResponse, + topicPartitionData, + Errors.NONE, false)); if (isRetriable) testRetriable(commitRequestManager, Collections.singletonList(future), error); @@ -1487,7 +1442,7 @@ public void testPollWithFatalErrorShouldFailAllUnsentRequests() { assertEmptyPendingRequests(commitRequestManager); } - + private static void assertEmptyPendingRequests(CommitRequestManager commitRequestManager) { assertTrue(commitRequestManager.pendingRequests.inflightOffsetFetches.isEmpty()); assertTrue(commitRequestManager.pendingRequests.unsentOffsetFetches.isEmpty()); @@ -1564,7 +1519,6 @@ private List assertPoll( private CommitRequestManager create(final boolean autoCommitEnabled, final long autoCommitInterval) { props.setProperty(AUTO_COMMIT_INTERVAL_MS_CONFIG, String.valueOf(autoCommitInterval)); props.setProperty(ENABLE_AUTO_COMMIT_CONFIG, String.valueOf(autoCommitEnabled)); - props.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); if (autoCommitEnabled) props.setProperty(GROUP_ID_CONFIG, TestUtils.randomString(10)); @@ -1589,26 +1543,18 @@ private ClientResponse buildOffsetFetchClientResponse( final NetworkClientDelegate.UnsentRequest request, final Set topicPartitions, final Errors error) { - OffsetFetchResponseData.OffsetFetchResponseGroup group = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(DEFAULT_GROUP_ID) - .setErrorCode(error.code()) - .setTopics(topicPartitions.stream().collect(Collectors.groupingBy(TopicPartition::topic)).entrySet().stream().map(entry -> - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(entry.getKey()) - .setPartitions(entry.getValue().stream().map(partition -> - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partition.partition()) - .setCommittedOffset(100L) - .setCommittedLeaderEpoch(1) - .setMetadata("metadata") - ).collect(Collectors.toList())) - ).collect(Collectors.toList())); - return buildOffsetFetchClientResponse(request, group, false); + HashMap topicPartitionData = new HashMap<>(); + topicPartitions.forEach(tp -> topicPartitionData.put(tp, new OffsetFetchResponse.PartitionData( + 100L, + Optional.of(1), + "metadata", + Errors.NONE))); + return buildOffsetFetchClientResponse(request, topicPartitionData, error, false); } private ClientResponse buildOffsetFetchClientResponseDisconnected( final NetworkClientDelegate.UnsentRequest request) { - return buildOffsetFetchClientResponse(request, new OffsetFetchResponseData.OffsetFetchResponseGroup(), true); + return buildOffsetFetchClientResponse(request, Collections.emptyMap(), Errors.NONE, true); } private ClientResponse buildOffsetCommitClientResponse(final OffsetCommitResponse commitResponse) { @@ -1724,12 +1670,14 @@ private ClientResponse mockOffsetCommitResponseDisconnected(String topic, int pa private ClientResponse buildOffsetFetchClientResponse( final NetworkClientDelegate.UnsentRequest request, - final OffsetFetchResponseData.OffsetFetchResponseGroup groupResponse, + final Map topicPartitionData, + final Errors error, final boolean disconnected) { AbstractRequest abstractRequest = request.requestBuilder().build(); assertInstanceOf(OffsetFetchRequest.class, abstractRequest); OffsetFetchRequest offsetFetchRequest = (OffsetFetchRequest) abstractRequest; - OffsetFetchResponse response = new OffsetFetchResponse.Builder(groupResponse).build(ApiKeys.OFFSET_FETCH.latestVersion()); + OffsetFetchResponse response = + new OffsetFetchResponse(error, topicPartitionData); return new ClientResponse( new RequestHeader(ApiKeys.OFFSET_FETCH, offsetFetchRequest.version(), "", 1), request.handler(), diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java index f206f23227b76..20389081ffddb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CompletedFetchTest.java @@ -26,6 +26,7 @@ import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.record.ControlRecordType; import org.apache.kafka.common.record.EndTransactionMarker; import org.apache.kafka.common.record.MemoryRecords; @@ -226,15 +227,16 @@ private CompletedFetch newCompletedFetch(long fetchOffset, TP, partitionData, metricAggregator, - fetchOffset); + fetchOffset, + ApiKeys.FETCH.latestVersion()); } private static Deserializers newUuidDeserializers() { - return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer(), null); + return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer()); } private static Deserializers newStringDeserializers() { - return new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null); + return new Deserializers<>(new StringDeserializer(), new StringDeserializer()); } private static FetchConfig newFetchConfig(IsolationLevel isolationLevel, boolean checkCrcs) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java index 623fd765f39e0..a3da5d58e0e70 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerCoordinatorTest.java @@ -20,7 +20,6 @@ import org.apache.kafka.clients.GroupRebalanceConfig; import org.apache.kafka.clients.MockClient; import org.apache.kafka.clients.NodeApiVersions; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.CommitFailedException; import org.apache.kafka.clients.consumer.ConsumerGroupMetadata; import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor; @@ -52,7 +51,6 @@ import org.apache.kafka.common.message.LeaveGroupResponseData; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitResponseData; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.SyncGroupResponseData; import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; @@ -74,6 +72,7 @@ import org.apache.kafka.common.requests.OffsetCommitRequest; import org.apache.kafka.common.requests.OffsetCommitResponse; import org.apache.kafka.common.requests.OffsetFetchResponse; +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.SyncGroupRequest; import org.apache.kafka.common.requests.SyncGroupResponse; @@ -163,7 +162,7 @@ public abstract class ConsumerCoordinatorTest { private final String consumerId2 = "consumer2"; private MockClient client; - private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap<>() { + private final MetadataResponse metadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { { put(topic1, 1); put(topic2, 1); @@ -208,7 +207,7 @@ public void setup() { this.rebalanceListener = new MockRebalanceListener(); this.mockOffsetCommitCallback = new MockCommitCallback(); this.partitionAssignor.clear(); - this.rebalanceConfig = buildRebalanceConfig(Optional.empty(), null); + this.rebalanceConfig = buildRebalanceConfig(Optional.empty()); this.coordinator = buildCoordinator(rebalanceConfig, metrics, assignors, @@ -216,22 +215,21 @@ public void setup() { subscriptions); } - private GroupRebalanceConfig buildRebalanceConfig(Optional groupInstanceId, String rackId) { + private GroupRebalanceConfig buildRebalanceConfig(Optional groupInstanceId) { return new GroupRebalanceConfig(sessionTimeoutMs, rebalanceTimeoutMs, heartbeatIntervalMs, groupId, groupInstanceId, - rackId, retryBackoffMs, retryBackoffMaxMs, - groupInstanceId.isEmpty()); + !groupInstanceId.isPresent()); } @AfterEach public void teardown() { this.metrics.close(); - this.coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT); + this.coordinator.close(time.timer(0)); } @Test @@ -333,7 +331,7 @@ public void testPerformAssignmentShouldUpdateGroupSubscriptionAfterAssignmentIfN List> capturedTopics = topicsCaptor.getAllValues(); // expected the final group subscribed topics to be updated to "topic1" and "topic2" - Set expectedTopicsGotCalled = Set.of(topic1, topic2); + Set expectedTopicsGotCalled = new HashSet<>(Arrays.asList(topic1, topic2)); assertEquals(expectedTopicsGotCalled, capturedTopics.get(1)); } } @@ -368,7 +366,7 @@ private List validateCooperativeA List metadata = new ArrayList<>(); for (Map.Entry> subscriptionEntry : memberSubscriptions.entrySet()) { - ByteBuffer buf; + ByteBuffer buf = null; if (subscriptionEntry.getKey().equals(consumerId)) { buf = ConsumerProtocol.serializeSubscription(subscriptionConsumer1); } else { @@ -568,13 +566,13 @@ public void testCommitAsyncWithUserAssignedType() { assertFalse(client.hasInFlightRequests()); // should try to find coordinator since we are commit async - coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), (offsets, exception) -> - fail("Commit should not get responses, but got offsets:" + offsets + ", and exception:" + exception) - ); + coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), (offsets, exception) -> { + fail("Commit should not get responses, but got offsets:" + offsets + ", and exception:" + exception); + }); coordinator.poll(time.timer(0)); assertTrue(coordinator.coordinatorUnknown()); assertTrue(client.hasInFlightRequests()); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); client.respond(groupCoordinatorResponse(node, Errors.NONE)); coordinator.poll(time.timer(0)); @@ -582,7 +580,7 @@ public void testCommitAsyncWithUserAssignedType() { // after we've discovered the coordinator we should send // out the commit request immediately assertTrue(client.hasInFlightRequests()); - assertEquals(1, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 1); } @Test @@ -615,17 +613,19 @@ public void testEnsureCompletingAsyncCommitsWhenSyncCommitWithoutOffsets() { Map offsets = singletonMap(tp, new OffsetAndMetadata(123)); final AtomicBoolean committed = new AtomicBoolean(); - coordinator.commitOffsetsAsync(offsets, (committedOffsets, exception) -> committed.set(true)); + coordinator.commitOffsetsAsync(offsets, (committedOffsets, exception) -> { + committed.set(true); + }); assertFalse(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(100L)), "expected sync commit to fail"); assertFalse(committed.get()); - assertEquals(1, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 1); prepareOffsetCommitRequest(singletonMap(tp, 123L), Errors.NONE); assertTrue(coordinator.commitOffsetsSync(Collections.emptyMap(), time.timer(Long.MAX_VALUE)), "expected sync commit to succeed"); assertTrue(committed.get(), "expected commit callback to be invoked"); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); } @Test @@ -646,13 +646,13 @@ public void testManyInFlightAsyncCommitsWithCoordinatorDisconnect() { "Unexpected exception cause type: " + (cause == null ? null : cause.getClass())); }); } - assertEquals(numRequests, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), numRequests); coordinator.markCoordinatorUnknown("test cause"); consumerClient.pollNoWakeup(); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(numRequests, responses.get()); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); } @Test @@ -681,8 +681,8 @@ public void testCoordinatorUnknownInUnsentCallbacksAfterCoordinatorDead() { ) ); - consumerClient.send(coordinator.checkAndGetCoordinator(), OffsetCommitRequest.Builder.forTopicNames(offsetCommitRequestData)) - .compose(new RequestFutureAdapter<>() { + consumerClient.send(coordinator.checkAndGetCoordinator(), new OffsetCommitRequest.Builder(offsetCommitRequestData)) + .compose(new RequestFutureAdapter() { @Override public void onSuccess(ClientResponse value, RequestFuture future) {} @@ -697,7 +697,7 @@ public void onFailure(RuntimeException e, RequestFuture future) { coordinator.markCoordinatorUnknown("test cause"); consumerClient.pollNoWakeup(); assertTrue(asyncCallbackInvoked.get()); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); } @Test @@ -1011,7 +1011,6 @@ public void testNormalJoinGroupLeader() { @Test public void testOutdatedCoordinatorAssignment() { - createMockHeartbeatThreadCoordinator(); final String consumerId = "outdated_assignment"; final List owned = Collections.emptyList(); final List oldSubscription = singletonList(topic2); @@ -1279,7 +1278,7 @@ public void testForceMetadataRefreshForPatternSubscriptionDuringRebalance() { coordinator.poll(time.timer(Long.MAX_VALUE)); // Make sure that the metadata was refreshed during the rebalance and thus subscriptions now contain two topics. - final Set updatedSubscriptionSet = Set.of(topic1, topic2); + final Set updatedSubscriptionSet = new HashSet<>(Arrays.asList(topic1, topic2)); assertEquals(updatedSubscriptionSet, subscriptions.subscription()); // Refresh the metadata again. Since there have been no changes since the last refresh, it won't trigger @@ -1293,19 +1292,23 @@ public void testForceMetadataRefreshForPatternSubscriptionDuringRebalance() { public void testForceMetadataDeleteForPatternSubscriptionDuringRebalance() { try (ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, true, subscriptions)) { subscriptions.subscribe(Pattern.compile("test.*"), Optional.of(rebalanceListener)); - client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap<>() { + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, new HashMap() { { put(topic1, 1); put(topic2, 1); } })); coordinator.maybeUpdateSubscriptionMetadata(); - assertEquals(Set.of(topic1, topic2), subscriptions.subscription()); + assertEquals(new HashSet<>(Arrays.asList(topic1, topic2)), subscriptions.subscription()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); - MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, Map.of(topic1, 1)); + MetadataResponse deletedMetadataResponse = RequestTestUtils.metadataUpdateWith(1, new HashMap() { + { + put(topic1, 1); + } + }); // Instrument the test so that metadata will contain only one topic after next refresh. client.prepareMetadataUpdate(deletedMetadataResponse); @@ -1791,7 +1794,7 @@ public void testLeaveGroupOnClose() { return validateLeaveGroup(groupId, consumerId, leaveRequest); }, new LeaveGroupResponse( new LeaveGroupResponseData().setErrorCode(Errors.NONE.code()))); - coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT); + coordinator.close(time.timer(0)); assertTrue(received.get()); } @@ -1806,7 +1809,7 @@ public void testMaybeLeaveGroup() { LeaveGroupRequest leaveRequest = (LeaveGroupRequest) body; return validateLeaveGroup(groupId, consumerId, leaveRequest); }, new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code()))); - coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "test maybe leave group"); + coordinator.maybeLeaveGroup("test maybe leave group"); assertTrue(received.get()); AbstractCoordinator.Generation generation = coordinator.generationIfStable(); @@ -1850,7 +1853,7 @@ public void testPendingMemberShouldLeaveGroup() { return validateLeaveGroup(groupId, consumerId, leaveRequest); }, new LeaveGroupResponse(new LeaveGroupResponseData().setErrorCode(Errors.NONE.code()))); - coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "pending member leaves"); + coordinator.maybeLeaveGroup("pending member leaves"); assertTrue(received.get()); } @@ -2072,7 +2075,7 @@ public void testUpdateMetadataDuringRebalance() { coordinator.poll(time.timer(Long.MAX_VALUE)); assertFalse(coordinator.rejoinNeededOrPending()); - assertEquals(Set.of(tp1, tp2), subscriptions.assignedPartitions()); + assertEquals(new HashSet<>(Arrays.asList(tp1, tp2)), subscriptions.assignedPartitions()); } /** @@ -2264,7 +2267,7 @@ public void testRejoinGroup() { // and join the group again rebalanceListener.revoked = null; rebalanceListener.assigned = null; - subscriptions.subscribe(Set.of(topic1, otherTopic), Optional.of(rebalanceListener)); + subscriptions.subscribe(new HashSet<>(Arrays.asList(topic1, otherTopic)), Optional.of(rebalanceListener)); client.prepareResponse(joinGroupFollowerResponse(2, consumerId, "leader", Errors.NONE)); client.prepareResponse(syncGroupResponse(assigned, Errors.NONE)); coordinator.joinGroupIfNeeded(time.timer(Long.MAX_VALUE)); @@ -2350,7 +2353,7 @@ private void testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors error) MockCommitCallback secondCommitCallback = new MockCommitCallback(); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), firstCommitCallback); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), secondCommitCallback); - assertEquals(2, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 2); respondToOffsetCommitRequest(singletonMap(t1p, 100L), error); consumerClient.pollNoWakeup(); @@ -2360,7 +2363,7 @@ private void testInFlightRequestsFailedAfterCoordinatorMarkedDead(Errors error) assertTrue(coordinator.coordinatorUnknown()); assertInstanceOf(RetriableCommitFailedException.class, firstCommitCallback.exception); assertInstanceOf(RetriableCommitFailedException.class, secondCommitCallback.exception); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); } @Test @@ -2549,7 +2552,7 @@ public void testCommitOffsetAsyncWithDefaultCallback() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NONE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked); assertNull(mockOffsetCommitCallback.exception); @@ -2566,7 +2569,7 @@ public void testCommitAfterLeaveGroup() { client.prepareResponse(new LeaveGroupResponse(new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code()))); subscriptions.unsubscribe(); - coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "test commit after leave"); + coordinator.maybeLeaveGroup("test commit after leave"); subscriptions.assignFromUser(singleton(t1p)); // the client should not reuse generation/memberId from auto-subscribed generation @@ -2580,7 +2583,7 @@ public void testCommitAfterLeaveGroup() { coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), callback(success)); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(success.get()); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); } @Test @@ -2590,7 +2593,7 @@ public void testCommitOffsetAsyncFailedWithDefaultCallback() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), mockOffsetCommitCallback); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); assertEquals(invokedBeforeTest + 1, mockOffsetCommitCallback.invoked); assertInstanceOf(RetriableCommitFailedException.class, mockOffsetCommitCallback.exception); @@ -2605,7 +2608,7 @@ public void testCommitOffsetAsyncCoordinatorNotAvailable() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.COORDINATOR_NOT_AVAILABLE); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2622,7 +2625,7 @@ public void testCommitOffsetAsyncNotCoordinator() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.NOT_COORDINATOR); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2639,7 +2642,7 @@ public void testCommitOffsetAsyncDisconnected() { MockCommitCallback cb = new MockCommitCallback(); prepareOffsetCommitRequestDisconnect(singletonMap(t1p, 100L)); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), cb); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); assertTrue(coordinator.coordinatorUnknown()); @@ -2692,7 +2695,12 @@ public void testAsyncCommitCallbacksInvokedPriorToSyncCommitCompletion() throws final OffsetAndMetadata firstOffset = new OffsetAndMetadata(0L); final OffsetAndMetadata secondOffset = new OffsetAndMetadata(1L); - coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), (offsets, exception) -> committedOffsets.add(firstOffset)); + coordinator.commitOffsetsAsync(singletonMap(t1p, firstOffset), new OffsetCommitCallback() { + @Override + public void onComplete(Map offsets, Exception exception) { + committedOffsets.add(firstOffset); + } + }); // Do a synchronous commit in the background so that we can send both responses at the same time Thread thread = new Thread() { @@ -2703,7 +2711,7 @@ public void run() { } }; - assertEquals(1, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 1); thread.start(); client.waitForRequests(2, 5000); @@ -2711,7 +2719,7 @@ public void run() { respondToOffsetCommitRequest(singletonMap(t1p, secondOffset.offset()), Errors.NONE); thread.join(); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); assertEquals(Arrays.asList(firstOffset, secondOffset), committedOffsets); } @@ -2975,7 +2983,7 @@ public void testCommitOffsetFencedInstanceWithNewGeneration() { @Test public void testCommitOffsetShouldNotSetInstanceIdIfMemberIdIsUnknown() { - rebalanceConfig = buildRebalanceConfig(groupInstanceId, null); + rebalanceConfig = buildRebalanceConfig(groupInstanceId); ConsumerCoordinator coordinator = buildCoordinator( rebalanceConfig, new Metrics(), @@ -3100,7 +3108,7 @@ public void testRefreshOffsetWithValidation() { assertEquals(Collections.emptySet(), subscriptions.initializingPartitions()); assertFalse(subscriptions.hasAllFetchPositions()); assertTrue(subscriptions.awaitingValidation(t1p)); - assertEquals(100L, subscriptions.position(t1p).offset); + assertEquals(subscriptions.position(t1p).offset, 100L); assertNull(subscriptions.validPosition(t1p)); } @@ -3112,19 +3120,10 @@ public void testFetchCommittedOffsets() { long offset = 500L; String metadata = "blahblah"; Optional leaderEpoch = Optional.of(15); + OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, leaderEpoch, + metadata, Errors.NONE); - client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t1p.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p.partition()) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get()) - .setMetadata(metadata) - )) - ))); - + client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); Map fetchedOffsets = coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE)); @@ -3137,17 +3136,10 @@ public void testTopicAuthorizationFailedInOffsetFetch() { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); - client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t1p.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p.partition()) - .setCommittedOffset(-1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()) - )) - ))); + OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(-1, Optional.empty(), + "", Errors.TOPIC_AUTHORIZATION_FAILED); + client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); TopicAuthorizationException exception = assertThrows(TopicAuthorizationException.class, () -> coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE))); @@ -3160,7 +3152,7 @@ public void testRefreshOffsetsGroupNotAuthorized() { coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); subscriptions.assignFromUser(singleton(t1p)); - client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED, List.of())); + client.prepareResponse(offsetFetchResponse(Errors.GROUP_AUTHORIZATION_FAILED, Collections.emptyMap())); try { coordinator.initWithCommittedOffsetsIfNeeded(time.timer(Long.MAX_VALUE)); fail("Expected group authorization error"); @@ -3209,7 +3201,7 @@ public void testRefreshOffsetRetriableErrorCoordinatorLookup(Errors error, boole coordinator.ensureCoordinatorReady(time.timer(Long.MAX_VALUE)); subscriptions.assignFromUser(singleton(t1p)); - client.prepareResponse(offsetFetchResponse(error, List.of())); + client.prepareResponse(offsetFetchResponse(error, Collections.emptyMap())); if (expectCoordinatorRelookup) { client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); } @@ -3269,9 +3261,12 @@ public void testNoCoordinatorDiscoveryIfPartitionAwaitingReset() { public void testAuthenticationFailureInEnsureActiveGroup() { client.createPendingAuthenticationError(node, 300); - assertThrows(AuthenticationException.class, - () -> coordinator.ensureActiveGroup(), - "Expected an authentication error."); + try { + coordinator.ensureActiveGroup(); + fail("Expected an authentication error."); + } catch (AuthenticationException e) { + // OK + } } @Test @@ -3470,7 +3465,7 @@ public void testCommitOffsetRequestAsyncAlwaysReceiveFencedException() { assertThrows(FencedInstanceIdException.class, this::receiveFencedInstanceIdException); assertThrows(FencedInstanceIdException.class, () -> coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback())); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); assertThrows(FencedInstanceIdException.class, () -> coordinator.commitOffsetsSync(singletonMap(t1p, new OffsetAndMetadata(100L)), time.timer(Long.MAX_VALUE))); } @@ -3550,7 +3545,7 @@ public void testPrepareJoinAndRejoinAfterFailedRebalance() { assertEquals(memberId, coordinator.generation().memberId); // Imitating heartbeat thread that clears generation data. - coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "Clear generation data."); + coordinator.maybeLeaveGroup("Clear generation data."); assertEquals(AbstractCoordinator.Generation.NO_GENERATION, coordinator.generation()); @@ -3697,6 +3692,7 @@ private void supportStableFlag(final short upperVersion, final boolean expectThr autoCommitIntervalMs, null, true, + null, Optional.empty()); client.prepareResponse(groupCoordinatorResponse(node, Errors.NONE)); @@ -3705,19 +3701,14 @@ private void supportStableFlag(final short upperVersion, final boolean expectThr long offset = 500L; String metadata = "blahblah"; Optional leaderEpoch = Optional.of(15); + OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, leaderEpoch, + metadata, Errors.NONE); - client.prepareResponse(offsetFetchResponse(Errors.NONE, List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(t1p.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(t1p.partition()) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get()) - .setMetadata(metadata) - )) - ))); - + if (upperVersion < 8) { + client.prepareResponse(new OffsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); + } else { + client.prepareResponse(offsetFetchResponse(Errors.NONE, singletonMap(t1p, data))); + } if (expectThrows) { assertThrows(UnsupportedVersionException.class, () -> coordinator.fetchCommittedOffsets(singleton(t1p), time.timer(Long.MAX_VALUE))); @@ -3739,7 +3730,7 @@ private void receiveFencedInstanceIdException() { prepareOffsetCommitRequest(singletonMap(t1p, 100L), Errors.FENCED_INSTANCE_ID); coordinator.commitOffsetsAsync(singletonMap(t1p, new OffsetAndMetadata(100L)), new MockCommitCallback()); - assertEquals(0, coordinator.inFlightAsyncCommits.get()); + assertEquals(coordinator.inFlightAsyncCommits.get(), 0); coordinator.invokeCompletedOffsetCommitCallbacks(); } @@ -3747,7 +3738,7 @@ private ConsumerCoordinator prepareCoordinatorForCloseTest(final boolean useGrou final boolean autoCommit, final Optional groupInstanceId, final boolean shouldPoll) { - rebalanceConfig = buildRebalanceConfig(groupInstanceId, null); + rebalanceConfig = buildRebalanceConfig(groupInstanceId); ConsumerCoordinator coordinator = buildCoordinator(rebalanceConfig, new Metrics(), assignors, @@ -3791,7 +3782,7 @@ private void closeVerifyTimeout(final ConsumerCoordinator coordinator, // Run close on a different thread. Coordinator is locked by this thread, so it is // not safe to use the coordinator from the main thread until the task completes. Future future = executor.submit( - () -> coordinator.close(time.timer(Math.min(closeTimeoutMs, requestTimeoutMs)), CloseOptions.GroupMembershipOperation.DEFAULT)); + () -> coordinator.close(time.timer(Math.min(closeTimeoutMs, requestTimeoutMs)))); // Wait for close to start. If coordinator is known, wait for close to queue // at least one request. Otherwise, sleep for a short time. if (!coordinatorUnknown) @@ -3865,6 +3856,7 @@ private ConsumerCoordinator buildCoordinator(final GroupRebalanceConfig rebalanc autoCommitIntervalMs, null, false, + null, Optional.empty()); } @@ -3981,20 +3973,10 @@ private OffsetCommitResponse offsetCommitResponse(Map re return new OffsetCommitResponse(responseData); } - private OffsetFetchResponse offsetFetchResponse( - Errors errors, - List topics - ) { - return new OffsetFetchResponse( - new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(groupId) - .setErrorCode(errors.code()) - .setTopics(topics) - )), - ApiKeys.OFFSET_FETCH.latestVersion() - ); + private OffsetFetchResponse offsetFetchResponse(Errors error, Map responseData) { + return new OffsetFetchResponse(throttleMs, + singletonMap(groupId, error), + singletonMap(groupId, responseData)); } private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partitionLevelError, String metadata, long offset) { @@ -4002,18 +3984,9 @@ private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partit } private OffsetFetchResponse offsetFetchResponse(TopicPartition tp, Errors partitionLevelError, String metadata, long offset, Optional epoch) { - return offsetFetchResponse(Errors.NONE, List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(tp.topic()) - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(tp.partition()) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(epoch.orElse(-1)) - .setMetadata(metadata) - .setErrorCode(partitionLevelError.code()) - )) - )); + OffsetFetchResponse.PartitionData data = new OffsetFetchResponse.PartitionData(offset, + epoch, metadata, partitionLevelError); + return offsetFetchResponse(Errors.NONE, singletonMap(tp, data)); } private OffsetCommitCallback callback(final AtomicBoolean success) { @@ -4104,14 +4077,13 @@ private OffsetCommitCallback callback(final Map> assign(Map partitionsPerTopic, Map subscriptions) { subscriptions.forEach((consumer, subscription) -> { - if (subscription.rackId().isEmpty()) + if (!subscription.rackId().isPresent()) throw new IllegalStateException("Rack id not provided in subscription for " + consumer); rackIds.add(subscription.rackId().get()); }); return super.assign(partitionsPerTopic, subscriptions); } } - - private void createMockHeartbeatThreadCoordinator() { - metrics.close(); - coordinator.close(time.timer(0), CloseOptions.GroupMembershipOperation.DEFAULT); - - metrics = new Metrics(time); - coordinator = new ConsumerCoordinator( - rebalanceConfig, - new LogContext(), - consumerClient, - assignors, - metadata, - subscriptions, - metrics, - consumerId + groupId, - time, - false, - autoCommitIntervalMs, - null, - false, - Optional.empty(), - Optional.of(() -> Mockito.mock(BaseHeartbeatThread.class))); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java index 9063ae5ab5bf4..7bf35f2261729 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerHeartbeatRequestManagerTest.java @@ -18,9 +18,9 @@ import org.apache.kafka.clients.ClientResponse; import org.apache.kafka.clients.Metadata; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.SubscriptionPattern; +import org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager.HeartbeatRequestState; import org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.LocalAssignment; import org.apache.kafka.clients.consumer.internals.ConsumerHeartbeatRequestManager.HeartbeatState; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; @@ -65,11 +65,7 @@ import java.util.Properties; import java.util.Set; import java.util.SortedSet; -import java.util.stream.Stream; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.DEFAULT; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.LEAVE_GROUP; -import static org.apache.kafka.clients.consumer.CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP; import static org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager.CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG; import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.REGEX_RESOLUTION_NOT_SUPPORTED_MSG; import static org.apache.kafka.common.utils.Utils.mkSortedSet; @@ -449,7 +445,7 @@ public void testHeartbeatRequestFailureNotifiedToGroupManagerAfterErrorPropagate time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); assertEquals(1, result.unsentRequests.size()); - createHeartbeatResponse(result.unsentRequests.get(0), Errors.GROUP_AUTHORIZATION_FAILED); + ClientResponse response = createHeartbeatResponse(result.unsentRequests.get(0), Errors.GROUP_AUTHORIZATION_FAILED); result.unsentRequests.get(0).handler().onFailure(time.milliseconds(), new AuthenticationException("Fatal error in HB")); // The error should be propagated before notifying the group manager. This ensures that the app thread is aware @@ -620,9 +616,9 @@ public void testHeartbeatResponseOnErrorHandling(final Errors error, final boole * 2. Required HB API version is not available. */ @ParameterizedTest - @ValueSource(strings = {CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG}) - public void testUnsupportedVersionFromBroker(String errorMsg) { - mockResponseWithException(new UnsupportedVersionException(errorMsg), true); + @ValueSource(strings = {CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG, REGEX_RESOLUTION_NOT_SUPPORTED_MSG}) + public void testUnsupportedVersion(String errorMsg) { + mockResponseWithException(new UnsupportedVersionException(errorMsg)); ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); @@ -631,30 +627,25 @@ public void testUnsupportedVersionFromBroker(String errorMsg) { clearInvocations(backgroundEventHandler); } - /** - * This validates the UnsupportedApiVersion the client generates while building a HB if: - * REGEX_RESOLUTION_NOT_SUPPORTED_MSG only generated on the client side. - */ - @ParameterizedTest - @ValueSource(strings = {CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG, REGEX_RESOLUTION_NOT_SUPPORTED_MSG}) - public void testUnsupportedVersionFromClient(String errorMsg) { - mockResponseWithException(new UnsupportedVersionException(errorMsg), false); - ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); - verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); - ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); - assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); - assertEquals(errorMsg, errorEvent.error().getMessage()); - clearInvocations(backgroundEventHandler); + private void mockErrorResponse(Errors error, String exceptionCustomMsg) { + time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); + NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); + assertEquals(1, result.unsentRequests.size()); + + when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); + ClientResponse response = createHeartbeatResponse( + result.unsentRequests.get(0), error, exceptionCustomMsg); + result.unsentRequests.get(0).handler().onComplete(response); } - private void mockResponseWithException(UnsupportedVersionException exception, boolean isFromBroker) { + private void mockResponseWithException(UnsupportedVersionException exception) { time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); assertEquals(1, result.unsentRequests.size()); when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); ClientResponse response = createHeartbeatResponseWithException( - result.unsentRequests.get(0), exception, isFromBroker); + result.unsentRequests.get(0), exception); result.unsentRequests.get(0).handler().onComplete(response); } @@ -771,29 +762,6 @@ public void testPollTimerExpiration() { assertHeartbeat(heartbeatRequestManager, DEFAULT_HEARTBEAT_INTERVAL_MS); } - @ParameterizedTest - @MethodSource("pollOnLeavingMatrix") - public void testPollOnLeaving(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation) { - heartbeatRequestManager = createHeartbeatRequestManager( - coordinatorRequestManager, - membershipManager, - heartbeatState, - heartbeatRequestState, - backgroundEventHandler); - when(membershipManager.state()).thenReturn(MemberState.LEAVING); - when(membershipManager.groupInstanceId()).thenReturn(groupInstanceId); - when(membershipManager.leaveGroupOperation()).thenReturn(operation); - - if (groupInstanceId.isEmpty() && REMAIN_IN_GROUP == operation) { - assertNoHeartbeat(heartbeatRequestManager); - verify(membershipManager, never()).onHeartbeatRequestGenerated(); - } else { - assertHeartbeat(heartbeatRequestManager, DEFAULT_HEARTBEAT_INTERVAL_MS); - verify(membershipManager).onHeartbeatRequestGenerated(); - } - - } - /** * This is expected to be the case where a member is already leaving the group and the poll * timer expires. The poll timer expiration should not transition the member to STALE, and @@ -876,7 +844,6 @@ public void testSendingLeaveGroupHeartbeatWhenPreviousOneInFlight(final short ve assertEquals(0, result.unsentRequests.size(), "No heartbeat should be sent while a previous one is in-flight"); when(membershipManager.state()).thenReturn(MemberState.LEAVING); - when(membershipManager.groupInstanceId()).thenReturn(Optional.empty()); when(heartbeatState.buildRequestData()).thenReturn(new ConsumerGroupHeartbeatRequestData().setMemberEpoch(-1)); ConsumerGroupHeartbeatRequest heartbeatToLeave = getHeartbeatRequest(heartbeatRequestManager, version); assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH, heartbeatToLeave.data().memberEpoch()); @@ -932,26 +899,17 @@ topicId, mkSortedSet(partition) assertEquals(Collections.singletonList(partition), topicPartitions.partitions()); } - @ParameterizedTest - @MethodSource("pollOnLeavingMatrix") - public void testPollOnCloseGeneratesRequestIfNeeded(Optional groupInstanceId, CloseOptions.GroupMembershipOperation operation) { - if (groupInstanceId.isEmpty() && REMAIN_IN_GROUP == operation) - when(membershipManager.isLeavingGroup()).thenReturn(false); - else - when(membershipManager.isLeavingGroup()).thenReturn(true); - when(membershipManager.groupInstanceId()).thenReturn(groupInstanceId); - when(membershipManager.leaveGroupOperation()).thenReturn(operation); - String membership = groupInstanceId.isEmpty() ? "dynamic" : "static"; + @Test + public void testPollOnCloseGeneratesRequestIfNeeded() { + when(membershipManager.isLeavingGroup()).thenReturn(true); NetworkClientDelegate.PollResult pollResult = heartbeatRequestManager.pollOnClose(time.milliseconds()); - if (groupInstanceId.isEmpty() && REMAIN_IN_GROUP == operation) { - assertTrue(pollResult.unsentRequests.isEmpty(), - "A request to leave the group should not be generated if the " + membership + " is still leaving when closing the manager " + - "and GroupMembershipOperation is " + operation.name()); - } else { - assertEquals(1, pollResult.unsentRequests.size(), - "A request to leave the group should be generated if the " + membership + " is still leaving when closing the manager " + - "and GroupMembershipOperation is " + operation.name()); - } + assertEquals(1, pollResult.unsentRequests.size(), + "A request to leave the group should be generated if the member is still leaving when closing the manager"); + + when(membershipManager.isLeavingGroup()).thenReturn(false); + pollResult = heartbeatRequestManager.pollOnClose(time.milliseconds()); + assertTrue(pollResult.unsentRequests.isEmpty(), + "No requests should be generated on close if the member is not leaving when closing the manager"); } @Test @@ -1009,34 +967,6 @@ public void testRegexInJoiningHeartbeat() { assertNull(data.subscribedTopicRegex()); } - @Test - public void testRackIdInHeartbeatLifecycle() { - heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); - createHeartbeatRequestStateWithZeroHeartbeatInterval(); - - // Initial heartbeat with rackId - mockJoiningMemberData(null); - when(membershipManager.rackId()).thenReturn(Optional.of("rack1")); - ConsumerGroupHeartbeatRequestData data = heartbeatState.buildRequestData(); - assertEquals("rack1", data.rackId()); - - // RackId not included in HB if member state is not JOINING - when(membershipManager.state()).thenReturn(MemberState.STABLE); - data = heartbeatState.buildRequestData(); - assertNull(data.rackId()); - - // RackId included in HB if member state changes to JOINING again - when(membershipManager.state()).thenReturn(MemberState.JOINING); - data = heartbeatState.buildRequestData(); - assertEquals("rack1", data.rackId()); - - // Empty rackId not included in HB - when(membershipManager.rackId()).thenReturn(Optional.empty()); - heartbeatState = new HeartbeatState(subscriptions, membershipManager, DEFAULT_MAX_POLL_INTERVAL_MS); - data = heartbeatState.buildRequestData(); - assertNull(data.rackId()); - } - private void assertHeartbeat(ConsumerHeartbeatRequestManager hrm, int nextPollMs) { NetworkClientDelegate.PollResult pollResult = hrm.poll(time.milliseconds()); assertEquals(1, pollResult.unsentRequests.size()); @@ -1083,8 +1013,7 @@ private static Collection errorProvider() { Arguments.of(Errors.UNSUPPORTED_VERSION, true), Arguments.of(Errors.UNRELEASED_INSTANCE_ID, true), Arguments.of(Errors.FENCED_INSTANCE_ID, true), - Arguments.of(Errors.GROUP_MAX_SIZE_REACHED, true), - Arguments.of(Errors.TOPIC_AUTHORIZATION_FAILED, false)); + Arguments.of(Errors.GROUP_MAX_SIZE_REACHED, true)); } private ClientResponse createHeartbeatResponse(NetworkClientDelegate.UnsentRequest request, @@ -1120,13 +1049,9 @@ private ClientResponse createHeartbeatResponse( private ClientResponse createHeartbeatResponseWithException( final NetworkClientDelegate.UnsentRequest request, - final UnsupportedVersionException exception, - final boolean isFromBroker + final UnsupportedVersionException exception ) { - ConsumerGroupHeartbeatResponse response = null; - if (isFromBroker) { - response = new ConsumerGroupHeartbeatResponse(null); - } + ConsumerGroupHeartbeatResponse response = new ConsumerGroupHeartbeatResponse(null); return new ClientResponse( new RequestHeader(ApiKeys.CONSUMER_GROUP_HEARTBEAT, ApiKeys.CONSUMER_GROUP_HEARTBEAT.latestVersion(), "client-id", 1), request.handler(), @@ -1204,15 +1129,4 @@ private void mockReconcilingMemberData(Map> assignment) when(membershipManager.groupId()).thenReturn(DEFAULT_GROUP_ID); when(membershipManager.serverAssignor()).thenReturn(Optional.of(DEFAULT_REMOTE_ASSIGNOR)); } - - private static Stream pollOnLeavingMatrix() { - return Stream.of( - Arguments.of(Optional.empty(), DEFAULT), - Arguments.of(Optional.empty(), LEAVE_GROUP), - Arguments.of(Optional.empty(), REMAIN_IN_GROUP), - Arguments.of(Optional.of("groupInstanceId"), DEFAULT), - Arguments.of(Optional.of("groupInstanceId"), LEAVE_GROUP), - Arguments.of(Optional.of("groupInstanceId"), REMAIN_IN_GROUP) - ); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java index e6b091e2fd20f..5a7d85369ea06 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerInterceptorsTest.java @@ -116,7 +116,7 @@ public void testOnConsumeChain() { FilterConsumerInterceptor interceptor2 = new FilterConsumerInterceptor<>(filterPartition2); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList, null); + ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList); // verify that onConsumer modifies ConsumerRecords Map>> records = new HashMap<>(); @@ -177,7 +177,7 @@ public void testOnCommitChain() { FilterConsumerInterceptor interceptor2 = new FilterConsumerInterceptor<>(filterPartition2); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList, null); + ConsumerInterceptors interceptors = new ConsumerInterceptors<>(interceptorList); // verify that onCommit is called for all interceptors in the chain Map offsets = new HashMap<>(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java index 9edf178182831..3a93c25072da2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMembershipManagerTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.clients.consumer.internals; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.BackgroundEventHandler; @@ -71,8 +70,6 @@ import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; import static org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer.invokeRebalanceCallbacks; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP_PREFIX; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.COORDINATOR_METRICS_SUFFIX; import static org.apache.kafka.common.requests.ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; @@ -127,7 +124,7 @@ public void setup() { time = new MockTime(0); backgroundEventHandler = new BackgroundEventHandler(backgroundEventQueue, time, mock(AsyncConsumerMetrics.class)); metrics = new Metrics(time); - rebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics, subscriptionState); + rebalanceMetricsManager = new ConsumerRebalanceMetricsManager(metrics); when(commitRequestManager.maybeAutoCommitSyncBeforeRebalance(anyLong())).thenReturn(CompletableFuture.completedFuture(null)); } @@ -144,20 +141,17 @@ private ConsumerMembershipManager createMembershipManagerJoiningGroup(String gro private ConsumerMembershipManager createMembershipManager(String groupInstanceId) { ConsumerMembershipManager manager = spy(new ConsumerMembershipManager( - GROUP_ID, Optional.ofNullable(groupInstanceId), Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), + GROUP_ID, Optional.ofNullable(groupInstanceId), REBALANCE_TIMEOUT, Optional.empty(), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true)); assertMemberIdIsGenerated(manager.memberId()); return manager; } - private ConsumerMembershipManager createMembershipManagerJoiningGroup( - String groupInstanceId, - String serverAssignor, - String rackId - ) { + private ConsumerMembershipManager createMembershipManagerJoiningGroup(String groupInstanceId, + String serverAssignor) { ConsumerMembershipManager manager = spy(new ConsumerMembershipManager( - GROUP_ID, Optional.ofNullable(groupInstanceId), Optional.ofNullable(rackId), REBALANCE_TIMEOUT, + GROUP_ID, Optional.ofNullable(groupInstanceId), REBALANCE_TIMEOUT, Optional.ofNullable(serverAssignor), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true)); assertMemberIdIsGenerated(manager.memberId()); @@ -170,28 +164,10 @@ public void testMembershipManagerServerAssignor() { ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(); assertEquals(Optional.empty(), membershipManager.serverAssignor()); - membershipManager = createMembershipManagerJoiningGroup("instance1", "Uniform", null); + membershipManager = createMembershipManagerJoiningGroup("instance1", "Uniform"); assertEquals(Optional.of("Uniform"), membershipManager.serverAssignor()); } - @Test - public void testMembershipManagerRackId() { - ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(); - assertEquals(Optional.empty(), membershipManager.rackId()); - - membershipManager = createMembershipManagerJoiningGroup(null, null, "rack1"); - assertEquals(Optional.of("rack1"), membershipManager.rackId()); - } - - @Test - public void testAssignedPartitionCountMetricRegistered() { - MetricName metricName = metrics.metricName( - "assigned-partitions", - CONSUMER_METRIC_GROUP_PREFIX + COORDINATOR_METRICS_SUFFIX - ); - assertNotNull(metrics.metric(metricName), "Metric assigned-partitions should have been registered"); - } - @Test public void testMembershipManagerInitSupportsEmptyGroupInstanceId() { createMembershipManagerJoiningGroup(); @@ -254,7 +230,7 @@ public void testTransitionToFatal() { @Test public void testTransitionToFailedWhenTryingToJoin() { ConsumerMembershipManager membershipManager = new ConsumerMembershipManager( - GROUP_ID, Optional.empty(), Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), + GROUP_ID, Optional.empty(), REBALANCE_TIMEOUT, Optional.empty(), subscriptionState, commitRequestManager, metadata, LOG_CONTEXT, backgroundEventHandler, time, rebalanceMetricsManager, true); assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); @@ -448,10 +424,6 @@ private void assertTransitionToUnsubscribeOnHBSentAndWaitForResponseToCompleteLe membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(new Assignment(), membershipManager.memberId())); - assertFalse(sendLeave.isDone(), "Send leave operation should not complete until a leave response is received"); - - membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); - assertSendLeaveCompleted(membershipManager, sendLeave); } @@ -476,54 +448,6 @@ public void testLeaveGroupEpoch() { membershipManager.memberEpoch()); } - @Test - public void testLeaveGroupEpochOnClose() { - // Static member should leave the group with epoch -2 with GroupMembershipOperation.DEFAULT - ConsumerMembershipManager membershipManager = createMemberInStableState("instance1"); - mockLeaveGroup(); - membershipManager.leaveGroupOnClose(CloseOptions.GroupMembershipOperation.DEFAULT); - verify(subscriptionState).unsubscribe(); - assertEquals(MemberState.LEAVING, membershipManager.state()); - assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH, - membershipManager.memberEpoch()); - - // Static member should leave the group with epoch -1 with GroupMembershipOperation.LEAVE_GROUP - membershipManager = createMemberInStableState("instance1"); - mockLeaveGroup(); - membershipManager.leaveGroupOnClose(CloseOptions.GroupMembershipOperation.LEAVE_GROUP); - verify(subscriptionState).unsubscribe(); - assertEquals(MemberState.LEAVING, membershipManager.state()); - assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH, - membershipManager.memberEpoch()); - - // Static member should leave the group with epoch -2 with GroupMembershipOperation.REMAIN_IN_GROUP - membershipManager = createMemberInStableState("instance1"); - mockLeaveGroup(); - membershipManager.leaveGroupOnClose(CloseOptions.GroupMembershipOperation.REMAIN_IN_GROUP); - verify(subscriptionState).unsubscribe(); - assertEquals(MemberState.LEAVING, membershipManager.state()); - assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_STATIC_MEMBER_EPOCH, - membershipManager.memberEpoch()); - - // Dynamic member should leave the group with epoch -1 with GroupMembershipOperation.DEFAULT - membershipManager = createMemberInStableState(null); - mockLeaveGroup(); - membershipManager.leaveGroupOnClose(CloseOptions.GroupMembershipOperation.DEFAULT); - verify(subscriptionState).unsubscribe(); - assertEquals(MemberState.LEAVING, membershipManager.state()); - assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH, - membershipManager.memberEpoch()); - - // Dynamic member should leave the group with epoch -1 with GroupMembershipOperation.LEAVE_GROUP - membershipManager = createMemberInStableState(null); - mockLeaveGroup(); - membershipManager.leaveGroupOnClose(CloseOptions.GroupMembershipOperation.LEAVE_GROUP); - verify(subscriptionState).unsubscribe(); - assertEquals(MemberState.LEAVING, membershipManager.state()); - assertEquals(ConsumerGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH, - membershipManager.memberEpoch()); - } - /** * This is the case where a member is stuck reconciling and transitions out of the RECONCILING * state (due to failure). When the reconciliation completes it should not be applied because @@ -982,9 +906,6 @@ public void testHeartbeatSuccessfulResponseWhenLeavingGroupCompletesLeave() { assertFalse(leaveResult.isDone()); membershipManager.onHeartbeatSuccess(createConsumerGroupHeartbeatResponse(createAssignment(true), membershipManager.memberId())); - assertFalse(leaveResult.isDone()); - - membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); assertSendLeaveCompleted(membershipManager, leaveResult); } @@ -1028,43 +949,16 @@ public void testIgnoreHeartbeatResponseWhenNotInGroup(MemberState state) { assertEquals(state, membershipManager.state()); verify(responseData, never()).memberId(); - // In unsubscribed, we check if we received a leave group response, so we do verify member epoch. - if (state != MemberState.UNSUBSCRIBED) { - verify(responseData, never()).memberEpoch(); - } + verify(responseData, never()).memberEpoch(); verify(responseData, never()).assignment(); } @Test - public void testIgnoreLeaveResponseWhenNotLeavingGroup() { - ConsumerMembershipManager membershipManager = createMemberInStableState(); - - CompletableFuture leaveResult = membershipManager.leaveGroup(); - - // Send leave request, transitioning to UNSUBSCRIBED state - membershipManager.onHeartbeatRequestGenerated(); - assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); - - // Receive a previous heartbeat response, which should be ignored - membershipManager.onHeartbeatSuccess(new ConsumerGroupHeartbeatResponse( - new ConsumerGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setMemberId(membershipManager.memberId()) - .setMemberEpoch(MEMBER_EPOCH) - )); - assertFalse(leaveResult.isDone()); - - // Receive a leave heartbeat response, which should unblock the consumer - membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); - - // Consumer unblocks and updates subscription - membershipManager.onSubscriptionUpdated(); - membershipManager.onConsumerPoll(); - - membershipManager.onHeartbeatSuccess(createConsumerGroupLeaveResponse(membershipManager.memberId())); + public void testLeaveGroupWhenStateIsReconciling() { + ConsumerMembershipManager membershipManager = mockJoinAndReceiveAssignment(false); + assertEquals(MemberState.RECONCILING, membershipManager.state()); - assertEquals(MemberState.JOINING, membershipManager.state()); - assertEquals(0, membershipManager.memberEpoch()); + testLeaveGroupReleasesAssignmentAndResetsEpochToSendLeaveGroup(membershipManager); } @Test @@ -2760,7 +2654,7 @@ private ConsumerMembershipManager createMemberInStableState() { } private ConsumerMembershipManager createMemberInStableState(String groupInstanceId) { - ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(groupInstanceId, null, null); + ConsumerMembershipManager membershipManager = createMembershipManagerJoiningGroup(groupInstanceId, null); ConsumerGroupHeartbeatResponse heartbeatResponse = createConsumerGroupHeartbeatResponse(new Assignment(), membershipManager.memberId()); when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); when(subscriptionState.rebalanceListener()).thenReturn(Optional.empty()); @@ -2958,13 +2852,6 @@ private ConsumerGroupHeartbeatResponse createConsumerGroupHeartbeatResponse( .setAssignment(assignment)); } - private ConsumerGroupHeartbeatResponse createConsumerGroupLeaveResponse(String memberId) { - return new ConsumerGroupHeartbeatResponse(new ConsumerGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setMemberId(memberId) - .setMemberEpoch(LEAVE_GROUP_MEMBER_EPOCH)); - } - /** * Create heartbeat response with the given assignment and a bumped epoch (incrementing by 1 * as default but could be any increment). This will be used to mock when a member diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java index f57e93a2a15e8..949bdc9aa727d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerMetadataTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.Metadata; -import org.apache.kafka.clients.consumer.SubscriptionPattern; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.Node; @@ -101,78 +100,6 @@ private void testPatternSubscription(boolean includeInternalTopics) { assertEquals(Collections.singleton("__matching_topic"), metadata.fetch().topics()); } - @Test - public void testSubscriptionToBrokerRegexDoesNotRequestAllTopicsMetadata() { - // Subscribe to broker-side regex - subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); - - // Receive assignment from coordinator with topic IDs only - Uuid assignedTopicId = Uuid.randomUuid(); - subscription.setAssignedTopicIds(Set.of(assignedTopicId)); - - // Metadata request should only include the assigned topic IDs - try (ConsumerMetadata metadata = newConsumerMetadata(false)) { - MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); - assertFalse(builder.isAllTopics(), "Should not request all topics when using broker-side regex"); - assertEquals(List.of(assignedTopicId), builder.topicIds(), "Should only request assigned topic IDs when using broker-side regex"); - } - } - - @Test - public void testSubscriptionToBrokerRegexRetainsAssignedTopics() { - // Subscribe to broker-side regex - subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); - - // Receive assignment from coordinator with topic IDs only - Uuid assignedTopicId = Uuid.randomUuid(); - subscription.setAssignedTopicIds(Set.of(assignedTopicId)); - - // Metadata request for assigned topic IDs - try (ConsumerMetadata metadata = newConsumerMetadata(false)) { - MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); - assertEquals(List.of(assignedTopicId), builder.topicIds()); - - // Metadata response with the assigned topic ID and name - Map topicIds = Map.of("__matching_topic", assignedTopicId); - MetadataResponse response = RequestTestUtils.metadataUpdateWithIds(1, singletonMap("__matching_topic", 1), topicIds); - metadata.updateWithCurrentRequestVersion(response, false, time.milliseconds()); - - assertEquals(Set.of("__matching_topic"), new HashSet<>(metadata.fetch().topics())); - assertEquals(Set.of("__matching_topic"), metadata.fetch().topics()); - } - } - - @Test - public void testSubscriptionToBrokerRegexAllowsTransientTopics() { - // Subscribe to broker-side regex - subscription.subscribe(new SubscriptionPattern("__.*"), Optional.empty()); - - // Receive assignment from coordinator with topic IDs only - Uuid assignedTopicId = Uuid.randomUuid(); - subscription.setAssignedTopicIds(Set.of(assignedTopicId)); - - // Metadata request should only include the assigned topic IDs - try (ConsumerMetadata metadata = newConsumerMetadata(false)) { - MetadataRequest.Builder builder = metadata.newMetadataRequestBuilder(); - assertFalse(builder.isAllTopics()); - assertEquals(List.of(assignedTopicId), builder.topicIds()); - - // Call to offsets-related APIs starts. Metadata requests should move to requesting topic names temporarily. - String transientTopic = "__transient_topic"; - metadata.addTransientTopics(Set.of(transientTopic)); - builder = metadata.newMetadataRequestBuilder(); - assertFalse(builder.isAllTopics()); - // assertTrue(builder.topicIds().isEmpty()); - assertEquals(List.of(transientTopic), builder.topics()); - - // Call to offsets-related APIs ends. Metadata requests should move back to requesting topic IDs for RE2J. - metadata.clearTransientTopics(); - builder = metadata.newMetadataRequestBuilder(); - assertFalse(builder.isAllTopics()); - assertEquals(List.of(assignedTopicId), builder.topicIds()); - } - } - @Test public void testUserAssignment() { subscription.assignFromUser(Set.of( diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java index 1f5551e7df121..b5ab39e62c720 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkClientTest.java @@ -41,6 +41,7 @@ import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.test.TestUtils; +import org.junit.jupiter.api.Disabled; import org.junit.jupiter.api.Test; import java.time.Duration; @@ -265,34 +266,45 @@ public void testMetadataFailurePropagated() { assertEquals(metadataException, exc); } + @Disabled("KAFKA-17554") @Test public void testFutureCompletionOutsidePoll() throws Exception { // Tests the scenario in which the request that is being awaited in one thread // is received and completed in another thread. + + final CountDownLatch t1TheardCountDownLatch = new CountDownLatch(1); + final CountDownLatch t2ThreadCountDownLatch = new CountDownLatch(2); + final RequestFuture future = consumerClient.send(node, heartbeat()); consumerClient.pollNoWakeup(); // dequeue and send the request - CountDownLatch bothThreadsReady = new CountDownLatch(2); - client.enableBlockingUntilWakeup(2); - - Thread t1 = new Thread(() -> { - bothThreadsReady.countDown(); + Thread t1 = new Thread(() -> { + t1TheardCountDownLatch.countDown(); consumerClient.pollNoWakeup(); + t2ThreadCountDownLatch.countDown(); }); + + t1.start(); Thread t2 = new Thread(() -> { - bothThreadsReady.countDown(); - consumerClient.poll(future); + try { + t2ThreadCountDownLatch.await(); + consumerClient.poll(future); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } }); - - t1.start(); t2.start(); - - // Wait until both threads are blocked in poll - bothThreadsReady.await(); + + // Simulate a network response and return from the poll in t1 client.respond(heartbeatResponse(Errors.NONE)); + // Wait for t1 to block in poll + t1TheardCountDownLatch.await(); + client.wakeup(); + // while t1 is blocked in poll, t2 should be able to complete the future + t2ThreadCountDownLatch.countDown(); // Both threads should complete since t1 should wakeup t2 t1.join(); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java index 35ccb17dfab43..520279fc8d454 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ConsumerNetworkThreadTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -33,21 +32,21 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import org.junit.jupiter.params.provider.ValueSource; import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedList; import java.util.List; +import java.util.Optional; import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import java.util.function.Supplier; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyLong; @@ -118,7 +117,10 @@ public void testEnsureCloseStopsRunningThread() { @ParameterizedTest @ValueSource(longs = {ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS - 1, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS + 1}) public void testConsumerNetworkThreadPollTimeComputations(long exampleTime) { - List list = List.of(coordinatorRequestManager, heartbeatRequestManager); + List> list = new ArrayList<>(); + list.add(Optional.of(coordinatorRequestManager)); + list.add(Optional.of(heartbeatRequestManager)); + when(requestManagers.entries()).thenReturn(list); NetworkClientDelegate.PollResult pollResult = new NetworkClientDelegate.PollResult(exampleTime); @@ -156,13 +158,16 @@ public void testStartupAndTearDown() throws InterruptedException { @Test public void testRequestsTransferFromManagersToClientOnThreadRun() { - List list = List.of(coordinatorRequestManager, heartbeatRequestManager, offsetsRequestManager); + List> list = new ArrayList<>(); + list.add(Optional.of(coordinatorRequestManager)); + list.add(Optional.of(heartbeatRequestManager)); + list.add(Optional.of(offsetsRequestManager)); when(requestManagers.entries()).thenReturn(list); when(coordinatorRequestManager.poll(anyLong())).thenReturn(mock(NetworkClientDelegate.PollResult.class)); consumerNetworkThread.runOnce(); - requestManagers.entries().forEach(rm -> verify(rm).poll(anyLong())); - requestManagers.entries().forEach(rm -> verify(rm).maximumTimeToWait(anyLong())); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).poll(anyLong()))); + requestManagers.entries().forEach(rmo -> rmo.ifPresent(rm -> verify(rm).maximumTimeToWait(anyLong()))); verify(networkClientDelegate).addAll(any(NetworkClientDelegate.PollResult.class)); verify(networkClientDelegate).poll(anyLong(), anyLong()); } @@ -173,7 +178,7 @@ public void testMaximumTimeToWait() { // Initial value before runOnce has been called assertEquals(ConsumerNetworkThread.MAX_POLL_TIMEOUT_MS, consumerNetworkThread.maximumTimeToWait()); - when(requestManagers.entries()).thenReturn(List.of(heartbeatRequestManager)); + when(requestManagers.entries()).thenReturn(Collections.singletonList(Optional.of(heartbeatRequestManager))); when(heartbeatRequestManager.maximumTimeToWait(time.milliseconds())).thenReturn((long) defaultHeartbeatIntervalMs); consumerNetworkThread.runOnce(); @@ -206,11 +211,10 @@ public void testSendUnsentRequests() { verify(networkClientDelegate, times(2)).poll(anyLong(), anyLong()); } - @ParameterizedTest - @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") - public void testRunOnceRecordTimeBetweenNetworkThreadPoll(String groupName) { + @Test + public void testRunOnceRecordTimeBetweenNetworkThreadPoll() { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( new LogContext(), time, @@ -229,23 +233,22 @@ public void testRunOnceRecordTimeBetweenNetworkThreadPoll(String groupName) { assertEquals( 10, (double) metrics.metric( - metrics.metricName("time-between-network-thread-poll-avg", groupName) + metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("time-between-network-thread-poll-max", groupName) + metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP) ).metricValue() ); } } - @ParameterizedTest - @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") - public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime(String groupName) { + @Test + public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTime() { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); ConsumerNetworkThread consumerNetworkThread = new ConsumerNetworkThread( new LogContext(), time, @@ -268,74 +271,21 @@ public void testRunOnceRecordApplicationEventQueueSizeAndApplicationEventQueueTi assertEquals( 0, (double) metrics.metric( - metrics.metricName("application-event-queue-size", groupName) + metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("application-event-queue-time-avg", groupName) + metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("application-event-queue-time-max", groupName) + metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP) ).metricValue() ); } } - - @Test - public void testNetworkClientDelegateInitializeResourcesError() { - Supplier networkClientDelegateSupplier = () -> { - throw new KafkaException("Injecting NetworkClientDelegate initialization failure"); - }; - Supplier requestManagersSupplier = () -> requestManagers; - testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); - } - - @Test - public void testRequestManagersInitializeResourcesError() { - Supplier networkClientDelegateSupplier = () -> networkClientDelegate; - Supplier requestManagersSupplier = () -> { - throw new KafkaException("Injecting RequestManagers initialization failure"); - }; - testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); - } - - @Test - public void testNetworkClientDelegateAndRequestManagersInitializeResourcesError() { - Supplier networkClientDelegateSupplier = () -> { - throw new KafkaException("Injecting NetworkClientDelegate initialization failure"); - }; - Supplier requestManagersSupplier = () -> { - throw new KafkaException("Injecting RequestManagers initialization failure"); - }; - testInitializeResourcesError(networkClientDelegateSupplier, requestManagersSupplier); - } - - /** - * Tests that when an error occurs during {@link ConsumerNetworkThread#initializeResources()} that the - * logic in {@link ConsumerNetworkThread#cleanup()} will not throw errors when closing. - */ - private void testInitializeResourcesError(Supplier networkClientDelegateSupplier, - Supplier requestManagersSupplier) { - // A new ConsumerNetworkThread is created because the shared one doesn't have any issues initializing its - // resources. However, most of the mocks can be reused, so this is mostly boilerplate except for the error - // when a supplier is invoked. - try (ConsumerNetworkThread thread = new ConsumerNetworkThread( - new LogContext(), - time, - applicationEventQueue, - applicationEventReaper, - () -> applicationEventProcessor, - networkClientDelegateSupplier, - requestManagersSupplier, - asyncConsumerMetrics - )) { - assertThrows(KafkaException.class, thread::initializeResources, "initializeResources should fail because one or more Supplier throws an error on get()"); - assertDoesNotThrow(thread::cleanup, "cleanup() should not cause an error because all references are checked before use"); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java index addb68070faf4..0ed902d7f278d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/CoordinatorRequestManagerTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; import org.apache.kafka.common.requests.RequestHeader; -import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; @@ -99,13 +98,15 @@ public void testSuccessfulResponse() { * * @see CoordinatorRequestManager#markCoordinatorUnknown(String, long) */ - @Flaky("KAFKA-18776") @Test public void testMarkCoordinatorUnknownLoggingAccuracy() { long oneMinute = 60000; try (final LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - appender.setClassLogger(CoordinatorRequestManager.class, Level.WARN); + // You'd be forgiven for assuming that a warning message would be logged at WARN, but + // markCoordinatorUnknown logs the warning at DEBUG. This is partly for historical parity with the + // ClassicKafkaConsumer. + appender.setClassLogger(CoordinatorRequestManager.class, Level.DEBUG); CoordinatorRequestManager coordinatorRequestManager = setupCoordinatorManager(GROUP_ID); assertFalse(coordinatorRequestManager.coordinator().isPresent()); @@ -132,7 +133,7 @@ public void testMarkCoordinatorUnknownLoggingAccuracy() { } private Optional millisecondsFromLog(LogCaptureAppender appender) { - Pattern pattern = Pattern.compile("^Consumer has been disconnected from the group coordinator for (?\\d+)+ms$"); + Pattern pattern = Pattern.compile("\\s+(?\\d+)+ms"); List milliseconds = appender.getMessages().stream() .map(pattern::matcher) .filter(Matcher::find) diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java index 5b2f6d6f48e6f..7c831f2d487d8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchBufferTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.LogContext; @@ -32,6 +33,7 @@ import java.time.Duration; import java.util.Arrays; +import java.util.HashSet; import java.util.Properties; import java.util.Set; @@ -177,7 +179,7 @@ public void testWakeup() throws Exception { try (FetchBuffer fetchBuffer = new FetchBuffer(logContext)) { final Thread waitingThread = new Thread(() -> { final Timer timer = time.timer(Duration.ofMinutes(1)); - fetchBuffer.awaitWakeup(timer); + fetchBuffer.awaitNotEmpty(timer); }); waitingThread.start(); fetchBuffer.wakeup(); @@ -196,13 +198,14 @@ private CompletedFetch completedFetch(TopicPartition tp) { tp, partitionData, metricsAggregator, - 0L); + 0L, + ApiKeys.FETCH.latestVersion()); } /** * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicPartition... partitions) { - return Set.of(partitions); + return new HashSet<>(Arrays.asList(partitions)); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java index c2b4e6ca4c8da..01d52b3662b0b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchCollectorTest.java @@ -26,6 +26,7 @@ import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.ControlRecordType; import org.apache.kafka.common.record.EndTransactionMarker; @@ -53,6 +54,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; +import java.util.HashSet; import java.util.List; import java.util.Optional; import java.util.Properties; @@ -710,7 +712,7 @@ private FetchCollector createFetchCollector(final SubscriptionSt mock(ConsumerMetadata.class), subscriptions, new FetchConfig(new ConsumerConfig(consumerProps)), - new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null), + new Deserializers<>(new StringDeserializer(), new StringDeserializer()), mock(FetchMetricsManager.class), new MockTime() ); @@ -720,7 +722,7 @@ private FetchCollector createFetchCollector(final SubscriptionSt * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicPartition... partitions) { - return Set.of(partitions); + return new HashSet<>(Arrays.asList(partitions)); } private void buildDependencies() { @@ -739,11 +741,12 @@ private void buildDependencies(int maxPollRecords, IsolationLevel isolationLevel Properties p = consumerProperties(maxPollRecords); ConsumerConfig config = new ConsumerConfig(p); + deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer()); + subscriptions = createSubscriptionState(config, logContext); fetchConfig = createFetchConfig(config, isolationLevel); Metrics metrics = createMetrics(config, time); metricsManager = createFetchMetricsManager(metrics); - deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics); metadata = new ConsumerMetadata( 0, 1000, @@ -919,7 +922,8 @@ private CompletedFetch build() { topicPartition, partitionData, metricsAggregator, - fetchOffset); + fetchOffset, + ApiKeys.FETCH.latestVersion()); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java index 8dc50b1e66a39..c7daeb5334358 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchMetricsManagerTest.java @@ -24,7 +24,6 @@ import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; -import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -33,12 +32,10 @@ import org.junit.jupiter.api.Test; import java.util.Map; -import java.util.Set; import static org.apache.kafka.clients.consumer.internals.FetchMetricsManager.topicPartitionTags; import static org.apache.kafka.clients.consumer.internals.FetchMetricsManager.topicTags; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertTrue; public class FetchMetricsManagerTest { @@ -46,6 +43,7 @@ public class FetchMetricsManagerTest { private final Time time = new MockTime(1, 0, 0); private static final String TOPIC_NAME = "test"; + private static final TopicPartition TP = new TopicPartition(TOPIC_NAME, 0); private Metrics metrics; private FetchMetricsRegistry metricsRegistry; @@ -117,43 +115,22 @@ public void testBytesFetched() { } @Test - @SuppressWarnings("deprecation") public void testBytesFetchedTopic() { String topicName1 = TOPIC_NAME; - String topicName2 = "another.topic"; - Map tags1 = Map.of("topic", topicName1); - Map tags2 = Map.of("topic", topicName2); - Map deprecatedTags = topicTags(topicName2); - int initialMetricsSize = metrics.metrics().size(); + String topicName2 = "another-topic"; + Map tags1 = topicTags(topicName1); + Map tags2 = topicTags(topicName2); metricsManager.recordBytesFetched(topicName1, 2); - // 4 new metrics shall be registered. - assertEquals(4, metrics.metrics().size() - initialMetricsSize); metricsManager.recordBytesFetched(topicName2, 1); - // Another 8 metrics get registered as deprecated metrics should be reported for topicName2. - assertEquals(12, metrics.metrics().size() - initialMetricsSize); - time.sleep(metrics.config().timeWindowMs() + 1); metricsManager.recordBytesFetched(topicName1, 10); metricsManager.recordBytesFetched(topicName2, 5); - // Subsequent calls should not register new metrics. - assertEquals(12, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for topicName1. assertEquals(6, metricValue(metricsRegistry.topicFetchSizeAvg, tags1), EPSILON); assertEquals(10, metricValue(metricsRegistry.topicFetchSizeMax, tags1), EPSILON); - assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, tags1) > 0); - assertEquals(12, metricValue(metricsRegistry.topicBytesConsumedTotal, tags1), EPSILON); - // Validate metrics for topicName2. assertEquals(3, metricValue(metricsRegistry.topicFetchSizeAvg, tags2), EPSILON); assertEquals(5, metricValue(metricsRegistry.topicFetchSizeMax, tags2), EPSILON); - assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, tags2) > 0); - assertEquals(6, metricValue(metricsRegistry.topicBytesConsumedTotal, tags2), EPSILON); - // Validate metrics for deprecated topic. - assertEquals(3, metricValue(metricsRegistry.topicFetchSizeAvg, deprecatedTags), EPSILON); - assertEquals(5, metricValue(metricsRegistry.topicFetchSizeMax, deprecatedTags), EPSILON); - assertTrue(metricValue(metricsRegistry.topicBytesConsumedRate, deprecatedTags) > 0); - assertEquals(6, metricValue(metricsRegistry.topicBytesConsumedTotal, deprecatedTags), EPSILON); } @Test @@ -166,216 +143,48 @@ public void testRecordsFetched() { } @Test - @SuppressWarnings("deprecation") public void testRecordsFetchedTopic() { String topicName1 = TOPIC_NAME; - String topicName2 = "another.topic"; - Map tags1 = Map.of("topic", topicName1); - Map tags2 = Map.of("topic", topicName2); - Map deprecatedTags = topicTags(topicName2); - int initialMetricsSize = metrics.metrics().size(); + String topicName2 = "another-topic"; + Map tags1 = topicTags(topicName1); + Map tags2 = topicTags(topicName2); metricsManager.recordRecordsFetched(topicName1, 2); - // 3 new metrics shall be registered. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); metricsManager.recordRecordsFetched(topicName2, 1); - // Another 6 metrics get registered as deprecated metrics should be reported for topicName2. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - time.sleep(metrics.config().timeWindowMs() + 1); metricsManager.recordRecordsFetched(topicName1, 10); metricsManager.recordRecordsFetched(topicName2, 5); - // Subsequent calls should not register new metrics. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for topicName1. assertEquals(6, metricValue(metricsRegistry.topicRecordsPerRequestAvg, tags1), EPSILON); - assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, tags1) > 0); - assertEquals(12, metricValue(metricsRegistry.topicRecordsConsumedTotal, tags1), EPSILON); - // Validate metrics for topicName2. assertEquals(3, metricValue(metricsRegistry.topicRecordsPerRequestAvg, tags2), EPSILON); - assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, tags2) > 0); - assertEquals(6, metricValue(metricsRegistry.topicRecordsConsumedTotal, tags2), EPSILON); - // Validate metrics for deprecated topic. - assertEquals(3, metricValue(metricsRegistry.topicRecordsPerRequestAvg, deprecatedTags), EPSILON); - assertTrue(metricValue(metricsRegistry.topicRecordsConsumedRate, deprecatedTags) > 0); - assertEquals(6, metricValue(metricsRegistry.topicRecordsConsumedTotal, deprecatedTags), EPSILON); } @Test - @SuppressWarnings("deprecation") public void testPartitionLag() { - TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); - TopicPartition tp2 = new TopicPartition("another.topic", 0); - - Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); - Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); - Map deprecatedTags = topicPartitionTags(tp2); - int initialMetricsSize = metrics.metrics().size(); - - metricsManager.recordPartitionLag(tp1, 14); - // 3 new metrics shall be registered. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - - metricsManager.recordPartitionLag(tp1, 8); + Map tags = topicPartitionTags(TP); + metricsManager.recordPartitionLag(TP, 14); + metricsManager.recordPartitionLag(TP, 8); time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLag(tp1, 5); + metricsManager.recordPartitionLag(TP, 5); - // Subsequent calls should not register new metrics. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for tp1. assertEquals(14, metricValue(metricsRegistry.recordsLagMax), EPSILON); - assertEquals(5, metricValue(metricsRegistry.partitionRecordsLag, tags1), EPSILON); - assertEquals(14, metricValue(metricsRegistry.partitionRecordsLagMax, tags1), EPSILON); - assertEquals(9, metricValue(metricsRegistry.partitionRecordsLagAvg, tags1), EPSILON); - - metricsManager.recordPartitionLag(tp2, 7); - // Another 6 metrics get registered as deprecated metrics should be reported for tp2. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - metricsManager.recordPartitionLag(tp2, 3); - time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLag(tp2, 2); - - // Subsequent calls should not register new metrics. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for tp2. - assertEquals(7, metricValue(metricsRegistry.recordsLagMax), EPSILON); - assertEquals(2, metricValue(metricsRegistry.partitionRecordsLag, tags2), EPSILON); - assertEquals(7, metricValue(metricsRegistry.partitionRecordsLagMax, tags2), EPSILON); - assertEquals(4, metricValue(metricsRegistry.partitionRecordsLagAvg, tags2), EPSILON); - // Validate metrics for deprecated topic. - assertEquals(2, metricValue(metricsRegistry.partitionRecordsLag, deprecatedTags), EPSILON); - assertEquals(7, metricValue(metricsRegistry.partitionRecordsLagMax, deprecatedTags), EPSILON); - assertEquals(4, metricValue(metricsRegistry.partitionRecordsLagAvg, deprecatedTags), EPSILON); + assertEquals(5, metricValue(metricsRegistry.partitionRecordsLag, tags), EPSILON); + assertEquals(14, metricValue(metricsRegistry.partitionRecordsLagMax, tags), EPSILON); + assertEquals(9, metricValue(metricsRegistry.partitionRecordsLagAvg, tags), EPSILON); } @Test - @SuppressWarnings("deprecation") public void testPartitionLead() { - TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); - TopicPartition tp2 = new TopicPartition("another.topic", 0); - - Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); - Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); - Map deprecatedTags = topicPartitionTags(tp2); - int initialMetricsSize = metrics.metrics().size(); - - metricsManager.recordPartitionLead(tp1, 15); - // 3 new metrics shall be registered. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - - metricsManager.recordPartitionLead(tp1, 11); + Map tags = topicPartitionTags(TP); + metricsManager.recordPartitionLead(TP, 15); + metricsManager.recordPartitionLead(TP, 11); time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLead(tp1, 13); + metricsManager.recordPartitionLead(TP, 13); - // Subsequent calls should not register new metrics. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for tp1. assertEquals(11, metricValue(metricsRegistry.recordsLeadMin), EPSILON); - assertEquals(13, metricValue(metricsRegistry.partitionRecordsLead, tags1), EPSILON); - assertEquals(11, metricValue(metricsRegistry.partitionRecordsLeadMin, tags1), EPSILON); - assertEquals(13, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags1), EPSILON); - - metricsManager.recordPartitionLead(tp2, 18); - // Another 6 metrics get registered as deprecated metrics should be reported for tp2. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - - metricsManager.recordPartitionLead(tp2, 12); - time.sleep(metrics.config().timeWindowMs() + 1); - metricsManager.recordPartitionLead(tp2, 15); - - // Subsequent calls should not register new metrics. - assertEquals(9, metrics.metrics().size() - initialMetricsSize); - // Validate metrics for tp2. - assertEquals(12, metricValue(metricsRegistry.recordsLeadMin), EPSILON); - assertEquals(15, metricValue(metricsRegistry.partitionRecordsLead, tags2), EPSILON); - assertEquals(12, metricValue(metricsRegistry.partitionRecordsLeadMin, tags2), EPSILON); - assertEquals(15, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags2), EPSILON); - // Validate metrics for deprecated topic. - assertEquals(15, metricValue(metricsRegistry.partitionRecordsLead, deprecatedTags), EPSILON); - assertEquals(12, metricValue(metricsRegistry.partitionRecordsLeadMin, deprecatedTags), EPSILON); - assertEquals(15, metricValue(metricsRegistry.partitionRecordsLeadAvg, deprecatedTags), EPSILON); - } - - @Test - @SuppressWarnings("deprecation") - public void testMaybeUpdateAssignment() { - TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); - TopicPartition tp2 = new TopicPartition("another.topic", 0); - TopicPartition tp3 = new TopicPartition("another.topic", 1); - int initialMetricsSize = metrics.metrics().size(); - - SubscriptionState subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - subscriptionState.assignFromUser(Set.of(tp1)); - - metricsManager.maybeUpdateAssignment(subscriptionState); - // 1 new metrics shall be registered. - assertEquals(1, metrics.metrics().size() - initialMetricsSize); - - subscriptionState.assignFromUser(Set.of(tp1, tp2)); - subscriptionState.updatePreferredReadReplica(tp2, 1, () -> 0L); - metricsManager.maybeUpdateAssignment(subscriptionState); - // Another 2 metrics get registered as deprecated metrics should be reported for tp2. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - - Map tags1 = Map.of("topic", tp1.topic(), "partition", String.valueOf(tp1.partition())); - Map tags2 = Map.of("topic", tp2.topic(), "partition", String.valueOf(tp2.partition())); - Map deprecatedTags = topicPartitionTags(tp2); - // Validate preferred read replica metrics. - assertEquals(-1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, tags1), EPSILON); - assertEquals(1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, tags2), EPSILON); - assertEquals(1, readReplicaMetricValue(metricsRegistry.partitionPreferredReadReplica, deprecatedTags), EPSILON); - - // Remove tp2 from subscription set. - subscriptionState.assignFromUser(Set.of(tp1, tp3)); - metricsManager.maybeUpdateAssignment(subscriptionState); - // Metrics count shall remain same as tp2 should be removed and tp3 gets added. - assertEquals(3, metrics.metrics().size() - initialMetricsSize); - - // Remove all partitions. - subscriptionState.assignFromUser(Set.of()); - metricsManager.maybeUpdateAssignment(subscriptionState); - // Metrics count shall be same as initial count as all new metrics shall be removed. - assertEquals(initialMetricsSize, metrics.metrics().size()); - } - - @Test - public void testMaybeUpdateAssignmentWithAdditionalRegisteredMetrics() { - TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 0); - TopicPartition tp2 = new TopicPartition("another.topic", 0); - TopicPartition tp3 = new TopicPartition("another.topic", 1); - - int initialMetricsSize = metrics.metrics().size(); - - metricsManager.recordPartitionLag(tp1, 14); - metricsManager.recordPartitionLead(tp1, 11); - metricsManager.recordPartitionLag(tp2, 5); - metricsManager.recordPartitionLead(tp2, 1); - metricsManager.recordPartitionLag(tp3, 4); - metricsManager.recordPartitionLead(tp3, 2); - - int additionalRegisteredMetricsSize = metrics.metrics().size(); - - SubscriptionState subscriptionState = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - subscriptionState.assignFromUser(Set.of(tp1, tp2, tp3)); - metricsManager.maybeUpdateAssignment(subscriptionState); - - // 5 new metrics shall be registered. - assertEquals(5, metrics.metrics().size() - additionalRegisteredMetricsSize); - - // Remove 1 partition which has deprecated metrics as well. - subscriptionState.assignFromUser(Set.of(tp1, tp2)); - metricsManager.maybeUpdateAssignment(subscriptionState); - // For tp2, 14 metrics will be unregistered. 3 for partition lag, 3 for partition lead, 1 for - // preferred read replica and similarly 7 deprecated metrics. Hence, we should have 9 metrics - // removed from additionalRegisteredMetricsSize. - assertEquals(9, additionalRegisteredMetricsSize - metrics.metrics().size()); - - // Remove all partitions. - subscriptionState.assignFromUser(Set.of()); - metricsManager.maybeUpdateAssignment(subscriptionState); - // Metrics count shall be same as initial count as all new metrics shall be removed. - assertEquals(initialMetricsSize, metrics.metrics().size()); + assertEquals(13, metricValue(metricsRegistry.partitionRecordsLead, tags), EPSILON); + assertEquals(11, metricValue(metricsRegistry.partitionRecordsLeadMin, tags), EPSILON); + assertEquals(13, metricValue(metricsRegistry.partitionRecordsLeadAvg, tags), EPSILON); } private void registerNodeLatencyMetric(String connectionId, MetricName nodeLatencyAvg, MetricName nodeLatencyMax) { @@ -400,9 +209,4 @@ private double metricValue(MetricName metricName) { return (Double) metric.metricValue(); } - private Integer readReplicaMetricValue(MetricNameTemplate name, Map tags) { - MetricName metricName = metrics.metricInstance(name, tags); - KafkaMetric metric = metrics.metric(metricName); - return (Integer) metric.metricValue(); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java index 1378e4b53a1e2..e0b96f8d29705 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetchRequestManagerTest.java @@ -382,7 +382,7 @@ public void testFetcherCloseClosesFetchSessionsInBroker() { // NOTE: by design the FetchRequestManager doesn't perform network I/O internally. That means that calling // the close() method with a Timer will NOT send out the close session requests on close. The network // I/O logic is handled inside ConsumerNetworkThread.runAtClose, so we need to run that logic here. - ConsumerNetworkThread.runAtClose(List.of(fetcher), networkClientDelegate, time.milliseconds()); + ConsumerNetworkThread.runAtClose(singletonList(Optional.of(fetcher)), networkClientDelegate, time.milliseconds()); // the network is polled during the last state of clean up. networkClientDelegate.poll(time.timer(1)); // validate that closing the fetcher has sent a request with final epoch. 2 requests are sent, one for the @@ -1733,7 +1733,7 @@ public void testFetchPositionAfterException() { .setPartitionIndex(tp0.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); List> allFetchedRecords = new ArrayList<>(); @@ -1794,7 +1794,7 @@ public void testCompletedFetchRemoval() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(partialRecords)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); List> fetchedRecords = new ArrayList<>(); @@ -1815,7 +1815,7 @@ public void testCompletedFetchRemoval() { assertEquals(1, oorExceptions.size()); OffsetOutOfRangeException oor = oorExceptions.get(0); assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0)); - assertEquals(1, oor.offsetOutOfRangePartitions().size()); + assertEquals(oor.offsetOutOfRangePartitions().size(), 1); fetchRecordsInto(fetchedRecords); @@ -1865,7 +1865,7 @@ public void testSeekBeforeException() { .setPartitionIndex(tp1.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); assertEquals(1, fetchRecords().get(tp0).size()); @@ -2113,7 +2113,7 @@ public void testFetchResponseMetrics() { } assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData, List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData)); networkClientDelegate.poll(time.timer(0)); Map>> fetchedRecords = fetchRecords(); @@ -2185,7 +2185,7 @@ public void testFetchResponseMetricsWithOnePartitionError() { .setLogStartOffset(0)); assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); collectFetch(); @@ -2231,7 +2231,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { .setLogStartOffset(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("val".getBytes())))); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); networkClientDelegate.poll(time.timer(0)); collectFetch(); @@ -2359,7 +2359,7 @@ public void testReturnCommittedTransactions() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); } @Test @@ -2477,9 +2477,9 @@ public void testMultipleAbortMarkers() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); List> fetchedConsumerRecords = fetchedRecords.get(tp0); - Set expectedCommittedKeys = Set.of("commit1-1", "commit1-2"); + Set expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2")); Set actuallyCommittedKeys = new HashSet<>(); for (ConsumerRecord consumerRecord : fetchedConsumerRecords) { actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8)); @@ -2741,7 +2741,7 @@ public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); - assignFromUser(Set.of(tp0, tp1)); + assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); @@ -2758,7 +2758,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); - FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1, List.of()); + FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -2784,7 +2784,7 @@ public void testConsumingViaIncrementalFetchRequests() { // The second response contains no new records. LinkedHashMap partitions2 = new LinkedHashMap<>(); - FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2, List.of()); + FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); client.prepareResponse(resp2); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); @@ -2801,7 +2801,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); - FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3, List.of()); + FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); client.prepareResponse(resp3); assertEquals(1, sendFetches()); networkClientDelegate.poll(time.timer(0)); @@ -2854,7 +2854,7 @@ public void testEmptyControlBatch() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); } private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { @@ -2939,8 +2939,8 @@ public void testSubscriptionPositionUpdatedWithEpoch() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(3L, subscriptions.position(tp0).offset); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); + assertEquals(subscriptions.position(tp0).offset, 3L); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); } @Test @@ -3110,7 +3110,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(1, selected.id()); + assertEquals(selected.id(), 1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -3124,7 +3124,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(-1, selected.id()); + assertEquals(selected.id(), -1); } @Test @@ -3196,7 +3196,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(Set.of(tp0, tp1)); + subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3246,7 +3246,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo .setLastStableOffset(FetchResponse.INVALID_LAST_STABLE_OFFSET) .setLogStartOffset(0) .setRecords(nextRecords)); - client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions, List.of()), nodeId0); + client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions), nodeId0); networkClientDelegate.poll(time.timer(0)); partitionRecords = fetchRecords(); assertFalse(partitionRecords.containsKey(tp0)); @@ -3289,7 +3289,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInform // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(Set.of(tp0, tp1)); + subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3418,7 +3418,7 @@ public void testPollWithCreateFetchRequestsError() { assertFalse(future.isDone()); assertDoesNotThrow(() -> sendFetches(false)); - assertFutureThrows(AuthenticationException.class, future); + assertFutureThrows(future, AuthenticationException.class); } @Test @@ -3747,7 +3747,7 @@ public void testFetchRequestWithBufferedPartitionMissingPosition() { Future future = fetcher.createFetchRequests(); List call2 = fetcher.sendFetches(); assertEquals(0, call2.size()); - assertFutureThrows(IllegalStateException.class, future); + assertFutureThrows(future, IllegalStateException.class); } @Test @@ -3851,7 +3851,7 @@ private void prepareFetchResponses(Node node, Collection partiti }); client.prepareResponseFrom( - FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitionDataMap, List.of()), + FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitionDataMap), node ); } @@ -3906,7 +3906,7 @@ private FetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors .setPartitionIndex(tp.topicPartition().partition()) .setErrorCode(error.code()) .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK)); - return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records, @@ -3924,7 +3924,7 @@ private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords rec .setLogStartOffset(0) .setAbortedTransactions(abortedTransactions) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) { @@ -3950,7 +3950,7 @@ private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, Memo .setLastStableOffset(lastStableOffset) .setLogStartOffset(0) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3964,7 +3964,7 @@ private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords recor .setLogStartOffset(0) .setRecords(records) .setPreferredReadReplica(preferredReplicaId.orElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3977,7 +3977,7 @@ private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, .setLastStableOffset(lastStableOffset) .setLogStartOffset(logStartOffset) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } /** @@ -4059,7 +4059,7 @@ private void buildFetcher(MetricConfig metricConfig, SubscriptionState subscriptionState, LogContext logContext) { buildDependencies(metricConfig, metadataExpireMs, subscriptionState, logContext); - Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); + Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); FetchConfig fetchConfig = new FetchConfig( minBytes, maxBytes, @@ -4125,7 +4125,6 @@ private void buildDependencies(MetricConfig metricConfig, properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); - properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConsumerConfig config = new ConsumerConfig(properties); networkClientDelegate = spy(new TestableNetworkClientDelegate(time, config, logContext, client, metadata, backgroundEventHandler, true)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java index ee051a42ca81a..bc82aeae9fa16 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/FetcherTest.java @@ -1720,7 +1720,7 @@ public void testFetchPositionAfterException() { .setPartitionIndex(tp0.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); consumerClient.poll(time.timer(0)); List> allFetchedRecords = new ArrayList<>(); @@ -1781,7 +1781,7 @@ public void testCompletedFetchRemoval() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(partialRecords)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); consumerClient.poll(time.timer(0)); List> fetchedRecords = new ArrayList<>(); @@ -1802,7 +1802,7 @@ public void testCompletedFetchRemoval() { assertEquals(1, oorExceptions.size()); OffsetOutOfRangeException oor = oorExceptions.get(0); assertTrue(oor.offsetOutOfRangePartitions().containsKey(tp0)); - assertEquals(1, oor.offsetOutOfRangePartitions().size()); + assertEquals(oor.offsetOutOfRangePartitions().size(), 1); fetchRecordsInto(fetchedRecords); @@ -1852,7 +1852,7 @@ public void testSeekBeforeException() { .setPartitionIndex(tp1.partition()) .setErrorCode(Errors.OFFSET_OUT_OF_RANGE.code()) .setHighWatermark(100)); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); consumerClient.poll(time.timer(0)); assertEquals(1, fetchRecords().get(tp0).size()); @@ -2100,7 +2100,7 @@ public void testFetchResponseMetrics() { } assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData, List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, fetchPartitionData)); consumerClient.poll(time.timer(0)); Map>> fetchedRecords = fetchRecords(); @@ -2172,7 +2172,7 @@ public void testFetchResponseMetricsWithOnePartitionError() { .setLogStartOffset(0)); assertEquals(1, sendFetches()); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); consumerClient.poll(time.timer(0)); collectFetch(); @@ -2218,7 +2218,7 @@ public void testFetchResponseMetricsWithOnePartitionAtTheWrongOffset() { .setLogStartOffset(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("val".getBytes())))); - client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of())); + client.prepareResponse(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, new LinkedHashMap<>(partitions))); consumerClient.poll(time.timer(0)); collectFetch(); @@ -2346,7 +2346,7 @@ public void testReturnCommittedTransactions() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); } @Test @@ -2464,9 +2464,9 @@ public void testMultipleAbortMarkers() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); List> fetchedConsumerRecords = fetchedRecords.get(tp0); - Set expectedCommittedKeys = Set.of("commit1-1", "commit1-2"); + Set expectedCommittedKeys = new HashSet<>(Arrays.asList("commit1-1", "commit1-2")); Set actuallyCommittedKeys = new HashSet<>(); for (ConsumerRecord consumerRecord : fetchedConsumerRecords) { actuallyCommittedKeys.add(new String(consumerRecord.key(), StandardCharsets.UTF_8)); @@ -2728,7 +2728,7 @@ public void testConsumerPositionUpdatedWhenSkippingAbortedTransactions() { public void testConsumingViaIncrementalFetchRequests() { buildFetcher(2); - assignFromUser(Set.of(tp0, tp1)); + assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); subscriptions.seekValidated(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), metadata.currentLeader(tp0))); subscriptions.seekValidated(tp1, new SubscriptionState.FetchPosition(1, Optional.empty(), metadata.currentLeader(tp1))); @@ -2745,7 +2745,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setHighWatermark(100) .setLogStartOffset(0) .setRecords(emptyRecords)); - FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1, List.of()); + FetchResponse resp1 = FetchResponse.of(Errors.NONE, 0, 123, partitions1); client.prepareResponse(resp1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -2771,7 +2771,7 @@ public void testConsumingViaIncrementalFetchRequests() { // The second response contains no new records. LinkedHashMap partitions2 = new LinkedHashMap<>(); - FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2, List.of()); + FetchResponse resp2 = FetchResponse.of(Errors.NONE, 0, 123, partitions2); client.prepareResponse(resp2); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); @@ -2788,7 +2788,7 @@ public void testConsumingViaIncrementalFetchRequests() { .setLastStableOffset(4) .setLogStartOffset(0) .setRecords(nextRecords)); - FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3, List.of()); + FetchResponse resp3 = FetchResponse.of(Errors.NONE, 0, 123, partitions3); client.prepareResponse(resp3); assertEquals(1, sendFetches()); consumerClient.poll(time.timer(0)); @@ -2824,7 +2824,7 @@ public void testFetcherConcurrency() throws Exception { isolationLevel, apiVersions); - Deserializers deserializers = new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer(), metrics); + Deserializers deserializers = new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer()); FetchConfig fetchConfig = new FetchConfig( minBytes, maxBytes, @@ -2881,11 +2881,11 @@ private void verifySessionPartitions() { field.setAccessible(true); LinkedHashMap sessionPartitions = (LinkedHashMap) field.get(handler); - // If `sessionPartitions` are modified on another thread, Thread.yield will increase the - // possibility of ConcurrentModificationException if appropriate synchronization is not used. - sessionPartitions.forEach( - (key, value) -> Thread.yield() - ); + for (Map.Entry entry : sessionPartitions.entrySet()) { + // If `sessionPartitions` are modified on another thread, Thread.yield will increase the + // possibility of ConcurrentModificationException if appropriate synchronization is not used. + Thread.yield(); + } } catch (Exception e) { throw new RuntimeException(e); } @@ -2922,7 +2922,7 @@ private void verifySessionPartitions() { .setLogStartOffset(0) .setRecords(buildRecords(offset, 2, offset))); } - client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap, List.of())); + client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap)); consumerClient.poll(time.timer(0)); } } @@ -2985,7 +2985,7 @@ public void testFetcherSessionEpochUpdate() throws Exception { .setLogStartOffset(0) .setRecords(buildRecords(nextOffset, 2, nextOffset))); nextOffset += 2; - client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap, List.of())); + client.respondToRequest(request, FetchResponse.of(Errors.NONE, 0, 123, responseMap)); consumerClient.poll(time.timer(0)); } } @@ -3054,7 +3054,7 @@ public void testEmptyControlBatch() { Map>> fetchedRecords = fetchRecords(); assertTrue(fetchedRecords.containsKey(tp0)); - assertEquals(2, fetchedRecords.get(tp0).size()); + assertEquals(fetchedRecords.get(tp0).size(), 2); } private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { @@ -3139,8 +3139,8 @@ public void testSubscriptionPositionUpdatedWithEpoch() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(3L, subscriptions.position(tp0).offset); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); + assertEquals(subscriptions.position(tp0).offset, 3L); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); } @Test @@ -3217,8 +3217,8 @@ public void testTruncationDetected() { Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(3L, subscriptions.position(tp0).offset); - assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(1, value.intValue())); + assertEquals(subscriptions.position(tp0).offset, 3L); + assertOptional(subscriptions.position(tp0).offsetEpoch, value -> assertEquals(value.intValue(), 1)); } @Test @@ -3388,7 +3388,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); Node selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(1, selected.id()); + assertEquals(selected.id(), 1); assertEquals(1, sendFetches()); assertFalse(fetcher.hasCompletedFetches()); @@ -3402,7 +3402,7 @@ public void testPreferredReadReplicaOffsetError() { fetchRecords(); selected = fetcher.selectReadReplica(tp0, Node.noNode(), time.milliseconds()); - assertEquals(-1, selected.id()); + assertEquals(selected.id(), -1); } @Test @@ -3473,7 +3473,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(Set.of(tp0, tp1)); + subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3523,7 +3523,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorButNoNewLeaderInfo .setLastStableOffset(FetchResponse.INVALID_LAST_STABLE_OFFSET) .setLogStartOffset(0) .setRecords(nextRecords)); - client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions, List.of()), nodeId0); + client.prepareResponseFrom(FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, partitions), nodeId0); consumerClient.poll(time.timer(0)); partitionRecords = fetchRecords(); assertFalse(partitionRecords.containsKey(tp0)); @@ -3566,7 +3566,7 @@ public void testWhenFetchResponseReturnsALeaderShipChangeErrorAndNewLeaderInform // Setup so that tp0 & tp1 are subscribed and will be fetched from. // Also, setup client's metadata for tp0 & tp1. - subscriptions.assignFromUser(Set.of(tp0, tp1)); + subscriptions.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); client.updateMetadata( RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 4), tp -> validLeaderEpoch, topicIds, false)); @@ -3662,8 +3662,9 @@ public void testFetcherDontCacheAnyData() { LinkedHashMap responseData = fetchResponse.responseData(topicNames, version); assertEquals(topicNames.size(), responseData.size()); responseData.forEach((topicPartition, partitionData) -> assertEquals(records, partitionData.records())); - LinkedHashMap nonResponseData = fetchResponse.responseData(Map.of(), version); - assertTrue(nonResponseData.isEmpty()); + LinkedHashMap nonResponseData = fetchResponse.responseData(emptyMap(), version); + assertEquals(emptyMap().size(), nonResponseData.size()); + nonResponseData.forEach((topicPartition, partitionData) -> assertEquals(MemoryRecords.EMPTY, partitionData.records())); } private OffsetsForLeaderEpochResponse prepareOffsetsForLeaderEpochResponse( @@ -3689,7 +3690,7 @@ private FetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors .setPartitionIndex(tp.topicPartition().partition()) .setErrorCode(error.code()) .setHighWatermark(FetchResponse.INVALID_HIGH_WATERMARK)); - return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(error, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords records, @@ -3707,7 +3708,7 @@ private FetchResponse fullFetchResponseWithAbortedTransactions(MemoryRecords rec .setLogStartOffset(0) .setAbortedTransactions(abortedTransactions) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, MemoryRecords records, Errors error, long hw, int throttleTime) { @@ -3733,7 +3734,7 @@ private FetchResponse fullFetchResponse(int sessionId, TopicIdPartition tp, Memo .setLastStableOffset(lastStableOffset) .setLogStartOffset(0) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, sessionId, new LinkedHashMap<>(partitions)); } private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3747,7 +3748,7 @@ private FetchResponse fullFetchResponse(TopicIdPartition tp, MemoryRecords recor .setLogStartOffset(0) .setRecords(records) .setPreferredReadReplica(preferredReplicaId.orElse(FetchResponse.INVALID_PREFERRED_REPLICA_ID))); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, Errors error, long hw, @@ -3760,7 +3761,7 @@ private FetchResponse fetchResponse(TopicIdPartition tp, MemoryRecords records, .setLastStableOffset(lastStableOffset) .setLogStartOffset(logStartOffset) .setRecords(records)); - return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions), List.of()); + return FetchResponse.of(Errors.NONE, throttleTime, INVALID_SESSION_ID, new LinkedHashMap<>(partitions)); } /** @@ -3857,7 +3858,7 @@ private void buildFetcher(MetricConfig metricConfig, metadata, subscriptionState, fetchConfig, - new Deserializers<>(keyDeserializer, valueDeserializer, metrics), + new Deserializers<>(keyDeserializer, valueDeserializer), metricsManager, time, apiVersions)); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java index de7937673c888..49102da976603 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/HeartbeatTest.java @@ -45,7 +45,6 @@ public void setUp() { heartbeatIntervalMs, "group_id", Optional.empty(), - null, retryBackoffMs, retryBackoffMaxMs, true); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java index 7fa9f7e31f145..c75ee906e535b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/KafkaConsumerMetricsTest.java @@ -18,27 +18,23 @@ package org.apache.kafka.clients.consumer.internals; import org.apache.kafka.clients.consumer.internals.metrics.KafkaConsumerMetrics; -import org.apache.kafka.common.MetricName; import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.Test; -import java.util.Set; - import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertTrue; class KafkaConsumerMetricsTest { private static final long METRIC_VALUE = 123L; + private static final String CONSUMER_GROUP_PREFIX = "consumer"; private static final String CONSUMER_METRIC_GROUP = "consumer-metrics"; private static final String COMMIT_SYNC_TIME_TOTAL = "commit-sync-time-ns-total"; private static final String COMMITTED_TIME_TOTAL = "committed-time-ns-total"; private final Metrics metrics = new Metrics(); private final KafkaConsumerMetrics consumerMetrics - = new KafkaConsumerMetrics(metrics); + = new KafkaConsumerMetrics(metrics, CONSUMER_GROUP_PREFIX); @Test public void shouldRecordCommitSyncTime() { @@ -68,39 +64,14 @@ public void shouldRemoveMetricsOnClose() { assertMetricRemoved(COMMITTED_TIME_TOTAL); } - @Test - public void checkMetricsAfterCreation() { - Set expectedMetrics = Set.of( - metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP), - metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP), - metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP), - metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP), - metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP) - ); - expectedMetrics.forEach( - metricName -> assertTrue( - metrics.metrics().containsKey(metricName), - "Missing metric: " + metricName - ) - ); - consumerMetrics.close(); - expectedMetrics.forEach( - metricName -> assertFalse( - metrics.metrics().containsKey(metricName), - "Metric present after close: " + metricName - ) - ); - } - private void assertMetricRemoved(final String name) { assertNull(metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP))); } private void assertMetricValue(final String name) { assertEquals( - (double) METRIC_VALUE, - metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP)).metricValue() + metrics.metric(metrics.metricName(name, CONSUMER_METRIC_GROUP)).metricValue(), + (double) METRIC_VALUE ); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java index 0347423137b57..81eb5187fecfb 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/NetworkClientDelegateTest.java @@ -40,8 +40,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; import java.util.ArrayList; import java.util.Collections; @@ -51,11 +49,11 @@ import java.util.concurrent.BlockingQueue; import java.util.concurrent.LinkedBlockingQueue; -import static org.apache.kafka.clients.consumer.ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -131,7 +129,7 @@ public void testTimeoutBeforeSend() throws Exception { time.sleep(REQUEST_TIMEOUT_MS); ncd.poll(0, time.milliseconds()); assertTrue(unsentRequest.future().isDone()); - TestUtils.assertFutureThrows(TimeoutException.class, unsentRequest.future()); + TestUtils.assertFutureThrows(unsentRequest.future(), TimeoutException.class); } } @@ -144,7 +142,7 @@ public void testTimeoutAfterSend() throws Exception { time.sleep(REQUEST_TIMEOUT_MS); ncd.poll(0, time.milliseconds()); assertTrue(unsentRequest.future().isDone()); - TestUtils.assertFutureThrows(DisconnectException.class, unsentRequest.future()); + TestUtils.assertFutureThrows(unsentRequest.future(), DisconnectException.class); } } @@ -248,11 +246,10 @@ public void testPropagateMetadataErrorWithErrorEvent() { assertEquals(authException, ((ErrorEvent) event).error()); } - @ParameterizedTest - @MethodSource("org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetricsTest#groupNameProvider") - public void testRecordUnsentRequestsQueueTime(String groupName) throws Exception { + @Test + public void testRecordUnsentRequestsQueueTime() throws Exception { try (Metrics metrics = new Metrics(); - AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + AsyncConsumerMetrics asyncConsumerMetrics = new AsyncConsumerMetrics(metrics); NetworkClientDelegate networkClientDelegate = newNetworkClientDelegate(false, asyncConsumerMetrics)) { NetworkClientDelegate.UnsentRequest unsentRequest = newUnsentFindCoordinatorRequest(); networkClientDelegate.add(unsentRequest); @@ -264,19 +261,19 @@ public void testRecordUnsentRequestsQueueTime(String groupName) throws Exception assertEquals( 0, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-size", groupName) + metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-time-avg", groupName) + metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP) ).metricValue() ); assertEquals( 10, (double) metrics.metric( - metrics.metricName("unsent-requests-queue-time-max", groupName) + metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP) ).metricValue() ); } @@ -293,7 +290,6 @@ public NetworkClientDelegate newNetworkClientDelegate(boolean notifyMetadataErro properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.put(GROUP_ID_CONFIG, GROUP_ID); properties.put(REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_MS); - properties.put(BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return new NetworkClientDelegate(time, new ConsumerConfig(properties), logContext, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java index 182900c0207ac..96d6e5e0b3db9 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetFetcherTest.java @@ -246,7 +246,7 @@ public void testFetchOffsetErrors() { assertTrue(subscriptions.hasValidPosition(tp0)); assertFalse(subscriptions.isOffsetResetNeeded(tp0)); assertTrue(subscriptions.isFetchable(tp0)); - assertEquals(5L, subscriptions.position(tp0).offset); + assertEquals(subscriptions.position(tp0).offset, 5L); } @Test @@ -395,7 +395,7 @@ public void testListOffsetUpdateEpoch() { assertFalse(subscriptions.isOffsetResetNeeded(tp0)); assertTrue(metadata.updateRequested()); - assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals(2, (long) epoch)); + assertOptional(metadata.lastSeenLeaderEpoch(tp0), epoch -> assertEquals((long) epoch, 2)); } @Test @@ -902,7 +902,7 @@ public void testGetOffsetsIncludesLeaderEpoch() { ListOffsetsRequest offsetRequest = (ListOffsetsRequest) body; int epoch = offsetRequest.topics().get(0).partitions().get(0).currentLeaderEpoch(); assertTrue(epoch != ListOffsetsResponse.UNKNOWN_EPOCH, "Expected Fetcher to set leader epoch in request"); - assertEquals(99, epoch, "Expected leader epoch to match epoch from metadata update"); + assertEquals(epoch, 99, "Expected leader epoch to match epoch from metadata update"); return true; } else { fail("Should have seen ListOffsetRequest"); @@ -1249,7 +1249,7 @@ public void testOffsetValidationSkippedForOldBroker() { metadata, subscriptions, fetchConfig, - new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer(), metrics), + new Deserializers<>(new ByteArrayDeserializer(), new ByteArrayDeserializer()), new FetchMetricsManager(metrics, metricsRegistry), time, apiVersions); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java index 8a3617d61c752..a48b32b43efb6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetForLeaderEpochClientTest.java @@ -102,8 +102,8 @@ public void testOkResponse() { assertTrue(result.partitionsToRetry().isEmpty()); assertTrue(result.endOffsets().containsKey(tp0)); assertEquals(result.endOffsets().get(tp0).errorCode(), Errors.NONE.code()); - assertEquals(1, result.endOffsets().get(tp0).leaderEpoch()); - assertEquals(10L, result.endOffsets().get(tp0).endOffset()); + assertEquals(result.endOffsets().get(tp0).leaderEpoch(), 1); + assertEquals(result.endOffsets().get(tp0).endOffset(), 10L); } @Test @@ -121,7 +121,7 @@ public void testUnauthorizedTopic() { consumerClient.pollNoWakeup(); assertTrue(future.failed()); - assertEquals(TopicAuthorizationException.class, future.exception().getClass()); + assertEquals(future.exception().getClass(), TopicAuthorizationException.class); assertTrue(((TopicAuthorizationException) future.exception()).unauthorizedTopics().contains(tp0.topic())); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java index ed96b81790002..2f92740c41411 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/OffsetsRequestManagerTest.java @@ -51,6 +51,7 @@ import org.mockito.ArgumentCaptor; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -564,7 +565,7 @@ public void testResetOffsetsAuthorizationFailure() { CompletableFuture nextReset = assertDoesNotThrow(() -> requestManager.resetPositionsIfNeeded()); assertEquals(0, requestManager.requestsToSend()); assertTrue(nextReset.isCompletedExceptionally()); - assertFutureThrows(TopicAuthorizationException.class, nextReset); + assertFutureThrows(nextReset, TopicAuthorizationException.class); } @Test @@ -773,7 +774,7 @@ public void testUpdatePositionsDoesNotResetPositionBeforeRetrievingOffsetsForNew // tp2 added to the assignment when the Offset Fetch request is already sent including tp1 only TopicPartition tp2 = new TopicPartition("topic2", 2); - Set initPartitions2 = Set.of(tp1, tp2); + Set initPartitions2 = new HashSet<>(Arrays.asList(tp1, tp2)); mockAssignedPartitionsMissingPositions(initPartitions2, initPartitions2, leaderAndEpoch); // tp2 requires a position, but shouldn't be reset after receiving the offset fetch response that will only diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java index 67628c513406a..405ecabcf165a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/RequestManagersTest.java @@ -26,13 +26,10 @@ import org.junit.jupiter.api.Test; -import java.util.Map; import java.util.Optional; import java.util.Properties; -import java.util.UUID; import static org.apache.kafka.test.TestUtils.requiredConsumerConfig; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; @@ -65,56 +62,10 @@ public void testMemberStateListenerRegistered() { Optional.empty(), new Metrics(), mock(OffsetCommitCallbackInvoker.class), - listener, - Optional.empty() + listener ).get(); - assertTrue(requestManagers.consumerMembershipManager.isPresent()); - assertTrue(requestManagers.streamsMembershipManager.isEmpty()); - assertTrue(requestManagers.streamsGroupHeartbeatRequestManager.isEmpty()); - - assertEquals(2, requestManagers.consumerMembershipManager.get().stateListeners().size()); - assertTrue(requestManagers.consumerMembershipManager.get().stateListeners().stream() - .anyMatch(m -> m instanceof CommitRequestManager)); - assertTrue(requestManagers.consumerMembershipManager.get().stateListeners().contains(listener)); - } - - @Test - public void testStreamMemberStateListenerRegistered() { - - final MemberStateListener listener = (memberEpoch, memberId) -> { }; - - final Properties properties = requiredConsumerConfig(); - properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "consumerGroup"); - final ConsumerConfig config = new ConsumerConfig(properties); - final GroupRebalanceConfig groupRebalanceConfig = new GroupRebalanceConfig( - config, - GroupRebalanceConfig.ProtocolType.CONSUMER + requestManagers.consumerMembershipManager.ifPresent( + membershipManager -> assertTrue(membershipManager.stateListeners().contains(listener)) ); - final RequestManagers requestManagers = RequestManagers.supplier( - new MockTime(), - new LogContext(), - mock(BackgroundEventHandler.class), - mock(ConsumerMetadata.class), - mock(SubscriptionState.class), - mock(FetchBuffer.class), - config, - groupRebalanceConfig, - mock(ApiVersions.class), - mock(FetchMetricsManager.class), - () -> mock(NetworkClientDelegate.class), - Optional.empty(), - new Metrics(), - mock(OffsetCommitCallbackInvoker.class), - listener, - Optional.of(new StreamsRebalanceData(UUID.randomUUID(), Optional.empty(), Map.of(), Map.of())) - ).get(); - assertTrue(requestManagers.streamsMembershipManager.isPresent()); - assertTrue(requestManagers.streamsGroupHeartbeatRequestManager.isPresent()); - assertTrue(requestManagers.consumerMembershipManager.isEmpty()); - - assertEquals(2, requestManagers.streamsMembershipManager.get().stateListeners().size()); - assertTrue(requestManagers.streamsMembershipManager.get().stateListeners().stream() - .anyMatch(m -> m instanceof CommitRequestManager)); - assertTrue(requestManagers.streamsMembershipManager.get().stateListeners().contains(listener)); } -} +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java index a1814fd935c9c..b117af177b17d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareCompletedFetchTest.java @@ -246,8 +246,8 @@ public void testCorruptedMessage() { // Record 1 then results in an empty batch batch = completedFetch.fetchRecords(deserializers, 10, false); - assertEquals(RecordDeserializationException.class, batch.getException().cause().getClass()); - RecordDeserializationException thrown = (RecordDeserializationException) batch.getException().cause(); + assertEquals(RecordDeserializationException.class, batch.getException().getClass()); + RecordDeserializationException thrown = (RecordDeserializationException) batch.getException(); assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.KEY, thrown.origin()); assertEquals(1, thrown.offset()); assertEquals(TOPIC_NAME, thrown.topicPartition().topic()); @@ -264,8 +264,8 @@ public void testCorruptedMessage() { // Record 2 then results in an empty batch, because record 1 has now been skipped batch = completedFetch.fetchRecords(deserializers, 10, false); - assertEquals(RecordDeserializationException.class, batch.getException().cause().getClass()); - thrown = (RecordDeserializationException) batch.getException().cause(); + assertEquals(RecordDeserializationException.class, batch.getException().getClass()); + thrown = (RecordDeserializationException) batch.getException(); assertEquals(RecordDeserializationException.DeserializationExceptionOrigin.VALUE, thrown.origin()); assertEquals(2L, thrown.offset()); assertEquals(TOPIC_NAME, thrown.topicPartition().topic()); @@ -367,7 +367,6 @@ private ShareCompletedFetch newShareCompletedFetch(ShareFetchResponseData.Partit return new ShareCompletedFetch( logContext, BufferSupplier.create(), - 0, TIP, partitionData, shareFetchMetricsAggregator, @@ -375,11 +374,11 @@ private ShareCompletedFetch newShareCompletedFetch(ShareFetchResponseData.Partit } private static Deserializers newUuidDeserializers() { - return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer(), null); + return new Deserializers<>(new UUIDDeserializer(), new UUIDDeserializer()); } private static Deserializers newStringDeserializers() { - return new Deserializers<>(new StringDeserializer(), new StringDeserializer(), null); + return new Deserializers<>(new StringDeserializer(), new StringDeserializer()); } private Records newRecords(long baseOffset, int count) { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java index a4268b7eca0a7..220483cf22d75 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumeRequestManagerTest.java @@ -38,16 +38,12 @@ import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.AuthenticationException; import org.apache.kafka.common.errors.DisconnectException; -import org.apache.kafka.common.errors.InvalidRecordStateException; -import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.errors.TopicAuthorizationException; -import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.RequestHeaderData; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; -import org.apache.kafka.common.message.ShareFetchRequestData; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; @@ -64,7 +60,6 @@ import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.requests.RequestTestUtils; import org.apache.kafka.common.requests.ShareAcknowledgeResponse; -import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.serialization.ByteArrayDeserializer; import org.apache.kafka.common.serialization.Deserializer; @@ -110,14 +105,13 @@ import static java.util.Collections.singleton; import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.apache.kafka.clients.consumer.ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG; import static org.apache.kafka.clients.consumer.internals.events.CompletableEvent.calculateDeadlineMs; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -154,11 +148,10 @@ public class ShareConsumeRequestManagerTest { private final TopicIdPartition t2ip0 = new TopicIdPartition(topicId2, t2p0); private final int validLeaderEpoch = 0; private final MetadataResponse initialUpdateResponse = - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName, 2), topicIds); + RequestTestUtils.metadataUpdateWithIds(1, singletonMap(topicName, 2), topicIds); private final long retryBackoffMs = 100; private final long requestTimeoutMs = 30000; - private final long defaultApiTimeoutMs = 60000; private MockTime time = new MockTime(1); private SubscriptionState subscriptions; private ConsumerMetadata metadata; @@ -210,7 +203,14 @@ public void testFetchNormal() { buildRequestManager(); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); @@ -224,7 +224,15 @@ public void testFetchWithAcquiredRecords() { buildRequestManager(); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, ShareCompletedFetchTest.acquiredRecords(1L, 1), Errors.NONE); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, + ShareCompletedFetchTest.acquiredRecords(1L, 1), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); @@ -239,9 +247,16 @@ public void testMultipleFetches() { buildRequestManager(); // Enabling the config so that background event is sent when the acknowledgement response is received. shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); + assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, ShareCompletedFetchTest.acquiredRecords(1L, 1), Errors.NONE); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, + ShareCompletedFetchTest.acquiredRecords(1L, 1), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); @@ -252,24 +267,35 @@ public void testMultipleFetches() { Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(1L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); + + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - sendFetchAndVerifyResponse(records, ShareCompletedFetchTest.acquiredRecords(2L, 1), Errors.NONE); + client.prepareResponse(fullFetchResponse(tip0, records, + ShareCompletedFetchTest.acquiredRecords(2L, 1), Errors.NONE, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); assertEquals(1.0, metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); completedAcknowledgements.clear(); Acknowledgements acknowledgements2 = Acknowledgements.empty(); acknowledgements2.add(2L, AcknowledgeType.REJECT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), Collections.emptyMap()); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements2)); - // Preparing a response with an acknowledgement error. - sendFetchAndVerifyResponse(records, Collections.emptyList(), Errors.NONE, Errors.INVALID_RECORD_STATE); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + // Preparing a response with an acknowledgement error. + client.prepareResponse(fullFetchResponse(tip0, records, + Collections.emptyList(), Errors.NONE, Errors.INVALID_RECORD_STATE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); assertEquals(2.0, metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); assertEquals(1.0, @@ -277,7 +303,7 @@ public void testMultipleFetches() { partitionRecords = fetchRecords(); assertTrue(partitionRecords.isEmpty()); - assertEquals(Map.of(tip0, acknowledgements2), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements2), completedAcknowledgements.get(0)); } @Test @@ -287,12 +313,21 @@ public void testCommitSync() { shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(2000))); + shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements), time.milliseconds() + 2000); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); @@ -300,7 +335,8 @@ public void testCommitSync() { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + completedAcknowledgements.clear(); } @Test @@ -310,12 +346,21 @@ public void testCommitAsync() { shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); @@ -323,7 +368,8 @@ public void testCommitAsync() { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + completedAcknowledgements.clear(); } @Test @@ -333,29 +379,35 @@ public void testServerDisconnectedOnShareAcknowledge() throws InterruptedExcepti shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); fetchRecords(); - Acknowledgements acknowledgements = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); Acknowledgements acknowledgements2 = Acknowledgements.empty(); acknowledgements2.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements2)); client.prepareResponse(null, true); networkClientDelegate.poll(time.timer(0)); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); - assertInstanceOf(UnknownServerException.class, completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Errors.UNKNOWN_SERVER_ERROR, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); completedAcknowledgements.clear(); assertEquals(1, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); @@ -365,8 +417,8 @@ public void testServerDisconnectedOnShareAcknowledge() throws InterruptedExcepti // We expect the remaining acknowledgements to be cleared due to share session epoch being set to 0. assertNull(shareConsumeRequestManager.requestStates(0)); // The callback for these unsent acknowledgements will be invoked with an error code. - assertEquals(Map.of(tip0, acknowledgements2), completedAcknowledgements.get(0)); - assertInstanceOf(ShareSessionNotFoundException.class, completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + assertEquals(Collections.singletonMap(tip0, acknowledgements2), completedAcknowledgements.get(0)); + assertEquals(Errors.SHARE_SESSION_NOT_FOUND, completedAcknowledgements.get(0).get(tip0).getAcknowledgeErrorCode()); }); // Attempt a normal fetch to check if nodesWithPendingRequests is empty. @@ -385,18 +437,27 @@ public void testAcknowledgeOnClose() { shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); Acknowledgements acknowledgements = Acknowledgements.empty(); acknowledgements.add(1L, AcknowledgeType.ACCEPT); // Piggyback acknowledgements - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); // Remaining acknowledgements sent with close(). - Acknowledgements acknowledgements2 = getAcknowledgements(2, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(2L, AcknowledgeType.ACCEPT); + acknowledgements2.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.acknowledgeOnClose(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), + shareConsumeRequestManager.acknowledgeOnClose(Collections.singletonMap(tip0, acknowledgements2), calculateDeadlineMs(time.timer(100))); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); @@ -406,7 +467,7 @@ public void testAcknowledgeOnClose() { assertEquals(1, completedAcknowledgements.size()); Acknowledgements mergedAcks = acknowledgements.merge(acknowledgements2); - mergedAcks.complete(null); + mergedAcks.setAcknowledgeErrorCode(Errors.NONE); // Verifying that all 3 offsets were acknowledged as part of the final ShareAcknowledge on close. assertEquals(mergedAcks.getAcknowledgementsTypeMap(), completedAcknowledgements.get(0).get(tip0).getAcknowledgementsTypeMap()); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); @@ -419,12 +480,21 @@ public void testAcknowledgeOnCloseWithPendingCommitAsync() { shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); shareConsumeRequestManager.acknowledgeOnClose(Collections.emptyMap(), calculateDeadlineMs(time.timer(100))); @@ -437,7 +507,8 @@ public void testAcknowledgeOnCloseWithPendingCommitAsync() { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + completedAcknowledgements.clear(); } @Test @@ -447,11 +518,21 @@ public void testAcknowledgeOnCloseWithPendingCommitSync() { shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), + shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements), calculateDeadlineMs(time.timer(100))); shareConsumeRequestManager.acknowledgeOnClose(Collections.emptyMap(), calculateDeadlineMs(time.timer(100))); @@ -465,7 +546,8 @@ public void testAcknowledgeOnCloseWithPendingCommitSync() { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); + completedAcknowledgements.clear(); } @Test @@ -474,21 +556,24 @@ public void testResultHandlerOnCommitAsync() { // Enabling the config so that background event is sent when the acknowledgement response is received. shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); ShareConsumeRequestManager.ResultHandler resultHandler = shareConsumeRequestManager.buildResultHandler(null, Optional.empty()); // Passing null acknowledgements should mean we do not send the background event at all. - resultHandler.complete(tip0, null, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_ASYNC); + resultHandler.complete(tip0, null, true); assertEquals(0, completedAcknowledgements.size()); - // Setting the request type to COMMIT_SYNC should still not send any background event + // Setting isCommitAsync to false should still not send any background event // as we have initialized remainingResults to null. - resultHandler.complete(tip0, acknowledgements, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_SYNC); + resultHandler.complete(tip0, acknowledgements, false); assertEquals(0, completedAcknowledgements.size()); // Sending non-null acknowledgements means we do send the background event - resultHandler.complete(tip0, acknowledgements, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_ASYNC); + resultHandler.complete(tip0, acknowledgements, true); assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); } @@ -498,7 +583,10 @@ public void testResultHandlerOnCommitSync() { // Enabling the config so that background event is sent when the acknowledgement response is received. shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); final CompletableFuture> future = new CompletableFuture<>(); @@ -508,16 +596,16 @@ public void testResultHandlerOnCommitSync() { ShareConsumeRequestManager.ResultHandler resultHandler = shareConsumeRequestManager.buildResultHandler(resultCount, Optional.of(future)); // We only send the background event after all results have been completed. - resultHandler.complete(tip0, acknowledgements, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_SYNC); + resultHandler.complete(tip0, acknowledgements, false); assertEquals(0, completedAcknowledgements.size()); assertFalse(future.isDone()); - resultHandler.complete(t2ip0, null, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_SYNC); + resultHandler.complete(t2ip0, null, false); assertEquals(0, completedAcknowledgements.size()); assertFalse(future.isDone()); // After third response is received, we send the background event. - resultHandler.complete(tip1, acknowledgements, ShareConsumeRequestManager.AcknowledgeRequestType.COMMIT_SYNC); + resultHandler.complete(tip1, acknowledgements, false); assertEquals(1, completedAcknowledgements.size()); assertEquals(2, completedAcknowledgements.get(0).size()); assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); @@ -550,18 +638,29 @@ public void testBatchingAcknowledgeRequestStates() { buildRequestManager(); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, buildRecords(1L, 6, 1), + ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); - Acknowledgements acknowledgements2 = getAcknowledgements(4, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements.add(4L, AcknowledgeType.ACCEPT); + acknowledgements.add(5L, AcknowledgeType.ACCEPT); + acknowledgements.add(6L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements2)); assertEquals(6, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); @@ -582,18 +681,29 @@ public void testPendingCommitAsyncBeforeCommitSync() { buildRequestManager(); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, buildRecords(1L, 6, 1), + ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); - Acknowledgements acknowledgements2 = getAcknowledgements(4, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(4L, AcknowledgeType.ACCEPT); + acknowledgements2.add(5L, AcknowledgeType.ACCEPT); + acknowledgements2.add(6L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), - calculateDeadlineMs(time.timer(60000L))); + shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements2), 60000L); assertEquals(3, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); assertEquals(1, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().size()); @@ -626,13 +736,25 @@ public void testRetryAcknowledgements() throws InterruptedException { buildRequestManager(); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT, - AcknowledgeType.ACCEPT, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, buildRecords(1L, 6, 1), + ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); + acknowledgements.add(4L, AcknowledgeType.ACCEPT); + acknowledgements.add(5L, AcknowledgeType.RELEASE); + acknowledgements.add(6L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), 60000L); + shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements), 60000L); assertNull(shareConsumeRequestManager.requestStates(0).getAsyncRequest()); assertEquals(1, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().size()); @@ -658,146 +780,29 @@ public void testRetryAcknowledgements() throws InterruptedException { assertEquals(0, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().peek().getIncompleteAcknowledgementsCount(tip0)); } - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER", "UNKNOWN_TOPIC_OR_PARTITION"}) - public void testFatalErrorsAcknowledgementResponse(Errors error) { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - client.prepareResponse(fullAcknowledgeResponse(tip0, error)); - networkClientDelegate.poll(time.timer(0)); - - // Assert these errors are not retried even if they are retriable. They are treated as fatal and a metadata update is triggered. - assertEquals(0, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getInFlightAcknowledgementsCount(tip0)); - assertEquals(0, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getIncompleteAcknowledgementsCount(tip0)); - assertEquals(1, completedAcknowledgements.size()); - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - } - - @Test - public void testRetryAcknowledgementsMultipleCommitAsync() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); - - // commitAsync() acknowledges the first 2 records. - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), calculateDeadlineMs(time, 1000L)); - - assertEquals(2, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getAcknowledgementsToSendCount(tip0)); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - assertEquals(2, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getInFlightAcknowledgementsCount(tip0)); - - // Response contains a retriable exception, so we retry. - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.REQUEST_TIMED_OUT)); - networkClientDelegate.poll(time.timer(0)); - - Acknowledgements acknowledgements1 = getAcknowledgements(3, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - // 2nd commitAsync() acknowledges the next 2 records. - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements1)), calculateDeadlineMs(time, 1000L)); - assertEquals(2, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getIncompleteAcknowledgementsCount(tip0)); - - Acknowledgements acknowledgements2 = getAcknowledgements(5, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); - - // 3rd commitAsync() acknowledges the next 2 records. - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), calculateDeadlineMs(time, 1000L)); - - time.sleep(2000L); - - // As the timer for the initial commitAsync() was 1000ms, the request times out, and we fill the callback with a timeout exception. - assertEquals(0, shareConsumeRequestManager.sendAcknowledgements()); - assertEquals(1, completedAcknowledgements.size()); - assertEquals(2, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(Errors.REQUEST_TIMED_OUT.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - completedAcknowledgements.clear(); - - // Further requests which came before the timeout are processed as expected. - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - assertEquals(4, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getInFlightAcknowledgementsCount(tip0)); - - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - - assertEquals(0, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getInFlightAcknowledgementsCount(tip0)); - assertEquals(1, completedAcknowledgements.size()); - assertEquals(4, completedAcknowledgements.get(0).get(tip0).size()); - assertNull(completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - } - @Test - public void testRetryAcknowledgementsMultipleCommitSync() { + public void testPiggybackAcknowledgementsInFlight() { buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); - - // commitSync() for the first 2 acknowledgements. - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), calculateDeadlineMs(time, 1000L)); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - - // Response contains a retriable exception. - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.REQUEST_TIMED_OUT)); - networkClientDelegate.poll(time.timer(0)); - assertEquals(2, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().peek().getIncompleteAcknowledgementsCount(tip0)); - - // We expire the commitSync request as it had a timer of 1000ms. - time.sleep(2000L); - - Acknowledgements acknowledgements1 = getAcknowledgements(3, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); - - // commitSync() for the next 4 acknowledgements. - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements1)), calculateDeadlineMs(time, 1000L)); - // We send the 2nd commitSync request, and fail the first one as timer has expired. - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - assertEquals(2, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(Errors.REQUEST_TIMED_OUT.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - completedAcknowledgements.clear(); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - // We get a successful response for the 2nd commitSync request. - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(1, completedAcknowledgements.size()); - assertEquals(4, completedAcknowledgements.get(0).get(tip0).size()); - assertNull(completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - } - - @Test - public void testPiggybackAcknowledgementsInFlight() { - buildRequestManager(); - - assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); // Reading records from the share fetch buffer. fetchRecords(); // Piggyback acknowledgements - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, sendFetches()); assertFalse(shareConsumeRequestManager.hasCompletedFetches()); @@ -807,7 +812,7 @@ public void testPiggybackAcknowledgementsInFlight() { Acknowledgements acknowledgements2 = Acknowledgements.empty(); acknowledgements2.add(3L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), Collections.emptyMap()); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements2)); client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); networkClientDelegate.poll(time.timer(0)); @@ -824,287 +829,60 @@ public void testPiggybackAcknowledgementsInFlight() { @Test public void testCommitAsyncWithSubscriptionChange() { buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName2)); - subscriptions.assignFromSubscribed(Collections.singleton(t2p0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName2, 1), - tp -> validLeaderEpoch, topicIds, false)); - - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertNull(completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); - // We should send a fetch to the newly subscribed partition. - assertEquals(1, sendFetches()); + assignFromSubscribed(singleton(tp1)); - client.prepareResponse(fullFetchResponse(t2ip0, records, acquiredRecords, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); + + assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); } @Test - public void testCommitSyncWithSubscriptionChange() { + public void testShareFetchWithSubscriptionChange() { buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName2)); - subscriptions.assignFromSubscribed(Collections.singleton(t2p0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName2, 1), - tp -> validLeaderEpoch, topicIds, false)); - - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(100))); - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertNull(completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + acknowledgements.add(1L, AcknowledgeType.RELEASE); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); - // We should send a fetch to the newly subscribed partition. - assertEquals(1, sendFetches()); + // Send acknowledgements via ShareFetch + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); + fetchRecords(); + // Subscription changes. + assignFromSubscribed(singleton(tp1)); - client.prepareResponse(fullFetchResponse(t2ip0, records, acquiredRecords, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + assertEquals(3.0, + metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); } @Test - public void testCloseWithSubscriptionChange() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName2)); - subscriptions.assignFromSubscribed(Collections.singleton(t2p0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName2, 1), - tp -> validLeaderEpoch, topicIds, false)); - - shareConsumeRequestManager.acknowledgeOnClose(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(100))); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertNull(completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - - // As we are closing, we would not send any more fetches. - assertEquals(0, sendFetches()); - } - - @Test - public void testShareFetchWithSubscriptionChange() { - buildRequestManager(); - - assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); - - // Send acknowledgements via ShareFetch - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - fetchRecords(); - // Subscription changes. - subscriptions.subscribeToShareGroup(Collections.singleton(topicName2)); - subscriptions.assignFromSubscribed(Collections.singleton(t2p0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName2, 1), - tp -> validLeaderEpoch, topicIds, false)); - - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(3.0, - metrics.metrics().get(metrics.metricInstance(shareFetchMetricsRegistry.acknowledgementSendTotal)).metricValue()); - } - - @Test - public void testShareFetchWithSubscriptionChangeMultipleNodes() { - buildRequestManager(); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(Collections.singletonList(tp0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - Node tp0Leader = metadata.fetch().leaderFor(tp0); - Node tp1Leader = metadata.fetch().leaderFor(tp1); - - assertEquals(nodeId0, tp0Leader); - assertEquals(nodeId1, tp1Leader); - - sendFetchAndVerifyResponse(records, emptyAcquiredRecords, Errors.NONE); - - Acknowledgements acknowledgements = getAcknowledgements(0, AcknowledgeType.ACCEPT, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); - - // Send acknowledgements via ShareFetch - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - fetchRecords(); - // Subscription changes. - subscriptions.assignFromSubscribed(Collections.singletonList(tp1)); - - NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); - assertEquals(2, pollResult.unsentRequests.size()); - - ShareFetchRequest.Builder builder1, builder2; - if (pollResult.unsentRequests.get(0).node().get() == nodeId0) { - builder1 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); - builder2 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(1).requestBuilder(); - assertEquals(nodeId1, pollResult.unsentRequests.get(1).node().get()); - } else { - builder1 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(1).requestBuilder(); - builder2 = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); - assertEquals(nodeId0, pollResult.unsentRequests.get(1).node().get()); - assertEquals(nodeId1, pollResult.unsentRequests.get(0).node().get()); - } - - // Verify the builder data for node0. - assertEquals(1, builder1.data().topics().size()); - ShareFetchRequestData.FetchTopic fetchTopic = builder1.data().topics().stream().findFirst().get(); - assertEquals(tip0.topicId(), fetchTopic.topicId()); - assertEquals(1, fetchTopic.partitions().size()); - ShareFetchRequestData.FetchPartition fetchPartition = fetchTopic.partitions().stream().findFirst().get(); - assertEquals(0, fetchPartition.partitionIndex()); - assertEquals(1, fetchPartition.acknowledgementBatches().size()); - assertEquals(0L, fetchPartition.acknowledgementBatches().get(0).firstOffset()); - assertEquals(2L, fetchPartition.acknowledgementBatches().get(0).lastOffset()); - - assertEquals(1, builder1.data().forgottenTopicsData().size()); - assertEquals(tip0.topicId(), builder1.data().forgottenTopicsData().get(0).topicId()); - assertEquals(1, builder1.data().forgottenTopicsData().get(0).partitions().size()); - assertEquals(0, builder1.data().forgottenTopicsData().get(0).partitions().get(0)); - - // Verify the builder data for node1. - assertEquals(1, builder2.data().topics().size()); - fetchTopic = builder2.data().topics().stream().findFirst().get(); - assertEquals(tip1.topicId(), fetchTopic.topicId()); - assertEquals(1, fetchTopic.partitions().size()); - assertEquals(1, fetchTopic.partitions().stream().findFirst().get().partitionIndex()); - } - - @Test - public void testShareFetchWithSubscriptionChangeMultipleNodesEmptyAcknowledgements() { - buildRequestManager(); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(Collections.singletonList(tp0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - Node tp0Leader = metadata.fetch().leaderFor(tp0); - Node tp1Leader = metadata.fetch().leaderFor(tp1); - - assertEquals(nodeId0, tp0Leader); - assertEquals(nodeId1, tp1Leader); - - // Send the first ShareFetch with an empty response - sendFetchAndVerifyResponse(records, emptyAcquiredRecords, Errors.NONE); - - fetchRecords(); - - // Change the subscription. - subscriptions.assignFromSubscribed(Collections.singletonList(tp1)); - - - // Now we will be sending the request to node1 only as leader for tip1 is node1. - // We do not build the request for tip0 as there are no acknowledgements to send. - NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); - assertEquals(1, pollResult.unsentRequests.size()); - assertEquals(nodeId1, pollResult.unsentRequests.get(0).node().get()); - - ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); - - assertEquals(1, builder.data().topics().size()); - ShareFetchRequestData.FetchTopic fetchTopic = builder.data().topics().stream().findFirst().get(); - assertEquals(tip1.topicId(), fetchTopic.topicId()); - assertEquals(1, fetchTopic.partitions().size()); - assertEquals(1, fetchTopic.partitions().stream().findFirst().get().partitionIndex()); - assertEquals(0, builder.data().forgottenTopicsData().size()); - } - - @Test - public void testShareFetchAndCloseMultipleNodes() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(List.of(tp0, tp1)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); - client.prepareResponse(fullFetchResponse(tip1, records, acquiredRecords, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - Acknowledgements acknowledgements1 = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - Map acknowledgementsMap = new HashMap<>(); - acknowledgementsMap.put(tip0, new NodeAcknowledgements(0, acknowledgements)); - acknowledgementsMap.put(tip1, new NodeAcknowledgements(1, acknowledgements1)); - shareConsumeRequestManager.acknowledgeOnClose(acknowledgementsMap, calculateDeadlineMs(time, 1000L)); - - assertEquals(2, shareConsumeRequestManager.sendAcknowledgements()); - - client.prepareResponse(fullAcknowledgeResponse(tip0, Errors.NONE)); - client.prepareResponse(fullAcknowledgeResponse(tip1, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(3, completedAcknowledgements.get(0).get(tip1).size()); - - assertEquals(0, shareConsumeRequestManager.sendAcknowledgements()); - assertNull(shareConsumeRequestManager.requestStates(0)); - assertNull(shareConsumeRequestManager.requestStates(1)); - } - - @Test - public void testRetryAcknowledgementsWithLeaderChange() { + public void testRetryAcknowledgementsWithLeaderChange() { buildRequestManager(); subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); @@ -1113,20 +891,30 @@ public void testRetryAcknowledgementsWithLeaderChange() { subscriptions.assignFromSubscribed(partitions); client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 1), + RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 1), tp -> validLeaderEpoch, topicIds, false)); Node nodeId0 = metadata.fetch().nodeById(0); Node nodeId1 = metadata.fetch().nodeById(1); LinkedList nodeList = new LinkedList<>(Arrays.asList(nodeId0, nodeId1)); - sendFetchAndVerifyResponse(buildRecords(1L, 6, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, buildRecords(1L, 6, 1), + ShareCompletedFetchTest.acquiredRecords(1L, 6), Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT, - AcknowledgeType.ACCEPT, AcknowledgeType.RELEASE, AcknowledgeType.ACCEPT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); + acknowledgements.add(3L, AcknowledgeType.REJECT); + acknowledgements.add(4L, AcknowledgeType.ACCEPT); + acknowledgements.add(5L, AcknowledgeType.RELEASE); + acknowledgements.add(6L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitSync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(60000L))); + shareConsumeRequestManager.commitSync(Collections.singletonMap(tip0, acknowledgements), 60000L); assertNull(shareConsumeRequestManager.requestStates(0).getAsyncRequest()); assertEquals(1, shareConsumeRequestManager.requestStates(0).getSyncRequestQueue().size()); @@ -1153,13 +941,19 @@ public void testCallbackHandlerConfig() throws InterruptedException { assignFromSubscribed(Collections.singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); + // normal fetch + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - Acknowledgements acknowledgements = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements)); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); @@ -1167,7 +961,7 @@ public void testCallbackHandlerConfig() throws InterruptedException { networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(Map.of(tip0, acknowledgements), completedAcknowledgements.get(0)); + assertEquals(Collections.singletonMap(tip0, acknowledgements), completedAcknowledgements.get(0)); completedAcknowledgements.clear(); @@ -1177,8 +971,7 @@ public void testCallbackHandlerConfig() throws InterruptedException { Acknowledgements acknowledgements2 = Acknowledgements.empty(); acknowledgements2.add(3L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements2)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(Collections.singletonMap(tip0, acknowledgements2)); TestUtils.retryOnExceptionWithTimeout(() -> assertEquals(1, shareConsumeRequestManager.sendAcknowledgements())); @@ -1207,20 +1000,26 @@ public void testAcknowledgementCommitCallbackMultiplePartitionCommitAsync() { LinkedHashMap partitionDataMap = new LinkedHashMap<>(); partitionDataMap.put(tip0, partitionDataForFetch(tip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); partitionDataMap.put(t2ip0, partitionDataForFetch(t2ip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); - client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList(), 0)); + client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList())); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(0L, AcknowledgeType.ACCEPT); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + acknowledgements.add(2L, AcknowledgeType.ACCEPT); - Acknowledgements acknowledgements2 = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); + Acknowledgements acknowledgements2 = Acknowledgements.empty(); + acknowledgements2.add(0L, AcknowledgeType.ACCEPT); + acknowledgements2.add(1L, AcknowledgeType.ACCEPT); + acknowledgements2.add(2L, AcknowledgeType.ACCEPT); - Map acks = new HashMap<>(); - acks.put(tip0, new NodeAcknowledgements(0, acknowledgements)); - acks.put(t2ip0, new NodeAcknowledgements(0, acknowledgements2)); + Map acks = new HashMap<>(); + acks.put(tip0, acknowledgements); + acks.put(t2ip0, acknowledgements2); - shareConsumeRequestManager.commitAsync(acks, calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); + shareConsumeRequestManager.commitAsync(acks); assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); @@ -1253,7 +1052,7 @@ public void testMultipleTopicsFetch() { LinkedHashMap partitionDataMap = new LinkedHashMap<>(); partitionDataMap.put(tip0, partitionDataForFetch(tip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); partitionDataMap.put(t2ip0, partitionDataForFetch(t2ip0, records, emptyAcquiredRecords, Errors.TOPIC_AUTHORIZATION_FAILED, Errors.NONE)); - client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList(), 0)); + client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList())); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); @@ -1282,7 +1081,7 @@ public void testMultipleTopicsFetchError() { LinkedHashMap partitionDataMap = new LinkedHashMap<>(); partitionDataMap.put(t2ip0, partitionDataForFetch(t2ip0, records, emptyAcquiredRecords, Errors.TOPIC_AUTHORIZATION_FAILED, Errors.NONE)); partitionDataMap.put(tip0, partitionDataForFetch(tip0, records, acquiredRecords, Errors.NONE, Errors.NONE)); - client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList(), 0)); + client.prepareResponse(ShareFetchResponse.of(Errors.NONE, 0, partitionDataMap, Collections.emptyList())); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); @@ -1299,82 +1098,6 @@ public void testMultipleTopicsFetchError() { assertThrows(NullPointerException.class, (Executable) shareFetch.records().get(t2p0)); } - @Test - public void testShareFetchInvalidResponse() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(Collections.singleton(tp0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName, 1), - tp -> validLeaderEpoch, topicIds, false)); - - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - client.prepareResponse(fullFetchResponse(t2ip0, records, acquiredRecords, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - } - - @Test - public void testShareAcknowledgeInvalidResponse() throws InterruptedException { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(Collections.singleton(tp0)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Map.of(topicName, 1), - tp -> validLeaderEpoch, topicIds, false)); - - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - fetchRecords(); - - Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1L, AcknowledgeType.ACCEPT); - - shareConsumeRequestManager.commitAsync(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), - calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); - - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - - // If a top-level error is received, we still retry the acknowledgements independent of the topic-partitions received in the response. - client.prepareResponse(acknowledgeResponseWithTopLevelError(t2ip0, Errors.LEADER_NOT_AVAILABLE)); - networkClientDelegate.poll(time.timer(0)); - - assertEquals(1, shareConsumeRequestManager.requestStates(0).getAsyncRequest().getIncompleteAcknowledgementsCount(tip0)); - - TestUtils.retryOnExceptionWithTimeout(() -> assertEquals(1, shareConsumeRequestManager.sendAcknowledgements())); - - client.prepareResponse(fullAcknowledgeResponse(t2ip0, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - - // If we do not get the expected partitions in the response, we fail these acknowledgements with InvalidRecordStateException. - assertEquals(InvalidRecordStateException.class, completedAcknowledgements.get(0).get(tip0).getAcknowledgeException().getClass()); - completedAcknowledgements.clear(); - - // Send remaining acknowledgements through piggybacking on the next fetch. - Acknowledgements acknowledgements1 = getAcknowledgements(2, - AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements1)), Collections.emptyMap()); - - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - client.prepareResponse(fullFetchResponse(t2ip0, records, acquiredRecords, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - // If we do not get the expected partitions in the response, we fail these acknowledgements with InvalidRecordStateException. - assertEquals(InvalidRecordStateException.class, completedAcknowledgements.get(0).get(tip0).getAcknowledgeException().getClass()); - } - @Test public void testCloseShouldBeIdempotent() { buildRequestManager(); @@ -1391,101 +1114,16 @@ public void testFetchError() { buildRequestManager(); assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, emptyAcquiredRecords, Errors.NOT_LEADER_OR_FOLLOWER); - - Map>> partitionRecords = fetchRecords(); - assertFalse(partitionRecords.containsKey(tp0)); - } - - @Test - public void testPiggybackAcknowledgementsOnInitialShareSessionError() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(singleton(tp0)); - - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - - NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); - assertEquals(1, pollResult.unsentRequests.size()); - ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); - assertEquals(1, builder.data().topics().size()); - // We should not add the acknowledgements as part of the request. - assertEquals(0, builder.data().topics().find(tip0.topicId()).partitions().find(0).acknowledgementBatches().size()); - - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - } - - @Test - public void testPiggybackAcknowledgementsOnInitialShareSessionErrorSubscriptionChange() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - fetchRecords(); - - // Simulate a broker restart, but no leader change, this resets share session epoch to 0. - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponse(fetchResponseWithTopLevelError(tip0, Errors.SHARE_SESSION_NOT_FOUND)); - networkClientDelegate.poll(time.timer(0)); - - // Simulate a metadata update with no topics in the response. - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), - tp -> validLeaderEpoch, null, false)); - - // The acknowledgements for the initial fetch from tip0 are processed now and sent to the background thread. - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - - assertEquals(0, completedAcknowledgements.size()); - - // Next fetch would not include any acknowledgements. - NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); - assertEquals(0, pollResult.unsentRequests.size()); - - // We should fail any waiting acknowledgements for tip-0 as it would have a share session epoch equal to 0. - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - } - - @Test - public void testPiggybackAcknowledgementsOnInitialShareSession_ShareSessionNotFound() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - assignFromSubscribed(singleton(tp0)); - sendFetchAndVerifyResponse(records, acquiredRecords, Errors.NONE); - - fetchRecords(); - - // The acknowledgements for the initial fetch from tip0 are processed now and sent to the background thread. - Acknowledgements acknowledgements = getAcknowledgements(1, AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT, AcknowledgeType.REJECT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - // We attempt to send the acknowledgements piggybacking on the fetch. assertEquals(1, sendFetches()); assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - // Simulate a broker restart, but no leader change, this resets share session epoch to 0. - client.prepareResponse(fetchResponseWithTopLevelError(tip0, Errors.SHARE_SESSION_NOT_FOUND)); + client.prepareResponse(fullFetchResponse(tip0, records, emptyAcquiredRecords, Errors.NOT_LEADER_OR_FOLLOWER)); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - // We would complete these acknowledgements with the error code from the response. - assertEquals(3, completedAcknowledgements.get(0).get(tip0).size()); - assertEquals(Errors.SHARE_SESSION_NOT_FOUND.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - - // Next fetch would proceed as expected and would not include any acknowledgements. - NetworkClientDelegate.PollResult pollResult = shareConsumeRequestManager.sendFetchesReturnPollResult(); - assertEquals(1, pollResult.unsentRequests.size()); - ShareFetchRequest.Builder builder = (ShareFetchRequest.Builder) pollResult.unsentRequests.get(0).requestBuilder(); - assertEquals(0, builder.data().topics().find(topicId).partitions().find(0).acknowledgementBatches().size()); + Map>> partitionRecords = fetchRecords(); + assertFalse(partitionRecords.containsKey(tp0)); } @Test @@ -1677,574 +1315,94 @@ private static Stream handleFetchResponseErrorSupplier() { public void testFetchDisconnected() { buildRequestManager(); - assignFromSubscribed(singleton(tp0)); - - assertEquals(1, sendFetches()); - client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE), true); - networkClientDelegate.poll(time.timer(0)); - assertEmptyFetch("Should not return records on disconnect"); - } - - @Test - public void testFetchWithLastRecordMissingFromBatch() { - buildRequestManager(); - - MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, - new SimpleRecord("0".getBytes(), "v".getBytes()), - new SimpleRecord("1".getBytes(), "v".getBytes()), - new SimpleRecord("2".getBytes(), "v".getBytes()), - new SimpleRecord(null, "value".getBytes())); - - // Remove the last record to simulate compaction - MemoryRecords.FilterResult result = records.filterTo(new MemoryRecords.RecordFilter(0, 0) { - @Override - protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { - return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false); - } - - @Override - protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { - return record.key() != null; - } - }, ByteBuffer.allocate(1024), BufferSupplier.NO_CACHING); - result.outputBuffer().flip(); - MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer()); - - assignFromSubscribed(singleton(tp0)); - assertEquals(1, sendFetches()); - client.prepareResponse(fullFetchResponse(tip0, - compactedRecords, - ShareCompletedFetchTest.acquiredRecords(0L, 3), - Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> allFetchedRecords = fetchRecords(); - assertTrue(allFetchedRecords.containsKey(tp0)); - List> fetchedRecords = allFetchedRecords.get(tp0); - assertEquals(3, fetchedRecords.size()); - - for (int i = 0; i < 3; i++) { - assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key())); - } - } - - private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { - MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, baseOffset); - for (int i = 0; i < count; i++) - builder.append(0L, "key".getBytes(), ("value-" + (firstMessageId + i)).getBytes()); - return builder.build(); - } - - @Test - public void testCorruptMessageError() { - buildRequestManager(); - assignFromSubscribed(singleton(tp0)); - - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - // Prepare a response with the CORRUPT_MESSAGE error. - client.prepareResponse(fullFetchResponse( - tip0, - buildRecords(1L, 1, 1), - ShareCompletedFetchTest.acquiredRecords(1L, 1), - Errors.CORRUPT_MESSAGE)); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - // Trigger the exception. - assertThrows(KafkaException.class, this::fetchRecords); - } - - /** - * Test the scenario that ShareFetchResponse returns with an error indicating leadership change for the partition, - * but it does not contain new leader info (defined in KIP-951). - */ - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER"}) - public void testWhenShareFetchResponseReturnsALeadershipChangeErrorButNoNewLeaderInformation(Errors error) { - buildRequestManager(); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - Set partitions = new HashSet<>(); - partitions.add(tp0); - partitions.add(tp1); - subscriptions.assignFromSubscribed(partitions); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - Node tp0Leader = metadata.fetch().leaderFor(tp0); - Node tp1Leader = metadata.fetch().leaderFor(tp1); - - Cluster startingClusterMetadata = metadata.fetch(); - assertFalse(metadata.updateRequested()); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - LinkedHashMap partitionData = new LinkedHashMap<>(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(error.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertFalse(partitionRecords.containsKey(tp1)); - - List> fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - - Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - - assertEquals(startingClusterMetadata, metadata.fetch()); - - // Validate metadata update is requested due to the leadership error - assertTrue(metadata.updateRequested()); - - // Move the leadership of tp1 onto node 1 - HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp1, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId0.id()), Optional.of(validLeaderEpoch + 1))); - LinkedList leaderNodes = new LinkedList<>(Arrays.asList(tp0Leader, tp1Leader)); - metadata.updatePartitionLeadership(partitionLeaders, leaderNodes); - - assertNotEquals(startingClusterMetadata, metadata.fetch()); - - // And now the partitions are on the same leader so only one fetch is sent - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - partitionData.clear(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(2L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); - - fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - fetchedRecords = partitionRecords.get(tp1); - assertEquals(1, fetchedRecords.size()); - } - - /** - * Test the scenario that ShareFetchResponse returns with an error indicating leadership change for the partition, - * along with new leader info (defined in KIP-951). - */ - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER"}) - public void testWhenFetchResponseReturnsWithALeadershipChangeErrorAndNewLeaderInformation(Errors error) { - buildRequestManager(); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - Set partitions = new HashSet<>(); - partitions.add(tp0); - partitions.add(tp1); - subscriptions.assignFromSubscribed(partitions); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - Node tp0Leader = metadata.fetch().leaderFor(tp0); - - Cluster startingClusterMetadata = metadata.fetch(); - assertFalse(metadata.updateRequested()); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - LinkedHashMap partitionData = new LinkedHashMap<>(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(error.code()) - .setCurrentLeader(new ShareFetchResponseData.LeaderIdAndEpoch() - .setLeaderId(tp0Leader.id()) - .setLeaderEpoch(validLeaderEpoch + 1))); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, singletonList(tp0Leader), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertFalse(partitionRecords.containsKey(tp1)); - - List> fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - - Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - - // The metadata snapshot will have been updated with the new leader information - assertNotEquals(startingClusterMetadata, metadata.fetch()); - - // Validate metadata update is still requested even though the current leader was returned - assertTrue(metadata.updateRequested()); - - // And now the partitions are on the same leader so only one fetch is sent - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - partitionData.clear(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(2L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); - - fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - fetchedRecords = partitionRecords.get(tp1); - assertEquals(1, fetchedRecords.size()); - } - - /** - * Test the scenario that the metadata indicated a change in leadership between ShareFetch requests such - * as could occur when metadata is periodically updated. - */ - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER"}) - public void testWhenLeadershipChangeBetweenShareFetchRequests(Errors error) { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - Set partitions = new HashSet<>(); - partitions.add(tp0); - partitions.add(tp1); - subscriptions.assignFromSubscribed(partitions); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - - Cluster startingClusterMetadata = metadata.fetch(); - assertFalse(metadata.updateRequested()); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - LinkedHashMap partitionData = new LinkedHashMap<>(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertFalse(partitionRecords.containsKey(tp1)); - - List> fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - - Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1L, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); - - assertEquals(startingClusterMetadata, metadata.fetch()); - - // Move the leadership of tp0 onto node 1 - HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp0, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId1.id()), Optional.of(validLeaderEpoch + 1))); - metadata.updatePartitionLeadership(partitionLeaders, List.of()); - - assertNotEquals(startingClusterMetadata, metadata.fetch()); - - // Even though the partitions are on the same leader, records were fetched on the previous leader. - // We do not send those acknowledgements to the previous leader, we fail them with NOT_LEADER_OR_FOLLOWER exception. - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(acknowledgements, completedAcknowledgements.get(0).get(tip0)); - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - - partitionData.clear(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setAcknowledgeErrorCode(error.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); - - fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - fetchedRecords = partitionRecords.get(tp1); - assertEquals(1, fetchedRecords.size()); - } - - @Test - void testLeadershipChangeAfterFetchBeforeCommitAsync() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - Set partitions = new HashSet<>(); - partitions.add(tp0); - partitions.add(tp1); - subscriptions.assignFromSubscribed(partitions); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - - Cluster startingClusterMetadata = metadata.fetch(); - assertFalse(metadata.updateRequested()); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - LinkedHashMap partitionData = new LinkedHashMap<>(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 2)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); - - List> fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); - - fetchedRecords = partitionRecords.get(tp1); - assertEquals(2, fetchedRecords.size()); - - Acknowledgements acknowledgementsTp0 = Acknowledgements.empty(); - acknowledgementsTp0.add(1L, AcknowledgeType.ACCEPT); - - Acknowledgements acknowledgementsTp1 = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); - - Map commitAcks = new HashMap<>(); - commitAcks.put(tip0, new NodeAcknowledgements(0, acknowledgementsTp0)); - commitAcks.put(tip1, new NodeAcknowledgements(1, acknowledgementsTp1)); - - // Move the leadership of tp0 onto node 1 - HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp0, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId1.id()), Optional.of(validLeaderEpoch + 1))); - metadata.updatePartitionLeadership(partitionLeaders, List.of()); - - assertNotEquals(startingClusterMetadata, metadata.fetch()); - - // We fail the acknowledgements for records which were received from node0 with NOT_LEADER_OR_FOLLOWER exception. - shareConsumeRequestManager.commitAsync(commitAcks, calculateDeadlineMs(time.timer(defaultApiTimeoutMs))); - assertEquals(1, completedAcknowledgements.get(0).size()); - assertEquals(acknowledgementsTp0, completedAcknowledgements.get(0).get(tip0)); - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - - // We only send acknowledgements for tip1 to node1. - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); - - client.prepareResponse(fullAcknowledgeResponse(tip1, Errors.NONE)); - networkClientDelegate.poll(time.timer(0)); - - assertEquals(1, completedAcknowledgements.get(1).size()); - assertEquals(acknowledgementsTp1, completedAcknowledgements.get(1).get(tip1)); - assertNull(completedAcknowledgements.get(1).get(tip1).getAcknowledgeException()); - } - - @Test - void testLeadershipChangeAfterFetchBeforeCommitSync() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); - subscriptions.assignFromSubscribed(List.of(tp0, tp1)); - - client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); - Node nodeId0 = metadata.fetch().nodeById(0); - Node nodeId1 = metadata.fetch().nodeById(1); - - Cluster startingClusterMetadata = metadata.fetch(); - assertFalse(metadata.updateRequested()); - - assertEquals(2, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); + assignFromSubscribed(singleton(tp0)); - LinkedHashMap partitionData = new LinkedHashMap<>(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); - partitionData.clear(); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 2)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); + assertEquals(1, sendFetches()); + client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, Errors.NONE), true); networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - Map>> partitionRecords = fetchRecords(); - assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); + assertEmptyFetch("Should not return records on disconnect"); + } - List> fetchedRecords = partitionRecords.get(tp0); - assertEquals(1, fetchedRecords.size()); + @Test + public void testFetchWithLastRecordMissingFromBatch() { + buildRequestManager(); - fetchedRecords = partitionRecords.get(tp1); - assertEquals(2, fetchedRecords.size()); + MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, + new SimpleRecord("0".getBytes(), "v".getBytes()), + new SimpleRecord("1".getBytes(), "v".getBytes()), + new SimpleRecord("2".getBytes(), "v".getBytes()), + new SimpleRecord(null, "value".getBytes())); - Acknowledgements acknowledgementsTp0 = Acknowledgements.empty(); - acknowledgementsTp0.add(1L, AcknowledgeType.ACCEPT); + // Remove the last record to simulate compaction + MemoryRecords.FilterResult result = records.filterTo(new MemoryRecords.RecordFilter(0, 0) { + @Override + protected BatchRetentionResult checkBatchRetention(RecordBatch batch) { + return new BatchRetentionResult(BatchRetention.DELETE_EMPTY, false); + } - Acknowledgements acknowledgementsTp1 = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); + @Override + protected boolean shouldRetainRecord(RecordBatch recordBatch, Record record) { + return record.key() != null; + } + }, ByteBuffer.allocate(1024), BufferSupplier.NO_CACHING); + result.outputBuffer().flip(); + MemoryRecords compactedRecords = MemoryRecords.readableRecords(result.outputBuffer()); - Map commitAcks = new HashMap<>(); - commitAcks.put(tip0, new NodeAcknowledgements(0, acknowledgementsTp0)); - commitAcks.put(tip1, new NodeAcknowledgements(1, acknowledgementsTp1)); + assignFromSubscribed(singleton(tp0)); + assertEquals(1, sendFetches()); + client.prepareResponse(fullFetchResponse(tip0, + compactedRecords, + ShareCompletedFetchTest.acquiredRecords(0L, 3), + Errors.NONE)); + networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - // Move the leadership of tp0 onto node 1 - HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp0, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId1.id()), Optional.of(validLeaderEpoch + 1))); - metadata.updatePartitionLeadership(partitionLeaders, List.of()); + Map>> allFetchedRecords = fetchRecords(); + assertTrue(allFetchedRecords.containsKey(tp0)); + List> fetchedRecords = allFetchedRecords.get(tp0); + assertEquals(3, fetchedRecords.size()); - assertNotEquals(startingClusterMetadata, metadata.fetch()); + for (int i = 0; i < 3; i++) { + assertEquals(Integer.toString(i), new String(fetchedRecords.get(i).key())); + } + } - // We fail the acknowledgements for records which were received from node0 with NOT_LEADER_OR_FOLLOWER exception. - shareConsumeRequestManager.commitSync(commitAcks, calculateDeadlineMs(time.timer(100))); + private MemoryRecords buildRecords(long baseOffset, int count, long firstMessageId) { + MemoryRecordsBuilder builder = MemoryRecords.builder(ByteBuffer.allocate(1024), Compression.NONE, TimestampType.CREATE_TIME, baseOffset); + for (int i = 0; i < count; i++) + builder.append(0L, "key".getBytes(), ("value-" + (firstMessageId + i)).getBytes()); + return builder.build(); + } - // Verify if the callback was invoked with the failed acknowledgements. - assertEquals(1, completedAcknowledgements.get(0).size()); - assertEquals(acknowledgementsTp0, completedAcknowledgements.get(0).get(tip0)); - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); + @Test + public void testCorruptMessageError() { + buildRequestManager(); + assignFromSubscribed(singleton(tp0)); - // We only send acknowledgements for tip1 to node1. - assertEquals(1, shareConsumeRequestManager.sendAcknowledgements()); + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponse(fullAcknowledgeResponse(tip1, Errors.NONE)); + // Prepare a response with the CORRUPT_MESSAGE error. + client.prepareResponse(fullFetchResponse( + tip0, + buildRecords(1L, 1, 1), + ShareCompletedFetchTest.acquiredRecords(1L, 1), + Errors.CORRUPT_MESSAGE)); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertEquals(1, completedAcknowledgements.get(1).size()); - assertEquals(acknowledgementsTp1, completedAcknowledgements.get(1).get(tip1)); - assertNull(completedAcknowledgements.get(1).get(tip1).getAcknowledgeException()); + // Trigger the exception. + assertThrows(KafkaException.class, this::fetchRecords); } - @Test - void testLeadershipChangeAfterFetchBeforeClose() { + /** + * Test the scenario that ShareFetchResponse returns with an error indicating leadership change for the partition, + * but it does not contain new leader info (defined in KIP-951). + */ + @ParameterizedTest + @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER"}) + public void testWhenShareFetchResponseReturnsALeadershipChangeErrorButNoNewLeaderInformation(Errors error) { buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); Set partitions = new HashSet<>(); @@ -2253,10 +1411,12 @@ void testLeadershipChangeAfterFetchBeforeClose() { subscriptions.assignFromSubscribed(partitions); client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), - tp -> validLeaderEpoch, topicIds, false)); + RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 2), + tp -> validLeaderEpoch, topicIds, false)); Node nodeId0 = metadata.fetch().nodeById(0); Node nodeId1 = metadata.fetch().nodeById(1); + Node tp0Leader = metadata.fetch().leaderFor(tp0); + Node tp1Leader = metadata.fetch().leaderFor(tp1); Cluster startingClusterMetadata = metadata.fetch(); assertFalse(metadata.updateRequested()); @@ -2266,78 +1426,86 @@ void testLeadershipChangeAfterFetchBeforeClose() { LinkedHashMap partitionData = new LinkedHashMap<>(); partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); + new ShareFetchResponseData.PartitionData() + .setPartitionIndex(tip0.topicPartition().partition()) + .setErrorCode(Errors.NONE.code()) + .setRecords(records) + .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) + .setAcknowledgeErrorCode(Errors.NONE.code())); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList()), nodeId0); partitionData.clear(); partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 2)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); + new ShareFetchResponseData.PartitionData() + .setPartitionIndex(tip1.topicPartition().partition()) + .setErrorCode(error.code())); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList()), nodeId1); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); Map>> partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); + assertFalse(partitionRecords.containsKey(tp1)); List> fetchedRecords = partitionRecords.get(tp0); assertEquals(1, fetchedRecords.size()); - fetchedRecords = partitionRecords.get(tp1); - assertEquals(2, fetchedRecords.size()); - - Acknowledgements acknowledgementsTp0 = Acknowledgements.empty(); - acknowledgementsTp0.add(1L, AcknowledgeType.ACCEPT); + Acknowledgements acknowledgements = Acknowledgements.empty(); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); - Acknowledgements acknowledgementsTp1 = getAcknowledgements(1, - AcknowledgeType.ACCEPT, AcknowledgeType.ACCEPT); + assertEquals(startingClusterMetadata, metadata.fetch()); - shareConsumeRequestManager.fetch(Map.of(tip1, new NodeAcknowledgements(1, acknowledgementsTp1)), Collections.emptyMap()); + // Validate metadata update is requested due to the leadership error + assertTrue(metadata.updateRequested()); - // Move the leadership of tp0 onto node 1 + // Move the leadership of tp1 onto node 1 HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp0, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId1.id()), Optional.of(validLeaderEpoch + 1))); - metadata.updatePartitionLeadership(partitionLeaders, List.of()); + partitionLeaders.put(tp1, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId0.id()), Optional.of(validLeaderEpoch + 1))); + LinkedList leaderNodes = new LinkedList<>(Arrays.asList(tp0Leader, tp1Leader)); + metadata.updatePartitionLeadership(partitionLeaders, leaderNodes); assertNotEquals(startingClusterMetadata, metadata.fetch()); - // We fail the acknowledgements for records which were received from node0 with NOT_LEADER_OR_FOLLOWER exception. - shareConsumeRequestManager.acknowledgeOnClose(Map.of(tip0, new NodeAcknowledgements(0, acknowledgementsTp0)), - calculateDeadlineMs(time.timer(100))); - - // Verify if the callback was invoked with the failed acknowledgements. - assertEquals(1, completedAcknowledgements.get(0).size()); - assertEquals(acknowledgementsTp0.getAcknowledgementsTypeMap(), completedAcknowledgements.get(0).get(tip0).getAcknowledgementsTypeMap()); - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.exception(), completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - completedAcknowledgements.clear(); - - // As we are closing, we still send the request to both the nodes, but with empty acknowledgements to node0, as it is no longer the leader. - assertEquals(2, shareConsumeRequestManager.sendAcknowledgements()); + // And now the partitions are on the same leader so only one fetch is sent + assertEquals(1, sendFetches()); + assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponseFrom(fullAcknowledgeResponse(tip1, Errors.NONE), nodeId1); + partitionData.clear(); + partitionData.put(tip0, + new ShareFetchResponseData.PartitionData() + .setPartitionIndex(tip0.topicPartition().partition()) + .setErrorCode(Errors.NONE.code()) + .setRecords(records) + .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(2L, 1)) + .setAcknowledgeErrorCode(Errors.NONE.code())); + partitionData.put(tip1, + new ShareFetchResponseData.PartitionData() + .setPartitionIndex(tip1.topicPartition().partition()) + .setRecords(records) + .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) + .setAcknowledgeErrorCode(Errors.NONE.code())); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList()), nodeId0); networkClientDelegate.poll(time.timer(0)); + assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - client.prepareResponseFrom(emptyAcknowledgeResponse(), nodeId0); - networkClientDelegate.poll(time.timer(0)); + partitionRecords = fetchRecords(); + assertTrue(partitionRecords.containsKey(tp0)); + assertTrue(partitionRecords.containsKey(tp1)); - assertEquals(1, completedAcknowledgements.get(0).size()); - assertEquals(acknowledgementsTp1, completedAcknowledgements.get(0).get(tip1)); - assertNull(completedAcknowledgements.get(0).get(tip1).getAcknowledgeException()); + fetchedRecords = partitionRecords.get(tp0); + assertEquals(1, fetchedRecords.size()); + fetchedRecords = partitionRecords.get(tp1); + assertEquals(1, fetchedRecords.size()); } - @Test - void testWhenLeadershipChangedAfterDisconnected() { + /** + * Test the scenario that ShareFetchResponse returns with an error indicating leadership change for the partition, + * along with new leader info (defined in KIP-951). + */ + @ParameterizedTest + @EnumSource(value = Errors.class, names = {"FENCED_LEADER_EPOCH", "NOT_LEADER_OR_FOLLOWER"}) + public void testWhenFetchResponseReturnsWithALeadershipChangeErrorAndNewLeaderInformation(Errors error) { buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); subscriptions.subscribeToShareGroup(Collections.singleton(topicName)); Set partitions = new HashSet<>(); @@ -2346,10 +1514,11 @@ void testWhenLeadershipChangedAfterDisconnected() { subscriptions.assignFromSubscribed(partitions); client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(2, Map.of(topicName, 2), + RequestTestUtils.metadataUpdateWithIds(2, singletonMap(topicName, 2), tp -> validLeaderEpoch, topicIds, false)); Node nodeId0 = metadata.fetch().nodeById(0); Node nodeId1 = metadata.fetch().nodeById(1); + Node tp0Leader = metadata.fetch().leaderFor(tp0); Cluster startingClusterMetadata = metadata.fetch(); assertFalse(metadata.updateRequested()); @@ -2365,13 +1534,16 @@ void testWhenLeadershipChangedAfterDisconnected() { .setRecords(records) .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList()), nodeId0); partitionData.clear(); partitionData.put(tip1, new ShareFetchResponseData.PartitionData() .setPartitionIndex(tip1.topicPartition().partition()) - .setErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); + .setErrorCode(error.code()) + .setCurrentLeader(new ShareFetchResponseData.LeaderIdAndEpoch() + .setLeaderId(tp0Leader.id()) + .setLeaderEpoch(validLeaderEpoch + 1))); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, singletonList(tp0Leader)), nodeId1); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); @@ -2383,16 +1555,17 @@ void testWhenLeadershipChangedAfterDisconnected() { assertEquals(1, fetchedRecords.size()); Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + acknowledgements.add(1L, AcknowledgeType.ACCEPT); + shareConsumeRequestManager.fetch(Collections.singletonMap(tip0, acknowledgements)); - assertEquals(startingClusterMetadata, metadata.fetch()); + // The metadata snapshot will have been updated with the new leader information + assertNotEquals(startingClusterMetadata, metadata.fetch()); - acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1, AcknowledgeType.ACCEPT); - shareConsumeRequestManager.fetch(Map.of(tip0, new NodeAcknowledgements(0, acknowledgements)), Collections.emptyMap()); + // Validate metadata update is still requested even though the current leader was returned + assertTrue(metadata.updateRequested()); - assertEquals(2, sendFetches()); + // And now the partitions are on the same leader so only one fetch is sent + assertEquals(1, sendFetches()); assertFalse(shareConsumeRequestManager.hasCompletedFetches()); partitionData.clear(); @@ -2400,103 +1573,35 @@ void testWhenLeadershipChangedAfterDisconnected() { new ShareFetchResponseData.PartitionData() .setPartitionIndex(tip0.topicPartition().partition()) .setErrorCode(Errors.NONE.code()) + .setRecords(records) + .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(2L, 1)) .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId0, true); - partitionData.clear(); partitionData.put(tip1, new ShareFetchResponseData.PartitionData() .setPartitionIndex(tip1.topicPartition().partition()) .setRecords(records) .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) .setAcknowledgeErrorCode(Errors.NONE.code())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - - // The node was disconnected, so the acknowledgement failed - assertInstanceOf(DisconnectException.class, completedAcknowledgements.get(0).get(tip0).getAcknowledgeException()); - completedAcknowledgements.clear(); - - partitionRecords = fetchRecords(); - assertFalse(partitionRecords.containsKey(tp0)); - assertTrue(partitionRecords.containsKey(tp1)); - - fetchedRecords = partitionRecords.get(tp1); - assertEquals(1, fetchedRecords.size()); - - // Move the leadership of tp0 onto node 1 - HashMap partitionLeaders = new HashMap<>(); - partitionLeaders.put(tp0, new Metadata.LeaderIdAndEpoch(Optional.of(nodeId1.id()), Optional.of(validLeaderEpoch + 1))); - metadata.updatePartitionLeadership(partitionLeaders, List.of()); - - assertNotEquals(startingClusterMetadata, metadata.fetch()); - - shareConsumeRequestManager.fetch(Map.of(tip1, new NodeAcknowledgements(1, acknowledgements)), Collections.emptyMap()); - - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - partitionData.clear(); - partitionData.put(tip0, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip0.topicPartition().partition()) - .setErrorCode(Errors.NONE.code()) - .setRecords(records) - .setAcquiredRecords(ShareCompletedFetchTest.acquiredRecords(1L, 1)) - .setAcknowledgeErrorCode(Errors.NONE.code())); - partitionData.put(tip1, - new ShareFetchResponseData.PartitionData() - .setPartitionIndex(tip1.topicPartition().partition())); - client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList(), 0), nodeId1); + client.prepareResponseFrom(ShareFetchResponse.of(Errors.NONE, 0, partitionData, Collections.emptyList()), nodeId0); networkClientDelegate.poll(time.timer(0)); assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - assertNull(completedAcknowledgements.get(0).get(tip1).getAcknowledgeException()); - partitionRecords = fetchRecords(); assertTrue(partitionRecords.containsKey(tp0)); - assertFalse(partitionRecords.containsKey(tp1)); + assertTrue(partitionRecords.containsKey(tp1)); fetchedRecords = partitionRecords.get(tp0); assertEquals(1, fetchedRecords.size()); - } - - @Test - public void testCloseInternalClosesShareFetchMetricsManager() throws Exception { - buildRequestManager(); - - // Define all sensor names that should be created and removed - String[] sensorNames = { - "fetch-throttle-time", - "bytes-fetched", - "records-fetched", - "fetch-latency", - "sent-acknowledgements", - "failed-acknowledgements" - }; - - // Verify that sensors exist before closing - for (String sensorName : sensorNames) { - assertNotNull(metrics.getSensor(sensorName), - "Sensor " + sensorName + " should exist before closing"); - } - - // Close the request manager - shareConsumeRequestManager.close(); - - // Verify that all sensors are removed after closing - for (String sensorName : sensorNames) { - assertNull(metrics.getSensor(sensorName), - "Sensor " + sensorName + " should be removed after closing"); - } + fetchedRecords = partitionRecords.get(tp1); + assertEquals(1, fetchedRecords.size()); } private ShareFetchResponse fetchResponseWithTopLevelError(TopicIdPartition tp, Errors error) { - Map partitions = Map.of(tp, + Map partitions = Collections.singletonMap(tp, new ShareFetchResponseData.PartitionData() .setPartitionIndex(tp.topicPartition().partition()) .setErrorCode(error.code())); - return ShareFetchResponse.of(error, 0, new LinkedHashMap<>(partitions), Collections.emptyList(), 0); + return ShareFetchResponse.of(error, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); } private ShareFetchResponse fullFetchResponse(TopicIdPartition tp, @@ -2511,9 +1616,9 @@ private ShareFetchResponse fullFetchResponse(TopicIdPartition tp, List acquiredRecords, Errors error, Errors acknowledgeError) { - Map partitions = Map.of(tp, + Map partitions = Collections.singletonMap(tp, partitionDataForFetch(tp, records, acquiredRecords, error, acknowledgeError)); - return ShareFetchResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList(), 0); + return ShareFetchResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); } private ShareAcknowledgeResponse emptyAcknowledgeResponse() { @@ -2521,14 +1626,8 @@ private ShareAcknowledgeResponse emptyAcknowledgeResponse() { return ShareAcknowledgeResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); } - private ShareAcknowledgeResponse acknowledgeResponseWithTopLevelError(TopicIdPartition tp, Errors error) { - Map partitions = Map.of(tp, - partitionDataForAcknowledge(tp, Errors.NONE)); - return ShareAcknowledgeResponse.of(error, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); - } - private ShareAcknowledgeResponse fullAcknowledgeResponse(TopicIdPartition tp, Errors error) { - Map partitions = Map.of(tp, + Map partitions = Collections.singletonMap(tp, partitionDataForAcknowledge(tp, error)); return ShareAcknowledgeResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), Collections.emptyList()); } @@ -2543,7 +1642,7 @@ private ShareAcknowledgeResponse fullAcknowledgeResponse(TopicIdPartition tp, Errors error, ShareAcknowledgeResponseData.LeaderIdAndEpoch currentLeader, List nodeEndpoints) { - Map partitions = Map.of(tp, + Map partitions = Collections.singletonMap(tp, partitionDataForAcknowledge(tp, error, currentLeader)); return ShareAcknowledgeResponse.of(Errors.NONE, 0, new LinkedHashMap<>(partitions), nodeEndpoints); } @@ -2588,15 +1687,6 @@ private void assertEmptyFetch(String reason) { assertTrue(fetch.isEmpty(), reason); } - private Acknowledgements getAcknowledgements(int startIndex, AcknowledgeType... acknowledgeTypes) { - Acknowledgements acknowledgements = Acknowledgements.empty(); - int index = startIndex; - for (AcknowledgeType type : acknowledgeTypes) { - acknowledgements.add(index++, type); - } - return acknowledgements; - } - private Map>> fetchRecords() { ShareFetch fetch = collectFetch(); if (fetch.isEmpty()) { @@ -2634,7 +1724,7 @@ private void buildRequestManager(MetricConfig metricConfig, SubscriptionState subscriptionState, LogContext logContext) { buildDependencies(metricConfig, subscriptionState, logContext); - Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer, metrics); + Deserializers deserializers = new Deserializers<>(keyDeserializer, valueDeserializer); int maxWaitMs = 0; int maxBytes = Integer.MAX_VALUE; int fetchSize = 1000; @@ -2683,7 +1773,6 @@ private void buildDependencies(MetricConfig metricConfig, properties.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); properties.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, String.valueOf(requestTimeoutMs)); properties.setProperty(ConsumerConfig.RETRY_BACKOFF_MS_CONFIG, String.valueOf(retryBackoffMs)); - properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConsumerConfig config = new ConsumerConfig(properties); networkClientDelegate = spy(new TestableNetworkClientDelegate( time, config, logContext, client, metadata, @@ -2714,19 +1803,12 @@ private ShareFetch collectFetch() { } private int sendFetches() { - fetch(new HashMap<>(), new HashMap<>()); + fetch(new HashMap<>()); NetworkClientDelegate.PollResult pollResult = poll(time.milliseconds()); networkClientDelegate.addAll(pollResult.unsentRequests); return pollResult.unsentRequests.size(); } - private NetworkClientDelegate.PollResult sendFetchesReturnPollResult() { - fetch(new HashMap<>(), new HashMap<>()); - NetworkClientDelegate.PollResult pollResult = poll(time.milliseconds()); - networkClientDelegate.addAll(pollResult.unsentRequests); - return pollResult; - } - private int sendAcknowledgements() { NetworkClientDelegate.PollResult pollResult = poll(time.milliseconds()); networkClientDelegate.addAll(pollResult.unsentRequests); @@ -2860,47 +1942,4 @@ public void add(BackgroundEvent event) { } } } - - @Test - void testFetchWithControlRecords() { - buildRequestManager(); - shareConsumeRequestManager.setAcknowledgementCommitCallbackRegistered(true); - - Map nodeAcknowledgementsMap = new HashMap<>(); - - Acknowledgements acknowledgements = Acknowledgements.empty(); - acknowledgements.add(1L, AcknowledgeType.ACCEPT); - nodeAcknowledgementsMap.put(tip0, new NodeAcknowledgements(0, acknowledgements)); - - Map nodeAcknowledgementsControlRecordMap = new HashMap<>(); - - Acknowledgements controlAcknowledgements = Acknowledgements.empty(); - controlAcknowledgements.addGap(2L); - nodeAcknowledgementsControlRecordMap.put(tip0, new NodeAcknowledgements(0, controlAcknowledgements)); - - shareConsumeRequestManager.fetch(nodeAcknowledgementsMap, nodeAcknowledgementsControlRecordMap); - - Map fetchAcksToSend = shareConsumeRequestManager.getFetchAcknowledgementsToSend(0); - assertEquals(1, fetchAcksToSend.size()); - assertEquals(AcknowledgeType.ACCEPT, fetchAcksToSend.get(tip0).get(1L)); - assertEquals(2, fetchAcksToSend.get(tip0).size()); - assertNull(fetchAcksToSend.get(tip0).get(3L)); - } - - private void sendFetchAndVerifyResponse(MemoryRecords records, - List acquiredRecords, - Errors... error) { - // normal fetch - assertEquals(1, sendFetches()); - assertFalse(shareConsumeRequestManager.hasCompletedFetches()); - - if (error.length > 1) { - client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, error[0], error[1])); - } else { - client.prepareResponse(fullFetchResponse(tip0, records, acquiredRecords, error[0])); - } - networkClientDelegate.poll(time.timer(0)); - assertTrue(shareConsumeRequestManager.hasCompletedFetches()); - } - } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java index 5dddd0772df2f..04db229c8df35 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareConsumerImplTest.java @@ -19,33 +19,24 @@ import org.apache.kafka.clients.consumer.AcknowledgementCommitCallback; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.consumer.ConsumerRecord; -import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.internals.events.ApplicationEventHandler; import org.apache.kafka.clients.consumer.internals.events.BackgroundEvent; import org.apache.kafka.clients.consumer.internals.events.CompletableEventReaper; import org.apache.kafka.clients.consumer.internals.events.ErrorEvent; import org.apache.kafka.clients.consumer.internals.events.PollEvent; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgeOnCloseEvent; -import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackEvent; import org.apache.kafka.clients.consumer.internals.events.ShareAcknowledgementCommitCallbackRegistrationEvent; -import org.apache.kafka.clients.consumer.internals.events.ShareFetchEvent; import org.apache.kafka.clients.consumer.internals.events.ShareSubscriptionChangeEvent; import org.apache.kafka.clients.consumer.internals.events.ShareUnsubscribeEvent; -import org.apache.kafka.clients.consumer.internals.events.StopFindCoordinatorOnCloseEvent; -import org.apache.kafka.clients.consumer.internals.metrics.AsyncConsumerMetrics; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InvalidGroupIdException; -import org.apache.kafka.common.errors.InvalidTopicException; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.serialization.StringDeserializer; -import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -54,34 +45,25 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; import org.mockito.ArgumentMatchers; -import org.mockito.InOrder; import org.mockito.Mockito; import java.time.Duration; import java.util.Collections; import java.util.HashSet; -import java.util.Iterator; import java.util.List; -import java.util.Map; import java.util.Properties; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Predicate; import static java.util.Collections.singleton; import static java.util.Collections.singletonList; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -90,7 +72,6 @@ import static org.mockito.ArgumentMatchers.argThat; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; @@ -155,19 +136,16 @@ private ShareConsumerImpl newConsumer( mock(ShareFetchBuffer.class), subscriptions, "group-id", - "client-id", - "implicit"); + "client-id"); } private ShareConsumerImpl newConsumer( ShareFetchBuffer fetchBuffer, SubscriptionState subscriptions, String groupId, - String clientId, - String acknowledgementMode + String clientId ) { final int defaultApiTimeoutMs = 1000; - final int requestTimeoutMs = 30000; return new ShareConsumerImpl<>( new LogContext(), @@ -183,10 +161,8 @@ private ShareConsumerImpl newConsumer( new Metrics(), subscriptions, metadata, - requestTimeoutMs, defaultApiTimeoutMs, - groupId, - acknowledgementMode + groupId ); } @@ -212,20 +188,11 @@ public void testFailConstructor() { props.put(ConsumerConfig.GROUP_ID_CONFIG, "group-id"); props.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, "an.invalid.class"); final ConsumerConfig config = new ConsumerConfig(props); - - try (LogCaptureAppender appender = LogCaptureAppender.createAndRegister()) { - KafkaException ce = assertThrows( + KafkaException ce = assertThrows( KafkaException.class, () -> newConsumer(config)); - assertTrue(ce.getMessage().contains("Failed to construct Kafka share consumer"), "Unexpected exception message: " + ce.getMessage()); - assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); - - boolean npeLogged = appender.getEvents().stream() - .flatMap(event -> event.getThrowableInfo().stream()) - .anyMatch(str -> str.contains("NullPointerException")); - - assertFalse(npeLogged, "Unexpected NullPointerException during consumer construction"); - } + assertTrue(ce.getMessage().contains("Failed to construct Kafka share consumer"), "Unexpected exception message: " + ce.getMessage()); + assertTrue(ce.getCause().getMessage().contains("Class an.invalid.class cannot be found"), "Unexpected cause: " + ce.getCause()); } @Test @@ -246,48 +213,6 @@ public void testWakeupBeforeCallingPoll() { assertDoesNotThrow(() -> consumer.poll(Duration.ZERO)); } - @Test - public void testControlRecordsOnEmptyFetch() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - // Setup subscription - final String topicName = "foo"; - final List subscriptionTopic = Collections.singletonList(topicName); - completeShareSubscriptionChangeApplicationEventSuccessfully(subscriptions, subscriptionTopic); - consumer.subscribe(subscriptionTopic); - - // Create a fetch with only GAP (no records) - final TopicIdPartition tip = new TopicIdPartition(Uuid.randomUuid(), 0, topicName); - final ShareInFlightBatch batch = new ShareInFlightBatch<>(0, tip); - // Add GAP without adding any records - batch.addGap(1); - - final ShareFetch fetchWithOnlyGap = ShareFetch.empty(); - fetchWithOnlyGap.add(tip, batch); - doReturn(fetchWithOnlyGap).when(fetchCollector).collect(any(ShareFetchBuffer.class)); - - consumer.poll(Duration.ZERO); - - // Verify that next ShareFetchEvent was sent with the acknowledgement GAP for offset 1 - verify(applicationEventHandler).add(argThat(event -> { - if (!(event instanceof ShareFetchEvent)) { - return false; - } - ShareFetchEvent fetchEvent = (ShareFetchEvent) event; - - // Regular acknowledgements map should be empty - if (!fetchEvent.acknowledgementsMap().isEmpty()) { - return false; - } - - // Control record acknowledgements map should contain the GAP for offset 1 - Map controlRecordAcks = fetchEvent.controlRecordAcknowledgements(); - return controlRecordAcks.containsKey(tip) && - controlRecordAcks.get(tip).acknowledgements().get(1L) == null; // Null indicates GAP - })); - } - @Test public void testWakeupAfterEmptyFetch() { SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); @@ -315,7 +240,7 @@ public void testWakeupAfterNonEmptyFetch() { final String topicName = "foo"; final int partition = 3; final TopicIdPartition tip = new TopicIdPartition(Uuid.randomUuid(), partition, topicName); - final ShareInFlightBatch batch = new ShareInFlightBatch<>(0, tip); + final ShareInFlightBatch batch = new ShareInFlightBatch<>(tip); batch.addRecord(new ConsumerRecord<>(topicName, partition, 2, "key1", "value1")); doAnswer(invocation -> { consumer.wakeup(); @@ -346,133 +271,6 @@ public void testFailOnClosedConsumer() { assertEquals("This consumer has already been closed.", res.getMessage()); } - @Test - public void testUnsubscribeWithTopicAuthorizationException() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - backgroundEventQueue.add(new ErrorEvent(new TopicAuthorizationException(Set.of("test-topic")))); - completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); - assertDoesNotThrow(() -> consumer.unsubscribe()); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testCloseWithInvalidTopicException() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - backgroundEventQueue.add(new ErrorEvent(new InvalidTopicException(Set.of("!test-topic")))); - completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testExplicitModeUnacknowledgedRecords() { - // Setup consumer with explicit acknowledgement mode - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer( - mock(ShareFetchBuffer.class), - subscriptions, - "group-id", - "client-id", - "explicit"); - - // Setup test data - String topic = "test-topic"; - int partition = 0; - TopicIdPartition tip = new TopicIdPartition(Uuid.randomUuid(), partition, topic); - ShareInFlightBatch batch = new ShareInFlightBatch<>(0, tip); - batch.addRecord(new ConsumerRecord<>(topic, partition, 0, "key1", "value1")); - batch.addRecord(new ConsumerRecord<>(topic, partition, 1, "key2", "value2")); - - // Setup first fetch to return records - ShareFetch firstFetch = ShareFetch.empty(); - firstFetch.add(tip, batch); - doReturn(firstFetch) - .doReturn(ShareFetch.empty()) - .when(fetchCollector) - .collect(any(ShareFetchBuffer.class)); - - // Setup subscription - List topics = Collections.singletonList(topic); - completeShareSubscriptionChangeApplicationEventSuccessfully(subscriptions, topics); - consumer.subscribe(topics); - - // First poll should succeed and return records - ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); - assertEquals(2, records.count(), "Should have received 2 records"); - - // Second poll should fail because records weren't acknowledged - IllegalStateException exception = assertThrows( - IllegalStateException.class, - () -> consumer.poll(Duration.ofMillis(100)) - ); - assertTrue( - exception.getMessage().contains("All records must be acknowledged in explicit acknowledgement mode."), - "Unexpected error message: " + exception.getMessage() - ); - - // Verify that acknowledging one record but not all still throws exception - Iterator> iterator = records.iterator(); - consumer.acknowledge(iterator.next()); - exception = assertThrows( - IllegalStateException.class, - () -> consumer.poll(Duration.ofMillis(100)) - ); - assertTrue( - exception.getMessage().contains("All records must be acknowledged in explicit acknowledgement mode."), - "Unexpected error message: " + exception.getMessage() - ); - - // Verify that after acknowledging all records, poll succeeds - consumer.acknowledge(iterator.next()); - - // Setup second fetch to return new records - ShareFetch secondFetch = ShareFetch.empty(); - ShareInFlightBatch newBatch = new ShareInFlightBatch<>(2, tip); - newBatch.addRecord(new ConsumerRecord<>(topic, partition, 2, "key3", "value3")); - newBatch.addRecord(new ConsumerRecord<>(topic, partition, 3, "key4", "value4")); - secondFetch.add(tip, newBatch); - - // Reset mock to return new records - doReturn(secondFetch) - .when(fetchCollector) - .collect(any(ShareFetchBuffer.class)); - - // Verify that poll succeeds and returns new records - ConsumerRecords newRecords = consumer.poll(Duration.ofMillis(100)); - assertEquals(2, newRecords.count(), "Should have received 2 new records"); - } - - @Test - public void testCloseWithTopicAuthorizationException() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); - assertDoesNotThrow(() -> consumer.close()); - } - - @Test - public void testStopFindCoordinatorOnClose() { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - // Setup the expected successful completion of close events - completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); - completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); - - // Close the consumer - consumer.close(); - - // Verify events are sent in correct order using InOrder - InOrder inOrder = inOrder(applicationEventHandler); - inOrder.verify(applicationEventHandler).addAndGet(any(ShareAcknowledgeOnCloseEvent.class)); - inOrder.verify(applicationEventHandler).add(any(ShareUnsubscribeEvent.class)); - inOrder.verify(applicationEventHandler).add(any(StopFindCoordinatorOnCloseEvent.class)); - } - @Test public void testVerifyApplicationEventOnShutdown() { SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); @@ -667,7 +465,7 @@ public void testEnsurePollEventSentOnConsumerPoll() { final TopicPartition tp = new TopicPartition("topic", 0); final TopicIdPartition tip = new TopicIdPartition(Uuid.randomUuid(), tp); - final ShareInFlightBatch batch = new ShareInFlightBatch<>(0, tip); + final ShareInFlightBatch batch = new ShareInFlightBatch<>(tip); batch.addRecord(new ConsumerRecord<>("topic", 0, 2, "key1", "value1")); final ShareFetch fetch = ShareFetch.empty(); fetch.add(tip, batch); @@ -689,32 +487,6 @@ public void testEnsurePollEventSentOnConsumerPoll() { verify(applicationEventHandler).addAndGet(any(ShareAcknowledgeOnCloseEvent.class)); } - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"TOPIC_AUTHORIZATION_FAILED", "GROUP_AUTHORIZATION_FAILED", "INVALID_TOPIC_EXCEPTION"}) - public void testCloseWithBackgroundQueueErrorsAfterUnsubscribe(Errors error) { - SubscriptionState subscriptions = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - consumer = newConsumer(subscriptions); - - // Complete the acknowledge on close event successfully - completeShareAcknowledgeOnCloseApplicationEventSuccessfully(); - - // Complete the unsubscribe event successfully - completeShareUnsubscribeApplicationEventSuccessfully(subscriptions); - - // Mock the applicationEventHandler to add errors to the queue after unsubscribe - doAnswer(invocation -> { - // Add errors to the queue after unsubscribe event is processed - backgroundEventQueue.add(new ErrorEvent(error.exception())); - return null; - }).when(applicationEventHandler).add(any(StopFindCoordinatorOnCloseEvent.class)); - - // Close should complete successfully despite the errors in the background queue - assertDoesNotThrow(() -> consumer.close()); - - // Verify that the background queue was processed - assertTrue(backgroundEventQueue.isEmpty(), "Background queue should be empty after close"); - } - private Properties requiredConsumerPropertiesAndGroupId(final String groupId) { final Properties props = requiredConsumerProperties(); props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); @@ -730,7 +502,7 @@ private Properties requiredConsumerProperties() { } /** - * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents} + * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer) processBackgroundEvents} * handles the case where the {@link Future} takes a bit of time to complete, but does within the timeout. */ @Test @@ -757,14 +529,14 @@ public void testProcessBackgroundEventsWithInitialDelay() throws Exception { return null; }).when(future).get(any(Long.class), any(TimeUnit.class)); - consumer.processBackgroundEvents(future, timer, e -> false); + consumer.processBackgroundEvents(future, timer); // 800 is the 1000 ms timeout (above) minus the 200 ms delay for the two incremental timeouts/retries. assertEquals(800, timer.remainingMs()); } /** - * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents} + * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer) processBackgroundEvents} * handles the case where the {@link Future} is already complete when invoked, so it doesn't have to wait. */ @Test @@ -776,31 +548,15 @@ public void testProcessBackgroundEventsWithoutDelay() { // Create a future that is already completed. CompletableFuture future = CompletableFuture.completedFuture(null); - consumer.processBackgroundEvents(future, timer, e -> false); + consumer.processBackgroundEvents(future, timer); // Because we didn't need to perform a timed get, we should still have every last millisecond // of our initial timeout. assertEquals(1000, timer.remainingMs()); } - @Test - public void testRecordBackgroundEventQueueSize() { - consumer = newConsumer(); - Metrics metrics = consumer.metricsRegistry(); - AsyncConsumerMetrics asyncConsumerMetrics = consumer.asyncConsumerMetrics(); - - ShareAcknowledgementCommitCallbackEvent event = new ShareAcknowledgementCommitCallbackEvent(Map.of()); - backgroundEventQueue.add(event); - asyncConsumerMetrics.recordBackgroundEventQueueSize(1); - - assertEquals(1, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_SHARE_METRIC_GROUP)).metricValue()); - - consumer.processBackgroundEvents(); - assertEquals(0, (double) metrics.metric(metrics.metricName("background-event-queue-size", CONSUMER_SHARE_METRIC_GROUP)).metricValue()); - } - /** - * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer, Predicate) processBackgroundEvents} + * Tests {@link ShareConsumerImpl#processBackgroundEvents(Future, Timer) processBackgroundEvents} * handles the case where the {@link Future} does not complete within the timeout. */ @Test @@ -816,7 +572,7 @@ public void testProcessBackgroundEventsTimesOut() throws Exception { throw new java.util.concurrent.TimeoutException("Intentional timeout"); }).when(future).get(any(Long.class), any(TimeUnit.class)); - assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer, e -> false)); + assertThrows(TimeoutException.class, () -> consumer.processBackgroundEvents(future, timer)); // Because we forced our mocked future to continuously time out, we should have no time remaining. assertEquals(0, timer.remainingMs()); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java index 2a06324f72a7b..2fa8d6cb6598a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchBufferTest.java @@ -33,6 +33,8 @@ import org.junit.jupiter.api.Test; import java.time.Duration; +import java.util.Arrays; +import java.util.HashSet; import java.util.Properties; import java.util.Set; import java.util.stream.Collectors; @@ -168,7 +170,6 @@ private ShareCompletedFetch completedFetch(TopicIdPartition tp) { return new ShareCompletedFetch( logContext, BufferSupplier.create(), - 0, tp, partitionData, shareFetchMetricsAggregator, @@ -179,6 +180,6 @@ private ShareCompletedFetch completedFetch(TopicIdPartition tp) { * This is a handy utility method for returning a set from a varargs array. */ private static Set partitions(TopicIdPartition... partitions) { - return Set.of(partitions); + return new HashSet<>(Arrays.asList(partitions)); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java index 194d9b2a2c459..893840de4c65c 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchCollectorTest.java @@ -236,8 +236,8 @@ private void buildDependencies() { ConsumerConfig config = new ConsumerConfig(p); + deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer()); Metrics metrics = createMetrics(config, Time.SYSTEM); - deserializers = new Deserializers<>(new StringDeserializer(), new StringDeserializer(), metrics); ShareFetchMetricsManager shareFetchMetricsManager = createShareFetchMetricsManager(metrics); Set partitionSet = new HashSet<>(); partitionSet.add(topicAPartition0.topicPartition()); @@ -347,7 +347,6 @@ private ShareCompletedFetch build() { return new ShareCompletedFetch( logContext, BufferSupplier.create(), - 0, topicAPartition0, partitionData, shareFetchMetricsAggregator, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java index 79b5deecadb56..27b44966f0a8d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareFetchMetricsManagerTest.java @@ -30,12 +30,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.io.IOException; - import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; class ShareFetchMetricsManagerTest { private static final double EPSILON = 0.0001; @@ -118,41 +114,6 @@ public void testRecordsFetched() { assertEquals(8, (double) getMetric(shareFetchMetricsRegistry.recordsPerRequestAvg).metricValue(), EPSILON); } - @Test - public void testAcknowledgements() { - shareFetchMetricsManager.recordAcknowledgementSent(5); - shareFetchMetricsManager.recordFailedAcknowledgements(2); - - assertEquals(5, (double) getMetric(shareFetchMetricsRegistry.acknowledgementSendTotal).metricValue()); - assertEquals(2, (double) getMetric(shareFetchMetricsRegistry.acknowledgementErrorTotal).metricValue()); - } - - @Test - public void testCloseRemovesAllSensors() throws IOException { - // Define all sensor names that should be created and removed - String[] sensorNames = { - "fetch-throttle-time", - "bytes-fetched", - "records-fetched", - "fetch-latency", - "sent-acknowledgements", - "failed-acknowledgements" - }; - - // Verify that sensors exist before closing - for (String sensorName : sensorNames) { - assertNotNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should exist before closing"); - } - - // Close the metrics manager - shareFetchMetricsManager.close(); - - // Verify that all sensors are removed - for (String sensorName : sensorNames) { - assertNull(metrics.getSensor(sensorName), "Sensor " + sensorName + " should be removed after closing"); - } - } - private KafkaMetric getMetric(MetricNameTemplate name) { return metrics.metric(metrics.metricInstance(name)); } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java index 8952271b250d5..f9e46571795bc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareHeartbeatRequestManagerTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.common.Node; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.TimeoutException; -import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.metrics.KafkaMetric; @@ -59,8 +58,6 @@ import java.util.Set; import java.util.concurrent.TimeUnit; -import static org.apache.kafka.clients.consumer.internals.ShareHeartbeatRequestManager.SHARE_PROTOCOL_NOT_SUPPORTED_MSG; -import static org.apache.kafka.clients.consumer.internals.ShareHeartbeatRequestManager.SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -70,7 +67,6 @@ import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.anyLong; -import static org.mockito.Mockito.clearInvocations; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.spy; @@ -95,7 +91,7 @@ public class ShareHeartbeatRequestManagerTest { private Metadata metadata; private ShareHeartbeatRequestManager heartbeatRequestManager; private ShareMembershipManager membershipManager; - private HeartbeatRequestState heartbeatRequestState; + private ShareHeartbeatRequestManager.HeartbeatRequestState heartbeatRequestState; private ShareHeartbeatRequestManager.HeartbeatState heartbeatState; private BackgroundEventHandler backgroundEventHandler; private Metrics metrics; @@ -115,7 +111,7 @@ public void setUp() { logContext = new LogContext(); ConsumerConfig config = mock(ConsumerConfig.class); - heartbeatRequestState = spy(new HeartbeatRequestState( + heartbeatRequestState = spy(new ShareHeartbeatRequestManager.HeartbeatRequestState( logContext, time, DEFAULT_HEARTBEAT_INTERVAL_MS, @@ -138,7 +134,7 @@ public void setUp() { } private void createHeartbeatRequestStateWithZeroHeartbeatInterval() { - heartbeatRequestState = spy(new HeartbeatRequestState(logContext, + heartbeatRequestState = spy(new ShareHeartbeatRequestManager.HeartbeatRequestState(logContext, time, 0, DEFAULT_RETRY_BACKOFF_MS, @@ -367,7 +363,7 @@ public void testNoCoordinator() { @ParameterizedTest @MethodSource("errorProvider") public void testHeartbeatResponseOnErrorHandling(final Errors error, final boolean isFatal) { - // Handling errors on the second heartbeat + // Handling errors on the second heartbeat time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); assertEquals(1, result.unsentRequests.size()); @@ -426,46 +422,6 @@ public void testHeartbeatResponseOnErrorHandling(final Errors error, final boole } } - @ParameterizedTest - @ValueSource(strings = {SHARE_PROTOCOL_NOT_SUPPORTED_MSG}) - public void testUnsupportedVersionGeneratedOnTheBroker(String errorMsg) { - mockResponseWithException(new UnsupportedVersionException(errorMsg), true); - - ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); - verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); - ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); - assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); - assertEquals(errorMsg, errorEvent.error().getMessage()); - clearInvocations(backgroundEventHandler); - } - - @ParameterizedTest - @ValueSource(strings = {SHARE_PROTOCOL_VERSION_NOT_SUPPORTED_MSG}) - public void testUnsupportedVersionGeneratedOnTheClient(String errorMsg) { - mockResponseWithException(new UnsupportedVersionException(errorMsg), false); - - ArgumentCaptor errorEventArgumentCaptor = ArgumentCaptor.forClass(ErrorEvent.class); - verify(backgroundEventHandler).add(errorEventArgumentCaptor.capture()); - ErrorEvent errorEvent = errorEventArgumentCaptor.getValue(); - assertInstanceOf(Errors.UNSUPPORTED_VERSION.exception().getClass(), errorEvent.error()); - assertEquals(errorMsg, errorEvent.error().getMessage()); - clearInvocations(backgroundEventHandler); - } - - private void mockResponseWithException(UnsupportedVersionException exception, boolean isFromBroker) { - time.sleep(DEFAULT_HEARTBEAT_INTERVAL_MS); - NetworkClientDelegate.PollResult result = heartbeatRequestManager.poll(time.milliseconds()); - assertEquals(1, result.unsentRequests.size()); - - // Manually completing the response to test error handling - when(subscriptions.hasAutoAssignedPartitions()).thenReturn(true); - ClientResponse response = createHeartbeatResponseWithException( - result.unsentRequests.get(0), - exception, - isFromBroker); - result.unsentRequests.get(0).handler().onComplete(response); - } - @Test public void testHeartbeatState() { mockJoiningMemberData(); @@ -690,27 +646,6 @@ private ClientResponse createHeartbeatResponse( response); } - private ClientResponse createHeartbeatResponseWithException( - final NetworkClientDelegate.UnsentRequest request, - final UnsupportedVersionException exception, - final boolean isFromBroker - ) { - ShareGroupHeartbeatResponse response = null; - if (isFromBroker) { - response = new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData().setErrorCode(Errors.UNSUPPORTED_VERSION.code())); - } - return new ClientResponse( - new RequestHeader(ApiKeys.SHARE_GROUP_HEARTBEAT, ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion(), "client-id", 1), - request.handler(), - "0", - time.milliseconds(), - time.milliseconds(), - false, - isFromBroker ? null : exception, - null, - response); - } - private ConsumerConfig config() { Properties prop = new Properties(); prop.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); @@ -731,7 +666,7 @@ private ShareHeartbeatRequestManager createHeartbeatRequestManager( final CoordinatorRequestManager coordinatorRequestManager, final ShareMembershipManager membershipManager, final ShareHeartbeatRequestManager.HeartbeatState heartbeatState, - final HeartbeatRequestState heartbeatRequestState, + final ShareHeartbeatRequestManager.HeartbeatRequestState heartbeatRequestState, final BackgroundEventHandler backgroundEventHandler) { LogContext logContext = new LogContext(); pollTimer = time.timer(DEFAULT_MAX_POLL_INTERVAL_MS); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java index e95f8dd86f26b..7c4c5684bcce0 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareMembershipManagerTest.java @@ -23,6 +23,7 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData.Assignment; +import org.apache.kafka.common.metrics.KafkaMetric; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.ShareGroupHeartbeatRequest; @@ -56,6 +57,7 @@ import java.util.stream.Stream; import static org.apache.kafka.clients.consumer.internals.AbstractMembershipManager.TOPIC_PARTITION_COMPARATOR; +import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP_PREFIX; import static org.apache.kafka.common.requests.ShareGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH; import static org.apache.kafka.common.utils.Utils.mkEntry; import static org.apache.kafka.common.utils.Utils.mkMap; @@ -312,10 +314,6 @@ private void assertTransitionToUnsubscribeOnHBSentAndWaitForResponseToCompleteLe membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData.Assignment(), membershipManager.memberId())); - assertFalse(sendLeave.isDone(), "Send leave operation should not complete until a leave response is received"); - - membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); - assertSendLeaveCompleted(membershipManager, sendLeave); } @@ -522,9 +520,6 @@ public void testHeartbeatSuccessfulResponseWhenLeavingGroupCompletesLeave() { assertFalse(leaveResult.isDone()); membershipManager.onHeartbeatSuccess(createShareGroupHeartbeatResponse(createAssignment(true), membershipManager.memberId())); - assertFalse(leaveResult.isDone()); - - membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); assertSendLeaveCompleted(membershipManager, leaveResult); } @@ -568,46 +563,10 @@ public void testIgnoreHeartbeatResponseWhenNotInGroup(MemberState state) { assertEquals(state, membershipManager.state()); verify(responseData, never()).memberId(); - // In unsubscribed, we check if we received a leave group response, so we do verify member epoch. - if (state != MemberState.UNSUBSCRIBED) { - verify(responseData, never()).memberEpoch(); - } + verify(responseData, never()).memberEpoch(); verify(responseData, never()).assignment(); } - @Test - public void testIgnoreLeaveResponseWhenNotLeavingGroup() { - ShareMembershipManager membershipManager = createMemberInStableState(); - - CompletableFuture leaveResult = membershipManager.leaveGroup(); - - // Send leave request, transitioning to UNSUBSCRIBED state - membershipManager.onHeartbeatRequestGenerated(); - assertEquals(MemberState.UNSUBSCRIBED, membershipManager.state()); - - // Receive a previous heartbeat response, which should be ignored - membershipManager.onHeartbeatSuccess(new ShareGroupHeartbeatResponse( - new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setMemberId(membershipManager.memberId()) - .setMemberEpoch(MEMBER_EPOCH) - )); - assertFalse(leaveResult.isDone()); - - // Receive a leave heartbeat response, which should unblock the consumer - membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); - assertTrue(leaveResult.isDone()); - - // Share unblocks and updates subscription - membershipManager.onSubscriptionUpdated(); - membershipManager.onConsumerPoll(); - - membershipManager.onHeartbeatSuccess(createShareGroupLeaveResponse(membershipManager.memberId())); - - assertEquals(MemberState.JOINING, membershipManager.state()); - assertEquals(0, membershipManager.memberEpoch()); - } - @Test public void testLeaveGroupWhenStateIsReconciling() { ShareMembershipManager membershipManager = mockJoinAndReceiveAssignment(false); @@ -986,6 +945,7 @@ public void testReconcileNewPartitionsAssignedWhenNoPartitionOwned() { @Test public void testReconcileNewPartitionsAssignedWhenOtherPartitionsOwned() { + // ANDREW MANGLED THIS Uuid topicId = Uuid.randomUuid(); String topicName = "topic1"; TopicIdPartition ownedPartition = new TopicIdPartition(topicId, new TopicPartition(topicName, 0)); @@ -1268,6 +1228,23 @@ private ShareMembershipManager mockStaleMember() { return membershipManager; } + private void mockPartitionOwnedAndNewPartitionAdded(String topicName, + int partitionOwned, + int partitionAdded, + CounterConsumerRebalanceListener listener, + ShareMembershipManager membershipManager) { + Uuid topicId = Uuid.randomUuid(); + TopicPartition owned = new TopicPartition(topicName, partitionOwned); + when(subscriptionState.assignedPartitions()).thenReturn(Collections.singleton(owned)); + membershipManager.updateAssignment(Collections.singletonMap(topicId, mkSortedSet(partitionOwned))); + when(metadata.topicNames()).thenReturn(Collections.singletonMap(topicId, topicName)); + when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); + when(subscriptionState.rebalanceListener()).thenReturn(Optional.ofNullable(listener)); + + // Receive assignment adding a new partition + receiveAssignment(topicId, Arrays.asList(partitionOwned, partitionAdded), membershipManager); + } + private SortedSet topicIdPartitionsSet(Uuid topicId, String topicName, int... partitions) { SortedSet topicIdPartitions = new TreeSet<>(new Utils.TopicIdPartitionComparator()); @@ -1596,6 +1573,17 @@ private void mockLeaveGroup() { doNothing().when(subscriptionState).markPendingRevocation(anySet()); } + private void mockPrepareLeaving(ShareMembershipManager membershipManager) { + String topicName = "topic1"; + TopicPartition ownedPartition = new TopicPartition(topicName, 0); + + // Start leaving group, blocked waiting for callback to complete. + when(subscriptionState.assignedPartitions()).thenReturn(Collections.singleton(ownedPartition)); + when(subscriptionState.hasAutoAssignedPartitions()).thenReturn(true); + doNothing().when(subscriptionState).markPendingRevocation(anySet()); + membershipManager.leaveGroup(); + } + private void testStateUpdateOnFatalFailure(ShareMembershipManager membershipManager) { String memberId = membershipManager.memberId(); int lastEpoch = membershipManager.memberEpoch(); @@ -1620,13 +1608,6 @@ private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponse( .setMemberEpoch(MEMBER_EPOCH) .setAssignment(assignment)); } - - private ShareGroupHeartbeatResponse createShareGroupLeaveResponse(String memberId) { - return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.NONE.code()) - .setMemberId(memberId) - .setMemberEpoch(ShareGroupHeartbeatRequest.LEAVE_GROUP_MEMBER_EPOCH)); - } private ShareGroupHeartbeatResponse createShareGroupHeartbeatResponseWithError(String memberId) { return new ShareGroupHeartbeatResponse(new ShareGroupHeartbeatResponseData() @@ -1660,6 +1641,10 @@ private ShareGroupHeartbeatResponseData.Assignment createAssignment(boolean mock )); } + private KafkaMetric getMetric(final String name) { + return metrics.metrics().get(metrics.metricName(name, CONSUMER_SHARE_METRIC_GROUP_PREFIX + "-coordinator-metrics")); + } + private ShareMembershipManager memberJoinWithAssignment() { Uuid topicId = Uuid.randomUuid(); ShareMembershipManager membershipManager = mockJoinAndReceiveAssignment(true); diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java index 07e490ae4c4d0..0ce2f349f98ee 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/ShareSessionHandlerTest.java @@ -31,8 +31,6 @@ import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; import java.util.ArrayList; import java.util.Collections; @@ -141,25 +139,26 @@ private static final class RespEntry { } } - private static LinkedHashMap buildResponseData(RespEntry... entries) { - LinkedHashMap topicIdPartitionToPartition = new LinkedHashMap<>(); + private static List respList(RespEntry... entries) { + HashMap map = new HashMap<>(); for (RespEntry entry : entries) { - ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(entry.part.partition()); - topicIdPartitionToPartition.put(entry.part, partitionData); + ShareFetchResponseData.ShareFetchableTopicResponse response = map.computeIfAbsent(entry.part, topicIdPartition -> + new ShareFetchResponseData.ShareFetchableTopicResponse().setTopicId(topicIdPartition.topicId())); + response.partitions().add(new ShareFetchResponseData.PartitionData() + .setPartitionIndex(entry.part.partition())); } - return topicIdPartitionToPartition; + return new ArrayList<>(map.values()); } - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"INVALID_SHARE_SESSION_EPOCH", "SHARE_SESSION_NOT_FOUND", "SHARE_SESSION_LIMIT_REACHED"}) - public void testShareSession(Errors error) { + @Test + public void testShareSession() { String groupId = "G1"; Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicNames, "foo"); + Uuid fooId = addTopicId(topicIds, topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); handler.addPartitionToFetch(foo0, null); @@ -171,15 +170,17 @@ public void testShareSession(Errors error) { assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); assertEquals(memberId.toString(), requestData1.memberId()); - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 0, fooId), new RespEntry("foo", 1, fooId)), - List.of(), - 0); + ShareFetchResponse resp = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 0, fooId), + new RespEntry("foo", 1, fooId)))); handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Test a fetch request which adds one partition - Uuid barId = addTopicId(topicNames, "bar"); + Uuid barId = addTopicId(topicIds, topicNames, "bar"); TopicIdPartition bar0 = new TopicIdPartition(barId, 0, "bar"); handler.addPartitionToFetch(foo0, null); handler.addPartitionToFetch(foo1, null); @@ -193,15 +194,18 @@ public void testShareSession(Errors error) { expectedToSend2.add(new TopicIdPartition(barId, 0, "bar")); assertListEquals(expectedToSend2, reqFetchList(requestData2, topicNames)); - ShareFetchResponse resp2 = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 1, fooId)), - List.of(), - 0); + ShareFetchResponse resp2 = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 1, fooId)))); handler.handleResponse(resp2, ApiKeys.SHARE_FETCH.latestVersion(true)); // A top-level error code will reset the session epoch - ShareFetchResponse resp3 = ShareFetchResponse.of(error, 0, new LinkedHashMap<>(), List.of(), 0); + ShareFetchResponse resp3 = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.INVALID_SHARE_SESSION_EPOCH.code())); handler.handleResponse(resp3, ApiKeys.SHARE_FETCH.latestVersion(true)); ShareFetchRequestData requestData4 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -224,9 +228,10 @@ public void testPartitionRemoval() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicNames, "foo"); - Uuid barId = addTopicId(topicNames, "bar"); + Uuid fooId = addTopicId(topicIds, topicNames, "foo"); + Uuid barId = addTopicId(topicIds, topicNames, "bar"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); TopicIdPartition foo1 = new TopicIdPartition(fooId, 1, "foo"); TopicIdPartition bar0 = new TopicIdPartition(barId, 0, "bar"); @@ -246,14 +251,14 @@ public void testPartitionRemoval() { assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); assertEquals(memberId.toString(), requestData1.memberId()); - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData( - new RespEntry("foo", 0, fooId), - new RespEntry("foo", 1, fooId), - new RespEntry("bar", 0, barId)), - List.of(), - 0); + ShareFetchResponse resp = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 0, fooId), + new RespEntry("foo", 1, fooId), + new RespEntry("bar", 0, barId)))); handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Test a fetch request which removes two partitions @@ -270,7 +275,9 @@ public void testPartitionRemoval() { assertListEquals(expectedToForget2, reqForgetList(requestData2, topicNames)); // A top-level error code will reset the session epoch - ShareFetchResponse resp2 = ShareFetchResponse.of(Errors.INVALID_SHARE_SESSION_EPOCH, 0, new LinkedHashMap<>(), List.of(), 0); + ShareFetchResponse resp2 = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.INVALID_SHARE_SESSION_EPOCH.code())); handler.handleResponse(resp2, ApiKeys.SHARE_FETCH.latestVersion(true)); handler.addPartitionToFetch(foo1, null); @@ -290,8 +297,9 @@ public void testTopicIdReplaced() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId1 = addTopicId(topicNames, "foo"); + Uuid topicId1 = addTopicId(topicIds, topicNames, "foo"); TopicIdPartition tp = new TopicIdPartition(topicId1, 0, "foo"); handler.addPartitionToFetch(tp, null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -301,15 +309,16 @@ public void testTopicIdReplaced() { expectedToSend1.add(new TopicIdPartition(topicId1, 0, "foo")); assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 0, topicId1)), - List.of(), - 0); + ShareFetchResponse resp = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 0, topicId1)))); handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Try to add a new topic ID - Uuid topicId2 = addTopicId(topicNames, "foo"); + Uuid topicId2 = addTopicId(topicIds, topicNames, "foo"); TopicIdPartition tp2 = new TopicIdPartition(topicId2, 0, "foo"); // Use the same data besides the topic ID handler.addPartitionToFetch(tp2, null); @@ -327,40 +336,6 @@ public void testTopicIdReplaced() { assertEquals(1, requestData2.shareSessionEpoch(), "Did not have correct epoch"); } - @Test - public void testPartitionForgottenOnAcknowledgeOnly() { - String groupId = "G1"; - Uuid memberId = Uuid.randomUuid(); - ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); - - // We want to test when all topics are removed from the session - Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicNames, "foo"); - TopicIdPartition foo0 = new TopicIdPartition(topicId, 0, "foo"); - handler.addPartitionToFetch(foo0, null); - ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); - assertMapsEqual(reqMap(foo0), handler.sessionPartitionMap()); - ArrayList expectedToSend1 = new ArrayList<>(); - expectedToSend1.add(new TopicIdPartition(topicId, 0, "foo")); - assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); - - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 0, topicId)), - List.of(), - 0); - handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); - - // Remove the topic from the session by setting acknowledgements only - this is not asking to fetch records - ShareFetchRequestData requestData2 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); - handler.addPartitionToAcknowledgeOnly(foo0, Acknowledgements.empty()); - assertEquals(Collections.singletonList(foo0), reqForgetList(requestData2, topicNames)); - - // Should have the same session ID, next epoch, and same ID usage - assertEquals(memberId.toString(), requestData2.memberId(), "Did not use same session"); - assertEquals(1, requestData2.shareSessionEpoch(), "Did not have correct epoch"); - } - @Test public void testForgottenPartitions() { String groupId = "G1"; @@ -368,8 +343,9 @@ public void testForgottenPartitions() { ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); // We want to test when all topics are removed from the session + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicNames, "foo"); + Uuid topicId = addTopicId(topicIds, topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(topicId, 0, "foo"); handler.addPartitionToFetch(foo0, null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -378,11 +354,12 @@ public void testForgottenPartitions() { expectedToSend1.add(new TopicIdPartition(topicId, 0, "foo")); assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 0, topicId)), - List.of(), - 0); + ShareFetchResponse resp = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 0, topicId)))); handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Remove the topic from the session @@ -400,8 +377,9 @@ public void testAddNewIdAfterTopicRemovedFromSession() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid topicId = addTopicId(topicNames, "foo"); + Uuid topicId = addTopicId(topicIds, topicNames, "foo"); handler.addPartitionToFetch(new TopicIdPartition(topicId, 0, "foo"), null); ShareFetchRequestData requestData1 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); assertMapsEqual(reqMap(new TopicIdPartition(topicId, 0, "foo")), @@ -410,22 +388,27 @@ public void testAddNewIdAfterTopicRemovedFromSession() { expectedToSend1.add(new TopicIdPartition(topicId, 0, "foo")); assertListEquals(expectedToSend1, reqFetchList(requestData1, topicNames)); - ShareFetchResponse resp = ShareFetchResponse.of(Errors.NONE, - 0, - buildResponseData(new RespEntry("foo", 0, topicId)), - List.of(), - 0); + ShareFetchResponse resp = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList( + new RespEntry("foo", 0, topicId)))); handler.handleResponse(resp, ApiKeys.SHARE_FETCH.latestVersion(true)); // Remove the partition from the session ShareFetchRequestData requestData2 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); assertTrue(handler.sessionPartitionMap().isEmpty()); assertTrue(requestData2.topics().isEmpty()); - ShareFetchResponse resp2 = ShareFetchResponse.of(Errors.NONE, 0, new LinkedHashMap<>(), List.of(), 0); + ShareFetchResponse resp2 = new ShareFetchResponse( + new ShareFetchResponseData() + .setErrorCode(Errors.NONE.code()) + .setThrottleTimeMs(0) + .setResponses(respList())); handler.handleResponse(resp2, ApiKeys.SHARE_FETCH.latestVersion(true)); // After the topic is removed, add a recreated topic with a new ID - Uuid topicId2 = addTopicId(topicNames, "foo"); + Uuid topicId2 = addTopicId(topicIds, topicNames, "foo"); handler.addPartitionToFetch(new TopicIdPartition(topicId2, 0, "foo"), null); ShareFetchRequestData requestData3 = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); @@ -440,8 +423,9 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { Uuid memberId = Uuid.randomUuid(); ShareSessionHandler handler = new ShareSessionHandler(LOG_CONTEXT, 1, memberId); + Map topicIds = new HashMap<>(); Map topicNames = new HashMap<>(); - Uuid fooId = addTopicId(topicNames, "foo"); + Uuid fooId = addTopicId(topicIds, topicNames, "foo"); TopicIdPartition foo0 = new TopicIdPartition(fooId, 0, "foo"); Acknowledgements acknowledgements = Acknowledgements.empty(); @@ -458,7 +442,7 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { ShareFetchRequestData requestData = handler.newShareFetchBuilder(groupId, fetchConfig).build().data(); // We should have cleared the unsent acknowledgements before this ShareFetch. - assertEquals(0, requestData.topics().stream().findFirst().get().partitions().stream().findFirst().get().acknowledgementBatches().size()); + assertEquals(0, requestData.topics().get(0).partitions().get(0).acknowledgementBatches().size()); ArrayList expectedToSend1 = new ArrayList<>(); expectedToSend1.add(new TopicIdPartition(fooId, 1, "foo")); @@ -466,7 +450,9 @@ public void testNextAcknowledgementsClearedOnInvalidRequest() { assertEquals(memberId.toString(), requestData.memberId()); } - private Uuid addTopicId(Map topicNames, String name) { + private Uuid addTopicId(Map topicIds, Map topicNames, String name) { + // If the same topic name is added more than once, the latest mapping will be in the + // topicIds, but all mappings will be in topicNames. This is needed in the replace tests. Uuid id = Uuid.randomUuid(); topicNames.put(id, name); return id; diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java index 4d4a725d45c49..f697990b54425 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/SubscriptionStateTest.java @@ -26,7 +26,6 @@ import org.apache.kafka.common.IsolationLevel; import org.apache.kafka.common.Node; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.utils.LogContext; @@ -34,15 +33,16 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collection; -import java.util.List; +import java.util.Collections; +import java.util.HashSet; import java.util.Optional; import java.util.Set; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.LongSupplier; -import java.util.function.Predicate; import java.util.regex.Pattern; +import static java.util.Collections.singleton; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH; import static org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -64,14 +64,14 @@ public class SubscriptionStateTest { @Test public void partitionAssignment() { - state.assignFromUser(Set.of(tp0)); - assertEquals(Set.of(tp0), state.assignedPartitions()); + state.assignFromUser(singleton(tp0)); + assertEquals(singleton(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); assertFalse(state.hasAllFetchPositions()); state.seek(tp0, 1); assertTrue(state.isFetchable(tp0)); assertEquals(1L, state.position(tp0).offset); - state.assignFromUser(Set.of()); + state.assignFromUser(Collections.emptySet()); assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); assertFalse(state.isAssigned(tp0)); @@ -80,7 +80,7 @@ public void partitionAssignment() { @Test public void partitionAssignmentChangeOnTopicSubscription() { - state.assignFromUser(Set.of(tp0, tp1)); + state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); // assigned partitions should immediately change assertEquals(2, state.assignedPartitions().size()); assertEquals(2, state.numAssignedPartitions()); @@ -92,20 +92,20 @@ public void partitionAssignmentChangeOnTopicSubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(t1p0))); - state.assignFromSubscribed(Set.of(t1p0)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(t1p0))); + state.assignFromSubscribed(singleton(t1p0)); // assigned partitions should immediately change - assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertEquals(singleton(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged - assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertEquals(singleton(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -114,74 +114,26 @@ public void partitionAssignmentChangeOnTopicSubscription() { assertEquals(0, state.numAssignedPartitions()); } - @Test - public void testIsFetchableOnManualAssignment() { - state.assignFromUser(Set.of(tp0, tp1)); - assertAssignedPartitionIsFetchable(); - } - - @Test - public void testIsFetchableOnAutoAssignment() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribed(Set.of(tp0, tp1)); - assertAssignedPartitionIsFetchable(); - } - - private void assertAssignedPartitionIsFetchable() { - assertEquals(2, state.assignedPartitions().size()); - assertTrue(state.assignedPartitions().contains(tp0)); - assertTrue(state.assignedPartitions().contains(tp1)); - - assertFalse(state.isFetchable(tp0), "Should not be fetchable without a valid position"); - assertFalse(state.isFetchable(tp1), "Should not be fetchable without a valid position"); - - state.seek(tp0, 1); - state.seek(tp1, 1); - - assertTrue(state.isFetchable(tp0)); - assertTrue(state.isFetchable(tp1)); - } - - @Test - public void testIsFetchableConsidersExplicitTopicSubscription() { - state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); - state.assignFromSubscribed(Set.of(t1p0)); - state.seek(t1p0, 1); - - assertEquals(Set.of(t1p0), state.assignedPartitions()); - assertTrue(state.isFetchable(t1p0)); - - // Change subscription. Assigned partitions should remain unchanged but not fetchable. - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - assertEquals(Set.of(t1p0), state.assignedPartitions()); - assertFalse(state.isFetchable(t1p0), "Assigned partitions not in the subscription should not be fetchable"); - - // Unsubscribe. Assigned partitions should be cleared and not fetchable. - state.unsubscribe(); - assertTrue(state.assignedPartitions().isEmpty()); - assertFalse(state.isFetchable(t1p0)); - } - @Test public void testGroupSubscribe() { - state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); - assertEquals(Set.of(topic1), state.metadataTopics()); + state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); + assertEquals(singleton(topic1), state.metadataTopics()); - assertFalse(state.groupSubscribe(Set.of(topic1))); - assertEquals(Set.of(topic1), state.metadataTopics()); + assertFalse(state.groupSubscribe(singleton(topic1))); + assertEquals(singleton(topic1), state.metadataTopics()); assertTrue(state.groupSubscribe(Set.of(topic, topic1))); assertEquals(Set.of(topic, topic1), state.metadataTopics()); // `groupSubscribe` does not accumulate - assertFalse(state.groupSubscribe(Set.of(topic1))); - assertEquals(Set.of(topic1), state.metadataTopics()); + assertFalse(state.groupSubscribe(singleton(topic1))); + assertEquals(singleton(topic1), state.metadataTopics()); - state.subscribe(Set.of("anotherTopic"), Optional.of(rebalanceListener)); + state.subscribe(singleton("anotherTopic"), Optional.of(rebalanceListener)); assertEquals(Set.of(topic1, "anotherTopic"), state.metadataTopics()); - assertFalse(state.groupSubscribe(Set.of("anotherTopic"))); - assertEquals(Set.of("anotherTopic"), state.metadataTopics()); + assertFalse(state.groupSubscribe(singleton("anotherTopic"))); + assertEquals(singleton("anotherTopic"), state.metadataTopics()); } @Test @@ -191,44 +143,44 @@ public void partitionAssignmentChangeOnPatternSubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.subscribeFromPattern(Set.of(topic)); + state.subscribeFromPattern(Collections.singleton(topic)); // assigned partitions should remain unchanged assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); - state.assignFromSubscribed(Set.of(tp1)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); + state.assignFromSubscribed(singleton(tp1)); // assigned partitions should immediately change - assertEquals(Set.of(tp1), state.assignedPartitions()); + assertEquals(singleton(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(Set.of(topic), state.subscription()); + assertEquals(singleton(topic), state.subscription()); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(t1p0))); - state.assignFromSubscribed(Set.of(t1p0)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(t1p0))); + state.assignFromSubscribed(singleton(t1p0)); // assigned partitions should immediately change - assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertEquals(singleton(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(Set.of(topic), state.subscription()); + assertEquals(singleton(topic), state.subscription()); state.subscribe(Pattern.compile(".*t"), Optional.of(rebalanceListener)); // assigned partitions should remain unchanged - assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertEquals(singleton(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - state.subscribeFromPattern(Set.of(topic)); + state.subscribeFromPattern(singleton(topic)); // assigned partitions should remain unchanged - assertEquals(Set.of(t1p0), state.assignedPartitions()); + assertEquals(singleton(t1p0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); - state.assignFromSubscribed(Set.of(tp0)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); + state.assignFromSubscribed(singleton(tp0)); // assigned partitions should immediately change - assertEquals(Set.of(tp0), state.assignedPartitions()); + assertEquals(singleton(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); - assertEquals(Set.of(topic), state.subscription()); + assertEquals(singleton(topic), state.subscription()); state.unsubscribe(); // assigned partitions should immediately change @@ -246,10 +198,10 @@ public void verifyAssignmentId() { state.unsubscribe(); assertEquals(2, state.assignmentId()); - assertEquals(Set.of(), state.assignedPartitions()); + assertEquals(Collections.emptySet(), state.assignedPartitions()); Set autoAssignment = Set.of(t1p0); - state.subscribe(Set.of(topic1), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic1), Optional.of(rebalanceListener)); assertTrue(state.checkAssignmentMatchedSubscription(autoAssignment)); state.assignFromSubscribed(autoAssignment); assertEquals(3, state.assignmentId()); @@ -258,7 +210,7 @@ public void verifyAssignmentId() { @Test public void partitionReset() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(singleton(tp0)); state.seek(tp0, 5); assertEquals(5L, state.position(tp0).offset); state.requestOffsetReset(tp0); @@ -274,29 +226,29 @@ public void partitionReset() { @Test public void topicSubscription() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); assertEquals(1, state.subscription().size()); assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); assertTrue(state.hasAutoAssignedPartitions()); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); - state.assignFromSubscribed(Set.of(tp0)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); + state.assignFromSubscribed(singleton(tp0)); state.seek(tp0, 1); assertEquals(1L, state.position(tp0).offset); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); - state.assignFromSubscribed(Set.of(tp1)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); + state.assignFromSubscribed(singleton(tp1)); assertTrue(state.isAssigned(tp1)); assertFalse(state.isAssigned(tp0)); assertFalse(state.isFetchable(tp1)); - assertEquals(Set.of(tp1), state.assignedPartitions()); + assertEquals(singleton(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); } @Test public void partitionPause() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(singleton(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); state.pause(tp0); @@ -307,10 +259,10 @@ public void partitionPause() { @Test public void testMarkingPartitionPending() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(singleton(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); - state.markPendingRevocation(Set.of(tp0)); + state.markPendingRevocation(singleton(tp0)); assertFalse(state.isFetchable(tp0)); assertFalse(state.isPaused(tp0)); } @@ -318,17 +270,16 @@ public void testMarkingPartitionPending() { @Test public void testAssignedPartitionsAwaitingCallbackKeepPositionDefinedInCallback() { // New partition assigned. Should not be fetchable or initializing positions. - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); - assertEquals(Set.of(tp0.topic()), state.subscription()); // Simulate callback setting position to start fetching from state.seek(tp0, 100); // Callback completed. Partition should be fetchable, and should not require // initializing positions (position already defined in the callback) - state.enablePartitionsAwaitingCallback(Set.of(tp0)); + state.enablePartitionsAwaitingCallback(singleton(tp0)); assertEquals(0, state.initializingPartitions().size()); assertTrue(state.isFetchable(tp0)); assertTrue(state.hasAllFetchPositions()); @@ -338,14 +289,13 @@ public void testAssignedPartitionsAwaitingCallbackKeepPositionDefinedInCallback( @Test public void testAssignedPartitionsAwaitingCallbackInitializePositionsWhenCallbackCompletes() { // New partition assigned. Should not be fetchable or initializing positions. - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); - assertEquals(Set.of(tp0.topic()), state.subscription()); // Callback completed (without updating positions). Partition should require initializing // positions, and start fetching once a valid position is set. - state.enablePartitionsAwaitingCallback(Set.of(tp0)); + state.enablePartitionsAwaitingCallback(singleton(tp0)); assertEquals(1, state.initializingPartitions().size()); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); @@ -356,23 +306,22 @@ public void testAssignedPartitionsAwaitingCallbackInitializePositionsWhenCallbac @Test public void testAssignedPartitionsAwaitingCallbackDoesNotAffectPreviouslyOwnedPartitions() { // First partition assigned and callback completes. - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + state.assignFromSubscribedAwaitingCallback(singleton(tp0), singleton(tp0)); assertAssignmentAppliedAwaitingCallback(tp0); - assertEquals(Set.of(tp0.topic()), state.subscription()); - state.enablePartitionsAwaitingCallback(Set.of(tp0)); + state.enablePartitionsAwaitingCallback(singleton(tp0)); state.seek(tp0, 100); assertTrue(state.isFetchable(tp0)); // New partition added to the assignment. Owned partitions should continue to be // fetchable, while the newly added should not be fetchable until callback completes. - state.assignFromSubscribedAwaitingCallback(Set.of(tp0, tp1), Set.of(tp1)); + state.assignFromSubscribedAwaitingCallback(Set.of(tp0, tp1), singleton(tp1)); assertTrue(state.isFetchable(tp0)); assertFalse(state.isFetchable(tp1)); assertEquals(1, state.initializingPartitions().size()); // Callback completed. Added partition be initializing positions and become fetchable when it gets one. - state.enablePartitionsAwaitingCallback(Set.of(tp1)); + state.enablePartitionsAwaitingCallback(singleton(tp1)); assertEquals(1, state.initializingPartitions().size()); assertEquals(tp1, state.initializingPartitions().iterator().next()); state.seek(tp1, 200); @@ -380,8 +329,9 @@ public void testAssignedPartitionsAwaitingCallbackDoesNotAffectPreviouslyOwnedPa } private void assertAssignmentAppliedAwaitingCallback(TopicPartition topicPartition) { - assertEquals(Set.of(topicPartition), state.assignedPartitions()); + assertEquals(singleton(topicPartition), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); + assertEquals(singleton(topicPartition.topic()), state.subscription()); assertFalse(state.isFetchable(topicPartition)); assertEquals(1, state.initializingPartitions().size()); @@ -390,9 +340,9 @@ private void assertAssignmentAppliedAwaitingCallback(TopicPartition topicPartiti @Test public void invalidPositionUpdate() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp0))); - state.assignFromSubscribed(Set.of(tp0)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp0))); + state.assignFromSubscribed(singleton(tp0)); assertThrows(IllegalStateException.class, () -> state.position(tp0, new SubscriptionState.FetchPosition(0, Optional.empty(), leaderAndEpoch))); @@ -400,15 +350,15 @@ public void invalidPositionUpdate() { @Test public void cantAssignPartitionForUnsubscribedTopics() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - assertFalse(state.checkAssignmentMatchedSubscription(List.of(t1p0))); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + assertFalse(state.checkAssignmentMatchedSubscription(Collections.singletonList(t1p0))); } @Test public void cantAssignPartitionForUnmatchedPattern() { state.subscribe(Pattern.compile(".*t"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(Set.of(topic)); - assertFalse(state.checkAssignmentMatchedSubscription(List.of(t1p0))); + state.subscribeFromPattern(Collections.singleton(topic)); + assertFalse(state.checkAssignmentMatchedSubscription(Collections.singletonList(t1p0))); } @Test @@ -419,32 +369,32 @@ public void cantChangePositionForNonAssignedPartition() { @Test public void cantSubscribeTopicAndPattern() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); assertThrows(IllegalStateException.class, () -> state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener))); } @Test public void cantSubscribePartitionAndPattern() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(singleton(tp0)); assertThrows(IllegalStateException.class, () -> state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener))); } @Test public void cantSubscribePatternAndTopic() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - assertThrows(IllegalStateException.class, () -> state.subscribe(Set.of(topic), Optional.of(rebalanceListener))); + assertThrows(IllegalStateException.class, () -> state.subscribe(singleton(topic), Optional.of(rebalanceListener))); } @Test public void cantSubscribePatternAndPartition() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - assertThrows(IllegalStateException.class, () -> state.assignFromUser(Set.of(tp0))); + assertThrows(IllegalStateException.class, () -> state.assignFromUser(singleton(tp0))); } @Test public void patternSubscription() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(Set.of(topic, topic1)); + state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1))); assertEquals(2, state.subscription().size(), "Expected subscribed topics count is incorrect"); } @@ -454,75 +404,6 @@ public void testSubscribeToRe2JPattern() { state.subscribe(new SubscriptionPattern(pattern), Optional.of(rebalanceListener)); assertTrue(state.toString().contains("type=AUTO_PATTERN_RE2J")); assertTrue(state.toString().contains("subscribedPattern=" + pattern)); - assertTrue(state.assignedTopicIds().isEmpty()); - } - - @Test - public void testIsAssignedFromRe2j() { - assertFalse(state.isAssignedFromRe2j(null)); - Uuid assignedUuid = Uuid.randomUuid(); - assertFalse(state.isAssignedFromRe2j(assignedUuid)); - - state.subscribe(new SubscriptionPattern("foo.*"), Optional.empty()); - assertTrue(state.hasRe2JPatternSubscription()); - assertFalse(state.isAssignedFromRe2j(assignedUuid)); - - state.setAssignedTopicIds(Set.of(assignedUuid)); - assertTrue(state.isAssignedFromRe2j(assignedUuid)); - - state.unsubscribe(); - assertFalse(state.isAssignedFromRe2j(assignedUuid)); - assertFalse(state.hasRe2JPatternSubscription()); - - } - - @Test - public void testAssignedPartitionsWithTopicIdsForRe2Pattern() { - state.subscribe(new SubscriptionPattern("t.*"), Optional.of(rebalanceListener)); - assertTrue(state.assignedTopicIds().isEmpty()); - - TopicIdPartitionSet reconciledAssignmentFromRegex = new TopicIdPartitionSet(); - reconciledAssignmentFromRegex.addAll(Uuid.randomUuid(), topic, Set.of(0)); - state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); - assertAssignmentAppliedAwaitingCallback(tp0); - - // Simulate callback setting position to start fetching from - state.seek(tp0, 100); - - // Callback completed. Partition should be fetchable, from the position previously defined - state.enablePartitionsAwaitingCallback(Set.of(tp0)); - assertEquals(0, state.initializingPartitions().size()); - assertTrue(state.isFetchable(tp0)); - assertTrue(state.hasAllFetchPositions()); - assertEquals(100L, state.position(tp0).offset); - } - - @Test - public void testAssignedTopicIdsPreservedWhenReconciliationCompletes() { - state.subscribe(new SubscriptionPattern("t.*"), Optional.of(rebalanceListener)); - assertTrue(state.assignedTopicIds().isEmpty()); - - // First assignment received from coordinator - Uuid firstAssignedUuid = Uuid.randomUuid(); - state.setAssignedTopicIds(Set.of(firstAssignedUuid)); - - // Second assignment received from coordinator (while the 1st still be reconciling) - Uuid secondAssignedUuid = Uuid.randomUuid(); - state.setAssignedTopicIds(Set.of(firstAssignedUuid, secondAssignedUuid)); - - // First reconciliation completes and updates the subscription state - state.assignFromSubscribedAwaitingCallback(Set.of(tp0), Set.of(tp0)); - - // First assignment should have been applied - assertAssignmentAppliedAwaitingCallback(tp0); - - // Assigned topic IDs should still have both topics (one reconciled, one not reconciled yet) - assertEquals( - Set.of(firstAssignedUuid, secondAssignedUuid), - state.assignedTopicIds(), - "Updating the subscription state when a reconciliation completes " + - "should not overwrite assigned topics that have not been reconciled yet" - ); } @Test @@ -553,29 +434,29 @@ public void testSubscriptionPattern() { @Test public void unsubscribeUserAssignment() { - state.assignFromUser(Set.of(tp0, tp1)); + state.assignFromUser(new HashSet<>(Arrays.asList(tp0, tp1))); state.unsubscribe(); - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); - assertEquals(Set.of(topic), state.subscription()); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); + assertEquals(singleton(topic), state.subscription()); } @Test public void unsubscribeUserSubscribe() { - state.subscribe(Set.of(topic), Optional.of(rebalanceListener)); + state.subscribe(singleton(topic), Optional.of(rebalanceListener)); state.unsubscribe(); - state.assignFromUser(Set.of(tp0)); - assertEquals(Set.of(tp0), state.assignedPartitions()); + state.assignFromUser(singleton(tp0)); + assertEquals(singleton(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); } @Test public void unsubscription() { state.subscribe(Pattern.compile(".*"), Optional.of(rebalanceListener)); - state.subscribeFromPattern(Set.of(topic, topic1)); - assertTrue(state.checkAssignmentMatchedSubscription(Set.of(tp1))); - state.assignFromSubscribed(Set.of(tp1)); + state.subscribeFromPattern(new HashSet<>(Arrays.asList(topic, topic1))); + assertTrue(state.checkAssignmentMatchedSubscription(singleton(tp1))); + state.assignFromSubscribed(singleton(tp1)); - assertEquals(Set.of(tp1), state.assignedPartitions()); + assertEquals(singleton(tp1), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -583,8 +464,8 @@ public void unsubscription() { assertTrue(state.assignedPartitions().isEmpty()); assertEquals(0, state.numAssignedPartitions()); - state.assignFromUser(Set.of(tp0)); - assertEquals(Set.of(tp0), state.assignedPartitions()); + state.assignFromUser(singleton(tp0)); + assertEquals(singleton(tp0), state.assignedPartitions()); assertEquals(1, state.numAssignedPartitions()); state.unsubscribe(); @@ -595,15 +476,15 @@ public void unsubscription() { @Test public void testPreferredReadReplicaLease() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); // Default state assertFalse(state.preferredReadReplica(tp0, 0L).isPresent()); // Set the preferred replica with lease state.updatePreferredReadReplica(tp0, 42, () -> 10L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 9L), value -> assertEquals(42, value.intValue())); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 10L), value -> assertEquals(42, value.intValue())); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 9L), value -> assertEquals(value.intValue(), 42)); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 10L), value -> assertEquals(value.intValue(), 42)); assertFalse(state.preferredReadReplica(tp0, 11L).isPresent()); // Unset the preferred replica @@ -613,20 +494,20 @@ public void testPreferredReadReplicaLease() { // Set to new preferred replica with lease state.updatePreferredReadReplica(tp0, 43, () -> 20L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 11L), value -> assertEquals(43, value.intValue())); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 20L), value -> assertEquals(43, value.intValue())); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 11L), value -> assertEquals(value.intValue(), 43)); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 20L), value -> assertEquals(value.intValue(), 43)); assertFalse(state.preferredReadReplica(tp0, 21L).isPresent()); // Set to new preferred replica without clearing first state.updatePreferredReadReplica(tp0, 44, () -> 30L); - TestUtils.assertOptional(state.preferredReadReplica(tp0, 30L), value -> assertEquals(44, value.intValue())); + TestUtils.assertOptional(state.preferredReadReplica(tp0, 30L), value -> assertEquals(value.intValue(), 44)); assertFalse(state.preferredReadReplica(tp0, 31L).isPresent()); } @Test public void testSeekUnvalidatedWithNoOffsetEpoch() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); // Seek with no offset epoch requires no validation no matter what the current leader is state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.empty(), @@ -650,7 +531,7 @@ public void testSeekUnvalidatedWithNoOffsetEpoch() { @Test public void testSeekUnvalidatedWithNoEpochClearsAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); // Seek with no offset epoch requires no validation no matter what the current leader is state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.of(2), @@ -670,7 +551,7 @@ public void testSeekUnvalidatedWithOffsetEpoch() { ApiVersions apiVersions = new ApiVersions(); apiVersions.update(broker1.idString(), NodeApiVersions.create()); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(0L, Optional.of(2), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(5)))); @@ -699,7 +580,7 @@ public void testSeekUnvalidatedWithOffsetEpoch() { @Test public void testSeekValidatedShouldClearAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -717,7 +598,7 @@ public void testSeekValidatedShouldClearAwaitingValidation() { @Test public void testCompleteValidationShouldClearAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -734,7 +615,7 @@ public void testCompleteValidationShouldClearAwaitingValidation() { @Test public void testOffsetResetWhileAwaitingValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -748,7 +629,7 @@ public void testOffsetResetWhileAwaitingValidation() { @Test public void testMaybeCompleteValidation() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -775,7 +656,7 @@ public void testMaybeValidatePositionForCurrentLeader() { apiVersions.update("1", oldApis); Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); state.seekUnvalidated(tp0, new SubscriptionState.FetchPosition(10L, Optional.of(5), new Metadata.LeaderAndEpoch(Optional.of(broker1), Optional.of(10)))); @@ -804,7 +685,7 @@ public void testMaybeValidatePositionForCurrentLeader() { @Test public void testMaybeCompleteValidationAfterPositionChange() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -833,7 +714,7 @@ public void testMaybeCompleteValidationAfterPositionChange() { @Test public void testMaybeCompleteValidationAfterOffsetReset() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -859,7 +740,7 @@ public void testMaybeCompleteValidationAfterOffsetReset() { @Test public void testTruncationDetectionWithResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -888,7 +769,7 @@ public void testTruncationDetectionWithResetPolicy() { public void testTruncationDetectionWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -918,7 +799,7 @@ public void testTruncationDetectionWithoutResetPolicy() { public void testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.EARLIEST); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -943,7 +824,7 @@ public void testTruncationDetectionUnknownDivergentOffsetWithResetPolicy() { public void testTruncationDetectionUnknownDivergentOffsetWithoutResetPolicy() { Node broker1 = new Node(1, "localhost", 9092); state = new SubscriptionState(new LogContext(), AutoOffsetResetStrategy.NONE); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); int currentEpoch = 10; long initialOffset = 10L; @@ -991,7 +872,7 @@ public void resetOffsetNoValidation() { // Check that offset reset works when we can't validate offsets (older brokers) Node broker1 = new Node(1, "localhost", 9092); - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); // Reset offsets state.requestOffsetReset(tp0, AutoOffsetResetStrategy.EARLIEST); @@ -1037,7 +918,7 @@ public void resetOffsetNoValidation() { @Test public void nullPositionLagOnNoPosition() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); assertNull(state.partitionLag(tp0, IsolationLevel.READ_UNCOMMITTED)); assertNull(state.partitionLag(tp0, IsolationLevel.READ_COMMITTED)); @@ -1051,7 +932,7 @@ public void nullPositionLagOnNoPosition() { @Test public void testPositionOrNull() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); state.seek(tp0, 5); @@ -1061,7 +942,7 @@ public void testPositionOrNull() { @Test public void testTryUpdatingHighWatermark() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long highWatermark = 10L; @@ -1072,7 +953,7 @@ public void testTryUpdatingHighWatermark() { @Test public void testTryUpdatingLogStartOffset() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long position = 25; state.seek(tp0, position); @@ -1085,7 +966,7 @@ public void testTryUpdatingLogStartOffset() { @Test public void testTryUpdatingLastStableOffset() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final long lastStableOffset = 10L; @@ -1096,7 +977,7 @@ public void testTryUpdatingLastStableOffset() { @Test public void testTryUpdatingPreferredReadReplica() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); final int preferredReadReplicaId = 10; @@ -1109,7 +990,7 @@ public void testTryUpdatingPreferredReadReplica() { @Test public void testRequestOffsetResetIfPartitionAssigned() { - state.assignFromUser(Set.of(tp0)); + state.assignFromUser(Collections.singleton(tp0)); final TopicPartition unassignedPartition = new TopicPartition("unassigned", 0); state.requestOffsetResetIfPartitionAssigned(tp0); @@ -1120,33 +1001,4 @@ public void testRequestOffsetResetIfPartitionAssigned() { assertThrows(IllegalStateException.class, () -> state.isOffsetResetNeeded(unassignedPartition)); } - - // This test ensures the "fetchablePartitions" does not run the custom predicate if the partition is not fetchable - // This func is used in the hot path for fetching, to find fetchable partitions that are not in the buffer, - // so it should avoid evaluating the predicate if not needed. - @Test - public void testFetchablePartitionsPerformsCheapChecksFirst() { - // Setup fetchable partition and pause it - state.assignFromUser(Set.of(tp0)); - state.seek(tp0, 100); - assertTrue(state.isFetchable(tp0)); - state.pause(tp0); - - // Retrieve fetchable partitions with custom predicate. - AtomicBoolean predicateEvaluated = new AtomicBoolean(false); - Predicate isBuffered = tp -> { - predicateEvaluated.set(true); - return true; - }; - List fetchablePartitions = state.fetchablePartitions(isBuffered); - assertTrue(fetchablePartitions.isEmpty()); - assertFalse(predicateEvaluated.get(), "Custom predicate should not be evaluated when partitions are not fetchable"); - - // Resume partition and retrieve fetchable again - state.resume(tp0); - predicateEvaluated.set(false); - fetchablePartitions = state.fetchablePartitions(isBuffered); - assertTrue(predicateEvaluated.get()); - assertEquals(tp0, fetchablePartitions.get(0)); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java index f8cc3ee0cccde..c977c3ebf3da8 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/TopicMetadataRequestManagerTest.java @@ -73,7 +73,6 @@ public void setup() { props.put(ALLOW_AUTO_CREATE_TOPICS_CONFIG, false); props.put(KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); props.put(VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class); - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); this.topicMetadataRequestManager = spy(new TopicMetadataRequestManager( new LogContext(), time, diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java index dde3f567132fc..3d55b30052bbe 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/events/ApplicationEventProcessorTest.java @@ -31,19 +31,15 @@ import org.apache.kafka.clients.consumer.internals.NetworkClientDelegate; import org.apache.kafka.clients.consumer.internals.OffsetsRequestManager; import org.apache.kafka.clients.consumer.internals.RequestManagers; -import org.apache.kafka.clients.consumer.internals.StreamsGroupHeartbeatRequestManager; -import org.apache.kafka.clients.consumer.internals.StreamsMembershipManager; import org.apache.kafka.clients.consumer.internals.SubscriptionState; import org.apache.kafka.clients.consumer.internals.TopicMetadataRequestManager; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; -import org.apache.logging.log4j.Level; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.Arguments; @@ -90,8 +86,6 @@ public class ApplicationEventProcessorTest { private final OffsetsRequestManager offsetsRequestManager = mock(OffsetsRequestManager.class); private SubscriptionState subscriptionState = mock(SubscriptionState.class); private final ConsumerMetadata metadata = mock(ConsumerMetadata.class); - private final StreamsGroupHeartbeatRequestManager streamsGroupHeartbeatRequestManager = mock(StreamsGroupHeartbeatRequestManager.class); - private final StreamsMembershipManager streamsMembershipManager = mock(StreamsMembershipManager.class); private ApplicationEventProcessor processor; private void setupProcessor(boolean withGroupId) { @@ -103,10 +97,7 @@ private void setupProcessor(boolean withGroupId) { withGroupId ? Optional.of(mock(CoordinatorRequestManager.class)) : Optional.empty(), withGroupId ? Optional.of(commitRequestManager) : Optional.empty(), withGroupId ? Optional.of(heartbeatRequestManager) : Optional.empty(), - withGroupId ? Optional.of(membershipManager) : Optional.empty(), - Optional.empty(), - Optional.empty() - ); + withGroupId ? Optional.of(membershipManager) : Optional.empty()); processor = new ApplicationEventProcessor( new LogContext(), requestManagers, @@ -115,27 +106,6 @@ private void setupProcessor(boolean withGroupId) { ); } - private void setupStreamProcessor(boolean withGroupId) { - RequestManagers requestManagers = new RequestManagers( - new LogContext(), - offsetsRequestManager, - mock(TopicMetadataRequestManager.class), - mock(FetchRequestManager.class), - withGroupId ? Optional.of(mock(CoordinatorRequestManager.class)) : Optional.empty(), - withGroupId ? Optional.of(commitRequestManager) : Optional.empty(), - withGroupId ? Optional.of(heartbeatRequestManager) : Optional.empty(), - Optional.empty(), - withGroupId ? Optional.of(streamsGroupHeartbeatRequestManager) : Optional.empty(), - withGroupId ? Optional.of(streamsMembershipManager) : Optional.empty() - ); - processor = new ApplicationEventProcessor( - new LogContext(), - requestManagers, - metadata, - subscriptionState - ); - } - @Test public void testPrepClosingCommitEvents() { setupProcessor(true); @@ -468,7 +438,7 @@ public void testR2JPatternSubscriptionEventFailureWithMixedSubscriptionType() { processor.process(event); verify(subscriptionState).subscribe(pattern, listener); - Exception thrown = assertFutureThrows(IllegalStateException.class, event.future()); + Exception thrown = assertFutureThrows(event.future(), mixedSubscriptionError.getClass()); assertEquals(mixedSubscriptionError, thrown); } @@ -509,7 +479,7 @@ public void testSyncCommitEventWithoutCommitRequestManager() { setupProcessor(false); processor.process(event); - assertFutureThrows(KafkaException.class, event.future()); + assertFutureThrows(event.future(), KafkaException.class); } @Test @@ -524,7 +494,7 @@ public void testSyncCommitEventWithException() { verify(commitRequestManager).commitSync(Collections.emptyMap(), 12345); assertTrue(event.offsetsReady.isDone()); - assertFutureThrows(IllegalStateException.class, event.future()); + assertFutureThrows(event.future(), IllegalStateException.class); } @Test @@ -564,7 +534,7 @@ public void testAsyncCommitEventWithoutCommitRequestManager() { setupProcessor(false); processor.process(event); - assertFutureThrows(KafkaException.class, event.future()); + assertFutureThrows(event.future(), KafkaException.class); } @Test @@ -580,79 +550,7 @@ public void testAsyncCommitEventWithException() { verify(commitRequestManager).commitAsync(Collections.emptyMap()); assertTrue(event.offsetsReady.isDone()); - assertFutureThrows(IllegalStateException.class, event.future()); - } - - @Test - public void testStreamsOnTasksRevokedCallbackCompletedEvent() { - setupStreamProcessor(true); - StreamsOnTasksRevokedCallbackCompletedEvent event = - new StreamsOnTasksRevokedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - processor.process(event); - verify(streamsMembershipManager).onTasksRevokedCallbackCompleted(event); - } - - @Test - public void testStreamsOnTasksRevokedCallbackCompletedEventWithoutStreamsMembershipManager() { - setupStreamProcessor(false); - StreamsOnTasksRevokedCallbackCompletedEvent event = - new StreamsOnTasksRevokedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { - logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); - processor.process(event); - assertTrue(logAppender.getMessages().stream().anyMatch(e -> - e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onTasksRevoked callback execution could not be sent"))); - verify(streamsMembershipManager, never()).onTasksRevokedCallbackCompleted(event); - } - } - - @Test - public void testStreamsOnTasksAssignedCallbackCompletedEvent() { - setupStreamProcessor(true); - StreamsOnTasksAssignedCallbackCompletedEvent event = - new StreamsOnTasksAssignedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - processor.process(event); - verify(streamsMembershipManager).onTasksAssignedCallbackCompleted(event); - } - - @Test - public void testStreamsOnTasksAssignedCallbackCompletedEventWithoutStreamsMembershipManager() { - setupStreamProcessor(false); - StreamsOnTasksAssignedCallbackCompletedEvent event = - new StreamsOnTasksAssignedCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { - logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); - processor.process(event); - assertTrue(logAppender.getMessages().stream().anyMatch(e -> - e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onTasksAssigned callback execution could not be sent"))); - verify(streamsMembershipManager, never()).onTasksAssignedCallbackCompleted(event); - } - } - - @Test - public void testStreamsOnAllTasksLostCallbackCompletedEvent() { - setupStreamProcessor(true); - StreamsOnAllTasksLostCallbackCompletedEvent event = - new StreamsOnAllTasksLostCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - processor.process(event); - verify(streamsMembershipManager).onAllTasksLostCallbackCompleted(event); - } - - @Test - public void testStreamsOnAllTasksLostCallbackCompletedEventWithoutStreamsMembershipManager() { - setupStreamProcessor(false); - StreamsOnAllTasksLostCallbackCompletedEvent event = - new StreamsOnAllTasksLostCallbackCompletedEvent(new CompletableFuture<>(), Optional.empty()); - try (final LogCaptureAppender logAppender = LogCaptureAppender.createAndRegister()) { - logAppender.setClassLogger(ApplicationEventProcessor.class, Level.WARN); - processor.process(event); - assertTrue(logAppender.getMessages().stream().anyMatch(e -> - e.contains("An internal error occurred; the Streams membership manager was not present, so the notification " + - "of the onAllTasksLost callback execution could not be sent"))); - verify(streamsMembershipManager, never()).onAllTasksLostCallbackCompleted(event); - } + assertFutureThrows(event.future(), IllegalStateException.class); } private List mockCommitResults() { diff --git a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java index 876bc3ffa12da..2913bcfad70f1 100644 --- a/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/consumer/internals/metrics/AsyncConsumerMetricsTest.java @@ -20,14 +20,12 @@ import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.AfterEach; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.MethodSource; +import org.junit.jupiter.api.Test; -import java.util.Set; -import java.util.stream.Stream; +import java.util.Arrays; +import java.util.HashSet; import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_METRIC_GROUP; -import static org.apache.kafka.clients.consumer.internals.ConsumerUtils.CONSUMER_SHARE_METRIC_GROUP; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -38,13 +36,6 @@ public class AsyncConsumerMetricsTest { private final Metrics metrics = new Metrics(); private AsyncConsumerMetrics consumerMetrics; - public static Stream groupNameProvider() { - return Stream.of( - CONSUMER_METRIC_GROUP, - CONSUMER_SHARE_METRIC_GROUP - ); - } - @AfterEach public void tearDown() { if (consumerMetrics != null) { @@ -53,28 +44,18 @@ public void tearDown() { metrics.close(); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldMetricNames(String groupName) { + @Test + public void shouldMetricNames() { // create - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); - Set expectedMetrics = Set.of( - metrics.metricName("time-between-network-thread-poll-avg", groupName), - metrics.metricName("time-between-network-thread-poll-max", groupName), - metrics.metricName("application-event-queue-size", groupName), - metrics.metricName("application-event-queue-time-avg", groupName), - metrics.metricName("application-event-queue-time-max", groupName), - metrics.metricName("application-event-queue-processing-time-avg", groupName), - metrics.metricName("application-event-queue-processing-time-max", groupName), - metrics.metricName("unsent-requests-queue-size", groupName), - metrics.metricName("unsent-requests-queue-time-avg", groupName), - metrics.metricName("unsent-requests-queue-time-max", groupName), - metrics.metricName("background-event-queue-size", groupName), - metrics.metricName("background-event-queue-time-avg", groupName), - metrics.metricName("background-event-queue-time-max", groupName), - metrics.metricName("background-event-queue-processing-time-avg", groupName), - metrics.metricName("background-event-queue-processing-time-max", groupName) - ); + consumerMetrics = new AsyncConsumerMetrics(metrics); + HashSet expectedMetrics = new HashSet<>(Arrays.asList( + metrics.metricName("last-poll-seconds-ago", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-poll-max", CONSUMER_METRIC_GROUP), + metrics.metricName("poll-idle-ratio-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("commit-sync-time-ns-total", CONSUMER_METRIC_GROUP), + metrics.metricName("committed-time-ns-total", CONSUMER_METRIC_GROUP) + )); expectedMetrics.forEach( metricName -> assertTrue( metrics.metrics().containsKey(metricName), @@ -82,6 +63,30 @@ public void shouldMetricNames(String groupName) { ) ); + HashSet expectedConsumerMetrics = new HashSet<>(Arrays.asList( + metrics.metricName("time-between-network-thread-poll-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("time-between-network-thread-poll-max", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("application-event-queue-processing-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("unsent-requests-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-size", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-time-max", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-processing-time-avg", CONSUMER_METRIC_GROUP), + metrics.metricName("background-event-queue-processing-time-max", CONSUMER_METRIC_GROUP) + )); + expectedConsumerMetrics.forEach( + metricName -> assertTrue( + metrics.metrics().containsKey(metricName), + "Missing metric: " + metricName + ) + ); + // close consumerMetrics.close(); expectedMetrics.forEach( @@ -90,146 +95,143 @@ public void shouldMetricNames(String groupName) { "Metric present after close: " + metricName ) ); + expectedConsumerMetrics.forEach( + metricName -> assertFalse( + metrics.metrics().containsKey(metricName), + "Metric present after close: " + metricName + ) + ); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordTimeBetweenNetworkThreadPoll(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordTimeBetweenNetworkThreadPoll() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordTimeBetweenNetworkThreadPoll(METRIC_VALUE); // Then: - assertMetricValue("time-between-network-thread-poll-avg", groupName); - assertMetricValue("time-between-network-thread-poll-max", groupName); + assertMetricValue("time-between-network-thread-poll-avg"); + assertMetricValue("time-between-network-thread-poll-max"); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordApplicationEventQueueSize(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordApplicationEventQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordApplicationEventQueueSize(10); // Then: assertEquals( - (double) 10, metrics.metric( metrics.metricName( "application-event-queue-size", - groupName + CONSUMER_METRIC_GROUP ) - ).metricValue() + ).metricValue(), + (double) 10 ); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordApplicationEventQueueTime(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordApplicationEventQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordApplicationEventQueueTime(METRIC_VALUE); // Then: - assertMetricValue("application-event-queue-time-avg", groupName); - assertMetricValue("application-event-queue-time-max", groupName); + assertMetricValue("application-event-queue-time-avg"); + assertMetricValue("application-event-queue-time-max"); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordApplicationEventQueueProcessingTime(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordApplicationEventQueueProcessingTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordApplicationEventQueueProcessingTime(METRIC_VALUE); // Then: - assertMetricValue("application-event-queue-processing-time-avg", groupName); - assertMetricValue("application-event-queue-processing-time-max", groupName); + assertMetricValue("application-event-queue-processing-time-avg"); + assertMetricValue("application-event-queue-processing-time-max"); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordUnsentRequestsQueueSize(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordUnsentRequestsQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordUnsentRequestsQueueSize(10, 100); // Then: assertEquals( - (double) 10, metrics.metric( metrics.metricName( "unsent-requests-queue-size", - groupName + CONSUMER_METRIC_GROUP ) - ).metricValue() + ).metricValue(), + (double) 10 ); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordUnsentRequestsQueueTime(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordUnsentRequestsQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordUnsentRequestsQueueTime(METRIC_VALUE); // Then: - assertMetricValue("unsent-requests-queue-time-avg", groupName); - assertMetricValue("unsent-requests-queue-time-max", groupName); + assertMetricValue("unsent-requests-queue-time-avg"); + assertMetricValue("unsent-requests-queue-time-max"); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordBackgroundEventQueueSize(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordBackgroundEventQueueSize() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordBackgroundEventQueueSize(10); // Then: assertEquals( - (double) 10, metrics.metric( metrics.metricName( "background-event-queue-size", - groupName + CONSUMER_METRIC_GROUP ) - ).metricValue() + ).metricValue(), + (double) 10 ); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordBackgroundEventQueueTime(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordBackgroundEventQueueTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordBackgroundEventQueueTime(METRIC_VALUE); // Then: - assertMetricValue("background-event-queue-time-avg", groupName); - assertMetricValue("background-event-queue-time-max", groupName); + assertMetricValue("background-event-queue-time-avg"); + assertMetricValue("background-event-queue-time-max"); } - @ParameterizedTest - @MethodSource("groupNameProvider") - public void shouldRecordBackgroundEventQueueProcessingTime(String groupName) { - consumerMetrics = new AsyncConsumerMetrics(metrics, groupName); + @Test + public void shouldRecordBackgroundEventQueueProcessingTime() { + consumerMetrics = new AsyncConsumerMetrics(metrics); // When: consumerMetrics.recordBackgroundEventQueueProcessingTime(METRIC_VALUE); // Then: - assertMetricValue("background-event-queue-processing-time-avg", groupName); - assertMetricValue("background-event-queue-processing-time-max", groupName); + assertMetricValue("background-event-queue-processing-time-avg"); + assertMetricValue("background-event-queue-processing-time-avg"); } - private void assertMetricValue(final String name, final String groupName) { + private void assertMetricValue(final String name) { assertEquals( - (double) METRIC_VALUE, metrics.metric( metrics.metricName( name, - groupName + CONSUMER_METRIC_GROUP ) - ).metricValue() + ).metricValue(), + (double) METRIC_VALUE ); } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java index 9d8aa35f8fe90..9d40775779f9d 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/KafkaProducerTest.java @@ -32,14 +32,12 @@ import org.apache.kafka.clients.producer.internals.RecordAccumulator; import org.apache.kafka.clients.producer.internals.Sender; import org.apache.kafka.clients.producer.internals.TransactionManager; -import org.apache.kafka.clients.producer.internals.TransactionalRequestResult; import org.apache.kafka.common.Cluster; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Metric; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.Node; import org.apache.kafka.common.PartitionInfo; -import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.config.ConfigException; @@ -47,13 +45,10 @@ import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InterruptException; import org.apache.kafka.common.errors.InvalidTopicException; -import org.apache.kafka.common.errors.InvalidTxnStateException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeader; -import org.apache.kafka.common.header.internals.RecordHeaders; import org.apache.kafka.common.internals.ClusterResourceListeners; import org.apache.kafka.common.message.AddOffsetsToTxnResponseData; import org.apache.kafka.common.message.ApiVersionsResponseData; @@ -65,8 +60,6 @@ import org.apache.kafka.common.metrics.Measurable; import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; -import org.apache.kafka.common.metrics.Monitorable; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.network.Selectable; @@ -78,7 +71,6 @@ import org.apache.kafka.common.requests.EndTxnResponse; import org.apache.kafka.common.requests.FindCoordinatorRequest; import org.apache.kafka.common.requests.FindCoordinatorResponse; -import org.apache.kafka.common.requests.InitProducerIdRequest; import org.apache.kafka.common.requests.InitProducerIdResponse; import org.apache.kafka.common.requests.JoinGroupRequest; import org.apache.kafka.common.requests.MetadataResponse; @@ -91,10 +83,10 @@ import org.apache.kafka.common.serialization.StringSerializer; import org.apache.kafka.common.telemetry.internals.ClientTelemetryReporter; import org.apache.kafka.common.telemetry.internals.ClientTelemetrySender; +import org.apache.kafka.common.utils.KafkaThread; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.common.utils.ProducerIdAndEpoch; import org.apache.kafka.common.utils.Time; import org.apache.kafka.test.MockMetricsReporter; import org.apache.kafka.test.MockPartitioner; @@ -103,12 +95,10 @@ import org.apache.kafka.test.TestUtils; import org.apache.logging.log4j.Level; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.TestInfo; import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.CsvSource; import org.junit.jupiter.params.provider.ValueSource; import org.mockito.MockedStatic; import org.mockito.Mockito; @@ -122,7 +112,6 @@ import java.util.Collection; import java.util.Collections; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -146,7 +135,6 @@ import static java.util.Collections.emptyMap; import static java.util.Collections.singletonMap; -import static org.apache.kafka.clients.producer.KafkaProducer.NETWORK_THREAD_PREFIX; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -165,7 +153,6 @@ import static org.mockito.ArgumentMatchers.eq; import static org.mockito.ArgumentMatchers.notNull; import static org.mockito.Mockito.atMostOnce; -import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.mockStatic; import static org.mockito.Mockito.never; @@ -218,12 +205,6 @@ public void setup(TestInfo testInfo) { this.testInfo = testInfo; } - @AfterEach - public void detectLeaks() throws InterruptedException { - // Assert no thread leakage of Kafka producer. - TestUtils.assertNoLeakedThreadsWithNameAndDaemonStatus(NETWORK_THREAD_PREFIX, Boolean.TRUE); - } - @Test public void testOverwriteAcksAndRetriesForIdempotentProducers() { Properties props = new Properties(); @@ -235,14 +216,21 @@ public void testOverwriteAcksAndRetriesForIdempotentProducers() { ProducerConfig config = new ProducerConfig(props); assertTrue(config.getBoolean(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG)); assertTrue(Stream.of("-1", "all").anyMatch(each -> each.equalsIgnoreCase(config.getString(ProducerConfig.ACKS_CONFIG)))); - assertEquals(Integer.MAX_VALUE, (int) config.getInt(ProducerConfig.RETRIES_CONFIG)); + assertEquals((int) config.getInt(ProducerConfig.RETRIES_CONFIG), Integer.MAX_VALUE); assertTrue(config.getString(ProducerConfig.CLIENT_ID_CONFIG).equalsIgnoreCase("producer-" + config.getString(ProducerConfig.TRANSACTIONAL_ID_CONFIG))); } @Test public void testAcksAndIdempotenceForIdempotentProducers() { - Properties baseProps = baseProperties(); + Properties baseProps = new Properties() {{ + setProperty( + ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + setProperty( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + setProperty( + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + }}; Properties validProps = new Properties() {{ putAll(baseProps); @@ -345,7 +333,11 @@ public void testAcksAndIdempotenceForIdempotentProducers() { @Test public void testRetriesAndIdempotenceForIdempotentProducers() { - Properties baseProps = baseProperties(); + Properties baseProps = new Properties() {{ + setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + }}; Properties validProps = new Properties() {{ putAll(baseProps); @@ -407,17 +399,13 @@ public void testRetriesAndIdempotenceForIdempotentProducers() { "Must set retries to non-zero when using the transactional producer."); } - private Properties baseProperties() { - Properties baseProps = new Properties(); - baseProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - baseProps.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - baseProps.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); - return baseProps; - } - @Test public void testInflightRequestsAndIdempotenceForIdempotentProducers() { - Properties baseProps = baseProperties(); + Properties baseProps = new Properties() {{ + setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + }}; Properties validProps = new Properties() {{ putAll(baseProps); @@ -433,16 +421,16 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { config.getInt(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION), "max.in.flight.requests.per.connection should be overwritten"); - Properties invalidProps1 = new Properties() {{ + Properties validProps2 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); }}; - ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(invalidProps1)); + ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(validProps2)); assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " must be set to at most 5. Current value is 6.", configException.getMessage()); - Properties invalidProps2 = new Properties() {{ + Properties invalidProps = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false"); @@ -450,10 +438,10 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps2), + () -> new ProducerConfig(invalidProps), "Cannot set a transactional.id without also enabling idempotence"); - Properties invalidProps3 = new Properties() {{ + Properties invalidProps2 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); // explicitly enabling idempotence should still throw exception @@ -461,17 +449,17 @@ public void testInflightRequestsAndIdempotenceForIdempotentProducers() { }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps3), + () -> new ProducerConfig(invalidProps2), "Must set max.in.flight.requests.per.connection to at most 5 when using the idempotent producer."); - Properties invalidProps4 = new Properties() {{ + Properties invalidProps3 = new Properties() {{ putAll(baseProps); setProperty(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "6"); setProperty(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transactionalId"); }}; assertThrows( ConfigException.class, - () -> new ProducerConfig(invalidProps4), + () -> new ProducerConfig(invalidProps3), "Must set retries to non-zero when using the idempotent producer."); } @@ -480,14 +468,16 @@ public void testMetricsReporterAutoGeneratedClientId() { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - assertEquals(2, producer.metrics.reporters().size()); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); - MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) producer.metrics.reporters().stream() - .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().get(); - assertEquals(producer.getClientId(), mockMetricsReporter.clientId); - } + assertEquals(2, producer.metrics.reporters().size()); + + MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) producer.metrics.reporters().stream() + .filter(reporter -> reporter instanceof MockMetricsReporter).findFirst().get(); + assertEquals(producer.getClientId(), mockMetricsReporter.clientId); + + producer.close(); } @Test @@ -496,9 +486,9 @@ public void testDisableJmxAndClientTelemetryReporter() { props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, ""); props.setProperty(ProducerConfig.ENABLE_METRICS_PUSH_CONFIG, "false"); - try (KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) { - assertTrue(producer.metrics.reporters().isEmpty()); - } + KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + assertTrue(producer.metrics.reporters().isEmpty()); + producer.close(); } @Test @@ -507,10 +497,10 @@ public void testExplicitlyOnlyEnableJmxReporter() { props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, "org.apache.kafka.common.metrics.JmxReporter"); props.setProperty(ProducerConfig.ENABLE_METRICS_PUSH_CONFIG, "false"); - try (KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) { - assertEquals(1, producer.metrics.reporters().size()); - assertInstanceOf(JmxReporter.class, producer.metrics.reporters().get(0)); - } + KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + assertEquals(1, producer.metrics.reporters().size()); + assertInstanceOf(JmxReporter.class, producer.metrics.reporters().get(0)); + producer.close(); } @Test @@ -518,10 +508,10 @@ public void testExplicitlyOnlyEnableClientTelemetryReporter() { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, ""); - try (KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) { - assertEquals(1, producer.metrics.reporters().size()); - assertInstanceOf(ClientTelemetryReporter.class, producer.metrics.reporters().get(0)); - } + KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + assertEquals(1, producer.metrics.reporters().size()); + assertInstanceOf(ClientTelemetryReporter.class, producer.metrics.reporters().get(0)); + producer.close(); } @Test @@ -533,24 +523,15 @@ public void testConstructorWithSerializers() { @Test public void testNoSerializerProvided() { - Properties producerProps = new Properties(); producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - - assertThrows(ConfigException.class, () -> { - try (KafkaProducer producer = new KafkaProducer<>(producerProps)) { - // KafkaProducer will be closed automatically after the block - } - }); + assertThrows(ConfigException.class, () -> new KafkaProducer(producerProps)); final Map configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); + // Invalid value null for configuration key.serializer: must be non-null. - assertThrows(ConfigException.class, () -> { - try (KafkaProducer producer = new KafkaProducer<>(configs)) { - // KafkaProducer will be closed automatically after the block - } - }); + assertThrows(ConfigException.class, () -> new KafkaProducer(configs)); } @Test @@ -579,7 +560,7 @@ public void testConstructorWithNotStringKey() { ConfigException ce = assertThrows( ConfigException.class, () -> new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())); - assertTrue(ce.getMessage().contains("One or more keys is not a string."), "Unexpected exception message: " + ce.getMessage()); + assertTrue(ce.getMessage().contains("not string key"), "Unexpected exception message: " + ce.getMessage()); } @Test @@ -604,11 +585,12 @@ public void testSerializerClose() { final int oldInitCount = MockSerializer.INIT_COUNT.get(); final int oldCloseCount = MockSerializer.CLOSE_COUNT.get(); - try (var ignored = new KafkaProducer<>(configs, new MockSerializer(), new MockSerializer())) { - assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); - assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); - } + KafkaProducer producer = new KafkaProducer<>( + configs, new MockSerializer(), new MockSerializer()); + assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); + assertEquals(oldCloseCount, MockSerializer.CLOSE_COUNT.get()); + producer.close(); assertEquals(oldInitCount + 2, MockSerializer.INIT_COUNT.get()); assertEquals(oldCloseCount + 2, MockSerializer.CLOSE_COUNT.get()); } @@ -622,14 +604,15 @@ public void testInterceptorConstructClose() { props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MockProducerInterceptor.class.getName()); props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); - assertEquals(0, MockProducerInterceptor.CLOSE_COUNT.get()); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); + assertEquals(0, MockProducerInterceptor.CLOSE_COUNT.get()); - // Cluster metadata will only be updated on calling onSend. - assertNull(MockProducerInterceptor.CLUSTER_META.get()); - } + // Cluster metadata will only be updated on calling onSend. + assertNull(MockProducerInterceptor.CLUSTER_META.get()); + + producer.close(); assertEquals(1, MockProducerInterceptor.INIT_COUNT.get()); assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get()); } finally { @@ -637,15 +620,15 @@ props, new StringSerializer(), new StringSerializer())) { MockProducerInterceptor.resetCounters(); } } - @Test public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemainingInstances() { - final int targetInterceptor = 1; + final int targetInterceptor = 3; try { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, - CloseInterceptor.class.getName() + "," + MockProducerInterceptor.class.getName()); + props.setProperty(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, org.apache.kafka.test.MockProducerInterceptor.class.getName() + ", " + + org.apache.kafka.test.MockProducerInterceptor.class.getName() + ", " + + org.apache.kafka.test.MockProducerInterceptor.class.getName()); props.setProperty(MockProducerInterceptor.APPEND_STRING_PROP, "something"); MockProducerInterceptor.setThrowOnConfigExceptionThreshold(targetInterceptor); @@ -654,16 +637,13 @@ public void testInterceptorConstructorConfigurationWithExceptionShouldCloseRemai new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()) ); - assertEquals(1, MockProducerInterceptor.CONFIG_COUNT.get()); - assertEquals(1, MockProducerInterceptor.CLOSE_COUNT.get()); - assertEquals(1, CloseInterceptor.CLOSE_COUNT.get()); + assertEquals(3, MockProducerInterceptor.CONFIG_COUNT.get()); + assertEquals(3, MockProducerInterceptor.CLOSE_COUNT.get()); } finally { MockProducerInterceptor.resetCounters(); - CloseInterceptor.resetCounters(); } } - @Test public void testPartitionerClose() { try { @@ -672,12 +652,12 @@ public void testPartitionerClose() { MockPartitioner.resetCounters(); props.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, MockPartitioner.class.getName()); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - assertEquals(1, MockPartitioner.INIT_COUNT.get()); - assertEquals(0, MockPartitioner.CLOSE_COUNT.get()); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + assertEquals(1, MockPartitioner.INIT_COUNT.get()); + assertEquals(0, MockPartitioner.CLOSE_COUNT.get()); + producer.close(); assertEquals(1, MockPartitioner.INIT_COUNT.get()); assertEquals(1, MockPartitioner.CLOSE_COUNT.get()); } finally { @@ -1087,14 +1067,13 @@ public void testTopicExpiryInMetadata() throws InterruptedException { @SuppressWarnings("unchecked") @Test - public void testHeadersSuccess() { + public void testHeaders() { doTestHeaders(Serializer.class); } private > void doTestHeaders(Class serializerClassToMock) { Map configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - configs.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorForHeaders.class.getName()); Serializer keySerializer = mock(serializerClassToMock); Serializer valueSerializer = mock(serializerClassToMock); @@ -1123,9 +1102,7 @@ private > void doTestHeaders(Class serializerCla producer.send(record, null); //ensure headers are closed and cannot be mutated post send - RecordHeaders recordHeaders = (RecordHeaders) record.headers(); - assertTrue(recordHeaders.isReadOnly()); - assertThrows(IllegalStateException.class, () -> recordHeaders.add(new RecordHeader("test", "test".getBytes()))); + assertThrows(IllegalStateException.class, () -> record.headers().add(new RecordHeader("test", "test".getBytes()))); //ensure existing headers are not changed, and last header for key is still original value assertArrayEquals(record.headers().lastHeader("test").value(), "header2".getBytes()); @@ -1136,28 +1113,6 @@ private > void doTestHeaders(Class serializerCla producer.close(Duration.ofMillis(0)); } - @Test - public void testHeadersFailure() { - Properties props = new Properties(); - props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - props.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5); - props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, ProducerInterceptorForHeaders.class.getName()); - Serializer keySerializer = mock(StringSerializer.class); - Serializer valueSerializer = mock(StringSerializer.class); - - KafkaProducer producer = new KafkaProducer<>(props, keySerializer, valueSerializer); - ProducerRecord record = new ProducerRecord<>("topic", "key", "value"); - Future future = producer.send(record, (recordMetadata, exception) -> { }); - try { - TestUtils.assertFutureThrows(TimeoutException.class, future); - //ensure headers are writable if send failure - RecordHeaders recordHeaders = (RecordHeaders) record.headers(); - assertFalse(recordHeaders.isReadOnly()); - } finally { - producer.close(Duration.ofMillis(0)); - } - } - @Test public void closeShouldBeIdempotent() { Properties producerProps = new Properties(); @@ -1317,12 +1272,12 @@ public void testInitTransactionsResponseAfterTimeout() throws Exception { ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), FindCoordinatorResponse.prepareResponse(Errors.NONE, "bad-transaction", NODE)); - Future future = executor.submit(() -> producer.initTransactions()); + Future future = executor.submit(producer::initTransactions); TestUtils.waitForCondition(client::hasInFlightRequests, "Timed out while waiting for expected `InitProducerId` request to be sent"); time.sleep(maxBlockMs); - TestUtils.assertFutureThrows(TimeoutException.class, future); + TestUtils.assertFutureThrows(future, TimeoutException.class); client.respond(initProducerIdResponse(1L, (short) 5, Errors.NONE)); @@ -1392,297 +1347,6 @@ public void testInitTransactionWhileThrottled() { } } - @ParameterizedTest - @CsvSource({ - "true, false", - "true, true", - "false, true" - }) - public void testInitTransactionsWithKeepPreparedTxnAndTwoPhaseCommit(boolean keepPreparedTxn, boolean enable2PC) { - Map configs = new HashMap<>(); - configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-txn-id"); - configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 10000); - configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - if (enable2PC) { - configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, true); - } - - Time time = new MockTime(1); - MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)); - ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE); - MockClient client = new MockClient(time, metadata); - client.updateMetadata(initialUpdateResponse); - - // Capture flags from the InitProducerIdRequest - boolean[] requestFlags = new boolean[2]; // [keepPreparedTxn, enable2Pc] - - client.prepareResponse( - request -> request instanceof FindCoordinatorRequest && - ((FindCoordinatorRequest) request).data().keyType() == FindCoordinatorRequest.CoordinatorType.TRANSACTION.id(), - FindCoordinatorResponse.prepareResponse(Errors.NONE, "test-txn-id", NODE)); - - client.prepareResponse( - request -> { - if (request instanceof InitProducerIdRequest) { - InitProducerIdRequest initRequest = (InitProducerIdRequest) request; - requestFlags[0] = initRequest.data().keepPreparedTxn(); - requestFlags[1] = initRequest.data().enable2Pc(); - return true; - } - return false; - }, - initProducerIdResponse(1L, (short) 5, Errors.NONE)); - - try (Producer producer = kafkaProducer(configs, new StringSerializer(), - new StringSerializer(), metadata, client, null, time)) { - producer.initTransactions(keepPreparedTxn); - - // Verify request flags match expected values - assertEquals(keepPreparedTxn, requestFlags[0], - "keepPreparedTxn flag should match input parameter"); - assertEquals(enable2PC, requestFlags[1], - "enable2Pc flag should match producer configuration"); - } - } - - @Test - public void testPrepareTransactionSuccess() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - when(ctx.transactionManager.isTransactionV2Enabled()).thenReturn(true); - when(ctx.transactionManager.is2PCEnabled()).thenReturn(true); - when(ctx.sender.isRunning()).thenReturn(true); - - doNothing().when(ctx.transactionManager).prepareTransaction(); - - long expectedProducerId = 12345L; - short expectedEpoch = 5; - ProducerIdAndEpoch expectedProducerIdAndEpoch = new ProducerIdAndEpoch(expectedProducerId, expectedEpoch); - when(ctx.transactionManager.preparedTransactionState()).thenReturn(expectedProducerIdAndEpoch); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - PreparedTxnState returned = producer.prepareTransaction(); - assertEquals(expectedProducerId, returned.producerId()); - assertEquals(expectedEpoch, returned.epoch()); - - verify(ctx.transactionManager).prepareTransaction(); - verify(ctx.accumulator).beginFlush(); - verify(ctx.accumulator).awaitFlushCompletion(); - } - } - - @Test - public void testSendNotAllowedInPreparedTransactionState() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - String topic = "foo"; - Cluster cluster = TestUtils.singletonCluster(topic, 1); - - when(ctx.sender.isRunning()).thenReturn(true); - when(ctx.metadata.fetch()).thenReturn(cluster); - - // Mock transaction manager to simulate being in a prepared state - when(ctx.transactionManager.isTransactional()).thenReturn(true); - when(ctx.transactionManager.isPrepared()).thenReturn(true); - - // Create record to send - long timestamp = ctx.time.milliseconds(); - ProducerRecord record = new ProducerRecord<>(topic, 0, timestamp, "key", "value"); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - // Verify that sending a record throws IllegalStateException with the correct message - IllegalStateException exception = assertThrows( - IllegalStateException.class, - () -> producer.send(record) - ); - - assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); - - // Verify transactionManager methods were called - verify(ctx.transactionManager).isTransactional(); - verify(ctx.transactionManager).isPrepared(); - - // Verify that no message was actually sent (accumulator was not called) - verify(ctx.accumulator, never()).append( - eq(topic), - anyInt(), - anyLong(), - any(), - any(), - any(), - any(), - anyLong(), - anyLong(), - any() - ); - } - } - - @Test - public void testSendOffsetsNotAllowedInPreparedTransactionState() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - String topic = "foo"; - Cluster cluster = TestUtils.singletonCluster(topic, 1); - - when(ctx.sender.isRunning()).thenReturn(true); - when(ctx.metadata.fetch()).thenReturn(cluster); - - // Mock transaction manager to simulate being in a prepared state - when(ctx.transactionManager.isTransactional()).thenReturn(true); - when(ctx.transactionManager.isPrepared()).thenReturn(true); - - // Create consumer group metadata - String groupId = "test-group"; - Map offsets = new HashMap<>(); - offsets.put(new TopicPartition(topic, 0), new OffsetAndMetadata(100L)); - ConsumerGroupMetadata groupMetadata = new ConsumerGroupMetadata(groupId); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - // Verify that sending offsets throws IllegalStateException with the correct message - IllegalStateException exception = assertThrows( - IllegalStateException.class, - () -> producer.sendOffsetsToTransaction(offsets, groupMetadata) - ); - - assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); - - // Verify transactionManager methods were called - verify(ctx.transactionManager).isTransactional(); - verify(ctx.transactionManager).isPrepared(); - - // Verify that no offsets were actually sent - verify(ctx.transactionManager, never()).sendOffsetsToTransaction( - eq(offsets), - eq(groupMetadata) - ); - } - } - - @Test - public void testBeginTransactionNotAllowedInPreparedTransactionState() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - when(ctx.sender.isRunning()).thenReturn(true); - - // Mock transaction manager to simulate being in a prepared state - when(ctx.transactionManager.isTransactional()).thenReturn(true); - when(ctx.transactionManager.isPrepared()).thenReturn(true); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - // Verify that calling beginTransaction throws IllegalStateException with the correct message - IllegalStateException exception = assertThrows( - IllegalStateException.class, - producer::beginTransaction - ); - - assertTrue(exception.getMessage().contains("Cannot perform operation while the transaction is in a prepared state")); - - // Verify transactionManager methods were called - verify(ctx.transactionManager).isTransactional(); - verify(ctx.transactionManager).isPrepared(); - } - } - - @Test - public void testPrepareTransactionFailsWhen2PCDisabled() { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - // Disable 2PC - when(ctx.transactionManager.isTransactionV2Enabled()).thenReturn(true); - when(ctx.transactionManager.is2PCEnabled()).thenReturn(false); - when(ctx.sender.isRunning()).thenReturn(true); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - assertThrows( - InvalidTxnStateException.class, - producer::prepareTransaction, - "prepareTransaction() should fail if 2PC is disabled" - ); - } - } - - @Test - public void testCompleteTransactionWithMatchingState() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - when(ctx.transactionManager.isPrepared()).thenReturn(true); - when(ctx.sender.isRunning()).thenReturn(true); - - // Create prepared states with matching values - long producerId = 12345L; - short epoch = 5; - PreparedTxnState inputState = new PreparedTxnState(producerId, epoch); - ProducerIdAndEpoch currentProducerIdAndEpoch = new ProducerIdAndEpoch(producerId, epoch); - - // Set up the transaction manager to return the prepared state - when(ctx.transactionManager.preparedTransactionState()).thenReturn(currentProducerIdAndEpoch); - - // Should trigger commit when states match - TransactionalRequestResult commitResult = mock(TransactionalRequestResult.class); - when(ctx.transactionManager.beginCommit()).thenReturn(commitResult); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - // Call completeTransaction with the matching state - producer.completeTransaction(inputState); - - // Verify methods called in order - verify(ctx.transactionManager).isPrepared(); - verify(ctx.transactionManager).preparedTransactionState(); - verify(ctx.transactionManager).beginCommit(); - - // Verify abort was never called - verify(ctx.transactionManager, never()).beginAbort(); - - // Verify sender was woken up - verify(ctx.sender).wakeup(); - } - } - - @Test - public void testCompleteTransactionWithNonMatchingState() throws Exception { - StringSerializer serializer = new StringSerializer(); - KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); - - when(ctx.transactionManager.isPrepared()).thenReturn(true); - when(ctx.sender.isRunning()).thenReturn(true); - - // Create txn prepared states with different values - long producerId = 12345L; - short epoch = 5; - PreparedTxnState inputState = new PreparedTxnState(producerId + 1, epoch); - ProducerIdAndEpoch currentProducerIdAndEpoch = new ProducerIdAndEpoch(producerId, epoch); - - // Set up the transaction manager to return the prepared state - when(ctx.transactionManager.preparedTransactionState()).thenReturn(currentProducerIdAndEpoch); - - // Should trigger abort when states don't match - TransactionalRequestResult abortResult = mock(TransactionalRequestResult.class); - when(ctx.transactionManager.beginAbort()).thenReturn(abortResult); - - try (KafkaProducer producer = ctx.newKafkaProducer()) { - // Call completeTransaction with the non-matching state - producer.completeTransaction(inputState); - - // Verify methods called in order - verify(ctx.transactionManager).isPrepared(); - verify(ctx.transactionManager).preparedTransactionState(); - verify(ctx.transactionManager).beginAbort(); - - // Verify commit was never called - verify(ctx.transactionManager, never()).beginCommit(); - - // Verify sender was woken up - verify(ctx.sender).wakeup(); - } - } - @Test public void testClusterAuthorizationFailure() throws Exception { int maxBlockMs = 500; @@ -1743,7 +1407,7 @@ public void testTransactionV2ProduceWithConcurrentTransactionError() throws Exce KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); String topic = "foo"; - TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, topic); + TopicPartition topicPartition = new TopicPartition(topic, 0); Cluster cluster = TestUtils.singletonCluster(topic, 1); when(ctx.sender.isRunning()).thenReturn(true); @@ -1778,12 +1442,12 @@ public void testTransactionV2ProduceWithConcurrentTransactionError() throws Exce ApiVersions apiVersions = new ApiVersions(); apiVersions.update(NODE.idString(), nodeApiVersions); - ProducerInterceptors interceptor = new ProducerInterceptors<>(Collections.emptyList(), null); + ProducerInterceptors interceptor = new ProducerInterceptors<>(Collections.emptyList()); client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some-txn", NODE)); client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); - client.prepareResponse(produceResponse(topicIdPartition, 1L, Errors.CONCURRENT_TRANSACTIONS, 0, 1)); - client.prepareResponse(produceResponse(topicIdPartition, 1L, Errors.NONE, 0, 1)); + client.prepareResponse(produceResponse(topicPartition, 1L, Errors.CONCURRENT_TRANSACTIONS, 0, 1)); + client.prepareResponse(produceResponse(topicPartition, 1L, Errors.NONE, 0, 1)); client.prepareResponse(endTxnResponse(Errors.NONE)); try (KafkaProducer producer = new KafkaProducer<>( @@ -1827,7 +1491,7 @@ public void testMeasureAbortTransactionDuration() { } @Test - public void testCommitTransactionWithRecordTooLargeException() { + public void testCommitTransactionWithRecordTooLargeException() throws Exception { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); @@ -1851,13 +1515,13 @@ public void testCommitTransactionWithRecordTooLargeException() { client.prepareResponse(endTxnResponse(Errors.NONE)); producer.beginTransaction(); - TestUtils.assertFutureThrows(RecordTooLargeException.class, producer.send(largeRecord)); + TestUtils.assertFutureError(producer.send(largeRecord), RecordTooLargeException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @Test - public void testCommitTransactionWithMetadataTimeoutForMissingTopic() { + public void testCommitTransactionWithMetadataTimeoutForMissingTopic() throws Exception { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); @@ -1888,13 +1552,13 @@ public void testCommitTransactionWithMetadataTimeoutForMissingTopic() { producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureThrows(TimeoutException.class, producer.send(record)); + TestUtils.assertFutureError(producer.send(record), TimeoutException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @Test - public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() { + public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() throws Exception { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); @@ -1925,13 +1589,13 @@ public void testCommitTransactionWithMetadataTimeoutForPartitionOutOfRange() { producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureThrows(TimeoutException.class, producer.send(record)); + TestUtils.assertFutureError(producer.send(record), TimeoutException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @Test - public void testCommitTransactionWithSendToInvalidTopic() { + public void testCommitTransactionWithSendToInvalidTopic() throws Exception { Map configs = new HashMap<>(); configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "some.id"); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); @@ -1964,7 +1628,7 @@ public void testCommitTransactionWithSendToInvalidTopic() { producer.initTransactions(); producer.beginTransaction(); - TestUtils.assertFutureThrows(InvalidTopicException.class, producer.send(record)); + TestUtils.assertFutureError(producer.send(record), InvalidTopicException.class); assertThrows(KafkaException.class, producer::commitTransaction); } } @@ -2052,7 +1716,7 @@ public void testSendTxnOffsetsWithGroupIdTransactionV2() { try (KafkaProducer producer = new KafkaProducer<>( new ProducerConfig(properties), new StringSerializer(), new StringSerializer(), metadata, client, - new ProducerInterceptors<>(Collections.emptyList(), null), apiVersions, time)) { + new ProducerInterceptors<>(Collections.emptyList()), apiVersions, time)) { producer.initTransactions(); producer.beginTransaction(); producer.sendOffsetsToTransaction(Collections.singletonMap( @@ -2069,7 +1733,6 @@ public void testTransactionV2Produce() throws Exception { KafkaProducerTestContext ctx = new KafkaProducerTestContext<>(testInfo, serializer); String topic = "foo"; - Uuid topicId = Uuid.fromString("klZ9sa2rSvig6QpgGXzALT"); TopicPartition topicPartition = new TopicPartition(topic, 0); Cluster cluster = TestUtils.singletonCluster(topic, 1); @@ -2105,11 +1768,11 @@ public void testTransactionV2Produce() throws Exception { ApiVersions apiVersions = new ApiVersions(); apiVersions.update(NODE.idString(), nodeApiVersions); - ProducerInterceptors interceptor = new ProducerInterceptors<>(Collections.emptyList(), null); + ProducerInterceptors interceptor = new ProducerInterceptors<>(Collections.emptyList()); client.prepareResponse(FindCoordinatorResponse.prepareResponse(Errors.NONE, "some-txn", NODE)); client.prepareResponse(initProducerIdResponse(1L, (short) 5, Errors.NONE)); - client.prepareResponse(produceResponse(new TopicIdPartition(topicId, topicPartition), 1L, Errors.NONE, 0, 1)); + client.prepareResponse(produceResponse(topicPartition, 1L, Errors.NONE, 0, 1)); client.prepareResponse(endTxnResponse(Errors.NONE)); try (KafkaProducer producer = new KafkaProducer<>( @@ -2271,10 +1934,11 @@ public void testClientInstanceIdInvalidTimeout() { Properties props = new Properties(); props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) { - Exception exception = assertThrows(IllegalArgumentException.class, () -> producer.clientInstanceId(Duration.ofMillis(-1))); - assertEquals("The timeout cannot be negative.", exception.getMessage()); - } + KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + Exception exception = assertThrows(IllegalArgumentException.class, () -> producer.clientInstanceId(Duration.ofMillis(-1))); + assertEquals("The timeout cannot be negative.", exception.getMessage()); + + producer.close(); } @Test @@ -2283,10 +1947,11 @@ public void testClientInstanceIdNoTelemetryReporterRegistered() { props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.setProperty(ProducerConfig.ENABLE_METRICS_PUSH_CONFIG, "false"); - try (KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer())) { - Exception exception = assertThrows(IllegalStateException.class, () -> producer.clientInstanceId(Duration.ofMillis(0))); - assertEquals("Telemetry is not enabled. Set config `enable.metrics.push` to `true`.", exception.getMessage()); - } + KafkaProducer producer = new KafkaProducer<>(props, new StringSerializer(), new StringSerializer()); + Exception exception = assertThrows(IllegalStateException.class, () -> producer.clientInstanceId(Duration.ofMillis(0))); + assertEquals("Telemetry is not enabled. Set config `enable.metrics.push` to `true`.", exception.getMessage()); + + producer.close(); } private void verifyInvalidGroupMetadata(ConsumerGroupMetadata groupMetadata) { @@ -2368,7 +2033,7 @@ public void testOnlyCanExecuteCloseAfterInitTransactionsTimeout() { } @Test - public void testSendToInvalidTopic() { + public void testSendToInvalidTopic() throws Exception { Map configs = new HashMap<>(); configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); configs.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, "15000"); @@ -2400,7 +2065,7 @@ public void testSendToInvalidTopic() { assertEquals(Collections.singleton(invalidTopicName), metadata.fetch().invalidTopics(), "Cluster has incorrect invalid topic list."); - TestUtils.assertFutureThrows(InvalidTopicException.class, future); + TestUtils.assertFutureError(future, InvalidTopicException.class); producer.close(Duration.ofMillis(0)); } @@ -2562,14 +2227,15 @@ public void testProducerJmxPrefix() throws Exception { props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); props.put("client.id", "client-1"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - MBeanServer server = ManagementFactory.getPlatformMBeanServer(); - MetricName testMetricName = producer.metrics.metricName("test-metric", - "grp1", "test metric"); - producer.metrics.addMetric(testMetricName, new Avg()); - assertNotNull(server.getObjectInstance(new ObjectName("kafka.producer:type=grp1,client-id=client-1"))); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + + MBeanServer server = ManagementFactory.getPlatformMBeanServer(); + MetricName testMetricName = producer.metrics.metricName("test-metric", + "grp1", "test metric"); + producer.metrics.addMetric(testMetricName, new Avg()); + assertNotNull(server.getObjectInstance(new ObjectName("kafka.producer:type=grp1,client-id=client-1"))); + producer.close(); } private static ProducerMetadata newMetadata(long refreshBackoffMs, long refreshBackoffMaxMs, long expirationMs) { @@ -2633,7 +2299,7 @@ public void testCallbackAndInterceptorHandleError() { String invalidTopicName = "topic abc"; // Invalid topic name due to space ProducerInterceptors producerInterceptors = - new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor()), null); + new ProducerInterceptors<>(Collections.singletonList(new MockProducerInterceptor())); try (Producer producer = kafkaProducer(configs, new StringSerializer(), new StringSerializer(), producerMetadata, client, producerInterceptors, time)) { @@ -2664,34 +2330,6 @@ public void testCallbackAndInterceptorHandleError() { } } - @Test - public void shouldNotInvokeFlushInCallback() { - Map configs = new HashMap<>(); - configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9000"); - // only test in idempotence disabled producer for simplicity - configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, false); - - Time time = new MockTime(1); - MetadataResponse initialUpdateResponse = RequestTestUtils.metadataUpdateWith(1, singletonMap("topic", 1)); - ProducerMetadata metadata = newMetadata(0, 0, Long.MAX_VALUE); - - MockClient client = new MockClient(time, metadata); - client.updateMetadata(initialUpdateResponse); - AtomicReference kafkaException = new AtomicReference<>(); - - try (Producer producer = kafkaProducer(configs, new StringSerializer(), - new StringSerializer(), metadata, client, null, time)) { - producer.send( - new ProducerRecord<>("topic", "value"), - (recordMetadata, exception) -> kafkaException.set(assertThrows(KafkaException.class, producer::flush)) - ); - } - - assertNotNull(kafkaException.get()); - assertEquals("KafkaProducer.flush() invocation inside a callback is not permitted because it may lead to deadlock.", - kafkaException.get().getMessage()); - } - @Test public void negativePartitionShouldThrow() { Map configs = new HashMap<>(); @@ -2820,29 +2458,6 @@ public void configure(Map configs) { } } - public static class ProducerInterceptorForHeaders implements ProducerInterceptor { - - @Override - public ProducerRecord onSend(ProducerRecord record) { - return record; - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { - RecordHeaders recordHeaders = (RecordHeaders) headers; - // Ensure that the headers are read-only, no matter send success or send failure - assertTrue(recordHeaders.isReadOnly()); - } - - @Override - public void close() { - } - - @Override - public void configure(Map configs) { - } - } - public static class ProducerInterceptorForClientId implements ProducerInterceptor { @Override @@ -2885,7 +2500,7 @@ private static class KafkaProducerTestContext { private final Map configs; private final Serializer serializer; private final Partitioner partitioner = mock(Partitioner.class); - private final Sender.SenderThread senderThread = mock(Sender.SenderThread.class); + private final KafkaThread ioThread = mock(KafkaThread.class); private final List> interceptors = new ArrayList<>(); private ProducerMetadata metadata = mock(ProducerMetadata.class); private RecordAccumulator accumulator = mock(RecordAccumulator.class); @@ -2951,7 +2566,7 @@ public KafkaProducer newKafkaProducer() { ProducerConfig producerConfig = new ProducerConfig( ProducerConfig.appendSerializerToConfig(configs, serializer, serializer)); - ProducerInterceptors interceptors = new ProducerInterceptors<>(this.interceptors, metrics); + ProducerInterceptors interceptors = new ProducerInterceptors<>(this.interceptors); return new KafkaProducer<>( producerConfig, @@ -2966,7 +2581,7 @@ public KafkaProducer newKafkaProducer() { interceptors, partitioner, time, - senderThread, + ioThread, Optional.empty() ); } @@ -2992,9 +2607,9 @@ void testDeliveryTimeoutAndLingerMsConfig() { } @SuppressWarnings("deprecation") - private ProduceResponse produceResponse(TopicIdPartition topicIdPartition, long offset, Errors error, int throttleTimeMs, int logStartOffset) { + private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, int logStartOffset) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); - Map partResp = singletonMap(topicIdPartition, resp); + Map partResp = singletonMap(tp, resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -3003,14 +2618,14 @@ public void testSubscribingCustomMetricsDoesntAffectProducerMetrics() { Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - Map customMetrics = customMetrics(); - customMetrics.forEach((name, metric) -> producer.registerMetricForSubscription(metric)); + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); - Map producerMetrics = producer.metrics(); - customMetrics.forEach((name, metric) -> assertFalse(producerMetrics.containsKey(name))); - } + Map customMetrics = customMetrics(); + customMetrics.forEach((name, metric) -> producer.registerMetricForSubscription(metric)); + + Map producerMetrics = producer.metrics(); + customMetrics.forEach((name, metric) -> assertFalse(producerMetrics.containsKey(name))); } @Test @@ -3018,12 +2633,12 @@ public void testUnSubscribingNonExisingMetricsDoesntCauseError() { Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - Map customMetrics = customMetrics(); - //Metrics never registered but removed should not cause an error - customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> producer.unregisterMetricFromSubscription(metric))); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + + Map customMetrics = customMetrics(); + //Metrics never registered but removed should not cause an error + customMetrics.forEach((name, metric) -> assertDoesNotThrow(() -> producer.unregisterMetricFromSubscription(metric))); } @Test @@ -3032,13 +2647,12 @@ public void testSubscribingCustomMetricsWithSameNameDoesntAffectProducerMetrics( appender.setClassLogger(KafkaProducer.class, Level.DEBUG); Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - KafkaMetric existingMetricToAdd = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); - producer.registerMetricForSubscription(existingMetricToAdd); - final String expectedMessage = String.format("Skipping registration for metric %s. Existing producer metrics cannot be overwritten.", existingMetricToAdd.metricName()); - assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetricToAdd = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.registerMetricForSubscription(existingMetricToAdd); + final String expectedMessage = String.format("Skipping registration for metric %s. Existing producer metrics cannot be overwritten.", existingMetricToAdd.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); } } @@ -3048,13 +2662,12 @@ public void testUnsubscribingCustomMetricWithSameNameAsExistingMetricDoesntAffec appender.setClassLogger(KafkaProducer.class, Level.DEBUG); Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - KafkaMetric existingMetricToRemove = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); - producer.unregisterMetricFromSubscription(existingMetricToRemove); - final String expectedMessage = String.format("Skipping unregistration for metric %s. Existing producer metrics cannot be removed.", existingMetricToRemove.metricName()); - assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetricToRemove = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.unregisterMetricFromSubscription(existingMetricToRemove); + final String expectedMessage = String.format("Skipping unregistration for metric %s. Existing producer metrics cannot be removed.", existingMetricToRemove.metricName()); + assertTrue(appender.getMessages().stream().anyMatch(m -> m.contains(expectedMessage))); } } @@ -3067,13 +2680,12 @@ public void testShouldOnlyCallMetricReporterMetricChangeOnceWithExistingProducer Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); - producer.registerMetricForSubscription(existingMetric); - // This test would fail without the check as the existing metric is registered in the producer on startup - Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.registerMetricForSubscription(existingMetric); + // This test would fail without the check as the exising metric is registered in the producer on startup + Mockito.verify(clientTelemetryReporter, atMostOnce()).metricChange(existingMetric); } } @@ -3086,13 +2698,12 @@ public void testShouldNotCallMetricReporterMetricRemovalWithExistingProducerMetr Map props = new HashMap<>(); props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - try (KafkaProducer producer = new KafkaProducer<>( - props, new StringSerializer(), new StringSerializer())) { - KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); - producer.unregisterMetricFromSubscription(existingMetric); - // This test would fail without the check as the existing metric is registered in the consumer on startup - Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric); - } + KafkaProducer producer = new KafkaProducer<>( + props, new StringSerializer(), new StringSerializer()); + KafkaMetric existingMetric = (KafkaMetric) producer.metrics().entrySet().iterator().next().getValue(); + producer.unregisterMetricFromSubscription(existingMetric); + // This test would fail without the check as the exising metric is registered in the consumer on startup + Mockito.verify(clientTelemetryReporter, never()).metricRemoval(existingMetric); } } @@ -3107,138 +2718,4 @@ private Map customMetrics() { KafkaMetric streamClientMetricTwo = new KafkaMetric(lock, metricNameTwo, (Measurable) (m, now) -> 2.0, metricConfig, Time.SYSTEM); return Map.of(metricNameOne, streamClientMetricOne, metricNameTwo, streamClientMetricTwo); } - - @Test - void testMonitorablePlugins() { - try { - String clientId = "testMonitorablePlugins"; - Map configs = new HashMap<>(); - configs.put(ProducerConfig.CLIENT_ID_CONFIG, clientId); - configs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9999"); - configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, MonitorableSerializer.class.getName()); - configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, MonitorableSerializer.class.getName()); - configs.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, MonitorablePartitioner.class.getName()); - configs.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, MonitorableInterceptor.class.getName()); - configs.put(MockProducerInterceptor.APPEND_STRING_PROP, ""); - - KafkaProducer producer = new KafkaProducer<>(configs); - Map metrics = producer.metrics(); - - MetricName expectedKeySerializerMetric = expectedMetricName( - clientId, - ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, - MonitorableSerializer.class); - assertTrue(metrics.containsKey(expectedKeySerializerMetric)); - assertEquals(VALUE, metrics.get(expectedKeySerializerMetric).metricValue()); - - MetricName expectedValueSerializerMetric = expectedMetricName( - clientId, - ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, - MonitorableSerializer.class); - assertTrue(metrics.containsKey(expectedValueSerializerMetric)); - assertEquals(VALUE, metrics.get(expectedValueSerializerMetric).metricValue()); - - MetricName expectedPartitionerMetric = expectedMetricName( - clientId, - ProducerConfig.PARTITIONER_CLASS_CONFIG, - MonitorablePartitioner.class); - assertTrue(metrics.containsKey(expectedPartitionerMetric)); - assertEquals(VALUE, metrics.get(expectedPartitionerMetric).metricValue()); - - MetricName expectedInterceptorMetric = expectedMetricName( - clientId, - ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, - MonitorableInterceptor.class); - assertTrue(metrics.containsKey(expectedInterceptorMetric)); - assertEquals(VALUE, metrics.get(expectedInterceptorMetric).metricValue()); - - producer.close(); - metrics = producer.metrics(); - assertFalse(metrics.containsKey(expectedKeySerializerMetric)); - assertFalse(metrics.containsKey(expectedValueSerializerMetric)); - assertFalse(metrics.containsKey(expectedPartitionerMetric)); - assertFalse(metrics.containsKey(expectedInterceptorMetric)); - } finally { - MockProducerInterceptor.resetCounters(); - } - } - - private MetricName expectedMetricName(String clientId, String config, Class clazz) { - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("client-id", clientId); - expectedTags.put("config", config); - expectedTags.put("class", clazz.getSimpleName()); - expectedTags.putAll(TAGS); - return new MetricName(NAME, "plugins", DESCRIPTION, expectedTags); - } - - private static final String NAME = "name"; - private static final String DESCRIPTION = "description"; - private static final LinkedHashMap TAGS = new LinkedHashMap<>(); - private static final double VALUE = 123.0; - - static { - TAGS.put("t1", "v1"); - } - - public static class MonitorableSerializer extends MockSerializer implements Monitorable { - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); - metrics.addMetric(name, (Measurable) (config, now) -> VALUE); - } - } - - public static class MonitorablePartitioner extends MockPartitioner implements Monitorable { - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); - metrics.addMetric(name, (Measurable) (config, now) -> VALUE); - } - } - - public static class MonitorableInterceptor extends MockProducerInterceptor implements Monitorable { - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - MetricName name = metrics.metricName(NAME, DESCRIPTION, TAGS); - metrics.addMetric(name, (Measurable) (config, now) -> VALUE); - } - } - - public static class CloseInterceptor implements ProducerInterceptor { - - public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); - - @Override - public ProducerRecord onSend(ProducerRecord record) { - return null; - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception) { - ProducerInterceptor.super.onAcknowledgement(metadata, exception); - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { - ProducerInterceptor.super.onAcknowledgement(metadata, exception, headers); - } - - @Override - public void close() { - CLOSE_COUNT.incrementAndGet(); - } - - @Override - public void configure(Map configs) { - // no-op - } - - public static void resetCounters() { - CLOSE_COUNT.set(0); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java index e66dcca504438..6ec8164c26805 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/MockProducerTest.java @@ -53,9 +53,9 @@ public class MockProducerTest { private final String topic = "topic"; - private MockProducer producer; - private final ProducerRecord record1 = new ProducerRecord<>(topic, "key1", "value1"); - private final ProducerRecord record2 = new ProducerRecord<>(topic, "key2", "value2"); + private MockProducer producer; + private final ProducerRecord record1 = new ProducerRecord<>(topic, "key1".getBytes(), "value1".getBytes()); + private final ProducerRecord record2 = new ProducerRecord<>(topic, "key2".getBytes(), "value2".getBytes()); private final String groupId = "group"; private void buildMockProducer(boolean autoComplete) { @@ -318,7 +318,7 @@ public void shouldPublishMessagesOnlyAfterCommitIfTransactionsAreEnabled() { producer.commitTransaction(); - List> expectedResult = new ArrayList<>(); + List> expectedResult = new ArrayList<>(); expectedResult.add(record1); expectedResult.add(record2); @@ -385,7 +385,7 @@ public void shouldPreserveCommittedMessagesOnAbortIfTransactionsAreEnabled() { producer.beginTransaction(); producer.abortTransaction(); - List> expectedResult = new ArrayList<>(); + List> expectedResult = new ArrayList<>(); expectedResult.add(record1); expectedResult.add(record2); @@ -724,10 +724,10 @@ public void testMetadataOnException() throws InterruptedException { buildMockProducer(false); Future metadata = producer.send(record2, (md, exception) -> { assertNotNull(md); - assertEquals(-1L, md.offset(), "Invalid offset"); - assertEquals(RecordBatch.NO_TIMESTAMP, md.timestamp(), "Invalid timestamp"); - assertEquals(-1L, md.serializedKeySize(), "Invalid Serialized Key size"); - assertEquals(-1L, md.serializedValueSize(), "Invalid Serialized value size"); + assertEquals(md.offset(), -1L, "Invalid offset"); + assertEquals(md.timestamp(), RecordBatch.NO_TIMESTAMP, "Invalid timestamp"); + assertEquals(md.serializedKeySize(), -1L, "Invalid Serialized Key size"); + assertEquals(md.serializedValueSize(), -1L, "Invalid Serialized value size"); }); IllegalArgumentException e = new IllegalArgumentException("dummy exception"); assertTrue(producer.errorNext(e), "Complete the second request with an error"); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java index 5fd9ab727e046..830711c0e5449 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/ProducerConfigTest.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.MetadataRecoveryStrategy; -import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.common.security.auth.SecurityProtocol; import org.apache.kafka.common.serialization.ByteArraySerializer; @@ -27,18 +26,14 @@ import org.junit.jupiter.api.Test; -import java.io.FileInputStream; -import java.io.InputStream; import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.Properties; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; -import static org.junit.jupiter.api.Assertions.fail; public class ProducerConfigTest { @@ -93,7 +88,6 @@ public void testInvalidCompressionType() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "abc"); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); } @@ -103,7 +97,6 @@ public void testInvalidSecurityProtocol() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc"); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -113,7 +106,6 @@ public void testDefaultMetadataRecoveryStrategy() { Map configs = new HashMap<>(); configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ProducerConfig producerConfig = new ProducerConfig(configs); assertEquals(MetadataRecoveryStrategy.REBOOTSTRAP.name, producerConfig.getString(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -124,7 +116,6 @@ public void testInvalidMetadataRecoveryStrategy() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "abc"); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertTrue(ce.getMessage().contains(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG)); } @@ -136,7 +127,6 @@ public void testCaseInsensitiveSecurityProtocol() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); final ProducerConfig producerConfig = new ProducerConfig(configs); assertEquals(saslSslLowerCase, producerConfig.originals().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -148,7 +138,6 @@ void testUpperboundCheckOfEnableIdempotence() { configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, inFlightConnection); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); ConfigException configException = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); assertEquals("To use the idempotent producer, " + ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION + " must be set to at most 5. Current value is " + inFlightConnection + ".", configException.getMessage()); @@ -156,50 +145,4 @@ void testUpperboundCheckOfEnableIdempotence() { configs.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "5"); assertDoesNotThrow(() -> new ProducerConfig(configs)); } - - @Test - void testTwoPhaseCommitIncompatibleWithTransactionTimeout() { - Map configs = new HashMap<>(); - configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, keySerializerClass); - configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, valueSerializerClass); - configs.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true); - configs.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-txn-id"); - configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, true); - configs.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60000); - configs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); - - ConfigException ce = assertThrows(ConfigException.class, () -> new ProducerConfig(configs)); - assertTrue(ce.getMessage().contains(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG)); - assertTrue(ce.getMessage().contains(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG)); - - // Verify that setting one but not the other is valid - configs.remove(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG); - assertDoesNotThrow(() -> new ProducerConfig(configs)); - - configs.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, 60000); - configs.put(ProducerConfig.TRANSACTION_TWO_PHASE_COMMIT_ENABLE_CONFIG, false); - assertDoesNotThrow(() -> new ProducerConfig(configs)); - } - - /** - * Validates config/producer.properties file to avoid getting out of sync with ProducerConfig. - */ - @Test - public void testValidateConfigPropertiesFile() { - Properties props = new Properties(); - - try (InputStream inputStream = new FileInputStream(System.getProperty("user.dir") + "/../config/producer.properties")) { - props.load(inputStream); - } catch (Exception e) { - fail("Failed to load config/producer.properties file: " + e.getMessage()); - } - - ProducerConfig config = new ProducerConfig(props); - - for (String key : config.originals().keySet()) { - if (!ProducerConfig.configDef().configKeys().containsKey(key)) { - fail("Invalid configuration key: " + key); - } - } - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java index 727368e8edd1b..128e15ed6c60a 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/BufferPoolTest.java @@ -219,7 +219,7 @@ public void testCleanupMemoryAvailabilityWaiterOnInterruption() throws Exception t1.join(); t2.join(); // both the allocate() called by threads t1 and t2 should have been interrupted and the waiters queue should be empty - assertEquals(0, pool.queued()); + assertEquals(pool.queued(), 0); } @Test @@ -332,7 +332,7 @@ protected ByteBuffer allocateByteBuffer(int size) { } - assertEquals(1024, bufferPool.availableMemory()); + assertEquals(bufferPool.availableMemory(), 1024); } public static class StressTestThread extends Thread { diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java index 383aa82ee2d88..46d1ed329eee2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/KafkaProducerMetricsTest.java @@ -121,8 +121,8 @@ private void assertMetricRemoved(final String name) { private void assertMetricValue(final String name) { assertEquals( - (double) METRIC_VALUE, - metrics.metric(metrics.metricName(name, KafkaProducerMetrics.GROUP)).metricValue() + metrics.metric(metrics.metricName(name, KafkaProducerMetrics.GROUP)).metricValue(), + (double) METRIC_VALUE ); } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerBatchTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerBatchTest.java index 38ac88e95ca38..ab9a56f2b3eb7 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerBatchTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerBatchTest.java @@ -352,7 +352,7 @@ private void testCompleteExceptionally( for (int i = 0; i < futures.size(); i++) { FutureRecordMetadata future = futures.get(i); - RuntimeException caughtException = TestUtils.assertFutureThrows(RuntimeException.class, future); + RuntimeException caughtException = TestUtils.assertFutureThrows(future, RuntimeException.class); RuntimeException expectedException = recordExceptions.apply(i); assertEquals(expectedException, caughtException); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java index c81de1d74fc52..13d4957a78e4b 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerInterceptorsTest.java @@ -22,7 +22,6 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.header.Headers; import org.junit.jupiter.api.Test; @@ -69,10 +68,9 @@ public void onAcknowledgement(RecordMetadata metadata, Exception exception) { onAckCount++; if (exception != null) { onErrorAckCount++; - if (metadata != null) { - if (metadata.topic() == null) { - throw new NullPointerException("Topic is null"); - } + // the length check is just to call topic() method and let it throw an exception + // if RecordMetadata.TopicPartition is null + if (metadata != null && metadata.topic().length() >= 0) { onErrorAckWithTopicSetCount++; if (metadata.partition() >= 0) onErrorAckWithTopicPartitionSetCount++; @@ -97,72 +95,16 @@ public void injectOnAcknowledgementError(boolean on) { } } - private class AppendNewProducerInterceptor implements ProducerInterceptor { - private final String appendStr; - private boolean throwExceptionOnSend = false; - private boolean throwExceptionOnAck = false; - - public AppendNewProducerInterceptor(String appendStr) { - this.appendStr = appendStr; - } - - @Override - public void configure(Map configs) { - } - - @Override - public ProducerRecord onSend(ProducerRecord record) { - onSendCount++; - if (throwExceptionOnSend) - throw new KafkaException("Injected exception in AppendNewProducerInterceptor.onSend"); - - return new ProducerRecord<>( - record.topic(), record.partition(), record.key(), record.value().concat(appendStr)); - } - - @Override - public void onAcknowledgement(RecordMetadata metadata, Exception exception, Headers headers) { - onAckCount++; - if (exception != null) { - onErrorAckCount++; - if (metadata != null) { - if (metadata.topic() == null) { - throw new NullPointerException("Topic is null"); - } - onErrorAckWithTopicSetCount++; - if (metadata.partition() >= 0) - onErrorAckWithTopicPartitionSetCount++; - } - } - if (throwExceptionOnAck) - throw new KafkaException("Injected exception in AppendNewProducerInterceptor.onAcknowledgement"); - } - - @Override - public void close() { - } - - // if 'on' is true, onSend will always throw an exception - public void injectOnSendError(boolean on) { - throwExceptionOnSend = on; - } - - // if 'on' is true, onAcknowledgement will always throw an exception - public void injectOnAcknowledgementError(boolean on) { - throwExceptionOnAck = on; - } - } - @Test public void testOnSendChain() { List> interceptorList = new ArrayList<>(); // we are testing two different interceptors by configuring the same interceptor differently, which is not // how it would be done in KafkaProducer, but ok for testing interceptor callbacks AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); + AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); // verify that onSend() mutates the record as expected ProducerRecord interceptedRecord = interceptors.onSend(producerRecord); @@ -197,23 +139,23 @@ public void testOnAcknowledgementChain() { // we are testing two different interceptors by configuring the same interceptor differently, which is not // how it would be done in KafkaProducer, but ok for testing interceptor callbacks AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); + AppendProducerInterceptor interceptor2 = new AppendProducerInterceptor("Two"); interceptorList.add(interceptor1); interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); // verify onAck is called on all interceptors RecordMetadata meta = new RecordMetadata(tp, 0, 0, 0, 0, 0); - interceptors.onAcknowledgement(meta, null, null); + interceptors.onAcknowledgement(meta, null); assertEquals(2, onAckCount); // verify that onAcknowledgement exceptions do not propagate interceptor1.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null, null); + interceptors.onAcknowledgement(meta, null); assertEquals(4, onAckCount); interceptor2.injectOnAcknowledgementError(true); - interceptors.onAcknowledgement(meta, null, null); + interceptors.onAcknowledgement(meta, null); assertEquals(6, onAckCount); interceptors.close(); @@ -223,29 +165,27 @@ public void testOnAcknowledgementChain() { public void testOnAcknowledgementWithErrorChain() { List> interceptorList = new ArrayList<>(); AppendProducerInterceptor interceptor1 = new AppendProducerInterceptor("One"); - AppendNewProducerInterceptor interceptor2 = new AppendNewProducerInterceptor("Two"); interceptorList.add(interceptor1); - interceptorList.add(interceptor2); - ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList, null); + ProducerInterceptors interceptors = new ProducerInterceptors<>(interceptorList); // verify that metadata contains both topic and partition interceptors.onSendError(producerRecord, new TopicPartition(producerRecord.topic(), producerRecord.partition()), new KafkaException("Test")); - assertEquals(2, onErrorAckCount); - assertEquals(2, onErrorAckWithTopicPartitionSetCount); + assertEquals(1, onErrorAckCount); + assertEquals(1, onErrorAckWithTopicPartitionSetCount); // verify that metadata contains both topic and partition (because record already contains partition) interceptors.onSendError(producerRecord, null, new KafkaException("Test")); - assertEquals(4, onErrorAckCount); - assertEquals(4, onErrorAckWithTopicPartitionSetCount); + assertEquals(2, onErrorAckCount); + assertEquals(2, onErrorAckWithTopicPartitionSetCount); // if producer record does not contain partition, interceptor should get partition == -1 ProducerRecord record2 = new ProducerRecord<>("test2", null, 1, "value"); interceptors.onSendError(record2, null, new KafkaException("Test")); - assertEquals(6, onErrorAckCount); - assertEquals(6, onErrorAckWithTopicSetCount); - assertEquals(4, onErrorAckWithTopicPartitionSetCount); + assertEquals(3, onErrorAckCount); + assertEquals(3, onErrorAckWithTopicSetCount); + assertEquals(2, onErrorAckWithTopicPartitionSetCount); // if producer record does not contain partition, but topic/partition is passed to // onSendError, then interceptor should get valid partition @@ -253,15 +193,15 @@ public void testOnAcknowledgementWithErrorChain() { interceptors.onSendError(record2, new TopicPartition(record2.topic(), reassignedPartition), new KafkaException("Test")); - assertEquals(8, onErrorAckCount); - assertEquals(8, onErrorAckWithTopicSetCount); - assertEquals(6, onErrorAckWithTopicPartitionSetCount); + assertEquals(4, onErrorAckCount); + assertEquals(4, onErrorAckWithTopicSetCount); + assertEquals(3, onErrorAckWithTopicPartitionSetCount); // if both record and topic/partition are null, interceptor should not receive metadata interceptors.onSendError(null, null, new KafkaException("Test")); - assertEquals(10, onErrorAckCount); - assertEquals(8, onErrorAckWithTopicSetCount); - assertEquals(6, onErrorAckWithTopicPartitionSetCount); + assertEquals(5, onErrorAckCount); + assertEquals(4, onErrorAckWithTopicSetCount); + assertEquals(3, onErrorAckWithTopicPartitionSetCount); interceptors.close(); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java index bec0eb2fcff30..64a1b41a14ec2 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/ProducerMetadataTest.java @@ -29,8 +29,10 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; import java.util.concurrent.atomic.AtomicReference; @@ -255,14 +257,14 @@ public void testMetadataPartialUpdate() { assertTrue(metadata.updateRequested()); assertEquals(0, metadata.timeToNextUpdate(now)); - assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3)); - assertEquals(metadata.newTopics(), Set.of(topic2, topic3)); + assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3))); + assertEquals(metadata.newTopics(), new HashSet<>(Arrays.asList(topic2, topic3))); // Perform the partial update for a subset of the new topics. now += 1000; assertTrue(metadata.updateRequested()); metadata.updateWithCurrentRequestVersion(responseWithTopics(Collections.singleton(topic2)), true, now); - assertEquals(metadata.topics(), Set.of(topic1, topic2, topic3)); + assertEquals(metadata.topics(), new HashSet<>(Arrays.asList(topic1, topic2, topic3))); assertEquals(metadata.newTopics(), Collections.singleton(topic3)); } @@ -300,7 +302,7 @@ public void testRequestUpdateForTopic() { // Perform the full update. This should clear the update request. now += 1000; - metadata.updateWithCurrentRequestVersion(responseWithTopics(Set.of(topic1, topic2)), false, now); + metadata.updateWithCurrentRequestVersion(responseWithTopics(new HashSet<>(Arrays.asList(topic1, topic2))), false, now); assertFalse(metadata.updateRequested()); } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java index 750440d2595a5..ff50033b388a6 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/RecordAccumulatorTest.java @@ -58,15 +58,13 @@ import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.Comparator; import java.util.Deque; -import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; -import java.util.PriorityQueue; import java.util.Random; import java.util.Set; import java.util.concurrent.ExecutionException; @@ -80,7 +78,6 @@ import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -170,7 +167,7 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); // drain batches from 2 nodes: node1 => tp1, node2 => tp3, because the max request size is full after the first batch drained - Map> batches1 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); + Map> batches1 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches1, tp1, tp3); // add record for tp1, tp3 @@ -179,11 +176,11 @@ public void testDrainBatches() throws Exception { // drain batches from 2 nodes: node1 => tp2, node2 => tp4, because the max request size is full after the first batch drained // The drain index should start from next topic partition, that is, node1 => tp2, node2 => tp4 - Map> batches2 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); + Map> batches2 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches2, tp2, tp4); // make sure in next run, the drain index will start from the beginning - Map> batches3 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); + Map> batches3 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches3, tp1, tp3); // add record for tp2, tp3, tp4 and mute the tp4 @@ -192,7 +189,7 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition4, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.mutePartition(tp4); // drain batches from 2 nodes: node1 => tp2, node2 => tp3 (because tp4 is muted) - Map> batches4 = accum.drain(metadataCache, Set.of(node1, node2), (int) batchSize, 0); + Map> batches4 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), (int) batchSize, 0); verifyTopicPartitionInBatches(batches4, tp2, tp3); // add record for tp1, tp2, tp3, and unmute tp4 @@ -201,12 +198,12 @@ public void testDrainBatches() throws Exception { accum.append(topic, partition3, 0L, key, value, Record.EMPTY_HEADERS, null, maxBlockTimeMs, time.milliseconds(), cluster); accum.unmutePartition(tp4); // set maxSize as a max value, so that the all partitions in 2 nodes should be drained: node1 => [tp1, tp2], node2 => [tp3, tp4] - Map> batches5 = accum.drain(metadataCache, Set.of(node1, node2), Integer.MAX_VALUE, 0); + Map> batches5 = accum.drain(metadataCache, new HashSet<>(Arrays.asList(node1, node2)), Integer.MAX_VALUE, 0); verifyTopicPartitionInBatches(batches5, tp1, tp2, tp3, tp4); } private void verifyTopicPartitionInBatches(Map> nodeBatches, TopicPartition... tp) { - int allTpBatchCount = (int) nodeBatches.values().stream().mapToLong(Collection::size).sum(); + int allTpBatchCount = (int) nodeBatches.values().stream().flatMap(Collection::stream).count(); assertEquals(tp.length, allTpBatchCount); List topicPartitionsInBatch = new ArrayList<>(); for (Map.Entry> entry : nodeBatches.entrySet()) { @@ -400,7 +397,7 @@ public void testStressfulSituation() throws Exception { List batches = accum.drain(metadataCache, nodes, 5 * 1024, 0).get(node1.id()); if (batches != null) { for (ProducerBatch batch : batches) { - for (@SuppressWarnings("UnusedLocalVariable") Record ignored : batch.records().records()) + for (Record record : batch.records().records()) read++; accum.deallocate(batch); } @@ -463,7 +460,7 @@ public void testRetryBackoff() throws Exception { final RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMaxMs, - deliveryTimeoutMs, metrics, metricGrpName, time, null, + deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); long now = time.milliseconds(); @@ -528,7 +525,7 @@ public void testExponentialRetryBackoff() throws Exception { final RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMaxMs, - deliveryTimeoutMs, metrics, metricGrpName, time, null, + deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); long now = time.milliseconds(); @@ -589,7 +586,7 @@ public void testExponentialRetryBackoffLeaderChange() throws Exception { final RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMaxMs, - deliveryTimeoutMs, metrics, metricGrpName, time, null, + deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); long now = time.milliseconds(); @@ -892,7 +889,7 @@ public void testExpiredBatches() throws InterruptedException { readyNodes = accum.ready(metadataCache, time.milliseconds()).readyNodes; assertEquals(Collections.singleton(node1), readyNodes, "Our partition's leader should be ready"); Map> drained = accum.drain(metadataCache, readyNodes, Integer.MAX_VALUE, time.milliseconds()); - assertEquals(1, drained.get(node1.id()).size(), "There should be only one batch."); + assertEquals(drained.get(node1.id()).size(), 1, "There should be only one batch."); time.sleep(1000L); accum.reenqueue(drained.get(node1.id()).get(0), time.milliseconds()); @@ -1269,7 +1266,7 @@ public void testAdaptiveBuiltInPartitioner() throws Exception { long totalSize = 1024 * 1024; int batchSize = 128; RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 0L, 0L, - 3200, config, metrics, "producer-metrics", time, null, + 3200, config, metrics, "producer-metrics", time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, "producer-internal-metrics")) { @Override BuiltInPartitioner createBuiltInPartitioner(LogContext logContext, String topic, @@ -1402,7 +1399,7 @@ public void testReadyAndDrainWhenABatchIsBeingRetried() throws InterruptedExcept String metricGrpName = "producer-metrics"; final RecordAccumulator accum = new RecordAccumulator(logContext, batchSize, Compression.NONE, lingerMs, retryBackoffMs, retryBackoffMaxMs, - deliveryTimeoutMs, metrics, metricGrpName, time, null, + deliveryTimeoutMs, metrics, metricGrpName, time, new ApiVersions(), null, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); // Create 1 batch(batchA) to be produced to partition1. @@ -1433,7 +1430,7 @@ public void testReadyAndDrainWhenABatchIsBeingRetried() throws InterruptedExcept // Try to drain from node1, it should return no batches. Map> batches = accum.drain(metadataCache, - Set.of(node1), 999999 /* maxSize */, now); + new HashSet<>(Collections.singletonList(node1)), 999999 /* maxSize */, now); assertTrue(batches.containsKey(node1.id()) && batches.get(node1.id()).isEmpty(), "No batches ready to be drained on Node1"); } @@ -1514,7 +1511,7 @@ public void testDrainWithANodeThatDoesntHostAnyPartitions() { // Drain for node2, it should return 0 batches, Map> batches = accum.drain(metadataCache, - Set.of(node2), 999999 /* maxSize */, time.milliseconds()); + new HashSet<>(Collections.singletonList(node2)), 999999 /* maxSize */, time.milliseconds()); assertTrue(batches.get(node2.id()).isEmpty()); } @@ -1613,6 +1610,22 @@ private int expectedNumAppends(int batchSize) { } } + /** + * Return the offset delta when there is no key. + */ + private int expectedNumAppendsNoKey(int batchSize) { + int size = 0; + int offsetDelta = 0; + while (true) { + int recordSize = DefaultRecord.sizeInBytes(offsetDelta, 0, 0, value.length, + Record.EMPTY_HEADERS); + if (size + recordSize > batchSize) + return offsetDelta; + offsetDelta += 1; + size += recordSize; + } + } + private RecordAccumulator createTestRecordAccumulator(int batchSize, long totalSize, Compression compression, int lingerMs) { int deliveryTimeoutMs = 3200; return createTestRecordAccumulator(deliveryTimeoutMs, batchSize, totalSize, compression, lingerMs); @@ -1648,6 +1661,7 @@ private RecordAccumulator createTestRecordAccumulator( metrics, metricGrpName, time, + new ApiVersions(), txnManager, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)) { @Override @@ -1669,125 +1683,4 @@ int randomPartition() { return mockRandom == null ? super.randomPartition() : mockRandom.getAndIncrement(); } } - - /** - * This test verifies that RecordAccumulator's batch splitting functionality - * correctly handles oversized batches - * by splitting them down to individual records when necessary. It ensures that: - * 1. The splitting process can reduce batches to single-record size - * 2. The process does not enter infinite recursion loops - * 3. No records are lost or duplicated during splitting - * 4. The correct batch state is maintained throughout the process - */ - @Test - public void testSplitAndReenqueuePreventInfiniteRecursion() throws InterruptedException { - // Initialize test environment with a large batch size - long now = time.milliseconds(); - int batchSize = 1024 * 1024; // 1MB batch size - RecordAccumulator accum = createTestRecordAccumulator(batchSize, 10 * batchSize, Compression.gzip().build(), - 10); - - // Create a large producer batch manually (bypassing the accumulator's normal - // append process) - ByteBuffer buffer = ByteBuffer.allocate(batchSize); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.CREATE_TIME, 0L); - ProducerBatch bigBatch = new ProducerBatch(tp1, builder, now, true); - - // Populate the batch with multiple records (100 records of 1KB each) - byte[] largeValue = new byte[1024]; // Each record is 1KB - for (int i = 0; i < 100; i++) { - ByteBuffer keyBytes = ByteBuffer.allocate(4); - keyBytes.putInt(i); // Use the loop counter as the key for verification later - FutureRecordMetadata result = bigBatch.tryAppend(time.milliseconds(), keyBytes.array(), largeValue, - Record.EMPTY_HEADERS, null, time.milliseconds()); - assertNotNull(result); - } - bigBatch.close(); - - time.sleep(101L); // Ensure the batch has time to become ready for processing - - // Add the batch to the accumulator for splitting - accum.reenqueue(bigBatch, time.milliseconds()); - - // Iteratively split batches until we find single-record batches - // This section tests the core batch splitting functionality - int splitOperations = 0; - int maxSplitOperations = 100; // Safety limit to prevent infinite recursion - boolean foundSingleRecordBatch = false; - - // Use a comparator that puts the batch with the most records first - Comparator reverseComparator = (batch1, batch2) -> Integer.compare(batch2.recordCount, - batch1.recordCount); - - while (splitOperations < maxSplitOperations && !foundSingleRecordBatch) { - // Get the current batches for this topic-partition - Deque tp1Deque = accum.getDeque(tp1); - if (tp1Deque.isEmpty()) { - break; - } - - // Find the batch with the most records - PriorityQueue tp1PriorityQue = new PriorityQueue<>(reverseComparator); - tp1PriorityQue.addAll(tp1Deque); - ProducerBatch batch = tp1PriorityQue.poll(); - if (batch == null) { - break; - } - - // If we've found a batch with only one record, we've reached our goal - if (batch.recordCount == 1) { - foundSingleRecordBatch = true; - break; - } - - // Remove the batch from the deque before splitting it - tp1Deque.remove(batch); - - // Split the batch and track the operation - int numSplitBatches = accum.splitAndReenqueue(batch); - splitOperations++; - - // If splitting produced no new batches (shouldn't happen with multi-record - // batches) - // mark the batch as complete - if (numSplitBatches == 0) { - assertEquals(1, batch.recordCount, "Unsplittable batch should have only 1 record"); - batch.complete(0L, 0L); - foundSingleRecordBatch = true; - } - } - - // Verification section: Check that the splitting process worked as expected - - // Verify that we found a single-record batch, proving that splitting can reach - // that level - assertTrue(foundSingleRecordBatch, "Should eventually produce batches with single records"); - - // Verify we didn't hit our safety limit, which would indicate potential - // infinite recursion - assertTrue(splitOperations < maxSplitOperations, - "Should not hit the safety limit, indicating no infinite recursion"); - - // Verify all remaining batches have at most one record - Deque finalDeque = accum.getDeque(tp1); - - Map keyFoundMap = new HashMap<>(); - // Check each batch and verify record integrity - for (ProducerBatch batch : finalDeque) { - assertTrue(batch.recordCount <= 1, "All remaining batches should have at most 1 record"); - - // Extract the record and its key - MemoryRecords batchRecords = batch.records(); - Iterator recordIterator = batchRecords.records().iterator(); - Record singleRecord = recordIterator.next(); - - // Track keys to ensure no duplicates (putIfAbsent returns null if the key - // wasn't present) - assertNull(keyFoundMap.putIfAbsent(singleRecord.key().getInt(), true), - "Each key should appear exactly once in the split batches"); - } - - // Verify all original records are accounted for (no data loss) - assertEquals(100, keyFoundMap.size(), "All original 100 records should be present after splitting"); - } } diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java index cd984ac2a343e..25b9333da5bcc 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/SenderTest.java @@ -33,9 +33,7 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.ClusterAuthorizationException; import org.apache.kafka.common.errors.InvalidRequestException; @@ -96,8 +94,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.EnumSource; import org.mockito.InOrder; import java.nio.ByteBuffer; @@ -157,15 +153,9 @@ public class SenderTest { private static final int DELIVERY_TIMEOUT_MS = 1500; private static final long TOPIC_IDLE_MS = 60 * 1000; - private static final String TOPIC_NAME = "test"; - private static final Uuid TOPIC_ID = Uuid.fromString("MKXx1fIkQy2J9jXHhK8m1w"); - private static final Map TOPIC_IDS = Map.of( - TOPIC_NAME, TOPIC_ID, - "testSplitBatchAndSend", Uuid.fromString("2J9hK8m1wHMKjXfIkQyXx1") - ); - private final TopicPartition tp0 = new TopicPartition(TOPIC_NAME, 0); - private final TopicPartition tp1 = new TopicPartition(TOPIC_NAME, 1); - private final TopicPartition tp2 = new TopicPartition(TOPIC_NAME, 2); + private final TopicPartition tp0 = new TopicPartition("test", 0); + private final TopicPartition tp1 = new TopicPartition("test", 1); + private final TopicPartition tp2 = new TopicPartition("test", 2); private MockTime time = new MockTime(); private final int batchSize = 16 * 1024; private final ProducerMetadata metadata = new ProducerMetadata(0, 0, Long.MAX_VALUE, TOPIC_IDLE_MS, @@ -181,11 +171,6 @@ public class SenderTest { @BeforeEach public void setup() { setupWithTransactionState(null); - apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); - this.client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, - Collections.singletonMap(TOPIC_NAME, 3), - TOPIC_IDS)); } @AfterEach @@ -196,18 +181,12 @@ public void tearDown() { private static Map partitionRecords(ProduceRequest request) { Map partitionRecords = new HashMap<>(); request.data().topicData().forEach(tpData -> tpData.partitionData().forEach(p -> { - String topicName = tpData.name(); - - if (request.version() >= 13 && tpData.topicId() != Uuid.ZERO_UUID) { - topicName = TOPIC_IDS.entrySet().stream().filter(e -> e.getValue() == tpData.topicId()).map(Map.Entry::getKey).findFirst().get(); - } - - TopicPartition tp = new TopicPartition(topicName, p.index()); + TopicPartition tp = new TopicPartition(tpData.name(), p.index()); partitionRecords.put(tp, (MemoryRecords) p.records()); })); return Collections.unmodifiableMap(partitionRecords); } - + @Test public void testSimple() throws Exception { long offset = 0; @@ -234,7 +213,7 @@ public void testSimple() throws Exception { public void testQuotaMetrics() { MockSelector selector = new MockSelector(time); Sensor throttleTimeSensor = Sender.throttleTimeSensor(this.senderMetricsRegistry); - Cluster cluster = TestUtils.singletonCluster(TOPIC_NAME, 1); + Cluster cluster = TestUtils.singletonCluster("test", 1); Node node = cluster.nodes().get(0); NetworkClient client = new NetworkClient(selector, metadata, "mock", Integer.MAX_VALUE, 1000, 1000, 64 * 1024, 64 * 1024, 1000, 10 * 1000, 127 * 1000, @@ -286,7 +265,7 @@ public void testSenderMetricsTemplates() throws Exception { metrics = new Metrics(new MetricConfig().tags(clientTags)); SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 1, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + 1, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, apiVersions); // Append a message so that topic metrics are created appendToAccumulator(tp0, 0L, "key", "value"); @@ -314,7 +293,7 @@ public void testRetries() throws Exception { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, apiVersions); // do a successful retry Future future = appendToAccumulator(tp0, 0L, "key", "value"); sender.runOnce(); // connect @@ -372,13 +351,13 @@ public void testSendInOrder() throws Exception { try { Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, apiVersions); // Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1 - MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWithIds(2, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS); + MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("test", 2)); client.prepareMetadataUpdate(metadataUpdate1); // Send the first message. - TopicPartition tp2 = new TopicPartition(TOPIC_NAME, 1); + TopicPartition tp2 = new TopicPartition("test", 1); appendToAccumulator(tp2, 0L, "key1", "value1"); sender.runOnce(); // connect sender.runOnce(); // send produce request @@ -395,7 +374,7 @@ public void testSendInOrder() throws Exception { appendToAccumulator(tp2, 0L, "key2", "value2"); // Update metadata before sender receives response from broker 0. Now partition 2 moves to broker 0 - MetadataResponse metadataUpdate2 = RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS); + MetadataResponse metadataUpdate2 = RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2)); client.prepareMetadataUpdate(metadataUpdate2); // Sender should not send the second message to node 0. assertEquals(1, sender.inFlightBatches(tp2).size()); @@ -470,12 +449,12 @@ public void onCompletion(RecordMetadata metadata, Exception exception) { @Test public void testMetadataTopicExpiry() throws Exception { long offset = 0; - client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); Future future = appendToAccumulator(tp0); sender.runOnce(); assertTrue(metadata.containsTopic(tp0.topic()), "Topic not added to metadata"); - client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); sender.runOnce(); // send produce request client.respond(produceResponse(tp0, offset, Errors.NONE, 0)); sender.runOnce(); @@ -487,12 +466,12 @@ public void testMetadataTopicExpiry() throws Exception { assertTrue(metadata.containsTopic(tp0.topic()), "Topic not retained in metadata list"); time.sleep(TOPIC_IDLE_MS); - client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); assertFalse(metadata.containsTopic(tp0.topic()), "Unused topic has not been expired"); future = appendToAccumulator(tp0); sender.runOnce(); assertTrue(metadata.containsTopic(tp0.topic()), "Topic not added to metadata"); - client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); + client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); sender.runOnce(); // send produce request client.respond(produceResponse(tp0, offset + 1, Errors.NONE, 0)); sender.runOnce(); @@ -511,7 +490,7 @@ public void senderThreadShouldNotGetStuckWhenThrottledAndAddingPartitionsToTxn() ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -524,7 +503,7 @@ public void senderThreadShouldNotGetStuckWhenThrottledAndAddingPartitionsToTxn() // Verify node is throttled a little bit. In real-life Apache Kafka, we observe that this can happen // as done above by throttling or with a disconnect / backoff. long currentPollDelay = client.pollDelayMs(nodeToThrottle, startTime); - assertEquals(throttleTimeMs, currentPollDelay); + assertEquals(currentPollDelay, throttleTimeMs); txnManager.beginTransaction(); txnManager.maybeAddPartition(tp0); @@ -551,14 +530,12 @@ public void testNodeLatencyStats() throws Exception { RecordAccumulator.PartitionerConfig config = new RecordAccumulator.PartitionerConfig(false, 42); long totalSize = 1024 * 1024; accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 0L, 0L, - DELIVERY_TIMEOUT_MS, config, m, "producer-metrics", time, null, + DELIVERY_TIMEOUT_MS, config, m, "producer-metrics", time, apiVersions, null, new BufferPool(totalSize, batchSize, m, time, "producer-internal-metrics")); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); - apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); - Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, 1, - senderMetrics, time, REQUEST_TIMEOUT, 1000L, null); + senderMetrics, time, REQUEST_TIMEOUT, 1000L, null, new ApiVersions()); // Produce and send batch. long time1 = time.milliseconds(); @@ -639,15 +616,15 @@ public void testInitProducerIdWithMaxInFlightOne() { // Initialize transaction manager. InitProducerId will be queued up until metadata response // is processed and FindCoordinator can be sent to `leastLoadedNode`. TransactionManager transactionManager = new TransactionManager(new LogContext(), "testInitProducerIdWithPendingMetadataRequest", - 60000, 100L, new ApiVersions(), false); + 60000, 100L, new ApiVersions()); setupWithTransactionState(transactionManager, false, null, false); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0); - transactionManager.initializeTransactions(false); + transactionManager.initializeTransactions(); sender.runOnce(); // Process metadata response, prepare FindCoordinator and InitProducerId responses. // Verify producerId after the sender is run to process responses. - MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), Collections.emptyMap()); + MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap()); client.respond(metadataUpdate); prepareFindCoordinatorResponse(Errors.NONE, "testInitProducerIdWithPendingMetadataRequest"); prepareInitProducerResponse(Errors.NONE, producerIdAndEpoch.producerId, producerIdAndEpoch.epoch); @@ -672,7 +649,7 @@ public void testIdempotentInitProducerIdWithMaxInFlightOne() { // Process metadata and InitProducerId responses. // Verify producerId after the sender is run to process responses. - MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWithIds(1, Collections.emptyMap(), Collections.emptyMap()); + MetadataResponse metadataUpdate = RequestTestUtils.metadataUpdateWith(1, Collections.emptyMap()); client.respond(metadataUpdate); sender.runOnce(); sender.runOnce(); @@ -691,10 +668,10 @@ public void testNodeNotReady() { client = new MockClient(time, metadata); TransactionManager transactionManager = new TransactionManager(new LogContext(), "testNodeNotReady", - 60000, 100L, new ApiVersions(), false); + 60000, 100L, new ApiVersions()); setupWithTransactionState(transactionManager, false, null, true); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(producerId, (short) 0); - transactionManager.initializeTransactions(false); + transactionManager.initializeTransactions(); sender.runOnce(); Node node = metadata.fetch().nodes().get(0); @@ -1533,7 +1510,7 @@ public void testExpiryOfFirstBatchShouldCauseEpochBumpIfFutureBatchesFail() thro public void testUnresolvedSequencesAreNotFatal() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -1621,7 +1598,7 @@ public void testResetOfProducerStateShouldAllowQueuedBatchesToDrain() throws Exc SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); appendToAccumulator(tp0); // failed response Future successfulResponse = appendToAccumulator(tp1); @@ -1662,7 +1639,7 @@ public void testCloseWithProducerIdReset() throws Exception { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, 10, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); appendToAccumulator(tp0); // failed response appendToAccumulator(tp1); // success response @@ -1695,9 +1672,9 @@ public void testForceCloseWithProducerIdReset() throws Exception { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, 10, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); - appendToAccumulator(tp0); + Future failedResponse = appendToAccumulator(tp0); Future successfulResponse = appendToAccumulator(tp1); sender.runOnce(); // connect and send. @@ -1728,7 +1705,7 @@ public void testBatchesDrainedWithOldProducerIdShouldSucceedOnSubsequentRetry() SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); Future outOfOrderResponse = appendToAccumulator(tp0); Future successfulResponse = appendToAccumulator(tp1); @@ -1818,7 +1795,7 @@ public void testCorrectHandlingOfDuplicateSequenceError() throws Exception { @Test public void testTransactionalUnknownProducerHandlingWhenRetentionLimitReached() throws Exception { final long producerId = 343434L; - TransactionManager transactionManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions, false); + TransactionManager transactionManager = new TransactionManager(logContext, "testUnresolvedSeq", 60000, 100, apiVersions); setupWithTransactionState(transactionManager); doInitTransactions(transactionManager, new ProducerIdAndEpoch(producerId, (short) 0)); @@ -2266,7 +2243,7 @@ public void testSequenceNumberIncrement() throws InterruptedException { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); Future responseFuture = appendToAccumulator(tp0); client.prepareResponse(body -> { @@ -2306,7 +2283,7 @@ public void testRetryWhenProducerIdChanges() throws InterruptedException { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); Future responseFuture = appendToAccumulator(tp0); sender.runOnce(); // connect. @@ -2342,7 +2319,7 @@ public void testBumpEpochWhenOutOfOrderSequenceReceived() throws InterruptedExce SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); Future responseFuture = appendToAccumulator(tp0); sender.runOnce(); // connect. @@ -2362,44 +2339,38 @@ public void testBumpEpochWhenOutOfOrderSequenceReceived() throws InterruptedExce @Test public void testIdempotentSplitBatchAndSend() throws Exception { - TopicIdPartition tpId = new TopicIdPartition( - TOPIC_IDS.getOrDefault("testSplitBatchAndSend", Uuid.ZERO_UUID), - new TopicPartition("testSplitBatchAndSend", 1)); + TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1); TransactionManager txnManager = createTransactionManager(); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); setupWithTransactionState(txnManager); prepareAndReceiveInitProducerId(123456L, Errors.NONE); assertTrue(txnManager.hasProducerId()); - testSplitBatchAndSend(txnManager, producerIdAndEpoch, tpId); + testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp); } @Test public void testTransactionalSplitBatchAndSend() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TopicIdPartition tpId = new TopicIdPartition( - TOPIC_IDS.getOrDefault("testSplitBatchAndSend", Uuid.ZERO_UUID), - new TopicPartition("testSplitBatchAndSend", 1)); - - TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions, false); + TopicPartition tp = new TopicPartition("testSplitBatchAndSend", 1); + TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); txnManager.beginTransaction(); - txnManager.maybeAddPartition(tpId.topicPartition()); - apiVersions.update("0", NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())); - client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tpId.topicPartition(), Errors.NONE))); + txnManager.maybeAddPartition(tp); + client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp, Errors.NONE))); sender.runOnce(); - testSplitBatchAndSend(txnManager, producerIdAndEpoch, tpId); + testSplitBatchAndSend(txnManager, producerIdAndEpoch, tp); } @SuppressWarnings("deprecation") private void testSplitBatchAndSend(TransactionManager txnManager, ProducerIdAndEpoch producerIdAndEpoch, - TopicIdPartition tpId) throws Exception { + TopicPartition tp) throws Exception { int maxRetries = 1; - String topic = tpId.topic(); + String topic = tp.topic(); int deliveryTimeoutMs = 3000; long totalSize = 1024 * 1024; String metricGrpName = "producer-metrics"; @@ -2407,45 +2378,41 @@ private void testSplitBatchAndSend(TransactionManager txnManager, CompressionRatioEstimator.setEstimation(topic, CompressionType.GZIP, 0.2f); try (Metrics m = new Metrics()) { accumulator = new RecordAccumulator(logContext, batchSize, Compression.gzip().build(), - 0, 0L, 0L, deliveryTimeoutMs, m, metricGrpName, time, txnManager, + 0, 0L, 0L, deliveryTimeoutMs, m, metricGrpName, time, new ApiVersions(), txnManager, new BufferPool(totalSize, batchSize, metrics, time, "producer-internal-metrics")); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); Sender sender = new Sender(logContext, client, metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, maxRetries, - senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager); + senderMetrics, time, REQUEST_TIMEOUT, 1000L, txnManager, new ApiVersions()); // Create a two broker cluster, with partition 0 on broker 0 and partition 1 on broker 1 - MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWithIds(2, Collections.singletonMap(topic, 2), TOPIC_IDS); + MetadataResponse metadataUpdate1 = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap(topic, 2)); client.prepareMetadataUpdate(metadataUpdate1); - metadataUpdate1.brokers().forEach(node -> - apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) - ); - // Send the first message. long nowMs = time.milliseconds(); Cluster cluster = TestUtils.singletonCluster(); Future f1 = - accumulator.append(tpId.topic(), tpId.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; + accumulator.append(tp.topic(), tp.partition(), 0L, "key1".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; Future f2 = - accumulator.append(tpId.topic(), tpId.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; + accumulator.append(tp.topic(), tp.partition(), 0L, "key2".getBytes(), new byte[batchSize / 2], null, null, MAX_BLOCK_TIMEOUT, nowMs, cluster).future; sender.runOnce(); // connect sender.runOnce(); // send produce request - assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence should be 2"); + assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence should be 2"); String id = client.requests().peek().destination(); assertEquals(ApiKeys.PRODUCE, client.requests().peek().requestBuilder().apiKey()); Node node = new Node(Integer.parseInt(id), "localhost", 0); assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - Map responseMap = new HashMap<>(); - responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE)); + Map responseMap = new HashMap<>(); + responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.MESSAGE_TOO_LARGE)); client.respond(new ProduceResponse(responseMap)); sender.runOnce(); // split and reenqueue - assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence should be 2"); + assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence should be 2"); // The compression ratio should have been improved once. assertEquals(CompressionType.GZIP.rate - CompressionRatioEstimator.COMPRESSION_RATIO_IMPROVING_STEP, CompressionRatioEstimator.estimation(topic, CompressionType.GZIP), 0.01); sender.runOnce(); // send the first produce request - assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should be 2"); + assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should be 2"); assertFalse(f1.isDone(), "The future shouldn't have been done."); assertFalse(f2.isDone(), "The future shouldn't have been done."); id = client.requests().peek().destination(); @@ -2454,14 +2421,14 @@ private void testSplitBatchAndSend(TransactionManager txnManager, assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); - client.respond(produceRequestMatcher(tpId.topicPartition(), producerIdAndEpoch, 0, txnManager.isTransactional()), + responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 0, txnManager.isTransactional()), new ProduceResponse(responseMap)); sender.runOnce(); // receive assertTrue(f1.isDone(), "The future should have been done."); - assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should still be 2"); - assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tpId.topicPartition()), "The last ack'd sequence number should be 0"); + assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should still be 2"); + assertEquals(OptionalInt.of(0), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 0"); assertFalse(f2.isDone(), "The future shouldn't have been done."); assertEquals(0L, f1.get().offset(), "Offset of the first message should be 0"); sender.runOnce(); // send the second produce request @@ -2471,16 +2438,16 @@ private void testSplitBatchAndSend(TransactionManager txnManager, assertEquals(1, client.inFlightRequestCount()); assertTrue(client.isReady(node, time.milliseconds()), "Client ready status should be true"); - responseMap.put(tpId, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L)); - client.respond(produceRequestMatcher(tpId.topicPartition(), producerIdAndEpoch, 1, txnManager.isTransactional()), + responseMap.put(tp, new ProduceResponse.PartitionResponse(Errors.NONE, 1L, 0L, 0L)); + client.respond(produceRequestMatcher(tp, producerIdAndEpoch, 1, txnManager.isTransactional()), new ProduceResponse(responseMap)); sender.runOnce(); // receive assertTrue(f2.isDone(), "The future should have been done."); - assertEquals(2, txnManager.sequenceNumber(tpId.topicPartition()), "The next sequence number should be 2"); - assertEquals(OptionalInt.of(1), txnManager.lastAckedSequence(tpId.topicPartition()), "The last ack'd sequence number should be 1"); + assertEquals(2, txnManager.sequenceNumber(tp), "The next sequence number should be 2"); + assertEquals(OptionalInt.of(1), txnManager.lastAckedSequence(tp), "The last ack'd sequence number should be 1"); assertEquals(1L, f2.get().offset(), "Offset of the first message should be 1"); - assertTrue(accumulator.getDeque(tpId.topicPartition()).isEmpty(), "There should be no batch in the accumulator"); + assertTrue(accumulator.getDeque(tp).isEmpty(), "There should be no batch in the accumulator"); assertTrue((Double) (m.metrics().get(senderMetrics.batchSplitRate).metricValue()) > 0, "There should be a split"); } } @@ -2524,8 +2491,8 @@ public void testInflightBatchesExpireOnDeliveryTimeout() throws InterruptedExcep assertEquals(1, client.inFlightRequestCount()); assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator"); - Map responseMap = new HashMap<>(); - responseMap.put(new TopicIdPartition(TOPIC_ID, tp0), new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + Map responseMap = new HashMap<>(); + responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); client.respond(new ProduceResponse(responseMap)); time.sleep(deliveryTimeoutMs); @@ -2565,17 +2532,15 @@ public void testRecordErrorPropagatedToApplication() throws InterruptedException FutureRecordMetadata future = futureEntry.getValue(); assertTrue(future.isDone()); + KafkaException exception = TestUtils.assertFutureThrows(future, KafkaException.class); Integer index = futureEntry.getKey(); if (index == 0 || index == 2) { - InvalidRecordException exception = TestUtils.assertFutureThrows(InvalidRecordException.class, future); assertInstanceOf(InvalidRecordException.class, exception); assertEquals(index.toString(), exception.getMessage()); } else if (index == 3) { - InvalidRecordException exception = TestUtils.assertFutureThrows(InvalidRecordException.class, future); assertInstanceOf(InvalidRecordException.class, exception); assertEquals(Errors.INVALID_RECORD.message(), exception.getMessage()); } else { - KafkaException exception = TestUtils.assertFutureThrows(KafkaException.class, future); assertEquals(KafkaException.class, exception.getClass()); } } @@ -2704,8 +2669,8 @@ public void testExpiredBatchesInMultiplePartitions() throws Exception { assertEquals(1, client.inFlightRequestCount()); assertEquals(1, sender.inFlightBatches(tp0).size(), "Expect one in-flight batch in accumulator"); - Map responseMap = new HashMap<>(); - responseMap.put(new TopicIdPartition(TOPIC_ID, tp0), new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); + Map responseMap = new HashMap<>(); + responseMap.put(tp0, new ProduceResponse.PartitionResponse(Errors.NONE, 0L, 0L, 0L)); client.respond(new ProduceResponse(responseMap)); // Successfully expire both batches. @@ -2727,9 +2692,9 @@ public void testTransactionalRequestsSentOnShutdown() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testTransactionalRequestsSentOnShutdown", 6000, 100, apiVersions); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); + maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); TopicPartition tp = new TopicPartition("testTransactionalRequestsSentOnShutdown", 1); @@ -2760,11 +2725,11 @@ public void testRecordsFlushedImmediatelyOnTransactionCompletion() throws Except int lingerMs = 50; SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); - TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions); setupWithTransactionState(txnManager, lingerMs); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); + 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions); // Begin a transaction and successfully add one partition to it. ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); @@ -2817,11 +2782,11 @@ public void testAwaitPendingRecordsBeforeCommittingTransaction() throws Exceptio try (Metrics m = new Metrics()) { SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); - TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "txnId", 6000, 100, apiVersions); setupWithTransactionState(txnManager); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); + 1, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions); // Begin a transaction and successfully add one partition to it. ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); @@ -2888,9 +2853,9 @@ public void testIncompleteTransactionAbortOnShutdown() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testIncompleteTransactionAbortOnShutdown", 6000, 100, apiVersions); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); + maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); TopicPartition tp = new TopicPartition("testIncompleteTransactionAbortOnShutdown", 1); @@ -2922,9 +2887,9 @@ public void testForceShutdownWithIncompleteTransaction() { Metrics m = new Metrics(); SenderMetricsRegistry senderMetrics = new SenderMetricsRegistry(m); try { - TransactionManager txnManager = new TransactionManager(logContext, "testForceShutdownWithIncompleteTransaction", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testForceShutdownWithIncompleteTransaction", 6000, 100, apiVersions); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager); + maxRetries, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, txnManager, apiVersions); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); TopicPartition tp = new TopicPartition("testForceShutdownWithIncompleteTransaction", 1); @@ -2952,7 +2917,7 @@ public void testForceShutdownWithIncompleteTransaction() { @Test public void testTransactionAbortedExceptionOnAbortWithoutError() throws InterruptedException { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testTransactionAbortedExceptionOnAbortWithoutError", 60000, 100, apiVersions); setupWithTransactionState(txnManager, false, null); doInitTransactions(txnManager, producerIdAndEpoch); @@ -2971,14 +2936,14 @@ public void testTransactionAbortedExceptionOnAbortWithoutError() throws Interrup // drain all the unsent batches with a TransactionAbortedException. sender.runOnce(); // Now attempt to fetch the result for the record. - TestUtils.assertFutureThrows(TransactionAbortedException.class, metadata); + TestUtils.assertFutureThrows(metadata, TransactionAbortedException.class); } @Test public void testDoNotPollWhenNoRequestSent() { client = spy(new MockClient(time, metadata)); - TransactionManager txnManager = new TransactionManager(logContext, "testDoNotPollWhenNoRequestSent", 6000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testDoNotPollWhenNoRequestSent", 6000, 100, apiVersions); ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -2990,7 +2955,7 @@ public void testDoNotPollWhenNoRequestSent() { @Test public void testTooLargeBatchesAreSafelyRemoved() throws InterruptedException { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testSplitBatchAndSend", 60000, 100, apiVersions); setupWithTransactionState(txnManager, false, null); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3033,62 +2998,8 @@ public void testCustomErrorMessage() throws Exception { verifyErrorMessage(produceResponse(tp0, 0L, Errors.INVALID_REQUEST, 0, -1, errorMessage), errorMessage); } - @ParameterizedTest - @EnumSource(value = Errors.class, names = {"COORDINATOR_LOAD_IN_PROGRESS", "INVALID_TXN_STATE"}) - public void testTransactionShouldTransitionToAbortableForSenderAPI(Errors error) throws InterruptedException { - ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); - TransactionManager txnManager = new TransactionManager( - logContext, - "testRetriableException", - 60000, - RETRY_BACKOFF_MS, - apiVersions, - false - ); - - // Setup with transaction state and initialize transactions with single retry - setupWithTransactionState(txnManager, false, null, 1); - doInitTransactions(txnManager, producerIdAndEpoch); - - // Begin transaction and add partition - txnManager.beginTransaction(); - txnManager.maybeAddPartition(tp0); - client.prepareResponse(buildAddPartitionsToTxnResponseData(0, Collections.singletonMap(tp0, Errors.NONE))); - sender.runOnce(); - - // First produce request - appendToAccumulator(tp0); - client.prepareResponse(produceResponse(tp0, -1, error, -1)); - sender.runOnce(); - - // Sleep for retry backoff - time.sleep(RETRY_BACKOFF_MS); - - // Second attempt to process record - PREPARE the response before sending - client.prepareResponse(produceResponse(tp0, -1, error, -1)); - sender.runOnce(); - - // Now transaction should be in abortable state after retry is exhausted - assertTrue(txnManager.hasAbortableError()); - - // Second produce request - should fail with TransactionAbortableException - Future future2 = appendToAccumulator(tp0); - client.prepareResponse(produceResponse(tp0, -1, Errors.NONE, -1)); - // Sender will try to send and fail with TransactionAbortableException instead of COORDINATOR_LOAD_IN_PROGRESS, because we're in abortable state - sender.runOnce(); - assertFutureFailure(future2, TransactionAbortableException.class); - - // Verify transaction API requests also fail with TransactionAbortableException - try { - txnManager.beginCommit(); - fail("Expected beginCommit() to fail with TransactionAbortableException when in abortable error state"); - } catch (KafkaException e) { - assertEquals(TransactionAbortableException.class, e.getCause().getClass()); - } - } - @Test - public void testSenderShouldRetryWithBackoffOnRetriableError() throws InterruptedException { + public void testSenderShouldRetryWithBackoffOnRetriableError() { final long producerId = 343434L; TransactionManager transactionManager = createTransactionManager(); setupWithTransactionState(transactionManager); @@ -3113,7 +3024,7 @@ public void testSenderShouldRetryWithBackoffOnRetriableError() throws Interrupte public void testReceiveFailedBatchTwiceWithTransactions() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testFailTwice", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testFailTwice", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3163,7 +3074,7 @@ public void testReceiveFailedBatchTwiceWithTransactions() throws Exception { public void testInvalidTxnStateIsAnAbortableError() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "testInvalidTxnState", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "testInvalidTxnState", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3202,7 +3113,7 @@ public void testInvalidTxnStateIsAnAbortableError() throws Exception { public void testTransactionAbortableExceptionIsAnAbortableError() throws Exception { ProducerIdAndEpoch producerIdAndEpoch = new ProducerIdAndEpoch(123456L, (short) 0); apiVersions.update("0", NodeApiVersions.create(ApiKeys.INIT_PRODUCER_ID.id, (short) 0, (short) 3)); - TransactionManager txnManager = new TransactionManager(logContext, "textTransactionAbortableException", 60000, 100, apiVersions, false); + TransactionManager txnManager = new TransactionManager(logContext, "textTransactionAbortableException", 60000, 100, apiVersions); setupWithTransactionState(txnManager); doInitTransactions(txnManager, producerIdAndEpoch); @@ -3236,61 +3147,6 @@ public void testTransactionAbortableExceptionIsAnAbortableError() throws Excepti txnManager.beginTransaction(); } - - @Test - public void testAbortableErrorIsConvertedToFatalErrorDuringAbort() throws Exception { - - // Initialize and begin transaction - TransactionManager transactionManager = new TransactionManager(logContext, "testAbortableErrorIsConvertedToFatalErrorDuringAbort", 60000, 100, apiVersions, false); - setupWithTransactionState(transactionManager); - doInitTransactions(transactionManager, new ProducerIdAndEpoch(1L, (short) 0)); - transactionManager.beginTransaction(); - - // Add partition and send record - TopicPartition tp = new TopicPartition("test", 0); - addPartitionToTxn(sender, transactionManager, tp); - appendToAccumulator(tp); - - // Send record and get response - sender.runOnce(); - sendIdempotentProducerResponse(0, tp, Errors.NONE, 0); - sender.runOnce(); - - // Commit API with TRANSACTION_ABORTABLE error should set TM to Abortable state - client.prepareResponse(new EndTxnResponse(new EndTxnResponseData() - .setErrorCode(Errors.TRANSACTION_ABORTABLE.code()))); - - // Attempt to commit transaction - TransactionalRequestResult commitResult = transactionManager.beginCommit(); - sender.runOnce(); - try { - commitResult.await(1000, TimeUnit.MILLISECONDS); - fail("Expected abortable error to be thrown for commit"); - } catch (KafkaException e) { - assertTrue(transactionManager.hasAbortableError()); - assertEquals(TransactionAbortableException.class, commitResult.error().getClass()); - } - - // Abort API with TRANSACTION_ABORTABLE error should convert to Fatal error i.e. KafkaException - client.prepareResponse(new EndTxnResponse(new EndTxnResponseData() - .setErrorCode(Errors.TRANSACTION_ABORTABLE.code()))); - - // Attempt to abort transaction - TransactionalRequestResult abortResult = transactionManager.beginAbort(); - sender.runOnce(); - - // Verify the error is converted to KafkaException (not TransactionAbortableException) - try { - abortResult.await(1000, TimeUnit.MILLISECONDS); - fail("Expected KafkaException to be thrown"); - } catch (KafkaException e) { - // Verify TM is in FATAL_ERROR state - assertTrue(transactionManager.hasFatalError()); - assertFalse(e instanceof TransactionAbortableException); - assertEquals(KafkaException.class, abortResult.error().getClass()); - } - } - @Test public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exception { Metrics m = new Metrics(); @@ -3305,16 +3161,16 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio // lingerMs is 0 to send batch as soon as any records are available on it. this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 10L, retryBackoffMaxMs, - DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, null, pool); + DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, apiVersions, null, pool); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, + apiVersions); // Update metadata with leader-epochs. int tp0LeaderEpoch = 100; int epoch = tp0LeaderEpoch; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), - new TopicIdPartition(TOPIC_ID, tp1)), + RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2), tp -> { if (tp0.equals(tp)) { return epoch; @@ -3341,8 +3197,7 @@ public void testProducerBatchRetriesWhenPartitionLeaderChanges() throws Exceptio // Update leader epoch for tp0 int newEpoch = ++tp0LeaderEpoch; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), - new TopicIdPartition(TOPIC_ID, tp1)), + RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2), tp -> { if (tp0.equals(tp)) { return newEpoch; @@ -3390,7 +3245,7 @@ public void testSenderShouldCloseWhenTransactionManagerInErrorState() { TransactionManager transactionManager = mock(TransactionManager.class); SenderMetricsRegistry metricsRegistry = new SenderMetricsRegistry(metrics); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 1, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + 1, metricsRegistry, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); when(transactionManager.hasOngoingTransaction()).thenReturn(true); when(transactionManager.beginAbort()).thenThrow(new IllegalStateException()); sender.initiateClose(); @@ -3420,17 +3275,17 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorButNoNewLead // lingerMs is 0 to send batch as soon as any records are available on it. this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 10L, retryBackoffMaxMs, - DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, null, pool); + DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, apiVersions, null, pool); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, + apiVersions); // Update metadata with leader-epochs. int tp0LeaderEpoch = 100; int tp1LeaderEpoch = 200; int tp2LeaderEpoch = 300; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), - new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)), + RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 3), tp -> { if (tp0.equals(tp)) { return tp0LeaderEpoch; @@ -3500,17 +3355,17 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader // lingerMs is 0 to send batch as soon as any records are available on it. this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 10L, retryBackoffMaxMs, - DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, null, pool); + DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, apiVersions, null, pool); Sender sender = new Sender(logContext, client, metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, - 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null); + 10, senderMetrics, time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, null, + apiVersions); // Update metadata with leader-epochs. int tp0LeaderEpoch = 100; int tp1LeaderEpoch = 200; int tp2LeaderEpoch = 300; this.client.updateMetadata( - RequestTestUtils.metadataUpdateWithIds(1, Set.of(new TopicIdPartition(TOPIC_ID, tp0), - new TopicIdPartition(TOPIC_ID, tp1), new TopicIdPartition(TOPIC_ID, tp2)), + RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 3), tp -> { if (tp0.equals(tp)) { return tp0LeaderEpoch; @@ -3523,9 +3378,6 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader } })); Cluster startingMetadataCluster = metadata.fetch(); - startingMetadataCluster.nodes().forEach(node -> - apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) - ); // Produce to tp0/1/2, where NO_LEADER_OR_FOLLOWER with new leader info is returned for tp0/1, and tp2 is returned without errors. Future futureIsProducedTp0 = appendToAccumulator(tp0, 0L, "key", "value"); @@ -3546,9 +3398,7 @@ public void testWhenProduceResponseReturnsWithALeaderShipChangeErrorAndNewLeader responses.put(tp0, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER)); responses.put(tp1, new OffsetAndError(-1, Errors.NOT_LEADER_OR_FOLLOWER)); responses.put(tp2, new OffsetAndError(100, Errors.NONE)); - newNodes.forEach(node -> - apiVersions.update(node.idString(), NodeApiVersions.create(ApiKeys.PRODUCE.id, ApiKeys.PRODUCE.oldestVersion(), ApiKeys.PRODUCE.latestVersion())) - ); + Map partitionLeaderInfo = new HashMap<>(); ProduceResponseData.LeaderIdAndEpoch tp0LeaderInfo = new ProduceResponseData.LeaderIdAndEpoch(); tp0LeaderInfo.setLeaderEpoch(tp0LeaderEpoch + 1); @@ -3712,7 +3562,7 @@ private FutureRecordMetadata appendToAccumulator(TopicPartition tp, long timesta private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, long logStartOffset, String errorMessage) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset, Collections.emptyList(), errorMessage); - Map partResp = Collections.singletonMap(new TopicIdPartition(TOPIC_ID, tp), resp); + Map partResp = Collections.singletonMap(tp, resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -3728,11 +3578,9 @@ private ProduceResponse produceResponse(Map resp for (Map.Entry entry : responses.entrySet()) { TopicPartition topicPartition = entry.getKey(); - ProduceResponseData.TopicProduceResponse topicData = data.responses().find(topicPartition.topic(), TOPIC_ID); + ProduceResponseData.TopicProduceResponse topicData = data.responses().find(topicPartition.topic()); if (topicData == null) { - topicData = new ProduceResponseData.TopicProduceResponse() - .setTopicId(TOPIC_ID) - .setName(topicPartition.topic()); + topicData = new ProduceResponseData.TopicProduceResponse().setName(topicPartition.topic()); data.responses().add(topicData); } @@ -3770,7 +3618,7 @@ private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors e } private TransactionManager createTransactionManager() { - return new TransactionManager(new LogContext(), null, 0, RETRY_BACKOFF_MS, new ApiVersions(), false); + return new TransactionManager(new LogContext(), null, 0, RETRY_BACKOFF_MS, new ApiVersions()); } private void setupWithTransactionState(TransactionManager transactionManager) { @@ -3785,10 +3633,6 @@ private void setupWithTransactionState(TransactionManager transactionManager, bo setupWithTransactionState(transactionManager, guaranteeOrder, customPool, true, Integer.MAX_VALUE, 0); } - private void setupWithTransactionState(TransactionManager transactionManager, boolean guaranteeOrder, BufferPool customPool, int retries) { - setupWithTransactionState(transactionManager, guaranteeOrder, customPool, true, retries, 0); - } - private void setupWithTransactionState( TransactionManager transactionManager, boolean guaranteeOrder, @@ -3813,14 +3657,14 @@ private void setupWithTransactionState( BufferPool pool = (customPool == null) ? new BufferPool(totalSize, batchSize, metrics, time, metricGrpName) : customPool; this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, lingerMs, 0L, 0L, - DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, transactionManager, pool); + DELIVERY_TIMEOUT_MS, metrics, metricGrpName, time, apiVersions, transactionManager, pool); this.senderMetricsRegistry = new SenderMetricsRegistry(this.metrics); this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, guaranteeOrder, MAX_REQUEST_SIZE, ACKS_ALL, - retries, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager); + retries, this.senderMetricsRegistry, this.time, REQUEST_TIMEOUT, RETRY_BACKOFF_MS, transactionManager, apiVersions); - metadata.add(TOPIC_NAME, time.milliseconds()); + metadata.add("test", time.milliseconds()); if (updateMetadata) - this.client.updateMetadata(RequestTestUtils.metadataUpdateWithIds(1, Collections.singletonMap(TOPIC_NAME, 2), TOPIC_IDS)); + this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("test", 2))); } private void assertSuccessfulSend() throws InterruptedException { @@ -3876,7 +3720,7 @@ private InitProducerIdResponse initProducerIdResponse(long producerId, short pro } private void doInitTransactions(TransactionManager transactionManager, ProducerIdAndEpoch producerIdAndEpoch) { - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, transactionManager.transactionalId()); sender.runOnce(); sender.runOnce(); diff --git a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java index 7815b751d8004..a557478d197ab 100644 --- a/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java +++ b/clients/src/test/java/org/apache/kafka/clients/producer/internals/TransactionManagerTest.java @@ -26,9 +26,7 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Node; -import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.FencedInstanceIdException; import org.apache.kafka.common.errors.GroupAuthorizationException; @@ -38,7 +36,6 @@ import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.errors.TransactionAbortableException; -import org.apache.kafka.common.errors.TransactionAbortedException; import org.apache.kafka.common.errors.TransactionalIdAuthorizationException; import org.apache.kafka.common.errors.UnsupportedForMessageFormatException; import org.apache.kafka.common.errors.UnsupportedVersionException; @@ -133,13 +130,10 @@ public class TransactionManagerTest { private final int transactionTimeoutMs = 1121; private final String topic = "test"; - private static final Uuid TOPIC_ID = Uuid.fromString("y2J9jXHhfIkQ1wK8mMKXx1"); private final TopicPartition tp0 = new TopicPartition(topic, 0); private final TopicPartition tp1 = new TopicPartition(topic, 1); private final long producerId = 13131L; private final short epoch = 1; - private final long ongoingProducerId = 999L; - private final short bumpedOngoingEpoch = 11; private final String consumerGroupId = "myConsumerGroup"; private final String memberId = "member"; private final int generationId = 5; @@ -154,7 +148,7 @@ public class TransactionManagerTest { private RecordAccumulator accumulator = null; private Sender sender = null; - private TestableTransactionManager transactionManager = null; + private TransactionManager transactionManager = null; private Node brokerNode = null; private long finalizedFeaturesEpoch = 0; @@ -164,28 +158,17 @@ public void setup() { this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, singletonMap("test", 2))); this.brokerNode = new Node(0, "localhost", 2211); - initializeTransactionManager(Optional.of(transactionalId), false, false); - } - - private void initializeTransactionManager( - Optional transactionalId, - boolean transactionV2Enabled - ) { - initializeTransactionManager(transactionalId, transactionV2Enabled, false); + initializeTransactionManager(Optional.of(transactionalId), false); } - private void initializeTransactionManager( - Optional transactionalId, - boolean transactionV2Enabled, - boolean enable2pc - ) { + private void initializeTransactionManager(Optional transactionalId, boolean transactionV2Enabled) { Metrics metrics = new Metrics(time); apiVersions.update("0", new NodeApiVersions(Arrays.asList( new ApiVersion() .setApiKey(ApiKeys.INIT_PRODUCER_ID.id) .setMinVersion((short) 0) - .setMaxVersion((short) 6), + .setMaxVersion((short) 3), new ApiVersion() .setApiKey(ApiKeys.PRODUCE.id) .setMinVersion((short) 0) @@ -204,9 +187,8 @@ private void initializeTransactionManager( .setMinVersionLevel(transactionV2Enabled ? (short) 2 : (short) 1)), finalizedFeaturesEpoch)); finalizedFeaturesEpoch += 1; - this.transactionManager = new TestableTransactionManager(logContext, transactionalId.orElse(null), - transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions, enable2pc); - + this.transactionManager = new TransactionManager(logContext, transactionalId.orElse(null), + transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions); int batchSize = 16 * 1024; int deliveryTimeoutMs = 3000; @@ -215,12 +197,12 @@ private void initializeTransactionManager( this.brokerNode = new Node(0, "localhost", 2211); this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 0L, 0L, - deliveryTimeoutMs, metrics, metricGrpName, time, transactionManager, + deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), this.time, REQUEST_TIMEOUT, - 50, transactionManager); + 50, transactionManager, apiVersions); } @Test @@ -623,9 +605,9 @@ public void testIsSendToPartitionAllowedWithPartitionNotAdded() { @ValueSource(booleans = {true, false}) public void testDefaultSequenceNumber(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); - assertEquals(0, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 0); transactionManager.incrementSequenceNumber(tp0, 3); - assertEquals(3, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 3); } @ParameterizedTest @@ -754,12 +736,12 @@ public void testDuplicateSequenceAfterProducerReset(boolean transactionV2Enabled final int deliveryTimeout = 15000; RecordAccumulator accumulator = new RecordAccumulator(logContext, 16 * 1024, Compression.NONE, 0, 0L, 0L, - deliveryTimeout, metrics, "", time, transactionManager, + deliveryTimeout, metrics, "", time, apiVersions, transactionManager, new BufferPool(1024 * 1024, 16 * 1024, metrics, time, "")); Sender sender = new Sender(logContext, this.client, this.metadata, accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), this.time, requestTimeout, - 0, transactionManager); + 0, transactionManager, apiVersions); assertEquals(0, transactionManager.sequenceNumber(tp0)); @@ -786,7 +768,7 @@ MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), thi // is reached even though the delivery timeout has expired and the // future has completed exceptionally. assertTrue(responseFuture1.isDone()); - TestUtils.assertFutureThrows(TimeoutException.class, responseFuture1); + TestUtils.assertFutureThrows(responseFuture1, TimeoutException.class); assertFalse(transactionManager.hasInFlightRequest()); assertEquals(1, client.inFlightRequestCount()); @@ -849,13 +831,13 @@ private ProducerBatch batchWithValue(TopicPartition tp, String value) { @ValueSource(booleans = {true, false}) public void testSequenceNumberOverflow(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); - assertEquals(0, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 0); transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE); - assertEquals(Integer.MAX_VALUE, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), Integer.MAX_VALUE); transactionManager.incrementSequenceNumber(tp0, 100); - assertEquals(99, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 99); transactionManager.incrementSequenceNumber(tp0, Integer.MAX_VALUE); - assertEquals(98, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 98); } @ParameterizedTest @@ -863,17 +845,17 @@ public void testSequenceNumberOverflow(boolean transactionV2Enabled) { public void testProducerIdReset(boolean transactionV2Enabled) { initializeTransactionManager(Optional.empty(), transactionV2Enabled); initializeIdempotentProducerId(15L, Short.MAX_VALUE); - assertEquals(0, transactionManager.sequenceNumber(tp0)); - assertEquals(0, transactionManager.sequenceNumber(tp1)); + assertEquals(transactionManager.sequenceNumber(tp0), 0); + assertEquals(transactionManager.sequenceNumber(tp1), 0); transactionManager.incrementSequenceNumber(tp0, 3); - assertEquals(3, transactionManager.sequenceNumber(tp0)); + assertEquals(transactionManager.sequenceNumber(tp0), 3); transactionManager.incrementSequenceNumber(tp1, 3); - assertEquals(3, transactionManager.sequenceNumber(tp1)); + assertEquals(transactionManager.sequenceNumber(tp1), 3); transactionManager.requestIdempotentEpochBumpForPartition(tp0); transactionManager.bumpIdempotentEpochAndResetIdIfNeeded(); - assertEquals(0, transactionManager.sequenceNumber(tp0)); - assertEquals(3, transactionManager.sequenceNumber(tp1)); + assertEquals(transactionManager.sequenceNumber(tp0), 0); + assertEquals(transactionManager.sequenceNumber(tp1), 3); } @Test @@ -1055,8 +1037,8 @@ public void testTransactionManagerDisablesV2() { .setMaxVersionLevel((short) 1) .setMinVersionLevel((short) 1)), 0)); - this.transactionManager = new TestableTransactionManager(logContext, transactionalId, - transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions, false); + this.transactionManager = new TransactionManager(logContext, transactionalId, + transactionTimeoutMs, DEFAULT_RETRY_BACKOFF_MS, apiVersions); int batchSize = 16 * 1024; int deliveryTimeoutMs = 3000; @@ -1065,12 +1047,12 @@ public void testTransactionManagerDisablesV2() { this.brokerNode = new Node(0, "localhost", 2211); this.accumulator = new RecordAccumulator(logContext, batchSize, Compression.NONE, 0, 0L, 0L, - deliveryTimeoutMs, metrics, metricGrpName, time, transactionManager, + deliveryTimeoutMs, metrics, metricGrpName, time, apiVersions, transactionManager, new BufferPool(totalSize, batchSize, metrics, time, metricGrpName)); this.sender = new Sender(logContext, this.client, this.metadata, this.accumulator, true, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), this.time, REQUEST_TIMEOUT, - 50, transactionManager); + 50, transactionManager, apiVersions); doInitTransactions(); assertFalse(transactionManager.isTransactionV2Enabled()); @@ -1080,7 +1062,7 @@ MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(metrics), thi public void testDisconnectAndRetry() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - transactionManager.initializeTransactions(false); + transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, true, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) == null); @@ -1093,15 +1075,15 @@ public void testDisconnectAndRetry() { public void testInitializeTransactionsTwiceRaisesError() { doInitTransactions(producerId, epoch); assertTrue(transactionManager.hasProducerId()); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions()); } @Test public void testUnsupportedFindCoordinator() { - transactionManager.initializeTransactions(false); + transactionManager.initializeTransactions(); client.prepareUnsupportedVersionResponse(body -> { FindCoordinatorRequest findCoordinatorRequest = (FindCoordinatorRequest) body; - assertEquals(CoordinatorType.TRANSACTION, CoordinatorType.forId(findCoordinatorRequest.data().keyType())); + assertEquals(CoordinatorType.forId(findCoordinatorRequest.data().keyType()), CoordinatorType.TRANSACTION); assertTrue(findCoordinatorRequest.data().key().isEmpty()); assertEquals(1, findCoordinatorRequest.data().coordinatorKeys().size()); assertTrue(findCoordinatorRequest.data().coordinatorKeys().contains(transactionalId)); @@ -1115,7 +1097,7 @@ public void testUnsupportedFindCoordinator() { @Test public void testUnsupportedInitTransactions() { - transactionManager.initializeTransactions(false); + transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertFalse(transactionManager.hasError()); @@ -1260,7 +1242,7 @@ public void testIllegalGenerationInTxnOffsetCommitByGroupMetadata() { public void testLookupCoordinatorOnDisconnectAfterSend() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1292,7 +1274,7 @@ public void testLookupCoordinatorOnDisconnectAfterSend() { public void testLookupCoordinatorOnDisconnectBeforeSend() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1323,7 +1305,7 @@ public void testLookupCoordinatorOnDisconnectBeforeSend() { public void testLookupCoordinatorOnNotCoordinatorError() { // This is called from the initTransactions method in the producer as the first order of business. // It finds the coordinator and then gets a PID. - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1348,7 +1330,7 @@ public void testLookupCoordinatorOnNotCoordinatorError() { @Test public void testTransactionalIdAuthorizationFailureInFindCoordinator() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED, false, CoordinatorType.TRANSACTION, transactionalId); @@ -1363,7 +1345,7 @@ public void testTransactionalIdAuthorizationFailureInFindCoordinator() { @Test public void testTransactionalIdAuthorizationFailureInInitProducerId() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1544,8 +1526,8 @@ public void testTopicAuthorizationFailureInAddPartitions() throws InterruptedExc assertAbortableError(TopicAuthorizationException.class); sender.runOnce(); - TestUtils.assertFutureThrows(TransactionAbortedException.class, firstPartitionAppend); - TestUtils.assertFutureThrows(TransactionAbortedException.class, secondPartitionAppend); + TestUtils.assertFutureThrows(firstPartitionAppend, KafkaException.class); + TestUtils.assertFutureThrows(secondPartitionAppend, KafkaException.class); } @Test @@ -1592,8 +1574,8 @@ public void testCommitWithTopicAuthorizationFailureInAddPartitionsInFlight() thr // the pending transaction commit. sender.runOnce(); assertTrue(commitResult.isCompleted()); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, firstPartitionAppend); - TestUtils.assertFutureThrows(TopicAuthorizationException.class, secondPartitionAppend); + TestUtils.assertFutureThrows(firstPartitionAppend, KafkaException.class); + TestUtils.assertFutureThrows(secondPartitionAppend, KafkaException.class); assertInstanceOf(TopicAuthorizationException.class, commitResult.error()); } @@ -1663,7 +1645,7 @@ public void testRetryAbortTransactionAfterTimeout() throws Exception { assertFalse(result.isAcked()); assertFalse(transactionManager.hasOngoingTransaction()); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); + assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); assertThrows(IllegalStateException.class, transactionManager::beginTransaction); assertThrows(IllegalStateException.class, transactionManager::beginCommit); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); @@ -1697,7 +1679,7 @@ public void testRetryCommitTransactionAfterTimeout() throws Exception { assertFalse(result.isAcked()); assertFalse(transactionManager.hasOngoingTransaction()); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); + assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); assertThrows(IllegalStateException.class, transactionManager::beginTransaction); assertThrows(IllegalStateException.class, transactionManager::beginAbort); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); @@ -1711,7 +1693,7 @@ public void testRetryCommitTransactionAfterTimeout() throws Exception { @Test public void testRetryInitTransactionsAfterTimeout() { - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -1732,10 +1714,10 @@ public void testRetryInitTransactionsAfterTimeout() { assertThrows(IllegalStateException.class, transactionManager::beginCommit); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); - assertSame(result, transactionManager.initializeTransactions(false)); + assertSame(result, transactionManager.initializeTransactions()); result.await(); assertTrue(result.isAcked()); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); + assertThrows(IllegalStateException.class, transactionManager::initializeTransactions); transactionManager.beginTransaction(); assertTrue(transactionManager.hasOngoingTransaction()); @@ -1977,7 +1959,7 @@ public void testMultipleAddPartitionsPerForOneProduce() throws InterruptedExcept }) public void testRetriableErrors(Errors error) { // Ensure FindCoordinator retries. - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(error, false, CoordinatorType.TRANSACTION, transactionalId); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); @@ -2011,7 +1993,7 @@ public void testRetriableErrors(Errors error) { @Test public void testCoordinatorNotAvailable() { // Ensure FindCoordinator with COORDINATOR_NOT_AVAILABLE error retries. - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.COORDINATOR_NOT_AVAILABLE, false, CoordinatorType.TRANSACTION, transactionalId); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); @@ -2034,7 +2016,7 @@ public void testInvalidProducerEpochConvertToProducerFencedInInitProducerId() { } private void verifyProducerFencedForInitProducerId(Errors error) { - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -2201,7 +2183,7 @@ public void testDisallowCommitOnProduceFailure() throws InterruptedException { runUntil(commitResult::isCompleted); // commit should be cancelled with exception without being sent. assertThrows(KafkaException.class, commitResult::await); - TestUtils.assertFutureThrows(OutOfOrderSequenceException.class, responseFuture); + TestUtils.assertFutureThrows(responseFuture, OutOfOrderSequenceException.class); // Commit is not allowed, so let's abort and try again. TransactionalRequestResult abortResult = transactionManager.beginAbort(); @@ -2390,7 +2372,7 @@ public void testCancelUnsentAddPartitionsAndProduceOnAbort() throws InterruptedE assertTrue(abortResult.isSuccessful()); assertTrue(transactionManager.isReady()); // make sure we are ready for a transaction now. - TestUtils.assertFutureThrows(TransactionAbortedException.class, responseFuture); + TestUtils.assertFutureThrows(responseFuture, KafkaException.class); } @Test @@ -2416,7 +2398,7 @@ public void testAbortResendsAddPartitionErrorIfRetried() throws InterruptedExcep assertTrue(abortResult.isSuccessful()); assertTrue(transactionManager.isReady()); // make sure we are ready for a transaction now. - TestUtils.assertFutureThrows(TransactionAbortedException.class, responseFuture); + TestUtils.assertFutureThrows(responseFuture, KafkaException.class); } @Test @@ -2957,7 +2939,7 @@ public void testDropCommitOnBatchExpiry() throws InterruptedException { "Expected to get a TimeoutException since the queued ProducerBatch should have been expired"); runUntil(commitResult::isCompleted); // the commit shouldn't be completed without being sent since the produce request failed. assertFalse(commitResult.isSuccessful()); // the commit shouldn't succeed since the produce request failed. - assertInstanceOf(TimeoutException.class, assertThrows(TransactionAbortableException.class, commitResult::await).getCause()); + assertThrows(TimeoutException.class, commitResult::await); assertTrue(transactionManager.hasAbortableError()); assertTrue(transactionManager.hasOngoingTransaction()); @@ -3155,7 +3137,7 @@ public void testEpochBumpAfterLastInFlightBatchFailsIdempotentProducer(boolean t @ParameterizedTest @ValueSource(booleans = {true, false}) - public void testMaybeResolveSequencesTransactionalProducer(boolean transactionV2Enabled) { + public void testMaybeResolveSequencesTransactionalProducer(boolean transactionV2Enabled) throws Exception { initializeTransactionManager(Optional.of(transactionalId), transactionV2Enabled); // Initialize transaction with initial producer ID and epoch. @@ -3602,7 +3584,7 @@ public void testHealthyPartitionRetriesDuringEpochBump(boolean transactionV2Enab initializeTransactionManager(Optional.empty(), transactionV2Enabled); Sender sender = new Sender(logContext, this.client, this.metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(new Metrics(time)), this.time, - REQUEST_TIMEOUT, 50, transactionManager); + REQUEST_TIMEOUT, 50, transactionManager, apiVersions); initializeIdempotentProducerId(producerId, epoch); ProducerBatch tp0b1 = writeIdempotentBatchWithValue(transactionManager, tp0, "1"); @@ -3727,7 +3709,7 @@ public void testFailedInflightBatchAfterEpochBump(boolean transactionV2Enabled) initializeTransactionManager(Optional.empty(), transactionV2Enabled); Sender sender = new Sender(logContext, this.client, this.metadata, this.accumulator, false, MAX_REQUEST_SIZE, ACKS_ALL, MAX_RETRIES, new SenderMetricsRegistry(new Metrics(time)), this.time, - REQUEST_TIMEOUT, 50, transactionManager); + REQUEST_TIMEOUT, 50, transactionManager, apiVersions); initializeIdempotentProducerId(producerId, epoch); ProducerBatch tp0b1 = writeIdempotentBatchWithValue(transactionManager, tp0, "1"); @@ -3819,7 +3801,7 @@ public void testBackgroundInvalidStateTransitionIsFatal() { doInitTransactions(); assertTrue(transactionManager.isTransactional()); - transactionManager.setShouldPoisonStateOnInvalidTransitionOverride(true); + transactionManager.setPoisonStateOnInvalidTransition(true); // Intentionally perform an operation that will cause an invalid state transition. The detection of this // will result in a poisoning of the transaction manager for all subsequent transactional operations since @@ -3832,7 +3814,7 @@ public void testBackgroundInvalidStateTransitionIsFatal() { assertThrows(IllegalStateException.class, () -> transactionManager.beginAbort()); assertThrows(IllegalStateException.class, () -> transactionManager.beginCommit()); assertThrows(IllegalStateException.class, () -> transactionManager.maybeAddPartition(tp0)); - assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions(false)); + assertThrows(IllegalStateException.class, () -> transactionManager.initializeTransactions()); assertThrows(IllegalStateException.class, () -> transactionManager.sendOffsetsToTransaction(Collections.emptyMap(), new ConsumerGroupMetadata("fake-group-id"))); } @@ -3869,7 +3851,7 @@ public void testForegroundInvalidStateTransitionIsRecoverable() { @Test public void testTransactionAbortableExceptionInInitProducerId() { - TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(false); + TransactionalRequestResult initPidResult = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -4024,137 +4006,6 @@ private void verifyCommitOrAbortTransactionRetriable(TransactionResult firstTran assertFalse(transactionManager.hasOngoingTransaction()); } - @Test - public void testInitializeTransactionsWithKeepPreparedTxn() { - doInitTransactionsWith2PCEnabled(true); - runUntil(transactionManager::hasProducerId); - - // Expect a bumped epoch in the response. - assertTrue(transactionManager.hasProducerId()); - assertFalse(transactionManager.hasOngoingTransaction()); - assertEquals(ongoingProducerId, transactionManager.producerIdAndEpoch().producerId); - assertEquals(bumpedOngoingEpoch, transactionManager.producerIdAndEpoch().epoch); - } - - @Test - public void testPrepareTransaction() { - doInitTransactionsWith2PCEnabled(false); - runUntil(transactionManager::hasProducerId); - - // Begin a transaction - transactionManager.beginTransaction(); - assertTrue(transactionManager.hasOngoingTransaction()); - - // Add a partition to the transaction - transactionManager.maybeAddPartition(tp0); - - // Capture the current producer ID and epoch before preparing the response - long producerId = transactionManager.producerIdAndEpoch().producerId; - short epoch = transactionManager.producerIdAndEpoch().epoch; - - // Simulate a produce request - try { - // Prepare the response before sending to ensure it's ready - prepareProduceResponse(Errors.NONE, producerId, epoch); - - appendToAccumulator(tp0); - // Wait until the request is processed - runUntil(() -> !client.hasPendingResponses()); - } catch (InterruptedException e) { - fail("Unexpected interruption: " + e); - } - - transactionManager.prepareTransaction(); - assertTrue(transactionManager.isPrepared()); - - ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); - // Validate the state contains the correct producer ID and epoch - assertEquals(producerId, preparedState.producerId); - assertEquals(epoch, preparedState.epoch); - } - - @Test - public void testInitPidResponseWithKeepPreparedTrueAndOngoingTransaction() { - // Initialize transaction manager with 2PC enabled - initializeTransactionManager(Optional.of(transactionalId), true, true); - - // Start initializeTransactions with keepPreparedTxn=true - TransactionalRequestResult result = transactionManager.initializeTransactions(true); - - // Prepare coordinator response - prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); - runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); - - // Simulate InitProducerId response with ongoing transaction - long ongoingPid = 12345L; - short ongoingEpoch = 5; - prepareInitPidResponse( - Errors.NONE, - false, - producerId, - epoch, - true, - true, - ongoingPid, - ongoingEpoch - ); - - runUntil(transactionManager::hasProducerId); - transactionManager.maybeUpdateTransactionV2Enabled(true); - - result.await(); - assertTrue(result.isSuccessful()); - - // Verify transaction manager transitioned to PREPARED_TRANSACTION state - assertTrue(transactionManager.isPrepared()); - - // Verify preparedTxnState was set with ongoing producer ID and epoch - ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); - assertNotNull(preparedState); - assertEquals(ongoingPid, preparedState.producerId); - assertEquals(ongoingEpoch, preparedState.epoch); - } - - @Test - public void testInitPidResponseWithKeepPreparedTrueAndNoOngoingTransaction() { - // Initialize transaction manager without 2PC enabled - // keepPrepared can be true even when enable2Pc is false, and we expect the same behavior - initializeTransactionManager(Optional.of(transactionalId), true, false); - - // Start initializeTransactions with keepPreparedTxn=true - TransactionalRequestResult result = transactionManager.initializeTransactions(true); - - // Prepare coordinator response - prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); - runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); - - // Simulate InitProducerId response without ongoing transaction - prepareInitPidResponse( - Errors.NONE, - false, - producerId, - epoch, - true, - false, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH - ); - - runUntil(transactionManager::hasProducerId); - transactionManager.maybeUpdateTransactionV2Enabled(true); - - result.await(); - assertTrue(result.isSuccessful()); - - // Verify transaction manager transitioned to READY state (not PREPARED_TRANSACTION) - assertFalse(transactionManager.isPrepared()); - assertTrue(transactionManager.isReady()); - - // Verify preparedTxnState was not set or is empty - ProducerIdAndEpoch preparedState = transactionManager.preparedTransactionState(); - assertEquals(ProducerIdAndEpoch.NONE, preparedState); - } - private void prepareAddPartitionsToTxn(final Map errors) { AddPartitionsToTxnResult result = AddPartitionsToTxnResponse.resultForTransaction(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID, errors); AddPartitionsToTxnResponseData data = new AddPartitionsToTxnResponseData().setResultsByTopicV3AndBelow(result.topicResults()).setThrottleTimeMs(0); @@ -4183,39 +4034,16 @@ private void prepareFindCoordinatorResponse(Errors error, boolean shouldDisconne }, FindCoordinatorResponse.prepareResponse(error, coordinatorKey, brokerNode), shouldDisconnect); } - private void prepareInitPidResponse( - Errors error, - boolean shouldDisconnect, - long producerId, - short producerEpoch - ) { - prepareInitPidResponse(error, shouldDisconnect, producerId, producerEpoch, false, false, -1, (short) -1); - } - - private void prepareInitPidResponse( - Errors error, - boolean shouldDisconnect, - long producerId, - short producerEpoch, - boolean keepPreparedTxn, - boolean enable2Pc, - long ongoingProducerId, - short ongoingProducerEpoch - ) { + private void prepareInitPidResponse(Errors error, boolean shouldDisconnect, long producerId, short producerEpoch) { InitProducerIdResponseData responseData = new InitProducerIdResponseData() - .setErrorCode(error.code()) - .setProducerEpoch(producerEpoch) - .setProducerId(producerId) - .setThrottleTimeMs(0) - .setOngoingTxnProducerId(ongoingProducerId) - .setOngoingTxnProducerEpoch(ongoingProducerEpoch); - + .setErrorCode(error.code()) + .setProducerEpoch(producerEpoch) + .setProducerId(producerId) + .setThrottleTimeMs(0); client.prepareResponse(body -> { InitProducerIdRequest initProducerIdRequest = (InitProducerIdRequest) body; assertEquals(transactionalId, initProducerIdRequest.data().transactionalId()); assertEquals(transactionTimeoutMs, initProducerIdRequest.data().transactionTimeoutMs()); - assertEquals(keepPreparedTxn, initProducerIdRequest.data().keepPreparedTxn()); - assertEquals(enable2Pc, initProducerIdRequest.data().enable2Pc()); return true; }, new InitProducerIdResponse(responseData), shouldDisconnect); } @@ -4456,7 +4284,7 @@ private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors e @SuppressWarnings("deprecation") private ProduceResponse produceResponse(TopicPartition tp, long offset, Errors error, int throttleTimeMs, int logStartOffset) { ProduceResponse.PartitionResponse resp = new ProduceResponse.PartitionResponse(error, offset, RecordBatch.NO_TIMESTAMP, logStartOffset); - Map partResp = singletonMap(new TopicIdPartition(TOPIC_ID, tp), resp); + Map partResp = singletonMap(tp, resp); return new ProduceResponse(partResp, throttleTimeMs); } @@ -4480,7 +4308,7 @@ private void doInitTransactions() { } private void doInitTransactions(long producerId, short epoch) { - TransactionalRequestResult result = transactionManager.initializeTransactions(false); + TransactionalRequestResult result = transactionManager.initializeTransactions(); prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); @@ -4494,48 +4322,6 @@ private void doInitTransactions(long producerId, short epoch) { assertTrue(result.isAcked()); } - private void doInitTransactionsWith2PCEnabled(boolean keepPrepared) { - initializeTransactionManager(Optional.of(transactionalId), true, true); - TransactionalRequestResult result = transactionManager.initializeTransactions(keepPrepared); - - prepareFindCoordinatorResponse(Errors.NONE, false, CoordinatorType.TRANSACTION, transactionalId); - runUntil(() -> transactionManager.coordinator(CoordinatorType.TRANSACTION) != null); - assertEquals(brokerNode, transactionManager.coordinator(CoordinatorType.TRANSACTION)); - - if (keepPrepared) { - // Simulate an ongoing prepared transaction (ongoingProducerId != -1). - short ongoingEpoch = bumpedOngoingEpoch - 1; - prepareInitPidResponse( - Errors.NONE, - false, - ongoingProducerId, - bumpedOngoingEpoch, - true, - true, - ongoingProducerId, - ongoingEpoch - ); - } else { - prepareInitPidResponse( - Errors.NONE, - false, - producerId, - epoch, - false, - true, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH - ); - } - - runUntil(transactionManager::hasProducerId); - transactionManager.maybeUpdateTransactionV2Enabled(true); - - result.await(); - assertTrue(result.isSuccessful()); - assertTrue(result.isAcked()); - } - private void assertAbortableError(Class cause) { try { transactionManager.beginCommit(); @@ -4586,32 +4372,4 @@ private void runUntil(Supplier condition) { ProducerTestUtils.runUntil(sender, condition); } - /** - * This subclass exists only to optionally change the default behavior related to poisoning the state - * on invalid state transition attempts. - */ - private static class TestableTransactionManager extends TransactionManager { - - private Optional shouldPoisonStateOnInvalidTransitionOverride; - - public TestableTransactionManager(LogContext logContext, - String transactionalId, - int transactionTimeoutMs, - long retryBackoffMs, - ApiVersions apiVersions, - boolean enable2Pc) { - super(logContext, transactionalId, transactionTimeoutMs, retryBackoffMs, apiVersions, enable2Pc); - this.shouldPoisonStateOnInvalidTransitionOverride = Optional.empty(); - } - - private void setShouldPoisonStateOnInvalidTransitionOverride(boolean override) { - shouldPoisonStateOnInvalidTransitionOverride = Optional.of(override); - } - - @Override - protected boolean shouldPoisonStateOnInvalidTransition() { - // If there's an override, use it, otherwise invoke the default (i.e. super class) logic. - return shouldPoisonStateOnInvalidTransitionOverride.orElseGet(super::shouldPoisonStateOnInvalidTransition); - } - } } diff --git a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java index 5df435149deb7..ceb819dee8f6a 100644 --- a/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/KafkaFutureTest.java @@ -318,7 +318,7 @@ public void testThenApplyOnSucceededFutureAndFunctionThrowsCompletionException() awaitAndAssertResult(future, 21, null); Throwable cause = awaitAndAssertFailure(dependantFuture, CompletionException.class, "java.lang.RuntimeException: We require more vespene gas"); assertInstanceOf(RuntimeException.class, cause.getCause()); - assertEquals("We require more vespene gas", cause.getCause().getMessage()); + assertEquals(cause.getCause().getMessage(), "We require more vespene gas"); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/UuidTest.java b/clients/src/test/java/org/apache/kafka/common/UuidTest.java index 9acc8145be84a..65316469c69e2 100644 --- a/clients/src/test/java/org/apache/kafka/common/UuidTest.java +++ b/clients/src/test/java/org/apache/kafka/common/UuidTest.java @@ -35,8 +35,8 @@ public class UuidTest { public void testSignificantBits() { Uuid id = new Uuid(34L, 98L); - assertEquals(34L, id.getMostSignificantBits()); - assertEquals(98L, id.getLeastSignificantBits()); + assertEquals(id.getMostSignificantBits(), 34L); + assertEquals(id.getLeastSignificantBits(), 98L); } @Test @@ -74,15 +74,15 @@ public void testStringConversion() { String zeroIdString = Uuid.ZERO_UUID.toString(); - assertEquals(Uuid.ZERO_UUID, Uuid.fromString(zeroIdString)); + assertEquals(Uuid.fromString(zeroIdString), Uuid.ZERO_UUID); } @RepeatedTest(value = 100, name = RepeatedTest.LONG_DISPLAY_NAME) public void testRandomUuid() { Uuid randomID = Uuid.randomUuid(); - assertNotEquals(Uuid.ZERO_UUID, randomID); - assertNotEquals(Uuid.METADATA_TOPIC_ID, randomID); + assertNotEquals(randomID, Uuid.ZERO_UUID); + assertNotEquals(randomID, Uuid.METADATA_TOPIC_ID); assertFalse(randomID.toString().startsWith("-")); } diff --git a/clients/src/test/java/org/apache/kafka/common/acl/AclOperationTest.java b/clients/src/test/java/org/apache/kafka/common/acl/AclOperationTest.java index ede6fbbb4394d..2e81a6d1eaafa 100644 --- a/clients/src/test/java/org/apache/kafka/common/acl/AclOperationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/acl/AclOperationTest.java @@ -50,8 +50,7 @@ private static class AclOperationTestInfo { new AclOperationTestInfo(AclOperation.ALTER_CONFIGS, 11, "alter_configs", false), new AclOperationTestInfo(AclOperation.IDEMPOTENT_WRITE, 12, "idempotent_write", false), new AclOperationTestInfo(AclOperation.CREATE_TOKENS, 13, "create_tokens", false), - new AclOperationTestInfo(AclOperation.DESCRIBE_TOKENS, 14, "describe_tokens", false), - new AclOperationTestInfo(AclOperation.TWO_PHASE_COMMIT, 15, "two_phase_commit", false) + new AclOperationTestInfo(AclOperation.DESCRIBE_TOKENS, 14, "describe_tokens", false) }; @Test diff --git a/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java b/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java index b517f55534e67..31ffdfb7117c4 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/AbstractConfigTest.java @@ -114,13 +114,6 @@ public void testOriginalsWithPrefix() { assertEquals(expected, originalsWithPrefix); } - @Test - public void testPreprocessConfig() { - Properties props = new Properties(); - TestConfig config = new TestConfig(props); - assertEquals("success", config.get("preprocess")); - } - @Test public void testValuesWithPrefixOverride() { String prefix = "prefix."; @@ -379,8 +372,8 @@ public void testOriginalWithOverrides() { Properties props = new Properties(); props.put("config.providers", "file"); TestIndirectConfigResolution config = new TestIndirectConfigResolution(props); - assertEquals("file", config.originals().get("config.providers")); - assertEquals("file2", config.originals(Collections.singletonMap("config.providers", "file2")).get("config.providers")); + assertEquals(config.originals().get("config.providers"), "file"); + assertEquals(config.originals(Collections.singletonMap("config.providers", "file2")).get("config.providers"), "file2"); } @Test @@ -709,32 +702,17 @@ private static class TestConfig extends AbstractConfig { public static final String METRIC_REPORTER_CLASSES_CONFIG = "metric.reporters"; private static final String METRIC_REPORTER_CLASSES_DOC = "A list of classes to use as metrics reporters."; - public static final String PREPROCESSOR_CONFIG = "preprocess"; - private static final String PREPROCESSOR_CONFIG_DOC = "Override from preprocess step."; - static { CONFIG = new ConfigDef().define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, "", Importance.LOW, - METRIC_REPORTER_CLASSES_DOC) - .define(PREPROCESSOR_CONFIG, - Type.STRING, - "", - Importance.LOW, - PREPROCESSOR_CONFIG_DOC); + METRIC_REPORTER_CLASSES_DOC); } public TestConfig(Map props) { super(CONFIG, props); } - - @Override - protected Map preProcessParsedConfig(Map parsedValues) { - Map ret = new HashMap<>(parsedValues); - ret.put("preprocess", "success"); - return ret; - } } public static class ConfiguredFakeMetricsReporter extends FakeMetricsReporter { diff --git a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java index c6c2390b07c47..6e1f0e232429b 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/ConfigDefTest.java @@ -135,9 +135,12 @@ private void testBadInputs(Type type, Object... values) { Map m = new HashMap<>(); m.put("name", value); ConfigDef def = new ConfigDef().define("name", type, Importance.HIGH, "docs"); - assertThrows(ConfigException.class, - () -> def.parse(m), - "Expected a config exception on bad input for value " + value); + try { + def.parse(m); + fail("Expected a config exception on bad input for value " + value); + } catch (ConfigException e) { + // this is good + } } } @@ -413,7 +416,7 @@ public void testNames() { .define("a", Type.STRING, Importance.LOW, "docs") .define("b", Type.STRING, Importance.LOW, "docs"); Set names = configDef.names(); - assertEquals(Set.of("a", "b"), names); + assertEquals(new HashSet<>(Arrays.asList("a", "b")), names); // should be unmodifiable try { names.add("new"); @@ -436,13 +439,13 @@ public void testBaseConfigDefDependents() { // Creating a ConfigDef based on another should compute the correct number of configs with no parent, even // if the base ConfigDef has already computed its parentless configs final ConfigDef baseConfigDef = new ConfigDef().define("a", Type.STRING, Importance.LOW, "docs"); - assertEquals(Set.of("a"), baseConfigDef.getConfigsWithNoParent()); + assertEquals(new HashSet<>(singletonList("a")), baseConfigDef.getConfigsWithNoParent()); final ConfigDef configDef = new ConfigDef(baseConfigDef) .define("parent", Type.STRING, Importance.HIGH, "parent docs", "group", 1, Width.LONG, "Parent", singletonList("child")) .define("child", Type.STRING, Importance.HIGH, "docs"); - assertEquals(Set.of("a", "parent"), configDef.getConfigsWithNoParent()); + assertEquals(new HashSet<>(Arrays.asList("a", "parent")), configDef.getConfigsWithNoParent()); } @@ -483,9 +486,12 @@ private void testValidators(Type type, Validator validator, Object defaultVal, O for (Object value : badValues) { Map m = new HashMap<>(); m.put("name", value); - assertThrows(ConfigException.class, - () -> def.parse(m), - "Expected a config exception due to invalid value " + value); + try { + def.parse(m); + fail("Expected a config exception due to invalid value " + value); + } catch (ConfigException e) { + // this is good + } } } @@ -758,59 +764,4 @@ public void testListSizeValidatorToString() { assertEquals("List containing maximum of 5 elements", ListSize.atMostOfSize(5).toString()); } - @Test - public void testListValidatorAnyNonDuplicateValues() { - ConfigDef.ValidList allowAnyNonDuplicateValues = ConfigDef.ValidList.anyNonDuplicateValues(true, true); - assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of("a", "b", "c"))); - assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of())); - assertDoesNotThrow(() -> allowAnyNonDuplicateValues.ensureValid("test.config", null)); - ConfigException exception1 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of("a", "a"))); - assertEquals("Configuration 'test.config' values must not be duplicated.", exception1.getMessage()); - ConfigException exception2 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValues.ensureValid("test.config", List.of(""))); - assertEquals("Configuration 'test.config' values must not be empty.", exception2.getMessage()); - - ConfigDef.ValidList allowAnyNonDuplicateValuesAndNull = ConfigDef.ValidList.anyNonDuplicateValues(false, true); - assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of("a", "b", "c"))); - assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", null)); - ConfigException exception3 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of())); - assertEquals("Configuration 'test.config' must not be empty. Valid values include: any non-empty value", exception3.getMessage()); - ConfigException exception4 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of("a", "a"))); - assertEquals("Configuration 'test.config' values must not be duplicated.", exception4.getMessage()); - ConfigException exception5 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndNull.ensureValid("test.config", List.of(""))); - assertEquals("Configuration 'test.config' values must not be empty.", exception5.getMessage()); - - ConfigDef.ValidList allowAnyNonDuplicateValuesAndEmptyList = ConfigDef.ValidList.anyNonDuplicateValues(true, false); - assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of("a", "b", "c"))); - assertDoesNotThrow(() -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of())); - ConfigException exception6 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", null)); - assertEquals("Configuration 'test.config' values must not be null.", exception6.getMessage()); - ConfigException exception7 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of("a", "a"))); - assertEquals("Configuration 'test.config' values must not be duplicated.", exception7.getMessage()); - ConfigException exception8 = assertThrows(ConfigException.class, () -> allowAnyNonDuplicateValuesAndEmptyList.ensureValid("test.config", List.of(""))); - assertEquals("Configuration 'test.config' values must not be empty.", exception8.getMessage()); - } - - @Test - public void testListValidatorIn() { - ConfigDef.ValidList allowEmptyValidator = ConfigDef.ValidList.in(true, "a", "b", "c"); - assertDoesNotThrow(() -> allowEmptyValidator.ensureValid("test.config", List.of("a", "b"))); - assertDoesNotThrow(() -> allowEmptyValidator.ensureValid("test.config", List.of())); - ConfigException exception1 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", null)); - assertEquals("Configuration 'test.config' values must not be null.", exception1.getMessage()); - ConfigException exception2 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", List.of("d"))); - assertEquals("Invalid value d for configuration test.config: String must be one of: a, b, c", exception2.getMessage()); - ConfigException exception3 = assertThrows(ConfigException.class, () -> allowEmptyValidator.ensureValid("test.config", List.of("a", "a"))); - assertEquals("Configuration 'test.config' values must not be duplicated.", exception3.getMessage()); - - ConfigDef.ValidList notAllowEmptyValidator = ConfigDef.ValidList.in(false, "a", "b", "c"); - assertDoesNotThrow(() -> notAllowEmptyValidator.ensureValid("test.config", List.of("a", "b"))); - ConfigException exception4 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of())); - assertEquals("Configuration 'test.config' must not be empty. Valid values include: [a, b, c]", exception4.getMessage()); - ConfigException exception5 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", null)); - assertEquals("Configuration 'test.config' values must not be null.", exception5.getMessage()); - ConfigException exception6 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of("d"))); - assertEquals("Invalid value d for configuration test.config: String must be one of: a, b, c", exception6.getMessage()); - ConfigException exception7 = assertThrows(ConfigException.class, () -> notAllowEmptyValidator.ensureValid("test.config", List.of("a", "a"))); - assertEquals("Configuration 'test.config' values must not be duplicated.", exception7.getMessage()); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java index 9a31a63915d3d..bbd2268e7cb8f 100644 --- a/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/config/provider/EnvVarConfigProviderTest.java @@ -22,8 +22,10 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -86,7 +88,7 @@ public void testGetOneKeyWithEmptyPath() { @Test void testGetEnvVarsByKeyList() { - Set keyList = Set.of("test_var1", "secret_var2"); + Set keyList = new HashSet<>(Arrays.asList("test_var1", "secret_var2")); Set keys = envVarConfigProvider.get(null, keyList).data().keySet(); assertEquals(keyList, keys); } diff --git a/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java b/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java index 9bc6f05106ea8..1d6679e62ce2b 100644 --- a/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java +++ b/clients/src/test/java/org/apache/kafka/common/feature/SupportedVersionRangeTest.java @@ -130,7 +130,7 @@ public void testToString() { public void testEquals() { SupportedVersionRange tested = new SupportedVersionRange((short) 1, (short) 1); assertEquals(tested, tested); - assertNotEquals(new SupportedVersionRange((short) 1, (short) 2), tested); + assertNotEquals(tested, new SupportedVersionRange((short) 1, (short) 2)); assertNotEquals(null, tested); } diff --git a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java index 41104194991d9..0cdb9b170f43b 100644 --- a/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java +++ b/clients/src/test/java/org/apache/kafka/common/header/internals/RecordHeadersTest.java @@ -30,6 +30,7 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class RecordHeadersTest { @@ -47,21 +48,6 @@ public void testAdd() { assertEquals(2, getCount(headers)); } - @Test - public void testAddHeadersPreserveOrder() { - Headers headers = new RecordHeaders(); - headers.add(new RecordHeader("key", "value".getBytes())); - headers.add(new RecordHeader("key2", "value2".getBytes())); - headers.add(new RecordHeader("key3", "value3".getBytes())); - - Header[] headersArr = headers.toArray(); - assertHeader("key", "value", headersArr[0]); - assertHeader("key2", "value2", headersArr[1]); - assertHeader("key3", "value3", headersArr[2]); - - assertEquals(3, getCount(headers)); - } - @Test public void testRemove() { Headers headers = new RecordHeaders(); @@ -74,27 +60,6 @@ public void testRemove() { assertFalse(headers.iterator().hasNext()); } - @Test - public void testPreserveOrderAfterRemove() { - Headers headers = new RecordHeaders(); - headers.add(new RecordHeader("key", "value".getBytes())); - headers.add(new RecordHeader("key2", "value2".getBytes())); - headers.add(new RecordHeader("key3", "value3".getBytes())); - - headers.remove("key"); - Header[] headersArr = headers.toArray(); - assertHeader("key2", "value2", headersArr[0]); - assertHeader("key3", "value3", headersArr[1]); - assertEquals(2, getCount(headers)); - - headers.add(new RecordHeader("key4", "value4".getBytes())); - headers.remove("key3"); - headersArr = headers.toArray(); - assertHeader("key2", "value2", headersArr[0]); - assertHeader("key4", "value4", headersArr[1]); - assertEquals(2, getCount(headers)); - } - @Test public void testAddRemoveInterleaved() { Headers headers = new RecordHeaders(); @@ -108,7 +73,7 @@ public void testAddRemoveInterleaved() { assertEquals(1, getCount(headers)); headers.add(new RecordHeader("key3", "value3".getBytes())); - + assertNull(headers.lastHeader("key")); assertHeader("key2", "value2", headers.lastHeader("key2")); @@ -163,44 +128,42 @@ public void testLastHeader() { } - @Test - public void testHeadersIteratorRemove() { - Headers headers = new RecordHeaders(); - headers.add(new RecordHeader("key", "value".getBytes())); - - Iterator
          headersIterator = headers.headers("key").iterator(); - headersIterator.next(); - assertThrows(UnsupportedOperationException.class, - headersIterator::remove); - } - @Test public void testReadOnly() { RecordHeaders headers = new RecordHeaders(); headers.add(new RecordHeader("key", "value".getBytes())); Iterator
          headerIteratorBeforeClose = headers.iterator(); headers.setReadOnly(); - - assertThrows(IllegalStateException.class, - () -> headers.add(new RecordHeader("key", "value".getBytes())), - "IllegalStateException expected as headers are closed."); - - assertThrows(IllegalStateException.class, - () -> headers.remove("key"), - "IllegalStateException expected as headers are closed."); - - Iterator
          headerIterator = headers.iterator(); - headerIterator.next(); - - assertThrows(IllegalStateException.class, - headerIterator::remove, - "IllegalStateException expected as headers are closed."); - - headerIteratorBeforeClose.next(); - - assertThrows(IllegalStateException.class, - headerIterator::remove, - "IllegalStateException expected as headers are closed."); + try { + headers.add(new RecordHeader("key", "value".getBytes())); + fail("IllegalStateException expected as headers are closed"); + } catch (IllegalStateException ise) { + //expected + } + + try { + headers.remove("key"); + fail("IllegalStateException expected as headers are closed"); + } catch (IllegalStateException ise) { + //expected + } + + try { + Iterator
          headerIterator = headers.iterator(); + headerIterator.next(); + headerIterator.remove(); + fail("IllegalStateException expected as headers are closed"); + } catch (IllegalStateException ise) { + //expected + } + + try { + headerIteratorBeforeClose.next(); + headerIteratorBeforeClose.remove(); + fail("IllegalStateException expected as headers are closed"); + } catch (IllegalStateException ise) { + //expected + } } @Test @@ -259,7 +222,7 @@ public void shouldThrowNpeWhenAddingCollectionWithNullHeader() { private int getCount(Headers headers) { return headers.toArray().length; } - + static void assertHeader(String key, String value, Header actual) { assertEquals(key, actual.key()); assertArrayEquals(value.getBytes(), actual.value()); diff --git a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java index 0bcd9731c462d..638d60fee4443 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/MessageTest.java @@ -56,13 +56,11 @@ import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.protocol.types.RawTaggedField; -import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; import com.fasterxml.jackson.databind.JsonNode; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.junit.jupiter.params.ParameterizedTest; import java.lang.reflect.Method; import java.nio.ByteBuffer; @@ -84,6 +82,7 @@ public final class MessageTest { private final String memberId = "memberId"; private final String instanceId = "instanceId"; + private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(1, 2, 3, 4, 5, 6, 7); @Test public void testAddOffsetsToTxnVersions() throws Exception { @@ -410,49 +409,90 @@ public void testOffsetForLeaderEpochVersions() throws Exception { new OffsetForLeaderEpochRequestData().setReplicaId(-2)); } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) - public void testOffsetCommitRequestVersions(short version) throws Exception { - OffsetCommitRequestData request = new OffsetCommitRequestData() - .setGroupId("groupId") - .setMemberId("memberId") - .setGenerationIdOrMemberEpoch(version >= 1 ? 10 : -1) - .setGroupInstanceId(version >= 7 ? "instanceId" : null) - .setRetentionTimeMs((version >= 2 && version <= 4) ? 20 : -1) - .setTopics(singletonList( - new OffsetCommitRequestTopic() - .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) - .setName(version < 10 ? "topic" : "") - .setPartitions(singletonList( - new OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedMetadata("metadata") - .setCommittedOffset(100) - .setCommittedLeaderEpoch(version >= 6 ? 10 : -1) - - )) - )); - - testMessageRoundTrip(version, request, request); - } - - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) - public void testOffsetCommitResponseVersions(short version) throws Exception { - OffsetCommitResponseData response = new OffsetCommitResponseData() - .setThrottleTimeMs(version >= 3 ? 20 : 0) - .setTopics(singletonList( - new OffsetCommitResponseTopic() - .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) - .setName(version < 10 ? "topic" : "") - .setPartitions(singletonList( - new OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) - )) - )); - - testMessageRoundTrip(version, response, response); + @Test + public void testOffsetCommitRequestVersions() throws Exception { + String groupId = "groupId"; + String topicName = "topic"; + String metadata = "metadata"; + int partition = 2; + int offset = 100; + + testAllMessageRoundTrips(new OffsetCommitRequestData() + .setGroupId(groupId) + .setTopics(Collections.singletonList( + new OffsetCommitRequestTopic() + .setName(topicName) + .setPartitions(Collections.singletonList( + new OffsetCommitRequestPartition() + .setPartitionIndex(partition) + .setCommittedMetadata(metadata) + .setCommittedOffset(offset) + ))))); + + Supplier request = + () -> new OffsetCommitRequestData() + .setGroupId(groupId) + .setMemberId("memberId") + .setGroupInstanceId("instanceId") + .setTopics(Collections.singletonList( + new OffsetCommitRequestTopic() + .setName(topicName) + .setPartitions(Collections.singletonList( + new OffsetCommitRequestPartition() + .setPartitionIndex(partition) + .setCommittedLeaderEpoch(10) + .setCommittedMetadata(metadata) + .setCommittedOffset(offset) + )))) + .setRetentionTimeMs(20); + + for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { + OffsetCommitRequestData requestData = request.get(); + + if (version > 4) { + requestData.setRetentionTimeMs(-1); + } + + if (version < 6) { + requestData.topics().get(0).partitions().get(0).setCommittedLeaderEpoch(-1); + } + + if (version < 7) { + requestData.setGroupInstanceId(null); + } + + if (version >= 2 && version <= 4) { + testAllMessageRoundTripsBetweenVersions(version, (short) 5, requestData, requestData); + } else { + testAllMessageRoundTripsFromVersion(version, requestData); + } + } + } + + @Test + public void testOffsetCommitResponseVersions() throws Exception { + Supplier response = + () -> new OffsetCommitResponseData() + .setTopics( + singletonList( + new OffsetCommitResponseTopic() + .setName("topic") + .setPartitions(singletonList( + new OffsetCommitResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) + )) + ) + ) + .setThrottleTimeMs(20); + + for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { + OffsetCommitResponseData responseData = response.get(); + if (version < 3) { + responseData.setThrottleTimeMs(0); + } + testAllMessageRoundTripsFromVersion(version, responseData); + } } @Test @@ -543,92 +583,296 @@ public void testTxnOffsetCommitResponseVersions() throws Exception { .setThrottleTimeMs(20)); } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testOffsetFetchRequestVersions(short version) throws Exception { - OffsetFetchRequestData request; + @Test + public void testOffsetFetchV1ToV7() throws Exception { + String groupId = "groupId"; + String topicName = "topic"; - if (version < 8) { - request = new OffsetFetchRequestData() - .setGroupId("groupId") - .setRequireStable(version == 7) - .setTopics(List.of( - new OffsetFetchRequestTopic() - .setName("foo") - .setPartitionIndexes(List.of(0, 1, 2)) - )); - } else { - request = new OffsetFetchRequestData() - .setRequireStable(true) - .setGroups(List.of( - new OffsetFetchRequestGroup() - .setGroupId("groupId") - .setMemberId(version >= 9 ? "memberId" : null) - .setMemberEpoch(version >= 9 ? 10 : -1) - .setTopics(List.of( - new OffsetFetchRequestTopics() - .setName(version < 10 ? "foo" : "") - .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) - .setPartitionIndexes(List.of(0, 1, 2)) - )) - )); + List topics = Collections.singletonList( + new OffsetFetchRequestTopic() + .setName(topicName) + .setPartitionIndexes(Collections.singletonList(5))); + testAllMessageRoundTripsOffsetFetchV0ToV7(new OffsetFetchRequestData() + .setTopics(new ArrayList<>()) + .setGroupId(groupId)); + + testAllMessageRoundTripsOffsetFetchV0ToV7(new OffsetFetchRequestData() + .setGroupId(groupId) + .setTopics(topics)); + + OffsetFetchRequestData allPartitionData = new OffsetFetchRequestData() + .setGroupId(groupId) + .setTopics(null); + + OffsetFetchRequestData requireStableData = new OffsetFetchRequestData() + .setGroupId(groupId) + .setTopics(topics) + .setRequireStable(true); + + for (int version : listOfVersionsNonBatchOffsetFetch) { + final short finalVersion = (short) version; + if (version < 2) { + assertThrows(NullPointerException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, allPartitionData)); + } else { + testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, allPartitionData); + } + + if (version < 7) { + assertThrows(UnsupportedVersionException.class, () -> testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData)); + } else { + testAllMessageRoundTripsOffsetFetchFromVersionToV7(finalVersion, requireStableData); + } } - testMessageRoundTrip(version, request, request); - } - - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testOffsetFetchResponseVersions(short version) throws Exception { - OffsetFetchResponseData response; - - if (version < 8) { - response = new OffsetFetchResponseData() - .setThrottleTimeMs(version >= 3 ? 1000 : 0) - .setErrorCode(version >= 2 ? Errors.INVALID_GROUP_ID.code() : 0) - .setTopics(List.of( - new OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setMetadata("meta") - .setCommittedLeaderEpoch(version >= 5 ? 20 : -1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - )) - )); - } else { - response = new OffsetFetchResponseData() - .setThrottleTimeMs(1000) - .setGroups(List.of( + Supplier response = + () -> new OffsetFetchResponseData() + .setTopics(Collections.singletonList( + new OffsetFetchResponseTopic() + .setName(topicName) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartition() + .setPartitionIndex(5) + .setMetadata(null) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(3) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))))) + .setErrorCode(Errors.NOT_COORDINATOR.code()) + .setThrottleTimeMs(10); + for (int version : listOfVersionsNonBatchOffsetFetch) { + OffsetFetchResponseData responseData = response.get(); + if (version <= 1) { + responseData.setErrorCode(Errors.NONE.code()); + } + + if (version <= 2) { + responseData.setThrottleTimeMs(0); + } + + if (version <= 4) { + responseData.topics().get(0).partitions().get(0).setCommittedLeaderEpoch(-1); + } + + testAllMessageRoundTripsOffsetFetchFromVersionToV7((short) version, responseData); + } + } + + private void testAllMessageRoundTripsOffsetFetchV0ToV7(Message message) throws Exception { + testDuplication(message); + testAllMessageRoundTripsOffsetFetchFromVersionToV7(message.lowestSupportedVersion(), message); + } + + private void testAllMessageRoundTripsOffsetFetchFromVersionToV7(short fromVersion, + Message message) throws Exception { + for (short version = fromVersion; version <= 7; version++) { + testEquivalentMessageRoundTrip(version, message); + } + } + + @Test + public void testOffsetFetchV8AndAboveSingleGroup() throws Exception { + String groupId = "groupId"; + String topicName = "topic"; + + List topic = Collections.singletonList( + new OffsetFetchRequestTopics() + .setName(topicName) + .setPartitionIndexes(Collections.singletonList(5))); + + OffsetFetchRequestData allPartitionData = new OffsetFetchRequestData() + .setGroups(Collections.singletonList( + new OffsetFetchRequestGroup() + .setGroupId(groupId) + .setTopics(null))); + + OffsetFetchRequestData specifiedPartitionData = new OffsetFetchRequestData() + .setGroups(Collections.singletonList( + new OffsetFetchRequestGroup() + .setGroupId(groupId) + .setTopics(topic))) + .setRequireStable(true); + + testAllMessageRoundTripsOffsetFetchV8AndAbove(allPartitionData); + testAllMessageRoundTripsOffsetFetchV8AndAbove(specifiedPartitionData); + + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, specifiedPartitionData); + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, allPartitionData); + } + } + + Supplier response = + () -> new OffsetFetchResponseData() + .setGroups(Collections.singletonList( new OffsetFetchResponseGroup() - .setGroupId("groupId") - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - .setTopics(List.of( + .setGroupId(groupId) + .setTopics(Collections.singletonList( new OffsetFetchResponseTopics() - .setName(version < 10 ? "foo" : "") - .setTopicId(version >= 10 ? Uuid.randomUuid() : Uuid.ZERO_UUID) - .setPartitions(List.of( + .setPartitions(Collections.singletonList( new OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setMetadata("meta") - .setCommittedLeaderEpoch(20) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) - )) - )) - )); + .setPartitionIndex(5) + .setMetadata(null) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(3) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))))) + .setErrorCode(Errors.NOT_COORDINATOR.code()))) + .setThrottleTimeMs(10); + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + OffsetFetchResponseData responseData = response.get(); + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, responseData); + } } + } - testMessageRoundTrip(version, response, response); + @Test + public void testOffsetFetchV8AndAbove() throws Exception { + String groupOne = "group1"; + String groupTwo = "group2"; + String groupThree = "group3"; + String groupFour = "group4"; + String groupFive = "group5"; + String topic1 = "topic1"; + String topic2 = "topic2"; + String topic3 = "topic3"; + + OffsetFetchRequestTopics topicOne = new OffsetFetchRequestTopics() + .setName(topic1) + .setPartitionIndexes(Collections.singletonList(5)); + OffsetFetchRequestTopics topicTwo = new OffsetFetchRequestTopics() + .setName(topic2) + .setPartitionIndexes(Collections.singletonList(10)); + OffsetFetchRequestTopics topicThree = new OffsetFetchRequestTopics() + .setName(topic3) + .setPartitionIndexes(Collections.singletonList(15)); + + List groupOneTopics = singletonList(topicOne); + OffsetFetchRequestGroup group1 = + new OffsetFetchRequestGroup() + .setGroupId(groupOne) + .setTopics(groupOneTopics); + + List groupTwoTopics = Arrays.asList(topicOne, topicTwo); + OffsetFetchRequestGroup group2 = + new OffsetFetchRequestGroup() + .setGroupId(groupTwo) + .setTopics(groupTwoTopics); + + List groupThreeTopics = Arrays.asList(topicOne, topicTwo, topicThree); + OffsetFetchRequestGroup group3 = + new OffsetFetchRequestGroup() + .setGroupId(groupThree) + .setTopics(groupThreeTopics); + + OffsetFetchRequestGroup group4 = + new OffsetFetchRequestGroup() + .setGroupId(groupFour) + .setTopics(null); + + OffsetFetchRequestGroup group5 = + new OffsetFetchRequestGroup() + .setGroupId(groupFive) + .setTopics(null); + + OffsetFetchRequestData requestData = new OffsetFetchRequestData() + .setGroups(Arrays.asList(group1, group2, group3, group4, group5)) + .setRequireStable(true); + + testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData); + + testAllMessageRoundTripsOffsetFetchV8AndAbove(requestData.setRequireStable(false)); + + + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, requestData); + } + } + + OffsetFetchResponseTopics responseTopic1 = + new OffsetFetchResponseTopics() + .setName(topic1) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartitions() + .setPartitionIndex(5) + .setMetadata(null) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(3) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()))); + OffsetFetchResponseTopics responseTopic2 = + new OffsetFetchResponseTopics() + .setName(topic2) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartitions() + .setPartitionIndex(10) + .setMetadata("foo") + .setCommittedOffset(200) + .setCommittedLeaderEpoch(2) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code()))); + OffsetFetchResponseTopics responseTopic3 = + new OffsetFetchResponseTopics() + .setName(topic3) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartitions() + .setPartitionIndex(15) + .setMetadata("bar") + .setCommittedOffset(300) + .setCommittedLeaderEpoch(1) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()))); + + OffsetFetchResponseGroup responseGroup1 = + new OffsetFetchResponseGroup() + .setGroupId(groupOne) + .setTopics(Collections.singletonList(responseTopic1)) + .setErrorCode(Errors.NOT_COORDINATOR.code()); + OffsetFetchResponseGroup responseGroup2 = + new OffsetFetchResponseGroup() + .setGroupId(groupTwo) + .setTopics(Arrays.asList(responseTopic1, responseTopic2)) + .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code()); + OffsetFetchResponseGroup responseGroup3 = + new OffsetFetchResponseGroup() + .setGroupId(groupThree) + .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) + .setErrorCode(Errors.NONE.code()); + OffsetFetchResponseGroup responseGroup4 = + new OffsetFetchResponseGroup() + .setGroupId(groupFour) + .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) + .setErrorCode(Errors.NONE.code()); + OffsetFetchResponseGroup responseGroup5 = + new OffsetFetchResponseGroup() + .setGroupId(groupFive) + .setTopics(Arrays.asList(responseTopic1, responseTopic2, responseTopic3)) + .setErrorCode(Errors.NONE.code()); + + Supplier response = + () -> new OffsetFetchResponseData() + .setGroups(Arrays.asList(responseGroup1, responseGroup2, responseGroup3, + responseGroup4, responseGroup5)) + .setThrottleTimeMs(10); + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + OffsetFetchResponseData responseData = response.get(); + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(version, responseData); + } + } + } + + private void testAllMessageRoundTripsOffsetFetchV8AndAbove(Message message) throws Exception { + testDuplication(message); + testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove((short) 8, message); + } + + private void testAllMessageRoundTripsOffsetFetchFromVersionV8AndAbove(short fromVersion, Message message) throws Exception { + for (short version = fromVersion; version <= message.highestSupportedVersion(); version++) { + testEquivalentMessageRoundTrip(version, message); + } } @Test public void testProduceResponseVersions() throws Exception { String topicName = "topic"; - Uuid topicId = Uuid.fromString("klZ9sa2rSvig6QpgGXzALT"); - int partitionIndex = 0; short errorCode = Errors.INVALID_TOPIC_EXCEPTION.code(); long baseOffset = 12L; @@ -642,6 +886,7 @@ public void testProduceResponseVersions() throws Exception { testAllMessageRoundTrips(new ProduceResponseData() .setResponses(new ProduceResponseData.TopicProduceResponseCollection(singletonList( new ProduceResponseData.TopicProduceResponse() + .setName(topicName) .setPartitionResponses(singletonList( new ProduceResponseData.PartitionProduceResponse() .setIndex(partitionIndex) @@ -651,6 +896,7 @@ public void testProduceResponseVersions() throws Exception { Supplier response = () -> new ProduceResponseData() .setResponses(new ProduceResponseData.TopicProduceResponseCollection(singletonList( new ProduceResponseData.TopicProduceResponse() + .setName(topicName) .setPartitionResponses(singletonList( new ProduceResponseData.PartitionProduceResponse() .setIndex(partitionIndex) @@ -685,18 +931,10 @@ public void testProduceResponseVersions() throws Exception { responseData.setThrottleTimeMs(0); } - if (version >= 13) { - responseData.responses().iterator().next().setTopicId(topicId); - } else { - responseData.responses().iterator().next().setName(topicName); - } - if (version >= 3 && version <= 4) { testAllMessageRoundTripsBetweenVersions(version, (short) 5, responseData, responseData); } else if (version >= 6 && version <= 7) { testAllMessageRoundTripsBetweenVersions(version, (short) 8, responseData, responseData); - } else if (version <= 12) { - testAllMessageRoundTripsBetweenVersions(version, (short) 12, responseData, responseData); } else { testEquivalentMessageRoundTrip(version, responseData); } @@ -706,7 +944,7 @@ public void testProduceResponseVersions() throws Exception { @Test public void defaultValueShouldBeWritable() { for (short version = SimpleExampleMessageData.LOWEST_SUPPORTED_VERSION; version <= SimpleExampleMessageData.HIGHEST_SUPPORTED_VERSION; ++version) { - MessageUtil.toByteBufferAccessor(new SimpleExampleMessageData(), version).buffer(); + MessageUtil.toByteBuffer(new SimpleExampleMessageData(), version); } } diff --git a/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java index ee4336ed0a068..1dbc579db0375 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/NullableStructMessageTest.java @@ -129,7 +129,7 @@ private NullableStructMessageData deserialize(ByteBuffer buf, short version) { } private ByteBuffer serialize(NullableStructMessageData message, short version) { - return MessageUtil.toByteBufferAccessor(message, version).buffer(); + return MessageUtil.toByteBuffer(message, version); } private NullableStructMessageData roundTrip(NullableStructMessageData message, short version) { diff --git a/clients/src/test/java/org/apache/kafka/common/message/RecordsSerdeTest.java b/clients/src/test/java/org/apache/kafka/common/message/RecordsSerdeTest.java index dc5ab86920579..82e63ca5541ce 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/RecordsSerdeTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/RecordsSerdeTest.java @@ -70,7 +70,7 @@ private void testAllRoundTrips(SimpleRecordsMessageData message) { } private void testRoundTrip(SimpleRecordsMessageData message, short version) { - ByteBuffer buf = MessageUtil.toByteBufferAccessor(message, version).buffer(); + ByteBuffer buf = MessageUtil.toByteBuffer(message, version); SimpleRecordsMessageData message2 = deserialize(buf.duplicate(), version); assertEquals(message, message2); assertEquals(message.hashCode(), message2.hashCode()); diff --git a/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java b/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java index ba3bc23bf6054..341e327cda904 100644 --- a/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java +++ b/clients/src/test/java/org/apache/kafka/common/message/SimpleExampleMessageTest.java @@ -84,7 +84,7 @@ public void shouldRoundTripFieldThroughBuffer() { out.setProcessId(uuid); out.setZeroCopyByteBuffer(buf); - final ByteBuffer buffer = MessageUtil.toByteBufferAccessor(out, (short) 1).buffer(); + final ByteBuffer buffer = MessageUtil.toByteBuffer(out, (short) 1); final SimpleExampleMessageData in = new SimpleExampleMessageData(); in.read(new ByteBufferAccessor(buffer), (short) 1); @@ -106,7 +106,7 @@ public void shouldRoundTripFieldThroughBufferWithNullable() { out.setZeroCopyByteBuffer(buf1); out.setNullableZeroCopyByteBuffer(buf2); - final ByteBuffer buffer = MessageUtil.toByteBufferAccessor(out, (short) 1).buffer(); + final ByteBuffer buffer = MessageUtil.toByteBuffer(out, (short) 1); final SimpleExampleMessageData in = new SimpleExampleMessageData(); in.read(new ByteBufferAccessor(buffer), (short) 1); @@ -359,7 +359,7 @@ private SimpleExampleMessageData roundTripSerde( SimpleExampleMessageData message, short version ) { - ByteBuffer buf = MessageUtil.toByteBufferAccessor(message, version).buffer(); + ByteBuffer buf = MessageUtil.toByteBuffer(message, version); // Check size calculation assertEquals(buf.remaining(), message.size(new ObjectSerializationCache(), version)); return deserialize(buf.duplicate(), version); diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java index eda6648068c6f..8f50e35b6e3f8 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/MetricsTest.java @@ -64,6 +64,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; public class MetricsTest { private static final Logger log = LoggerFactory.getLogger(MetricsTest.class); @@ -97,9 +98,12 @@ public void testMetricName() { MetricName n2 = metrics.metricName("name", "group", "description", tags); assertEquals(n1, n2, "metric names created in two different ways should be equal"); - assertThrows(IllegalArgumentException.class, - () -> metrics.metricName("name", "group", "description", "key1"), - "Creating MetricName with an odd number of keyValue should fail, IllegalArgumentException expected."); + try { + metrics.metricName("name", "group", "description", "key1"); + fail("Creating MetricName with an odd number of keyValue should fail"); + } catch (IllegalArgumentException e) { + // this is expected + } } @Test @@ -415,14 +419,20 @@ public void testQuotas() { sensor.add(metrics.metricName("test1.total", "grp1"), new CumulativeSum(), new MetricConfig().quota(Quota.upperBound(5.0))); sensor.add(metrics.metricName("test2.total", "grp1"), new CumulativeSum(), new MetricConfig().quota(Quota.lowerBound(0.0))); sensor.record(5.0); - assertThrows(QuotaViolationException.class, - () -> sensor.record(1.0), - "Should have gotten a quota violation."); + try { + sensor.record(1.0); + fail("Should have gotten a quota violation."); + } catch (QuotaViolationException e) { + // this is good + } assertEquals(6.0, (Double) metrics.metrics().get(metrics.metricName("test1.total", "grp1")).metricValue(), EPS); sensor.record(-6.0); - assertThrows(QuotaViolationException.class, - () -> sensor.record(-1.0), - "Should have gotten a quota violation."); + try { + sensor.record(-1.0); + fail("Should have gotten a quota violation."); + } catch (QuotaViolationException e) { + // this is good + } } @Test @@ -660,7 +670,7 @@ private void record(Rate rate, MetricConfig config, int value) { private Double measure(Measurable rate, MetricConfig config) { return rate.measure(config, time.milliseconds()); } - + @Test public void testMetricInstances() { MetricName n1 = metrics.metricInstance(SampleMetrics.METRIC1, "key1", "value1", "key2", "value2"); @@ -670,10 +680,13 @@ public void testMetricInstances() { MetricName n2 = metrics.metricInstance(SampleMetrics.METRIC2, tags); assertEquals(n1, n2, "metric names created in two different ways should be equal"); - assertThrows(IllegalArgumentException.class, - () -> metrics.metricInstance(SampleMetrics.METRIC1, "key1"), - "Creating MetricName with an odd number of keyValue should fail, IllegalArgumentException expected."); - + try { + metrics.metricInstance(SampleMetrics.METRIC1, "key1"); + fail("Creating MetricName with an odd number of keyValue should fail"); + } catch (IllegalArgumentException e) { + // this is expected + } + Map parentTagsWithValues = new HashMap<>(); parentTagsWithValues.put("parent-tag", "parent-tag-value"); @@ -684,20 +697,27 @@ public void testMetricInstances() { MetricName inheritedMetric = inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, childTagsWithValues); Map filledOutTags = inheritedMetric.tags(); - assertEquals("parent-tag-value", filledOutTags.get("parent-tag"), "parent-tag should be set properly"); - assertEquals("child-tag-value", filledOutTags.get("child-tag"), "child-tag should be set properly"); + assertEquals(filledOutTags.get("parent-tag"), "parent-tag-value", "parent-tag should be set properly"); + assertEquals(filledOutTags.get("child-tag"), "child-tag-value", "child-tag should be set properly"); - assertThrows(IllegalArgumentException.class, - () -> inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, parentTagsWithValues), - "Creating MetricName should throw IllegalArgumentException if the child metrics are not defined at runtime."); + try { + inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, parentTagsWithValues); + fail("Creating MetricName should fail if the child metrics are not defined at runtime"); + } catch (IllegalArgumentException e) { + // this is expected + } - Map runtimeTags = new HashMap<>(); - runtimeTags.put("child-tag", "child-tag-value"); - runtimeTags.put("tag-not-in-template", "unexpected-value"); + try { + + Map runtimeTags = new HashMap<>(); + runtimeTags.put("child-tag", "child-tag-value"); + runtimeTags.put("tag-not-in-template", "unexpected-value"); - assertThrows(IllegalArgumentException.class, - () -> inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, runtimeTags), - "Creating MetricName should throw IllegalArgumentException if there is a tag at runtime that is not in the template."); + inherited.metricInstance(SampleMetrics.METRIC_WITH_INHERITED_TAGS, runtimeTags); + fail("Creating MetricName should fail if there is a tag at runtime that is not in the template"); + } catch (IllegalArgumentException e) { + // this is expected + } } } diff --git a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java index 6b806c6bb7bd5..5cdcebc858d6b 100644 --- a/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/metrics/SensorTest.java @@ -70,12 +70,12 @@ public void testRecordLevelEnum() { assertTrue(Sensor.RecordingLevel.DEBUG.shouldRecord(configLevel.id)); assertTrue(Sensor.RecordingLevel.TRACE.shouldRecord(configLevel.id)); - assertEquals(Sensor.RecordingLevel.DEBUG, - Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString())); - assertEquals(Sensor.RecordingLevel.INFO, - Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString())); - assertEquals(Sensor.RecordingLevel.TRACE, - Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.TRACE.toString())); + assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.DEBUG.toString()), + Sensor.RecordingLevel.DEBUG); + assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.INFO.toString()), + Sensor.RecordingLevel.INFO); + assertEquals(Sensor.RecordingLevel.valueOf(Sensor.RecordingLevel.TRACE.toString()), + Sensor.RecordingLevel.TRACE); } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java b/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java index 9aa90811b95ad..e84c7c5e7c2e0 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/ChannelBuildersTest.java @@ -62,10 +62,10 @@ public void testChannelBuilderConfigs() { assertNull(configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.sasl.kerberos.service.name")); - assertEquals("testkafka", configs.get("gssapi.sasl.kerberos.service.name")); + assertEquals(configs.get("gssapi.sasl.kerberos.service.name"), "testkafka"); assertFalse(securityConfig.unused().contains("gssapi.sasl.kerberos.service.name")); - assertEquals("testkafkaglobal", configs.get("sasl.kerberos.service.name")); + assertEquals(configs.get("sasl.kerberos.service.name"), "testkafkaglobal"); assertFalse(securityConfig.unused().contains("sasl.kerberos.service.name")); assertNull(configs.get("listener.name.listener1.sasl.kerberos.service.name")); @@ -74,35 +74,35 @@ public void testChannelBuilderConfigs() { assertNull(configs.get("plain.sasl.server.callback.handler.class")); assertFalse(securityConfig.unused().contains("plain.sasl.server.callback.handler.class")); - assertEquals("custom.config1", configs.get("listener.name.listener1.gssapi.config1.key")); + assertEquals(configs.get("listener.name.listener1.gssapi.config1.key"), "custom.config1"); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.config1.key")); - assertEquals("custom.config2", configs.get("custom.config2.key")); + assertEquals(configs.get("custom.config2.key"), "custom.config2"); assertFalse(securityConfig.unused().contains("custom.config2.key")); // test configs without listener prefix securityConfig = new TestSecurityConfig(props); configs = ChannelBuilders.channelBuilderConfigs(securityConfig, null); - assertEquals("testkafka", configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name")); + assertEquals(configs.get("listener.name.listener1.gssapi.sasl.kerberos.service.name"), "testkafka"); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.sasl.kerberos.service.name")); assertNull(configs.get("gssapi.sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("gssapi.sasl.kerberos.service.name")); - assertEquals("testkafkaglobal", configs.get("listener.name.listener1.sasl.kerberos.service.name")); + assertEquals(configs.get("listener.name.listener1.sasl.kerberos.service.name"), "testkafkaglobal"); assertFalse(securityConfig.unused().contains("listener.name.listener1.sasl.kerberos.service.name")); assertNull(configs.get("sasl.kerberos.service.name")); assertFalse(securityConfig.unused().contains("sasl.kerberos.service.name")); - assertEquals("callback", configs.get("plain.sasl.server.callback.handler.class")); + assertEquals(configs.get("plain.sasl.server.callback.handler.class"), "callback"); assertFalse(securityConfig.unused().contains("plain.sasl.server.callback.handler.class")); - assertEquals("custom.config1", configs.get("listener.name.listener1.gssapi.config1.key")); + assertEquals(configs.get("listener.name.listener1.gssapi.config1.key"), "custom.config1"); assertFalse(securityConfig.unused().contains("listener.name.listener1.gssapi.config1.key")); - assertEquals("custom.config2", configs.get("custom.config2.key")); + assertEquals(configs.get("custom.config2.key"), "custom.config2"); assertFalse(securityConfig.unused().contains("custom.config2.key")); } @@ -118,15 +118,5 @@ public void configure(Map configs) { public KafkaPrincipal build(AuthenticationContext context) { return null; } - - @Override - public byte[] serialize(KafkaPrincipal principal) { - return new byte[0]; - } - - @Override - public KafkaPrincipal deserialize(byte[] bytes) { - return null; - } } } diff --git a/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java b/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java index df5e1aea7f8df..90dd34bb07835 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java +++ b/clients/src/test/java/org/apache/kafka/common/network/NioEchoServer.java @@ -183,6 +183,11 @@ public void verifyReauthenticationMetrics(int successfulReauthentications, final } } + public void verifyAuthenticationNoReauthMetric(int successfulAuthenticationNoReauths) throws InterruptedException { + waitForMetrics("successful-authentication-no-reauth", successfulAuthenticationNoReauths, + EnumSet.of(MetricType.TOTAL)); + } + public void waitForMetric(String name, final double expectedValue) throws InterruptedException { waitForMetrics(name, expectedValue, EnumSet.of(MetricType.TOTAL, MetricType.RATE)); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java index 347f76135866d..d955c7939ae8f 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SelectorTest.java @@ -891,7 +891,7 @@ private KafkaMetric getMetric(String name, Map tags) throws Exce .filter(entry -> entry.getKey().name().equals(name) && entry.getKey().tags().equals(tags)) .findFirst(); - if (metric.isEmpty()) + if (!metric.isPresent()) throw new Exception(String.format("Could not find metric called %s with tags %s", name, tags.toString())); return metric.get().getValue(); @@ -1112,7 +1112,7 @@ private KafkaMetric getMetric(String name) throws Exception { Optional> metric = metrics.metrics().entrySet().stream() .filter(entry -> entry.getKey().name().equals(name)) .findFirst(); - if (metric.isEmpty()) + if (!metric.isPresent()) throw new Exception(String.format("Could not find metric called %s", name)); return metric.get().getValue(); diff --git a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java index 72f130ca4e3ff..9208171d1a926 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/SslTransportLayerTest.java @@ -110,8 +110,7 @@ public Args(String tlsProtocol, boolean useInlinePem) throws Exception { this.useInlinePem = useInlinePem; sslConfigOverrides = new HashMap<>(); sslConfigOverrides.put(SslConfigs.SSL_PROTOCOL_CONFIG, tlsProtocol); - sslConfigOverrides.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(tlsProtocol)); - sslConfigOverrides.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); + sslConfigOverrides.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsProtocol)); init(); } @@ -608,7 +607,7 @@ public void testInvalidKeyPassword(Args args) throws Exception { public void testTlsDefaults(Args args) throws Exception { args.sslServerConfigs = args.serverCertStores.getTrustingConfig(args.clientCertStores); args.sslClientConfigs = args.clientCertStores.getTrustingConfig(args.serverCertStores); - args.sslClientConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); + assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, args.sslServerConfigs.get(SslConfigs.SSL_PROTOCOL_CONFIG)); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, args.sslClientConfigs.get(SslConfigs.SSL_PROTOCOL_CONFIG)); @@ -1097,14 +1096,14 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), CertStores invalidCertStores = certBuilder(true, "server", args.useInlinePem).addHostName("127.0.0.1").build(); Map invalidConfigs = args.getTrustingConfig(invalidCertStores, args.clientCertStores); - verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs); + verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "keystore with different SubjectAltName"); Map missingStoreConfigs = new HashMap<>(); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_TYPE_CONFIG, "PKCS12"); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "some.keystore.path"); missingStoreConfigs.put(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, new Password("some.keystore.password")); missingStoreConfigs.put(SslConfigs.SSL_KEY_PASSWORD_CONFIG, new Password("some.key.password")); - verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs); + verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "keystore not found"); // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE); @@ -1168,7 +1167,7 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), for (String propName : CertStores.KEYSTORE_PROPS) { invalidKeystoreConfigs.put(propName, invalidConfig.get(propName)); } - verifyInvalidReconfigure(reconfigurableBuilder, invalidKeystoreConfigs); + verifyInvalidReconfigure(reconfigurableBuilder, invalidKeystoreConfigs, "keystore without existing SubjectAltName"); String node3 = "3"; selector.connect(node3, addr, BUFFER_SIZE, BUFFER_SIZE); NetworkTestUtils.checkClientConnection(selector, node3, 100, 10); @@ -1224,13 +1223,13 @@ false, securityProtocol, config, null, null, TIME, new LogContext(), Map invalidConfigs = new HashMap<>(newTruststoreConfigs); invalidConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "INVALID_TYPE"); - verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs); + verifyInvalidReconfigure(reconfigurableBuilder, invalidConfigs, "invalid truststore type"); Map missingStoreConfigs = new HashMap<>(); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG, "some.truststore.path"); missingStoreConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, new Password("some.truststore.password")); - verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs); + verifyInvalidReconfigure(reconfigurableBuilder, missingStoreConfigs, "truststore not found"); // Verify that new connections continue to work with the server with previously configured keystore after failed reconfiguration newClientSelector.connect("3", addr, BUFFER_SIZE, BUFFER_SIZE); @@ -1281,7 +1280,7 @@ public void testInvalidSslEngineFactory(Args args) { } private void verifyInvalidReconfigure(ListenerReconfigurable reconfigurable, - Map invalidConfigs) { + Map invalidConfigs, String errorMessage) { assertThrows(KafkaException.class, () -> reconfigurable.validateReconfiguration(invalidConfigs)); assertThrows(KafkaException.class, () -> reconfigurable.reconfigure(invalidConfigs)); } diff --git a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java index 0b6003c963ac8..38804321d5977 100644 --- a/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/network/Tls13SelectorTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.common.network; import org.apache.kafka.common.config.SslConfigs; -import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.test.TestSslUtils; import org.apache.kafka.test.TestUtils; @@ -47,13 +46,6 @@ protected Map createSslClientConfigs(File trustStoreFile) throws return configs; } - @Flaky(value = "KAFKA-14249", comment = "Copied from base class. Remove this override once the flakiness has been resolved.") - @Test - @Override - public void testCloseOldestConnection() throws Exception { - super.testCloseOldestConnection(); - } - /** * TLS 1.3 has a post-handshake key and IV update, which will update the sending and receiving keys * for one side of the connection. diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java index 528fefed3f3db..23c49412ab9e9 100644 --- a/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/ErrorsTest.java @@ -53,7 +53,7 @@ public void testUniqueExceptions() { public void testExceptionsAreNotGeneric() { for (Errors error : Errors.values()) { if (error != Errors.NONE) - assertNotEquals(ApiException.class, error.exception().getClass(), "Generic ApiException should not be used"); + assertNotEquals(error.exception().getClass(), ApiException.class, "Generic ApiException should not be used"); } } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java index 7a6d05778eda0..adcabba1a4977 100644 --- a/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/ProtocolTest.java @@ -19,7 +19,6 @@ import org.junit.jupiter.api.Test; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertTrue; public class ProtocolTest { @@ -28,18 +27,6 @@ public void testToHtml() { var html = Protocol.toHtml(); assertFalse(html.isBlank()); assertFalse(html.contains("LeaderAndIsr"), "Removed LeaderAndIsr should not show in HTML"); - - String requestVersion; - String responseVersion; - for (ApiKeys key : ApiKeys.clientApis()) { - for (short version = key.oldestVersion(); version <= key.latestVersion(); version++) { - requestVersion = key.name + " Request (Version: " + version; - responseVersion = key.name + " Response (Version: " + version; - - assertTrue(html.contains(requestVersion), "Missing request header for " + key.name + " version:" + version); - assertTrue(html.contains(responseVersion), "Missing response header for " + key.name + " version:" + version); - } - } } } diff --git a/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java b/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java index 32ba528fe3eed..027dca8bf84fc 100644 --- a/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java +++ b/clients/src/test/java/org/apache/kafka/common/protocol/types/ProtocolSerializationTest.java @@ -195,9 +195,12 @@ public void testReadArraySizeTooLarge() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> type.read(invalidBuffer), - "Array size not validated"); + try { + type.read(invalidBuffer); + fail("Array size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -210,9 +213,12 @@ public void testReadCompactArraySizeTooLarge() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> type.read(invalidBuffer), - "Array size not validated"); + try { + type.read(invalidBuffer); + fail("Array size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -246,9 +252,12 @@ public void testReadNegativeArraySize() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> type.read(invalidBuffer), - "Array size not validated"); + try { + type.read(invalidBuffer); + fail("Array size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -261,9 +270,12 @@ public void testReadZeroCompactArraySize() { for (int i = 0; i < size; i++) invalidBuffer.put((byte) i); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> type.read(invalidBuffer), - "Array size not validated"); + try { + type.read(invalidBuffer); + fail("Array size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -273,14 +285,19 @@ public void testReadStringSizeTooLarge() { invalidBuffer.putShort((short) (stringBytes.length * 5)); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> Type.STRING.read(invalidBuffer), - "String size not validated"); - + try { + Type.STRING.read(invalidBuffer); + fail("String size not validated"); + } catch (SchemaException e) { + // Expected exception + } invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> Type.NULLABLE_STRING.read(invalidBuffer), - "String size not validated"); + try { + Type.NULLABLE_STRING.read(invalidBuffer); + fail("String size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -290,10 +307,12 @@ public void testReadNegativeStringSize() { invalidBuffer.putShort((short) -1); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - - assertThrows(SchemaException.class, - () -> Type.STRING.read(invalidBuffer), - "String size not validated"); + try { + Type.STRING.read(invalidBuffer); + fail("String size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -303,14 +322,19 @@ public void testReadBytesSizeTooLarge() { invalidBuffer.putInt(stringBytes.length * 5); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> Type.BYTES.read(invalidBuffer), - "Bytes size not validated"); - + try { + Type.BYTES.read(invalidBuffer); + fail("Bytes size not validated"); + } catch (SchemaException e) { + // Expected exception + } invalidBuffer.rewind(); - assertThrows(SchemaException.class, - () -> Type.NULLABLE_BYTES.read(invalidBuffer), - "Bytes size not validated"); + try { + Type.NULLABLE_BYTES.read(invalidBuffer); + fail("Bytes size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test @@ -320,10 +344,12 @@ public void testReadNegativeBytesSize() { invalidBuffer.putInt(-20); invalidBuffer.put(stringBytes); invalidBuffer.rewind(); - - assertThrows(SchemaException.class, - () -> Type.BYTES.read(invalidBuffer), - "Bytes size not validated"); + try { + Type.BYTES.read(invalidBuffer); + fail("Bytes size not validated"); + } catch (SchemaException e) { + // Expected exception + } } @Test diff --git a/clients/src/test/java/org/apache/kafka/common/record/EndTransactionMarkerTest.java b/clients/src/test/java/org/apache/kafka/common/record/EndTransactionMarkerTest.java index c9a3b3b10b7d7..64224e003285f 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/EndTransactionMarkerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/EndTransactionMarkerTest.java @@ -17,31 +17,16 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.InvalidRecordException; -import org.apache.kafka.common.message.EndTxnMarker; -import org.apache.kafka.common.protocol.types.Field; -import org.apache.kafka.common.protocol.types.Schema; -import org.apache.kafka.common.protocol.types.Struct; -import org.apache.kafka.common.protocol.types.Type; -import org.apache.kafka.common.utils.ByteUtils; import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.Arrays; -import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class EndTransactionMarkerTest { - // Old hard-coded schema, used to validate old hard-coded schema format is exactly the same as new auto generated protocol format - private final Schema v0Schema = new Schema( - new Field("version", Type.INT16), - new Field("coordinator_epoch", Type.INT32)); - - private static final List VALID_CONTROLLER_RECORD_TYPE = Arrays.asList(ControlRecordType.COMMIT, ControlRecordType.ABORT); - @Test public void testUnknownControlTypeNotAllowed() { assertThrows(IllegalArgumentException.class, @@ -55,13 +40,19 @@ public void testCannotDeserializeUnknownControlType() { } @Test - public void testIllegalVersion() { + public void testIllegalNegativeVersion() { ByteBuffer buffer = ByteBuffer.allocate(2); buffer.putShort((short) -1); buffer.flip(); assertThrows(InvalidRecordException.class, () -> EndTransactionMarker.deserializeValue(ControlRecordType.ABORT, buffer)); } + @Test + public void testNotEnoughBytes() { + assertThrows(InvalidRecordException.class, + () -> EndTransactionMarker.deserializeValue(ControlRecordType.COMMIT, ByteBuffer.wrap(new byte[0]))); + } + @Test public void testSerde() { int coordinatorEpoch = 79; @@ -82,70 +73,4 @@ public void testDeserializeNewerVersion() { EndTransactionMarker deserialized = EndTransactionMarker.deserializeValue(ControlRecordType.COMMIT, buffer); assertEquals(coordinatorEpoch, deserialized.coordinatorEpoch()); } - - @Test - public void testSerializeAndDeserialize() { - for (ControlRecordType type: VALID_CONTROLLER_RECORD_TYPE) { - for (short version = EndTxnMarker.LOWEST_SUPPORTED_VERSION; - version <= EndTxnMarker.HIGHEST_SUPPORTED_VERSION; version++) { - EndTransactionMarker marker = new EndTransactionMarker(type, 1); - - ByteBuffer buffer = marker.serializeValue(); - EndTransactionMarker deserializedMarker = EndTransactionMarker.deserializeValue(type, buffer); - assertEquals(marker, deserializedMarker); - } - } - } - - @Test - public void testEndTxnMarkerValueSize() { - for (ControlRecordType type: VALID_CONTROLLER_RECORD_TYPE) { - EndTransactionMarker marker = new EndTransactionMarker(type, 1); - int offsetSize = ByteUtils.sizeOfVarint(0); - int timestampSize = ByteUtils.sizeOfVarlong(0); - int keySize = ControlRecordType.CURRENT_CONTROL_RECORD_KEY_SIZE; - int valueSize = marker.serializeValue().remaining(); - int headerSize = ByteUtils.sizeOfVarint(Record.EMPTY_HEADERS.length); - int totalSize = 1 + offsetSize + timestampSize + ByteUtils.sizeOfVarint(keySize) + keySize + ByteUtils.sizeOfVarint(valueSize) + valueSize + headerSize; - assertEquals(ByteUtils.sizeOfVarint(totalSize) + totalSize, marker.endTxnMarkerValueSize()); - } - } - - @Test - public void testBackwardDeserializeCompatibility() { - int coordinatorEpoch = 10; - for (ControlRecordType type: VALID_CONTROLLER_RECORD_TYPE) { - for (short version = EndTxnMarker.LOWEST_SUPPORTED_VERSION; - version <= EndTxnMarker.HIGHEST_SUPPORTED_VERSION; version++) { - - Struct struct = new Struct(v0Schema); - struct.set("version", version); - struct.set("coordinator_epoch", coordinatorEpoch); - - ByteBuffer oldVersionBuffer = ByteBuffer.allocate(struct.sizeOf()); - struct.writeTo(oldVersionBuffer); - oldVersionBuffer.flip(); - - EndTransactionMarker deserializedMarker = EndTransactionMarker.deserializeValue(type, oldVersionBuffer); - assertEquals(coordinatorEpoch, deserializedMarker.coordinatorEpoch()); - assertEquals(type, deserializedMarker.controlType()); - } - } - } - - @Test - public void testForwardDeserializeCompatibility() { - int coordinatorEpoch = 10; - for (ControlRecordType type: VALID_CONTROLLER_RECORD_TYPE) { - for (short version = EndTxnMarker.LOWEST_SUPPORTED_VERSION; - version <= EndTxnMarker.HIGHEST_SUPPORTED_VERSION; version++) { - EndTransactionMarker marker = new EndTransactionMarker(type, coordinatorEpoch); - ByteBuffer newVersionBuffer = marker.serializeValue(); - - Struct struct = v0Schema.read(newVersionBuffer); - EndTransactionMarker deserializedMarker = new EndTransactionMarker(type, struct.getInt("coordinator_epoch")); - assertEquals(marker, deserializedMarker); - } - } - } } diff --git a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java index a9d12285f1298..4461108713c07 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/FileRecordsTest.java @@ -18,16 +18,17 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.compress.GzipCompression; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.internals.RecordHeader; import org.apache.kafka.common.network.TransferableChannel; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import org.mockito.Mockito; import java.io.File; @@ -39,27 +40,27 @@ import java.util.Iterator; import java.util.List; import java.util.Optional; +import java.util.Random; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.Future; -import java.util.stream.IntStream; import static java.util.Arrays.asList; +import static org.apache.kafka.common.utils.Utils.utf8; import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.atLeastOnce; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; @@ -72,10 +73,12 @@ public class FileRecordsTest { "ijkl".getBytes() }; private FileRecords fileRecords; + private Time time; @BeforeEach public void setup() throws IOException { this.fileRecords = createFileRecords(values); + this.time = new MockTime(); } @AfterEach @@ -89,7 +92,7 @@ public void testAppendProtectsFromOverflow() throws Exception { FileChannel fileChannelMock = mock(FileChannel.class); when(fileChannelMock.size()).thenReturn((long) Integer.MAX_VALUE); - FileRecords records = new FileRecords(fileMock, fileChannelMock, Integer.MAX_VALUE); + FileRecords records = new FileRecords(fileMock, fileChannelMock, 0, Integer.MAX_VALUE, false); assertThrows(IllegalArgumentException.class, () -> append(records, values)); } @@ -99,7 +102,7 @@ public void testOpenOversizeFile() throws Exception { FileChannel fileChannelMock = mock(FileChannel.class); when(fileChannelMock.size()).thenReturn(Integer.MAX_VALUE + 5L); - assertThrows(KafkaException.class, () -> new FileRecords(fileMock, fileChannelMock, Integer.MAX_VALUE)); + assertThrows(KafkaException.class, () -> new FileRecords(fileMock, fileChannelMock, 0, Integer.MAX_VALUE, false)); } @Test @@ -142,7 +145,7 @@ public void testSliceSizeLimitWithConcurrentWrite() throws Exception { Future readerCompletion = executor.submit(() -> { while (log.sizeInBytes() < maxSizeInBytes) { int currentSize = log.sizeInBytes(); - Records slice = log.slice(0, currentSize); + FileRecords slice = log.slice(0, currentSize); assertEquals(currentSize, slice.sizeInBytes()); } return null; @@ -197,7 +200,7 @@ public void testIterationDoesntChangePosition() throws IOException { * Test a simple append and read. */ @Test - public void testRead() { + public void testRead() throws IOException { FileRecords read = fileRecords.slice(0, fileRecords.sizeInBytes()); assertEquals(fileRecords.sizeInBytes(), read.sizeInBytes()); TestUtils.checkEquals(fileRecords.batches(), read.batches()); @@ -253,25 +256,25 @@ public void testSearch() throws IOException { int message1Size = batches.get(0).sizeInBytes(); assertEquals(new FileRecords.LogOffsetPosition(0L, position, message1Size), - fileRecords.searchForOffsetFromPosition(0, 0), + fileRecords.searchForOffsetWithSize(0, 0), "Should be able to find the first message by its offset"); position += message1Size; int message2Size = batches.get(1).sizeInBytes(); assertEquals(new FileRecords.LogOffsetPosition(1L, position, message2Size), - fileRecords.searchForOffsetFromPosition(1, 0), + fileRecords.searchForOffsetWithSize(1, 0), "Should be able to find second message when starting from 0"); assertEquals(new FileRecords.LogOffsetPosition(1L, position, message2Size), - fileRecords.searchForOffsetFromPosition(1, position), + fileRecords.searchForOffsetWithSize(1, position), "Should be able to find second message starting from its offset"); position += message2Size + batches.get(2).sizeInBytes(); int message4Size = batches.get(3).sizeInBytes(); assertEquals(new FileRecords.LogOffsetPosition(50L, position, message4Size), - fileRecords.searchForOffsetFromPosition(3, position), + fileRecords.searchForOffsetWithSize(3, position), "Should be able to find fourth message from a non-existent offset"); assertEquals(new FileRecords.LogOffsetPosition(50L, position, message4Size), - fileRecords.searchForOffsetFromPosition(50, position), + fileRecords.searchForOffsetWithSize(50, position), "Should be able to find fourth message by correct offset"); } @@ -279,13 +282,13 @@ public void testSearch() throws IOException { * Test that the message set iterator obeys start and end slicing */ @Test - public void testIteratorWithLimits() { + public void testIteratorWithLimits() throws IOException { RecordBatch batch = batches(fileRecords).get(1); - int start = fileRecords.searchForOffsetFromPosition(1, 0).position; + int start = fileRecords.searchForOffsetWithSize(1, 0).position; int size = batch.sizeInBytes(); - Records slice = fileRecords.slice(start, size); + FileRecords slice = fileRecords.slice(start, size); assertEquals(Collections.singletonList(batch), batches(slice)); - Records slice2 = fileRecords.slice(start, size - 1); + FileRecords slice2 = fileRecords.slice(start, size - 1); assertEquals(Collections.emptyList(), batches(slice2)); } @@ -295,7 +298,7 @@ public void testIteratorWithLimits() { @Test public void testTruncate() throws IOException { RecordBatch batch = batches(fileRecords).get(0); - int end = fileRecords.searchForOffsetFromPosition(1, 0).position; + int end = fileRecords.searchForOffsetWithSize(1, 0).position; fileRecords.truncateTo(end); assertEquals(Collections.singletonList(batch), batches(fileRecords)); assertEquals(batch.sizeInBytes(), fileRecords.sizeInBytes()); @@ -313,7 +316,7 @@ public void testTruncateNotCalledIfSizeIsSameAsTargetSize() throws IOException { when(channelMock.size()).thenReturn(42L); when(channelMock.position(42L)).thenReturn(null); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(42); verify(channelMock, atLeastOnce()).size(); @@ -330,7 +333,7 @@ public void testTruncateNotCalledIfSizeIsBiggerThanTargetSize() throws IOExcepti when(channelMock.size()).thenReturn(42L); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); try { fileRecords.truncateTo(43); @@ -352,7 +355,7 @@ public void testTruncateIfSizeIsDifferentToTargetSize() throws IOException { when(channelMock.size()).thenReturn(42L); when(channelMock.truncate(anyLong())).thenReturn(channelMock); - FileRecords fileRecords = new FileRecords(tempFile(), channelMock, Integer.MAX_VALUE); + FileRecords fileRecords = new FileRecords(tempFile(), channelMock, 0, Integer.MAX_VALUE, false); fileRecords.truncateTo(23); verify(channelMock, atLeastOnce()).size(); @@ -413,40 +416,20 @@ public void testPreallocateClearShutdown() throws IOException { } @Test - public void testSearchForTimestamp() throws IOException { - for (RecordVersion version : RecordVersion.values()) { - testSearchForTimestamp(version); - } + public void testFormatConversionWithPartialMessage() throws IOException { + RecordBatch batch = batches(fileRecords).get(1); + int start = fileRecords.searchForOffsetWithSize(1, 0).position; + int size = batch.sizeInBytes(); + FileRecords slice = fileRecords.slice(start, size - 1); + Records messageV0 = slice.downConvert(RecordBatch.MAGIC_VALUE_V0, 0, time).records(); + assertTrue(batches(messageV0).isEmpty(), "No message should be there"); + assertEquals(size - 1, messageV0.sizeInBytes(), "There should be " + (size - 1) + " bytes"); } - /** - * Test slice when already sliced file records have start position greater than available bytes - * in the file records. - */ @Test - public void testSliceForAlreadySlicedFileRecords() throws IOException { - byte[][] values = new byte[][] { - "abcd".getBytes(), - "efgh".getBytes(), - "ijkl".getBytes(), - "mnopqr".getBytes(), - "stuv".getBytes() - }; - try (FileRecords fileRecords = createFileRecords(values)) { - List items = batches(fileRecords.slice(0, fileRecords.sizeInBytes())); - - // Slice from fourth message until the end. - int position = IntStream.range(0, 3).map(i -> items.get(i).sizeInBytes()).sum(); - Records sliced = fileRecords.slice(position, fileRecords.sizeInBytes() - position); - assertEquals(fileRecords.sizeInBytes() - position, sliced.sizeInBytes()); - assertEquals(items.subList(3, items.size()), batches(sliced), "Read starting from the fourth message"); - - // Further slice the already sliced file records, from fifth message until the end. Now the - // bytes available in the sliced records are less than the moved position from original records. - position = items.get(3).sizeInBytes(); - Records finalSliced = sliced.slice(position, sliced.sizeInBytes() - position); - assertEquals(sliced.sizeInBytes() - position, finalSliced.sizeInBytes()); - assertEquals(items.subList(4, items.size()), batches(finalSliced), "Read starting from the fifth message"); + public void testSearchForTimestamp() throws IOException { + for (RecordVersion version : RecordVersion.values()) { + testSearchForTimestamp(version); } } @@ -492,6 +475,39 @@ private void appendWithOffsetAndTimestamp(FileRecords fileRecords, fileRecords.append(builder.build()); } + @Test + public void testDownconversionAfterMessageFormatDowngrade() throws IOException { + // random bytes + Random random = new Random(); + byte[] bytes = new byte[3000]; + random.nextBytes(bytes); + + // records + GzipCompression compression = Compression.gzip().build(); + List offsets = asList(0L, 1L); + List magic = asList(RecordBatch.MAGIC_VALUE_V2, RecordBatch.MAGIC_VALUE_V1); // downgrade message format from v2 to v1 + List records = asList( + new SimpleRecord(1L, "k1".getBytes(), bytes), + new SimpleRecord(2L, "k2".getBytes(), bytes)); + byte toMagic = 1; + + // create MemoryRecords + ByteBuffer buffer = ByteBuffer.allocate(8000); + for (int i = 0; i < records.size(); i++) { + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, magic.get(i), compression, TimestampType.CREATE_TIME, 0L); + builder.appendWithOffset(offsets.get(i), records.get(i)); + builder.close(); + } + buffer.flip(); + + // create FileRecords, down-convert and verify + try (FileRecords fileRecords = FileRecords.open(tempFile())) { + fileRecords.append(MemoryRecords.readableRecords(buffer)); + fileRecords.flush(); + downConvertAndVerifyRecords(records, offsets, fileRecords, compression, toMagic, 0L, time); + } + } + @Test public void testConversion() throws IOException { doTestConversion(Compression.NONE, RecordBatch.MAGIC_VALUE_V0); @@ -521,194 +537,6 @@ public void testBytesLengthOfWriteTo() throws IOException { verify(channel).transferFrom(any(), anyLong(), eq((long) size - firstWritten)); } - /** - * Test two conditions: - * 1. If the target offset equals the base offset of the first batch - * 2. If the target offset is less than the base offset of the first batch - *

          - * If the base offset of the first batch is equal to or greater than the target offset, it should return the - * position of the first batch and the lastOffset method should not be called. - */ - @ParameterizedTest - @ValueSource(longs = {5, 10}) - public void testSearchForOffsetFromPosition1(long baseOffset) throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch batch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch.baseOffset()).thenReturn(baseOffset); - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, batch); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(5L, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(batch), result); - verify(batch, never()).lastOffset(); - } - - /** - * Test the case when the target offset equals the last offset of the first batch. - */ - @Test - public void testSearchForOffsetFromPosition2() throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch batch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch.baseOffset()).thenReturn(3L); - when(batch.lastOffset()).thenReturn(5L); - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, batch); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(5L, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(batch), result); - // target is equal to the last offset of the batch, we should call lastOffset - verify(batch, times(1)).lastOffset(); - } - - /** - * Test the case when the target offset equals the last offset of the last batch. - */ - @Test - public void testSearchForOffsetFromPosition3() throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch prevBatch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(prevBatch.baseOffset()).thenReturn(5L); - when(prevBatch.lastOffset()).thenReturn(12L); - FileLogInputStream.FileChannelRecordBatch currentBatch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(currentBatch.baseOffset()).thenReturn(15L); - when(currentBatch.lastOffset()).thenReturn(20L); - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, prevBatch, currentBatch); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(20L, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(currentBatch), result); - // Because the target offset is in the current batch, we should not call lastOffset in the previous batch - verify(prevBatch, never()).lastOffset(); - verify(currentBatch, times(1)).lastOffset(); - } - - /** - * Test the case when the target offset is within the range of the previous batch. - */ - @Test - public void testSearchForOffsetFromPosition4() throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch prevBatch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(prevBatch.baseOffset()).thenReturn(5L); - when(prevBatch.lastOffset()).thenReturn(12L); // > targetOffset - FileLogInputStream.FileChannelRecordBatch currentBatch = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(currentBatch.baseOffset()).thenReturn(15L); // >= targetOffset - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, prevBatch, currentBatch); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(10L, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(prevBatch), result); - // Because the target offset is in the current batch, we should call lastOffset - // on the previous batch - verify(prevBatch, times(1)).lastOffset(); - } - - /** - * Test the case when no batch matches the target offset. - */ - @Test - public void testSearchForOffsetFromPosition5() throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch batch1 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch1.baseOffset()).thenReturn(5L); // < targetOffset - FileLogInputStream.FileChannelRecordBatch batch2 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch2.baseOffset()).thenReturn(8L); // < targetOffset - when(batch2.lastOffset()).thenReturn(9L); // < targetOffset - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, batch1, batch2); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(10L, 0); - - assertNull(result); - // Because the target offset is exceeded by the last offset of the batch2, - // we should call lastOffset on the batch2 - verify(batch1, never()).lastOffset(); - verify(batch2, times(1)).lastOffset(); - } - - /** - * Test two conditions: - * 1. If the target offset is less than the base offset of the last batch - * 2. If the target offset equals the base offset of the last batch - */ - @ParameterizedTest - @ValueSource(longs = {8, 10}) - public void testSearchForOffsetFromPosition6(long baseOffset) throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch batch1 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch1.baseOffset()).thenReturn(5L); // < targetOffset - FileLogInputStream.FileChannelRecordBatch batch2 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch2.baseOffset()).thenReturn(baseOffset); // < targetOffset or == targetOffset - when(batch2.lastOffset()).thenReturn(12L); // >= targetOffset - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, batch1, batch2); - - long targetOffset = 10L; - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(targetOffset, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(batch2), result); - if (targetOffset == baseOffset) { - // Because the target offset is equal to the base offset of the batch2, we should not call - // lastOffset on batch2 and batch1 - verify(batch1, never()).lastOffset(); - verify(batch2, never()).lastOffset(); - } else { - // Because the target offset is in the batch2, we should not call - // lastOffset on batch1 - verify(batch1, never()).lastOffset(); - verify(batch2, times(1)).lastOffset(); - } - } - - /** - * Test the case when the target offset is between two batches. - */ - @Test - public void testSearchForOffsetFromPosition7() throws IOException { - File mockFile = mock(File.class); - FileChannel mockChannel = mock(FileChannel.class); - FileLogInputStream.FileChannelRecordBatch batch1 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch1.baseOffset()).thenReturn(5L); - when(batch1.lastOffset()).thenReturn(10L); - FileLogInputStream.FileChannelRecordBatch batch2 = mock(FileLogInputStream.FileChannelRecordBatch.class); - when(batch2.baseOffset()).thenReturn(15L); - when(batch2.lastOffset()).thenReturn(20L); - - FileRecords fileRecords = Mockito.spy(new FileRecords(mockFile, mockChannel, 100)); - mockFileRecordBatches(fileRecords, batch1, batch2); - - FileRecords.LogOffsetPosition result = fileRecords.searchForOffsetFromPosition(13L, 0); - - assertEquals(FileRecords.LogOffsetPosition.fromBatch(batch2), result); - // Because the target offset is between the two batches, we should call lastOffset on the batch1 - verify(batch1, times(1)).lastOffset(); - verify(batch2, never()).lastOffset(); - } - - private void mockFileRecordBatches(FileRecords fileRecords, FileLogInputStream.FileChannelRecordBatch... batch) { - List batches = asList(batch); - doReturn((Iterable) batches::iterator) - .when(fileRecords) - .batchesFrom(anyInt()); - } - private void doTestConversion(Compression compression, byte toMagic) throws IOException { List offsets = asList(0L, 2L, 3L, 9L, 11L, 15L, 16L, 17L, 22L, 24L); @@ -752,6 +580,7 @@ private void doTestConversion(Compression compression, byte toMagic) throws IOEx try (FileRecords fileRecords = FileRecords.open(tempFile())) { fileRecords.append(MemoryRecords.readableRecords(buffer)); fileRecords.flush(); + downConvertAndVerifyRecords(records, offsets, fileRecords, compression, toMagic, 0L, time); if (toMagic <= RecordBatch.MAGIC_VALUE_V1 && compression.type() == CompressionType.NONE) { long firstOffset; @@ -764,8 +593,75 @@ private void doTestConversion(Compression compression, byte toMagic) throws IOEx int index = filteredOffsets.indexOf(firstOffset) - 1; filteredRecords.remove(index); filteredOffsets.remove(index); + downConvertAndVerifyRecords(filteredRecords, filteredOffsets, fileRecords, compression, toMagic, firstOffset, time); + } else { + // firstOffset doesn't have any effect in this case + downConvertAndVerifyRecords(records, offsets, fileRecords, compression, toMagic, 10L, time); + } + } + } + + private void downConvertAndVerifyRecords(List initialRecords, + List initialOffsets, + FileRecords fileRecords, + Compression compression, + byte toMagic, + long firstOffset, + Time time) { + long minBatchSize = Long.MAX_VALUE; + long maxBatchSize = Long.MIN_VALUE; + for (RecordBatch batch : fileRecords.batches()) { + minBatchSize = Math.min(minBatchSize, batch.sizeInBytes()); + maxBatchSize = Math.max(maxBatchSize, batch.sizeInBytes()); + } + + // Test the normal down-conversion path + List convertedRecords = new ArrayList<>(); + convertedRecords.add(fileRecords.downConvert(toMagic, firstOffset, time).records()); + verifyConvertedRecords(initialRecords, initialOffsets, convertedRecords, compression, toMagic); + convertedRecords.clear(); + } + + private void verifyConvertedRecords(List initialRecords, + List initialOffsets, + List convertedRecordsList, + Compression compression, + byte magicByte) { + int i = 0; + + for (Records convertedRecords : convertedRecordsList) { + for (RecordBatch batch : convertedRecords.batches()) { + assertTrue(batch.magic() <= magicByte, "Magic byte should be lower than or equal to " + magicByte); + if (batch.magic() == RecordBatch.MAGIC_VALUE_V0) + assertEquals(TimestampType.NO_TIMESTAMP_TYPE, batch.timestampType()); + else + assertEquals(TimestampType.CREATE_TIME, batch.timestampType()); + assertEquals(compression.type(), batch.compressionType(), "Compression type should not be affected by conversion"); + for (Record record : batch) { + assertTrue(record.hasMagic(batch.magic()), "Inner record should have magic " + magicByte); + assertEquals(initialOffsets.get(i).longValue(), record.offset(), "Offset should not change"); + assertEquals(utf8(initialRecords.get(i).key()), utf8(record.key()), "Key should not change"); + assertEquals(utf8(initialRecords.get(i).value()), utf8(record.value()), "Value should not change"); + assertFalse(record.hasTimestampType(TimestampType.LOG_APPEND_TIME)); + if (batch.magic() == RecordBatch.MAGIC_VALUE_V0) { + assertEquals(RecordBatch.NO_TIMESTAMP, record.timestamp()); + assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); + assertTrue(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); + } else if (batch.magic() == RecordBatch.MAGIC_VALUE_V1) { + assertEquals(initialRecords.get(i).timestamp(), record.timestamp(), "Timestamp should not change"); + assertTrue(record.hasTimestampType(TimestampType.CREATE_TIME)); + assertFalse(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); + } else { + assertEquals(initialRecords.get(i).timestamp(), record.timestamp(), "Timestamp should not change"); + assertFalse(record.hasTimestampType(TimestampType.CREATE_TIME)); + assertFalse(record.hasTimestampType(TimestampType.NO_TIMESTAMP_TYPE)); + assertArrayEquals(initialRecords.get(i).headers(), record.headers(), "Headers should not change"); + } + i += 1; + } } } + assertEquals(initialOffsets.size(), i); } private static List batches(Records buffer) { diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java index f102bd5fd9534..e6549d072278a 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsBuilderTest.java @@ -17,11 +17,13 @@ package org.apache.kafka.common.record; import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.message.LeaderChangeMessage; import org.apache.kafka.common.message.LeaderChangeMessage.Voter; import org.apache.kafka.common.utils.BufferSupplier; import org.apache.kafka.common.utils.ByteBufferOutputStream; import org.apache.kafka.common.utils.CloseableIterator; +import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.test.TestUtils; @@ -31,6 +33,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.ArgumentsProvider; import org.junit.jupiter.params.provider.ArgumentsSource; +import org.junit.jupiter.params.provider.EnumSource; import java.nio.ByteBuffer; import java.util.ArrayList; @@ -47,8 +50,10 @@ import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V0; import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V1; import static org.apache.kafka.common.record.RecordBatch.MAGIC_VALUE_V2; +import static org.apache.kafka.common.utils.Utils.utf8; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -89,6 +94,8 @@ public Stream provideArguments(ExtensionContext context) { } } + private final Time time = Time.SYSTEM; + @Test public void testUnsupportedCompress() { BiFunction builderBiFunction = (magic, compression) -> @@ -528,6 +535,153 @@ public void testAppendAtInvalidOffset(Args args) { "b".getBytes(), null)); } + @ParameterizedTest + @EnumSource(CompressionType.class) + public void convertV2ToV1UsingMixedCreateAndLogAppendTime(CompressionType compressionType) { + ByteBuffer buffer = ByteBuffer.allocate(512); + Compression compression = Compression.of(compressionType).build(); + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, + compression, TimestampType.LOG_APPEND_TIME, 0L); + builder.append(10L, "1".getBytes(), "a".getBytes()); + builder.close(); + + int sizeExcludingTxnMarkers = buffer.position(); + + MemoryRecords.writeEndTransactionalMarker(buffer, 1L, System.currentTimeMillis(), 0, 15L, (short) 0, + new EndTransactionMarker(ControlRecordType.ABORT, 0)); + + int position = buffer.position(); + + builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compression, + TimestampType.CREATE_TIME, 1L); + builder.append(12L, "2".getBytes(), "b".getBytes()); + builder.append(13L, "3".getBytes(), "c".getBytes()); + builder.close(); + + sizeExcludingTxnMarkers += buffer.position() - position; + + MemoryRecords.writeEndTransactionalMarker(buffer, 14L, System.currentTimeMillis(), 0, 1L, (short) 0, + new EndTransactionMarker(ControlRecordType.COMMIT, 0)); + + buffer.flip(); + + Supplier> convertedRecordsSupplier = () -> + MemoryRecords.readableRecords(buffer).downConvert(MAGIC_VALUE_V1, 0, time); + + if (compression.type() != CompressionType.ZSTD) { + ConvertedRecords convertedRecords = convertedRecordsSupplier.get(); + MemoryRecords records = convertedRecords.records(); + + // Transactional markers are skipped when down converting to V1, so exclude them from size + verifyRecordsProcessingStats(compression, convertedRecords.recordConversionStats(), + 3, 3, records.sizeInBytes(), sizeExcludingTxnMarkers); + + List batches = Utils.toList(records.batches().iterator()); + if (compression.type() != CompressionType.NONE) { + assertEquals(2, batches.size()); + assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType()); + assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType()); + } else { + assertEquals(3, batches.size()); + assertEquals(TimestampType.LOG_APPEND_TIME, batches.get(0).timestampType()); + assertEquals(TimestampType.CREATE_TIME, batches.get(1).timestampType()); + assertEquals(TimestampType.CREATE_TIME, batches.get(2).timestampType()); + } + + List logRecords = Utils.toList(records.records().iterator()); + assertEquals(3, logRecords.size()); + assertEquals(ByteBuffer.wrap("1".getBytes()), logRecords.get(0).key()); + assertEquals(ByteBuffer.wrap("2".getBytes()), logRecords.get(1).key()); + assertEquals(ByteBuffer.wrap("3".getBytes()), logRecords.get(2).key()); + } else { + Exception e = assertThrows(UnsupportedCompressionTypeException.class, convertedRecordsSupplier::get); + assertEquals("Down-conversion of zstandard-compressed batches is not supported", e.getMessage()); + } + } + + @ParameterizedTest + @EnumSource(CompressionType.class) + public void convertToV1WithMixedV0AndV2Data(CompressionType compressionType) { + ByteBuffer buffer = ByteBuffer.allocate(512); + + Compression compression = Compression.of(compressionType).build(); + Supplier supplier = () -> MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V0, + compression, TimestampType.NO_TIMESTAMP_TYPE, 0L); + + if (compressionType == CompressionType.ZSTD) { + assertThrows(IllegalArgumentException.class, supplier::get); + } else { + MemoryRecordsBuilder builder = supplier.get(); + builder.append(RecordBatch.NO_TIMESTAMP, "1".getBytes(), "a".getBytes()); + builder.close(); + + builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, compression, + TimestampType.CREATE_TIME, 1L); + builder.append(11L, "2".getBytes(), "b".getBytes()); + builder.append(12L, "3".getBytes(), "c".getBytes()); + builder.close(); + + buffer.flip(); + + ConvertedRecords convertedRecords = MemoryRecords.readableRecords(buffer) + .downConvert(MAGIC_VALUE_V1, 0, time); + MemoryRecords records = convertedRecords.records(); + verifyRecordsProcessingStats(compression, convertedRecords.recordConversionStats(), 3, 2, + records.sizeInBytes(), buffer.limit()); + + List batches = Utils.toList(records.batches().iterator()); + if (compressionType != CompressionType.NONE) { + assertEquals(2, batches.size()); + assertEquals(RecordBatch.MAGIC_VALUE_V0, batches.get(0).magic()); + assertEquals(0, batches.get(0).baseOffset()); + assertEquals(MAGIC_VALUE_V1, batches.get(1).magic()); + assertEquals(1, batches.get(1).baseOffset()); + } else { + assertEquals(3, batches.size()); + assertEquals(RecordBatch.MAGIC_VALUE_V0, batches.get(0).magic()); + assertEquals(0, batches.get(0).baseOffset()); + assertEquals(MAGIC_VALUE_V1, batches.get(1).magic()); + assertEquals(1, batches.get(1).baseOffset()); + assertEquals(MAGIC_VALUE_V1, batches.get(2).magic()); + assertEquals(2, batches.get(2).baseOffset()); + } + + List logRecords = Utils.toList(records.records().iterator()); + assertEquals("1", utf8(logRecords.get(0).key())); + assertEquals("2", utf8(logRecords.get(1).key())); + assertEquals("3", utf8(logRecords.get(2).key())); + + convertedRecords = MemoryRecords.readableRecords(buffer).downConvert(MAGIC_VALUE_V1, 2L, time); + records = convertedRecords.records(); + + batches = Utils.toList(records.batches().iterator()); + logRecords = Utils.toList(records.records().iterator()); + + if (compressionType != CompressionType.NONE) { + assertEquals(2, batches.size()); + assertEquals(RecordBatch.MAGIC_VALUE_V0, batches.get(0).magic()); + assertEquals(0, batches.get(0).baseOffset()); + assertEquals(MAGIC_VALUE_V1, batches.get(1).magic()); + assertEquals(1, batches.get(1).baseOffset()); + assertEquals("1", utf8(logRecords.get(0).key())); + assertEquals("2", utf8(logRecords.get(1).key())); + assertEquals("3", utf8(logRecords.get(2).key())); + verifyRecordsProcessingStats(compression, convertedRecords.recordConversionStats(), 3, 2, + records.sizeInBytes(), buffer.limit()); + } else { + assertEquals(2, batches.size()); + assertEquals(RecordBatch.MAGIC_VALUE_V0, batches.get(0).magic()); + assertEquals(0, batches.get(0).baseOffset()); + assertEquals(MAGIC_VALUE_V1, batches.get(1).magic()); + assertEquals(2, batches.get(1).baseOffset()); + assertEquals("1", utf8(logRecords.get(0).key())); + assertEquals("3", utf8(logRecords.get(1).key())); + verifyRecordsProcessingStats(compression, convertedRecords.recordConversionStats(), 3, 1, + records.sizeInBytes(), buffer.limit()); + } + } + } + @ParameterizedTest @ArgumentsSource(MemoryRecordsBuilderArgumentsProvider.class) public void shouldThrowIllegalStateExceptionOnBuildWhenAborted(Args args) { @@ -634,6 +788,31 @@ public Stream provideArguments(ExtensionContext context) { } } + private void verifyRecordsProcessingStats(Compression compression, RecordValidationStats processingStats, + int numRecords, int numRecordsConverted, long finalBytes, + long preConvertedBytes) { + assertNotNull(processingStats, "Records processing info is null"); + assertEquals(numRecordsConverted, processingStats.numRecordsConverted()); + // Since nanoTime accuracy on build machines may not be sufficient to measure small conversion times, + // only check if the value >= 0. Default is -1, so this checks if time has been recorded. + assertTrue(processingStats.conversionTimeNanos() >= 0, "Processing time not recorded: " + processingStats); + long tempBytes = processingStats.temporaryMemoryBytes(); + if (compression.type() == CompressionType.NONE) { + if (numRecordsConverted == 0) + assertEquals(finalBytes, tempBytes); + else if (numRecordsConverted == numRecords) + assertEquals(preConvertedBytes + finalBytes, tempBytes); + else { + assertTrue(tempBytes > finalBytes && tempBytes < finalBytes + preConvertedBytes, + String.format("Unexpected temp bytes %d final %d pre %d", tempBytes, finalBytes, preConvertedBytes)); + } + } else { + long compressedBytes = finalBytes - Records.LOG_OVERHEAD - LegacyRecord.RECORD_OVERHEAD_V0; + assertTrue(tempBytes > compressedBytes, + String.format("Uncompressed size expected temp=%d, compressed=%d", tempBytes, compressedBytes)); + } + } + private ByteBuffer allocateBuffer(int size, Args args) { ByteBuffer buffer = ByteBuffer.allocate(size); buffer.position(args.bufferOffset); diff --git a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java index 7092928010b30..3818976e423fd 100644 --- a/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/record/MemoryRecordsTest.java @@ -39,14 +39,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.LinkedHashMap; import java.util.List; -import java.util.Map; import java.util.OptionalLong; import java.util.function.BiFunction; import java.util.function.Supplier; -import java.util.stream.Collectors; -import java.util.stream.IntStream; import java.util.stream.Stream; import static java.util.Arrays.asList; @@ -135,6 +131,10 @@ public void testIterator(Args args) { ByteBuffer buffer = ByteBuffer.allocate(1024); int partitionLeaderEpoch = 998; + MemoryRecordsBuilder builder = new MemoryRecordsBuilder(buffer, magic, compression, + TimestampType.CREATE_TIME, firstOffset, logAppendTime, pid, epoch, firstSequence, false, false, + partitionLeaderEpoch, buffer.limit()); + SimpleRecord[] records = new SimpleRecord[] { new SimpleRecord(1L, "a".getBytes(), "1".getBytes()), new SimpleRecord(2L, "b".getBytes(), "2".getBytes()), @@ -144,30 +144,10 @@ public void testIterator(Args args) { new SimpleRecord(6L, (byte[]) null, null) }; - final MemoryRecords memoryRecords; - try (var builder = new MemoryRecordsBuilder( - buffer, - magic, - compression, - TimestampType.CREATE_TIME, - firstOffset, - logAppendTime, - pid, - epoch, - firstSequence, - false, - false, - partitionLeaderEpoch, - buffer.limit() - ) - ) { - for (SimpleRecord record : records) { - builder.append(record); - } - - memoryRecords = builder.build(); - } + for (SimpleRecord record : records) + builder.append(record); + MemoryRecords memoryRecords = builder.build(); for (int iteration = 0; iteration < 2; iteration++) { int total = 0; for (RecordBatch batch : memoryRecords.batches()) { @@ -1088,146 +1068,6 @@ public void testUnsupportedCompress() { }); } - @ParameterizedTest - @ArgumentsSource(MemoryRecordsArgumentsProvider.class) - public void testSlice(Args args) { - // Create a MemoryRecords instance with multiple batches. Prior RecordBatch.MAGIC_VALUE_V2, - // every append in a batch is a new batch. After RecordBatch.MAGIC_VALUE_V2, we can have multiple - // batches in a single MemoryRecords instance. Though with compression, we can have multiple - // appends resulting in a single batch prior RecordBatch.MAGIC_VALUE_V2 as well. - LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); - recordsPerOffset.put(args.firstOffset, 3); - recordsPerOffset.put(args.firstOffset + 6L, 8); - recordsPerOffset.put(args.firstOffset + 15L, 4); - MemoryRecords records = createMemoryRecords(args, recordsPerOffset); - - // Test slicing from start - MemoryRecords sliced = records.slice(0, records.sizeInBytes()); - assertEquals(records.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(records.validBytes(), sliced.validBytes()); - TestUtils.checkEquals(records.batches(), sliced.batches()); - - List items = batches(records); - // Test slicing first message. - RecordBatch first = items.get(0); - sliced = records.slice(first.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes()); - assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); - assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); - - // Read from second message and size is past the end of the file. - sliced = records.slice(first.sizeInBytes(), records.sizeInBytes()); - assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); - assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); - - // Read from second message and position + size overflows. - sliced = records.slice(first.sizeInBytes(), Integer.MAX_VALUE); - assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); - assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); - - // Read a single message starting from second message. - RecordBatch second = items.get(1); - sliced = records.slice(first.sizeInBytes(), second.sizeInBytes()); - assertEquals(second.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(Collections.singletonList(second), batches(sliced), "Read a single message starting from the second message"); - - // Read from already sliced view. - List remainingItems = IntStream.range(0, items.size()).filter(i -> i != 0 && i != 1).mapToObj(items::get).collect(Collectors.toList()); - int remainingSize = remainingItems.stream().mapToInt(RecordBatch::sizeInBytes).sum(); - sliced = records.slice(first.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes()) - .slice(second.sizeInBytes(), records.sizeInBytes() - first.sizeInBytes() - second.sizeInBytes()); - assertEquals(remainingSize, sliced.sizeInBytes()); - assertEquals(remainingItems, batches(sliced), "Read starting from the third message"); - - // Read from second message and size is past the end of the file on the already sliced view. - sliced = records.slice(1, records.sizeInBytes() - 1) - .slice(first.sizeInBytes() - 1, records.sizeInBytes()); - assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); - assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); - - // Read from second message and position + size overflows on the already sliced view. - sliced = records.slice(1, records.sizeInBytes() - 1) - .slice(first.sizeInBytes() - 1, Integer.MAX_VALUE); - assertEquals(records.sizeInBytes() - first.sizeInBytes(), sliced.sizeInBytes()); - assertEquals(items.subList(1, items.size()), batches(sliced), "Read starting from the second message"); - assertTrue(sliced.validBytes() <= sliced.sizeInBytes()); - } - - @ParameterizedTest - @ArgumentsSource(MemoryRecordsArgumentsProvider.class) - public void testSliceInvalidPosition(Args args) { - MemoryRecords records = createMemoryRecords(args, Map.of(args.firstOffset, 1)); - assertThrows(IllegalArgumentException.class, () -> records.slice(-1, records.sizeInBytes())); - assertThrows(IllegalArgumentException.class, () -> records.slice(records.sizeInBytes() + 1, records.sizeInBytes())); - } - - @ParameterizedTest - @ArgumentsSource(MemoryRecordsArgumentsProvider.class) - public void testSliceInvalidSize(Args args) { - MemoryRecords records = createMemoryRecords(args, Map.of(args.firstOffset, 1)); - assertThrows(IllegalArgumentException.class, () -> records.slice(0, -1)); - } - - @Test - public void testSliceEmptyRecords() { - MemoryRecords empty = MemoryRecords.EMPTY; - Records sliced = empty.slice(0, 0); - assertEquals(0, sliced.sizeInBytes()); - assertEquals(0, batches(sliced).size()); - } - - /** - * Test slice when already sliced memory records have start position greater than available bytes - * in the memory records. - */ - @ParameterizedTest - @ArgumentsSource(MemoryRecordsArgumentsProvider.class) - public void testSliceForAlreadySlicedMemoryRecords(Args args) { - LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); - recordsPerOffset.put(args.firstOffset, 5); - recordsPerOffset.put(args.firstOffset + 5L, 10); - recordsPerOffset.put(args.firstOffset + 15L, 12); - recordsPerOffset.put(args.firstOffset + 27L, 4); - - MemoryRecords records = createMemoryRecords(args, recordsPerOffset); - List items = batches(records.slice(0, records.sizeInBytes())); - - // Slice from third message until the end. - int position = IntStream.range(0, 2).map(i -> items.get(i).sizeInBytes()).sum(); - Records sliced = records.slice(position, records.sizeInBytes() - position); - assertEquals(records.sizeInBytes() - position, sliced.sizeInBytes()); - assertEquals(items.subList(2, items.size()), batches(sliced), "Read starting from the third message"); - - // Further slice the already sliced memory records, from fourth message until the end. Now the - // bytes available in the sliced records are less than the moved position from original records. - position = items.get(2).sizeInBytes(); - Records finalSliced = sliced.slice(position, sliced.sizeInBytes() - position); - assertEquals(sliced.sizeInBytes() - position, finalSliced.sizeInBytes()); - assertEquals(items.subList(3, items.size()), batches(finalSliced), "Read starting from the fourth message"); - } - - private MemoryRecords createMemoryRecords(Args args, Map recordsPerOffset) { - ByteBuffer buffer = ByteBuffer.allocate(1024); - recordsPerOffset.forEach((offset, numOfRecords) -> { - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, args.magic, args.compression, - TimestampType.CREATE_TIME, offset); - for (int i = 0; i < numOfRecords; i++) { - builder.appendWithOffset(offset + i, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - } - builder.close(); - }); - buffer.flip(); - - return MemoryRecords.readableRecords(buffer); - } - - private static List batches(Records buffer) { - return TestUtils.toList(buffer.batches()); - } - private static class RetainNonNullKeysFilter extends MemoryRecords.RecordFilter { public RetainNonNullKeysFilter() { super(0, 0); diff --git a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java index d9d42d4d92295..c66b0411fc510 100644 --- a/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/replica/ReplicaSelectorTest.java @@ -46,8 +46,8 @@ public void testSameRackSelector() { ReplicaSelector selector = new RackAwareReplicaSelector(); Optional selected = selector.select(tp, metadata("rack-b"), partitionView); assertOptional(selected, replicaInfo -> { - assertEquals("rack-b", replicaInfo.endpoint().rack(), "Expect replica to be in rack-b"); - assertEquals(3, replicaInfo.endpoint().id(), "Expected replica 3 since it is more caught-up"); + assertEquals(replicaInfo.endpoint().rack(), "rack-b", "Expect replica to be in rack-b"); + assertEquals(replicaInfo.endpoint().id(), 3, "Expected replica 3 since it is more caught-up"); }); selected = selector.select(tp, metadata("not-a-rack"), partitionView); @@ -57,7 +57,7 @@ public void testSameRackSelector() { selected = selector.select(tp, metadata("rack-a"), partitionView); assertOptional(selected, replicaInfo -> { - assertEquals("rack-a", replicaInfo.endpoint().rack(), "Expect replica to be in rack-a"); + assertEquals(replicaInfo.endpoint().rack(), "rack-a", "Expect replica to be in rack-a"); assertEquals(replicaInfo, leader, "Expect the leader since it's in rack-a"); }); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponseTest.java index 6a100158558e6..490a5ba95460f 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/AddPartitionsToTxnResponseTest.java @@ -32,7 +32,6 @@ import org.junit.jupiter.params.ParameterizedTest; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -58,7 +57,7 @@ public class AddPartitionsToTxnResponseTest { @BeforeEach public void setUp() { - expectedErrorCounts = new EnumMap<>(Errors.class); + expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(errorOne, 1); expectedErrorCounts.put(errorTwo, 1); @@ -108,7 +107,7 @@ public void testParse(short version) { .setThrottleTimeMs(throttleTimeMs); AddPartitionsToTxnResponse response = new AddPartitionsToTxnResponse(data); - Map newExpectedErrorCounts = new EnumMap<>(Errors.class); + Map newExpectedErrorCounts = new HashMap<>(); newExpectedErrorCounts.put(Errors.NONE, 1); // top level error newExpectedErrorCounts.put(errorOne, 2); newExpectedErrorCounts.put(errorTwo, 1); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java index 85f35e683ec8a..1714c053a9bab 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/CreateAclsRequestTest.java @@ -23,13 +23,13 @@ import org.apache.kafka.common.acl.AclPermissionType; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.CreateAclsRequestData; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.List; import java.util.stream.Collectors; @@ -44,6 +44,9 @@ public class CreateAclsRequestTest { private static final AclBinding LITERAL_ACL1 = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "foo", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.DENY)); + private static final AclBinding LITERAL_ACL2 = new AclBinding(new ResourcePattern(ResourceType.GROUP, "group", PatternType.LITERAL), + new AccessControlEntry("User:*", "127.0.0.1", AclOperation.WRITE, AclPermissionType.ALLOW)); + private static final AclBinding PREFIXED_ACL1 = new AclBinding(new ResourcePattern(ResourceType.GROUP, "prefix", PatternType.PREFIXED), new AccessControlEntry("User:*", "127.0.0.1", AclOperation.CREATE, AclPermissionType.ALLOW)); @@ -63,9 +66,9 @@ public void shouldThrowOnIfUnknown() { @Test public void shouldRoundTripV1() { final CreateAclsRequest original = new CreateAclsRequest(data(LITERAL_ACL1, PREFIXED_ACL1), V1); - final Readable readable = original.serialize(); + final ByteBuffer buffer = original.serialize(); - final CreateAclsRequest result = CreateAclsRequest.parse(readable, V1); + final CreateAclsRequest result = CreateAclsRequest.parse(buffer, V1); assertRequestEquals(original, result); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java index 126a19a8160d7..0f7cc66de850e 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsRequestTest.java @@ -23,13 +23,13 @@ import org.apache.kafka.common.acl.AclPermissionType; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.DeleteAclsRequestData; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePatternFilter; import org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.stream.Collectors; @@ -67,9 +67,9 @@ public void shouldRoundTripV1() { final DeleteAclsRequest original = new DeleteAclsRequest.Builder( requestData(LITERAL_FILTER, PREFIXED_FILTER, ANY_FILTER) ).build(V1); - final Readable readable = original.serialize(); + final ByteBuffer buffer = original.serialize(); - final DeleteAclsRequest result = DeleteAclsRequest.parse(readable, V1); + final DeleteAclsRequest result = DeleteAclsRequest.parse(buffer, V1); assertRequestEquals(original, result); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java index bf199275db868..a4abf88c83703 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteAclsResponseTest.java @@ -22,12 +22,13 @@ import org.apache.kafka.common.message.DeleteAclsResponseData; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult; import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsMatchingAcl; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; + import static java.util.Arrays.asList; import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -97,9 +98,9 @@ public void shouldRoundTripV1() { .setThrottleTimeMs(10) .setFilterResults(asList(LITERAL_RESPONSE, PREFIXED_RESPONSE)), V1); - final Readable readable = original.serialize(V1); + final ByteBuffer buffer = original.serialize(V1); - final DeleteAclsResponse result = DeleteAclsResponse.parse(readable, V1); + final DeleteAclsResponse result = DeleteAclsResponse.parse(buffer, V1); assertEquals(original.filterResults(), result.filterResults()); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java index 3d911de99f91f..5d4a684919079 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DeleteGroupsResponseTest.java @@ -24,7 +24,6 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; -import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -61,7 +60,7 @@ public void testGetErrorWithExistingGroupIds() { expectedErrors.put(GROUP_ID_2, Errors.GROUP_AUTHORIZATION_FAILED); assertEquals(expectedErrors, DELETE_GROUPS_RESPONSE.errors()); - Map expectedErrorCounts = new EnumMap<>(Errors.class); + Map expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(Errors.NONE, 1); expectedErrorCounts.put(Errors.GROUP_AUTHORIZATION_FAILED, 1); assertEquals(expectedErrorCounts, DELETE_GROUPS_RESPONSE.errorCounts()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java index 5062677e12300..e09c1eee72155 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsRequestTest.java @@ -33,6 +33,9 @@ public class DescribeAclsRequestTest { private static final short V1 = 1; + private static final AclBindingFilter LITERAL_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.TOPIC, "foo", PatternType.LITERAL), + new AccessControlEntryFilter("User:ANONYMOUS", "127.0.0.1", AclOperation.READ, AclPermissionType.DENY)); + private static final AclBindingFilter PREFIXED_FILTER = new AclBindingFilter(new ResourcePatternFilter(ResourceType.GROUP, "prefix", PatternType.PREFIXED), new AccessControlEntryFilter("User:*", "127.0.0.1", AclOperation.CREATE, AclPermissionType.ALLOW)); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java index c4ec20385e102..243b3a80e6f29 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/DescribeAclsResponseTest.java @@ -25,13 +25,13 @@ import org.apache.kafka.common.message.DescribeAclsResponseData.AclDescription; import org.apache.kafka.common.message.DescribeAclsResponseData.DescribeAclsResource; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.resource.PatternType; import org.apache.kafka.common.resource.ResourcePattern; import org.apache.kafka.common.resource.ResourceType; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; import java.util.HashSet; @@ -74,6 +74,12 @@ public class DescribeAclsResponseTest { PatternType.LITERAL, Collections.singletonList(ALLOW_CREATE_ACL)); + private static final DescribeAclsResource LITERAL_ACL2 = buildResource( + "group", + ResourceType.GROUP, + PatternType.LITERAL, + Collections.singletonList(DENY_READ_ACL)); + @Test public void shouldThrowIfUnknown() { assertThrows(IllegalArgumentException.class, @@ -84,9 +90,9 @@ public void shouldThrowIfUnknown() { public void shouldRoundTripV1() { List resources = Arrays.asList(LITERAL_ACL1, PREFIXED_ACL1); final DescribeAclsResponse original = buildResponse(100, Errors.NONE, resources); - final Readable readable = original.serialize(V1); + final ByteBuffer buffer = original.serialize(V1); - final DescribeAclsResponse result = DescribeAclsResponse.parse(readable, V1); + final DescribeAclsResponse result = DescribeAclsResponse.parse(buffer, V1); assertResponseEquals(original, result); final DescribeAclsResponse result2 = buildResponse(100, Errors.NONE, DescribeAclsResponse.aclsResources( diff --git a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java index c70b7eda5da98..60d10a689394c 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/JoinGroupRequestTest.java @@ -19,15 +19,14 @@ import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.JoinGroupRequestData; -import org.apache.kafka.common.protocol.MessageUtil; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; import java.util.Arrays; -import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; public class JoinGroupRequestTest { @@ -48,9 +47,12 @@ public void shouldThrowOnInvalidGroupInstanceIds() { String[] invalidGroupInstanceIds = {"", "foo bar", "..", "foo:bar", "foo=bar", ".", new String(longString)}; for (String instanceId : invalidGroupInstanceIds) { - assertThrows(InvalidConfigurationException.class, - () -> JoinGroupRequest.validateGroupInstanceId(instanceId), - "InvalidConfigurationException expected as instance id is invalid."); + try { + JoinGroupRequest.validateGroupInstanceId(instanceId); + fail("No exception was thrown for invalid instance id: " + instanceId); + } catch (InvalidConfigurationException e) { + // Good + } } } @Test @@ -63,20 +65,4 @@ public void testRequestVersionCompatibilityFailBuild() { .setProtocolType("consumer") ).build((short) 4)); } - - @Test - public void testRebalanceTimeoutDefaultsToSessionTimeoutV0() { - int sessionTimeoutMs = 30000; - short version = 0; - - var buffer = MessageUtil.toByteBufferAccessor(new JoinGroupRequestData() - .setGroupId("groupId") - .setMemberId("consumerId") - .setProtocolType("consumer") - .setSessionTimeoutMs(sessionTimeoutMs), version); - - JoinGroupRequest request = JoinGroupRequest.parse(buffer, version); - assertEquals(sessionTimeoutMs, request.data().sessionTimeoutMs()); - assertEquals(sessionTimeoutMs, request.data().rebalanceTimeoutMs()); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java index 4d73c042c5bb7..194d485b4d06e 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/LeaveGroupResponseTest.java @@ -22,16 +22,16 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; -import java.util.EnumMap; +import java.util.HashMap; import java.util.List; import java.util.Map; @@ -62,7 +62,7 @@ public void setUp() { @Test public void testConstructorWithMemberResponses() { - Map expectedErrorCounts = new EnumMap<>(Errors.class); + Map expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(Errors.NONE, 1); // top level expectedErrorCounts.put(Errors.UNKNOWN_MEMBER_ID, 1); expectedErrorCounts.put(Errors.FENCED_INSTANCE_ID, 1); @@ -111,9 +111,9 @@ public void testEqualityWithSerialization() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { LeaveGroupResponse primaryResponse = LeaveGroupResponse.parse( - MessageUtil.toByteBufferAccessor(responseData, version), version); + MessageUtil.toByteBuffer(responseData, version), version); LeaveGroupResponse secondaryResponse = LeaveGroupResponse.parse( - MessageUtil.toByteBufferAccessor(responseData, version), version); + MessageUtil.toByteBuffer(responseData, version), version); assertEquals(primaryResponse, primaryResponse); assertEquals(primaryResponse, secondaryResponse); @@ -130,7 +130,7 @@ public void testParse() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.LEAVE_GROUP.allVersions()) { - Readable buffer = MessageUtil.toByteBufferAccessor(data, version); + ByteBuffer buffer = MessageUtil.toByteBuffer(data, version); LeaveGroupResponse leaveGroupResponse = LeaveGroupResponse.parse(buffer, version); assertEquals(expectedErrorCounts, leaveGroupResponse.errorCounts()); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java index 48542c1a2fd7d..fd1d585206497 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ListOffsetsRequestTest.java @@ -54,7 +54,7 @@ public void testDuplicatePartitions() { ListOffsetsRequestData data = new ListOffsetsRequestData() .setTopics(topics) .setReplicaId(-1); - ListOffsetsRequest request = ListOffsetsRequest.parse(MessageUtil.toByteBufferAccessor(data, (short) 1), (short) 1); + ListOffsetsRequest request = ListOffsetsRequest.parse(MessageUtil.toByteBuffer(data, (short) 1), (short) 1); assertEquals(Collections.singleton(new TopicPartition("topic", 0)), request.duplicatePartitions()); assertEquals(0, data.timeoutMs()); // default value } @@ -127,23 +127,19 @@ public void testListOffsetsRequestOldestVersion() { .forConsumer(false, IsolationLevel.READ_COMMITTED); ListOffsetsRequest.Builder maxTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, true, false, false, false); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, true, false, false); ListOffsetsRequest.Builder requireEarliestLocalTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, true, false, false); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, true, false); ListOffsetsRequest.Builder requireTieredStorageTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, true, false); + .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, true); - ListOffsetsRequest.Builder requireEarliestPendingUploadTimestampRequestBuilder = ListOffsetsRequest.Builder - .forConsumer(false, IsolationLevel.READ_UNCOMMITTED, false, false, false, true); - - assertEquals((short) 1, consumerRequestBuilder.oldestAllowedVersion()); + assertEquals((short) 0, consumerRequestBuilder.oldestAllowedVersion()); assertEquals((short) 1, requireTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 2, requestCommittedRequestBuilder.oldestAllowedVersion()); assertEquals((short) 7, maxTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 8, requireEarliestLocalTimestampRequestBuilder.oldestAllowedVersion()); assertEquals((short) 9, requireTieredStorageTimestampRequestBuilder.oldestAllowedVersion()); - assertEquals((short) 11, requireEarliestPendingUploadTimestampRequestBuilder.oldestAllowedVersion()); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java index 9cd95cfec769e..161a4dd5f1192 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitRequestTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.OffsetCommitRequestData; import org.apache.kafka.common.message.OffsetCommitRequestData.OffsetCommitRequestPartition; @@ -46,8 +45,6 @@ public class OffsetCommitRequestTest { protected static String groupId = "groupId"; protected static String memberId = "consumerId"; protected static String groupInstanceId = "groupInstanceId"; - protected static Uuid topicIdOne = Uuid.randomUuid(); - protected static Uuid topicIdTwo = Uuid.randomUuid(); protected static String topicOne = "topicOne"; protected static String topicTwo = "topicTwo"; protected static int partitionOne = 1; @@ -64,7 +61,6 @@ public class OffsetCommitRequestTest { public void setUp() { List topics = Arrays.asList( new OffsetCommitRequestTopic() - .setTopicId(topicIdOne) .setName(topicOne) .setPartitions(Collections.singletonList( new OffsetCommitRequestPartition() @@ -74,7 +70,6 @@ public void setUp() { .setCommittedMetadata(metadata) )), new OffsetCommitRequestTopic() - .setTopicId(topicIdTwo) .setName(topicTwo) .setPartitions(Collections.singletonList( new OffsetCommitRequestPartition() @@ -95,7 +90,7 @@ public void testConstructor() { expectedOffsets.put(new TopicPartition(topicOne, partitionOne), offset); expectedOffsets.put(new TopicPartition(topicTwo, partitionTwo), offset); - OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames(data); + OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder(data); for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { OffsetCommitRequest request = builder.build(version); @@ -110,7 +105,7 @@ public void testConstructor() { @Test public void testVersionSupportForGroupInstanceId() { - OffsetCommitRequest.Builder builder = OffsetCommitRequest.Builder.forTopicNames( + OffsetCommitRequest.Builder builder = new OffsetCommitRequest.Builder( new OffsetCommitRequestData() .setGroupId(groupId) .setMemberId(memberId) @@ -132,14 +127,12 @@ public void testGetErrorResponse() { OffsetCommitResponseData expectedResponse = new OffsetCommitResponseData() .setTopics(Arrays.asList( new OffsetCommitResponseTopic() - .setTopicId(topicIdOne) .setName(topicOne) .setPartitions(Collections.singletonList( new OffsetCommitResponsePartition() .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code()) .setPartitionIndex(partitionOne))), new OffsetCommitResponseTopic() - .setTopicId(topicIdTwo) .setName(topicTwo) .setPartitions(Collections.singletonList( new OffsetCommitResponsePartition() diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java index dcfb988116797..cd73df6e33e05 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetCommitResponseTest.java @@ -23,14 +23,13 @@ import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.MessageUtil; -import org.apache.kafka.common.protocol.Readable; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.Arrays; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -55,7 +54,7 @@ public class OffsetCommitResponseTest { @BeforeEach public void setUp() { - expectedErrorCounts = new EnumMap<>(Errors.class); + expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(errorOne, 1); expectedErrorCounts.put(errorTwo, 1); @@ -88,8 +87,8 @@ public void testParse() { .setThrottleTimeMs(throttleTimeMs); for (short version : ApiKeys.OFFSET_COMMIT.allVersions()) { - Readable readable = MessageUtil.toByteBufferAccessor(data, version); - OffsetCommitResponse response = OffsetCommitResponse.parse(readable, version); + ByteBuffer buffer = MessageUtil.toByteBuffer(data, version); + OffsetCommitResponse response = OffsetCommitResponse.parse(buffer, version); assertEquals(expectedErrorCounts, response.errorCounts()); if (version >= 3) { diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java index 9ea1bc540d78d..1098925e42a0c 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchRequestTest.java @@ -16,250 +16,220 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.message.OffsetFetchRequestData; -import org.apache.kafka.common.message.OffsetFetchResponseData; +import org.apache.kafka.common.message.OffsetFetchRequestData.OffsetFetchRequestTopics; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; +import org.apache.kafka.common.requests.OffsetFetchRequest.Builder; +import org.apache.kafka.common.requests.OffsetFetchRequest.NoBatchedOffsetFetchRequestException; +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Optional; +import static org.apache.kafka.common.requests.AbstractResponse.DEFAULT_THROTTLE_TIME; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class OffsetFetchRequestTest { - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testWithMultipleGroups(short version) { - var data = new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1, 2)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp2") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("bar") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1, 2)) - )) - )); - var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); - - if (version < 8) { - assertThrows(OffsetFetchRequest.NoBatchedOffsetFetchRequestException.class, () -> builder.build(version)); - } else { - assertEquals(data, builder.build(version).data()); - } - } - - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testThrowOnFetchStableOffsetsUnsupported(short version) { - var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(true) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1, 2)) - )) - )), - true, - true - ); - - if (version < 7) { - assertThrows(UnsupportedVersionException.class, () -> builder.build(version)); - } else { - builder.build(version); - } - } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testSingleGroup(short version) { - var data = new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1, 2)) - )) + private final String topicOne = "topic1"; + private final int partitionOne = 1; + private final String topicTwo = "topic2"; + private final int partitionTwo = 2; + private final String topicThree = "topic3"; + private final String group1 = "group1"; + private final String group2 = "group2"; + private final String group3 = "group3"; + private final String group4 = "group4"; + private final String group5 = "group5"; + private final List groups = Arrays.asList(group1, group2, group3, group4, group5); + + private final List listOfVersionsNonBatchOffsetFetch = Arrays.asList(0, 1, 2, 3, 4, 5, 6, 7); + + + private OffsetFetchRequest.Builder builder; + + @Test + public void testConstructor() { + List partitions = Arrays.asList( + new TopicPartition(topicOne, partitionOne), + new TopicPartition(topicTwo, partitionTwo)); + int throttleTimeMs = 10; + + Map expectedData = new HashMap<>(); + for (TopicPartition partition : partitions) { + expectedData.put(partition, new PartitionData( + OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), + OffsetFetchResponse.NO_METADATA, + Errors.NONE )); - var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); + } - if (version < 8) { - var expectedRequest = new OffsetFetchRequestData() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopic() - .setName("foo") - .setPartitionIndexes(List.of(0, 1, 2)) - )); - assertEquals(expectedRequest, builder.build(version).data()); - } else { - assertEquals(data, builder.build(version).data()); + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version < 8) { + builder = new OffsetFetchRequest.Builder( + group1, + false, + partitions, + false); + OffsetFetchRequest request = builder.build(version); + assertFalse(request.isAllPartitions()); + assertEquals(group1, request.groupId()); + assertEquals(partitions, request.partitions()); + + OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); + assertEquals(Errors.NONE, response.error()); + assertFalse(response.hasError()); + assertEquals(Collections.singletonMap(Errors.NONE, version <= (short) 1 ? 3 : 1), response.errorCounts(), + "Incorrect error count for version " + version); + + if (version <= 1) { + assertEquals(expectedData, response.responseDataV0ToV7()); + } + + if (version >= 3) { + assertEquals(throttleTimeMs, response.throttleTimeMs()); + } else { + assertEquals(DEFAULT_THROTTLE_TIME, response.throttleTimeMs()); + } + } else { + builder = new Builder(Collections.singletonMap(group1, partitions), false, false); + OffsetFetchRequest request = builder.build(version); + Map> groupToPartitionMap = + request.groupIdsToPartitions(); + Map> groupToTopicMap = + request.groupIdsToTopics(); + assertFalse(request.isAllPartitionsForGroup(group1)); + assertTrue(groupToPartitionMap.containsKey(group1) && groupToTopicMap.containsKey( + group1)); + assertEquals(partitions, groupToPartitionMap.get(group1)); + OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); + assertEquals(Errors.NONE, response.groupLevelError(group1)); + assertFalse(response.groupHasError(group1)); + assertEquals(Collections.singletonMap(Errors.NONE, 1), response.errorCounts(), + "Incorrect error count for version " + version); + assertEquals(throttleTimeMs, response.throttleTimeMs()); + } } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testSingleGroupWithAllTopics(short version) { - var data = new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(null) - )); - var builder = OffsetFetchRequest.Builder.forTopicIdsOrNames(data, false, true); - - if (version < 2) { - assertThrows(UnsupportedVersionException.class, () -> builder.build(version)); - } else if (version < 8) { - var expectedRequest = new OffsetFetchRequestData() - .setGroupId("grp1") - .setTopics(null); - assertEquals(expectedRequest, builder.build(version).data()); - } else { - assertEquals(data, builder.build(version).data()); + @Test + public void testConstructorWithMultipleGroups() { + List topic1Partitions = Arrays.asList( + new TopicPartition(topicOne, partitionOne), + new TopicPartition(topicOne, partitionTwo)); + List topic2Partitions = Arrays.asList( + new TopicPartition(topicTwo, partitionOne), + new TopicPartition(topicTwo, partitionTwo)); + List topic3Partitions = Arrays.asList( + new TopicPartition(topicThree, partitionOne), + new TopicPartition(topicThree, partitionTwo)); + Map> groupToTp = new HashMap<>(); + groupToTp.put(group1, topic1Partitions); + groupToTp.put(group2, topic2Partitions); + groupToTp.put(group3, topic3Partitions); + groupToTp.put(group4, null); + groupToTp.put(group5, null); + int throttleTimeMs = 10; + + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + builder = new Builder(groupToTp, false, false); + OffsetFetchRequest request = builder.build(version); + Map> groupToPartitionMap = + request.groupIdsToPartitions(); + Map> groupToTopicMap = + request.groupIdsToTopics(); + assertEquals(groupToTp.keySet(), groupToTopicMap.keySet()); + assertEquals(groupToTp.keySet(), groupToPartitionMap.keySet()); + assertFalse(request.isAllPartitionsForGroup(group1)); + assertFalse(request.isAllPartitionsForGroup(group2)); + assertFalse(request.isAllPartitionsForGroup(group3)); + assertTrue(request.isAllPartitionsForGroup(group4)); + assertTrue(request.isAllPartitionsForGroup(group5)); + OffsetFetchResponse response = request.getErrorResponse(throttleTimeMs, Errors.NONE); + for (String group : groups) { + assertEquals(Errors.NONE, response.groupLevelError(group)); + assertFalse(response.groupHasError(group)); + } + assertEquals(Collections.singletonMap(Errors.NONE, 5), response.errorCounts(), + "Incorrect error count for version " + version); + assertEquals(throttleTimeMs, response.throttleTimeMs()); + } } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testGetErrorResponse(short version) { - var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1)) - )) - )), - false, - true - ).build(version); - - if (version < 2) { - var expectedResponse = new OffsetFetchResponseData() - .setThrottleTimeMs(1000) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH), - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setMetadata(OffsetFetchResponse.NO_METADATA) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - )) - )); - assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); - } else if (version < 8) { - var expectedResponse = new OffsetFetchResponseData() - .setThrottleTimeMs(1000) - .setErrorCode(Errors.INVALID_GROUP_ID.code()); - assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); - } else { - var expectedResponse = new OffsetFetchResponseData() - .setThrottleTimeMs(1000) - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("grp1") - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - )); - assertEquals(expectedResponse, request.getErrorResponse(1000, Errors.INVALID_GROUP_ID.exception()).data()); + @Test + public void testBuildThrowForUnsupportedBatchRequest() { + for (int version : listOfVersionsNonBatchOffsetFetch) { + Map> groupPartitionMap = new HashMap<>(); + groupPartitionMap.put(group1, null); + groupPartitionMap.put(group2, null); + builder = new Builder(groupPartitionMap, true, false); + final short finalVersion = (short) version; + assertThrows(NoBatchedOffsetFetchRequestException.class, () -> builder.build(finalVersion)); } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testGroups(short version) { - var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(Uuid.randomUuid()) - .setPartitionIndexes(List.of(0, 1, 2)) - )) - )), - false, - true - ).build(version); - - if (version < 8) { - var expectedGroups = List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setPartitionIndexes(List.of(0, 1, 2)) - )) - ); - assertEquals(expectedGroups, request.groups()); - } else { - assertEquals(request.data().groups(), request.groups()); + @Test + public void testConstructorFailForUnsupportedRequireStable() { + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version < 8) { + // The builder needs to be initialized every cycle as the internal data `requireStable` flag is flipped. + builder = new OffsetFetchRequest.Builder(group1, true, null, false); + final short finalVersion = version; + if (version < 2) { + assertThrows(UnsupportedVersionException.class, () -> builder.build(finalVersion)); + } else { + OffsetFetchRequest request = builder.build(finalVersion); + assertEquals(group1, request.groupId()); + assertNull(request.partitions()); + assertTrue(request.isAllPartitions()); + if (version < 7) { + assertFalse(request.requireStable()); + } else { + assertTrue(request.requireStable()); + } + } + } else { + builder = new Builder(Collections.singletonMap(group1, null), true, false); + OffsetFetchRequest request = builder.build(version); + Map> groupToPartitionMap = + request.groupIdsToPartitions(); + Map> groupToTopicMap = + request.groupIdsToTopics(); + assertTrue(groupToPartitionMap.containsKey(group1) && groupToTopicMap.containsKey( + group1)); + assertNull(groupToPartitionMap.get(group1)); + assertTrue(request.isAllPartitionsForGroup(group1)); + assertTrue(request.requireStable()); + } } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 2) - public void testGroupsWithAllTopics(short version) { - var request = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(null) - )), - false, - true - ).build(version); - - if (version < 8) { - var expectedGroups = List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp1") - .setTopics(null) - ); - assertEquals(expectedGroups, request.groups()); - } else { - assertEquals(request.data().groups(), request.groups()); + @Test + public void testBuildThrowForUnsupportedRequireStable() { + for (int version : listOfVersionsNonBatchOffsetFetch) { + builder = new OffsetFetchRequest.Builder(group1, true, null, true); + if (version < 7) { + final short finalVersion = (short) version; + assertThrows(UnsupportedVersionException.class, () -> builder.build(finalVersion)); + } else { + OffsetFetchRequest request = builder.build((short) version); + assertTrue(request.requireStable()); + } } } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java index f3750784c6ed0..d0ef79b4479e8 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/OffsetFetchResponseTest.java @@ -16,349 +16,427 @@ */ package org.apache.kafka.common.requests; -import org.apache.kafka.common.errors.UnsupportedVersionException; -import org.apache.kafka.common.message.OffsetFetchRequestData; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.OffsetFetchResponseData; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseGroup; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartition; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponsePartitions; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopic; +import org.apache.kafka.common.message.OffsetFetchResponseData.OffsetFetchResponseTopics; import org.apache.kafka.common.protocol.ApiKeys; +import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData; +import org.apache.kafka.common.utils.Utils; -import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; -import java.util.List; +import java.util.Collections; +import java.util.HashMap; import java.util.Map; +import java.util.Optional; -import static org.apache.kafka.common.record.RecordBatch.NO_PARTITION_LEADER_EPOCH; -import static org.apache.kafka.common.requests.OffsetFetchResponse.INVALID_OFFSET; -import static org.apache.kafka.common.requests.OffsetFetchResponse.NO_METADATA; +import static org.apache.kafka.common.requests.AbstractResponse.DEFAULT_THROTTLE_TIME; import static org.junit.jupiter.api.Assertions.assertEquals; -import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class OffsetFetchResponseTest { - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testBuilderWithSingleGroup(short version) { - var group = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )); - - if (version < 8) { - assertEquals( - new OffsetFetchResponseData() - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )), - new OffsetFetchResponse.Builder(group).build(version).data() - ); - } else { - assertEquals( - new OffsetFetchResponseData() - .setGroups(List.of(group)), - new OffsetFetchResponse.Builder(group).build(version).data() - ); + private final int throttleTimeMs = 10; + private final int offset = 100; + private final String metadata = "metadata"; + + private final String groupOne = "group1"; + private final String groupTwo = "group2"; + private final String groupThree = "group3"; + private final String topicOne = "topic1"; + private final int partitionOne = 1; + private final Optional leaderEpochOne = Optional.of(1); + private final String topicTwo = "topic2"; + private final int partitionTwo = 2; + private final Optional leaderEpochTwo = Optional.of(2); + private final String topicThree = "topic3"; + private final int partitionThree = 3; + private final Optional leaderEpochThree = Optional.of(3); + + + private Map partitionDataMap; + + @BeforeEach + public void setUp() { + partitionDataMap = new HashMap<>(); + partitionDataMap.put(new TopicPartition(topicOne, partitionOne), new PartitionData( + offset, + leaderEpochOne, + metadata, + Errors.TOPIC_AUTHORIZATION_FAILED + )); + partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( + offset, + leaderEpochTwo, + metadata, + Errors.UNKNOWN_TOPIC_OR_PARTITION + )); + } + + @Test + public void testConstructor() { + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version < 8) { + OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NOT_COORDINATOR, partitionDataMap); + assertEquals(Errors.NOT_COORDINATOR, response.error()); + assertEquals(3, response.errorCounts().size()); + assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), + Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1)), + response.errorCounts()); + + assertEquals(throttleTimeMs, response.throttleTimeMs()); + + Map responseData = response.responseDataV0ToV7(); + assertEquals(partitionDataMap, responseData); + responseData.forEach((tp, data) -> assertTrue(data.hasError())); + } else { + OffsetFetchResponse response = new OffsetFetchResponse( + throttleTimeMs, + Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), + Collections.singletonMap(groupOne, partitionDataMap)); + assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne)); + assertEquals(3, response.errorCounts().size()); + assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), + Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1)), + response.errorCounts()); + + assertEquals(throttleTimeMs, response.throttleTimeMs()); + + Map responseData = response.partitionDataMap(groupOne); + assertEquals(partitionDataMap, responseData); + responseData.forEach((tp, data) -> assertTrue(data.hasError())); + } } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testBuilderWithMultipleGroups(short version) { - var groups = List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )), - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group2") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )) - ); + @Test + public void testConstructorWithMultipleGroups() { + Map> responseData = new HashMap<>(); + Map errorMap = new HashMap<>(); + Map pd1 = new HashMap<>(); + Map pd2 = new HashMap<>(); + Map pd3 = new HashMap<>(); + pd1.put(new TopicPartition(topicOne, partitionOne), new PartitionData( + offset, + leaderEpochOne, + metadata, + Errors.TOPIC_AUTHORIZATION_FAILED)); + pd2.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( + offset, + leaderEpochTwo, + metadata, + Errors.UNKNOWN_TOPIC_OR_PARTITION)); + pd3.put(new TopicPartition(topicThree, partitionThree), new PartitionData( + offset, + leaderEpochThree, + metadata, + Errors.NONE)); + responseData.put(groupOne, pd1); + responseData.put(groupTwo, pd2); + responseData.put(groupThree, pd3); + errorMap.put(groupOne, Errors.NOT_COORDINATOR); + errorMap.put(groupTwo, Errors.COORDINATOR_LOAD_IN_PROGRESS); + errorMap.put(groupThree, Errors.NONE); + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version >= 8) { + OffsetFetchResponse response = new OffsetFetchResponse( + throttleTimeMs, errorMap, responseData); - if (version < 8) { - assertThrows(UnsupportedVersionException.class, - () -> new OffsetFetchResponse.Builder(groups).build(version)); - } else { - assertEquals( - new OffsetFetchResponseData() - .setGroups(groups), - new OffsetFetchResponse.Builder(groups).build(version).data() - ); + assertEquals(Errors.NOT_COORDINATOR, response.groupLevelError(groupOne)); + assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, response.groupLevelError(groupTwo)); + assertEquals(Errors.NONE, response.groupLevelError(groupThree)); + assertTrue(response.groupHasError(groupOne)); + assertTrue(response.groupHasError(groupTwo)); + assertFalse(response.groupHasError(groupThree)); + assertEquals(5, response.errorCounts().size()); + assertEquals(Utils.mkMap(Utils.mkEntry(Errors.NOT_COORDINATOR, 1), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1), + Utils.mkEntry(Errors.UNKNOWN_TOPIC_OR_PARTITION, 1), + Utils.mkEntry(Errors.COORDINATOR_LOAD_IN_PROGRESS, 1), + Utils.mkEntry(Errors.NONE, 2)), + response.errorCounts()); + + assertEquals(throttleTimeMs, response.throttleTimeMs()); + + Map responseData1 = response.partitionDataMap(groupOne); + assertEquals(pd1, responseData1); + responseData1.forEach((tp, data) -> assertTrue(data.hasError())); + Map responseData2 = response.partitionDataMap(groupTwo); + assertEquals(pd2, responseData2); + responseData2.forEach((tp, data) -> assertTrue(data.hasError())); + Map responseData3 = response.partitionDataMap(groupThree); + assertEquals(pd3, responseData3); + responseData3.forEach((tp, data) -> assertFalse(data.hasError())); + } } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testGroupWithSingleGroup(short version) { - var data = new OffsetFetchResponseData(); - - if (version < 8) { - data.setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )); - } else { - data.setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("foo") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )) - )); + /** + * Test behavior changes over the versions. Refer to resources.common.messages.OffsetFetchResponse.json + */ + @Test + public void testStructBuild() { + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version < 8) { + partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( + offset, + leaderEpochTwo, + metadata, + Errors.GROUP_AUTHORIZATION_FAILED + )); + + OffsetFetchResponse latestResponse = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap); + OffsetFetchResponseData data = new OffsetFetchResponseData( + new ByteBufferAccessor(latestResponse.serialize(version)), version); + + OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version); + + if (version <= 1) { + assertEquals(Errors.NONE.code(), data.errorCode()); + + // Partition level error populated in older versions. + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, oldResponse.error()); + assertEquals(Utils.mkMap(Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 2), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), + oldResponse.errorCounts()); + } else { + assertEquals(Errors.NONE.code(), data.errorCode()); + + assertEquals(Errors.NONE, oldResponse.error()); + assertEquals(Utils.mkMap( + Utils.mkEntry(Errors.NONE, 1), + Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), + oldResponse.errorCounts()); + } + + if (version <= 2) { + assertEquals(DEFAULT_THROTTLE_TIME, oldResponse.throttleTimeMs()); + } else { + assertEquals(throttleTimeMs, oldResponse.throttleTimeMs()); + } + + Map expectedDataMap = new HashMap<>(); + for (Map.Entry entry : partitionDataMap.entrySet()) { + PartitionData partitionData = entry.getValue(); + expectedDataMap.put(entry.getKey(), new PartitionData( + partitionData.offset, + version <= 4 ? Optional.empty() : partitionData.leaderEpoch, + partitionData.metadata, + partitionData.error + )); + } + + Map responseData = oldResponse.responseDataV0ToV7(); + assertEquals(expectedDataMap, responseData); + + responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError())); + } else { + partitionDataMap.put(new TopicPartition(topicTwo, partitionTwo), new PartitionData( + offset, + leaderEpochTwo, + metadata, + Errors.GROUP_AUTHORIZATION_FAILED)); + OffsetFetchResponse latestResponse = new OffsetFetchResponse( + throttleTimeMs, + Collections.singletonMap(groupOne, Errors.NONE), + Collections.singletonMap(groupOne, partitionDataMap)); + OffsetFetchResponseData data = new OffsetFetchResponseData( + new ByteBufferAccessor(latestResponse.serialize(version)), version); + OffsetFetchResponse oldResponse = new OffsetFetchResponse(data, version); + assertEquals(Errors.NONE.code(), data.groups().get(0).errorCode()); + + assertEquals(Errors.NONE, oldResponse.groupLevelError(groupOne)); + assertEquals(Utils.mkMap( + Utils.mkEntry(Errors.NONE, 1), + Utils.mkEntry(Errors.GROUP_AUTHORIZATION_FAILED, 1), + Utils.mkEntry(Errors.TOPIC_AUTHORIZATION_FAILED, 1)), + oldResponse.errorCounts()); + assertEquals(throttleTimeMs, oldResponse.throttleTimeMs()); + + Map expectedDataMap = new HashMap<>(); + for (Map.Entry entry : partitionDataMap.entrySet()) { + PartitionData partitionData = entry.getValue(); + expectedDataMap.put(entry.getKey(), new PartitionData( + partitionData.offset, + partitionData.leaderEpoch, + partitionData.metadata, + partitionData.error + )); + } + + Map responseData = oldResponse.partitionDataMap(groupOne); + assertEquals(expectedDataMap, responseData); + + responseData.forEach((tp, rdata) -> assertTrue(rdata.hasError())); + } } + } - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("foo") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )), - new OffsetFetchResponse(data, version).group("foo") - ); + @Test + public void testShouldThrottle() { + for (short version : ApiKeys.OFFSET_FETCH.allVersions()) { + if (version < 8) { + OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NONE, partitionDataMap); + if (version >= 4) { + assertTrue(response.shouldClientThrottle(version)); + } else { + assertFalse(response.shouldClientThrottle(version)); + } + } else { + OffsetFetchResponse response = new OffsetFetchResponse( + throttleTimeMs, + Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), + Collections.singletonMap(groupOne, partitionDataMap)); + assertTrue(response.shouldClientThrottle(version)); + } + } } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) - public void testGroupWithMultipleGroups(short version) { - var groups = List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") - )) - )), - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group2") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("bar") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(10) - .setCommittedLeaderEpoch(5) - .setMetadata("metadata") + @Test + public void testNullableMetadataV0ToV7() { + PartitionData pd = new PartitionData( + offset, + leaderEpochOne, + null, + Errors.UNKNOWN_TOPIC_OR_PARTITION); + // test PartitionData.equals with null metadata + assertEquals(pd, pd); + partitionDataMap.clear(); + partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd); + + OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.GROUP_AUTHORIZATION_FAILED, partitionDataMap); + OffsetFetchResponseData expectedData = + new OffsetFetchResponseData() + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()) + .setThrottleTimeMs(throttleTimeMs) + .setTopics(Collections.singletonList( + new OffsetFetchResponseTopic() + .setName(topicOne) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartition() + .setPartitionIndex(partitionOne) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpochOne.orElse(-1)) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + .setMetadata(null)) )) - )) - ); + ); + assertEquals(expectedData, response.data()); + } - var response = new OffsetFetchResponse( - new OffsetFetchResponseData().setGroups(groups), - version - ); + @Test + public void testNullableMetadataV8AndAbove() { + PartitionData pd = new PartitionData( + offset, + leaderEpochOne, + null, + Errors.UNKNOWN_TOPIC_OR_PARTITION); + // test PartitionData.equals with null metadata + assertEquals(pd, pd); + partitionDataMap.clear(); + partitionDataMap.put(new TopicPartition(topicOne, partitionOne), pd); - groups.forEach(group -> - assertEquals(group, response.group(group.groupId())) - ); + OffsetFetchResponse response = new OffsetFetchResponse( + throttleTimeMs, + Collections.singletonMap(groupOne, Errors.GROUP_AUTHORIZATION_FAILED), + Collections.singletonMap(groupOne, partitionDataMap)); + OffsetFetchResponseData expectedData = + new OffsetFetchResponseData() + .setGroups(Collections.singletonList( + new OffsetFetchResponseGroup() + .setGroupId(groupOne) + .setTopics(Collections.singletonList( + new OffsetFetchResponseTopics() + .setName(topicOne) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartitions() + .setPartitionIndex(partitionOne) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(leaderEpochOne.orElse(-1)) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + .setMetadata(null))))) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code()))) + .setThrottleTimeMs(throttleTimeMs); + assertEquals(expectedData, response.data()); } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testGroupWithSingleGroupWithTopLevelError(short version) { - var data = new OffsetFetchResponseData(); - - if (version < 2) { - data.setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - )) - )); - } else if (version < 8) { - data.setErrorCode(Errors.INVALID_GROUP_ID.code()); - } else { - data.setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("foo") - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - )); - } + @Test + public void testUseDefaultLeaderEpochV0ToV7() { + final Optional emptyLeaderEpoch = Optional.empty(); + partitionDataMap.clear(); - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("foo") - .setErrorCode(Errors.INVALID_GROUP_ID.code()), - new OffsetFetchResponse(data, version).group("foo") + partitionDataMap.put(new TopicPartition(topicOne, partitionOne), + new PartitionData( + offset, + emptyLeaderEpoch, + metadata, + Errors.UNKNOWN_TOPIC_OR_PARTITION) ); - } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testSingleGroupWithError(short version) { - var group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setPartitionIndexes(List.of(0)) - )); - - if (version < 2) { - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setErrorCode(Errors.INVALID_GROUP_ID.code()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) - )) - )), - OffsetFetchResponse.groupError(group, Errors.INVALID_GROUP_ID, version) - ); - } else { - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setErrorCode(Errors.INVALID_GROUP_ID.code()), - OffsetFetchResponse.groupError(group, Errors.INVALID_GROUP_ID, version) - ); - } + OffsetFetchResponse response = new OffsetFetchResponse(throttleTimeMs, Errors.NOT_COORDINATOR, partitionDataMap); + OffsetFetchResponseData expectedData = + new OffsetFetchResponseData() + .setErrorCode(Errors.NOT_COORDINATOR.code()) + .setThrottleTimeMs(throttleTimeMs) + .setTopics(Collections.singletonList( + new OffsetFetchResponseTopic() + .setName(topicOne) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartition() + .setPartitionIndex(partitionOne) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + .setMetadata(metadata)) + )) + ); + assertEquals(expectedData, response.data()); } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) - public void testErrorCounts(short version) { - if (version < 2) { - var data = new OffsetFetchResponseData() - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) - )) - )); - assertEquals( - Map.of(Errors.UNSTABLE_OFFSET_COMMIT, 1), - new OffsetFetchResponse(data, version).errorCounts() - ); - } else if (version < 8) { - // Version 2 returns a top level error code for group or coordinator level errors. - var data = new OffsetFetchResponseData() - .setErrorCode(Errors.NONE.code()) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopic() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) - )) - )); - assertEquals( - Map.of( - Errors.NONE, 1, - Errors.UNSTABLE_OFFSET_COMMIT, 1 - ), - new OffsetFetchResponse(data, version).errorCounts() - ); - } else { - // Version 8 adds support for fetching offsets for multiple groups at a time. - var data = new OffsetFetchResponseData() - .setGroups(List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setErrorCode(Errors.NONE.code()) - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setErrorCode(Errors.UNSTABLE_OFFSET_COMMIT.code()) - .setCommittedOffset(INVALID_OFFSET) - .setMetadata(NO_METADATA) - .setCommittedLeaderEpoch(NO_PARTITION_LEADER_EPOCH) - )) - )) - )); - assertEquals( - Map.of( - Errors.NONE, 1, - Errors.UNSTABLE_OFFSET_COMMIT, 1 - ), - new OffsetFetchResponse(data, version).errorCounts() - ); - } + @Test + public void testUseDefaultLeaderEpochV8() { + final Optional emptyLeaderEpoch = Optional.empty(); + partitionDataMap.clear(); + + partitionDataMap.put(new TopicPartition(topicOne, partitionOne), + new PartitionData( + offset, + emptyLeaderEpoch, + metadata, + Errors.UNKNOWN_TOPIC_OR_PARTITION) + ); + OffsetFetchResponse response = new OffsetFetchResponse( + throttleTimeMs, + Collections.singletonMap(groupOne, Errors.NOT_COORDINATOR), + Collections.singletonMap(groupOne, partitionDataMap)); + OffsetFetchResponseData expectedData = + new OffsetFetchResponseData() + .setGroups(Collections.singletonList( + new OffsetFetchResponseGroup() + .setGroupId(groupOne) + .setTopics(Collections.singletonList( + new OffsetFetchResponseTopics() + .setName(topicOne) + .setPartitions(Collections.singletonList( + new OffsetFetchResponsePartitions() + .setPartitionIndex(partitionOne) + .setCommittedOffset(offset) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) + .setMetadata(metadata))))) + .setErrorCode(Errors.NOT_COORDINATOR.code()))) + .setThrottleTimeMs(throttleTimeMs); + assertEquals(expectedData, response.data()); } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java index eb1627055e11d..42a1e1f39681d 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceRequestTest.java @@ -18,7 +18,6 @@ package org.apache.kafka.common.requests; import org.apache.kafka.common.InvalidRecordException; -import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.UnsupportedCompressionTypeException; import org.apache.kafka.common.message.ProduceRequestData; @@ -55,7 +54,7 @@ public void shouldBeFlaggedAsTransactionalWhenTransactionalRecords() { final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("topic") .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(1) @@ -84,7 +83,7 @@ public void shouldBeFlaggedAsIdempotentWhenIdempotentRecords() { final ProduceRequest request = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("topic") .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(1) @@ -103,36 +102,13 @@ public void testBuildWithCurrentMessageFormat() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) + new ProduceRequestData.TopicProduceData().setName("test").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) .iterator())) .setAcks((short) 1) .setTimeoutMs(5000), false); - assertEquals(ApiKeys.PRODUCE.oldestVersion(), requestBuilder.oldestAllowedVersion()); - assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion()); - } - - @Test - public void testBuildWithCurrentMessageFormatWithoutTopicId() { - ByteBuffer buffer = ByteBuffer.allocate(256); - MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, - Compression.NONE, TimestampType.CREATE_TIME, 0L); - builder.append(10L, null, "a".getBytes()); - ProduceRequest.Builder requestBuilder = ProduceRequest.builder( - new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( - new ProduceRequestData.TopicProduceData() - .setName("topic") // TopicId will default to Uuid.ZERO and client will get UNKNOWN_TOPIC_ID error. - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(9).setRecords(builder.build())))) - .iterator())) - .setAcks((short) 1) - .setTimeoutMs(5000), - false); - assertEquals(ApiKeys.PRODUCE.oldestVersion(), requestBuilder.oldestAllowedVersion()); + assertEquals(3, requestBuilder.oldestAllowedVersion()); assertEquals(ApiKeys.PRODUCE.latestVersion(), requestBuilder.latestAllowedVersion()); } @@ -153,7 +129,7 @@ public void testV3AndAboveShouldContainOnlyOneRecordBatch() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("test") .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -168,7 +144,7 @@ public void testV3AndAboveCannotHaveNoRecordBatches() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("test") .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -188,7 +164,7 @@ public void testV3AndAboveCannotUseMagicV0() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("test") .setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(0) @@ -208,7 +184,7 @@ public void testV3AndAboveCannotUseMagicV1() { ProduceRequest.Builder requestBuilder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("test") .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(builder.build())))) @@ -228,7 +204,7 @@ public void testV6AndBelowCannotUseZStdCompression() { ProduceRequestData produceData = new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("test") .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(builder.build())))) @@ -259,12 +235,10 @@ public void testMixedTransactionalData() { ProduceRequest.Builder builder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), - new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(txnRecords))), + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonTxnRecords)))) .iterator())) .setAcks((short) -1) .setTimeoutMs(5000), @@ -288,12 +262,10 @@ public void testMixedIdempotentData() { ProduceRequest.Builder builder = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Arrays.asList( - new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), - new ProduceRequestData.TopicProduceData().setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) - .setPartitionData(Collections.singletonList( - new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(0).setRecords(idempotentRecords))), + new ProduceRequestData.TopicProduceData().setName("foo").setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData().setIndex(1).setRecords(nonIdempotentRecords)))) .iterator())) .setAcks((short) -1) .setTimeoutMs(5000), @@ -309,7 +281,7 @@ public void testBuilderOldestAndLatestAllowed() { ProduceRequest.Builder builder = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("topic") .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(1) .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord)))) @@ -330,7 +302,7 @@ private ProduceRequest createNonIdempotentNonTransactionalRecords() { return ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(Uuid.fromString("H3Emm3vW7AKKO4NTRPaCWt")) + .setName("topic") .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(1) .setRecords(MemoryRecords.withRecords(Compression.NONE, simpleRecord))))) diff --git a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java index 75d9c6d9232c9..2c4f1c792244f 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/ProduceResponseTest.java @@ -17,8 +17,7 @@ package org.apache.kafka.common.requests; -import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.message.ProduceResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.RecordBatch; @@ -41,10 +40,8 @@ public class ProduceResponseTest { @SuppressWarnings("deprecation") @Test public void produceResponseVersionTest() { - Map responseData = new HashMap<>(); - Uuid topicId = Uuid.fromString("5JkYABorYD4w0AQXe9TvBG"); - TopicIdPartition topicIdPartition = new TopicIdPartition(topicId, 0, "test"); - responseData.put(topicIdPartition, new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); + Map responseData = new HashMap<>(); + responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); ProduceResponse v0Response = new ProduceResponse(responseData); ProduceResponse v1Response = new ProduceResponse(responseData, 10); ProduceResponse v2Response = new ProduceResponse(responseData, 10); @@ -64,16 +61,14 @@ public void produceResponseVersionTest() { assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode())); assertNull(partitionProduceResponse.errorMessage()); assertTrue(partitionProduceResponse.recordErrors().isEmpty()); - assertEquals(topicIdPartition.topicId(), topicProduceResponse.topicId()); } } @SuppressWarnings("deprecation") @Test public void produceResponseRecordErrorsTest() { - Map responseData = new HashMap<>(); - Uuid topicId = Uuid.fromString("4w0AQXe9TvBG5JkYABorYD"); - TopicIdPartition tp = new TopicIdPartition(topicId, 0, "test"); + Map responseData = new HashMap<>(); + TopicPartition tp = new TopicPartition("test", 0); ProduceResponse.PartitionResponse partResponse = new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, Collections.singletonList(new ProduceResponse.RecordError(3, "Record error")), diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java index ba53edcacf8ce..aad3be459a682 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestContextTest.java @@ -84,7 +84,7 @@ public void testSerdeUnsupportedApiVersionRequest() throws Exception { assertEquals(correlationId, responseHeader.correlationId()); ApiVersionsResponse response = (ApiVersionsResponse) AbstractResponse.parseResponse(ApiKeys.API_VERSIONS, - new ByteBufferAccessor(responseBuffer), (short) 0); + responseBuffer, (short) 0); assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data().errorCode()); assertTrue(response.data().apiKeys().isEmpty()); } @@ -156,7 +156,7 @@ private ByteBuffer produceRequest(short version) { .setTimeoutMs(1); data.topicData().add( new ProduceRequestData.TopicProduceData() - .setName("foo") // versions in this test < 13, topicId can't be used + .setName("foo") .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(42)))); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java index ffb95673e5517..6578302e81e79 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/RequestResponseTest.java @@ -58,8 +58,6 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic; import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopicCollection; import org.apache.kafka.common.message.AlterReplicaLogDirsResponseData; -import org.apache.kafka.common.message.AlterShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.AlterShareGroupOffsetsResponseData; import org.apache.kafka.common.message.AlterUserScramCredentialsRequestData; import org.apache.kafka.common.message.AlterUserScramCredentialsResponseData; import org.apache.kafka.common.message.ApiMessageType; @@ -108,8 +106,6 @@ import org.apache.kafka.common.message.DeleteGroupsResponseData.DeletableGroupResultCollection; import org.apache.kafka.common.message.DeleteRecordsRequestData; import org.apache.kafka.common.message.DeleteRecordsResponseData; -import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData; import org.apache.kafka.common.message.DeleteShareGroupStateRequestData; import org.apache.kafka.common.message.DeleteShareGroupStateResponseData; import org.apache.kafka.common.message.DeleteTopicsRequestData; @@ -136,8 +132,6 @@ import org.apache.kafka.common.message.DescribeProducersResponseData; import org.apache.kafka.common.message.DescribeQuorumRequestData; import org.apache.kafka.common.message.DescribeQuorumResponseData; -import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData; -import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData; import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; import org.apache.kafka.common.message.DescribeTransactionsRequestData; @@ -175,8 +169,8 @@ import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember; import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity; import org.apache.kafka.common.message.LeaveGroupResponseData; -import org.apache.kafka.common.message.ListConfigResourcesRequestData; -import org.apache.kafka.common.message.ListConfigResourcesResponseData; +import org.apache.kafka.common.message.ListClientMetricsResourcesRequestData; +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData; import org.apache.kafka.common.message.ListGroupsRequestData; import org.apache.kafka.common.message.ListGroupsResponseData; import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition; @@ -199,8 +193,6 @@ import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopic; import org.apache.kafka.common.message.OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection; -import org.apache.kafka.common.message.OffsetFetchRequestData; -import org.apache.kafka.common.message.OffsetFetchResponseData; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic; import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopicCollection; @@ -231,10 +223,6 @@ import org.apache.kafka.common.message.ShareGroupDescribeResponseData; import org.apache.kafka.common.message.ShareGroupHeartbeatRequestData; import org.apache.kafka.common.message.ShareGroupHeartbeatResponseData; -import org.apache.kafka.common.message.StreamsGroupDescribeRequestData; -import org.apache.kafka.common.message.StreamsGroupDescribeResponseData; -import org.apache.kafka.common.message.StreamsGroupHeartbeatRequestData; -import org.apache.kafka.common.message.StreamsGroupHeartbeatResponseData; import org.apache.kafka.common.message.SyncGroupRequestData; import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment; import org.apache.kafka.common.message.SyncGroupResponseData; @@ -278,6 +266,8 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; import java.nio.BufferUnderflowException; import java.nio.ByteBuffer; @@ -315,7 +305,7 @@ import static org.apache.kafka.common.protocol.ApiKeys.PRODUCE; import static org.apache.kafka.common.protocol.ApiKeys.SASL_AUTHENTICATE; import static org.apache.kafka.common.protocol.ApiKeys.SYNC_GROUP; -import static org.apache.kafka.common.protocol.ApiKeys.UNREGISTER_BROKER; +import static org.apache.kafka.common.protocol.ApiKeys.WRITE_TXN_MARKERS; import static org.apache.kafka.common.requests.EndTxnRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2; import static org.apache.kafka.common.requests.FetchMetadata.INVALID_SESSION_ID; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -324,12 +314,11 @@ import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; // This class performs tests requests and responses for all API keys public class RequestResponseTest { - private static final Uuid TOPIC_ID = Uuid.randomUuid(); - // Exception includes a message that we verify is not included in error responses private final UnknownServerException unknownServerException = new UnknownServerException("secret"); @@ -337,13 +326,11 @@ public class RequestResponseTest { public void testSerialization() { Map> toSkip = new HashMap<>(); // It's not possible to create a MetadataRequest v0 via the builder - toSkip.put(METADATA, List.of((short) 0)); + toSkip.put(METADATA, singletonList((short) 0)); // DescribeLogDirsResponse v0, v1 and v2 don't have a top level error field - toSkip.put(DESCRIBE_LOG_DIRS, List.of((short) 0, (short) 1, (short) 2)); + toSkip.put(DESCRIBE_LOG_DIRS, Arrays.asList((short) 0, (short) 1, (short) 2)); // ElectLeaders v0 does not have a top level error field, when accessing it, it defaults to NONE - toSkip.put(ELECT_LEADERS, List.of((short) 0)); - // UnregisterBroker v0 contains the error message in the response - toSkip.put(UNREGISTER_BROKER, List.of((short) 0)); + toSkip.put(ELECT_LEADERS, singletonList((short) 0)); for (ApiKeys apikey : ApiKeys.values()) { for (short version : apikey.allVersions()) { @@ -454,10 +441,8 @@ public void cannotUseFindCoordinatorV0ToFindTransactionCoordinator() { @Test public void testProduceRequestPartitionSize() { - Uuid topicId = Uuid.fromString("e9TvBGX5JkYAB0AQorYD4w"); - String topicName = "foo"; - TopicIdPartition tpId0 = createTopicIdPartition(topicId, 0, topicName); - TopicIdPartition tpId1 = createTopicIdPartition(topicId, 1, topicName); + TopicPartition tp0 = new TopicPartition("test", 0); + TopicPartition tp1 = new TopicPartition("test", 1); MemoryRecords records0 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, new SimpleRecord("woot".getBytes())); MemoryRecords records1 = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, @@ -465,24 +450,19 @@ public void testProduceRequestPartitionSize() { ProduceRequest request = ProduceRequest.builder( new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(asList( - createTopicProduceData(PRODUCE.latestVersion(), records0, tpId0), - createTopicProduceData(PRODUCE.latestVersion(), records1, tpId1)).iterator())) + new ProduceRequestData.TopicProduceData().setName(tp0.topic()).setPartitionData( + singletonList(new ProduceRequestData.PartitionProduceData().setIndex(tp0.partition()).setRecords(records0))), + new ProduceRequestData.TopicProduceData().setName(tp1.topic()).setPartitionData( + singletonList(new ProduceRequestData.PartitionProduceData().setIndex(tp1.partition()).setRecords(records1)))) + .iterator())) .setAcks((short) 1) .setTimeoutMs(5000) .setTransactionalId("transactionalId"), true) .build((short) 7); assertEquals(2, request.partitionSizes().size()); - - assertEquals(records0.sizeInBytes(), partitionSize(request.partitionSizes(), tpId0)); - assertEquals(records1.sizeInBytes(), partitionSize(request.partitionSizes(), tpId1)); - } - - private int partitionSize(Map partitionSizes, TopicIdPartition topicIdPartition) { - return partitionSizes.entrySet().stream() - .filter(tpId -> tpId.getKey().topicId() == topicIdPartition.topicId() && - tpId.getKey().partition() == topicIdPartition.partition()).map(Map.Entry::getValue) - .findFirst().get(); + assertEquals(records0.sizeInBytes(), (int) request.partitionSizes().get(tp0)); + assertEquals(records1.sizeInBytes(), (int) request.partitionSizes().get(tp1)); } @Test @@ -495,9 +475,12 @@ public void produceRequestToStringTest() { assertFalse(request.toString(true).contains("numPartitions")); request.clearPartitionRecords(); - assertThrows(IllegalStateException.class, - request::data, - "DataOrException should fail after clearPartitionRecords()"); + try { + request.data(); + fail("dataOrException should fail after clearPartitionRecords()"); + } catch (IllegalStateException e) { + // OK + } // `toString` should behave the same after `clearPartitionRecords` assertFalse(request.toString(false).contains("partitionSizes")); @@ -534,6 +517,11 @@ public void produceRequestGetErrorResponseTest() { public void fetchResponseVersionTest() { Uuid id = Uuid.randomUuid(); MemoryRecords records = MemoryRecords.readableRecords(ByteBuffer.allocate(10)); + FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData() + .setPartitionIndex(0) + .setHighWatermark(1000000) + .setLogStartOffset(-1) + .setRecords(records); LinkedHashMap idResponseData = new LinkedHashMap<>(); idResponseData.put(new TopicIdPartition(id, new TopicPartition("test", 0)), new FetchResponseData.PartitionData() @@ -541,7 +529,7 @@ public void fetchResponseVersionTest() { .setHighWatermark(1000000) .setLogStartOffset(-1) .setRecords(records)); - FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData, List.of()); + FetchResponse idTestResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, idResponseData); FetchResponse v12Deserialized = FetchResponse.parse(idTestResponse.serialize((short) 12), (short) 12); FetchResponse newestDeserialized = FetchResponse.parse(idTestResponse.serialize(FETCH.latestVersion()), FETCH.latestVersion()); assertTrue(v12Deserialized.topicIds().isEmpty()); @@ -582,63 +570,12 @@ public void testFetchResponseV4() { .setLastStableOffset(6) .setRecords(records)); - FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData, List.of()); + FetchResponse response = FetchResponse.of(Errors.NONE, 10, INVALID_SESSION_ID, responseData); FetchResponse deserialized = FetchResponse.parse(response.serialize((short) 4), (short) 4); assertEquals(responseData.entrySet().stream().collect(Collectors.toMap(e -> e.getKey().topicPartition(), Map.Entry::getValue)), deserialized.responseData(topicNames, (short) 4)); } - @Test - public void testFetchResponseShouldNotHaveNullRecords() { - Uuid id = Uuid.randomUuid(); - FetchResponseData.PartitionData partitionData = new FetchResponseData.PartitionData() - .setPartitionIndex(0) - .setHighWatermark(1000000) - .setLogStartOffset(100) - .setLastStableOffset(200) - .setRecords(null); - FetchResponseData.FetchableTopicResponse response = new FetchResponseData.FetchableTopicResponse() - .setTopic("topic") - .setPartitions(List.of(partitionData)) - .setTopicId(id); - FetchResponseData data = new FetchResponseData().setResponses(List.of(response)); - - response.setPartitions(List.of(FetchResponse.partitionResponse(0, Errors.NONE))); - FetchResponse fetchResponse = FetchResponse.of(data); - validateNoNullRecords(fetchResponse); - - TopicIdPartition topicIdPartition = new TopicIdPartition(id, new TopicPartition("test", 0)); - LinkedHashMap tpToData = new LinkedHashMap<>(Map.of(topicIdPartition, partitionData)); - fetchResponse = FetchResponse.of(Errors.NONE, 0, INVALID_SESSION_ID, tpToData, List.of()); - validateNoNullRecords(fetchResponse); - } - - private void validateNoNullRecords(FetchResponse fetchResponse) { - fetchResponse.data().responses().stream() - .flatMap(response -> response.partitions().stream()) - .forEach(partition -> assertEquals(MemoryRecords.EMPTY, partition.records())); - } - - @Test - public void testShareFetchResponseShouldNotHaveNullRecords() { - Uuid id = Uuid.randomUuid(); - ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(0) - .setAcquiredRecords(List.of()) - .setRecords(null); - - TopicIdPartition topicIdPartition = new TopicIdPartition(id, new TopicPartition("test", 0)); - LinkedHashMap tpToData = new LinkedHashMap<>(Map.of(topicIdPartition, partitionData)); - ShareFetchResponse shareFetchResponse = ShareFetchResponse.of(Errors.NONE, 0, tpToData, List.of(), 0); - validateNoNullRecords(shareFetchResponse); - } - - private void validateNoNullRecords(ShareFetchResponse fetchResponse) { - fetchResponse.data().responses().stream() - .flatMap(response -> response.partitions().stream()) - .forEach(partition -> assertEquals(MemoryRecords.EMPTY, partition.records())); - } - @Test public void verifyFetchResponseFullWrites() throws Exception { verifyFetchResponseFullWrite(FETCH.latestVersion(), createFetchResponse(123)); @@ -668,7 +605,7 @@ private void verifyFetchResponseFullWrite(short version, FetchResponse fetchResp ResponseHeader responseHeader = ResponseHeader.parse(channel.buffer(), responseHeaderVersion); assertEquals(correlationId, responseHeader.correlationId()); - assertEquals(fetchResponse.serialize(version).buffer(), buf); + assertEquals(fetchResponse.serialize(version), buf); FetchResponseData deserialized = new FetchResponseData(new ByteBufferAccessor(buf), version); ObjectSerializationCache serializationCache = new ObjectSerializationCache(); assertEquals(size, responseHeader.size() + deserialized.size(serializationCache, version)); @@ -728,14 +665,6 @@ public void testFetchRequestWithMetadata() { assertEquals(request.isolationLevel(), deserialized.isolationLevel()); } - @Test - public void testJoinGroupRequestV0RebalanceTimeout() { - final short version = 0; - JoinGroupRequest jgr = createJoinGroupRequest(version); - JoinGroupRequest jgr2 = JoinGroupRequest.parse(jgr.serialize(), version); - assertEquals(jgr2.data().rebalanceTimeoutMs(), jgr.data().rebalanceTimeoutMs()); - } - @Test public void testSerializeWithHeader() { CreatableTopicCollection topicsToCreate = new CreatableTopicCollection(1); @@ -759,7 +688,7 @@ public void testSerializeWithHeader() { assertEquals(requestHeader, parsedHeader); RequestAndSize parsedRequest = AbstractRequest.parseRequest( - CREATE_TOPICS, requestVersion, new ByteBufferAccessor(serializedRequest)); + CREATE_TOPICS, requestVersion, serializedRequest); assertEquals(createTopicsRequest.data(), parsedRequest.request.data()); } @@ -783,6 +712,55 @@ public void testSerializeWithInconsistentHeaderVersion() { assertThrows(IllegalArgumentException.class, () -> createTopicsRequest.serializeWithHeader(requestHeader)); } + @Test + public void testOffsetFetchRequestBuilderToStringV0ToV7() { + List stableFlags = asList(true, false); + for (Boolean requireStable : stableFlags) { + String allTopicPartitionsString = new OffsetFetchRequest.Builder( + "someGroup", + requireStable, + null, + false + ).toString(); + + assertTrue(allTopicPartitionsString.contains("groupId='', topics=[]," + + " groups=[OffsetFetchRequestGroup(groupId='someGroup', memberId=null, memberEpoch=-1, topics=null)], requireStable=" + requireStable)); + String string = new OffsetFetchRequest.Builder( + "group1", + requireStable, + singletonList( + new TopicPartition("test11", 1)), + false + ).toString(); + assertTrue(string.contains("test11")); + assertTrue(string.contains("group1")); + assertTrue(string.contains("requireStable=" + requireStable)); + } + } + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testOffsetFetchRequestBuilderToStringV8AndAbove(boolean requireStable) { + String allTopicPartitionsString = new OffsetFetchRequest.Builder( + Collections.singletonMap("someGroup", null), + requireStable, + false + ).toString(); + assertTrue(allTopicPartitionsString.contains("groups=[OffsetFetchRequestGroup" + + "(groupId='someGroup', memberId=null, memberEpoch=-1, topics=null)], requireStable=" + requireStable)); + + String subsetTopicPartitionsString = new OffsetFetchRequest.Builder( + Collections.singletonMap( + "group1", + singletonList(new TopicPartition("test11", 1))), + requireStable, + false + ).toString(); + assertTrue(subsetTopicPartitionsString.contains("test11")); + assertTrue(subsetTopicPartitionsString.contains("group1")); + assertTrue(subsetTopicPartitionsString.contains("requireStable=" + requireStable)); + } + @Test public void testApiVersionsRequestBeforeV3Validation() { for (short version = 0; version < 3; version++) { @@ -855,23 +833,6 @@ public void testApiVersionResponseWithNotUnsupportedError() { } } - @Test - public void testUnregisterBrokerResponseWithUnknownServerError() { - UnregisterBrokerRequest request = new UnregisterBrokerRequest.Builder( - new UnregisterBrokerRequestData() - ).build((short) 0); - String customerErrorMessage = "customer error message"; - - UnregisterBrokerResponse response = request.getErrorResponse( - 0, - new RuntimeException(customerErrorMessage) - ); - - assertEquals(0, response.throttleTimeMs()); - assertEquals(Errors.UNKNOWN_SERVER_ERROR.code(), response.data().errorCode()); - assertEquals(customerErrorMessage, response.data().errorMessage()); - } - private ApiVersionsResponse defaultApiVersionsResponse() { return TestUtils.defaultApiVersionsResponse(ApiMessageType.ListenerType.BROKER); } @@ -879,8 +840,8 @@ private ApiVersionsResponse defaultApiVersionsResponse() { @Test public void testApiVersionResponseParsingFallback() { for (short version : API_VERSIONS.allVersions()) { - ByteBufferAccessor readable = defaultApiVersionsResponse().serialize((short) 0); - ApiVersionsResponse response = ApiVersionsResponse.parse(readable, version); + ByteBuffer buffer = defaultApiVersionsResponse().serialize((short) 0); + ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, version); assertEquals(Errors.NONE.code(), response.data().errorCode()); } } @@ -888,16 +849,15 @@ public void testApiVersionResponseParsingFallback() { @Test public void testApiVersionResponseParsingFallbackException() { for (final short version : API_VERSIONS.allVersions()) { - assertThrows(BufferUnderflowException.class, - () -> ApiVersionsResponse.parse(new ByteBufferAccessor(ByteBuffer.allocate(0)), version)); + assertThrows(BufferUnderflowException.class, () -> ApiVersionsResponse.parse(ByteBuffer.allocate(0), version)); } } @Test public void testApiVersionResponseParsing() { for (short version : API_VERSIONS.allVersions()) { - ByteBufferAccessor readable = defaultApiVersionsResponse().serialize(version); - ApiVersionsResponse response = ApiVersionsResponse.parse(readable, version); + ByteBuffer buffer = defaultApiVersionsResponse().serialize(version); + ApiVersionsResponse response = ApiVersionsResponse.parse(buffer, version); assertEquals(Errors.NONE.code(), response.data().errorCode()); } } @@ -1056,7 +1016,7 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case GET_TELEMETRY_SUBSCRIPTIONS: return createGetTelemetrySubscriptionsRequest(version); case PUSH_TELEMETRY: return createPushTelemetryRequest(version); case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsRequest(version); - case LIST_CONFIG_RESOURCES: return createListConfigResourcesRequest(version); + case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesRequest(version); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsRequest(version); case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatRequest(version); case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeRequest(version); @@ -1070,11 +1030,6 @@ private AbstractRequest getRequest(ApiKeys apikey, short version) { case WRITE_SHARE_GROUP_STATE: return createWriteShareGroupStateRequest(version); case DELETE_SHARE_GROUP_STATE: return createDeleteShareGroupStateRequest(version); case READ_SHARE_GROUP_STATE_SUMMARY: return createReadShareGroupStateSummaryRequest(version); - case STREAMS_GROUP_HEARTBEAT: return createStreamsGroupHeartbeatRequest(version); - case STREAMS_GROUP_DESCRIBE: return createStreamsGroupDescribeRequest(version); - case DESCRIBE_SHARE_GROUP_OFFSETS: return createDescribeShareGroupOffsetsRequest(version); - case ALTER_SHARE_GROUP_OFFSETS: return createAlterShareGroupOffsetsRequest(version); - case DELETE_SHARE_GROUP_OFFSETS: return createDeleteShareGroupOffsetsRequest(version); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1129,7 +1084,7 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case ALTER_CLIENT_QUOTAS: return createAlterClientQuotasResponse(); case DESCRIBE_USER_SCRAM_CREDENTIALS: return createDescribeUserScramCredentialsResponse(); case ALTER_USER_SCRAM_CREDENTIALS: return createAlterUserScramCredentialsResponse(); - case VOTE: return createVoteResponse(); + case VOTE: return createVoteResponse(version); case BEGIN_QUORUM_EPOCH: return createBeginQuorumEpochResponse(); case END_QUORUM_EPOCH: return createEndQuorumEpochResponse(); case DESCRIBE_QUORUM: return createDescribeQuorumResponse(); @@ -1151,7 +1106,7 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case GET_TELEMETRY_SUBSCRIPTIONS: return createGetTelemetrySubscriptionsResponse(); case PUSH_TELEMETRY: return createPushTelemetryResponse(); case ASSIGN_REPLICAS_TO_DIRS: return createAssignReplicasToDirsResponse(); - case LIST_CONFIG_RESOURCES: return createListConfigResourcesResponse(); + case LIST_CLIENT_METRICS_RESOURCES: return createListClientMetricsResourcesResponse(); case DESCRIBE_TOPIC_PARTITIONS: return createDescribeTopicPartitionsResponse(); case SHARE_GROUP_HEARTBEAT: return createShareGroupHeartbeatResponse(); case SHARE_GROUP_DESCRIBE: return createShareGroupDescribeResponse(); @@ -1165,11 +1120,6 @@ private AbstractResponse getResponse(ApiKeys apikey, short version) { case WRITE_SHARE_GROUP_STATE: return createWriteShareGroupStateResponse(); case DELETE_SHARE_GROUP_STATE: return createDeleteShareGroupStateResponse(); case READ_SHARE_GROUP_STATE_SUMMARY: return createReadShareGroupStateSummaryResponse(); - case STREAMS_GROUP_HEARTBEAT: return createStreamsGroupHeartbeatResponse(); - case STREAMS_GROUP_DESCRIBE: return createStreamsGroupDescribeResponse(); - case DESCRIBE_SHARE_GROUP_OFFSETS: return createDescribeShareGroupOffsetsResponse(); - case ALTER_SHARE_GROUP_OFFSETS: return createAlterShareGroupOffsetsResponse(); - case DELETE_SHARE_GROUP_OFFSETS: return createDeleteShareGroupOffsetsResponse(); default: throw new IllegalArgumentException("Unknown API key " + apikey); } } @@ -1192,7 +1142,7 @@ private ConsumerGroupDescribeResponse createConsumerGroupDescribeResponse() { .setGroupEpoch(0) .setAssignmentEpoch(0) .setAssignorName("range") - .setMembers(new ArrayList<>(0)) + .setMembers(new ArrayList(0)) )) .setThrottleTimeMs(1000); return new ConsumerGroupDescribeResponse(data); @@ -1451,14 +1401,15 @@ private ShareFetchRequest createShareFetchRequest(short version) { ShareFetchRequestData data = new ShareFetchRequestData() .setGroupId("group") .setMemberId(Uuid.randomUuid().toString()) - .setTopics(new ShareFetchRequestData.FetchTopicCollection(List.of(new ShareFetchRequestData.FetchTopic() + .setTopics(singletonList(new ShareFetchRequestData.FetchTopic() .setTopicId(Uuid.randomUuid()) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(List.of(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator()))).iterator())); + .setPartitions(singletonList(new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(0))))); return new ShareFetchRequest.Builder(data).build(version); } private ShareFetchResponse createShareFetchResponse() { + ShareFetchResponseData data = new ShareFetchResponseData(); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("blah".getBytes())); ShareFetchResponseData.PartitionData partition = new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) @@ -1468,33 +1419,37 @@ private ShareFetchResponse createShareFetchResponse() { .setFirstOffset(0) .setLastOffset(0) .setDeliveryCount((short) 1))); - TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("", partition.partitionIndex())); - LinkedHashMap topicIdPartitionToPartition = new LinkedHashMap<>(); - topicIdPartitionToPartition.put(topicIdPartition, partition); - return ShareFetchResponse.of(Errors.NONE, 345, topicIdPartitionToPartition, List.of(), 0); + ShareFetchResponseData.ShareFetchableTopicResponse response = new ShareFetchResponseData.ShareFetchableTopicResponse() + .setTopicId(Uuid.randomUuid()) + .setPartitions(singletonList(partition)); + + data.setResponses(singletonList(response)); + data.setThrottleTimeMs(345); + data.setErrorCode(Errors.NONE.code()); + return new ShareFetchResponse(data); } private ShareAcknowledgeRequest createShareAcknowledgeRequest(short version) { ShareAcknowledgeRequestData data = new ShareAcknowledgeRequestData() .setMemberId(Uuid.randomUuid().toString()) - .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopics(singletonList(new ShareAcknowledgeRequestData.AcknowledgeTopic() .setTopicId(Uuid.randomUuid()) - .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(List.of(new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitions(singletonList(new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) .setAcknowledgementBatches(singletonList(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(0) - .setAcknowledgeTypes(Collections.singletonList((byte) 0))))).iterator()))).iterator())); + .setAcknowledgeTypes(Collections.singletonList((byte) 0)))))))); return new ShareAcknowledgeRequest.Builder(data).build(version); } private ShareAcknowledgeResponse createShareAcknowledgeResponse() { ShareAcknowledgeResponseData data = new ShareAcknowledgeResponseData(); - data.setResponses(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponseCollection(List.of(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() + data.setResponses(singletonList(new ShareAcknowledgeResponseData.ShareAcknowledgeTopicResponse() .setTopicId(Uuid.randomUuid()) .setPartitions(singletonList(new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code())))).iterator())); + .setErrorCode(Errors.NONE.code()))))); data.setThrottleTimeMs(345); data.setErrorCode(Errors.NONE.code()); return new ShareAcknowledgeResponse(data); @@ -1665,7 +1620,7 @@ private VoteRequest createVoteRequest(short version) { return new VoteRequest.Builder(data).build(version); } - private VoteResponse createVoteResponse() { + private VoteResponse createVoteResponse(short version) { VoteResponseData.PartitionData partitionData = new VoteResponseData.PartitionData() .setErrorCode(Errors.NONE.code()) .setLeaderEpoch(0) @@ -1948,11 +1903,11 @@ private void checkRequest(AbstractRequest req) { // Check for equality of the ByteBuffer only if indicated (it is likely to fail if any of the fields // in the request is a HashMap with multiple elements since ordering of the elements may vary) try { - ByteBufferAccessor serializedBytes = req.serialize(); + ByteBuffer serializedBytes = req.serialize(); AbstractRequest deserialized = AbstractRequest.parseRequest(req.apiKey(), req.version(), serializedBytes).request; - ByteBufferAccessor serializedBytes2 = deserialized.serialize(); - serializedBytes.buffer().rewind(); - assertEquals(serializedBytes.buffer(), serializedBytes2.buffer(), "Request " + req + "failed equality test"); + ByteBuffer serializedBytes2 = deserialized.serialize(); + serializedBytes.rewind(); + assertEquals(serializedBytes, serializedBytes2, "Request " + req + "failed equality test"); } catch (Exception e) { throw new RuntimeException("Failed to deserialize request " + req + " with type " + req.getClass(), e); } @@ -1963,10 +1918,9 @@ private void checkResponse(AbstractResponse response, short version) { // Check for equality and hashCode of the Struct only if indicated (it is likely to fail if any of the fields // in the response is a HashMap with multiple elements since ordering of the elements may vary) try { - ByteBufferAccessor readable = response.serialize(version); - ByteBuffer serializedBytes = readable.buffer(); - AbstractResponse deserialized = AbstractResponse.parseResponse(response.apiKey(), readable, version); - ByteBuffer serializedBytes2 = deserialized.serialize(version).buffer(); + ByteBuffer serializedBytes = response.serialize(version); + AbstractResponse deserialized = AbstractResponse.parseResponse(response.apiKey(), serializedBytes, version); + ByteBuffer serializedBytes2 = deserialized.serialize(version); serializedBytes.rewind(); assertEquals(serializedBytes, serializedBytes2, "Response " + response + "failed equality test"); } catch (Exception e) { @@ -2029,7 +1983,7 @@ private FetchRequest createFetchRequest(short version) { private FetchResponse createFetchResponse(Errors error, int sessionId) { return FetchResponse.parse( - FetchResponse.of(error, 25, sessionId, new LinkedHashMap<>(), List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + FetchResponse.of(error, 25, sessionId, new LinkedHashMap<>()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(int sessionId) { @@ -2051,7 +2005,7 @@ private FetchResponse createFetchResponse(int sessionId) { .setAbortedTransactions(abortedTransactions) .setRecords(MemoryRecords.EMPTY)); return FetchResponse.parse(FetchResponse.of(Errors.NONE, 25, sessionId, - responseData, List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + responseData).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(boolean includeAborted) { @@ -2076,7 +2030,7 @@ private FetchResponse createFetchResponse(boolean includeAborted) { .setAbortedTransactions(abortedTransactions) .setRecords(MemoryRecords.EMPTY)); return FetchResponse.parse(FetchResponse.of(Errors.NONE, 25, INVALID_SESSION_ID, - responseData, List.of()).serialize(FETCH.latestVersion()), FETCH.latestVersion()); + responseData).serialize(FETCH.latestVersion()), FETCH.latestVersion()); } private FetchResponse createFetchResponse(short version) { @@ -2115,7 +2069,7 @@ private FetchResponse createFetchResponse(short version) { response.setTopicId(Uuid.randomUuid()); } data.setResponses(singletonList(response)); - return FetchResponse.of(data); + return new FetchResponse(data); } private HeartbeatRequest createHeartBeatRequest(short version) { @@ -2367,7 +2321,7 @@ private MetadataResponse createMetadataResponse() { } private OffsetCommitRequest createOffsetCommitRequest(short version) { - return OffsetCommitRequest.Builder.forTopicNames(new OffsetCommitRequestData() + return new OffsetCommitRequest.Builder(new OffsetCommitRequestData() .setGroupId("group1") .setMemberId("consumer1") .setGroupInstanceId(null) @@ -2375,7 +2329,6 @@ private OffsetCommitRequest createOffsetCommitRequest(short version) { .setTopics(singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("test") - .setTopicId(TOPIC_ID) .setPartitions(asList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) @@ -2397,7 +2350,6 @@ private OffsetCommitResponse createOffsetCommitResponse() { .setTopics(singletonList( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("test") - .setTopicId(TOPIC_ID) .setPartitions(singletonList( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) @@ -2408,118 +2360,93 @@ private OffsetCommitResponse createOffsetCommitResponse() { } private OffsetFetchRequest createOffsetFetchRequest(short version, boolean requireStable) { - return OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group1") - .setMemberId(version >= 9 ? "memberid" : null) - .setMemberEpoch(version >= 9 ? 10 : -1) - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(version < 10 ? "test11" : "") - .setTopicId(version >= 10 ? TOPIC_ID : Uuid.ZERO_UUID) - .setPartitionIndexes(List.of(1)) - )) - )), - false, - true - ).build(version); + if (version < 8) { + return new OffsetFetchRequest.Builder( + "group1", + requireStable, + singletonList(new TopicPartition("test11", 1)), + false) + .build(version); + } + return new OffsetFetchRequest.Builder( + Collections.singletonMap( + "group1", + singletonList(new TopicPartition("test11", 1))), + requireStable, + false) + .build(version); } private OffsetFetchRequest createOffsetFetchRequestWithMultipleGroups(short version, boolean requireStable) { - return OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic1") - .setPartitionIndexes(List.of(0)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group2") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic1") - .setPartitionIndexes(List.of(0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic2") - .setPartitionIndexes(List.of(0, 1)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group3") - .setTopics(List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic1") - .setPartitionIndexes(List.of(0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic2") - .setPartitionIndexes(List.of(0, 1)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("topic3") - .setPartitionIndexes(List.of(0, 1, 2)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group4") - .setTopics(null), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group5") - .setTopics(null) - )), - false, - true + Map> groupToPartitionMap = new HashMap<>(); + List topic1 = singletonList( + new TopicPartition("topic1", 0)); + List topic2 = asList( + new TopicPartition("topic1", 0), + new TopicPartition("topic2", 0), + new TopicPartition("topic2", 1)); + List topic3 = asList( + new TopicPartition("topic1", 0), + new TopicPartition("topic2", 0), + new TopicPartition("topic2", 1), + new TopicPartition("topic3", 0), + new TopicPartition("topic3", 1), + new TopicPartition("topic3", 2)); + groupToPartitionMap.put("group1", topic1); + groupToPartitionMap.put("group2", topic2); + groupToPartitionMap.put("group3", topic3); + groupToPartitionMap.put("group4", null); + groupToPartitionMap.put("group5", null); + + return new OffsetFetchRequest.Builder( + groupToPartitionMap, + requireStable, + false ).build(version); } private OffsetFetchRequest createOffsetFetchRequestForAllPartition(short version, boolean requireStable) { - return OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group1") - .setMemberId(version >= 9 ? "memberid" : null) - .setMemberEpoch(version >= 9 ? 10 : -1) - .setTopics(null) - )), - false, - true - ).build(version); + if (version < 8) { + return new OffsetFetchRequest.Builder( + "group1", + requireStable, + null, + false) + .build(version); + } + return new OffsetFetchRequest.Builder( + Collections.singletonMap( + "group1", null), + requireStable, + false) + .build(version); } private OffsetFetchResponse createOffsetFetchResponse(short version) { - var group = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group1") - .setTopics(List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("test") - .setPartitions(List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(10) - .setMetadata(null) - )) - )); - return new OffsetFetchResponse.Builder(group).build(version); + Map responseData = new HashMap<>(); + responseData.put(new TopicPartition("test", 0), new OffsetFetchResponse.PartitionData( + 100L, Optional.empty(), "", Errors.NONE)); + responseData.put(new TopicPartition("test", 1), new OffsetFetchResponse.PartitionData( + 100L, Optional.of(10), null, Errors.NONE)); + if (version < 8) { + return new OffsetFetchResponse(Errors.NONE, responseData); + } + int throttleMs = 10; + return new OffsetFetchResponse(throttleMs, Collections.singletonMap("group1", Errors.NONE), + Collections.singletonMap("group1", responseData)); } private ProduceRequest createProduceRequest(short version) { - TopicIdPartition topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "test"); MemoryRecords records = MemoryRecords.withRecords(RecordBatch.MAGIC_VALUE_V2, Compression.NONE, new SimpleRecord("woot".getBytes())); return ProduceRequest.builder( new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - singletonList(createTopicProduceData(version, records, topicIdPartition)).iterator() - )) + .setTopicData(new ProduceRequestData.TopicProduceDataCollection(singletonList( + new ProduceRequestData.TopicProduceData() + .setName("test") + .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData() + .setIndex(0) + .setRecords(records)))).iterator())) .setAcks((short) 1) .setTimeoutMs(5000) .setTransactionalId(version >= 3 ? "transactionalId" : null), @@ -2527,37 +2454,18 @@ private ProduceRequest createProduceRequest(short version) { .build(version); } - private static ProduceRequestData.TopicProduceData createTopicProduceData(short version, MemoryRecords records, TopicIdPartition tp) { - ProduceRequestData.TopicProduceData topicProduceData = new ProduceRequestData.TopicProduceData() - .setPartitionData(singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition()) - .setRecords(records))); - if (version >= 13) { - topicProduceData.setTopicId(tp.topicId()); - } else { - topicProduceData.setName(tp.topic()); - } - return topicProduceData; - } - - private static TopicIdPartition createTopicIdPartition(Uuid topicId, int partitionIndex, String topicName) { - return new TopicIdPartition(topicId, partitionIndex, topicName); - } - @SuppressWarnings("deprecation") private ProduceResponse createProduceResponse() { - Map responseData = new HashMap<>(); - Uuid topicId = Uuid.fromString("0AQorYD4we9TvBGX5JkYAB"); - responseData.put(new TopicIdPartition(topicId, 0, "test"), new ProduceResponse.PartitionResponse(Errors.NONE, + Map responseData = new HashMap<>(); + responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100)); return new ProduceResponse(responseData, 0); } @SuppressWarnings("deprecation") private ProduceResponse createProduceResponseWithErrorMessage() { - Map responseData = new HashMap<>(); - Uuid topicId = Uuid.fromString("0AQorYD4we9TvBGX5JkYAB"); - responseData.put(new TopicIdPartition(topicId, 0, "test"), new ProduceResponse.PartitionResponse(Errors.NONE, + Map responseData = new HashMap<>(); + responseData.put(new TopicPartition("test", 0), new ProduceResponse.PartitionResponse(Errors.NONE, 10000, RecordBatch.NO_TIMESTAMP, 100, singletonList(new ProduceResponse.RecordError(0, "error message")), "global error message")); return new ProduceResponse(responseData, 0); @@ -3640,244 +3548,142 @@ private PushTelemetryResponse createPushTelemetryResponse() { return new PushTelemetryResponse(response); } - private ListConfigResourcesRequest createListConfigResourcesRequest(short version) { - return version == 0 ? - new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(List.of(ConfigResource.Type.CLIENT_METRICS.id()))).build(version) : - new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build(version); + private ListClientMetricsResourcesRequest createListClientMetricsResourcesRequest(short version) { + return new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build(version); } - private ListConfigResourcesResponse createListConfigResourcesResponse() { - ListConfigResourcesResponseData response = new ListConfigResourcesResponseData(); + private ListClientMetricsResourcesResponse createListClientMetricsResourcesResponse() { + ListClientMetricsResourcesResponseData response = new ListClientMetricsResourcesResponseData(); response.setErrorCode(Errors.NONE.code()); response.setThrottleTimeMs(10); - return new ListConfigResourcesResponse(response); + return new ListClientMetricsResourcesResponse(response); } private InitializeShareGroupStateRequest createInitializeShareGroupStateRequest(short version) { InitializeShareGroupStateRequestData data = new InitializeShareGroupStateRequestData() - .setGroupId("group") - .setTopics(Collections.singletonList(new InitializeShareGroupStateRequestData.InitializeStateData() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new InitializeShareGroupStateRequestData.PartitionData() - .setPartition(0) - .setStateEpoch(0) - .setStartOffset(0))))); + .setGroupId("group") + .setTopics(Collections.singletonList(new InitializeShareGroupStateRequestData.InitializeStateData() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new InitializeShareGroupStateRequestData.PartitionData() + .setPartition(0) + .setStateEpoch(0) + .setStartOffset(0))))); return new InitializeShareGroupStateRequest.Builder(data).build(version); } private InitializeShareGroupStateResponse createInitializeShareGroupStateResponse() { InitializeShareGroupStateResponseData data = new InitializeShareGroupStateResponseData(); data.setResults(Collections.singletonList(new InitializeShareGroupStateResponseData.InitializeStateResult() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new InitializeShareGroupStateResponseData.PartitionResult() - .setPartition(0) - .setErrorCode(Errors.NONE.code()))))); + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new InitializeShareGroupStateResponseData.PartitionResult() + .setPartition(0) + .setErrorCode(Errors.NONE.code()))))); return new InitializeShareGroupStateResponse(data); } private ReadShareGroupStateRequest createReadShareGroupStateRequest(short version) { ReadShareGroupStateRequestData data = new ReadShareGroupStateRequestData() - .setGroupId("group") - .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() - .setPartition(0))))); + .setGroupId("group") + .setTopics(Collections.singletonList(new ReadShareGroupStateRequestData.ReadStateData() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new ReadShareGroupStateRequestData.PartitionData() + .setPartition(0))))); return new ReadShareGroupStateRequest.Builder(data).build(version); } private ReadShareGroupStateResponse createReadShareGroupStateResponse() { ReadShareGroupStateResponseData data = new ReadShareGroupStateResponseData() - .setResults(Collections.singletonList(new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(0) - .setErrorCode(Errors.NONE.code()) - .setStateEpoch(0) - .setStartOffset(0) - .setStateBatches(Collections.singletonList(new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(0) - .setLastOffset(0) - .setDeliveryState((byte) 0x0) - .setDeliveryCount((short) 0))))))); + .setResults(Collections.singletonList(new ReadShareGroupStateResponseData.ReadStateResult() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new ReadShareGroupStateResponseData.PartitionResult() + .setPartition(0) + .setErrorCode(Errors.NONE.code()) + .setStateEpoch(0) + .setStartOffset(0) + .setStateBatches(Collections.singletonList(new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(0) + .setLastOffset(0) + .setDeliveryState((byte) 0x0) + .setDeliveryCount((short) 0))))))); return new ReadShareGroupStateResponse(data); } private WriteShareGroupStateRequest createWriteShareGroupStateRequest(short version) { WriteShareGroupStateRequestData data = new WriteShareGroupStateRequestData() - .setGroupId("group") - .setTopics(Collections.singletonList(new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new WriteShareGroupStateRequestData.PartitionData() - .setPartition(0) - .setStateEpoch(0) - .setStartOffset(0) - .setStateBatches(singletonList(new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(0) - .setLastOffset(0) - .setDeliveryState((byte) 0x0) - .setDeliveryCount((short) 0))))))); + .setGroupId("group") + .setTopics(Collections.singletonList(new WriteShareGroupStateRequestData.WriteStateData() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new WriteShareGroupStateRequestData.PartitionData() + .setPartition(0) + .setStateEpoch(0) + .setStartOffset(0) + .setStateBatches(singletonList(new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(0) + .setLastOffset(0) + .setDeliveryState((byte) 0x0) + .setDeliveryCount((short) 0))))))); return new WriteShareGroupStateRequest.Builder(data).build(version); } private WriteShareGroupStateResponse createWriteShareGroupStateResponse() { WriteShareGroupStateResponseData data = new WriteShareGroupStateResponseData() - .setResults(Collections.singletonList(new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(0) - .setErrorCode(Errors.NONE.code()))))); + .setResults(Collections.singletonList(new WriteShareGroupStateResponseData.WriteStateResult() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new WriteShareGroupStateResponseData.PartitionResult() + .setPartition(0) + .setErrorCode(Errors.NONE.code()))))); return new WriteShareGroupStateResponse(data); } private DeleteShareGroupStateRequest createDeleteShareGroupStateRequest(short version) { DeleteShareGroupStateRequestData data = new DeleteShareGroupStateRequestData() - .setGroupId("group") - .setTopics(Collections.singletonList(new DeleteShareGroupStateRequestData.DeleteStateData() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new DeleteShareGroupStateRequestData.PartitionData() - .setPartition(0))))); + .setGroupId("group") + .setTopics(Collections.singletonList(new DeleteShareGroupStateRequestData.DeleteStateData() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new DeleteShareGroupStateRequestData.PartitionData() + .setPartition(0))))); return new DeleteShareGroupStateRequest.Builder(data).build(version); } private DeleteShareGroupStateResponse createDeleteShareGroupStateResponse() { DeleteShareGroupStateResponseData data = new DeleteShareGroupStateResponseData() - .setResults(Collections.singletonList(new DeleteShareGroupStateResponseData.DeleteStateResult() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new DeleteShareGroupStateResponseData.PartitionResult() - .setPartition(0) - .setErrorCode(Errors.NONE.code()))))); + .setResults(Collections.singletonList(new DeleteShareGroupStateResponseData.DeleteStateResult() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new DeleteShareGroupStateResponseData.PartitionResult() + .setPartition(0) + .setErrorCode(Errors.NONE.code()))))); return new DeleteShareGroupStateResponse(data); } private ReadShareGroupStateSummaryRequest createReadShareGroupStateSummaryRequest(short version) { ReadShareGroupStateSummaryRequestData data = new ReadShareGroupStateSummaryRequestData() - .setGroupId("group") - .setTopics(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.PartitionData() - .setPartition(0))))); + .setGroupId("group") + .setTopics(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryRequestData.PartitionData() + .setPartition(0))))); return new ReadShareGroupStateSummaryRequest.Builder(data).build(version); } private ReadShareGroupStateSummaryResponse createReadShareGroupStateSummaryResponse() { ReadShareGroupStateSummaryResponseData data = new ReadShareGroupStateSummaryResponseData() - .setResults(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(0) - .setErrorCode(Errors.NONE.code()) - .setStartOffset(0) - .setStateEpoch(0))))); + .setResults(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() + .setTopicId(Uuid.randomUuid()) + .setPartitions(Collections.singletonList(new ReadShareGroupStateSummaryResponseData.PartitionResult() + .setPartition(0) + .setErrorCode(Errors.NONE.code()) + .setStartOffset(0) + .setStateEpoch(0))))); return new ReadShareGroupStateSummaryResponse(data); } - private DescribeShareGroupOffsetsRequest createDescribeShareGroupOffsetsRequest(short version) { - DescribeShareGroupOffsetsRequestData data = new DescribeShareGroupOffsetsRequestData() - .setGroups(Collections.singletonList(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup() - .setGroupId("group") - .setTopics(Collections.singletonList(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestTopic() - .setTopicName("topic-1") - .setPartitions(Collections.singletonList(0)))))); - return new DescribeShareGroupOffsetsRequest.Builder(data).build(version); - } - - private AlterShareGroupOffsetsRequest createAlterShareGroupOffsetsRequest(short version) { - AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection alterShareGroupOffsetsRequestTopics = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection(); - alterShareGroupOffsetsRequestTopics.add(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName("topic") - .setPartitions(List.of(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0))) - ); - AlterShareGroupOffsetsRequestData data = new AlterShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(alterShareGroupOffsetsRequestTopics); - return new AlterShareGroupOffsetsRequest.Builder(data).build(version); - } - - private DeleteShareGroupOffsetsRequest createDeleteShareGroupOffsetsRequest(short version) { - DeleteShareGroupOffsetsRequestData data = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(List.of(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic() - .setTopicName("topic-1"))); - return new DeleteShareGroupOffsetsRequest.Builder(data).build(version); - } - - private DescribeShareGroupOffsetsResponse createDescribeShareGroupOffsetsResponse() { - DescribeShareGroupOffsetsResponseData data = new DescribeShareGroupOffsetsResponseData() - .setGroups(Collections.singletonList(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group") - .setTopics(Collections.singletonList(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic() - .setTopicName("topic-1") - .setTopicId(Uuid.randomUuid()) - .setPartitions(Collections.singletonList(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - .setStartOffset(0) - .setLeaderEpoch(0))))))); - return new DescribeShareGroupOffsetsResponse(data); - } - - private AlterShareGroupOffsetsResponse createAlterShareGroupOffsetsResponse() { - AlterShareGroupOffsetsResponseData data = new AlterShareGroupOffsetsResponseData() - .setResponses(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(List.of( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() - .setPartitions(List.of(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()))) - .setTopicName("topic") - .setTopicId(Uuid.randomUuid())).iterator())); - return new AlterShareGroupOffsetsResponse(data); - } - - private DeleteShareGroupOffsetsResponse createDeleteShareGroupOffsetsResponse() { - DeleteShareGroupOffsetsResponseData data = new DeleteShareGroupOffsetsResponseData() - .setResponses(List.of(new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() - .setTopicName("topic-1") - .setTopicId(Uuid.randomUuid()) - .setErrorCode(Errors.NONE.code()))); - return new DeleteShareGroupOffsetsResponse(data); - } - - private AbstractRequest createStreamsGroupDescribeRequest(final short version) { - return new StreamsGroupDescribeRequest.Builder(new StreamsGroupDescribeRequestData() - .setGroupIds(Collections.singletonList("group")) - .setIncludeAuthorizedOperations(false)).build(version); - } - - private AbstractRequest createStreamsGroupHeartbeatRequest(final short version) { - return new StreamsGroupHeartbeatRequest.Builder(new StreamsGroupHeartbeatRequestData()).build(version); - } - - private AbstractResponse createStreamsGroupDescribeResponse() { - StreamsGroupDescribeResponseData data = new StreamsGroupDescribeResponseData() - .setGroups(Collections.singletonList( - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId("group") - .setErrorCode((short) 0) - .setErrorMessage(Errors.forCode((short) 0).message()) - .setGroupState("EMPTY") - .setGroupEpoch(0) - .setAssignmentEpoch(0) - .setMembers(new ArrayList<>(0)) - .setTopology(null) - )) - .setThrottleTimeMs(1000); - return new StreamsGroupDescribeResponse(data); - } - - private AbstractResponse createStreamsGroupHeartbeatResponse() { - return new StreamsGroupHeartbeatResponse(new StreamsGroupHeartbeatResponseData()); - } - @Test public void testInvalidSaslHandShakeRequest() { AbstractRequest request = new SaslHandshakeRequest.Builder( new SaslHandshakeRequestData().setMechanism("PLAIN")).build(); - ByteBufferAccessor serializedBytes = request.serialize(); + ByteBuffer serializedBytes = request.serialize(); // corrupt the length of the sasl mechanism string - serializedBytes.buffer().putShort(0, Short.MAX_VALUE); + serializedBytes.putShort(0, Short.MAX_VALUE); String msg = assertThrows(RuntimeException.class, () -> AbstractRequest. parseRequest(request.apiKey(), request.version(), serializedBytes)).getMessage(); @@ -3896,10 +3702,10 @@ public void testInvalidSaslAuthenticateRequest() { }; SaslAuthenticateRequestData data = new SaslAuthenticateRequestData().setAuthBytes(b); AbstractRequest request = new SaslAuthenticateRequest(data, version); - ByteBufferAccessor serializedBytes = request.serialize(); + ByteBuffer serializedBytes = request.serialize(); // corrupt the length of the bytes array - serializedBytes.buffer().putInt(0, Integer.MAX_VALUE); + serializedBytes.putInt(0, Integer.MAX_VALUE); String msg = assertThrows(RuntimeException.class, () -> AbstractRequest. parseRequest(request.apiKey(), request.version(), serializedBytes)).getMessage(); @@ -3928,7 +3734,7 @@ public void testValidTaggedFieldsWithSaslAuthenticateRequest() { accessor.flip(); SaslAuthenticateRequest saslAuthenticateRequest = (SaslAuthenticateRequest) AbstractRequest. - parseRequest(SASL_AUTHENTICATE, SASL_AUTHENTICATE.latestVersion(), accessor).request; + parseRequest(SASL_AUTHENTICATE, SASL_AUTHENTICATE.latestVersion(), accessor.buffer()).request; Assertions.assertArrayEquals(authBytes, saslAuthenticateRequest.data().authBytes()); assertEquals(1, saslAuthenticateRequest.data().unknownTaggedFields().size()); assertEquals(taggedField, saslAuthenticateRequest.data().unknownTaggedFields().get(0)); @@ -3956,28 +3762,7 @@ public void testInvalidTaggedFieldsWithSaslAuthenticateRequest() { accessor.flip(); String msg = assertThrows(RuntimeException.class, () -> AbstractRequest. - parseRequest(SASL_AUTHENTICATE, SASL_AUTHENTICATE.latestVersion(), accessor)).getMessage(); + parseRequest(SASL_AUTHENTICATE, SASL_AUTHENTICATE.latestVersion(), accessor.buffer())).getMessage(); assertEquals("Error reading byte array of 32767 byte(s): only 3 byte(s) available", msg); } - - @Test - public void testListConfigResourcesRequestV0FailsWithConfigResourceTypeOtherThanClientMetrics() { - // One type which is not CLIENT_METRICS - Arrays.stream(ConfigResource.Type.values()) - .filter(t -> t != ConfigResource.Type.CLIENT_METRICS) - .forEach(t -> { - ListConfigResourcesRequestData data = new ListConfigResourcesRequestData() - .setResourceTypes(List.of(t.id())); - assertThrows(UnsupportedVersionException.class, () -> new ListConfigResourcesRequest.Builder(data).build((short) 0)); - }); - - // Multiple types with CLIENT_METRICS - Arrays.stream(ConfigResource.Type.values()) - .filter(t -> t != ConfigResource.Type.CLIENT_METRICS) - .forEach(t -> { - ListConfigResourcesRequestData data = new ListConfigResourcesRequestData() - .setResourceTypes(List.of(t.id(), ConfigResource.Type.CLIENT_METRICS.id())); - assertThrows(UnsupportedVersionException.class, () -> new ListConfigResourcesRequest.Builder(data).build((short) 0)); - }); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java index bd6f98ed33937..954bb451014a2 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/TxnOffsetCommitResponseTest.java @@ -57,7 +57,7 @@ public void testParse() { for (short version : ApiKeys.TXN_OFFSET_COMMIT.allVersions()) { TxnOffsetCommitResponse response = TxnOffsetCommitResponse.parse( - MessageUtil.toByteBufferAccessor(data, version), version); + MessageUtil.toByteBuffer(data, version), version); assertEquals(expectedErrorCounts, response.errorCounts()); assertEquals(throttleTimeMs, response.throttleTimeMs()); assertEquals(version >= 1, response.shouldClientThrottle(version)); diff --git a/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java b/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java index e63d1949c8aec..b62b916d384a8 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/UpdateFeaturesRequestTest.java @@ -21,10 +21,10 @@ import org.apache.kafka.common.errors.UnsupportedVersionException; import org.apache.kafka.common.message.UpdateFeaturesRequestData; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.protocol.Readable; import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; import java.util.List; @@ -80,13 +80,13 @@ public void testUpdateFeaturesV0() { new UpdateFeaturesRequestData().setFeatureUpdates(features), UpdateFeaturesRequestData.LOWEST_SUPPORTED_VERSION ); - Readable readable = request.serialize(); - request = UpdateFeaturesRequest.parse(readable, UpdateFeaturesRequestData.LOWEST_SUPPORTED_VERSION); + ByteBuffer buffer = request.serialize(); + request = UpdateFeaturesRequest.parse(buffer, UpdateFeaturesRequestData.LOWEST_SUPPORTED_VERSION); List updates = new ArrayList<>(request.featureUpdates()); - assertEquals(2, updates.size()); - assertEquals(FeatureUpdate.UpgradeType.SAFE_DOWNGRADE, updates.get(0).upgradeType()); - assertEquals(FeatureUpdate.UpgradeType.UPGRADE, updates.get(1).upgradeType()); + assertEquals(updates.size(), 2); + assertEquals(updates.get(0).upgradeType(), FeatureUpdate.UpgradeType.SAFE_DOWNGRADE); + assertEquals(updates.get(1).upgradeType(), FeatureUpdate.UpgradeType.UPGRADE); } @Test @@ -110,13 +110,13 @@ public void testUpdateFeaturesV1() { UpdateFeaturesRequestData.HIGHEST_SUPPORTED_VERSION ); - Readable readable = request.serialize(); - request = UpdateFeaturesRequest.parse(readable, UpdateFeaturesRequestData.HIGHEST_SUPPORTED_VERSION); + ByteBuffer buffer = request.serialize(); + request = UpdateFeaturesRequest.parse(buffer, UpdateFeaturesRequestData.HIGHEST_SUPPORTED_VERSION); List updates = new ArrayList<>(request.featureUpdates()); - assertEquals(2, updates.size()); - assertEquals(FeatureUpdate.UpgradeType.SAFE_DOWNGRADE, updates.get(0).upgradeType()); - assertEquals(FeatureUpdate.UpgradeType.UPGRADE, updates.get(1).upgradeType()); + assertEquals(updates.size(), 2); + assertEquals(updates.get(0).upgradeType(), FeatureUpdate.UpgradeType.SAFE_DOWNGRADE); + assertEquals(updates.get(1).upgradeType(), FeatureUpdate.UpgradeType.UPGRADE); } diff --git a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java index d5c4d9e9631b5..b35a1cdce5999 100644 --- a/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java +++ b/clients/src/test/java/org/apache/kafka/common/requests/WriteTxnMarkersResponseTest.java @@ -23,7 +23,6 @@ import org.junit.jupiter.api.Test; import java.util.Collections; -import java.util.EnumMap; import java.util.HashMap; import java.util.Map; @@ -51,7 +50,7 @@ public void setUp() { @Test public void testConstructor() { - Map expectedErrorCounts = new EnumMap<>(Errors.class); + Map expectedErrorCounts = new HashMap<>(); expectedErrorCounts.put(Errors.UNKNOWN_PRODUCER_ID, 1); expectedErrorCounts.put(Errors.INVALID_PRODUCER_EPOCH, 1); WriteTxnMarkersResponse response = new WriteTxnMarkersResponse(errorMap); diff --git a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java index 760b1afc41fa6..59b08fc147691 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/JaasContextTest.java @@ -39,12 +39,10 @@ import javax.security.auth.login.AppConfigurationEntry.LoginModuleControlFlag; import javax.security.auth.login.Configuration; -import static org.apache.kafka.common.security.JaasContext.throwIfLoginModuleIsNotAllowed; import static org.apache.kafka.common.security.JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; -import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; /** @@ -226,7 +224,7 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { "SOME-MECHANISM", Collections.emptyMap())); - // clear disallowed login modules + //Remove default value for org.apache.kafka.disallowed.login.modules System.setProperty(DISALLOWED_LOGIN_MODULES_CONFIG, ""); checkConfiguration("com.sun.security.auth.module.JndiLoginModule", LoginModuleControlFlag.REQUIRED, new HashMap<>()); @@ -254,39 +252,6 @@ public void testDisallowedLoginModulesSystemProperty() throws Exception { checkEntry(context.configurationEntries().get(0), "com.sun.security.auth.module.LdapLoginModule", LoginModuleControlFlag.REQUISITE, Collections.emptyMap()); } - - @Test - void testAllowedLoginModulesSystemProperty() { - AppConfigurationEntry ldap = new AppConfigurationEntry( - "com.ibm.security.auth.module.LdapLoginModule", - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - Map.of() - ); - AppConfigurationEntry jndi = new AppConfigurationEntry( - "com.sun.security.auth.module.JndiLoginModule", - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - Map.of() - ); - // default - throwIfLoginModuleIsNotAllowed(ldap); - - // set allowed list, but not set disallowed list - System.setProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG, "com.ibm.security.auth.module.LdapLoginModule"); - throwIfLoginModuleIsNotAllowed(ldap); - assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(jndi)); - - // set both allowed list and disallowed list - System.setProperty(JaasUtils.DISALLOWED_LOGIN_MODULES_CONFIG, "com.ibm.security.auth.module.LdapLoginModule"); - throwIfLoginModuleIsNotAllowed(ldap); - assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(jndi)); - - // set disallowed list, but not set allowed list - System.clearProperty(JaasUtils.ALLOWED_LOGIN_MODULES_CONFIG); - IllegalArgumentException error = assertThrows(IllegalArgumentException.class, () -> throwIfLoginModuleIsNotAllowed(ldap)); - // Ensure the exception message includes the deprecation warning for the disallowed login modules config - assertTrue(error.getMessage().contains("The system property '" + DISALLOWED_LOGIN_MODULES_CONFIG + "' is deprecated.")); - throwIfLoginModuleIsNotAllowed(jndi); - } @Test public void testNumericOptionWithQuotes() throws Exception { diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java index 68bd5e7879fe1..821f99a3e4317 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/ClientAuthenticationFailureTest.java @@ -106,7 +106,7 @@ public void testProducerWithInvalidCredentials() { try (KafkaProducer producer = new KafkaProducer<>(props, serializer, serializer)) { ProducerRecord record = new ProducerRecord<>(topic, "message"); Future future = producer.send(record); - TestUtils.assertFutureThrows(SaslAuthenticationException.class, future); + TestUtils.assertFutureThrows(future, SaslAuthenticationException.class); } } @@ -116,7 +116,7 @@ public void testAdminClientWithInvalidCredentials() { props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + server.port()); try (Admin client = Admin.create(props)) { KafkaFuture> future = client.describeTopics(Collections.singleton("test")).allTopicNames(); - TestUtils.assertFutureThrows(SaslAuthenticationException.class, future); + TestUtils.assertFutureThrows(future, SaslAuthenticationException.class); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java index 13ffba2715d56..8261c90014cf3 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/authenticator/SaslAuthenticatorTest.java @@ -44,7 +44,6 @@ import org.apache.kafka.common.network.ChannelState; import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.network.ListenerName; -import org.apache.kafka.common.network.NetworkReceive; import org.apache.kafka.common.network.NetworkSend; import org.apache.kafka.common.network.NetworkTestUtils; import org.apache.kafka.common.network.NioEchoServer; @@ -52,7 +51,6 @@ import org.apache.kafka.common.network.Selector; import org.apache.kafka.common.network.TransportLayer; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.protocol.ByteBufferAccessor; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.types.SchemaException; import org.apache.kafka.common.requests.AbstractRequest; @@ -121,7 +119,6 @@ import java.util.Map; import java.util.Random; import java.util.Set; -import java.util.concurrent.Semaphore; import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Function; import java.util.stream.Collectors; @@ -158,7 +155,6 @@ public class SaslAuthenticatorTest { private static final long CONNECTIONS_MAX_REAUTH_MS_VALUE = 100L; private static final int BUFFER_SIZE = 4 * 1024; private static Time time = Time.SYSTEM; - private static boolean needLargeExpiration = false; private NioEchoServer server; private Selector selector; @@ -182,7 +178,6 @@ public void setup() throws Exception { @AfterEach public void teardown() throws Exception { - needLargeExpiration = false; if (server != null) this.server.close(); if (selector != null) @@ -768,7 +763,7 @@ public void testApiVersionsRequestWithServerUnsupportedVersion() throws Exceptio selector.send(new NetworkSend(node, request.toSend(header))); ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion((short) 0)); - ApiVersionsResponse response = ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), (short) 0); + ApiVersionsResponse response = ApiVersionsResponse.parse(responseBuffer, (short) 0); assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data().errorCode()); ApiVersion apiVersion = response.data().apiKeys().find(ApiKeys.API_VERSIONS.id); @@ -827,7 +822,7 @@ public void testInvalidApiVersionsRequest() throws Exception { ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion(version)); ApiVersionsResponse response = - ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), version); + ApiVersionsResponse.parse(responseBuffer, version); assertEquals(Errors.INVALID_REQUEST.code(), response.data().errorCode()); // Send ApiVersionsRequest with a supported version. This should succeed. @@ -866,7 +861,7 @@ public void testValidApiVersionsRequest() throws Exception { selector.send(new NetworkSend(node, request.toSend(header))); ByteBuffer responseBuffer = waitForResponse(); ResponseHeader.parse(responseBuffer, ApiKeys.API_VERSIONS.responseHeaderVersion(version)); - ApiVersionsResponse response = ApiVersionsResponse.parse(new ByteBufferAccessor(responseBuffer), version); + ApiVersionsResponse response = ApiVersionsResponse.parse(responseBuffer, version); assertEquals(Errors.NONE.code(), response.data().errorCode()); // Test that client can authenticate successfully @@ -1612,42 +1607,6 @@ public void testCannotReauthenticateWithDifferentPrincipal() throws Exception { server.verifyReauthenticationMetrics(0, 1); } - @Test - public void testReauthenticateWithLargeReauthValue() throws Exception { - // enable it, we'll get a large expiration timestamp token - needLargeExpiration = true; - String node = "0"; - SecurityProtocol securityProtocol = SecurityProtocol.SASL_SSL; - - configureMechanisms(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, - List.of(OAuthBearerLoginModule.OAUTHBEARER_MECHANISM)); - // set a large re-auth timeout in server side - saslServerConfigs.put(BrokerSecurityConfigs.CONNECTIONS_MAX_REAUTH_MS_CONFIG, Long.MAX_VALUE); - server = createEchoServer(securityProtocol); - - // set to default value for sasl login configs for initialization in ExpiringCredentialRefreshConfig - saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_FACTOR, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_FACTOR); - saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_WINDOW_JITTER, SaslConfigs.DEFAULT_LOGIN_REFRESH_WINDOW_JITTER); - saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_MIN_PERIOD_SECONDS, SaslConfigs.DEFAULT_LOGIN_REFRESH_MIN_PERIOD_SECONDS); - saslClientConfigs.put(SaslConfigs.SASL_LOGIN_REFRESH_BUFFER_SECONDS, SaslConfigs.DEFAULT_LOGIN_REFRESH_BUFFER_SECONDS); - saslClientConfigs.put(SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS, AlternateLoginCallbackHandler.class); - - createCustomClientConnection(securityProtocol, OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, node, true); - - // channel should be not null before sasl handshake - assertNotNull(selector.channel(node)); - - TestUtils.waitForCondition(() -> { - selector.poll(1000); - // this channel should be closed due to session timeout calculation overflow - return selector.channel(node) == null; - }, "channel didn't close with large re-authentication value"); - - // ensure metrics are as expected - server.verifyAuthenticationMetrics(0, 0); - server.verifyReauthenticationMetrics(0, 0); - } - @Test public void testCorrelationId() { SaslClientAuthenticator authenticator = new SaslClientAuthenticator( @@ -1896,69 +1855,6 @@ public void testSslClientAuthRequiredOverriddenForSaslSslListener() throws Excep verifySslClientAuthForSaslSslListener(false, SslClientAuth.REQUIRED); } - @Test - public void testServerSidePendingSendDuringReauthentication() throws Exception { - SecurityProtocol securityProtocol = SecurityProtocol.SASL_PLAINTEXT; - TestJaasConfig jaasConfig = configureMechanisms("PLAIN", Collections.singletonList("PLAIN")); - jaasConfig.createOrUpdateEntry(TestJaasConfig.LOGIN_CONTEXT_SERVER, PlainLoginModule.class.getName(), new HashMap<>()); - jaasConfig.setClientOptions("PLAIN", TestServerCallbackHandler.USERNAME, TestServerCallbackHandler.PASSWORD); - String callbackPrefix = ListenerName.forSecurityProtocol(securityProtocol).saslMechanismConfigPrefix("PLAIN"); - saslServerConfigs.put(callbackPrefix + BrokerSecurityConfigs.SASL_SERVER_CALLBACK_HANDLER_CLASS_CONFIG, - TestServerCallbackHandler.class.getName()); - server = createEchoServer(securityProtocol); - - String node = "node1"; - try { - createClientConnection(securityProtocol, node); - NetworkTestUtils.waitForChannelReady(selector, node); - server.verifyAuthenticationMetrics(1, 0); - - /* - * Now start the reauthentication on the connection. First, we have to sleep long enough so - * that the next write will cause re-authentication - */ - delay((long) (CONNECTIONS_MAX_REAUTH_MS_VALUE * 1.1)); - server.verifyReauthenticationMetrics(0, 0); - - // block reauthentication to complete - TestServerCallbackHandler.sem.acquire(); - - String prefix = TestUtils.randomString(100); - // send a client request to start a reauthentication. - selector.send(new NetworkSend(node, ByteBufferSend.sizePrefixed(ByteBuffer.wrap((prefix + "-0").getBytes(StandardCharsets.UTF_8))))); - // wait till reauthentication is blocked - TestUtils.waitForCondition(() -> { - selector.poll(10L); - return TestServerCallbackHandler.sem.hasQueuedThreads(); - }, 5000, "Reauthentication is not blocked"); - - // Set the client's channel `send` to null to allow setting a new send on the server's selector. - // Without this, NioEchoServer will throw an error while processing the client request, - // since we're manually setting a server side send to simulate the issue. - TestUtils.setFieldValue(selector.channel(node), "send", null); - - // extract the channel id from the server's selector and directly set a send on it. - String channelId = server.selector().channels().get(0).id(); - String payload = prefix + "-1"; - server.selector().send(new NetworkSend(channelId, ByteBufferSend.sizePrefixed(ByteBuffer.wrap(payload.getBytes(StandardCharsets.UTF_8))))); - // allow reauthentication to complete - TestServerCallbackHandler.sem.release(); - - TestUtils.waitForCondition(() -> { - selector.poll(10L); - for (NetworkReceive receive : selector.completedReceives()) { - assertEquals(payload, new String(Utils.toArray(receive.payload()), StandardCharsets.UTF_8)); - return true; - } - return false; - }, 5000, "Failed Receive the server send after reauthentication"); - - server.verifyReauthenticationMetrics(1, 0); - } finally { - closeClientConnectionIfNecessary(); - } - } - private void verifySslClientAuthForSaslSslListener(boolean useListenerPrefix, SslClientAuth configuredClientAuth) throws Exception { @@ -2040,7 +1936,7 @@ private void createClientConnection(SecurityProtocol securityProtocol, String sa if (enableSaslAuthenticateHeader) createClientConnection(securityProtocol, node); else - createCustomClientConnection(securityProtocol, saslMechanism, node, false); + createClientConnectionWithoutSaslAuthenticateHeader(securityProtocol, saslMechanism, node); } private NioEchoServer startServerApiVersionsUnsupportedByClient(final SecurityProtocol securityProtocol, String saslMechanism) throws Exception { @@ -2128,13 +2024,15 @@ protected void enableKafkaSaslAuthenticateHeaders(boolean flag) { return server; } - private SaslChannelBuilder saslChannelBuilderWithoutHeader( - final SecurityProtocol securityProtocol, - final String saslMechanism, - final Map jaasContexts, - final ListenerName listenerName - ) { - return new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, + private void createClientConnectionWithoutSaslAuthenticateHeader(final SecurityProtocol securityProtocol, + final String saslMechanism, String node) throws Exception { + + final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); + final Map configs = Collections.emptyMap(); + final JaasContext jaasContext = JaasContext.loadClientContext(configs); + final Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); + + SaslChannelBuilder clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, listenerName, false, saslMechanism, null, null, null, time, new LogContext(), null) { @@ -2161,42 +2059,6 @@ protected void setSaslAuthenticateAndHandshakeVersions(ApiVersionsResponse apiVe }; } }; - } - - private void createCustomClientConnection( - final SecurityProtocol securityProtocol, - final String saslMechanism, - String node, - boolean withSaslAuthenticateHeader - ) throws Exception { - - final ListenerName listenerName = ListenerName.forSecurityProtocol(securityProtocol); - final Map configs = Collections.emptyMap(); - final JaasContext jaasContext = JaasContext.loadClientContext(configs); - final Map jaasContexts = Collections.singletonMap(saslMechanism, jaasContext); - - SaslChannelBuilder clientChannelBuilder; - if (!withSaslAuthenticateHeader) { - clientChannelBuilder = saslChannelBuilderWithoutHeader(securityProtocol, saslMechanism, jaasContexts, listenerName); - } else { - clientChannelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, - securityProtocol, listenerName, false, saslMechanism, - null, null, null, time, new LogContext(), null) { - - @Override - protected SaslClientAuthenticator buildClientAuthenticator(Map configs, - AuthenticateCallbackHandler callbackHandler, - String id, - String serverHost, - String servicePrincipal, - TransportLayer transportLayer, - Subject subject) { - - return new SaslClientAuthenticator(configs, callbackHandler, id, subject, - servicePrincipal, serverHost, saslMechanism, transportLayer, time, new LogContext()); - } - }; - } clientChannelBuilder.configure(saslClientConfigs); this.selector = NetworkTestUtils.createSelector(clientChannelBuilder, time); InetSocketAddress addr = new InetSocketAddress("localhost", server.port()); @@ -2448,7 +2310,6 @@ public static class TestServerCallbackHandler extends PlainServerCallbackHandler static final String USERNAME = "TestServerCallbackHandler-user"; static final String PASSWORD = "TestServerCallbackHandler-password"; private volatile boolean configured; - public static Semaphore sem = new Semaphore(1); @Override public void configure(Map configs, String mechanism, List jaasConfigEntries) { @@ -2462,14 +2323,7 @@ public void configure(Map configs, String mechanism, List authenticator.authenticate()); verify(transportLayer, times(2)).read(any(ByteBuffer.class)); } @@ -156,7 +155,7 @@ public void testInvalidRequestHeader() throws IOException { return headerBuffer.remaining(); }); - assertThrows(InvalidRequestException.class, authenticator::authenticate); + assertThrows(InvalidRequestException.class, () -> authenticator.authenticate()); verify(transportLayer, times(2)).read(any(ByteBuffer.class)); } @@ -199,7 +198,7 @@ public void testSessionExpiresAtTokenExpiryDespiteNoReauthIsSet() throws IOExcep ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); assertEquals(tokenExpirationDuration.toMillis(), response.sessionLifetimeMs()); } } @@ -232,7 +231,7 @@ public void testSessionExpiresAtMaxReauthTime() throws IOException { ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); assertEquals(maxReauthMs, response.sessionLifetimeMs()); } } @@ -265,40 +264,11 @@ public void testSessionExpiresAtTokenExpiry() throws IOException { ByteBuffer secondResponseSent = getResponses(transportLayer).get(1); consumeSizeAndHeader(secondResponseSent); - SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(new ByteBufferAccessor(secondResponseSent), (short) 2); + SaslAuthenticateResponse response = SaslAuthenticateResponse.parse(secondResponseSent, (short) 2); assertEquals(tokenExpiryShorterThanMaxReauth.toMillis(), response.sessionLifetimeMs()); } } - @Test - public void testSessionWontExpireWithLargeExpirationTime() throws IOException { - String mechanism = OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; - SaslServer saslServer = mock(SaslServer.class); - MockTime time = new MockTime(0, 1, 1000); - // set a Long.MAX_VALUE as the expiration time - Duration largeExpirationTime = Duration.ofMillis(Long.MAX_VALUE); - - try ( - MockedStatic ignored = mockSaslServer(saslServer, mechanism, time, largeExpirationTime); - MockedStatic ignored2 = mockKafkaPrincipal("[principal-type]", "[principal-name"); - TransportLayer transportLayer = mockTransportLayer() - ) { - - SaslServerAuthenticator authenticator = getSaslServerAuthenticatorForOAuth(mechanism, transportLayer, time, largeExpirationTime.toMillis()); - - mockRequest(saslHandshakeRequest(mechanism), transportLayer); - authenticator.authenticate(); - - when(saslServer.isComplete()).thenReturn(false).thenReturn(true); - mockRequest(saslAuthenticateRequest(), transportLayer); - - Throwable t = assertThrows(IllegalArgumentException.class, authenticator::authenticate); - assertEquals(ArithmeticException.class, t.getCause().getClass()); - assertEquals("Cannot convert " + Long.MAX_VALUE + " millisecond to nanosecond due to arithmetic overflow", - t.getMessage()); - } - } - private SaslServerAuthenticator getSaslServerAuthenticatorForOAuth(String mechanism, TransportLayer transportLayer, Time time, Long maxReauth) { Map configs = Collections.singletonMap(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, Collections.singletonList(mechanism)); @@ -384,7 +354,7 @@ private void mockRequest(AbstractRequest request, TransportLayer transportLayer) private void mockRequest(RequestHeader header, AbstractRequest request, TransportLayer transportLayer) throws IOException { ByteBuffer headerBuffer = RequestTestUtils.serializeRequestHeader(header); - ByteBuffer requestBuffer = request.serialize().buffer(); + ByteBuffer requestBuffer = request.serialize(); requestBuffer.rewind(); when(transportLayer.read(any(ByteBuffer.class))).then(invocation -> { diff --git a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java index 31c01849bc7df..5980a0d3b3c5f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/kerberos/KerberosRuleTest.java @@ -26,12 +26,12 @@ public class KerberosRuleTest { @Test public void testReplaceParameters() throws BadFormatString { // positive test cases - assertEquals("", KerberosRule.replaceParameters("", new String[0])); - assertEquals("hello", KerberosRule.replaceParameters("hello", new String[0])); - assertEquals("", KerberosRule.replaceParameters("", new String[]{"too", "many", "parameters", "are", "ok"})); - assertEquals("hello", KerberosRule.replaceParameters("hello", new String[]{"too", "many", "parameters", "are", "ok"})); - assertEquals("hello too", KerberosRule.replaceParameters("hello $0", new String[]{"too", "many", "parameters", "are", "ok"})); - assertEquals("hello no recursion $1", KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"})); + assertEquals(KerberosRule.replaceParameters("", new String[0]), ""); + assertEquals(KerberosRule.replaceParameters("hello", new String[0]), "hello"); + assertEquals(KerberosRule.replaceParameters("", new String[]{"too", "many", "parameters", "are", "ok"}), ""); + assertEquals(KerberosRule.replaceParameters("hello", new String[]{"too", "many", "parameters", "are", "ok"}), "hello"); + assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"too", "many", "parameters", "are", "ok"}), "hello too"); + assertEquals(KerberosRule.replaceParameters("hello $0", new String[]{"no recursion $1"}), "hello no recursion $1"); // negative test cases assertThrows( diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java index 54857cd8cc07b..5b1b2976662b6 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerLoginCallbackHandlerTest.java @@ -21,15 +21,26 @@ import org.apache.kafka.common.security.auth.SaslExtensionsCallback; import org.apache.kafka.common.security.oauthbearer.internals.OAuthBearerClientInitialResponse; import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; +import org.apache.kafka.common.security.oauthbearer.internals.secured.FileTokenRetriever; +import org.apache.kafka.common.security.oauthbearer.internals.secured.HttpAccessTokenRetriever; import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; +import org.apache.kafka.common.utils.Utils; import org.jose4j.jws.AlgorithmIdentifiers; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; +import java.io.File; import java.io.IOException; +import java.util.Base64; +import java.util.Calendar; +import java.util.Collections; import java.util.HashMap; import java.util.Map; +import java.util.TimeZone; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; @@ -38,9 +49,8 @@ import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_ID_CONFIG; import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler.CLIENT_SECRET_CONFIG; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; -import static org.apache.kafka.test.TestUtils.tempFile; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -48,7 +58,6 @@ import static org.junit.jupiter.api.Assertions.fail; public class OAuthBearerLoginCallbackHandlerTest extends OAuthBearerTest { - @AfterEach public void tearDown() throws Exception { System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); @@ -61,10 +70,9 @@ public void testHandleTokenCallback() throws Exception { .jwk(createRsaJwk()) .alg(AlgorithmIdentifiers.RSA_USING_SHA256); String accessToken = builder.build(); - JwtRetriever jwtRetriever = () -> accessToken; - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); + AccessTokenRetriever accessTokenRetriever = () -> accessToken; + + OAuthBearerLoginCallbackHandler handler = createHandler(accessTokenRetriever, configs); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); @@ -83,6 +91,7 @@ public void testHandleTokenCallback() throws Exception { @Test public void testHandleSaslExtensionsCallback() throws Exception { + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); Map jaasConfig = new HashMap<>(); @@ -91,11 +100,7 @@ public void testHandleSaslExtensionsCallback() throws Exception { jaasConfig.put("extension_foo", "1"); jaasConfig.put("extension_bar", 2); jaasConfig.put("EXTENSION_baz", "3"); - - JwtRetriever jwtRetriever = createJwtRetriever(); - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(jaasConfig), jwtRetriever, jwtValidator); + configureHandler(handler, configs, jaasConfig); try { SaslExtensionsCallback callback = new SaslExtensionsCallback(); @@ -116,17 +121,14 @@ public void testHandleSaslExtensionsCallback() throws Exception { public void testHandleSaslExtensionsCallbackWithInvalidExtension() { String illegalKey = "extension_" + OAuthBearerClientInitialResponse.AUTH_KEY; + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); Map jaasConfig = new HashMap<>(); jaasConfig.put(CLIENT_ID_CONFIG, "an ID"); jaasConfig.put(CLIENT_SECRET_CONFIG, "a secret"); jaasConfig.put(illegalKey, "this key isn't allowed per OAuthBearerClientInitialResponse.validateExtensions"); - - JwtRetriever jwtRetriever = createJwtRetriever(); - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(jaasConfig), jwtRetriever, jwtValidator); + configureHandler(handler, configs, jaasConfig); try { SaslExtensionsCallback callback = new SaslExtensionsCallback(); @@ -141,10 +143,10 @@ public void testHandleSaslExtensionsCallbackWithInvalidExtension() { @Test public void testInvalidCallbackGeneratesUnsupportedCallbackException() { Map configs = getSaslConfigs(); - JwtRetriever jwtRetriever = () -> "test"; - JwtValidator jwtValidator = createJwtValidator(); OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); + AccessTokenRetriever accessTokenRetriever = () -> "foo"; + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); + handler.init(accessTokenRetriever, accessTokenValidator); try { Callback unsupportedCallback = new Callback() { }; @@ -158,23 +160,21 @@ public void testInvalidCallbackGeneratesUnsupportedCallbackException() { public void testInvalidAccessToken() throws Exception { testInvalidAccessToken("this isn't valid", "Malformed JWT provided"); testInvalidAccessToken("this.isn't.valid", "malformed Base64 URL encoded value"); - testInvalidAccessToken(createJwt("this", "isn't", "valid"), "malformed JSON"); - testInvalidAccessToken(createJwt("{}", "{}", "{}"), "exp value must be non-null"); + testInvalidAccessToken(createAccessKey("this", "isn't", "valid"), "malformed JSON"); + testInvalidAccessToken(createAccessKey("{}", "{}", "{}"), "exp value must be non-null"); } @Test public void testMissingAccessToken() { - Map configs = getSaslConfigs(); - JwtRetriever jwtRetriever = () -> { - throw new JwtRetrieverException("The token endpoint response access_token value must be non-null"); + AccessTokenRetriever accessTokenRetriever = () -> { + throw new IOException("The token endpoint response access_token value must be non-null"); }; - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); + Map configs = getSaslConfigs(); + OAuthBearerLoginCallbackHandler handler = createHandler(accessTokenRetriever, configs); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); - assertThrowsWithMessage(JwtRetrieverException.class, + assertThrowsWithMessage(IOException.class, () -> handler.handle(new Callback[]{callback}), "token endpoint response access_token value must be non-null"); } finally { @@ -184,18 +184,19 @@ public void testMissingAccessToken() { @Test public void testFileTokenRetrieverHandlesNewline() throws IOException { - String expected = createJwt("jdoe"); - String withNewline = expected + "\n"; + Calendar cal = Calendar.getInstance(TimeZone.getTimeZone("UTC")); + long cur = cal.getTimeInMillis() / 1000; + String exp = "" + (cur + 60 * 60); // 1 hour in future + String iat = "" + cur; - String accessTokenFile = tempFile(withNewline).toURI().toString(); + String expected = createAccessKey("{}", String.format("{\"exp\":%s, \"iat\":%s, \"sub\":\"subj\"}", exp, iat), "sign"); + String withNewline = expected + "\n"; - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile); - JwtRetriever jwtRetriever = new FileJwtRetriever(); - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); + File tmpDir = createTempDir("access-token"); + File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", withNewline); + Map configs = getSaslConfigs(); + OAuthBearerLoginCallbackHandler handler = createHandler(new FileTokenRetriever(accessTokenFile.toPath()), configs); OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); try { handler.handle(new Callback[]{callback}); @@ -210,15 +211,39 @@ public void testFileTokenRetrieverHandlesNewline() throws IOException { @Test public void testNotConfigured() { OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - assertThrowsWithMessage(IllegalStateException.class, () -> handler.handle(new Callback[] {}), "first call the configure method"); + assertThrowsWithMessage(IllegalStateException.class, () -> handler.handle(new Callback[] {}), "first call the configure or init method"); + } + + @Test + public void testConfigureWithAccessTokenFile() throws Exception { + String expected = "{}"; + + File tmpDir = createTempDir("access-token"); + File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", expected); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); + + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); + Map jaasConfigs = Collections.emptyMap(); + configureHandler(handler, configs, jaasConfigs); + assertInstanceOf(FileTokenRetriever.class, handler.getAccessTokenRetriever()); + } + + @Test + public void testConfigureWithAccessClientCredentials() { + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, "http://www.example.com"); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, "http://www.example.com"); + Map jaasConfigs = new HashMap<>(); + jaasConfigs.put(CLIENT_ID_CONFIG, "an ID"); + jaasConfigs.put(CLIENT_SECRET_CONFIG, "a secret"); + configureHandler(handler, configs, jaasConfigs); + assertInstanceOf(HttpAccessTokenRetriever.class, handler.getAccessTokenRetriever()); } private void testInvalidAccessToken(String accessToken, String expectedMessageSubstring) throws Exception { Map configs = getSaslConfigs(); - JwtRetriever jwtRetriever = () -> accessToken; - JwtValidator jwtValidator = createJwtValidator(); - OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); - handler.configure(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries(), jwtRetriever, jwtValidator); + OAuthBearerLoginCallbackHandler handler = createHandler(() -> accessToken, configs); try { OAuthBearerTokenCallback callback = new OAuthBearerTokenCallback(); @@ -235,11 +260,19 @@ private void testInvalidAccessToken(String accessToken, String expectedMessageSu } } - private static DefaultJwtRetriever createJwtRetriever() { - return new DefaultJwtRetriever(); + private String createAccessKey(String header, String payload, String signature) { + Base64.Encoder enc = Base64.getEncoder(); + header = enc.encodeToString(Utils.utf8(header)); + payload = enc.encodeToString(Utils.utf8(payload)); + signature = enc.encodeToString(Utils.utf8(signature)); + return String.format("%s.%s.%s", header, payload, signature); } - private static DefaultJwtValidator createJwtValidator() { - return new DefaultJwtValidator(); + private OAuthBearerLoginCallbackHandler createHandler(AccessTokenRetriever accessTokenRetriever, Map configs) { + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); + handler.init(accessTokenRetriever, accessTokenValidator); + return handler; } + } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java index adabec6bc958d..d682a05ec11cc 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/OAuthBearerValidatorCallbackHandlerTest.java @@ -17,29 +17,27 @@ package org.apache.kafka.common.security.oauthbearer; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenBuilder; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidator; +import org.apache.kafka.common.security.oauthbearer.internals.secured.AccessTokenValidatorFactory; import org.apache.kafka.common.security.oauthbearer.internals.secured.CloseableVerificationKeyResolver; import org.apache.kafka.common.security.oauthbearer.internals.secured.OAuthBearerTest; +import org.apache.kafka.common.utils.Utils; import org.jose4j.jws.AlgorithmIdentifiers; import org.junit.jupiter.api.Test; -import java.io.IOException; import java.util.Arrays; +import java.util.Base64; import java.util.List; import java.util.Map; import javax.security.auth.callback.Callback; -import javax.security.auth.login.AppConfigurationEntry; import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; public class OAuthBearerValidatorCallbackHandlerTest extends OAuthBearerTest { @@ -55,16 +53,7 @@ public void testBasic() throws Exception { String accessToken = builder.build(); Map configs = getSaslConfigs(SASL_OAUTHBEARER_EXPECTED_AUDIENCE, allAudiences); - CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); - JwtValidator jwtValidator = createJwtValidator(verificationKeyResolver); - OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); - handler.configure( - configs, - OAUTHBEARER_MECHANISM, - getJaasConfigEntries(), - verificationKeyResolver, - jwtValidator - ); + OAuthBearerValidatorCallbackHandler handler = createHandler(configs, builder); try { OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(accessToken); @@ -88,90 +77,13 @@ public void testInvalidAccessToken() throws Exception { String substring = "invalid_token"; assertInvalidAccessTokenFails("this isn't valid", substring); assertInvalidAccessTokenFails("this.isn't.valid", substring); - assertInvalidAccessTokenFails(createJwt("this", "isn't", "valid"), substring); - assertInvalidAccessTokenFails(createJwt("{}", "{}", "{}"), substring); - } - - @Test - public void testHandlerConfigureThrowsException() throws IOException { - KafkaException configureError = new KafkaException("configure() error"); - - AccessTokenBuilder builder = new AccessTokenBuilder() - .alg(AlgorithmIdentifiers.RSA_USING_SHA256); - CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); - JwtValidator jwtValidator = new JwtValidator() { - @Override - public void configure(Map configs, String saslMechanism, List jaasConfigEntries) { - throw configureError; - } - - @Override - public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { - return null; - } - }; - - OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); - - // An error initializing the JwtValidator should cause OAuthBearerValidatorCallbackHandler.init() to fail. - KafkaException error = assertThrows( - KafkaException.class, - () -> handler.configure( - getSaslConfigs(), - OAUTHBEARER_MECHANISM, - getJaasConfigEntries(), - verificationKeyResolver, - jwtValidator - ) - ); - assertEquals(configureError, error); - } - - @Test - public void testHandlerCloseDoesNotThrowException() throws IOException { - AccessTokenBuilder builder = new AccessTokenBuilder() - .alg(AlgorithmIdentifiers.RSA_USING_SHA256); - CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); - JwtValidator jwtValidator = new JwtValidator() { - @Override - public void close() throws IOException { - throw new IOException("close() error"); - } - - @Override - public OAuthBearerToken validate(String accessToken) throws JwtValidatorException { - return null; - } - }; - - OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); - handler.configure( - getSaslConfigs(), - OAUTHBEARER_MECHANISM, - getJaasConfigEntries(), - verificationKeyResolver, - jwtValidator - ); - - // An error closings the JwtValidator should *not* cause OAuthBearerValidatorCallbackHandler.close() to fail. - assertDoesNotThrow(handler::close); + assertInvalidAccessTokenFails(createAccessKey("this", "isn't", "valid"), substring); + assertInvalidAccessTokenFails(createAccessKey("{}", "{}", "{}"), substring); } private void assertInvalidAccessTokenFails(String accessToken, String expectedMessageSubstring) throws Exception { - AccessTokenBuilder builder = new AccessTokenBuilder() - .alg(AlgorithmIdentifiers.RSA_USING_SHA256); Map configs = getSaslConfigs(); - CloseableVerificationKeyResolver verificationKeyResolver = createVerificationKeyResolver(builder); - JwtValidator jwtValidator = createJwtValidator(verificationKeyResolver); - - OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); - handler.configure( - configs, - OAUTHBEARER_MECHANISM, - getJaasConfigEntries(), - verificationKeyResolver, - jwtValidator - ); + OAuthBearerValidatorCallbackHandler handler = createHandler(configs, new AccessTokenBuilder()); try { OAuthBearerValidatorCallback callback = new OAuthBearerValidatorCallback(accessToken); @@ -186,11 +98,22 @@ private void assertInvalidAccessTokenFails(String accessToken, String expectedMe } } - private JwtValidator createJwtValidator(CloseableVerificationKeyResolver verificationKeyResolver) { - return new DefaultJwtValidator(verificationKeyResolver); + private OAuthBearerValidatorCallbackHandler createHandler(Map options, + AccessTokenBuilder builder) { + OAuthBearerValidatorCallbackHandler handler = new OAuthBearerValidatorCallbackHandler(); + CloseableVerificationKeyResolver verificationKeyResolver = (jws, nestingContext) -> + builder.jwk().getPublicKey(); + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(options, verificationKeyResolver); + handler.init(verificationKeyResolver, accessTokenValidator); + return handler; } - private CloseableVerificationKeyResolver createVerificationKeyResolver(AccessTokenBuilder builder) { - return (jws, nestingContext) -> builder.jwk().getPublicKey(); + private String createAccessKey(String header, String payload, String signature) { + Base64.Encoder enc = Base64.getEncoder(); + header = enc.encodeToString(Utils.utf8(header)); + payload = enc.encodeToString(Utils.utf8(payload)); + signature = enc.encodeToString(Utils.utf8(signature)); + return String.format("%s.%s.%s", header, payload, signature); } + } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java index b0828d5d2815e..cc910e0d16c4f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenBuilder.java @@ -36,10 +36,6 @@ public class AccessTokenBuilder { - private final String scopeClaimName = "scope"; - - private final Long issuedAtSeconds; - private final ObjectMapper objectMapper = new ObjectMapper(); private String alg; @@ -52,6 +48,10 @@ public class AccessTokenBuilder { private Object scope = "engineering"; + private final String scopeClaimName = "scope"; + + private final Long issuedAtSeconds; + private Long expirationSeconds; private PublicJsonWebKey jwk; diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java new file mode 100644 index 0000000000000..3e85f7b0ce4fa --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenRetrieverFactoryTest.java @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.config.ConfigException; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.Arguments; +import org.junit.jupiter.params.provider.MethodSource; + +import java.io.File; +import java.util.Collections; +import java.util.Map; +import java.util.stream.Stream; + +import static org.apache.kafka.common.config.SaslConfigs.DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE; +import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL; +import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class AccessTokenRetrieverFactoryTest extends OAuthBearerTest { + + @AfterEach + public void tearDown() throws Exception { + System.clearProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); + } + + @Test + public void testConfigureRefreshingFileAccessTokenRetriever() throws Exception { + String expected = "{}"; + + File tmpDir = createTempDir("access-token"); + File accessTokenFile = createTempFile(tmpDir, "access-token-", ".json", expected); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); + Map configs = Collections.singletonMap(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); + Map jaasConfig = Collections.emptyMap(); + + try (AccessTokenRetriever accessTokenRetriever = AccessTokenRetrieverFactory.create(configs, jaasConfig)) { + accessTokenRetriever.init(); + assertEquals(expected, accessTokenRetriever.retrieve()); + } + } + + @Test + public void testConfigureRefreshingFileAccessTokenRetrieverWithInvalidDirectory() { + // Should fail because the parent path doesn't exist. + String file = new File("/tmp/this-directory-does-not-exist/foo.json").toURI().toString(); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, file); + Map jaasConfig = Collections.emptyMap(); + assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, jaasConfig), "that doesn't exist"); + } + + @Test + public void testConfigureRefreshingFileAccessTokenRetrieverWithInvalidFile() throws Exception { + // Should fail because while the parent path exists, the file itself doesn't. + File tmpDir = createTempDir("this-directory-does-exist"); + File accessTokenFile = new File(tmpDir, "this-file-does-not-exist.json"); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, accessTokenFile.toURI().toString()); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); + Map jaasConfig = Collections.emptyMap(); + assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, jaasConfig), "that doesn't exist"); + } + + @Test + public void testSaslOauthbearerTokenEndpointUrlIsNotAllowed() throws Exception { + // Should fail if the URL is not allowed + File tmpDir = createTempDir("not_allowed"); + File accessTokenFile = new File(tmpDir, "not_allowed.json"); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL, accessTokenFile.toURI().toString()); + assertThrowsWithMessage(ConfigException.class, () -> AccessTokenRetrieverFactory.create(configs, Collections.emptyMap()), + ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); + } + + @ParameterizedTest + @MethodSource("urlencodeHeaderSupplier") + public void testUrlencodeHeader(Map configs, boolean expectedValue) { + ConfigurationUtils cu = new ConfigurationUtils(configs); + boolean actualValue = AccessTokenRetrieverFactory.validateUrlencodeHeader(cu); + assertEquals(expectedValue, actualValue); + } + + private static Stream urlencodeHeaderSupplier() { + return Stream.of( + Arguments.of(Collections.emptyMap(), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, null), DEFAULT_SASL_OAUTHBEARER_HEADER_URLENCODE), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, true), true), + Arguments.of(Collections.singletonMap(SASL_OAUTHBEARER_HEADER_URLENCODE, false), false) + ); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java new file mode 100644 index 0000000000000..2fd02e3f9a826 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorFactoryTest.java @@ -0,0 +1,73 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginCallbackHandler; + +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Map; + +public class AccessTokenValidatorFactoryTest extends OAuthBearerTest { + + @Test + public void testConfigureThrowsExceptionOnAccessTokenValidatorInit() { + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + AccessTokenRetriever accessTokenRetriever = new AccessTokenRetriever() { + @Override + public void init() throws IOException { + throw new IOException("My init had an error!"); + } + @Override + public String retrieve() { + return "dummy"; + } + }; + + Map configs = getSaslConfigs(); + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); + + assertThrowsWithMessage( + KafkaException.class, () -> handler.init(accessTokenRetriever, accessTokenValidator), "encountered an error when initializing"); + } + + @Test + public void testConfigureThrowsExceptionOnAccessTokenValidatorClose() { + OAuthBearerLoginCallbackHandler handler = new OAuthBearerLoginCallbackHandler(); + AccessTokenRetriever accessTokenRetriever = new AccessTokenRetriever() { + @Override + public void close() throws IOException { + throw new IOException("My close had an error!"); + } + @Override + public String retrieve() { + return "dummy"; + } + }; + + Map configs = getSaslConfigs(); + AccessTokenValidator accessTokenValidator = AccessTokenValidatorFactory.create(configs); + handler.init(accessTokenRetriever, accessTokenValidator); + + // Basically asserting this doesn't throw an exception :( + handler.close(); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java new file mode 100644 index 0000000000000..0adaf34bbbeea --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/AccessTokenValidatorTest.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.jose4j.jws.AlgorithmIdentifiers; +import org.jose4j.jwx.HeaderParameterNames; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.TestInstance; +import org.junit.jupiter.api.TestInstance.Lifecycle; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +@TestInstance(Lifecycle.PER_CLASS) +public abstract class AccessTokenValidatorTest extends OAuthBearerTest { + + protected abstract AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder accessTokenBuilder) throws Exception; + + protected AccessTokenValidator createAccessTokenValidator() throws Exception { + AccessTokenBuilder builder = new AccessTokenBuilder(); + return createAccessTokenValidator(builder); + } + + @Test + public void testNull() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(null), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + } + + @Test + public void testEmptyString() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(""), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + } + + @Test + public void testWhitespace() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(" "), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + } + + @Test + public void testEmptySections() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + assertThrowsWithMessage(ValidateException.class, () -> validator.validate(".."), "Malformed JWT provided; expected three sections (header, payload, and signature)"); + } + + @Test + public void testMissingHeader() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + String header = ""; + String payload = createBase64JsonJwtSection(node -> { }); + String signature = ""; + String accessToken = String.format("%s.%s.%s", header, payload, signature); + assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + } + + @Test + public void testMissingPayload() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + String header = createBase64JsonJwtSection(node -> node.put(HeaderParameterNames.ALGORITHM, AlgorithmIdentifiers.NONE)); + String payload = ""; + String signature = ""; + String accessToken = String.format("%s.%s.%s", header, payload, signature); + assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + } + + @Test + public void testMissingSignature() throws Exception { + AccessTokenValidator validator = createAccessTokenValidator(); + String header = createBase64JsonJwtSection(node -> node.put(HeaderParameterNames.ALGORITHM, AlgorithmIdentifiers.NONE)); + String payload = createBase64JsonJwtSection(node -> { }); + String signature = ""; + String accessToken = String.format("%s.%s.%s", header, payload, signature); + assertThrows(ValidateException.class, () -> validator.validate(accessToken)); + } + +} \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java new file mode 100644 index 0000000000000..89387797cdc30 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ClaimValidationUtilsTest.java @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.junit.jupiter.api.Test; + +import java.util.Arrays; +import java.util.Set; +import java.util.SortedSet; +import java.util.TreeSet; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ClaimValidationUtilsTest extends OAuthBearerTest { + + @Test + public void testValidateScopes() { + Set scopes = ClaimValidationUtils.validateScopes("scope", Arrays.asList(" a ", " b ")); + + assertEquals(2, scopes.size()); + assertTrue(scopes.contains("a")); + assertTrue(scopes.contains("b")); + } + + @Test + public void testValidateScopesDisallowsDuplicates() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", "a"))); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", "b", " a "))); + } + + @Test + public void testValidateScopesDisallowsEmptyNullAndWhitespace() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", ""))); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", null))); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateScopes("scope", Arrays.asList("a", " "))); + } + + @Test + public void testValidateScopesResultIsImmutable() { + SortedSet callerSet = new TreeSet<>(Arrays.asList("a", "b", "c")); + Set scopes = ClaimValidationUtils.validateScopes("scope", callerSet); + + assertEquals(3, scopes.size()); + + callerSet.add("d"); + assertEquals(4, callerSet.size()); + assertTrue(callerSet.contains("d")); + assertEquals(3, scopes.size()); + assertFalse(scopes.contains("d")); + + callerSet.remove("c"); + assertEquals(3, callerSet.size()); + assertFalse(callerSet.contains("c")); + assertEquals(3, scopes.size()); + assertTrue(scopes.contains("c")); + + callerSet.clear(); + assertEquals(0, callerSet.size()); + assertEquals(3, scopes.size()); + } + + @Test + public void testValidateScopesResultThrowsExceptionOnMutation() { + SortedSet callerSet = new TreeSet<>(Arrays.asList("a", "b", "c")); + Set scopes = ClaimValidationUtils.validateScopes("scope", callerSet); + assertThrows(UnsupportedOperationException.class, scopes::clear); + } + + @Test + public void testValidateExpiration() { + Long expected = 1L; + Long actual = ClaimValidationUtils.validateExpiration("exp", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateExpirationAllowsZero() { + Long expected = 0L; + Long actual = ClaimValidationUtils.validateExpiration("exp", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateExpirationDisallowsNull() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateExpiration("exp", null)); + } + + @Test + public void testValidateExpirationDisallowsNegatives() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateExpiration("exp", -1L)); + } + + @Test + public void testValidateSubject() { + String expected = "jdoe"; + String actual = ClaimValidationUtils.validateSubject("sub", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateSubjectDisallowsEmptyNullAndWhitespace() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); + } + + @Test + public void testValidateClaimNameOverride() { + String expected = "email"; + String actual = ClaimValidationUtils.validateClaimNameOverride("sub", String.format(" %s ", expected)); + assertEquals(expected, actual); + } + + @Test + public void testValidateClaimNameOverrideDisallowsEmptyNullAndWhitespace() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", "")); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", null)); + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateSubject("sub", " ")); + } + + @Test + public void testValidateIssuedAt() { + Long expected = 1L; + Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateIssuedAtAllowsZero() { + Long expected = 0L; + Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateIssuedAtAllowsNull() { + Long expected = null; + Long actual = ClaimValidationUtils.validateIssuedAt("iat", expected); + assertEquals(expected, actual); + } + + @Test + public void testValidateIssuedAtDisallowsNegatives() { + assertThrows(ValidateException.class, () -> ClaimValidationUtils.validateIssuedAt("iat", -1L)); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java index efc41d64b3290..9a62f480215f7 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ConfigurationUtilsTest.java @@ -26,16 +26,16 @@ import java.io.File; import java.io.IOException; import java.util.Collections; +import java.util.HashMap; import java.util.Map; -import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; public class ConfigurationUtilsTest extends OAuthBearerTest { - private static final String URL_CONFIG_NAME = "fictitious.url.config"; - private static final String FILE_CONFIG_NAME = "fictitious.file.config"; + private static final String URL_CONFIG_NAME = "url"; + private static final String FILE_CONFIG_NAME = "file"; @AfterEach public void tearDown() throws Exception { @@ -59,7 +59,7 @@ public void testUrlCaseInsensitivity() { @Test public void testUrlFile() { - assertThrowsWithMessage(ConfigException.class, () -> testFileUrl("file:///tmp/foo.txt"), "that doesn't exist"); + testUrl("file:///tmp/foo.txt"); } @Test @@ -74,34 +74,41 @@ public void testUrlMissingProtocol() { @Test public void testUrlInvalidProtocol() { - assertThrowsWithMessage(ConfigException.class, () -> testFileUrl("ftp://ftp.example.com"), "invalid protocol"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl("ftp://ftp.example.com"), "invalid protocol"); } @Test public void testUrlNull() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl(null), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(null), "must be non-null"); } @Test public void testUrlEmptyString() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl(""), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(""), "must not contain only whitespace"); } @Test public void testUrlWhitespace() { - assertThrowsWithMessage(ConfigException.class, () -> testUrl(" "), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testUrl(" "), "must not contain only whitespace"); + } + + private void testUrl(String value) { + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); + Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); + ConfigurationUtils cu = new ConfigurationUtils(configs); + cu.validateUrl(URL_CONFIG_NAME); } @Test public void testFile() throws IOException { File file = TestUtils.tempFile("some contents!"); - testFile(file.getAbsolutePath()); + testFile(file.toURI().toURL().toString()); } @Test public void testFileWithSuperfluousWhitespace() throws IOException { File file = TestUtils.tempFile(); - testFile(String.format(" %s ", file.getAbsolutePath())); + testFile(String.format(" %s ", file.toURI().toURL())); } @Test @@ -116,90 +123,56 @@ public void testFileUnreadable() throws IOException { if (!file.setReadable(false)) throw new IllegalStateException(String.format("Can't test file permissions as test couldn't programmatically make temp file %s un-readable", file.getAbsolutePath())); - assertThrowsWithMessage(ConfigException.class, () -> testFile(file.getAbsolutePath()), "that doesn't have read permission"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(file.toURI().toURL().toString()), "that doesn't have read permission"); } @Test public void testFileNull() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(null), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(null), "must be non-null"); } @Test public void testFileEmptyString() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(""), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(""), "must not contain only whitespace"); } @Test public void testFileWhitespace() { - assertThrowsWithMessage(ConfigException.class, () -> testFile(" "), "is required"); + assertThrowsWithMessage(ConfigException.class, () -> testFile(" "), "must not contain only whitespace"); } @Test public void testThrowIfURLIsNotAllowed() { String url = "http://www.example.com"; String fileUrl = "file:///etc/passwd"; - ConfigurationUtils cu = new ConfigurationUtils(Map.of()); + Map configs = new HashMap<>(); + configs.put(URL_CONFIG_NAME, url); + configs.put(FILE_CONFIG_NAME, fileUrl); + ConfigurationUtils cu = new ConfigurationUtils(configs); // By default, no URL is allowed - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url), + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(url), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl), + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(fileUrl), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); // add one url into allowed list System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, url); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url)); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl), + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(url)); + assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfURLIsNotAllowed(fileUrl), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); // add all urls into allowed list System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, url + "," + fileUrl); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(URL_CONFIG_NAME, url)); - assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(FILE_CONFIG_NAME, fileUrl)); + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(url)); + assertDoesNotThrow(() -> cu.throwIfURLIsNotAllowed(fileUrl)); } - @Test - public void testThrowIfFileIsNotAllowed() { - String file1 = "file1"; - String file2 = "file2"; - ConfigurationUtils cu = new ConfigurationUtils(Map.of()); - - // By default, no file is allowed - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1), - ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1), - ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); - - // add one file into allowed list - System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, file1); - assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1)); - assertThrowsWithMessage(ConfigException.class, () -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file2), - ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG); - - // add all files into allowed list - System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, file1 + "," + file2); - assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file1)); - assertDoesNotThrow(() -> cu.throwIfFileIsNotAllowed(FILE_CONFIG_NAME, file2)); - } - - private void testUrl(String value) { + protected void testFile(String value) { System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); ConfigurationUtils cu = new ConfigurationUtils(configs); - cu.validateUrl(URL_CONFIG_NAME); + cu.validateFile(URL_CONFIG_NAME); } - private void testFile(String value) { - System.setProperty(ALLOWED_SASL_OAUTHBEARER_FILES_CONFIG, value == null ? "" : value); - Map configs = Collections.singletonMap(FILE_CONFIG_NAME, value); - ConfigurationUtils cu = new ConfigurationUtils(configs); - cu.validateFile(FILE_CONFIG_NAME); - } - - private void testFileUrl(String value) { - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, value == null ? "" : value); - Map configs = Collections.singletonMap(URL_CONFIG_NAME, value); - ConfigurationUtils cu = new ConfigurationUtils(configs); - cu.validateFileUrl(URL_CONFIG_NAME); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java new file mode 100644 index 0000000000000..8b1c5a370652e --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/HttpAccessTokenRetrieverTest.java @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import com.fasterxml.jackson.databind.ObjectMapper; +import com.fasterxml.jackson.databind.node.ObjectNode; + +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.net.HttpURLConnection; +import java.nio.charset.StandardCharsets; +import java.util.Random; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +public class HttpAccessTokenRetrieverTest extends OAuthBearerTest { + + @Test + public void test() throws IOException { + String expectedResponse = "Hiya, buddy"; + HttpURLConnection mockedCon = createHttpURLConnection(expectedResponse); + String response = HttpAccessTokenRetriever.post(mockedCon, null, null, null, null); + assertEquals(expectedResponse, response); + } + + @Test + public void testEmptyResponse() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection(""); + assertThrows(IOException.class, () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + } + + @Test + public void testErrorReadingResponse() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + + assertThrows(IOException.class, () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + } + + @Test + public void testErrorResponseUnretryableCode() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_BAD_REQUEST); + UnretryableException ioe = assertThrows(UnretryableException.class, + () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + } + + @Test + public void testErrorResponseRetryableCode() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"error\":\"some_arg\", \"error_description\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); + IOException ioe = assertThrows(IOException.class, + () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + + // error response body has different keys + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"errorCode\":\"some_arg\", \"errorSummary\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + ioe = assertThrows(IOException.class, + () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"some_arg\" - \"some problem with arg\"}")); + + // error response is valid json but unknown keys + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}" + .getBytes(StandardCharsets.UTF_8))); + ioe = assertThrows(IOException.class, + () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{\"err\":\"some_arg\", \"err_des\":\"some problem with arg\"}")); + } + + @Test + public void testErrorResponseIsInvalidJson() throws IOException { + HttpURLConnection mockedCon = createHttpURLConnection("dummy"); + when(mockedCon.getInputStream()).thenThrow(new IOException("Can't read")); + when(mockedCon.getErrorStream()).thenReturn(new ByteArrayInputStream( + "non json error output".getBytes(StandardCharsets.UTF_8))); + when(mockedCon.getResponseCode()).thenReturn(HttpURLConnection.HTTP_INTERNAL_ERROR); + IOException ioe = assertThrows(IOException.class, + () -> HttpAccessTokenRetriever.post(mockedCon, null, null, null, null)); + assertTrue(ioe.getMessage().contains("{non json error output}")); + } + + @Test + public void testCopy() throws IOException { + byte[] expected = new byte[4096 + 1]; + Random r = new Random(); + r.nextBytes(expected); + InputStream in = new ByteArrayInputStream(expected); + ByteArrayOutputStream out = new ByteArrayOutputStream(); + HttpAccessTokenRetriever.copy(in, out); + assertArrayEquals(expected, out.toByteArray()); + } + + @Test + public void testCopyError() throws IOException { + InputStream mockedIn = mock(InputStream.class); + OutputStream out = new ByteArrayOutputStream(); + when(mockedIn.read(any(byte[].class))).thenThrow(new IOException()); + assertThrows(IOException.class, () -> HttpAccessTokenRetriever.copy(mockedIn, out)); + } + + @Test + public void testParseAccessToken() throws IOException { + String expected = "abc"; + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("access_token", expected); + + String actual = HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node)); + assertEquals(expected, actual); + } + + @Test + public void testParseAccessTokenEmptyAccessToken() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("access_token", ""); + + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node))); + } + + @Test + public void testParseAccessTokenMissingAccessToken() { + ObjectMapper mapper = new ObjectMapper(); + ObjectNode node = mapper.createObjectNode(); + node.put("sub", "jdoe"); + + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.parseAccessToken(mapper.writeValueAsString(node))); + } + + @Test + public void testParseAccessTokenInvalidJson() { + assertThrows(IOException.class, () -> HttpAccessTokenRetriever.parseAccessToken("not valid JSON")); + } + + @Test + public void testFormatAuthorizationHeader() { + assertAuthorizationHeader("id", "secret", false, "Basic aWQ6c2VjcmV0"); + } + + @Test + public void testFormatAuthorizationHeaderEncoding() { + // according to RFC-7617, we need to use the *non-URL safe* base64 encoder. See KAFKA-14496. + assertAuthorizationHeader("SOME_RANDOM_LONG_USER_01234", "9Q|0`8i~ute-n9ksjLWb\\50\"AX@UUED5E", false, "Basic U09NRV9SQU5ET01fTE9OR19VU0VSXzAxMjM0OjlRfDBgOGl+dXRlLW45a3NqTFdiXDUwIkFYQFVVRUQ1RQ=="); + // according to RFC-6749 clientId & clientSecret must be urlencoded, see https://tools.ietf.org/html/rfc6749#section-2.3.1 + assertAuthorizationHeader("user!@~'", "secret-(*)!", true, "Basic dXNlciUyMSU0MCU3RSUyNzpzZWNyZXQtJTI4KiUyOSUyMQ=="); + } + + private void assertAuthorizationHeader(String clientId, String clientSecret, boolean urlencode, String expected) { + String actual = HttpAccessTokenRetriever.formatAuthorizationHeader(clientId, clientSecret, urlencode); + assertEquals(expected, actual, String.format("Expected the HTTP Authorization header generated for client ID \"%s\" and client secret \"%s\" to match", clientId, clientSecret)); + } + + @Test + public void testFormatAuthorizationHeaderMissingValues() { + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(null, "secret", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", null, false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(null, null, false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("", "secret", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", "", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("", "", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(" ", "secret", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader("id", " ", false)); + assertThrows(IllegalArgumentException.class, () -> HttpAccessTokenRetriever.formatAuthorizationHeader(" ", " ", false)); + } + + @Test + public void testFormatRequestBody() { + String expected = "grant_type=client_credentials&scope=scope"; + String actual = HttpAccessTokenRetriever.formatRequestBody("scope"); + assertEquals(expected, actual); + } + + @Test + public void testFormatRequestBodyWithEscaped() { + String questionMark = "%3F"; + String exclamationMark = "%21"; + + String expected = String.format("grant_type=client_credentials&scope=earth+is+great%s", exclamationMark); + String actual = HttpAccessTokenRetriever.formatRequestBody("earth is great!"); + assertEquals(expected, actual); + + expected = String.format("grant_type=client_credentials&scope=what+on+earth%s%s%s%s%s", questionMark, exclamationMark, questionMark, exclamationMark, questionMark); + actual = HttpAccessTokenRetriever.formatRequestBody("what on earth?!?!?"); + assertEquals(expected, actual); + } + + @Test + public void testFormatRequestBodyMissingValues() { + String expected = "grant_type=client_credentials"; + String actual = HttpAccessTokenRetriever.formatRequestBody(null); + assertEquals(expected, actual); + + actual = HttpAccessTokenRetriever.formatRequestBody(""); + assertEquals(expected, actual); + + actual = HttpAccessTokenRetriever.formatRequestBody(" "); + assertEquals(expected, actual); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java new file mode 100644 index 0000000000000..fc2e3d2a2e83a --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/LoginAccessTokenValidatorTest.java @@ -0,0 +1,27 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +public class LoginAccessTokenValidatorTest extends AccessTokenValidatorTest { + + @Override + protected AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder builder) { + return new LoginAccessTokenValidator(builder.scopeClaimName(), builder.subjectClaimName()); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java index 6cfee84178021..7f20b9464faea 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/OAuthBearerTest.java @@ -19,8 +19,9 @@ import org.apache.kafka.common.config.AbstractConfig; import org.apache.kafka.common.config.ConfigDef; +import org.apache.kafka.common.security.auth.AuthenticateCallbackHandler; +import org.apache.kafka.common.security.authenticator.TestJaasConfig; import org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule; -import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import com.fasterxml.jackson.databind.ObjectMapper; @@ -29,35 +30,24 @@ import org.jose4j.jwk.PublicJsonWebKey; import org.jose4j.jwk.RsaJsonWebKey; import org.jose4j.jwk.RsaJwkGenerator; -import org.jose4j.jwt.consumer.InvalidJwtException; -import org.jose4j.jwt.consumer.JwtConsumer; -import org.jose4j.jwt.consumer.JwtConsumerBuilder; -import org.jose4j.jwt.consumer.JwtContext; import org.jose4j.lang.JoseException; import org.junit.jupiter.api.TestInstance; import org.junit.jupiter.api.TestInstance.Lifecycle; import org.junit.jupiter.api.function.Executable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; import java.io.File; +import java.io.FileWriter; import java.io.IOException; import java.net.HttpURLConnection; import java.net.URL; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.StandardOpenOption; -import java.security.KeyPair; -import java.security.KeyPairGenerator; -import java.security.NoSuchAlgorithmException; -import java.security.PrivateKey; -import java.security.PublicKey; import java.util.Arrays; import java.util.Base64; import java.util.Collections; -import java.util.EnumSet; import java.util.Iterator; -import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.function.Consumer; @@ -73,6 +63,8 @@ @TestInstance(Lifecycle.PER_CLASS) public abstract class OAuthBearerTest { + protected final Logger log = LoggerFactory.getLogger(getClass()); + protected ObjectMapper mapper = new ObjectMapper(); protected void assertThrowsWithMessage(Class clazz, @@ -88,6 +80,18 @@ protected void assertErrorMessageContains(String actual, String expectedSubstrin expectedSubstring)); } + protected void configureHandler(AuthenticateCallbackHandler handler, + Map configs, + Map jaasConfig) { + TestJaasConfig config = new TestJaasConfig(); + config.createOrUpdateEntry("KafkaClient", OAuthBearerLoginModule.class.getName(), jaasConfig); + AppConfigurationEntry kafkaClient = config.getAppConfigurationEntry("KafkaClient")[0]; + + handler.configure(configs, + OAuthBearerLoginModule.OAUTHBEARER_MECHANISM, + Collections.singletonList(kafkaClient)); + } + protected String createBase64JsonJwtSection(Consumer c) { String json = createJsonJwtSection(c); @@ -143,6 +147,36 @@ protected HttpURLConnection createHttpURLConnection(String response) throws IOEx return mockedCon; } + protected File createTempDir(String directory) throws IOException { + File tmpDir = new File(System.getProperty("java.io.tmpdir")); + + if (directory != null) + tmpDir = new File(tmpDir, directory); + + if (!tmpDir.exists() && !tmpDir.mkdirs()) + throw new IOException("Could not create " + tmpDir); + + tmpDir.deleteOnExit(); + log.debug("Created temp directory {}", tmpDir); + return tmpDir; + } + + protected File createTempFile(File tmpDir, + String prefix, + String suffix, + String contents) + throws IOException { + File file = File.createTempFile(prefix, suffix, tmpDir); + log.debug("Created new temp file {}", file); + file.deleteOnExit(); + + try (FileWriter writer = new FileWriter(file)) { + writer.write(contents); + } + + return file; + } + protected Map getSaslConfigs(Map configs) { ConfigDef configDef = new ConfigDef(); configDef.withClientSaslSupport(); @@ -158,20 +192,6 @@ protected HttpURLConnection createHttpURLConnection(String response) throws IOEx return getSaslConfigs(Collections.emptyMap()); } - protected List getJaasConfigEntries() { - return getJaasConfigEntries(Map.of()); - } - - protected List getJaasConfigEntries(Map options) { - return List.of( - new AppConfigurationEntry( - OAuthBearerLoginModule.class.getName(), - AppConfigurationEntry.LoginModuleControlFlag.REQUIRED, - options - ) - ); - } - protected PublicJsonWebKey createRsaJwk() throws JoseException { RsaJsonWebKey jwk = RsaJwkGenerator.generateJwk(2048); jwk.setKeyId("key-1"); @@ -192,75 +212,4 @@ protected PublicJsonWebKey createEcJwk() throws JoseException { return jwk; } - protected String createJwt(String header, String payload, String signature) { - Base64.Encoder enc = Base64.getUrlEncoder(); - header = enc.encodeToString(Utils.utf8(header)); - payload = enc.encodeToString(Utils.utf8(payload)); - signature = enc.encodeToString(Utils.utf8(signature)); - return String.format("%s.%s.%s", header, payload, signature); - } - - protected String createJwt(String subject) { - Time time = Time.SYSTEM; - long nowSeconds = time.milliseconds() / 1000; - - return createJwt( - "{}", - String.format( - "{\"iat\":%s, \"exp\":%s, \"sub\":\"%s\"}", - nowSeconds, - nowSeconds + 300, - subject - ), - "sign" - ); - } - - - protected void assertClaims(PublicKey publicKey, String assertion) throws InvalidJwtException { - JwtConsumer jwtConsumer = jwtConsumer(publicKey); - jwtConsumer.processToClaims(assertion); - } - - protected JwtContext assertContext(PublicKey publicKey, String assertion) throws InvalidJwtException { - JwtConsumer jwtConsumer = jwtConsumer(publicKey); - return jwtConsumer.process(assertion); - } - - protected JwtConsumer jwtConsumer(PublicKey publicKey) { - return new JwtConsumerBuilder() - .setVerificationKey(publicKey) - .setRequireExpirationTime() - .setAllowedClockSkewInSeconds(30) // Sure, let's give it some slack - .build(); - } - - protected File generatePrivateKey(PrivateKey privateKey) throws IOException { - File file = File.createTempFile("private-", ".key"); - byte[] bytes = Base64.getEncoder().encode(privateKey.getEncoded()); - - try (FileChannel channel = FileChannel.open(file.toPath(), EnumSet.of(StandardOpenOption.WRITE))) { - Utils.writeFully(channel, ByteBuffer.wrap(bytes)); - } - - return file; - } - - protected File generatePrivateKey() throws IOException { - return generatePrivateKey(generateKeyPair().getPrivate()); - } - - protected KeyPair generateKeyPair() { - return generateKeyPair("RSA"); - } - - protected KeyPair generateKeyPair(String algorithm) { - try { - KeyPairGenerator keyGen = KeyPairGenerator.getInstance(algorithm); - keyGen.initialize(2048); - return keyGen.generateKeyPair(); - } catch (NoSuchAlgorithmException e) { - throw new IllegalStateException("Received unexpected error during private key generation", e); - } - } } \ No newline at end of file diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java new file mode 100644 index 0000000000000..4db20e9ee10d6 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/ValidatorAccessTokenValidatorTest.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.kafka.common.security.oauthbearer.internals.secured; + +import org.apache.kafka.common.security.oauthbearer.OAuthBearerToken; + +import org.jose4j.jwk.PublicJsonWebKey; +import org.jose4j.jws.AlgorithmIdentifiers; +import org.jose4j.lang.InvalidAlgorithmException; +import org.junit.jupiter.api.Test; + +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ValidatorAccessTokenValidatorTest extends AccessTokenValidatorTest { + + @Override + protected AccessTokenValidator createAccessTokenValidator(AccessTokenBuilder builder) { + return new ValidatorAccessTokenValidator(30, + Collections.emptySet(), + null, + (jws, nestingContext) -> builder.jwk().getKey(), + builder.scopeClaimName(), + builder.subjectClaimName()); + } + + @Test + public void testRsaEncryptionAlgorithm() throws Exception { + PublicJsonWebKey jwk = createRsaJwk(); + testEncryptionAlgorithm(jwk, AlgorithmIdentifiers.RSA_USING_SHA256); + } + + @Test + public void testEcdsaEncryptionAlgorithm() throws Exception { + PublicJsonWebKey jwk = createEcJwk(); + testEncryptionAlgorithm(jwk, AlgorithmIdentifiers.ECDSA_USING_P256_CURVE_AND_SHA256); + } + + @Test + public void testInvalidEncryptionAlgorithm() throws Exception { + PublicJsonWebKey jwk = createRsaJwk(); + + assertThrowsWithMessage(InvalidAlgorithmException.class, + () -> testEncryptionAlgorithm(jwk, "fake"), + "fake is an unknown, unsupported or unavailable alg algorithm"); + } + + @Test + public void testMissingSubShouldBeValid() throws Exception { + String subClaimName = "client_id"; + String subject = "otherSub"; + PublicJsonWebKey jwk = createRsaJwk(); + AccessTokenBuilder tokenBuilder = new AccessTokenBuilder() + .jwk(jwk) + .alg(AlgorithmIdentifiers.RSA_USING_SHA256) + .addCustomClaim(subClaimName, subject) + .subjectClaimName(subClaimName) + .subject(null); + AccessTokenValidator validator = createAccessTokenValidator(tokenBuilder); + + // Validation should succeed (e.g. signature verification) even if sub claim is missing + OAuthBearerToken token = validator.validate(tokenBuilder.build()); + + assertEquals(subject, token.principalName()); + } + + private void testEncryptionAlgorithm(PublicJsonWebKey jwk, String alg) throws Exception { + AccessTokenBuilder builder = new AccessTokenBuilder().jwk(jwk).alg(alg); + AccessTokenValidator validator = createAccessTokenValidator(builder); + String accessToken = builder.build(); + OAuthBearerToken token = validator.validate(accessToken); + + assertEquals(builder.subject(), token.principalName()); + assertEquals(builder.issuedAtSeconds() * 1000, token.startTimeMs()); + assertEquals(builder.expirationSeconds() * 1000, token.lifetimeMs()); + assertEquals(1, token.scope().size()); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java index b515255147f7b..c2324b9d2dac2 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/secured/VerificationKeyResolverFactoryTest.java @@ -28,8 +28,6 @@ import static org.apache.kafka.common.config.SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL; import static org.apache.kafka.common.config.internals.BrokerSecurityConfigs.ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG; -import static org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule.OAUTHBEARER_MECHANISM; -import static org.apache.kafka.test.TestUtils.tempFile; public class VerificationKeyResolverFactoryTest extends OAuthBearerTest { @@ -40,10 +38,15 @@ public void tearDown() throws Exception { @Test public void testConfigureRefreshingFileVerificationKeyResolver() throws Exception { - String file = tempFile("{}").toURI().toString(); - System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); - Map configs = Collections.singletonMap(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), "The JSON JWKS content does not include the keys member"); + File tmpDir = createTempDir("access-token"); + File verificationKeyFile = createTempFile(tmpDir, "access-token-", ".json", "{}"); + + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, verificationKeyFile.toURI().toString()); + Map configs = Collections.singletonMap(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); + Map jaasConfig = Collections.emptyMap(); + + // verify it won't throw exception + try (CloseableVerificationKeyResolver verificationKeyResolver = VerificationKeyResolverFactory.create(configs, jaasConfig)) { } } @Test @@ -52,15 +55,28 @@ public void testConfigureRefreshingFileVerificationKeyResolverWithInvalidDirecto String file = new File("/tmp/this-directory-does-not-exist/foo.json").toURI().toString(); System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, file); Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), "that doesn't exist"); + Map jaasConfig = Collections.emptyMap(); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, jaasConfig), "that doesn't exist"); + } + + @Test + public void testConfigureRefreshingFileVerificationKeyResolverWithInvalidFile() throws Exception { + // Should fail because while the parent path exists, the file itself doesn't. + File tmpDir = createTempDir("this-directory-does-exist"); + File verificationKeyFile = new File(tmpDir, "this-file-does-not-exist.json"); + System.setProperty(ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG, verificationKeyFile.toURI().toString()); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); + Map jaasConfig = Collections.emptyMap(); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, jaasConfig), "that doesn't exist"); } @Test public void testSaslOauthbearerTokenEndpointUrlIsNotAllowed() throws Exception { // Should fail if the URL is not allowed - String file = tempFile("{}").toURI().toString(); - Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, file); - assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, OAUTHBEARER_MECHANISM, getJaasConfigEntries()), + File tmpDir = createTempDir("not_allowed"); + File verificationKeyFile = new File(tmpDir, "not_allowed.json"); + Map configs = getSaslConfigs(SASL_OAUTHBEARER_JWKS_ENDPOINT_URL, verificationKeyFile.toURI().toString()); + assertThrowsWithMessage(ConfigException.class, () -> VerificationKeyResolverFactory.create(configs, Collections.emptyMap()), ALLOWED_SASL_OAUTHBEARER_URLS_CONFIG); } } diff --git a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java index 89e6de42c1dc5..097a14366d83a 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/oauthbearer/internals/unsecured/OAuthBearerUnsecuredLoginCallbackHandlerTest.java @@ -31,7 +31,6 @@ import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import javax.security.auth.callback.Callback; import javax.security.auth.callback.UnsupportedCallbackException; @@ -87,7 +86,7 @@ public void minimalToken() throws IOException, UnsupportedCallbackException { assertNotNull(jws, "create token failed"); long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, 1000 * 60 * 60); - assertEquals(Set.of("sub", "iat", "exp"), jws.claims().keySet()); + assertEquals(new HashSet<>(Arrays.asList("sub", "iat", "exp")), jws.claims().keySet()); } @SuppressWarnings("unchecked") @@ -124,11 +123,11 @@ public void validOptionsWithExplicitOptionValues() long startMs = mockTime.milliseconds(); confirmCorrectValues(jws, user, startMs, lifetimeSeconds * 1000); Map claims = jws.claims(); - assertEquals(Set.of(actualScopeClaimName, principalClaimName, "iat", "exp", "number", - "list", "emptyList1", "emptyList2"), claims.keySet()); - assertEquals(Set.of(explicitScope1, explicitScope2), + assertEquals(new HashSet<>(Arrays.asList(actualScopeClaimName, principalClaimName, "iat", "exp", "number", + "list", "emptyList1", "emptyList2")), claims.keySet()); + assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), new HashSet<>((List) claims.get(actualScopeClaimName))); - assertEquals(Set.of(explicitScope1, explicitScope2), jws.scope()); + assertEquals(new HashSet<>(Arrays.asList(explicitScope1, explicitScope2)), jws.scope()); assertEquals(1.0, jws.claim("number", Number.class)); assertEquals(Arrays.asList("1", "2", ""), jws.claim("list", List.class)); assertEquals(Collections.emptyList(), jws.claim("emptyList1", List.class)); @@ -152,7 +151,7 @@ private static OAuthBearerUnsecuredLoginCallbackHandler createCallbackHandler(Ma private static void confirmCorrectValues(OAuthBearerUnsecuredJws jws, String user, long startMs, long lifetimeSeconds) throws OAuthBearerIllegalTokenException { Map header = jws.header(); - assertEquals(1, header.size()); + assertEquals(header.size(), 1); assertEquals("none", header.get("alg")); assertEquals(user != null ? user : "", jws.principalName()); assertEquals(Long.valueOf(startMs), jws.startTimeMs()); diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java index ed76495fa03a8..232d4d7327bf6 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/DefaultSslEngineFactoryTest.java @@ -202,8 +202,6 @@ public class DefaultSslEngineFactoryTest { public void setUp() { factory = sslEngineFactory(); configs.put(SslConfigs.SSL_PROTOCOL_CONFIG, "TLSv1.2"); - configs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); - configs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of()); } protected DefaultSslEngineFactory sslEngineFactory() { @@ -218,7 +216,7 @@ public void testPemTrustStoreConfigWithOneCert() throws Exception { KeyStore trustStore = factory.truststore(); List aliases = Collections.list(trustStore.aliases()); - assertEquals(List.of("kafka0"), aliases); + assertEquals(Collections.singletonList("kafka0"), aliases); assertNotNull(trustStore.getCertificate("kafka0"), "Certificate not loaded"); assertNull(trustStore.getKey("kafka0", null), "Unexpected private key"); } @@ -272,7 +270,7 @@ private void verifyPemKeyStoreConfig(String keyFileName, Password keyPassword) t KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(List.of("kafka"), aliases); + assertEquals(Collections.singletonList("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not loaded"); assertNotNull(keyStore.getKey("kafka", keyPassword == null ? null : keyPassword.value().toCharArray()), "Private key not loaded"); @@ -286,7 +284,7 @@ public void testPemTrustStoreFile() throws Exception { KeyStore trustStore = factory.truststore(); List aliases = Collections.list(trustStore.aliases()); - assertEquals(List.of("kafka0"), aliases); + assertEquals(Collections.singletonList("kafka0"), aliases); assertNotNull(trustStore.getCertificate("kafka0"), "Certificate not found"); assertNull(trustStore.getKey("kafka0", null), "Unexpected private key"); } @@ -301,7 +299,7 @@ public void testPemKeyStoreFileNoKeyPassword() throws Exception { KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(List.of("kafka"), aliases); + assertEquals(Collections.singletonList("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not loaded"); assertNotNull(keyStore.getKey("kafka", null), "Private key not loaded"); } @@ -316,7 +314,7 @@ public void testPemKeyStoreFileWithKeyPassword() throws Exception { KeyStore keyStore = factory.keystore(); List aliases = Collections.list(keyStore.aliases()); - assertEquals(List.of("kafka"), aliases); + assertEquals(Collections.singletonList("kafka"), aliases); assertNotNull(keyStore.getCertificate("kafka"), "Certificate not found"); assertNotNull(keyStore.getKey("kafka", KEY_PASSWORD.value().toCharArray()), "Private key not found"); } diff --git a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java index 1c82dc62ff0d4..6770e4702ff1f 100644 --- a/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java +++ b/clients/src/test/java/org/apache/kafka/common/security/ssl/SslFactoryTest.java @@ -359,6 +359,7 @@ public void testPemReconfiguration() throws Exception { sslConfig = new TestSecurityConfig(props); sslFactory.reconfigure(sslConfig.values()); assertNotSame(sslEngineFactory, sslFactory.sslEngineFactory(), "SslEngineFactory not recreated"); + sslEngineFactory = sslFactory.sslEngineFactory(); } @Test @@ -399,15 +400,15 @@ public void testUntrustedKeyStoreValidationFails() throws Exception { @Test public void testKeystoreVerifiableUsingTruststore() throws Exception { - verifyKeystoreVerifiableUsingTruststore(false); + verifyKeystoreVerifiableUsingTruststore(false, tlsProtocol); } @Test public void testPemKeystoreVerifiableUsingTruststore() throws Exception { - verifyKeystoreVerifiableUsingTruststore(true); + verifyKeystoreVerifiableUsingTruststore(true, tlsProtocol); } - private void verifyKeystoreVerifiableUsingTruststore(boolean usePem) throws Exception { + private void verifyKeystoreVerifiableUsingTruststore(boolean usePem, String tlsProtocol) throws Exception { File trustStoreFile1 = usePem ? null : TestUtils.tempFile("truststore1", ".jks"); Map sslConfig1 = sslConfigsBuilder(ConnectionMode.SERVER) .createNewTrustStore(trustStoreFile1) @@ -435,15 +436,15 @@ private void verifyKeystoreVerifiableUsingTruststore(boolean usePem) throws Exce @Test public void testCertificateEntriesValidation() throws Exception { - verifyCertificateEntriesValidation(false); + verifyCertificateEntriesValidation(false, tlsProtocol); } @Test public void testPemCertificateEntriesValidation() throws Exception { - verifyCertificateEntriesValidation(true); + verifyCertificateEntriesValidation(true, tlsProtocol); } - private void verifyCertificateEntriesValidation(boolean usePem) throws Exception { + private void verifyCertificateEntriesValidation(boolean usePem, String tlsProtocol) throws Exception { File trustStoreFile = usePem ? null : TestUtils.tempFile("truststore", ".jks"); Map serverSslConfig = sslConfigsBuilder(ConnectionMode.SERVER) .createNewTrustStore(trustStoreFile) diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java index 935c02dbf833f..b708b4eeb602d 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryReporterTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.consumer.ConsumerConfig; import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.message.GetTelemetrySubscriptionsRequestData; import org.apache.kafka.common.message.GetTelemetrySubscriptionsResponseData; @@ -64,10 +63,8 @@ import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertNull; -import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; -import static org.mockito.ArgumentMatchers.eq; public class ClientTelemetryReporterTest { @@ -416,134 +413,6 @@ public void testCreateRequestPushCompressionException() { } } - @Test - public void testCreateRequestPushCompressionFallbackToNextType() { - clientTelemetryReporter.configure(configs); - clientTelemetryReporter.contextChange(metricsContext); - - ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // Set up subscription with multiple compression types: GZIP -> LZ4 -> SNAPPY - ClientTelemetryReporter.ClientTelemetrySubscription subscription = new ClientTelemetryReporter.ClientTelemetrySubscription( - uuid, 1234, 20000, List.of(CompressionType.GZIP, CompressionType.LZ4, CompressionType.SNAPPY), true, null); - telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); - - try (MockedStatic mockedCompress = Mockito.mockStatic(ClientTelemetryUtils.class, new CallsRealMethods())) { - // First request: GZIP fails with NoClassDefFoundError, should use NONE for this request - mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.GZIP))).thenThrow(new NoClassDefFoundError("GZIP not available")); - - Optional> requestOptional = telemetrySender.createRequest(); - assertNotNull(requestOptional); - assertTrue(requestOptional.isPresent()); - assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); - PushTelemetryRequest request = (PushTelemetryRequest) requestOptional.get().build(); - - // Should fallback to NONE for this request (GZIP gets cached as unsupported) - assertEquals(CompressionType.NONE.id, request.data().compressionType()); - assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); - - // Reset state for next request - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // Second request: LZ4 is selected (since GZIP is now cached as unsupported), LZ4 fails, should use NONE - // Note that some libraries eg. LZ4 return KafkaException with cause as NoClassDefFoundError - mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.LZ4))).thenThrow(new KafkaException(new NoClassDefFoundError("LZ4 not available"))); - - requestOptional = telemetrySender.createRequest(); - assertNotNull(requestOptional); - assertTrue(requestOptional.isPresent()); - assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); - request = (PushTelemetryRequest) requestOptional.get().build(); - - // Should fallback to NONE for this request (LZ4 gets cached as unsupported) - assertEquals(CompressionType.NONE.id, request.data().compressionType()); - assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); - - // Reset state for next request - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // Third request: SNAPPY is selected (since GZIP and LZ4 are now cached as unsupported), SNAPPY fails, should use NONE - mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.SNAPPY))).thenThrow(new NoClassDefFoundError("SNAPPY not available")); - - requestOptional = telemetrySender.createRequest(); - assertNotNull(requestOptional); - assertTrue(requestOptional.isPresent()); - assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); - request = (PushTelemetryRequest) requestOptional.get().build(); - - // Should fallback to NONE for this request (SNAPPY gets cached as unsupported) - assertEquals(CompressionType.NONE.id, request.data().compressionType()); - assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); - - // Reset state for next request - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // Fourth request: All compression types are now cached as unsupported, should use NONE directly - requestOptional = telemetrySender.createRequest(); - assertNotNull(requestOptional); - assertTrue(requestOptional.isPresent()); - assertInstanceOf(PushTelemetryRequest.class, requestOptional.get().build()); - request = (PushTelemetryRequest) requestOptional.get().build(); - - // Should use NONE directly (no compression types are supported) - assertEquals(CompressionType.NONE.id, request.data().compressionType()); - assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); - } - } - - @Test - public void testCreateRequestPushCompressionFallbackAndTermination() { - clientTelemetryReporter.configure(configs); - clientTelemetryReporter.contextChange(metricsContext); - - ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.SUBSCRIPTION_IN_PROGRESS)); - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // Set up subscription with ZSTD compression type - ClientTelemetryReporter.ClientTelemetrySubscription subscription = new ClientTelemetryReporter.ClientTelemetrySubscription( - uuid, 1234, 20000, List.of(CompressionType.ZSTD, CompressionType.LZ4), true, null); - telemetrySender.updateSubscriptionResult(subscription, time.milliseconds()); - - try (MockedStatic mockedCompress = Mockito.mockStatic(ClientTelemetryUtils.class, new CallsRealMethods())) { - - // === Test 1: NoClassDefFoundError fallback (recoverable) === - mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.ZSTD))) - .thenThrow(new NoClassDefFoundError("com/github/luben/zstd/BufferPool")); - - assertEquals(ClientTelemetryState.PUSH_NEEDED, telemetrySender.state()); - - Optional> request1 = telemetrySender.createRequest(); - assertNotNull(request1); - assertTrue(request1.isPresent()); - assertInstanceOf(PushTelemetryRequest.class, request1.get().build()); - PushTelemetryRequest pushRequest1 = (PushTelemetryRequest) request1.get().build(); - assertEquals(CompressionType.NONE.id, pushRequest1.data().compressionType()); // Fallback to NONE - assertEquals(ClientTelemetryState.PUSH_IN_PROGRESS, telemetrySender.state()); - - // Reset state (simulate successful response handling) - assertTrue(telemetrySender.maybeSetState(ClientTelemetryState.PUSH_NEEDED)); - - // === Test 2: OutOfMemoryError causes termination (non-recoverable Error) === - mockedCompress.reset(); - mockedCompress.when(() -> ClientTelemetryUtils.compress(any(), eq(CompressionType.LZ4))) - .thenThrow(new OutOfMemoryError("Out of memory during compression")); - - assertEquals(ClientTelemetryState.PUSH_NEEDED, telemetrySender.state()); - - assertThrows(KafkaException.class, telemetrySender::createRequest); - assertEquals(ClientTelemetryState.TERMINATED, telemetrySender.state()); - - // === Test 3: After termination, no more requests === - Optional> request3 = telemetrySender.createRequest(); - assertNotNull(request3); - assertFalse(request3.isPresent()); // No request created - assertEquals(ClientTelemetryState.TERMINATED, telemetrySender.state()); // State remains TERMINATED - } - } - @Test public void testHandleResponseGetSubscriptions() { ClientTelemetryReporter.DefaultClientTelemetrySender telemetrySender = (ClientTelemetryReporter.DefaultClientTelemetrySender) clientTelemetryReporter.telemetrySender(); diff --git a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java index 47925ff8e0a02..41679bed3f7ac 100644 --- a/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/telemetry/internals/ClientTelemetryUtilsTest.java @@ -30,9 +30,10 @@ import java.nio.ByteBuffer; import java.time.Instant; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Optional; -import java.util.Set; import java.util.function.Predicate; import io.opentelemetry.proto.metrics.v1.Metric; @@ -68,12 +69,12 @@ public void testMaybeFetchErrorIntervalMs() { @Test public void testGetSelectorFromRequestedMetrics() { // no metrics selector - assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of())); + assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(Collections.emptyList())); assertEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(null)); // all metrics selector - assertEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of("*"))); + assertEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, ClientTelemetryUtils.getSelectorFromRequestedMetrics(Collections.singletonList("*"))); // specific metrics selector - Predicate selector = ClientTelemetryUtils.getSelectorFromRequestedMetrics(List.of("metric1", "metric2")); + Predicate selector = ClientTelemetryUtils.getSelectorFromRequestedMetrics(Arrays.asList("metric1", "metric2")); assertNotEquals(ClientTelemetryUtils.SELECTOR_NO_METRICS, selector); assertNotEquals(ClientTelemetryUtils.SELECTOR_ALL_METRICS, selector); assertTrue(selector.test(new MetricKey("metric1.test"))); @@ -85,7 +86,7 @@ public void testGetSelectorFromRequestedMetrics() { @Test public void testGetCompressionTypesFromAcceptedList() { assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(null).size()); - assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(List.of()).size()); + assertEquals(0, ClientTelemetryUtils.getCompressionTypesFromAcceptedList(Collections.emptyList()).size()); List compressionTypes = new ArrayList<>(); compressionTypes.add(CompressionType.GZIP.id); @@ -122,24 +123,10 @@ public void testValidateIntervalMsInvalid(int pushIntervalMs) { @Test public void testPreferredCompressionType() { - // Test with no unsupported types - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(), Set.of())); - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.NONE, CompressionType.GZIP), Set.of())); - assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.NONE), Set.of())); - - // Test unsupported type filtering (returns first available type, or NONE if all are unsupported) - assertEquals(CompressionType.LZ4, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.GZIP))); - assertEquals(CompressionType.SNAPPY, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4, CompressionType.SNAPPY), Set.of(CompressionType.GZIP, CompressionType.LZ4))); - assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.GZIP, CompressionType.LZ4))); - - // Test edge case: no match between requested and supported types - assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.LZ4), Set.of(CompressionType.SNAPPY))); - - // Test NullPointerException for null parameters - assertThrows(NullPointerException.class, () -> - ClientTelemetryUtils.preferredCompressionType(null, Set.of())); - assertThrows(NullPointerException.class, () -> - ClientTelemetryUtils.preferredCompressionType(List.of(CompressionType.GZIP, CompressionType.NONE), null)); + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(Collections.emptyList())); + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(null)); + assertEquals(CompressionType.NONE, ClientTelemetryUtils.preferredCompressionType(Arrays.asList(CompressionType.NONE, CompressionType.GZIP))); + assertEquals(CompressionType.GZIP, ClientTelemetryUtils.preferredCompressionType(Arrays.asList(CompressionType.GZIP, CompressionType.NONE))); } @ParameterizedTest @@ -163,19 +150,19 @@ public void testCompressDecompress(CompressionType compressionType) throws IOExc private MetricsData getMetricsData() { List metricsList = new ArrayList<>(); metricsList.add(SinglePointMetric.sum( - new MetricKey("metricName"), 1.0, true, Instant.now(), null, Set.of()) + new MetricKey("metricName"), 1.0, true, Instant.now(), null, Collections.emptySet()) .builder().build()); metricsList.add(SinglePointMetric.sum( - new MetricKey("metricName1"), 100.0, false, Instant.now(), Instant.now(), Set.of()) + new MetricKey("metricName1"), 100.0, false, Instant.now(), Instant.now(), Collections.emptySet()) .builder().build()); metricsList.add(SinglePointMetric.deltaSum( - new MetricKey("metricName2"), 1.0, true, Instant.now(), Instant.now(), Set.of()) + new MetricKey("metricName2"), 1.0, true, Instant.now(), Instant.now(), Collections.emptySet()) .builder().build()); metricsList.add(SinglePointMetric.gauge( - new MetricKey("metricName3"), 1.0, Instant.now(), Set.of()) + new MetricKey("metricName3"), 1.0, Instant.now(), Collections.emptySet()) .builder().build()); metricsList.add(SinglePointMetric.gauge( - new MetricKey("metricName4"), Long.valueOf(100), Instant.now(), Set.of()) + new MetricKey("metricName4"), Long.valueOf(100), Instant.now(), Collections.emptySet()) .builder().build()); MetricsData.Builder builder = MetricsData.newBuilder(); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java b/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java index 7e153be5862c7..aac13f299fe2d 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/AppInfoParserTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.common.utils; -import org.apache.kafka.common.metrics.MetricConfig; import org.apache.kafka.common.metrics.Metrics; import org.junit.jupiter.api.AfterEach; @@ -24,7 +23,6 @@ import org.junit.jupiter.api.Test; import java.lang.management.ManagementFactory; -import java.util.Map; import javax.management.JMException; import javax.management.MBeanServer; @@ -43,49 +41,38 @@ public class AppInfoParserTest { private static final String METRICS_PREFIX = "app-info-test"; private static final String METRICS_ID = "test"; + private Metrics metrics; private MBeanServer mBeanServer; @BeforeEach public void setUp() { + metrics = new Metrics(new MockTime(1)); mBeanServer = ManagementFactory.getPlatformMBeanServer(); } @AfterEach - public void tearDown() throws JMException { - if (mBeanServer.isRegistered(expectedAppObjectName())) { - mBeanServer.unregisterMBean(expectedAppObjectName()); - } + public void tearDown() { + metrics.close(); } @Test public void testRegisterAppInfoRegistersMetrics() throws JMException { - try (Metrics metrics = new Metrics(new MockTime(1))) { - registerAppInfo(metrics); - registerAppInfoMultipleTimes(metrics); - AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); - } + registerAppInfo(); + registerAppInfoMultipleTimes(); } @Test public void testUnregisterAppInfoUnregistersMetrics() throws JMException { - try (Metrics metrics = new Metrics(new MockTime(1))) { - registerAppInfo(metrics); - AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); - - assertFalse(mBeanServer.isRegistered(expectedAppObjectName())); - assertNull(metrics.metric(metrics.metricName("commit-id", "app-info"))); - assertNull(metrics.metric(metrics.metricName("version", "app-info"))); - assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info"))); - - Map idTag = Map.of("client-id", METRICS_ID); - assertNull(metrics.metric(metrics.metricName("commit-id", "app-info", idTag))); - assertNull(metrics.metric(metrics.metricName("version", "app-info", idTag))); - assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag))); - AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); - } + registerAppInfo(); + AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); + + assertFalse(mBeanServer.isRegistered(expectedAppObjectName())); + assertNull(metrics.metric(metrics.metricName("commit-id", "app-info"))); + assertNull(metrics.metric(metrics.metricName("version", "app-info"))); + assertNull(metrics.metric(metrics.metricName("start-time-ms", "app-info"))); } - private void registerAppInfo(Metrics metrics) throws JMException { + private void registerAppInfo() throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, AppInfoParser.getCommitId()); assertEquals(EXPECTED_VERSION, AppInfoParser.getVersion()); @@ -95,15 +82,9 @@ private void registerAppInfo(Metrics metrics) throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info")).metricValue()); assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info")).metricValue()); assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info")).metricValue()); - - Map idTag = Map.of("client-id", METRICS_ID); - assertTrue(mBeanServer.isRegistered(expectedAppObjectName())); - assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); } - private void registerAppInfoMultipleTimes(Metrics metrics) throws JMException { + private void registerAppInfoMultipleTimes() throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, AppInfoParser.getCommitId()); assertEquals(EXPECTED_VERSION, AppInfoParser.getVersion()); @@ -114,37 +95,9 @@ private void registerAppInfoMultipleTimes(Metrics metrics) throws JMException { assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info")).metricValue()); assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info")).metricValue()); assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info")).metricValue()); - - Map idTag = Map.of("client-id", METRICS_ID); - assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); } private ObjectName expectedAppObjectName() throws MalformedObjectNameException { return new ObjectName(METRICS_PREFIX + ":type=app-info,id=" + METRICS_ID); } - - @Test - public void testClientIdWontAddRepeatedly() throws JMException { - Map tags = Map.of( - "client-id", METRICS_ID, - "other-tag", "tag-value", - "another-tag", "another-value" - ); - Metrics metrics = new Metrics(new MetricConfig().tags(tags), new MockTime(1)); - AppInfoParser.registerAppInfo(METRICS_PREFIX, METRICS_ID, metrics, EXPECTED_START_MS); - - assertTrue(mBeanServer.isRegistered(expectedAppObjectName())); - assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", tags)).metricValue()); - assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", tags)).metricValue()); - assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", tags)).metricValue()); - - Map idTag = Map.of("client-id", METRICS_ID); - assertEquals(EXPECTED_COMMIT_VERSION, metrics.metric(metrics.metricName("commit-id", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_VERSION, metrics.metric(metrics.metricName("version", "app-info", idTag)).metricValue()); - assertEquals(EXPECTED_START_MS, metrics.metric(metrics.metricName("start-time-ms", "app-info", idTag)).metricValue()); - metrics.close(); - AppInfoParser.unregisterAppInfo(METRICS_PREFIX, METRICS_ID, metrics); - } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java index 4c214005eac80..1eb65dbe8a8dd 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/ChecksumsTest.java @@ -20,7 +20,6 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.zip.CRC32C; import java.util.zip.Checksum; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -37,7 +36,7 @@ public void testUpdateByteBuffer() { private void doTestUpdateByteBuffer(byte[] bytes, ByteBuffer buffer) { buffer.put(bytes); buffer.flip(); - Checksum bufferCrc = new CRC32C(); + Checksum bufferCrc = Crc32C.create(); Checksums.update(bufferCrc, buffer, buffer.remaining()); assertEquals(Crc32C.compute(bytes, 0, bytes.length), bufferCrc.getValue()); assertEquals(0, buffer.position()); @@ -56,8 +55,8 @@ public void testUpdateInt() { final ByteBuffer buffer = ByteBuffer.allocate(4); buffer.putInt(value); - Checksum crc1 = new CRC32C(); - Checksum crc2 = new CRC32C(); + Checksum crc1 = Crc32C.create(); + Checksum crc2 = Crc32C.create(); Checksums.updateInt(crc1, value); crc2.update(buffer.array(), buffer.arrayOffset(), 4); @@ -71,8 +70,8 @@ public void testUpdateLong() { final ByteBuffer buffer = ByteBuffer.allocate(8); buffer.putLong(value); - Checksum crc1 = new CRC32C(); - Checksum crc2 = new CRC32C(); + Checksum crc1 = Crc32C.create(); + Checksum crc2 = Crc32C.create(); Checksums.updateLong(crc1, value); crc2.update(buffer.array(), buffer.arrayOffset(), 8); @@ -85,7 +84,7 @@ private void doTestUpdateByteBufferWithOffsetPosition(byte[] bytes, ByteBuffer b buffer.flip(); buffer.position(offset); - Checksum bufferCrc = new CRC32C(); + Checksum bufferCrc = Crc32C.create(); Checksums.update(bufferCrc, buffer, buffer.remaining()); assertEquals(Crc32C.compute(bytes, offset, buffer.remaining()), bufferCrc.getValue()); assertEquals(offset, buffer.position()); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java index b8e5d1daaa417..2c6d148e3a1e2 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/Crc32CTest.java @@ -18,10 +18,31 @@ import org.junit.jupiter.api.Test; +import java.util.zip.Checksum; + import static org.junit.jupiter.api.Assertions.assertEquals; public class Crc32CTest { + @Test + public void testUpdate() { + final byte[] bytes = "Any String you want".getBytes(); + final int len = bytes.length; + + Checksum crc1 = Crc32C.create(); + Checksum crc2 = Crc32C.create(); + Checksum crc3 = Crc32C.create(); + + crc1.update(bytes, 0, len); + for (byte b : bytes) + crc2.update(b); + crc3.update(bytes, 0, len / 2); + crc3.update(bytes, len / 2, len - len / 2); + + assertEquals(crc1.getValue(), crc2.getValue(), "Crc values should be the same"); + assertEquals(crc1.getValue(), crc3.getValue(), "Crc values should be the same"); + } + @Test public void testValue() { final byte[] bytes = "Some String".getBytes(); diff --git a/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java new file mode 100644 index 0000000000000..057b6118e07a7 --- /dev/null +++ b/clients/src/test/java/org/apache/kafka/common/utils/FlattenedIteratorTest.java @@ -0,0 +1,116 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.kafka.common.utils; + +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class FlattenedIteratorTest { + + @Test + public void testNestedLists() { + List> list = asList( + asList("foo", "a", "bc"), + Collections.singletonList("ddddd"), + asList("", "bar2", "baz45")); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + + // Ensure we can iterate multiple times + List flattened2 = new ArrayList<>(); + flattenedIterable.forEach(flattened2::add); + + assertEquals(flattened, flattened2); + } + + @Test + public void testEmptyList() { + List> list = emptyList(); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(emptyList(), flattened); + } + + @Test + public void testNestedSingleEmptyList() { + List> list = Collections.singletonList(emptyList()); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(emptyList(), flattened); + } + + @Test + public void testEmptyListFollowedByNonEmpty() { + List> list = asList( + emptyList(), + asList("boo", "b", "de")); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + } + + @Test + public void testEmptyListInBetweenNonEmpty() { + List> list = asList( + Collections.singletonList("aadwdwdw"), + emptyList(), + asList("ee", "aa", "dd")); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + } + + @Test + public void testEmptyListAtTheEnd() { + List> list = asList( + asList("ee", "dd"), + Collections.singletonList("e"), + emptyList()); + + Iterable flattenedIterable = () -> new FlattenedIterator<>(list.iterator(), List::iterator); + List flattened = new ArrayList<>(); + flattenedIterable.forEach(flattened::add); + + assertEquals(list.stream().flatMap(Collection::stream).collect(Collectors.toList()), flattened); + } + +} diff --git a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java index 74518fe0f442f..16fc6af154b20 100755 --- a/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/UtilsTest.java @@ -202,7 +202,6 @@ public void testAbs() { assertEquals(10, Utils.abs(10)); assertEquals(0, Utils.abs(0)); assertEquals(1, Utils.abs(-1)); - assertEquals(Integer.MAX_VALUE, Utils.abs(Integer.MAX_VALUE)); } @Test @@ -896,171 +895,12 @@ public void testPropsToMap() { assertValue(Collections.emptyMap()); } - @Test - public void testPropsToMapNonStringKey() { - ConfigException ce = assertThrows(ConfigException.class, () -> { - Properties props = new Properties(); - props.put(1, "value"); - Utils.propsToMap(props); - }); - assertTrue(ce.getMessage().contains("One or more keys is not a string.")); - - ce = assertThrows(ConfigException.class, () -> { - Properties props = new Properties(); - props.put(true, "value"); - props.put('a', "value"); - Utils.propsToMap(props); - }); - assertEquals("One or more keys is not a string.", ce.getMessage()); - } - - @Test - public void testPropsToMapWithDefaults() { - Properties defaultProperties = new Properties(); - defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); - defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); - - Properties actualProperties = new Properties(defaultProperties); - actualProperties.setProperty("ActualKey1", "ActualValue1"); - actualProperties.setProperty("ActualKey2", "ActualValue2"); - - final Map mapProperties = Utils.propsToMap(actualProperties); - - Map expectedMap = new HashMap<>(); - expectedMap.put("DefaultKey1", "DefaultValue1"); - expectedMap.put("DefaultKey2", "DefaultValue2"); - expectedMap.put("ActualKey1", "ActualValue1"); - expectedMap.put("ActualKey2", "ActualValue2"); - - assertEquals(expectedMap, mapProperties); - } - - @Test - public void testPropsToMapWithDefaultsAndSameKey() { - Properties defaultProperties = new Properties(); - defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); - defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); - - Properties actualProperties = new Properties(defaultProperties); - actualProperties.setProperty("DefaultKey1", "ActualValue1"); - actualProperties.setProperty("ActualKey2", "ActualValue2"); - - final Map mapProperties = Utils.propsToMap(actualProperties); - - Map expectedMap = new HashMap<>(); - expectedMap.put("DefaultKey1", "ActualValue1"); - expectedMap.put("DefaultKey2", "DefaultValue2"); - expectedMap.put("ActualKey2", "ActualValue2"); - - assertEquals(expectedMap, mapProperties); - } - private static void assertValue(Object value) { Properties props = new Properties(); props.put("key", value); assertEquals(Utils.propsToMap(props).get("key"), value); } - @Test - public void testCastToStringObjectMap() { - Map map = new HashMap<>(); - map.put("key1", "value1"); - map.put("key2", 1); - - Map expectedMap = new HashMap<>(); - expectedMap.put("key1", "value1"); - expectedMap.put("key2", 1); - - assertEquals(map, expectedMap); - } - - @Test - public void testCastToStringObjectMapNonStringKey() { - ConfigException ce = assertThrows(ConfigException.class, () -> { - Map map = new HashMap<>(); - map.put(1, "value"); - Utils.castToStringObjectMap(map); - }); - assertTrue(ce.getMessage().contains("Key must be a string.")); - - ce = assertThrows(ConfigException.class, () -> { - Map map = new HashMap<>(); - map.put(true, "value"); - map.put('a', "value"); - Utils.castToStringObjectMap(map); - }); - assertTrue(ce.getMessage().contains("Key must be a string.")); - } - - @Test - public void testCastToStringObjectMapPropertiesAsInput() { - Properties props = new Properties(); - props.put("key1", "value1"); - props.put("key2", "value2"); - - Map expectedMap = new HashMap<>(); - expectedMap.put("key1", "value1"); - expectedMap.put("key2", "value2"); - - assertEquals(expectedMap, Utils.castToStringObjectMap(props)); - assertEquals(Utils.propsToMap(props), Utils.castToStringObjectMap(props)); - } - - @Test - public void testCastToStringObjectMapPropertiesNonStringKey() { - ConfigException ce = assertThrows(ConfigException.class, () -> { - Properties props = new Properties(); - props.put(1, "value"); - Utils.castToStringObjectMap(props); - }); - assertEquals("One or more keys is not a string.", ce.getMessage()); - - ce = assertThrows(ConfigException.class, () -> { - Properties props = new Properties(); - props.put(true, "value"); - props.put('a', "value"); - Utils.castToStringObjectMap(props); - }); - assertEquals("One or more keys is not a string.", ce.getMessage()); - } - - @Test - public void testCastToStringObjectMapPropertiesWithDefaults() { - Properties defaultProperties = new Properties(); - defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); - defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); - - Properties actualProperties = new Properties(defaultProperties); - actualProperties.setProperty("ActualKey1", "ActualValue1"); - actualProperties.setProperty("ActualKey2", "ActualValue2"); - - Map expectedMap = new HashMap<>(); - expectedMap.put("DefaultKey1", "DefaultValue1"); - expectedMap.put("DefaultKey2", "DefaultValue2"); - expectedMap.put("ActualKey1", "ActualValue1"); - expectedMap.put("ActualKey2", "ActualValue2"); - - assertEquals(expectedMap, Utils.castToStringObjectMap(actualProperties)); - } - - @Test - public void testCastToStringObjectMapPropertiesWithDefaultsAndSameKey() { - Properties defaultProperties = new Properties(); - defaultProperties.setProperty("DefaultKey1", "DefaultValue1"); - defaultProperties.setProperty("DefaultKey2", "DefaultValue2"); - - Properties actualProperties = new Properties(defaultProperties); - actualProperties.setProperty("DefaultKey1", "ActualValue1"); - actualProperties.setProperty("ActualKey2", "ActualValue2"); - - Map expectedMap = new HashMap<>(); - expectedMap.put("DefaultKey1", "ActualValue1"); - expectedMap.put("DefaultKey2", "DefaultValue2"); - expectedMap.put("ActualKey2", "ActualValue2"); - - assertEquals(expectedMap, Utils.castToStringObjectMap(actualProperties)); - } - @Test public void testCloseAllQuietly() { AtomicReference exception = new AtomicReference<>(); @@ -1269,13 +1109,6 @@ public void testTryAll() throws Throwable { assertEquals(expected, recorded); } - @Test - public void testMsToNs() { - assertEquals(1000000, Utils.msToNs(1)); - assertEquals(0, Utils.msToNs(0)); - assertThrows(IllegalArgumentException.class, () -> Utils.msToNs(Long.MAX_VALUE)); - } - private Callable recordingCallable(Map recordingMap, String success, TestException failure) { return () -> { if (success == null) diff --git a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java index 6c80d2b5df5f1..ac424cd288e0c 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsProvider.java @@ -23,40 +23,16 @@ import org.junit.jupiter.params.provider.ArgumentsProvider; import org.junit.jupiter.params.support.AnnotationConsumer; -import java.util.stream.IntStream; import java.util.stream.Stream; public class ApiKeyVersionsProvider implements ArgumentsProvider, AnnotationConsumer { private ApiKeys apiKey; - private short fromVersion; - private short toVersion; public void accept(ApiKeyVersionsSource source) { apiKey = source.apiKey(); - - short oldestVersion = apiKey.oldestVersion(); - short latestVersion = apiKey.latestVersion(source.enableUnstableLastVersion()); - - fromVersion = source.fromVersion() == -1 ? oldestVersion : source.fromVersion(); - toVersion = source.toVersion() == -1 ? latestVersion : source.toVersion(); - - if (fromVersion > toVersion) { - throw new IllegalArgumentException(String.format("The fromVersion %s is larger than the toVersion %s", - fromVersion, toVersion)); - } - - if (fromVersion < oldestVersion) { - throw new IllegalArgumentException(String.format("The fromVersion %s is older than the oldest version %s", - fromVersion, oldestVersion)); - } - - if (toVersion > latestVersion) { - throw new IllegalArgumentException(String.format("The toVersion %s is newer than the latest version %s", - toVersion, latestVersion)); - } } public Stream provideArguments(ExtensionContext context) { - return IntStream.rangeClosed(fromVersion, toVersion).mapToObj(i -> Arguments.of((short) i)); + return apiKey.allVersions().stream().map(Arguments::of); } } diff --git a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsSource.java b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsSource.java index 4806605a54564..c89cdb70b0f3a 100644 --- a/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsSource.java +++ b/clients/src/test/java/org/apache/kafka/common/utils/annotation/ApiKeyVersionsSource.java @@ -30,7 +30,4 @@ @ArgumentsSource(ApiKeyVersionsProvider.class) public @interface ApiKeyVersionsSource { ApiKeys apiKey(); - short fromVersion() default -1; - short toVersion() default -1; - boolean enableUnstableLastVersion() default true; } diff --git a/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java b/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java index 5a6d8b291b0ba..06d5a4e93eb53 100644 --- a/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java +++ b/clients/src/test/java/org/apache/kafka/server/policy/AlterConfigPolicyTest.java @@ -38,8 +38,8 @@ public void testRequestMetadataEquals() { assertEquals(requestMetadata, requestMetadata); - assertNotEquals(null, requestMetadata); - assertNotEquals(new Object(), requestMetadata); + assertNotEquals(requestMetadata, null); + assertNotEquals(requestMetadata, new Object()); assertNotEquals(requestMetadata, new RequestMetadata( new ConfigResource(Type.BROKER, "1"), Collections.singletonMap("foo", "bar") diff --git a/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java b/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java index 715486c1ae36b..a1d676e15a3aa 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java +++ b/clients/src/test/java/org/apache/kafka/test/MockConsumerInterceptor.java @@ -118,7 +118,6 @@ public static void resetCounters() { CONFIG_COUNT.set(0); THROW_CONFIG_EXCEPTION.set(0); CLUSTER_META.set(null); - THROW_ON_CONFIG_EXCEPTION_THRESHOLD.set(0); CLUSTER_ID_BEFORE_ON_CONSUME.set(NO_CLUSTER_ID); } diff --git a/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java b/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java index d88792a06e81b..ac2865e9bb8b6 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java +++ b/clients/src/test/java/org/apache/kafka/test/MockDeserializer.java @@ -20,12 +20,11 @@ import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.serialization.Deserializer; -import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -public class MockDeserializer implements ClusterResourceListener, Deserializer { +public class MockDeserializer implements ClusterResourceListener, Deserializer { public static AtomicInteger initCount = new AtomicInteger(0); public static AtomicInteger closeCount = new AtomicInteger(0); public static AtomicReference clusterMeta = new AtomicReference<>(); @@ -53,12 +52,11 @@ public void configure(Map configs, boolean isKey) { } @Override - public String deserialize(String topic, byte[] data) { + public byte[] deserialize(String topic, byte[] data) { // This will ensure that we get the cluster metadata when deserialize is called for the first time // as subsequent compareAndSet operations will fail. clusterIdBeforeDeserialize.compareAndSet(noClusterId, clusterMeta.get()); - if (data == null) return null; - return new String(data, StandardCharsets.UTF_8); + return data; } @Override diff --git a/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java b/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java index 9e69f57c96f2c..acc69ab44e31a 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java +++ b/clients/src/test/java/org/apache/kafka/test/MockProducerInterceptor.java @@ -110,7 +110,6 @@ public static void resetCounters() { ON_SUCCESS_COUNT.set(0); ON_ERROR_COUNT.set(0); ON_ERROR_WITH_METADATA_COUNT.set(0); - THROW_ON_CONFIG_EXCEPTION_THRESHOLD.set(0); CLUSTER_META.set(null); CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.set(NO_CLUSTER_ID); } diff --git a/clients/src/test/java/org/apache/kafka/test/MockSerializer.java b/clients/src/test/java/org/apache/kafka/test/MockSerializer.java index 890b01a400f61..bfab4b592b88e 100644 --- a/clients/src/test/java/org/apache/kafka/test/MockSerializer.java +++ b/clients/src/test/java/org/apache/kafka/test/MockSerializer.java @@ -20,11 +20,10 @@ import org.apache.kafka.common.ClusterResourceListener; import org.apache.kafka.common.serialization.Serializer; -import java.nio.charset.StandardCharsets; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; -public class MockSerializer implements ClusterResourceListener, Serializer { +public class MockSerializer implements ClusterResourceListener, Serializer { public static final AtomicInteger INIT_COUNT = new AtomicInteger(0); public static final AtomicInteger CLOSE_COUNT = new AtomicInteger(0); public static final AtomicReference CLUSTER_META = new AtomicReference<>(); @@ -36,12 +35,11 @@ public MockSerializer() { } @Override - public byte[] serialize(String topic, String data) { + public byte[] serialize(String topic, byte[] data) { // This will ensure that we get the cluster metadata when serialize is called for the first time // as subsequent compareAndSet operations will fail. CLUSTER_ID_BEFORE_SERIALIZE.compareAndSet(NO_CLUSTER_ID, CLUSTER_META.get()); - if (data == null) return null; - return data.getBytes(StandardCharsets.UTF_8); + return data; } @Override diff --git a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java index 889ebcbc607b8..72a1ccfe65106 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestSslUtils.java @@ -84,6 +84,7 @@ import java.security.cert.CertificateException; import java.security.cert.X509Certificate; import java.util.ArrayList; +import java.util.Collections; import java.util.Date; import java.util.Enumeration; import java.util.HashMap; @@ -210,7 +211,6 @@ public static Map createSslConfig(String keyManagerAlgorithm, St sslConfigs.put(SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, keyManagerAlgorithm); sslConfigs.put(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, trustManagerAlgorithm); - sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); @@ -372,7 +372,7 @@ static String pem(Certificate cert) throws IOException { try (PemWriter pemWriter = new PemWriter(new OutputStreamWriter(out, StandardCharsets.UTF_8))) { pemWriter.writeObject(new JcaMiscPEMGenerator(cert)); } - return out.toString(StandardCharsets.UTF_8); + return new String(out.toByteArray(), StandardCharsets.UTF_8); } static String pem(PrivateKey privateKey, Password password) throws IOException { @@ -390,7 +390,7 @@ static String pem(PrivateKey privateKey, Password password) throws IOException { } } } - return out.toString(StandardCharsets.UTF_8); + return new String(out.toByteArray(), StandardCharsets.UTF_8); } public static class CertificateBuilder { @@ -444,19 +444,14 @@ public X509Certificate generate(X500Name dn, KeyPair keyPair) throws Certificate SubjectPublicKeyInfo subPubKeyInfo = SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); BcContentSignerBuilder signerBuilder; String keyAlgorithm = keyPair.getPublic().getAlgorithm(); - switch (keyAlgorithm) { - case "RSA": - signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); - break; - case "DSA": - signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); - break; - case "EC": - signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); - break; - default: - throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); - } + if (keyAlgorithm.equals("RSA")) + signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); + else if (keyAlgorithm.equals("DSA")) + signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); + else if (keyAlgorithm.equals("EC")) + signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); + else + throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); ContentSigner sigGen = signerBuilder.build(privateKeyAsymKeyParam); // Negative numbers for "days" can be used to generate expired certificates Date now = new Date(); @@ -525,19 +520,14 @@ public X509Certificate generateSignedCertificate(X500Name dn, KeyPair keyPair, SubjectPublicKeyInfo.getInstance(keyPair.getPublic().getEncoded()); BcContentSignerBuilder signerBuilder; String keyAlgorithm = keyPair.getPublic().getAlgorithm(); - switch (keyAlgorithm) { - case "RSA": - signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); - break; - case "DSA": - signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); - break; - case "EC": - signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); - break; - default: - throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); - } + if (keyAlgorithm.equals("RSA")) + signerBuilder = new BcRSAContentSignerBuilder(sigAlgId, digAlgId); + else if (keyAlgorithm.equals("DSA")) + signerBuilder = new BcDSAContentSignerBuilder(sigAlgId, digAlgId); + else if (keyAlgorithm.equals("EC")) + signerBuilder = new BcECContentSignerBuilder(sigAlgId, digAlgId); + else + throw new IllegalArgumentException("Unsupported algorithm " + keyAlgorithm); ContentSigner sigGen = signerBuilder.build(privateKeyAsymKeyParam); // Negative numbers for "days" can be used to generate expired certificates Date now = new Date(); @@ -696,7 +686,6 @@ private Map buildJks() throws IOException, GeneralSecurityExcept sslConfigs.put(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, trustStorePassword); sslConfigs.put(SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG, "JKS"); sslConfigs.put(SslConfigs.SSL_TRUSTMANAGER_ALGORITHM_CONFIG, TrustManagerFactory.getDefaultAlgorithm()); - sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); @@ -712,8 +701,7 @@ private Map buildPem() throws IOException, GeneralSecurityExcept Map sslConfigs = new HashMap<>(); sslConfigs.put(SslConfigs.SSL_PROTOCOL_CONFIG, tlsProtocol); - sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(tlsProtocol)); - sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); + sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Collections.singletonList(tlsProtocol)); if (connectionMode != ConnectionMode.CLIENT || useClientCert) { KeyPair keyPair = generateKeyPair(algorithm); @@ -850,7 +838,6 @@ public static Map generateConfigsWithCertificateChains(String tl List enabledProtocols = new ArrayList<>(); enabledProtocols.add(tlsProtocol); sslConfigs.put(SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, enabledProtocols); - sslConfigs.put(SslConfigs.SSL_CIPHER_SUITES_CONFIG, List.of()); return sslConfigs; } diff --git a/clients/src/test/java/org/apache/kafka/test/TestUtils.java b/clients/src/test/java/org/apache/kafka/test/TestUtils.java index 078d006e37a37..84b9e6ce2c2a9 100644 --- a/clients/src/test/java/org/apache/kafka/test/TestUtils.java +++ b/clients/src/test/java/org/apache/kafka/test/TestUtils.java @@ -52,7 +52,6 @@ import java.nio.file.Path; import java.util.ArrayList; import java.util.Base64; -import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -65,11 +64,8 @@ import java.util.Random; import java.util.Set; import java.util.UUID; -import java.util.concurrent.CancellationException; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; import java.util.function.Consumer; import java.util.function.Supplier; import java.util.regex.Matcher; @@ -77,7 +73,9 @@ import static java.util.Arrays.asList; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; @@ -156,27 +154,6 @@ public static MetadataSnapshot metadataSnapshotWith(final int nodes, final Map Thread.getAllStackTraces().keySet().stream() - .noneMatch(t -> t.isDaemon() == isDaemon && t.isAlive() && t.getName().startsWith(threadName)), String.format("Thread leak detected: %s", threadName)); - } - /** * Test utility function to get MetadataSnapshot of cluster with configured, and 0 partitions. * @param nodes number of nodes in the cluster. @@ -210,17 +187,6 @@ public static String randomString(final int len) { return b.toString(); } - /** - * Select a random element from collections - * - * @param elements A collection we can select - * @return A element from collection - */ - public static T randomSelect(final Collection elements) { - List elementsCopy = new ArrayList<>(elements); - return elementsCopy.get(SEEDED_RANDOM.nextInt(elementsCopy.size())); - } - /** * Create an empty file in the default temporary-file directory, using the given prefix and suffix * to generate its name. @@ -515,7 +481,7 @@ public static void isValidClusterId(String clusterId) { assertNotNull(clusterId); // Base 64 encoded value is 22 characters - assertEquals(22, clusterId.length()); + assertEquals(clusterId.length(), 22); Pattern clusterIdPattern = Pattern.compile("[a-zA-Z0-9_\\-]+"); Matcher matcher = clusterIdPattern.matcher(clusterId); @@ -526,7 +492,7 @@ public static void isValidClusterId(String clusterId) { byte[] decodedUuid = Base64.getDecoder().decode(originalClusterId); // We expect 16 bytes, same as the input UUID. - assertEquals(16, decodedUuid.length); + assertEquals(decodedUuid.length, 16); //Check if it can be converted back to a UUID. try { @@ -580,46 +546,55 @@ public static ByteBuffer toBuffer(UnalignedRecords records) { return toBuffer(records.toSend()); } + public static Set generateRandomTopicPartitions(int numTopic, int numPartitionPerTopic) { + Set tps = new HashSet<>(); + for (int i = 0; i < numTopic; i++) { + String topic = randomString(32); + for (int j = 0; j < numPartitionPerTopic; j++) { + tps.add(new TopicPartition(topic, j)); + } + } + return tps; + } + /** - * Assert that a future raises an expected exception cause type. - * This method will wait for the future to complete or timeout(15000 milliseconds). + * Assert that a future raises an expected exception cause type. Return the exception cause + * if the assertion succeeds; otherwise raise AssertionError. * - * @param Exception cause type parameter - * @param exceptionCauseClass Class of the expected exception cause * @param future The future to await + * @param exceptionCauseClass Class of the expected exception cause + * @param Exception cause type parameter * @return The caught exception cause */ - public static T assertFutureThrows(Class exceptionCauseClass, Future future) { - try { - future.get(DEFAULT_MAX_WAIT_MS, TimeUnit.MILLISECONDS); - fail("Should throw expected exception " + exceptionCauseClass.getSimpleName() + " but nothing was thrown."); - } catch (InterruptedException | ExecutionException | CancellationException e) { - Throwable cause = e instanceof ExecutionException ? e.getCause() : e; - // Enable strict type checking. - // This ensures we're testing for the exact exception type, not its subclasses. - assertEquals( - exceptionCauseClass, - cause.getClass(), - "Expected " + exceptionCauseClass.getSimpleName() + ", but got " + cause.getClass().getSimpleName() - ); - return exceptionCauseClass.cast(cause); - } catch (TimeoutException e) { - fail("Future is not completed within " + DEFAULT_MAX_WAIT_MS + " milliseconds."); - } catch (Exception e) { - fail("Expected " + exceptionCauseClass.getSimpleName() + ", but got " + e.getClass().getSimpleName()); - } - return null; + public static T assertFutureThrows(Future future, Class exceptionCauseClass) { + ExecutionException exception = assertThrows(ExecutionException.class, future::get); + assertInstanceOf(exceptionCauseClass, exception.getCause(), + "Unexpected exception cause " + exception.getCause()); + return exceptionCauseClass.cast(exception.getCause()); } public static void assertFutureThrows( - Class expectedCauseClassApiException, Future future, + Class expectedCauseClassApiException, String expectedMessage ) { - T receivedException = assertFutureThrows(expectedCauseClassApiException, future); + T receivedException = assertFutureThrows(future, expectedCauseClassApiException); assertEquals(expectedMessage, receivedException.getMessage()); } + public static void assertFutureError(Future future, Class exceptionClass) + throws InterruptedException { + try { + future.get(); + fail("Expected a " + exceptionClass.getSimpleName() + " exception, but got success."); + } catch (ExecutionException ee) { + Throwable cause = ee.getCause(); + assertEquals(exceptionClass, cause.getClass(), + "Expected a " + exceptionClass.getSimpleName() + " exception, but got " + + cause.getClass().getSimpleName()); + } + } + public static ApiKeys apiKeyFrom(NetworkReceive networkReceive) { return RequestHeader.parse(networkReceive.payload().duplicate()).apiKey(); } diff --git a/committer-tools/README.md b/committer-tools/README.md index 94f714f959a23..92558d4a29691 100644 --- a/committer-tools/README.md +++ b/committer-tools/README.md @@ -46,13 +46,11 @@ See: https://cli.github.com/ brew install gh ``` -## Find Reviewers and Update to PR body +## Find Reviewers The reviewers.py script is used to simplify the process of producing our "Reviewers:" -Git trailer to PR body. It parses the Git log to gather a set of "Authors" and "Reviewers". -Some simple string prefix matching is done to find candidates. -After entering the pull request number, the script updates the "Reviewers:" trailer accordingly. -If the PR body already contains a "Reviewers:" trailer, the script replaces it with the updated list of reviewers. +Git trailer. It parses the Git log to gather a set of "Authors" and "Reviewers". +Some simple string prefix matching is done to find candidates. Usage: diff --git a/committer-tools/kafka-merge-pr.py b/committer-tools/kafka-merge-pr.py index be86078687997..63439f3e0e408 100755 --- a/committer-tools/kafka-merge-pr.py +++ b/committer-tools/kafka-merge-pr.py @@ -70,7 +70,7 @@ DEV_BRANCH_NAME = "trunk" -DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.2.0") +DEFAULT_FIX_VERSION = os.environ.get("DEFAULT_FIX_VERSION", "4.0.0") ORIGINAL_HEAD = "" diff --git a/committer-tools/reviewers.py b/committer-tools/reviewers.py index 06ba4919213ab..ccac5e3182cee 100755 --- a/committer-tools/reviewers.py +++ b/committer-tools/reviewers.py @@ -1,10 +1,6 @@ #!/usr/bin/env python3 # -*- coding: utf-8 -*- -import json -import shlex -import subprocess -import tempfile # # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with @@ -39,43 +35,6 @@ def prompt_for_user(): return clean_input -def update_trailers(body, trailer): - with tempfile.NamedTemporaryFile() as fp: - fp.write(body.encode()) - fp.flush() - cmd = f"git interpret-trailers --if-exists replace --trailer '{trailer}' {fp.name} " - p = subprocess.run(shlex.split(cmd), capture_output=True, text=True) - fp.close() - - return p.stdout - - -def append_message_to_pr_body(pr: int , message: str): - try: - pr_url = f"https://github.com/apache/kafka/pull/{pr}" - cmd_get_pr = f"gh pr view {pr_url} --json title,body" - result = subprocess.run(shlex.split(cmd_get_pr), capture_output=True, text=True, check=True) - current_pr_body = json.loads(result.stdout).get("body", {}).strip() + "\n" - pr_title = json.loads(result.stdout).get("title", {}) - updated_pr_body = update_trailers(current_pr_body, message) - except subprocess.CalledProcessError as e: - print("Failed to retrieve PR body:", e.stderr) - return - - print(f"""New PR body will be:\n\n---\n{updated_pr_body}---\n""") - choice = input(f'Update the body of "{pr_title}"? [Y/n] ').strip().lower() - if choice not in ['', 'y']: - print("Abort.") - return - - try: - cmd_edit_body = f"gh pr edit {pr_url} --body {shlex.quote(updated_pr_body)}" - subprocess.run(shlex.split(cmd_edit_body), check=True) - print("PR body updated successfully!") - except subprocess.CalledProcessError as e: - print("Failed to update PR body:", e.stderr) - - if __name__ == "__main__": print("Utility to help generate 'Reviewers' string for Pull Requests. Use Ctrl+D or Ctrl+C to exit") @@ -128,12 +87,9 @@ def append_message_to_pr_body(pr: int , message: str): continue if selected_reviewers: - reviewer_message = "Reviewers: " - reviewer_message += ", ".join([f"{name} <{email}>" for name, email, _ in selected_reviewers]) - print(f"\n\n{reviewer_message}\n") + out = "\n\nReviewers: " + out += ", ".join([f"{name} <{email}>" for name, email, _ in selected_reviewers]) + out += "\n" + print(out) + - try: - pr_number = int(input("\nPull Request (Ctrl+D or Ctrl+C to skip): ")) - append_message_to_pr_body(pr_number, reviewer_message) - except (EOFError, KeyboardInterrupt): - exit(0) diff --git a/committer-tools/update-cache.sh b/committer-tools/update-cache.sh index 6dbc12e8a1bb1..015c2b51d5dfd 100755 --- a/committer-tools/update-cache.sh +++ b/committer-tools/update-cache.sh @@ -16,9 +16,22 @@ # specific language governing permissions and limitations # under the License. -# Get the latest commit SHA that contains the Gradle build cache. -sha=$(curl -s "https://api.github.com/repos/apache/kafka/actions/caches?key=gradle-home-v1&ref=refs/heads/trunk" \ - | jq -r '.actions_caches | max_by(.created_at) | .key | split("-")[4]') +if ! git config --get alias.update-cache > /dev/null; then + printf '\e[36m%s\n\n %s\n\e[0m\n' \ + 'Hint: you can create a Git alias to execute this script. Example:' \ + "git config alias.update-cache '!bash $(realpath "$0")'" +fi + +key="$( + gh cache list \ + --key 'gradle-home-v1|Linux-X64|test' \ + --sort 'created_at' \ + --limit 1 \ + --json 'key' \ + --jq '.[].key' +)" + +sha="$(cut -d '-' -f 5 <<< "$key")" if ! git show "$sha" &> /dev/null; then printf '\e[33m%s\n%s\e[0m\n' \ diff --git a/committer-tools/verify_license.py b/committer-tools/verify_license.py index c8489008cae67..7dc29f5517eaa 100644 --- a/committer-tools/verify_license.py +++ b/committer-tools/verify_license.py @@ -24,7 +24,6 @@ import tarfile import tempfile import subprocess -import argparse # Constant: Regex to extract dependency tokens from the LICENSE file. # Matches lines that start with a dash and then a dependency token of the form: @@ -45,7 +44,7 @@ def get_tarball_path(project_dir): print("Error: Distributions directory not found:", distributions_dir) sys.exit(1) - pattern = re.compile(r'^kafka_2\.13-(?!.*docs).+\.tgz$', re.IGNORECASE) + pattern = re.compile(r'^kafka_2\.13-.+\.tgz$', re.IGNORECASE) candidates = [ os.path.join(distributions_dir, f) for f in os.listdir(distributions_dir) @@ -75,20 +74,12 @@ def get_license_deps(license_text): return set(LICENSE_DEP_PATTERN.findall(license_text)) def main(): - # Argument parser - parser = argparse.ArgumentParser(description="Whether to skip executing ReleaseTarGz.") - parser.add_argument("--skip-build", action="store_true", help="skip the build") - args = parser.parse_args() - # Assume the current working directory is the project root. project_dir = os.getcwd() print("Using project directory:", project_dir) - - if args.skip_build: - print("Skip running './gradlew clean releaseTarGz'") - else: - # Build the tarball. - run_gradlew(project_dir) + + # Build the tarball. + run_gradlew(project_dir) tarball = get_tarball_path(project_dir) print("Tarball created at:", tarball) diff --git a/config/broker.properties b/config/broker.properties index 4a75f0b12d6c0..61a536c9b3071 100644 --- a/config/broker.properties +++ b/config/broker.properties @@ -75,8 +75,8 @@ log.dirs=/tmp/kraft-broker-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased based on the installation resources. -num.recovery.threads.per.data.dir=2 +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" diff --git a/config/consumer.properties b/config/consumer.properties index f65e529904148..01bb12eb0899f 100644 --- a/config/consumer.properties +++ b/config/consumer.properties @@ -4,135 +4,23 @@ # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at -# +# # http://www.apache.org/licenses/LICENSE-2.0 -# +# # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# see org.apache.kafka.clients.consumer.ConsumerConfig for more details -# See org.apache.kafka.clients.consumer.ConsumerConfig for more details. -# Consider using environment variables or external configuration management -# for sensitive information like passwords and environment-specific settings. - -##################### Consumer Basics ####################### - -# List of Kafka brokers used for initial cluster discovery and metadata retrieval. -# Format: host1:port1,host2:port2,host3:port3 -# Include all brokers for high availability +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... bootstrap.servers=localhost:9092 -# Client identifier for logging and metrics. -# Helps with debugging and monitoring. -client.id=test-consumer - -##################### Transaction Support ##################### - -# Isolation level for reading messages. -# Options: read_uncommitted (default), read_committed (for exactly-once semantics). -isolation.level=read_uncommitted - -##################### Consumer Group Configuration ##################### - -# Unique identifier for this consumer group. -# All consumers with the same group.id will share partition consumption. +# consumer group id group.id=test-consumer-group -# What to do when there is no initial offset or if the current offset no longer exists. -# Options: earliest (from beginning), latest (from end), none (throw exception). -# Use 'earliest' to avoid data loss on first run. -auto.offset.reset=earliest - -##################### Partition Assignment Strategy ##################### - -# Strategy for assigning partitions to consumers in a group. -# Options: RangeAssignor, RoundRobinAssignor, StickyAssignor, CooperativeStickyAssignor. -# CooperativeStickyAssignor is recommended (requires Kafka 2.4+). -partition.assignment.strategy=org.apache.kafka.clients.consumer.CooperativeStickyAssignor - -##################### Deserialization ##################### - -# Deserializer class for message keys. -# Common options: StringDeserializer, ByteArrayDeserializer, AvroDeserializer. -key.deserializer=org.apache.kafka.common.serialization.StringDeserializer - -# Deserializer class for message values. -value.deserializer=org.apache.kafka.common.serialization.StringDeserializer - -##################### Offset Management ##################### - -# Whether to automatically commit offsets in the background. -# Set to false for manual offset management and exactly-once processing. -enable.auto.commit=true - -# Frequency (in milliseconds) at which offsets are auto-committed. -# Lower values provide better fault tolerance but increase broker load. -auto.commit.interval.ms=5000 - -##################### Classic Group Session Management ##################### - -# Timeout for detecting consumer failures when using group management. -# Must be between group.min.session.timeout.ms and group.max.session.timeout.ms (broker config). -session.timeout.ms=30000 - -# Expected time between heartbeats when using group management. -# Should be lower than session.timeout.ms (typically 1/3 of session timeout). -heartbeat.interval.ms=10000 - -# Maximum time between successive calls to poll(). -# If exceeded, consumer is considered failed and partition rebalancing occurs. -max.poll.interval.ms=300000 - -##################### Retry And Error Handling ##################### - -# Initial and max time to wait for failed request retries. -# The retry.backoff.ms is the initial backoff value and will increase exponentially -# for each failed request, up to the retry.backoff.max.ms value. -retry.backoff.ms=100 -retry.backoff.max.ms=1000 - -# Total time to wait for a response to a request. -request.timeout.ms=40000 - -# Close idle connections after this many milliseconds. -connections.max.idle.ms=540000 - -##################### Security Configuration ##################### - -# Security protocol for communication with brokers. -# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL -#security.protocol=SASL_SSL - -# SSL configuration. -#ssl.truststore.location=/path/to/truststore.jks -#ssl.truststore.password=truststore-password - -# SASL configuration. -#sasl.mechanism=PLAIN -#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ -# username="your-username" \ -# password="your-password"; - -##################### Performance And Throughput ##################### - -# Minimum data size (bytes) and maximum polling timeout (ms). -# Whichever condition is met first will trigger the fetch operation. -# Balances response latency against message batching efficiency. -# For remote partition fetching, configure remote.fetch.max.wait.ms instead. -fetch.min.bytes=1 -fetch.max.wait.ms=500 - -# Set soft limits to the amount of bytes per fetch request and partition. -# Both max.partition.fetch.bytes and fetch.max.bytes limits can be exceeded when -# the first batch in the first non-empty partition is larger than the configured -# value to ensure that the consumer can make progress. -# Configuring message.max.bytes (broker config) or max.message.bytes (topic config) -# <= fetch.max.bytes prevents oversized fetch responses. -fetch.max.bytes=52428800 -max.partition.fetch.bytes=1048576 - -# Maximum number of records returned in a single poll() call. -# Higher values increase throughput but may cause longer processing delays. -max.poll.records=500 +# What to do when there is no initial offset in Kafka or if the current +# offset does not exist any more on the server: latest, earliest, none +#auto.offset.reset= diff --git a/config/controller.properties b/config/controller.properties index 3cf3a58b606d6..84963c95701d1 100644 --- a/config/controller.properties +++ b/config/controller.properties @@ -75,8 +75,8 @@ log.dirs=/tmp/kraft-controller-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased based on the installation resources. -num.recovery.threads.per.data.dir=2 +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" diff --git a/config/log4j2.yaml b/config/log4j2.yaml index de263c57c928e..7ee6f001e18ea 100644 --- a/config/log4j2.yaml +++ b/config/log4j2.yaml @@ -44,7 +44,7 @@ Configuration: # State Change appender - name: StateChangeAppender fileName: "${sys:kafka.logs.dir}/state-change.log" - filePattern: "${sys:kafka.logs.dir}/state-change.log.%d{yyyy-MM-dd-HH}" + filePattern: "${sys:kafka.logs.dir}/stage-change.log.%d{yyyy-MM-dd-HH}" PatternLayout: pattern: "${logPattern}" TimeBasedTriggeringPolicy: @@ -133,17 +133,7 @@ Configuration: AppenderRef: ref: ControllerAppender # LogCleaner logger - - name: org.apache.kafka.storage.internals.log.LogCleaner - level: INFO - additivity: false - AppenderRef: - ref: CleanerAppender - - name: org.apache.kafka.storage.internals.log.LogCleaner$CleanerThread - level: INFO - additivity: false - AppenderRef: - ref: CleanerAppender - - name: org.apache.kafka.storage.internals.log.Cleaner + - name: kafka.log.LogCleaner level: INFO additivity: false AppenderRef: diff --git a/config/producer.properties b/config/producer.properties index 6165ce9ff571c..3a999e7c17e8c 100644 --- a/config/producer.properties +++ b/config/producer.properties @@ -12,127 +12,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# see org.apache.kafka.clients.producer.ProducerConfig for more details -# See org.apache.kafka.clients.producer.ProducerConfig for more details. -# Consider using environment variables or external configuration management -# for sensitive information like passwords and environment-specific settings. +############################# Producer Basics ############################# -##################### Producer Basics ##################### - -# List of Kafka brokers used for initial cluster discovery and metadata retrieval. -# Format: host1:port1,host2:port2,host3:port3 -# Include all brokers for high availability. +# list of brokers used for bootstrapping knowledge about the rest of the cluster +# format: host1:port1,host2:port2 ... bootstrap.servers=localhost:9092 -# Client identifier for logging and metrics. -# Helps with debugging and monitoring. -client.id=test-producer - -##################### Transaction Support ##################### - -# Transactional ID for the producer. -# Must be unique across all producer instances. -# Enables exactly-once semantics across multiple partitions/topics. -#transactional.id=test-transactional-id - -# Maximum amount of time in milliseconds that a transaction will remain open. -# Only applies when transactional.id is set. -transaction.timeout.ms=60000 - -##################### Partitioning ##################### - -# Name of the partitioner class for partitioning records. -# Default uses "sticky" partitioning which improves throughput by filling batches -# Options: DefaultPartitioner, RoundRobinPartitioner, UniformStickyPartitioner. -#partitioner.class=org.apache.kafka.clients.producer.RoundRobinPartitioner - -##################### Serialization ##################### - -# Serializer class for message keys. -# Common options: StringSerializer, ByteArraySerializer, AvroSerializer. -key.serializer=org.apache.kafka.common.serialization.StringSerializer - -# Serializer class for message values. -value.serializer=org.apache.kafka.common.serialization.StringSerializer - -##################### Reliability And Durability ##################### - -# Number of acknowledgments the producer requires the leader to have received. -# Options: 0 (no ack), 1 (leader only), all/-1 (all in-sync replicas). -# Use 'all' for maximum durability. -acks=all - -# Number of retries for failed sends. -# Set to high value or Integer.MAX_VALUE for maximum reliability. -retries=2147483647 - -# Initial and max time to wait for failed request retries. -# The retry.backoff.ms is the initial backoff value and will increase exponentially -# for each failed request, up to the retry.backoff.max.ms value. -retry.backoff.ms=100 -retry.backoff.max.ms=1000 - -# Enable idempotent producer to prevent duplicate messages. -# Ensures exactly-once delivery semantics when combined with proper consumer settings. -enable.idempotence=true - -# Maximum number of unacknowledged requests the client will send on a single connection. -# Must be <= 5 when enable.idempotence=true to maintain ordering guarantees. -max.in.flight.requests.per.connection=5 - -##################### Timeouts And Blocking ##################### - -# Maximum amount of time the client will wait for the response of a request. -# Should be higher than replica.lag.time.max.ms (broker config). -request.timeout.ms=30000 - -# How long KafkaProducer.send() and KafkaProducer.partitionsFor() will block. -# Should be higher than request.timeout.ms. -max.block.ms=60000 - -# Timeout for broker requests, including produce requests. -# Should be greater than or equal to the sum of request.timeout.ms and linger.ms. -delivery.timeout.ms=120000 - -##################### Security Configuration ##################### - -# Security protocol for communication with brokers. -# Options: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL -#security.protocol=SASL_SSL - -# SSL configuration. -#ssl.truststore.location=/path/to/truststore.jks -#ssl.truststore.password=truststore-password +# specify the compression codec for all data generated: none, gzip, snappy, lz4, zstd +compression.type=none -# SASL configuration. -#sasl.mechanism=PLAIN -#sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \ -# username="your-username" \ -# password="your-password"; +# name of the partitioner class for partitioning records; +# The default uses "sticky" partitioning logic which spreads the load evenly between partitions, but improves throughput by attempting to fill the batches sent to each partition. +#partitioner.class= -##################### Performance And Throughput ##################### +# the maximum amount of time the client will wait for the response of a request +#request.timeout.ms= -# Compression codec for all data generated. -# Options: none, gzip, snappy, lz4, zstd. -# Can greatly improve throughput at the cost of increased CPU usage. -compression.type=none +# how long `KafkaProducer.send` and `KafkaProducer.partitionsFor` will block for +#max.block.ms= -# Producer will wait up to this delay to batch records together. -# Higher values increase throughput but add latency. -# Set to 0 for lowest latency, 5-100ms for balanced throughput/latency. -linger.ms=5 +# the producer will wait for up to the given delay to allow other records to be sent so that the sends can be batched together +#linger.ms= -# Default batch size in bytes when batching multiple records sent to a partition. -# Larger batches improve throughput but use more memory. -# 16KB is a good starting point, adjust based on message size and throughput needs. -batch.size=16384 +# the maximum size of a request in bytes +#max.request.size= -# Total bytes of memory the producer can use to buffer records waiting to be sent. -# Should be larger than batch.size * number of partitions you're writing to. -# 32MB is reasonable for most use cases. -buffer.memory=33554432 +# the default batch size in bytes when batching multiple records sent to a partition +#batch.size= -# Maximum size of a request in bytes. -# Should accommodate your largest batch size plus overhead. -# 1MB is default and suitable for most cases. -max.request.size=1048576 +# the total bytes of memory the producer can use to buffer records waiting to be sent to the server +#buffer.memory= diff --git a/config/server.properties b/config/server.properties index 7f1773d354ea1..d4b1fe0bc4dbd 100644 --- a/config/server.properties +++ b/config/server.properties @@ -78,8 +78,8 @@ log.dirs=/tmp/kraft-combined-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased based on the installation resources. -num.recovery.threads.per.data.dir=2 +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" diff --git a/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java b/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java index 09d1a71eb2a13..10151fa68d067 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/connector/ConnectorContext.java @@ -16,8 +16,6 @@ */ package org.apache.kafka.connect.connector; -import org.apache.kafka.common.metrics.PluginMetrics; - /** * ConnectorContext allows {@link Connector}s to proactively interact with the Kafka Connect runtime. */ @@ -35,26 +33,4 @@ public interface ConnectorContext { * @param e Exception to be raised. */ void raiseError(Exception e); - - /** - * Get a {@link PluginMetrics} that can be used to define metrics - * - *

          This method was added in Apache Kafka 4.1. Connectors that use this method but want to - * maintain backward compatibility so they can also be deployed to older Connect runtimes - * should guard the call to this method with a try-catch block, since calling this method will result in a - * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to - * Connect runtimes older than Kafka 4.1. For example: - *

          -     *     PluginMetrics pluginMetrics;
          -     *     try {
          -     *         pluginMetrics = context.pluginMetrics();
          -     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
          -     *         pluginMetrics = null;
          -     *     }
          -     * 
          - * - * @return the pluginMetrics instance - * @since 4.1 - */ - PluginMetrics pluginMetrics(); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java b/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java index 25abb06bde135..d41c5cd5b3c61 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/connector/policy/ConnectorClientConfigOverridePolicy.java @@ -30,11 +30,6 @@ *

          Kafka Connect discovers implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.connector.policy.ConnectorClientConfigOverridePolicy}. - *

          - * Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the policy to register metrics. - * The following tags are automatically added to all metrics registered: config set to - * connector.client.config.override.policy, and class set to the - * ConnectorClientConfigOverridePolicy class name. */ public interface ConnectorClientConfigOverridePolicy extends Configurable, AutoCloseable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java index b8e10c3dbde18..cf5f01502c83b 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/ConnectSchema.java @@ -20,55 +20,59 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Collections; import java.util.EnumMap; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.stream.Collectors; public class ConnectSchema implements Schema { /** * Maps {@link Schema.Type}s to a list of Java classes that can be used to represent them. */ - private static final Map>> SCHEMA_TYPE_CLASSES = Collections.unmodifiableMap(new EnumMap<>(Map.ofEntries( - Map.entry(Type.INT8, List.of(Byte.class)), - Map.entry(Type.INT16, List.of(Short.class)), - Map.entry(Type.INT32, List.of(Integer.class)), - Map.entry(Type.INT64, List.of(Long.class)), - Map.entry(Type.FLOAT32, List.of(Float.class)), - Map.entry(Type.FLOAT64, List.of(Double.class)), - Map.entry(Type.BOOLEAN, List.of(Boolean.class)), - Map.entry(Type.STRING, List.of(String.class)), - // Bytes are special and have 2 representations. byte[] causes problems because it doesn't handle equals() and - // hashCode() like we want objects to, so we support both byte[] and ByteBuffer. Using plain byte[] can cause - // those methods to fail, so ByteBuffers are recommended - Map.entry(Type.BYTES, List.of(byte[].class, ByteBuffer.class)), - Map.entry(Type.ARRAY, List.of(List.class)), - Map.entry(Type.MAP, List.of(Map.class)), - Map.entry(Type.STRUCT, List.of(Struct.class)) - ))); + private static final Map>> SCHEMA_TYPE_CLASSES = new EnumMap<>(Type.class); /** * Maps known logical types to a list of Java classes that can be used to represent them. */ - // We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since that's only used to determine schemas for - // schemaless data and logical types will have ambiguous schemas (e.g. many of them use the same Java class) so - // they should not be used without schemas. - private static final Map>> LOGICAL_TYPE_CLASSES = Map.of( - Decimal.LOGICAL_NAME, List.of(BigDecimal.class), - Date.LOGICAL_NAME, List.of(java.util.Date.class), - Time.LOGICAL_NAME, List.of(java.util.Date.class), - Timestamp.LOGICAL_NAME, List.of(java.util.Date.class) - ); + private static final Map>> LOGICAL_TYPE_CLASSES = new HashMap<>(); /** * Maps the Java classes to the corresponding {@link Schema.Type}. */ - private static final Map, Type> JAVA_CLASS_SCHEMA_TYPES = SCHEMA_TYPE_CLASSES.entrySet() - .stream() - .flatMap(entry -> entry.getValue().stream().map(klass -> Map.entry(klass, entry.getKey()))) - .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + private static final Map, Type> JAVA_CLASS_SCHEMA_TYPES = new HashMap<>(); + + static { + SCHEMA_TYPE_CLASSES.put(Type.INT8, Collections.singletonList(Byte.class)); + SCHEMA_TYPE_CLASSES.put(Type.INT16, Collections.singletonList(Short.class)); + SCHEMA_TYPE_CLASSES.put(Type.INT32, Collections.singletonList(Integer.class)); + SCHEMA_TYPE_CLASSES.put(Type.INT64, Collections.singletonList(Long.class)); + SCHEMA_TYPE_CLASSES.put(Type.FLOAT32, Collections.singletonList(Float.class)); + SCHEMA_TYPE_CLASSES.put(Type.FLOAT64, Collections.singletonList(Double.class)); + SCHEMA_TYPE_CLASSES.put(Type.BOOLEAN, Collections.singletonList(Boolean.class)); + SCHEMA_TYPE_CLASSES.put(Type.STRING, Collections.singletonList(String.class)); + // Bytes are special and have 2 representations. byte[] causes problems because it doesn't handle equals() and + // hashCode() like we want objects to, so we support both byte[] and ByteBuffer. Using plain byte[] can cause + // those methods to fail, so ByteBuffers are recommended + SCHEMA_TYPE_CLASSES.put(Type.BYTES, Arrays.asList(byte[].class, ByteBuffer.class)); + SCHEMA_TYPE_CLASSES.put(Type.ARRAY, Collections.singletonList(List.class)); + SCHEMA_TYPE_CLASSES.put(Type.MAP, Collections.singletonList(Map.class)); + SCHEMA_TYPE_CLASSES.put(Type.STRUCT, Collections.singletonList(Struct.class)); + + for (Map.Entry>> schemaClasses : SCHEMA_TYPE_CLASSES.entrySet()) { + for (Class schemaClass : schemaClasses.getValue()) + JAVA_CLASS_SCHEMA_TYPES.put(schemaClass, schemaClasses.getKey()); + } + + LOGICAL_TYPE_CLASSES.put(Decimal.LOGICAL_NAME, Collections.singletonList(BigDecimal.class)); + LOGICAL_TYPE_CLASSES.put(Date.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); + LOGICAL_TYPE_CLASSES.put(Time.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); + LOGICAL_TYPE_CLASSES.put(Timestamp.LOGICAL_NAME, Collections.singletonList(java.util.Date.class)); + // We don't need to put these into JAVA_CLASS_SCHEMA_TYPES since that's only used to determine schemas for + // schemaless data and logical types will have ambiguous schemas (e.g. many of them use the same Java class) so + // they should not be used without schemas. + } // The type of the field private final Type type; @@ -106,7 +110,7 @@ public ConnectSchema(Type type, boolean optional, Object defaultValue, String na this.parameters = parameters; if (this.type == Type.STRUCT) { - this.fields = fields == null ? List.of() : fields; + this.fields = fields == null ? Collections.emptyList() : fields; this.fieldsByName = new HashMap<>(this.fields.size()); for (Field field : this.fields) fieldsByName.put(field.name(), field); @@ -279,12 +283,9 @@ private static Schema assertSchemaNotNull(Schema schema, String location) { } private static List> expectedClassesFor(Schema schema) { - List> expectedClasses = null; - if (schema.name() != null) { - expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name()); - } + List> expectedClasses = LOGICAL_TYPE_CLASSES.get(schema.name()); if (expectedClasses == null) - expectedClasses = SCHEMA_TYPE_CLASSES.getOrDefault(schema.type(), List.of()); + expectedClasses = SCHEMA_TYPE_CLASSES.getOrDefault(schema.type(), Collections.emptyList()); return expectedClasses; } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java index 4f25e3611a099..4a57663f4c527 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Schema.java @@ -117,10 +117,19 @@ public String getName() { } public boolean isPrimitive() { - return switch (this) { - case INT8, INT16, INT32, INT64, FLOAT32, FLOAT64, BOOLEAN, STRING, BYTES -> true; - default -> false; - }; + switch (this) { + case INT8: + case INT16: + case INT32: + case INT64: + case FLOAT32: + case FLOAT64: + case BOOLEAN: + case STRING: + case BYTES: + return true; + } + return false; } } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java index d8c55573e5c6f..8115675f5a532 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaBuilder.java @@ -420,7 +420,7 @@ public Schema valueSchema() { public Schema build() { return new ConnectSchema(type, isOptional(), defaultValue, name, version, doc, parameters == null ? null : Collections.unmodifiableMap(parameters), - fields == null ? null : List.copyOf(fields.values()), keySchema, valueSchema); + fields == null ? null : Collections.unmodifiableList(new ArrayList<>(fields.values())), keySchema, valueSchema); } /** @@ -441,4 +441,4 @@ private static void checkNotNull(String fieldName, Object val, String fieldToSet if (val == null) throw new SchemaBuilderException("Invalid SchemaBuilder call: " + fieldName + " must be specified to set " + fieldToSet); } -} +} \ No newline at end of file diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java index ed096504e09a2..ee2d6cca43d52 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/SchemaProjector.java @@ -78,13 +78,25 @@ public static Object project(Schema source, Object record, Schema target) throws } private static Object projectRequiredSchema(Schema source, Object record, Schema target) throws SchemaProjectorException { - return switch (target.type()) { - case INT8, INT16, INT32, INT64, FLOAT32, FLOAT64, BOOLEAN, BYTES, STRING -> - projectPrimitive(source, record, target); - case STRUCT -> projectStruct(source, (Struct) record, target); - case ARRAY -> projectArray(source, record, target); - case MAP -> projectMap(source, record, target); - }; + switch (target.type()) { + case INT8: + case INT16: + case INT32: + case INT64: + case FLOAT32: + case FLOAT64: + case BOOLEAN: + case BYTES: + case STRING: + return projectPrimitive(source, record, target); + case STRUCT: + return projectStruct(source, (Struct) record, target); + case ARRAY: + return projectArray(source, record, target); + case MAP: + return projectMap(source, record, target); + } + return null; } private static Object projectStruct(Schema source, Struct sourceStruct, Schema target) throws SchemaProjectorException { @@ -148,16 +160,30 @@ private static Object projectPrimitive(Schema source, Object record, Schema targ assert source.type().isPrimitive(); assert target.type().isPrimitive(); Object result; - if (isPromotable(source.type(), target.type()) && record instanceof Number numberRecord) { - result = switch (target.type()) { - case INT8 -> numberRecord.byteValue(); - case INT16 -> numberRecord.shortValue(); - case INT32 -> numberRecord.intValue(); - case INT64 -> numberRecord.longValue(); - case FLOAT32 -> numberRecord.floatValue(); - case FLOAT64 -> numberRecord.doubleValue(); - default -> throw new SchemaProjectorException("Not promotable type."); - }; + if (isPromotable(source.type(), target.type()) && record instanceof Number) { + Number numberRecord = (Number) record; + switch (target.type()) { + case INT8: + result = numberRecord.byteValue(); + break; + case INT16: + result = numberRecord.shortValue(); + break; + case INT32: + result = numberRecord.intValue(); + break; + case INT64: + result = numberRecord.longValue(); + break; + case FLOAT32: + result = numberRecord.floatValue(); + break; + case FLOAT64: + result = numberRecord.doubleValue(); + break; + default: + throw new SchemaProjectorException("Not promotable type."); + } } else { result = record; } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java index 266d31534a81c..df389fa5652dc 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/data/Values.java @@ -430,20 +430,33 @@ protected static Object convertTo(Schema toSchema, Schema fromSchema, Object val } throw new DataException("Unable to convert a null value to a schema that requires a value"); } - return switch (toSchema.type()) { - case BYTES -> convertMaybeLogicalBytes(toSchema, value); - case STRING -> convertToString(fromSchema, value); - case BOOLEAN -> convertToBoolean(fromSchema, value); - case INT8 -> convertToByte(fromSchema, value); - case INT16 -> convertToShort(fromSchema, value); - case INT32 -> convertMaybeLogicalInteger(toSchema, fromSchema, value); - case INT64 -> convertMaybeLogicalLong(toSchema, fromSchema, value); - case FLOAT32 -> convertToFloat(fromSchema, value); - case FLOAT64 -> convertToDouble(fromSchema, value); - case ARRAY -> convertToArray(toSchema, value); - case MAP -> convertToMapInternal(toSchema, value); - case STRUCT -> convertToStructInternal(toSchema, value); - }; + switch (toSchema.type()) { + case BYTES: + return convertMaybeLogicalBytes(toSchema, value); + case STRING: + return convertToString(fromSchema, value); + case BOOLEAN: + return convertToBoolean(fromSchema, value); + case INT8: + return convertToByte(fromSchema, value); + case INT16: + return convertToShort(fromSchema, value); + case INT32: + return convertMaybeLogicalInteger(toSchema, fromSchema, value); + case INT64: + return convertMaybeLogicalLong(toSchema, fromSchema, value); + case FLOAT32: + return convertToFloat(fromSchema, value); + case FLOAT64: + return convertToDouble(fromSchema, value); + case ARRAY: + return convertToArray(toSchema, value); + case MAP: + return convertToMapInternal(toSchema, value); + case STRUCT: + return convertToStructInternal(toSchema, value); + } + throw new DataException("Unable to convert " + value + " (" + value.getClass() + ") to " + toSchema); } private static Serializable convertMaybeLogicalBytes(Schema toSchema, Object value) { @@ -570,7 +583,8 @@ private static java.util.Date convertToTimestamp(Schema toSchema, Schema fromSch SchemaAndValue parsed = parseString(value.toString()); value = parsed.value(); } - if (value instanceof java.util.Date date) { + if (value instanceof java.util.Date) { + java.util.Date date = (java.util.Date) value; if (fromSchema != null) { String fromSchemaName = fromSchema.name(); if (Date.LOGICAL_NAME.equals(fromSchemaName)) { @@ -640,7 +654,8 @@ private static Struct convertToStructInternal(Schema toSchema, Object value) { */ protected static long asLong(Object value, Schema fromSchema, Throwable error) { try { - if (value instanceof Number number) { + if (value instanceof Number) { + Number number = (Number) value; return number.longValue(); } if (value instanceof String) { @@ -679,7 +694,8 @@ protected static long asLong(Object value, Schema fromSchema, Throwable error) { */ protected static double asDouble(Object value, Schema schema, Throwable error) { try { - if (value instanceof Number number) { + if (value instanceof Number) { + Number number = (Number) value; return number.doubleValue(); } if (value instanceof String) { @@ -716,15 +732,18 @@ protected static void append(StringBuilder sb, Object value, boolean embedded) { } else if (value instanceof ByteBuffer) { byte[] bytes = Utils.readBytes((ByteBuffer) value); append(sb, bytes, embedded); - } else if (value instanceof List list) { + } else if (value instanceof List) { + List list = (List) value; sb.append('['); appendIterable(sb, list.iterator()); sb.append(']'); - } else if (value instanceof Map map) { + } else if (value instanceof Map) { + Map map = (Map) value; sb.append('{'); appendIterable(sb, map.entrySet().iterator()); sb.append('}'); - } else if (value instanceof Struct struct) { + } else if (value instanceof Struct) { + Struct struct = (Struct) value; Schema schema = struct.schema(); boolean first = true; sb.append('{'); @@ -739,11 +758,13 @@ protected static void append(StringBuilder sb, Object value, boolean embedded) { append(sb, struct.get(field), true); } sb.append('}'); - } else if (value instanceof Map.Entry entry) { + } else if (value instanceof Map.Entry) { + Map.Entry entry = (Map.Entry) value; append(sb, entry.getKey(), true); sb.append(':'); append(sb, entry.getValue(), true); - } else if (value instanceof java.util.Date dateValue) { + } else if (value instanceof java.util.Date) { + java.util.Date dateValue = (java.util.Date) value; String formatted = dateFormatFor(dateValue).format(dateValue); sb.append(formatted); } else { @@ -1131,15 +1152,21 @@ private static Schema mergeSchemas(Schema previous, Schema newSchema) { Type previousType = previous.type(); Type newType = newSchema.type(); if (previousType != newType) { - return switch (previous.type()) { - case INT8 -> commonSchemaForInt8(newSchema, newType); - case INT16 -> commonSchemaForInt16(previous, newSchema, newType); - case INT32 -> commonSchemaForInt32(previous, newSchema, newType); - case INT64 -> commonSchemaForInt64(previous, newSchema, newType); - case FLOAT32 -> commonSchemaForFloat32(previous, newSchema, newType); - case FLOAT64 -> commonSchemaForFloat64(previous, newType); - default -> null; - }; + switch (previous.type()) { + case INT8: + return commonSchemaForInt8(newSchema, newType); + case INT16: + return commonSchemaForInt16(previous, newSchema, newType); + case INT32: + return commonSchemaForInt32(previous, newSchema, newType); + case INT64: + return commonSchemaForInt64(previous, newSchema, newType); + case FLOAT32: + return commonSchemaForFloat32(previous, newSchema, newType); + case FLOAT64: + return commonSchemaForFloat64(previous, newType); + } + return null; } if (previous.isOptional() == newSchema.isOptional()) { // Use the optional one @@ -1254,8 +1281,10 @@ public boolean canDetect(Object value) { } if (knownType == null) { knownType = schema.type(); + } else if (knownType != schema.type()) { + return false; } - return knownType == schema.type(); + return true; } public Schema schema() { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java index 376e95847c44e..3b9f3470f6c14 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeader.java @@ -81,7 +81,8 @@ public boolean equals(Object obj) { if (obj == this) { return true; } - if (obj instanceof Header that) { + if (obj instanceof Header) { + Header that = (Header) obj; return Objects.equals(this.key, that.key()) && Objects.equals(this.schema(), that.schema()) && Objects.equals(this.value(), that.value()); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java index 63ee8ab6598d1..5c37ddc5e58b4 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/header/ConnectHeaders.java @@ -55,7 +55,8 @@ public ConnectHeaders(Iterable

          original) { if (original == null) { return; } - if (original instanceof ConnectHeaders originalHeaders) { + if (original instanceof ConnectHeaders) { + ConnectHeaders originalHeaders = (ConnectHeaders) original; if (!originalHeaders.isEmpty()) { headers = new LinkedList<>(originalHeaders.headers); } @@ -342,7 +343,8 @@ public boolean equals(Object obj) { if (obj == this) { return true; } - if (obj instanceof Headers that) { + if (obj instanceof Headers) { + Headers that = (Headers) obj; Iterator
          thisIter = this.iterator(); Iterator
          thatIter = that.iterator(); while (thisIter.hasNext() && thatIter.hasNext()) { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java index 5110a832459ad..ca960414dd5b8 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/rest/ConnectRestExtension.java @@ -43,10 +43,6 @@ * *

          When the Connect worker shuts down, it will call the extension's {@link #close} method to allow the implementation to release all of * its resources. - * - *

          Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the extension to register metrics. - * The following tags are automatically added to all metrics registered: config set to - * rest.extension.classes, and class set to the ConnectRestExtension class name. */ public interface ConnectRestExtension extends Configurable, Versioned, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java index 5f392ada8fd2a..35daae5453043 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/sink/SinkTaskContext.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.sink; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.metrics.PluginMetrics; import java.util.Map; import java.util.Set; @@ -124,26 +123,4 @@ default ErrantRecordReporter errantRecordReporter() { return null; } - /** - * Get a {@link PluginMetrics} that can be used to define metrics - * - *

          This method was added in Apache Kafka 4.1. Tasks that use this method but want to - * maintain backward compatibility so they can also be deployed to older Connect runtimes - * should guard the call to this method with a try-catch block, since calling this method will result in a - * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to - * Connect runtimes older than Kafka 4.1. For example: - *

          -     *     PluginMetrics pluginMetrics;
          -     *     try {
          -     *         pluginMetrics = context.pluginMetrics();
          -     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
          -     *         pluginMetrics = null;
          -     *     }
          -     * 
          - * - * @return the PluginMetrics instance - * @since 4.1 - */ - PluginMetrics pluginMetrics(); - } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java index 66879d303ceed..e90b4b11e24de 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/source/SourceTaskContext.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.source; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.storage.OffsetStorageReader; import java.util.Map; @@ -64,26 +63,4 @@ public interface SourceTaskContext { default TransactionContext transactionContext() { return null; } - - /** - * Get a {@link PluginMetrics} that can be used to define metrics - * - *

          This method was added in Apache Kafka 4.1. Tasks that use this method but want to - * maintain backward compatibility so they can also be deployed to older Connect runtimes - * should guard the call to this method with a try-catch block, since calling this method will result in a - * {@link NoSuchMethodError} or {@link NoClassDefFoundError} when the connector is deployed to - * Connect runtimes older than Kafka 4.1. For example: - *

          -     *     PluginMetrics pluginMetrics;
          -     *     try {
          -     *         pluginMetrics = context.pluginMetrics();
          -     *     } catch (NoSuchMethodError | NoClassDefFoundError e) {
          -     *         pluginMetrics = null;
          -     *     }
          -     * 
          - * - * @return the pluginMetrics instance - * @since 4.1 - */ - PluginMetrics pluginMetrics(); } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java index 1b2300e2330fe..a2fb3ba0acb77 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/Converter.java @@ -21,8 +21,6 @@ import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; -import java.io.Closeable; -import java.io.IOException; import java.util.Map; /** @@ -32,12 +30,8 @@ *

          Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.storage.Converter}. - * - *

          Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the converter to register metrics. - * The following tags are automatically added to all metrics registered: connector set to connector name, - * task set to the task id and converter set to either key or value. */ -public interface Converter extends Closeable { +public interface Converter { /** * Configure this class. @@ -104,9 +98,4 @@ default SchemaAndValue toConnectData(String topic, Headers headers, byte[] value default ConfigDef config() { return new ConfigDef(); } - - @Override - default void close() throws IOException { - // no op - } } diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java index 2da2bd8d07e16..ecd7b2e755ae4 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/ConverterType.java @@ -16,10 +16,10 @@ */ package org.apache.kafka.connect.storage; -import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.Locale; import java.util.Map; -import java.util.stream.Collectors; /** * The type of {@link Converter} and {@link HeaderConverter}. @@ -29,8 +29,16 @@ public enum ConverterType { VALUE, HEADER; - private static final Map NAME_TO_TYPE = Arrays.stream(ConverterType.values()) - .collect(Collectors.toUnmodifiableMap(ConverterType::getName, t -> t)); + private static final Map NAME_TO_TYPE; + + static { + ConverterType[] types = ConverterType.values(); + Map nameToType = new HashMap<>(types.length); + for (ConverterType type : types) { + nameToType.put(type.name, type); + } + NAME_TO_TYPE = Collections.unmodifiableMap(nameToType); + } /** * Find the ConverterType with the given name, using a case-insensitive match. diff --git a/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java b/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java index 810905c095906..de89b1678cbd9 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/storage/HeaderConverter.java @@ -31,10 +31,6 @@ *

          Kafka Connect may discover implementations of this interface using the Java {@link java.util.ServiceLoader} mechanism. * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.storage.HeaderConverter}. - * - *

          Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the converter to register metrics. - * The following tags are automatically added to all metrics registered: connector set to connector name, - * task set to the task id and converter set to header. */ public interface HeaderConverter extends Configurable, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java index 1902061089cc3..d5e42ebe8bc8c 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/transforms/Transformation.java @@ -30,10 +30,6 @@ * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.transforms.Transformation}. * - *

          Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the transformation to register metrics. - * The following tags are automatically added to all metrics registered: connector set to connector name, - * task set to the task id and transformation set to the transformation alias. - * * @param The type of record (must be an implementation of {@link ConnectRecord}) */ public interface Transformation> extends Configurable, Closeable { diff --git a/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java b/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java index c2942e8a6307e..1cd7abb75f591 100644 --- a/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java +++ b/connect/api/src/main/java/org/apache/kafka/connect/transforms/predicates/Predicate.java @@ -31,10 +31,6 @@ * To support this, implementations of this interface should also contain a service provider configuration file in * {@code META-INF/services/org.apache.kafka.connect.transforms.predicates.Predicate}. * - *

          Implement {@link org.apache.kafka.common.metrics.Monitorable} to enable the predicate to register metrics. - * The following tags are automatically added to all metrics registered: connector set to connector name, - * task set to the task id and predicate set to the predicate alias. - * * @param The type of record. */ public interface Predicate> extends Configurable, AutoCloseable { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java index 8b9c16bfe6421..efa56aca4692d 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/connector/ConnectorReconfigurationTest.java @@ -21,6 +21,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -32,15 +33,15 @@ public class ConnectorReconfigurationTest { @Test public void testDefaultReconfigure() { TestConnector conn = new TestConnector(false); - conn.reconfigure(Map.of()); - assertEquals(0, conn.stopOrder); - assertEquals(1, conn.configureOrder); + conn.reconfigure(Collections.emptyMap()); + assertEquals(conn.stopOrder, 0); + assertEquals(conn.configureOrder, 1); } @Test public void testReconfigureStopException() { TestConnector conn = new TestConnector(true); - assertThrows(ConnectException.class, () -> conn.reconfigure(Map.of())); + assertThrows(ConnectException.class, () -> conn.reconfigure(Collections.emptyMap())); } private static class TestConnector extends Connector { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java index 95cc36edb1bd1..b4e9f81ce8163 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ConnectSchemaTest.java @@ -27,7 +27,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -87,17 +86,17 @@ public void testValidateValueMatchingType() { ConnectSchema.validateValue(Schema.STRING_SCHEMA, "a string"); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, "a byte array".getBytes()); ConnectSchema.validateValue(Schema.BYTES_SCHEMA, ByteBuffer.wrap("a byte array".getBytes())); - ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, 3)); + ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)); ConnectSchema.validateValue( SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.STRING_SCHEMA).build(), - Map.of(1, "value") + Collections.singletonMap(1, "value") ); // Struct tests the basic struct layout + complex field types + nested structs Struct structValue = new Struct(STRUCT_SCHEMA) .put("first", 1) .put("second", "foo") - .put("array", List.of(1, 2, 3)) - .put("map", Map.of(1, "value")) + .put("array", Arrays.asList(1, 2, 3)) + .put("map", Collections.singletonMap(1, "value")) .put("nested", new Struct(FLAT_STRUCT_SCHEMA).put("field", 12)); ConnectSchema.validateValue(STRUCT_SCHEMA, structValue); } @@ -172,7 +171,7 @@ public void testValidateValueMismatchBytes() { @Test public void testValidateValueMismatchArray() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of("a", "b", "c"))); + () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList("a", "b", "c"))); } @Test @@ -180,19 +179,19 @@ public void testValidateValueMismatchArraySomeMatch() { // Even if some match the right type, this should fail if any mismatch. In this case, type erasure loses // the fact that the list is actually List, but we couldn't tell if only checking the first element assertThrows(DataException.class, - () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, "c"))); + () -> ConnectSchema.validateValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, "c"))); } @Test public void testValidateValueMismatchMapKey() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Map.of("wrong key type", "value"))); + () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap("wrong key type", "value"))); } @Test public void testValidateValueMismatchMapValue() { assertThrows(DataException.class, - () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Map.of(1, 2))); + () -> ConnectSchema.validateValue(MAP_INT_STRING_SCHEMA, Collections.singletonMap(1, 2))); } @Test @@ -260,7 +259,7 @@ public void testPrimitiveEquality() { ConnectSchema differentName = new ConnectSchema(Schema.Type.INT8, false, null, "otherName", 2, "doc"); ConnectSchema differentVersion = new ConnectSchema(Schema.Type.INT8, false, null, "name", 4, "doc"); ConnectSchema differentDoc = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "other doc"); - ConnectSchema differentParameters = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc", Map.of("param", "value"), null, null, null); + ConnectSchema differentParameters = new ConnectSchema(Schema.Type.INT8, false, null, "name", 2, "doc", Collections.singletonMap("param", "value"), null, null, null); assertEquals(s1, s2); assertNotEquals(s1, differentType); @@ -312,13 +311,13 @@ public void testStructEquality() { // Same as testArrayEquality, but checks differences in fields. Only does a simple check, relying on tests of // Field's equals() method to validate all variations in the list of fields will be checked ConnectSchema s1 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - List.of(new Field("field", 0, SchemaBuilder.int8().build()), + Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), new Field("field2", 1, SchemaBuilder.int16().build())), null, null); ConnectSchema s2 = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - List.of(new Field("field", 0, SchemaBuilder.int8().build()), + Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), new Field("field2", 1, SchemaBuilder.int16().build())), null, null); ConnectSchema differentField = new ConnectSchema(Schema.Type.STRUCT, false, null, null, null, null, null, - List.of(new Field("field", 0, SchemaBuilder.int8().build()), + Arrays.asList(new Field("field", 0, SchemaBuilder.int8().build()), new Field("different field name", 1, SchemaBuilder.int16().build())), null, null); assertEquals(s1, s2); @@ -366,44 +365,44 @@ public void testValidateList() { // Optional element schema Schema optionalStrings = SchemaBuilder.array(Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, List.of()); - ConnectSchema.validateValue(fieldName, optionalStrings, List.of("hello")); + ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyList()); + ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList("hello")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonList(null)); - ConnectSchema.validateValue(fieldName, optionalStrings, List.of("hello", "world")); + ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", "world")); ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList("hello", null)); ConnectSchema.validateValue(fieldName, optionalStrings, Arrays.asList(null, "world")); - assertInvalidValueForSchema(fieldName, optionalStrings, List.of(true), + assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); // Required element schema Schema requiredStrings = SchemaBuilder.array(Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, List.of()); - ConnectSchema.validateValue(fieldName, requiredStrings, List.of("hello")); + ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyList()); + ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonList("hello")); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonList(null), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - ConnectSchema.validateValue(fieldName, requiredStrings, List.of("hello", "world")); + ConnectSchema.validateValue(fieldName, requiredStrings, Arrays.asList("hello", "world")); assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList("hello", null), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Arrays.asList(null, "world"), "Invalid value: null used for required element of array field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, optionalStrings, List.of(true), + assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonList(true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for element of array field: \"field\""); // Null element schema Schema nullElements = SchemaBuilder.type(Schema.Type.ARRAY); - assertInvalidValueForSchema(fieldName, nullElements, List.of(), + assertInvalidValueForSchema(fieldName, nullElements, Collections.emptyList(), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, List.of("hello"), + assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList("hello"), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(null), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, List.of("hello", "world"), + assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", "world"), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList("hello", null), "No schema defined for element of array field: \"field\""); assertInvalidValueForSchema(fieldName, nullElements, Arrays.asList(null, "world"), "No schema defined for element of array field: \"field\""); - assertInvalidValueForSchema(fieldName, nullElements, List.of(true), + assertInvalidValueForSchema(fieldName, nullElements, Collections.singletonList(true), "No schema defined for element of array field: \"field\""); } @@ -413,36 +412,36 @@ public void testValidateMap() { // Optional element schema Schema optionalStrings = SchemaBuilder.map(Schema.OPTIONAL_STRING_SCHEMA, Schema.OPTIONAL_STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, optionalStrings, Map.of()); - ConnectSchema.validateValue(fieldName, optionalStrings, Map.of("key", "value")); + ConnectSchema.validateValue(fieldName, optionalStrings, Collections.emptyMap()); + ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap("key", "value")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap("key", null)); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, "value")); ConnectSchema.validateValue(fieldName, optionalStrings, Collections.singletonMap(null, null)); - assertInvalidValueForSchema(fieldName, optionalStrings, Map.of("key", true), + assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap("key", true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, optionalStrings, Map.of(true, "value"), + assertInvalidValueForSchema(fieldName, optionalStrings, Collections.singletonMap(true, "value"), "Invalid Java object for schema with type STRING: class java.lang.Boolean for key of map field: \"field\""); // Required element schema Schema requiredStrings = SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA); - ConnectSchema.validateValue(fieldName, requiredStrings, Map.of()); - ConnectSchema.validateValue(fieldName, requiredStrings, Map.of("key", "value")); + ConnectSchema.validateValue(fieldName, requiredStrings, Collections.emptyMap()); + ConnectSchema.validateValue(fieldName, requiredStrings, Collections.singletonMap("key", "value")); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", null), "Invalid value: null used for required value of map field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, "value"), "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(null, null), "Invalid value: null used for required key of map field: \"field\", schema type: STRING"); - assertInvalidValueForSchema(fieldName, requiredStrings, Map.of("key", true), + assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap("key", true), "Invalid Java object for schema with type STRING: class java.lang.Boolean for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, requiredStrings, Map.of(true, "value"), + assertInvalidValueForSchema(fieldName, requiredStrings, Collections.singletonMap(true, "value"), "Invalid Java object for schema with type STRING: class java.lang.Boolean for key of map field: \"field\""); // Null key schema Schema nullKeys = SchemaBuilder.type(Schema.Type.MAP); - assertInvalidValueForSchema(fieldName, nullKeys, Map.of(), + assertInvalidValueForSchema(fieldName, nullKeys, Collections.emptyMap(), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Map.of("key", "value"), + assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", "value"), "No schema defined for key of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", null), "No schema defined for key of map field: \"field\""); @@ -450,16 +449,16 @@ public void testValidateMap() { "No schema defined for key of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(null, null), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Map.of("key", true), + assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap("key", true), "No schema defined for key of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullKeys, Map.of(true, "value"), + assertInvalidValueForSchema(fieldName, nullKeys, Collections.singletonMap(true, "value"), "No schema defined for key of map field: \"field\""); // Null value schema Schema nullValues = SchemaBuilder.mapWithNullValues(Schema.OPTIONAL_STRING_SCHEMA); - assertInvalidValueForSchema(fieldName, nullValues, Map.of(), + assertInvalidValueForSchema(fieldName, nullValues, Collections.emptyMap(), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Map.of("key", "value"), + assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", "value"), "No schema defined for value of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", null), "No schema defined for value of map field: \"field\""); @@ -467,9 +466,9 @@ public void testValidateMap() { "No schema defined for value of map field: \"field\""); assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(null, null), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Map.of("key", true), + assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap("key", true), "No schema defined for value of map field: \"field\""); - assertInvalidValueForSchema(fieldName, nullValues, Map.of(true, "value"), + assertInvalidValueForSchema(fieldName, nullValues, Collections.singletonMap(true, "value"), "No schema defined for value of map field: \"field\""); } } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java index efb4a75ed296b..9592fb918e759 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/DecimalTest.java @@ -20,7 +20,7 @@ import java.math.BigDecimal; import java.math.BigInteger; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -36,7 +36,7 @@ public class DecimalTest { public void testBuilder() { Schema plain = Decimal.builder(2).build(); assertEquals(Decimal.LOGICAL_NAME, plain.name()); - assertEquals(Map.of(Decimal.SCALE_FIELD, "2"), plain.parameters()); + assertEquals(Collections.singletonMap(Decimal.SCALE_FIELD, "2"), plain.parameters()); assertEquals(1, (Object) plain.version()); } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java index 23a96f92c15ae..c789541ae5377 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaBuilderTest.java @@ -21,6 +21,8 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -232,14 +234,14 @@ public void testNonStructCantHaveFields() { public void testArrayBuilder() { Schema schema = SchemaBuilder.array(Schema.INT8_SCHEMA).build(); assertTypeAndDefault(schema, Schema.Type.ARRAY, false, null); - assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); + assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); assertNoMetadata(schema); // Default value - List defArray = List.of((byte) 1, (byte) 2); + List defArray = Arrays.asList((byte) 1, (byte) 2); schema = SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(defArray).build(); assertTypeAndDefault(schema, Schema.Type.ARRAY, false, defArray); - assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); + assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); assertNoMetadata(schema); } @@ -247,7 +249,7 @@ public void testArrayBuilder() { public void testArrayBuilderInvalidDefault() { // Array, but wrong embedded type assertThrows(SchemaBuilderException.class, - () -> SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(List.of("string")).build()); + () -> SchemaBuilder.array(Schema.INT8_SCHEMA).defaultValue(Collections.singletonList("string")).build()); } @Test @@ -255,30 +257,30 @@ public void testMapBuilder() { // SchemaBuilder should also pass the check Schema schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA); assertTypeAndDefault(schema, Schema.Type.MAP, false, null); - assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); - assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); + assertEquals(schema.keySchema(), Schema.INT8_SCHEMA); + assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); assertNoMetadata(schema); schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA).build(); assertTypeAndDefault(schema, Schema.Type.MAP, false, null); - assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); - assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); + assertEquals(schema.keySchema(), Schema.INT8_SCHEMA); + assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); assertNoMetadata(schema); // Default value - Map defMap = Map.of((byte) 5, (byte) 10); + Map defMap = Collections.singletonMap((byte) 5, (byte) 10); schema = SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA) .defaultValue(defMap).build(); assertTypeAndDefault(schema, Schema.Type.MAP, false, defMap); - assertEquals(Schema.INT8_SCHEMA, schema.keySchema()); - assertEquals(Schema.INT8_SCHEMA, schema.valueSchema()); + assertEquals(schema.keySchema(), Schema.INT8_SCHEMA); + assertEquals(schema.valueSchema(), Schema.INT8_SCHEMA); assertNoMetadata(schema); } @Test public void testMapBuilderInvalidDefault() { // Map, but wrong embedded type - Map defMap = Map.of((byte) 5, "foo"); + Map defMap = Collections.singletonMap((byte) 5, "foo"); assertThrows(SchemaBuilderException.class, () -> SchemaBuilder.map(Schema.INT8_SCHEMA, Schema.INT8_SCHEMA) .defaultValue(defMap).build()); } diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java index 0f438c0e0ff2b..4ec35d369adb9 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/SchemaProjectorTest.java @@ -24,6 +24,8 @@ import java.math.BigDecimal; import java.math.BigInteger; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -76,12 +78,12 @@ public void testNumericTypeProjection() { Object[] values = {(byte) 127, (short) 255, 32767, 327890L, 1.2F, 1.2345}; Map> expectedProjected = new HashMap<>(); - expectedProjected.put(values[0], List.of((byte) 127, (short) 127, 127, 127L, 127.F, 127.)); - expectedProjected.put(values[1], List.of((short) 255, 255, 255L, 255.F, 255.)); - expectedProjected.put(values[2], List.of(32767, 32767L, 32767.F, 32767.)); - expectedProjected.put(values[3], List.of(327890L, 327890.F, 327890.)); - expectedProjected.put(values[4], List.of(1.2F, 1.2)); - expectedProjected.put(values[5], List.of(1.2345)); + expectedProjected.put(values[0], Arrays.asList((byte) 127, (short) 127, 127, 127L, 127.F, 127.)); + expectedProjected.put(values[1], Arrays.asList((short) 255, 255, 255L, 255.F, 255.)); + expectedProjected.put(values[2], Arrays.asList(32767, 32767L, 32767.F, 32767.)); + expectedProjected.put(values[3], Arrays.asList(327890L, 327890.F, 327890.)); + expectedProjected.put(values[4], Arrays.asList(1.2F, 1.2)); + expectedProjected.put(values[5], Collections.singletonList(1.2345)); Object promoted; for (int i = 0; i < promotableSchemas.length; ++i) { @@ -296,16 +298,16 @@ public void testNestedSchemaProjection() { Struct sourceNestedStruct = new Struct(sourceNestedSchema); sourceNestedStruct.put("first", 1); sourceNestedStruct.put("second", "abc"); - sourceNestedStruct.put("array", List.of(1, 2)); - sourceNestedStruct.put("map", Map.of(5, "def")); + sourceNestedStruct.put("array", Arrays.asList(1, 2)); + sourceNestedStruct.put("map", Collections.singletonMap(5, "def")); sourceNestedStruct.put("nested", sourceFlatStruct); Struct targetNestedStruct = (Struct) SchemaProjector.project(sourceNestedSchema, sourceNestedStruct, targetNestedSchema); assertEquals(1, targetNestedStruct.get("first")); assertEquals("abc", targetNestedStruct.get("second")); - assertEquals(List.of(1, 2), targetNestedStruct.get("array")); - assertEquals(Map.of(5, "def"), targetNestedStruct.get("map")); + assertEquals(Arrays.asList(1, 2), targetNestedStruct.get("array")); + assertEquals(Collections.singletonMap(5, "def"), targetNestedStruct.get("map")); Struct projectedStruct = (Struct) targetNestedStruct.get("nested"); assertEquals(113, projectedStruct.get("field")); @@ -358,22 +360,22 @@ public void testLogicalTypeProjection() { public void testArrayProjection() { Schema source = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); - Object projected = SchemaProjector.project(source, List.of(1, 2, 3), source); - assertEquals(List.of(1, 2, 3), projected); + Object projected = SchemaProjector.project(source, Arrays.asList(1, 2, 3), source); + assertEquals(Arrays.asList(1, 2, 3), projected); Schema optionalSource = SchemaBuilder.array(Schema.INT32_SCHEMA).optional().build(); - Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(List.of(1, 2, 3)).build(); - projected = SchemaProjector.project(optionalSource, List.of(4, 5), target); - assertEquals(List.of(4, 5), projected); + Schema target = SchemaBuilder.array(Schema.INT32_SCHEMA).defaultValue(Arrays.asList(1, 2, 3)).build(); + projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), target); + assertEquals(Arrays.asList(4, 5), projected); projected = SchemaProjector.project(optionalSource, null, target); - assertEquals(List.of(1, 2, 3), projected); + assertEquals(Arrays.asList(1, 2, 3), projected); - Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(List.of(1L, 2L, 3L)).build(); - projected = SchemaProjector.project(optionalSource, List.of(4, 5), promotedTarget); - List expectedProjected = List.of(4L, 5L); + Schema promotedTarget = SchemaBuilder.array(Schema.INT64_SCHEMA).defaultValue(Arrays.asList(1L, 2L, 3L)).build(); + projected = SchemaProjector.project(optionalSource, Arrays.asList(4, 5), promotedTarget); + List expectedProjected = Arrays.asList(4L, 5L); assertEquals(expectedProjected, projected); projected = SchemaProjector.project(optionalSource, null, promotedTarget); - assertEquals(List.of(1L, 2L, 3L), projected); + assertEquals(Arrays.asList(1L, 2L, 3L), projected); Schema noDefaultValueTarget = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); assertThrows(SchemaProjectorException.class, () -> SchemaProjector.project(optionalSource, null, @@ -389,18 +391,18 @@ public void testArrayProjection() { public void testMapProjection() { Schema source = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).optional().build(); - Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Map.of(1, 2)).build(); - Object projected = SchemaProjector.project(source, Map.of(3, 4), target); - assertEquals(Map.of(3, 4), projected); + Schema target = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).defaultValue(Collections.singletonMap(1, 2)).build(); + Object projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), target); + assertEquals(Collections.singletonMap(3, 4), projected); projected = SchemaProjector.project(source, null, target); - assertEquals(Map.of(1, 2), projected); + assertEquals(Collections.singletonMap(1, 2), projected); Schema promotedTarget = SchemaBuilder.map(Schema.INT64_SCHEMA, Schema.FLOAT32_SCHEMA).defaultValue( - Map.of(3L, 4.5F)).build(); - projected = SchemaProjector.project(source, Map.of(3, 4), promotedTarget); - assertEquals(Map.of(3L, 4.F), projected); + Collections.singletonMap(3L, 4.5F)).build(); + projected = SchemaProjector.project(source, Collections.singletonMap(3, 4), promotedTarget); + assertEquals(Collections.singletonMap(3L, 4.F), projected); projected = SchemaProjector.project(source, null, promotedTarget); - assertEquals(Map.of(3L, 4.5F), projected); + assertEquals(Collections.singletonMap(3L, 4.5F), projected); Schema noDefaultValueTarget = SchemaBuilder.map(Schema.INT32_SCHEMA, Schema.INT32_SCHEMA).build(); assertThrows(SchemaProjectorException.class, @@ -422,7 +424,7 @@ public void testMaybeCompatible() { () -> SchemaProjector.project(source, 12, target), "Source name and target name mismatch."); - Schema targetWithParameters = SchemaBuilder.int32().parameters(Map.of("key", "value")); + Schema targetWithParameters = SchemaBuilder.int32().parameters(Collections.singletonMap("key", "value")); assertThrows(SchemaProjectorException.class, () -> SchemaProjector.project(source, 34, targetWithParameters), "Source parameters and target parameters mismatch."); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java index bfdec2fcb9b65..6dee26ca83ac5 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/StructTest.java @@ -21,6 +21,8 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -91,8 +93,8 @@ public void testFlatStruct() { @Test public void testComplexStruct() { - List array = List.of((byte) 1, (byte) 2); - Map map = Map.of(1, "string"); + List array = Arrays.asList((byte) 1, (byte) 2); + Map map = Collections.singletonMap(1, "string"); Struct struct = new Struct(NESTED_SCHEMA) .put("array", array) .put("map", map) @@ -122,13 +124,13 @@ public void testInvalidFieldType() { @Test public void testInvalidArrayFieldElements() { assertThrows(DataException.class, - () -> new Struct(NESTED_SCHEMA).put("array", List.of("should fail since elements should be int8s"))); + () -> new Struct(NESTED_SCHEMA).put("array", Collections.singletonList("should fail since elements should be int8s"))); } @Test public void testInvalidMapKeyElements() { assertThrows(DataException.class, - () -> new Struct(NESTED_SCHEMA).put("map", Map.of("should fail because keys should be int8s", (byte) 12))); + () -> new Struct(NESTED_SCHEMA).put("map", Collections.singletonMap("should fail because keys should be int8s", (byte) 12))); } @Test @@ -217,20 +219,20 @@ public void testEquals() { assertEquals(struct1, struct2); assertNotEquals(struct1, struct3); - List array = List.of((byte) 1, (byte) 2); - Map map = Map.of(1, "string"); + List array = Arrays.asList((byte) 1, (byte) 2); + Map map = Collections.singletonMap(1, "string"); struct1 = new Struct(NESTED_SCHEMA) .put("array", array) .put("map", map) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); - List array2 = List.of((byte) 1, (byte) 2); - Map map2 = Map.of(1, "string"); + List array2 = Arrays.asList((byte) 1, (byte) 2); + Map map2 = Collections.singletonMap(1, "string"); struct2 = new Struct(NESTED_SCHEMA) .put("array", array2) .put("map", map2) .put("nested", new Struct(NESTED_CHILD_SCHEMA).put("int8", (byte) 12)); - List array3 = List.of((byte) 1, (byte) 2, (byte) 3); - Map map3 = Map.of(2, "string"); + List array3 = Arrays.asList((byte) 1, (byte) 2, (byte) 3); + Map map3 = Collections.singletonMap(2, "string"); struct3 = new Struct(NESTED_SCHEMA) .put("array", array3) .put("map", map3) diff --git a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java index d100be29b4db9..e552e6f4de0c6 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/data/ValuesTest.java @@ -29,14 +29,8 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; -import java.time.Instant; -import java.time.LocalDateTime; -import java.time.LocalTime; -import java.time.ZoneId; -import java.time.ZoneOffset; -import java.time.format.DateTimeFormatter; -import java.time.temporal.ChronoUnit; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; @@ -182,7 +176,7 @@ public void shouldParseBooleanLiteralsEmbeddedInArray() { SchemaAndValue schemaAndValue = Values.parseString("[true, false]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); assertEquals(Type.BOOLEAN, schemaAndValue.schema().valueSchema().type()); - assertEquals(List.of(true, false), schemaAndValue.value()); + assertEquals(Arrays.asList(true, false), schemaAndValue.value()); } @Test @@ -215,14 +209,14 @@ public void shouldNotParseAsArrayWithoutCommas() { public void shouldParseEmptyMap() { SchemaAndValue schemaAndValue = Values.parseString("{}"); assertEquals(Type.MAP, schemaAndValue.schema().type()); - assertEquals(Map.of(), schemaAndValue.value()); + assertEquals(Collections.emptyMap(), schemaAndValue.value()); } @Test public void shouldParseEmptyArray() { SchemaAndValue schemaAndValue = Values.parseString("[]"); assertEquals(Type.ARRAY, schemaAndValue.schema().type()); - assertEquals(List.of(), schemaAndValue.value()); + assertEquals(Collections.emptyList(), schemaAndValue.value()); } @Test @@ -466,16 +460,16 @@ public void shouldConvertIntegralTypesToDouble() { @Test public void shouldParseStringListWithMultipleElementTypes() { assertParseStringArrayWithNoSchema( - List.of((byte) 1, (byte) 2, (short) 300, "four"), + Arrays.asList((byte) 1, (byte) 2, (short) 300, "four"), "[1, 2, 300, \"four\"]"); assertParseStringArrayWithNoSchema( - List.of((byte) 2, (short) 300, "four", (byte) 1), + Arrays.asList((byte) 2, (short) 300, "four", (byte) 1), "[2, 300, \"four\", 1]"); assertParseStringArrayWithNoSchema( - List.of((short) 300, "four", (byte) 1, (byte) 2), + Arrays.asList((short) 300, "four", (byte) 1, (byte) 2), "[300, \"four\", 1, 2]"); assertParseStringArrayWithNoSchema( - List.of("four", (byte) 1, (byte) 2, (short) 300), + Arrays.asList("four", (byte) 1, (byte) 2, (short) 300), "[\"four\", 1, 2, 300]"); } @@ -646,7 +640,7 @@ public void shouldParseDateStringAsDateInArray() throws Exception { assertEquals(Type.INT32, elementSchema.type()); assertEquals(Date.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_DATE_FORMAT_PATTERN).parse(dateStr); - assertEquals(List.of(expected), result.value()); + assertEquals(Collections.singletonList(expected), result.value()); } @Test @@ -659,7 +653,7 @@ public void shouldParseTimeStringAsTimeInArray() throws Exception { assertEquals(Type.INT32, elementSchema.type()); assertEquals(Time.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - assertEquals(List.of(expected), result.value()); + assertEquals(Collections.singletonList(expected), result.value()); } @Test @@ -672,7 +666,7 @@ public void shouldParseTimestampStringAsTimestampInArray() throws Exception { assertEquals(Type.INT64, elementSchema.type()); assertEquals(Timestamp.LOGICAL_NAME, elementSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr); - assertEquals(List.of(expected), result.value()); + assertEquals(Collections.singletonList(expected), result.value()); } @Test @@ -689,7 +683,7 @@ public void shouldParseMultipleTimestampStringAsTimestampInArray() throws Except java.util.Date expected1 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr1); java.util.Date expected2 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr2); java.util.Date expected3 = new SimpleDateFormat(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN).parse(tsStr3); - assertEquals(List.of(expected1, expected2, expected3), result.value()); + assertEquals(Arrays.asList(expected1, expected2, expected3), result.value()); } @Test @@ -705,7 +699,7 @@ public void shouldParseQuotedTimeStringAsTimeInMap() throws Exception { assertEquals(Type.INT32, valueSchema.type()); assertEquals(Time.LOGICAL_NAME, valueSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - assertEquals(Map.of(keyStr, expected), result.value()); + assertEquals(Collections.singletonMap(keyStr, expected), result.value()); } @Test @@ -721,7 +715,7 @@ public void shouldParseTimeStringAsTimeInMap() throws Exception { assertEquals(Type.INT32, valueSchema.type()); assertEquals(Time.LOGICAL_NAME, valueSchema.name()); java.util.Date expected = new SimpleDateFormat(Values.ISO_8601_TIME_FORMAT_PATTERN).parse(timeStr); - assertEquals(Map.of(keyStr, expected), result.value()); + assertEquals(Collections.singletonMap(keyStr, expected), result.value()); } @Test @@ -855,10 +849,7 @@ public void shouldParseStringsWithMultipleDelimiters() { @Test public void shouldConvertTimeValues() { - LocalDateTime localTime = LocalDateTime.now(); - LocalTime localTimeTruncated = localTime.toLocalTime().truncatedTo(ChronoUnit.MILLIS); - ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime); - java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000); + java.util.Date current = new java.util.Date(); long currentMillis = current.getTime() % MILLIS_PER_DAY; // java.util.Date - just copy @@ -866,28 +857,23 @@ public void shouldConvertTimeValues() { assertEquals(current, t1); // java.util.Date as a Timestamp - discard the date and keep just day's milliseconds - java.util.Date t2 = Values.convertToTime(Timestamp.SCHEMA, current); - assertEquals(new java.util.Date(currentMillis), t2); + t1 = Values.convertToTime(Timestamp.SCHEMA, current); + assertEquals(new java.util.Date(currentMillis), t1); - // ISO8601 strings - accept a string matching pattern "HH:mm:ss.SSS'Z'" - java.util.Date t3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIME_FORMAT_PATTERN))); - LocalTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(t3.getTime()), ZoneId.systemDefault()).toLocalTime(); - assertEquals(localTimeTruncated, time3); + // ISO8601 strings - currently broken because tokenization breaks at colon // Millis as string - java.util.Date t4 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis)); - assertEquals(currentMillis, t4.getTime()); + java.util.Date t3 = Values.convertToTime(Time.SCHEMA, Long.toString(currentMillis)); + assertEquals(currentMillis, t3.getTime()); // Millis as long - java.util.Date t5 = Values.convertToTime(Time.SCHEMA, currentMillis); - assertEquals(currentMillis, t5.getTime()); + java.util.Date t4 = Values.convertToTime(Time.SCHEMA, currentMillis); + assertEquals(currentMillis, t4.getTime()); } @Test public void shouldConvertDateValues() { - LocalDateTime localTime = LocalDateTime.now(); - ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime); - java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000); + java.util.Date current = new java.util.Date(); long currentMillis = current.getTime() % MILLIS_PER_DAY; long days = current.getTime() / MILLIS_PER_DAY; @@ -897,30 +883,23 @@ public void shouldConvertDateValues() { // java.util.Date as a Timestamp - discard the day's milliseconds and keep the date java.util.Date currentDate = new java.util.Date(current.getTime() - currentMillis); - java.util.Date d2 = Values.convertToDate(Timestamp.SCHEMA, currentDate); - assertEquals(currentDate, d2); + d1 = Values.convertToDate(Timestamp.SCHEMA, currentDate); + assertEquals(currentDate, d1); - // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd" - LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.DAYS); - java.util.Date d3 = Values.convertToDate(Date.SCHEMA, localTime.format(DateTimeFormatter.ISO_LOCAL_DATE)); - LocalDateTime date3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(d3.getTime()), ZoneId.systemDefault()); - assertEquals(localTimeTruncated, date3); + // ISO8601 strings - currently broken because tokenization breaks at colon // Days as string - java.util.Date d4 = Values.convertToDate(Date.SCHEMA, Long.toString(days)); - assertEquals(currentDate, d4); + java.util.Date d3 = Values.convertToDate(Date.SCHEMA, Long.toString(days)); + assertEquals(currentDate, d3); // Days as long - java.util.Date d5 = Values.convertToDate(Date.SCHEMA, days); - assertEquals(currentDate, d5); + java.util.Date d4 = Values.convertToDate(Date.SCHEMA, days); + assertEquals(currentDate, d4); } @Test public void shouldConvertTimestampValues() { - LocalDateTime localTime = LocalDateTime.now(); - LocalDateTime localTimeTruncated = localTime.truncatedTo(ChronoUnit.MILLIS); - ZoneOffset zoneOffset = ZoneId.systemDefault().getRules().getOffset(localTime); - java.util.Date current = new java.util.Date(localTime.toEpochSecond(zoneOffset) * 1000); + java.util.Date current = new java.util.Date(); long currentMillis = current.getTime() % MILLIS_PER_DAY; // java.util.Date - just copy @@ -933,21 +912,18 @@ public void shouldConvertTimestampValues() { assertEquals(currentDate, ts1); // java.util.Date as a Time - discard the date and keep the day's milliseconds - java.util.Date ts2 = Values.convertToTimestamp(Time.SCHEMA, currentMillis); - assertEquals(new java.util.Date(currentMillis), ts2); + ts1 = Values.convertToTimestamp(Time.SCHEMA, currentMillis); + assertEquals(new java.util.Date(currentMillis), ts1); - // ISO8601 strings - accept a string matching pattern "yyyy-MM-dd'T'HH:mm:ss.SSS'Z'" - java.util.Date ts3 = Values.convertToTime(Time.SCHEMA, localTime.format(DateTimeFormatter.ofPattern(Values.ISO_8601_TIMESTAMP_FORMAT_PATTERN))); - LocalDateTime time3 = LocalDateTime.ofInstant(Instant.ofEpochMilli(ts3.getTime()), ZoneId.systemDefault()); - assertEquals(localTimeTruncated, time3); + // ISO8601 strings - currently broken because tokenization breaks at colon // Millis as string - java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime())); - assertEquals(current, ts4); + java.util.Date ts3 = Values.convertToTimestamp(Timestamp.SCHEMA, Long.toString(current.getTime())); + assertEquals(current, ts3); // Millis as long - java.util.Date ts5 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime()); - assertEquals(current, ts5); + java.util.Date ts4 = Values.convertToTimestamp(Timestamp.SCHEMA, current.getTime()); + assertEquals(current, ts4); } @Test @@ -989,25 +965,25 @@ public void shouldInferStructSchema() { @Test public void shouldInferNoSchemaForEmptyList() { - Schema listSchema = Values.inferSchema(List.of()); + Schema listSchema = Values.inferSchema(Collections.emptyList()); assertNull(listSchema); } @Test public void shouldInferNoSchemaForListContainingObject() { - Schema listSchema = Values.inferSchema(List.of(new Object())); + Schema listSchema = Values.inferSchema(Collections.singletonList(new Object())); assertNull(listSchema); } @Test public void shouldInferNoSchemaForEmptyMap() { - Schema listSchema = Values.inferSchema(Map.of()); + Schema listSchema = Values.inferSchema(Collections.emptyMap()); assertNull(listSchema); } @Test public void shouldInferNoSchemaForMapContainingObject() { - Schema listSchema = Values.inferSchema(Map.of(new Object(), new Object())); + Schema listSchema = Values.inferSchema(Collections.singletonMap(new Object(), new Object())); assertNull(listSchema); } @@ -1017,7 +993,7 @@ public void shouldInferNoSchemaForMapContainingObject() { */ @Test public void shouldNotConvertArrayValuesToDecimal() { - List decimals = List.of("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), + List decimals = Arrays.asList("\"1.0\"", BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE), (byte) 1, (byte) 1); List expected = new ArrayList<>(decimals); // most values are directly reproduced with the same type expected.set(0, "1.0"); // The quotes are parsed away, but the value remains a string @@ -1030,7 +1006,7 @@ public void shouldNotConvertArrayValuesToDecimal() { @Test public void shouldParseArrayOfOnlyDecimals() { - List decimals = List.of(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), + List decimals = Arrays.asList(BigDecimal.valueOf(Long.MAX_VALUE).add(BigDecimal.ONE), BigDecimal.valueOf(Long.MIN_VALUE).subtract(BigDecimal.ONE)); SchemaAndValue schemaAndValue = Values.parseString(decimals.toString()); Schema schema = schemaAndValue.schema(); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java b/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java index 52aeff4a1a2cc..44073f7722927 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/header/ConnectHeadersTest.java @@ -37,11 +37,10 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Calendar; +import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Iterator; -import java.util.List; -import java.util.Map; import java.util.TimeZone; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -381,9 +380,9 @@ public void shouldValidateBuildInTypes() { assertSchemaMatches(Schema.FLOAT64_SCHEMA, 1.0d); assertSchemaMatches(Schema.STRING_SCHEMA, "value"); assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), new ArrayList()); - assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), List.of("value")); + assertSchemaMatches(SchemaBuilder.array(Schema.STRING_SCHEMA), Collections.singletonList("value")); assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), new HashMap()); - assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), Map.of("a", 0)); + assertSchemaMatches(SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.INT32_SCHEMA), Collections.singletonMap("a", 0)); Schema emptyStructSchema = SchemaBuilder.struct(); assertSchemaMatches(emptyStructSchema, new Struct(emptyStructSchema)); Schema structSchema = SchemaBuilder.struct().field("foo", Schema.OPTIONAL_BOOLEAN_SCHEMA).field("bar", Schema.STRING_SCHEMA) diff --git a/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java index 176ebdf2cafc6..dc89ff59f2937 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/sink/SinkConnectorTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.sink; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.ConnectorContext; import org.apache.kafka.connect.connector.ConnectorTest; import org.apache.kafka.connect.connector.Task; @@ -54,12 +53,6 @@ public void raiseError(Exception e) { // Unexpected in these tests throw new UnsupportedOperationException(); } - - @Override - public PluginMetrics pluginMetrics() { - // Unexpected in these tests - throw new UnsupportedOperationException(); - } } protected static class TestSinkConnector extends SinkConnector implements ConnectorTest.AssertableConnector { diff --git a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java index 913c5e2019d7e..e1a6c54ebfd77 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceConnectorTest.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.source; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.connector.ConnectorContext; import org.apache.kafka.connect.connector.ConnectorTest; import org.apache.kafka.connect.connector.Task; @@ -56,12 +55,6 @@ public void raiseError(Exception e) { throw new UnsupportedOperationException(); } - @Override - public PluginMetrics pluginMetrics() { - // Unexpected in these tests - throw new UnsupportedOperationException(); - } - @Override public OffsetStorageReader offsetStorageReader() { return null; diff --git a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java index 32716da897567..90bd4f897df28 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/source/SourceRecordTest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -37,8 +38,8 @@ public class SourceRecordTest { - private static final Map SOURCE_PARTITION = Map.of("src", "abc"); - private static final Map SOURCE_OFFSET = Map.of("offset", "1"); + private static final Map SOURCE_PARTITION = Collections.singletonMap("src", "abc"); + private static final Map SOURCE_OFFSET = Collections.singletonMap("offset", "1"); private static final String TOPIC_NAME = "myTopic"; private static final Integer PARTITION_NUMBER = 0; private static final Long KAFKA_TIMESTAMP = 0L; diff --git a/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java b/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java index 119d0594a8fdc..463125e09404f 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/storage/StringConverterTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -58,7 +58,7 @@ public void testToBytesIgnoresSchema() { @Test public void testToBytesNonUtf8Encoding() { - converter.configure(Map.of("converter.encoding", StandardCharsets.UTF_16.name()), true); + converter.configure(Collections.singletonMap("converter.encoding", StandardCharsets.UTF_16.name()), true); assertArrayEquals(SAMPLE_STRING.getBytes(StandardCharsets.UTF_16), converter.fromConnectData(TOPIC, Schema.STRING_SCHEMA, SAMPLE_STRING)); } @@ -78,7 +78,7 @@ public void testBytesNullToString() { @Test public void testBytesToStringNonUtf8Encoding() { - converter.configure(Map.of("converter.encoding", StandardCharsets.UTF_16.name()), true); + converter.configure(Collections.singletonMap("converter.encoding", StandardCharsets.UTF_16.name()), true); SchemaAndValue data = converter.toConnectData(TOPIC, SAMPLE_STRING.getBytes(StandardCharsets.UTF_16)); assertEquals(Schema.OPTIONAL_STRING_SCHEMA, data.schema()); assertEquals(SAMPLE_STRING, data.value()); diff --git a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java index 0a72a0a181c86..1972ff7a89d58 100644 --- a/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java +++ b/connect/api/src/test/java/org/apache/kafka/connect/util/ConnectorUtilsTest.java @@ -18,6 +18,8 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.List; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -25,37 +27,37 @@ public class ConnectorUtilsTest { - private static final List FIVE_ELEMENTS = List.of(1, 2, 3, 4, 5); + private static final List FIVE_ELEMENTS = Arrays.asList(1, 2, 3, 4, 5); @Test public void testGroupPartitions() { List> grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 1); - assertEquals(List.of(FIVE_ELEMENTS), grouped); + assertEquals(Collections.singletonList(FIVE_ELEMENTS), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 2); - assertEquals(List.of(List.of(1, 2, 3), List.of(4, 5)), grouped); + assertEquals(Arrays.asList(Arrays.asList(1, 2, 3), Arrays.asList(4, 5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 3); - assertEquals(List.of(List.of(1, 2), - List.of(3, 4), - List.of(5)), grouped); + assertEquals(Arrays.asList(Arrays.asList(1, 2), + Arrays.asList(3, 4), + Collections.singletonList(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 5); - assertEquals(List.of(List.of(1), - List.of(2), - List.of(3), - List.of(4), - List.of(5)), grouped); + assertEquals(Arrays.asList(Collections.singletonList(1), + Collections.singletonList(2), + Collections.singletonList(3), + Collections.singletonList(4), + Collections.singletonList(5)), grouped); grouped = ConnectorUtils.groupPartitions(FIVE_ELEMENTS, 7); - assertEquals(List.of(List.of(1), - List.of(2), - List.of(3), - List.of(4), - List.of(5), - List.of(), - List.of()), grouped); + assertEquals(Arrays.asList(Collections.singletonList(1), + Collections.singletonList(2), + Collections.singletonList(3), + Collections.singletonList(4), + Collections.singletonList(5), + Collections.emptyList(), + Collections.emptyList()), grouped); } @Test diff --git a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java index 5c1b0ee454084..d404bdc7dc19a 100644 --- a/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java +++ b/connect/basic-auth-extension/src/main/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilter.java @@ -26,7 +26,9 @@ import java.nio.charset.StandardCharsets; import java.security.Principal; import java.util.ArrayList; +import java.util.Arrays; import java.util.Base64; +import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.function.Predicate; @@ -52,10 +54,10 @@ public class JaasBasicAuthFilter implements ContainerRequestFilter { private static final Logger log = LoggerFactory.getLogger(JaasBasicAuthFilter.class); - private static final Set INTERNAL_REQUEST_MATCHERS = Set.of( + private static final Set INTERNAL_REQUEST_MATCHERS = new HashSet<>(Arrays.asList( new RequestMatcher(HttpMethod.POST, "/?connectors/([^/]+)/tasks/?"), new RequestMatcher(HttpMethod.PUT, "/?connectors/[^/]+/fence/?") - ); + )); private static final String CONNECT_LOGIN_MODULE = "KafkaConnect"; static final String AUTHORIZATION = "Authorization"; diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java index d8439309e1d22..81f3a7327d576 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/BasicAuthSecurityRestExtensionTest.java @@ -26,7 +26,7 @@ import org.mockito.ArgumentCaptor; import java.io.IOException; -import java.util.Map; +import java.util.Collections; import java.util.concurrent.atomic.AtomicBoolean; import java.util.function.Supplier; @@ -107,7 +107,7 @@ public void testBadJaasConfigExtensionSetup() { BasicAuthSecurityRestExtension extension = new BasicAuthSecurityRestExtension(configuration); - Exception thrownException = assertThrows(Exception.class, () -> extension.configure(Map.of())); + Exception thrownException = assertThrows(Exception.class, () -> extension.configure(Collections.emptyMap())); assertEquals(jaasConfigurationException, thrownException); thrownException = assertThrows(Exception.class, () -> extension.register(mock(ConnectRestExtensionContext.class))); diff --git a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java index d168e6466db2d..bcd6e0ab31995 100644 --- a/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java +++ b/connect/basic-auth-extension/src/test/java/org/apache/kafka/connect/rest/basic/auth/extension/JaasBasicAuthFilterTest.java @@ -32,6 +32,7 @@ import java.nio.file.Files; import java.util.ArrayList; import java.util.Base64; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -260,8 +261,8 @@ private File setupPropertyLoginFile(boolean includeUsers) throws IOException { private JaasBasicAuthFilter setupJaasFilter(String name, String credentialFilePath) { TestJaasConfig configuration = new TestJaasConfig(); Map moduleOptions = credentialFilePath != null - ? Map.of("file", credentialFilePath) - : Map.of(); + ? Collections.singletonMap("file", credentialFilePath) + : Collections.emptyMap(); configuration.addEntry(name, LOGIN_MODULE, moduleOptions); return new JaasBasicAuthFilter(configuration); } diff --git a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSinkTask.java b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSinkTask.java index d2b1489fca903..0b5f112ce3083 100644 --- a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSinkTask.java +++ b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSinkTask.java @@ -69,7 +69,7 @@ public void start(Map props) { outputStream = new PrintStream( Files.newOutputStream(Paths.get(filename), StandardOpenOption.CREATE, StandardOpenOption.APPEND), false, - StandardCharsets.UTF_8); + StandardCharsets.UTF_8.name()); } catch (IOException e) { throw new ConnectException("Couldn't find or create file '" + filename + "' for FileStreamSinkTask", e); } diff --git a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java index 8a76a480a7ae6..79478c57d1fa4 100644 --- a/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java +++ b/connect/file/src/main/java/org/apache/kafka/connect/file/FileStreamSourceTask.java @@ -90,7 +90,7 @@ public List poll() throws InterruptedException { if (stream == null) { try { stream = Files.newInputStream(Paths.get(filename)); - Map offset = context.offsetStorageReader().offset(Map.of(FILENAME_FIELD, filename)); + Map offset = context.offsetStorageReader().offset(Collections.singletonMap(FILENAME_FIELD, filename)); if (offset != null) { Object lastRecordedOffset = offset.get(POSITION_FIELD); if (lastRecordedOffset != null && !(lastRecordedOffset instanceof Long)) diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java index cda7a771c51d2..dde20105e3731 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSinkTaskTest.java @@ -32,8 +32,9 @@ import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -57,19 +58,19 @@ public void setup() { @Test public void testPutFlush() { - Map offsets = new HashMap<>(); + HashMap offsets = new HashMap<>(); final String newLine = System.lineSeparator(); // We do not call task.start() since it would override the output stream - task.put(List.of( + task.put(Collections.singletonList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); task.flush(offsets); assertEquals("line1" + newLine, os.toString()); - task.put(List.of( + task.put(Arrays.asList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line2", 2), new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line3", 1) )); @@ -87,7 +88,7 @@ public void testStart() throws IOException { task.start(props); HashMap offsets = new HashMap<>(); - task.put(List.of( + task.put(Collections.singletonList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line0", 1) )); offsets.put(new TopicPartition("topic1", 0), new OffsetAndMetadata(1L)); @@ -98,7 +99,7 @@ public void testStart() throws IOException { int i = 0; try (BufferedReader reader = Files.newBufferedReader(Paths.get(outputFile))) { lines[i++] = reader.readLine(); - task.put(List.of( + task.put(Arrays.asList( new SinkRecord("topic1", 0, null, null, Schema.STRING_SCHEMA, "line1", 2), new SinkRecord("topic2", 0, null, null, Schema.STRING_SCHEMA, "line2", 1) )); diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java index 94c046182f199..e0c14a1e6cb19 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceConnectorTest.java @@ -158,48 +158,48 @@ public void testInvalidBatchSize() { @Test public void testAlterOffsetsStdin() { sourceProperties.remove(FileStreamSourceConnector.FILE_CONFIG); - Map, Map> offsets = Map.of( - Map.of(FILENAME_FIELD, FILENAME), - Map.of(POSITION_FIELD, 0L) + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap(FILENAME_FIELD, FILENAME), + Collections.singletonMap(POSITION_FIELD, 0L) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsIncorrectPartitionKey() { - assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Map.of( - Map.of("other_partition_key", FILENAME), - Map.of(POSITION_FIELD, 0L) + assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap( + Collections.singletonMap("other_partition_key", FILENAME), + Collections.singletonMap(POSITION_FIELD, 0L) ))); // null partitions are invalid assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, Collections.singletonMap( null, - Map.of(POSITION_FIELD, 0L) + Collections.singletonMap(POSITION_FIELD, 0L) ))); } @Test public void testAlterOffsetsMultiplePartitions() { Map, Map> offsets = new HashMap<>(); - offsets.put(Map.of(FILENAME_FIELD, FILENAME), Map.of(POSITION_FIELD, 0L)); + offsets.put(Collections.singletonMap(FILENAME_FIELD, FILENAME), Collections.singletonMap(POSITION_FIELD, 0L)); offsets.put(Collections.singletonMap(FILENAME_FIELD, "/someotherfilename"), null); assertTrue(connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsIncorrectOffsetKey() { - Map, Map> offsets = Map.of( - Map.of(FILENAME_FIELD, FILENAME), - Map.of("other_offset_key", 0L) + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap(FILENAME_FIELD, FILENAME), + Collections.singletonMap("other_offset_key", 0L) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(sourceProperties, offsets)); } @Test public void testAlterOffsetsOffsetPositionValues() { - Function alterOffsets = offset -> connector.alterOffsets(sourceProperties, Map.of( - Map.of(FILENAME_FIELD, FILENAME), + Function alterOffsets = offset -> connector.alterOffsets(sourceProperties, Collections.singletonMap( + Collections.singletonMap(FILENAME_FIELD, FILENAME), Collections.singletonMap(POSITION_FIELD, offset) )); @@ -217,9 +217,9 @@ public void testAlterOffsetsOffsetPositionValues() { @Test public void testSuccessfulAlterOffsets() { - Map, Map> offsets = Map.of( - Map.of(FILENAME_FIELD, FILENAME), - Map.of(POSITION_FIELD, 0L) + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap(FILENAME_FIELD, FILENAME), + Collections.singletonMap(POSITION_FIELD, 0L) ); // Expect no exception to be thrown when a valid offsets map is passed. An empty offsets map is treated as valid @@ -237,9 +237,9 @@ public void testAlterOffsetsTombstones() { ); assertTrue(alterOffsets.apply(null)); - assertTrue(alterOffsets.apply(Map.of())); - assertTrue(alterOffsets.apply(Map.of(FILENAME_FIELD, FILENAME))); - assertTrue(alterOffsets.apply(Map.of(FILENAME_FIELD, "/someotherfilename"))); - assertTrue(alterOffsets.apply(Map.of("garbage_partition_key", "garbage_partition_value"))); + assertTrue(alterOffsets.apply(Collections.emptyMap())); + assertTrue(alterOffsets.apply(Collections.singletonMap(FILENAME_FIELD, FILENAME))); + assertTrue(alterOffsets.apply(Collections.singletonMap(FILENAME_FIELD, "/someotherfilename"))); + assertTrue(alterOffsets.apply(Collections.singletonMap("garbage_partition_key", "garbage_partition_value"))); } } diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java index c8118faf1589e..e0e77a8433c72 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/FileStreamSourceTaskTest.java @@ -30,6 +30,7 @@ import java.io.OutputStream; import java.nio.file.Files; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -86,8 +87,8 @@ public void testNormalLifecycle() throws InterruptedException, IOException { assertEquals(1, records.size()); assertEquals(TOPIC, records.get(0).topic()); assertEquals("partial line finished", records.get(0).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 22L), records.get(0).sourceOffset()); assertNull(task.poll()); // Different line endings, and make sure the final \r doesn't result in a line until we can @@ -97,25 +98,25 @@ public void testNormalLifecycle() throws InterruptedException, IOException { records = task.poll(); assertEquals(4, records.size()); assertEquals("line1", records.get(0).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 28L), records.get(0).sourceOffset()); assertEquals("line2", records.get(1).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(1).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(1).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 35L), records.get(1).sourceOffset()); assertEquals("line3", records.get(2).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(2).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(2).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 41L), records.get(2).sourceOffset()); assertEquals("line4", records.get(3).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(3).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(3).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 47L), records.get(3).sourceOffset()); os.write("subsequent text".getBytes()); os.flush(); records = task.poll(); assertEquals(1, records.size()); assertEquals("", records.get(0).value()); - assertEquals(Map.of(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); - assertEquals(Map.of(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.FILENAME_FIELD, tempFile.getAbsolutePath()), records.get(0).sourcePartition()); + assertEquals(Collections.singletonMap(FileStreamSourceTask.POSITION_FIELD, 48L), records.get(0).sourceOffset()); os.close(); task.stop(); diff --git a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java index 51096d32107ba..577b07bb5bdb4 100644 --- a/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java +++ b/connect/file/src/test/java/org/apache/kafka/connect/file/integration/FileStreamSourceConnectorIntegrationTest.java @@ -30,6 +30,7 @@ import java.io.PrintStream; import java.nio.file.Files; import java.nio.file.StandardOpenOption; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.Map; @@ -129,8 +130,8 @@ public void testAlterOffsets() throws Exception { // Alter the offsets to make the connector re-process the last line in the file connect.alterSourceConnectorOffset( CONNECTOR_NAME, - Map.of(FILENAME_FIELD, sourceFile.getAbsolutePath()), - Map.of(POSITION_FIELD, 28L) + Collections.singletonMap(FILENAME_FIELD, sourceFile.getAbsolutePath()), + Collections.singletonMap(POSITION_FIELD, 28L) ); connect.resumeConnector(CONNECTOR_NAME); diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java index dac2ce5674150..a7abef315c569 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverter.java @@ -53,6 +53,7 @@ import java.util.Collection; import java.util.EnumMap; import java.util.HashMap; +import java.util.Iterator; import java.util.Map; import java.util.Set; @@ -102,7 +103,9 @@ public class JsonConverter implements Converter, HeaderConverter, Versioned { if (schema == null || keySchema.type() == Schema.Type.STRING) { if (!value.isObject()) throw new DataException("Maps with string fields should be encoded as JSON objects, but found " + value.getNodeType()); - for (Map.Entry entry : value.properties()) { + Iterator> fieldIt = value.fields(); + while (fieldIt.hasNext()) { + Map.Entry entry = fieldIt.next(); result.put(entry.getKey(), convertToConnect(valueSchema, entry.getValue(), config)); } } else { @@ -146,13 +149,18 @@ public class JsonConverter implements Converter, HeaderConverter, Versioned { LOGICAL_CONVERTERS.put(Decimal.LOGICAL_NAME, new LogicalTypeConverter() { @Override public JsonNode toJson(final Schema schema, final Object value, final JsonConverterConfig config) { - if (!(value instanceof BigDecimal decimal)) + if (!(value instanceof BigDecimal)) throw new DataException("Invalid type for Decimal, expected BigDecimal but was " + value.getClass()); - return switch (config.decimalFormat()) { - case NUMERIC -> JSON_NODE_FACTORY.numberNode(decimal); - case BASE64 -> JSON_NODE_FACTORY.binaryNode(Decimal.fromLogical(schema, decimal)); - }; + final BigDecimal decimal = (BigDecimal) value; + switch (config.decimalFormat()) { + case NUMERIC: + return JSON_NODE_FACTORY.numberNode(decimal); + case BASE64: + return JSON_NODE_FACTORY.binaryNode(Decimal.fromLogical(schema, decimal)); + default: + throw new DataException("Unexpected " + JsonConverterConfig.DECIMAL_FORMAT_CONFIG + ": " + config.decimalFormat()); + } } @Override @@ -222,7 +230,6 @@ public Object toConnect(final Schema schema, final JsonNode value) { private JsonConverterConfig config; private Cache fromConnectSchemaCache; private Cache toConnectSchemaCache; - private Schema schema = null; // if a schema is provided in config, this schema will be used for all messages for sink connector private final JsonSerializer serializer; private final JsonDeserializer deserializer; @@ -285,16 +292,6 @@ public void configure(Map configs) { fromConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize())); toConnectSchemaCache = new SynchronizedCache<>(new LRUCache<>(config.schemaCacheSize())); - - try { - final byte[] schemaContent = config.schemaContent(); - if (schemaContent != null) { - final JsonNode schemaNode = deserializer.deserialize("", schemaContent); - this.schema = asConnectSchema(schemaNode); - } - } catch (SerializationException e) { - throw new DataException("Failed to parse schema in converter config due to serialization error: ", e); - } } @Override @@ -349,16 +346,13 @@ public SchemaAndValue toConnectData(String topic, byte[] value) { throw new DataException("Converting byte[] to Kafka Connect data failed due to serialization error: ", e); } - if (config.schemasEnabled()) { - if (schema != null) { - return new SchemaAndValue(schema, convertToConnect(schema, jsonValue, config)); - } else if (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME)) { - throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + + if (config.schemasEnabled() && (!jsonValue.isObject() || jsonValue.size() != 2 || !jsonValue.has(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME) || !jsonValue.has(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME))) + throw new DataException("JsonConverter with schemas.enable requires \"schema\" and \"payload\" fields and may not contain additional fields." + " If you are trying to deserialize plain JSON data, set schemas.enable=false in your converter configuration."); - } - } else { - // The deserialized data should either be an envelope object containing the schema and the payload or the schema - // was stripped during serialization and we need to fill in an all-encompassing schema. + + // The deserialized data should either be an envelope object containing the schema and the payload or the schema + // was stripped during serialization and we need to fill in an all-encompassing schema. + if (!config.schemasEnabled()) { ObjectNode envelope = JSON_NODE_FACTORY.objectNode(); envelope.set(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME, null); envelope.set(JsonSchema.ENVELOPE_PAYLOAD_FIELD_NAME, jsonValue); @@ -547,7 +541,9 @@ public Schema asConnectSchema(JsonNode jsonSchema) { JsonNode schemaParamsNode = jsonSchema.get(JsonSchema.SCHEMA_PARAMETERS_FIELD_NAME); if (schemaParamsNode != null && schemaParamsNode.isObject()) { - for (Map.Entry entry : schemaParamsNode.properties()) { + Iterator> paramsIt = schemaParamsNode.fields(); + while (paramsIt.hasNext()) { + Map.Entry entry = paramsIt.next(); JsonNode paramValue = entry.getValue(); if (!paramValue.isTextual()) throw new DataException("Schema parameters must have string values."); diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java index 17d48c7f14c3d..f02d54ac26307 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonConverterConfig.java @@ -22,7 +22,6 @@ import org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.connect.storage.ConverterConfig; -import java.nio.charset.StandardCharsets; import java.util.Locale; import java.util.Map; @@ -36,12 +35,6 @@ public final class JsonConverterConfig extends ConverterConfig { private static final String SCHEMAS_ENABLE_DOC = "Include schemas within each of the serialized values and keys."; private static final String SCHEMAS_ENABLE_DISPLAY = "Enable Schemas"; - public static final String SCHEMA_CONTENT_CONFIG = "schema.content"; - public static final String SCHEMA_CONTENT_DEFAULT = null; - private static final String SCHEMA_CONTENT_DOC = "When set, this is used as the schema for all messages, and the schemas within each of the message will be ignored." - + "Otherwise, the schema will be included in the content of each message. This configuration applies only 'schemas.enable' is true, and it exclusively affects the sink connector."; - private static final String SCHEMA_CONTENT_DISPLAY = "Schema Content"; - public static final String SCHEMAS_CACHE_SIZE_CONFIG = "schemas.cache.size"; public static final int SCHEMAS_CACHE_SIZE_DEFAULT = 1000; private static final String SCHEMAS_CACHE_SIZE_DOC = "The maximum number of schemas that can be cached in this converter instance."; @@ -68,8 +61,6 @@ public final class JsonConverterConfig extends ConverterConfig { orderInGroup++, Width.MEDIUM, SCHEMAS_ENABLE_DISPLAY); CONFIG.define(SCHEMAS_CACHE_SIZE_CONFIG, Type.INT, SCHEMAS_CACHE_SIZE_DEFAULT, Importance.HIGH, SCHEMAS_CACHE_SIZE_DOC, group, orderInGroup++, Width.MEDIUM, SCHEMAS_CACHE_SIZE_DISPLAY); - CONFIG.define(SCHEMA_CONTENT_CONFIG, Type.STRING, SCHEMA_CONTENT_DEFAULT, Importance.HIGH, SCHEMA_CONTENT_DOC, group, - orderInGroup++, Width.MEDIUM, SCHEMA_CONTENT_DISPLAY); group = "Serialization"; orderInGroup = 0; @@ -95,7 +86,6 @@ public static ConfigDef configDef() { private final int schemaCacheSize; private final DecimalFormat decimalFormat; private final boolean replaceNullWithDefault; - private final byte[] schemaContent; public JsonConverterConfig(Map props) { super(CONFIG, props); @@ -103,10 +93,6 @@ public JsonConverterConfig(Map props) { this.schemaCacheSize = getInt(SCHEMAS_CACHE_SIZE_CONFIG); this.decimalFormat = DecimalFormat.valueOf(getString(DECIMAL_FORMAT_CONFIG).toUpperCase(Locale.ROOT)); this.replaceNullWithDefault = getBoolean(REPLACE_NULL_WITH_DEFAULT_CONFIG); - String schemaContentStr = getString(SCHEMA_CONTENT_CONFIG); - this.schemaContent = (schemaContentStr == null || schemaContentStr.isEmpty()) - ? null - : schemaContentStr.getBytes(StandardCharsets.UTF_8); } /** @@ -144,15 +130,4 @@ public boolean replaceNullWithDefault() { return replaceNullWithDefault; } - /** - * If a default schema is provided in the converter config, this will be - * used for all messages. - * - * This is only relevant if schemas are enabled. - * - * @return Schema Contents, will return null if no value is provided - */ - public byte[] schemaContent() { - return schemaContent; - } } diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java index f88c1d838abc1..775768f37d31d 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonDeserializer.java @@ -26,6 +26,7 @@ import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.module.blackbird.BlackbirdModule; +import java.util.Collections; import java.util.Set; /** @@ -39,7 +40,7 @@ public class JsonDeserializer implements Deserializer { * Default constructor needed by Kafka */ public JsonDeserializer() { - this(Set.of(), new JsonNodeFactory(true), true); + this(Collections.emptySet(), new JsonNodeFactory(true), true); } /** diff --git a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java index 8f6adfaf1ff38..e40f530469af6 100644 --- a/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java +++ b/connect/json/src/main/java/org/apache/kafka/connect/json/JsonSerializer.java @@ -25,6 +25,7 @@ import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.module.blackbird.BlackbirdModule; +import java.util.Collections; import java.util.Set; /** @@ -38,7 +39,7 @@ public class JsonSerializer implements Serializer { * Default constructor needed by Kafka */ public JsonSerializer() { - this(Set.of(), new JsonNodeFactory(true), true); + this(Collections.emptySet(), new JsonNodeFactory(true), true); } /** diff --git a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java index 6b2eabaab1e06..930fb3bb4b84e 100644 --- a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java +++ b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterConfigTest.java @@ -35,7 +35,7 @@ public void shouldBeCaseInsensitiveForDecimalFormatConfig() { configValues.put(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, "NuMeRiC"); final JsonConverterConfig config = new JsonConverterConfig(configValues); - assertEquals(DecimalFormat.NUMERIC, config.decimalFormat()); + assertEquals(config.decimalFormat(), DecimalFormat.NUMERIC); } } \ No newline at end of file diff --git a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java index 200b33d1774e2..d79c8527b3c21 100644 --- a/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java +++ b/connect/json/src/test/java/org/apache/kafka/connect/json/JsonConverterTest.java @@ -36,8 +36,6 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; import java.io.File; import java.io.IOException; @@ -46,11 +44,12 @@ import java.net.URISyntaxException; import java.net.URL; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Calendar; +import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -75,7 +74,7 @@ public class JsonConverterTest { @BeforeEach public void setUp() { - converter.configure(Map.of(), false); + converter.configure(Collections.emptyMap(), false); } // Schema metadata @@ -156,7 +155,7 @@ public void stringToConnect() { @Test public void arrayToConnect() { byte[] arrayJson = "{ \"schema\": { \"type\": \"array\", \"items\": { \"type\" : \"int32\" } }, \"payload\": [1, 2, 3] }".getBytes(); - assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), List.of(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson)); + assertEquals(new SchemaAndValue(SchemaBuilder.array(Schema.INT32_SCHEMA).build(), Arrays.asList(1, 2, 3)), converter.toConnectData(TOPIC, arrayJson)); } @Test @@ -212,7 +211,7 @@ public void nullToConnect() { @Test public void emptyBytesToConnect() { // This characterizes the messages with empty data when Json schemas is disabled - Map props = Map.of("schemas.enable", false); + Map props = Collections.singletonMap("schemas.enable", false); converter.configure(props, true); SchemaAndValue converted = converter.toConnectData(TOPIC, "".getBytes()); assertEquals(SchemaAndValue.NULL, converted); @@ -224,7 +223,7 @@ public void emptyBytesToConnect() { @Test public void schemalessWithEmptyFieldValueToConnect() { // This characterizes the messages with empty data when Json schemas is disabled - Map props = Map.of("schemas.enable", false); + Map props = Collections.singletonMap("schemas.enable", false); converter.configure(props, true); String input = "{ \"a\": \"\", \"b\": null}"; SchemaAndValue converted = converter.toConnectData(TOPIC, input.getBytes()); @@ -255,7 +254,7 @@ public void nullSchemaPrimitiveToConnect() { assertEquals(new SchemaAndValue(null, "a string"), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": [1, \"2\", 3] }".getBytes()); - assertEquals(new SchemaAndValue(null, List.of(1L, "2", 3L)), converted); + assertEquals(new SchemaAndValue(null, Arrays.asList(1L, "2", 3L)), converted); converted = converter.toConnectData(TOPIC, "{ \"schema\": null, \"payload\": { \"field1\": 1, \"field2\": 2} }".getBytes()); Map obj = new HashMap<>(); @@ -588,7 +587,7 @@ public void stringToJson() { @Test public void arrayToJson() { Schema int32Array = SchemaBuilder.array(Schema.INT32_SCHEMA).build(); - JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, List.of(1, 2, 3))); + JsonNode converted = parse(converter.fromConnectData(TOPIC, int32Array, Arrays.asList(1, 2, 3))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"array\", \"items\": { \"type\": \"int32\", \"optional\": false }, \"optional\": false }"), converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME)); @@ -627,8 +626,8 @@ public void mapToJsonNonStringKeys() { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add(1).add(12), - JsonNodeFactory.instance.arrayNode().add(2).add(15)), + assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add(1).add(12), + JsonNodeFactory.instance.arrayNode().add(2).add(15))), payloadEntries ); } @@ -676,7 +675,7 @@ public void decimalToJson() throws IOException { @Test public void decimalToNumericJson() { - converter.configure(Map.of(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); JsonNode converted = parse(converter.fromConnectData(TOPIC, Decimal.schema(2), new BigDecimal(new BigInteger("156"), 2))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"2\" } }"), @@ -687,7 +686,7 @@ public void decimalToNumericJson() { @Test public void decimalWithTrailingZerosToNumericJson() { - converter.configure(Map.of(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.DECIMAL_FORMAT_CONFIG, DecimalFormat.NUMERIC.name()), false); JsonNode converted = parse(converter.fromConnectData(TOPIC, Decimal.schema(4), new BigDecimal(new BigInteger("15600"), 4))); validateEnvelope(converted); assertEquals(parse("{ \"type\": \"bytes\", \"optional\": false, \"name\": \"org.apache.kafka.connect.data.Decimal\", \"version\": 1, \"parameters\": { \"scale\": \"4\" } }"), @@ -767,7 +766,7 @@ public void nullSchemaAndPrimitiveToJson() { public void nullSchemaAndArrayToJson() { // This still needs to do conversion of data, null schema means "anything goes". Make sure we mix and match // types to verify conversion still works. - JsonNode converted = parse(converter.fromConnectData(TOPIC, null, List.of(1, "string", true))); + JsonNode converted = parse(converter.fromConnectData(TOPIC, null, Arrays.asList(1, "string", true))); validateEnvelopeNullSchema(converted); assertTrue(converted.get(JsonSchema.ENVELOPE_SCHEMA_FIELD_NAME).isNull()); assertEquals(JsonNodeFactory.instance.arrayNode().add(1).add("string").add(true), @@ -806,9 +805,9 @@ public void nullSchemaAndMapNonStringKeysToJson() { Set payloadEntries = new HashSet<>(); for (JsonNode elem : payload) payloadEntries.add(elem); - assertEquals(Set.of(JsonNodeFactory.instance.arrayNode().add("string").add(12), + assertEquals(new HashSet<>(Arrays.asList(JsonNodeFactory.instance.arrayNode().add("string").add(12), JsonNodeFactory.instance.arrayNode().add(52).add("string"), - JsonNodeFactory.instance.arrayNode().add(false).add(true)), + JsonNodeFactory.instance.arrayNode().add(false).add(true))), payloadEntries ); } @@ -816,7 +815,7 @@ public void nullSchemaAndMapNonStringKeysToJson() { @Test public void nullSchemaAndNullValueToJson() { // This characterizes the production of tombstone messages when Json schemas is enabled - Map props = Map.of("schemas.enable", true); + Map props = Collections.singletonMap("schemas.enable", true); converter.configure(props, true); byte[] converted = converter.fromConnectData(TOPIC, null, null); assertNull(converted); @@ -825,7 +824,7 @@ public void nullSchemaAndNullValueToJson() { @Test public void nullValueToJson() { // This characterizes the production of tombstone messages when Json schemas is not enabled - Map props = Map.of("schemas.enable", false); + Map props = Collections.singletonMap("schemas.enable", false); converter.configure(props, true); byte[] converted = converter.fromConnectData(TOPIC, null, null); assertNull(converted); @@ -840,14 +839,14 @@ public void mismatchSchemaJson() { @Test public void noSchemaToConnect() { - Map props = Map.of("schemas.enable", false); + Map props = Collections.singletonMap("schemas.enable", false); converter.configure(props, true); assertEquals(new SchemaAndValue(null, true), converter.toConnectData(TOPIC, "true".getBytes())); } @Test public void noSchemaToJson() { - Map props = Map.of("schemas.enable", false); + Map props = Collections.singletonMap("schemas.enable", false); converter.configure(props, true); JsonNode converted = parse(converter.fromConnectData(TOPIC, null, true)); assertTrue(converted.isBoolean()); @@ -877,7 +876,7 @@ public void testJsonSchemaCacheSizeFromConfigFile() throws URISyntaxException, I File propFile = new File(url.toURI()); String workerPropsFile = propFile.getAbsolutePath(); Map workerProps = !workerPropsFile.isEmpty() ? - Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Map.of(); + Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap(); JsonConverter rc = new JsonConverter(); rc.configure(workerProps, false); @@ -902,7 +901,7 @@ public void stringHeaderToConnect() { @Test public void serializeNullToDefault() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, null)); JsonNode expected = parse("{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":\"default\"}"); @@ -911,7 +910,7 @@ public void serializeNullToDefault() { @Test public void serializeNullToNull() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, schema, null)); JsonNode expected = parse("{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"); @@ -920,7 +919,7 @@ public void serializeNullToNull() { @Test public void deserializeNullToDefault() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); String value = "{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); assertEquals("default", sav.value()); @@ -928,7 +927,7 @@ public void deserializeNullToDefault() { @Test public void deserializeNullToNull() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); String value = "{\"schema\":{\"type\":\"string\",\"optional\":true,\"default\":\"default\"},\"payload\":null}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); assertNull(sav.value()); @@ -936,7 +935,7 @@ public void deserializeNullToNull() { @Test public void serializeFieldNullToDefault() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); Schema structSchema = SchemaBuilder.struct().field("field1", schema).build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, structSchema, new Struct(structSchema))); @@ -946,7 +945,7 @@ public void serializeFieldNullToDefault() { @Test public void serializeFieldNullToNull() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); Schema structSchema = SchemaBuilder.struct().field("field1", schema).build(); JsonNode converted = parse(converter.fromConnectData(TOPIC, structSchema, new Struct(structSchema))); @@ -956,7 +955,7 @@ public void serializeFieldNullToNull() { @Test public void deserializeFieldNullToDefault() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, true), false); String value = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"field\":\"field1\",\"type\":\"string\",\"optional\":true,\"default\":\"default\"}],\"optional\":false},\"payload\":{\"field1\":null}}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); @@ -966,7 +965,7 @@ public void deserializeFieldNullToDefault() { @Test public void deserializeFieldNullToNull() { - converter.configure(Map.of(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); + converter.configure(Collections.singletonMap(JsonConverterConfig.REPLACE_NULL_WITH_DEFAULT_CONFIG, false), false); String value = "{\"schema\":{\"type\":\"struct\",\"fields\":[{\"field\":\"field1\",\"type\":\"string\",\"optional\":true,\"default\":\"default\"}],\"optional\":false},\"payload\":{\"field1\":null}}"; SchemaAndValue sav = converter.toConnectData(TOPIC, null, value.getBytes()); Schema schema = SchemaBuilder.string().optional().defaultValue("default").build(); @@ -979,58 +978,6 @@ public void testVersionRetrievedFromAppInfoParser() { assertEquals(AppInfoParser.getVersion(), converter.version()); } - @Test - public void testSchemaContentIsNull() { - Map config = new HashMap<>(); - config.put(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, null); - converter.configure(config, false); - byte[] jsonBytes = "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes(); - SchemaAndValue result = converter.toConnectData(TOPIC, jsonBytes); - assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), result); - } - - @Test - public void testSchemaContentIsEmptyString() { - converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, ""), false); - assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes())); - } - - @Test - public void testSchemaContentValidSchema() { - converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"type\": \"string\" }"), false); - assertEquals(new SchemaAndValue(Schema.STRING_SCHEMA, "foo-bar-baz"), converter.toConnectData(TOPIC, "\"foo-bar-baz\"".getBytes())); - } - - @Test - public void testSchemaContentInValidSchema() { - assertThrows( - DataException.class, - () -> converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"string\" }"), false), - " Provided schema is invalid , please recheck the schema you have provided"); - } - - @Test - public void testSchemaContentLooksLikeSchema() { - converter.configure(Map.of(JsonConverterConfig.SCHEMA_CONTENT_CONFIG, "{ \"type\": \"struct\", \"fields\": [{\"field\": \"schema\", \"type\": \"struct\",\"fields\": [{\"field\": \"type\", \"type\": \"string\" }]}, {\"field\": \"payload\", \"type\": \"string\"}]}"), false); - SchemaAndValue connectData = converter.toConnectData(TOPIC, "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\" }".getBytes()); - assertEquals("foo-bar-baz", ((Struct) connectData.value()).getString("payload")); - } - - @ParameterizedTest - @ValueSource(strings = { - "{ }", - "{ \"wrong\": \"schema\" }", - "{ \"schema\": { \"type\": \"string\" } }", - "{ \"payload\": \"foo-bar-baz\" }", - "{ \"schema\": { \"type\": \"string\" }, \"payload\": \"foo-bar-baz\", \"extra\": \"field\" }", - }) - public void testNullSchemaContentWithWrongConnectDataValue(String value) { - converter.configure(Map.of(), false); - assertThrows( - DataException.class, - () -> converter.toConnectData(TOPIC, value.getBytes())); - } - private JsonNode parse(byte[] json) { try { return objectMapper.readTree(json); diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java index 06dec5b25ba38..0b74b64ebbb4e 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClient.java @@ -33,6 +33,7 @@ import java.time.Duration; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -164,7 +165,7 @@ public Map remoteConsumerOffsets(String consu // to use ReplicationPolicy to create the checkpoint topic here. String checkpointTopic = replicationPolicy.checkpointsTopic(remoteClusterAlias); List checkpointAssignment = - List.of(new TopicPartition(checkpointTopic, 0)); + Collections.singletonList(new TopicPartition(checkpointTopic, 0)); consumer.assign(checkpointAssignment); consumer.seekToBeginning(checkpointAssignment); while (System.currentTimeMillis() < deadline && !endOfStream(consumer, checkpointAssignment)) { diff --git a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java index cb42f5fe654ba..53a4f9f5f051d 100644 --- a/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java +++ b/connect/mirror-client/src/main/java/org/apache/kafka/connect/mirror/MirrorClientConfig.java @@ -32,7 +32,6 @@ import java.util.Map; import static org.apache.kafka.common.config.ConfigDef.CaseInsensitiveValidString.in; -import static org.apache.kafka.common.config.ConfigDef.NO_DEFAULT_VALUE; /** * Configuration required for {@link MirrorClient} to talk to a given target cluster. @@ -106,7 +105,7 @@ public Map consumerConfig() { public Map producerConfig() { return clientConfig(PRODUCER_CLIENT_PREFIX); } - + private Map clientConfig(String prefix) { Map props = new HashMap<>(valuesWithPrefixOverride(prefix)); props.keySet().retainAll(CLIENT_CONFIG_DEF.names()); @@ -118,8 +117,7 @@ private Map clientConfig(String prefix) { static final ConfigDef CLIENT_CONFIG_DEF = new ConfigDef() .define(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, Type.LIST, - NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + null, Importance.HIGH, CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) // security support @@ -131,14 +129,13 @@ private Map clientConfig(String prefix) { CommonClientConfigs.SECURITY_PROTOCOL_DOC) .withClientSslSupport() .withClientSaslSupport(); - + static final ConfigDef CONFIG_DEF = new ConfigDef() .define(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, - Type.LIST, - NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + Type.STRING, + null, Importance.HIGH, - CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) + CommonClientConfigs.BOOTSTRAP_SERVERS_DOC) .define( REPLICATION_POLICY_CLASS, ConfigDef.Type.CLASS, diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java index 5e99f6cd74eeb..be728a0ebe98a 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/MirrorClientTest.java @@ -20,9 +20,10 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; -import java.util.Map; import java.util.Set; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,7 +46,7 @@ private static class FakeMirrorClient extends MirrorClient { } FakeMirrorClient() { - this(List.of()); + this(Collections.emptyList()); } @Override @@ -93,25 +94,25 @@ public void countHopsForTopicTest() { @Test public void heartbeatTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", "source1.heartbeats", "source2.source1.heartbeats", "source3.heartbeats")); Set heartbeatTopics = client.heartbeatTopics(); - assertEquals(heartbeatTopics, Set.of("heartbeats", "source1.heartbeats", - "source2.source1.heartbeats", "source3.heartbeats")); + assertEquals(heartbeatTopics, new HashSet<>(Arrays.asList("heartbeats", "source1.heartbeats", + "source2.source1.heartbeats", "source3.heartbeats"))); } @Test public void checkpointsTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "checkpoints.internal", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "checkpoints.internal", "source1.checkpoints.internal", "source2.source1.checkpoints.internal", "source3.checkpoints.internal")); Set checkpointTopics = client.checkpointTopics(); - assertEquals(Set.of("source1.checkpoints.internal", - "source2.source1.checkpoints.internal", "source3.checkpoints.internal"), checkpointTopics); + assertEquals(new HashSet<>(Arrays.asList("source1.checkpoints.internal", + "source2.source1.checkpoints.internal", "source3.checkpoints.internal")), checkpointTopics); } @Test public void replicationHopsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.heartbeats")); assertEquals(1, client.replicationHops("source1")); assertEquals(2, client.replicationHops("source2")); @@ -121,7 +122,7 @@ public void replicationHopsTest() throws InterruptedException { @Test public void upstreamClustersTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "heartbeats", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.source4.source5.heartbeats")); Set sources = client.upstreamClusters(); assertTrue(sources.contains("source1")); @@ -137,7 +138,7 @@ public void upstreamClustersTest() throws InterruptedException { @Test public void testIdentityReplicationUpstreamClusters() throws InterruptedException { // IdentityReplicationPolicy treats heartbeats as a special case, so these should work as usual. - MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), List.of("topic1", + MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), Arrays.asList("topic1", "topic2", "heartbeats", "source1.heartbeats", "source1.source2.heartbeats", "source3.source4.source5.heartbeats")); Set sources = client.upstreamClusters(); @@ -153,7 +154,7 @@ public void testIdentityReplicationUpstreamClusters() throws InterruptedExceptio @Test public void remoteTopicsTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "topic3", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "topic3", "source1.topic4", "source1.source2.topic5", "source3.source4.source5.topic6")); Set remoteTopics = client.remoteTopics(); assertFalse(remoteTopics.contains("topic1")); @@ -167,7 +168,7 @@ public void remoteTopicsTest() throws InterruptedException { @Test public void testIdentityReplicationRemoteTopics() throws InterruptedException { // IdentityReplicationPolicy should consider any topic to be remote. - MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), List.of( + MirrorClient client = new FakeMirrorClient(identityReplicationPolicy("source"), Arrays.asList( "topic1", "topic2", "topic3", "heartbeats", "backup.heartbeats")); Set remoteTopics = client.remoteTopics(); assertTrue(remoteTopics.contains("topic1")); @@ -180,10 +181,10 @@ public void testIdentityReplicationRemoteTopics() throws InterruptedException { @Test public void remoteTopicsSeparatorTest() throws InterruptedException { - MirrorClient client = new FakeMirrorClient(List.of("topic1", "topic2", "topic3", + MirrorClient client = new FakeMirrorClient(Arrays.asList("topic1", "topic2", "topic3", "source1__topic4", "source1__source2__topic5", "source3__source4__source5__topic6")); ((Configurable) client.replicationPolicy()).configure( - Map.of("replication.policy.separator", "__")); + Collections.singletonMap("replication.policy.separator", "__")); Set remoteTopics = client.remoteTopics(); assertFalse(remoteTopics.contains("topic1")); assertFalse(remoteTopics.contains("topic2")); @@ -196,7 +197,7 @@ public void remoteTopicsSeparatorTest() throws InterruptedException { @Test public void testIdentityReplicationTopicSource() { MirrorClient client = new FakeMirrorClient( - identityReplicationPolicy("primary"), List.of()); + identityReplicationPolicy("primary"), Collections.emptyList()); assertEquals("topic1", client.replicationPolicy() .formatRemoteTopic("primary", "topic1")); assertEquals("primary", client.replicationPolicy() @@ -210,7 +211,8 @@ public void testIdentityReplicationTopicSource() { private ReplicationPolicy identityReplicationPolicy(String source) { IdentityReplicationPolicy policy = new IdentityReplicationPolicy(); - policy.configure(Map.of(IdentityReplicationPolicy.SOURCE_CLUSTER_ALIAS_CONFIG, source)); + policy.configure(Collections.singletonMap( + IdentityReplicationPolicy.SOURCE_CLUSTER_ALIAS_CONFIG, source)); return policy; } } diff --git a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java index e59348b05a494..86aaf8ffd0e2b 100644 --- a/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java +++ b/connect/mirror-client/src/test/java/org/apache/kafka/connect/mirror/ReplicationPolicyTest.java @@ -20,6 +20,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -32,7 +33,7 @@ public class ReplicationPolicyTest { @BeforeEach public void setUp() { - DEFAULT_REPLICATION_POLICY.configure(Map.of()); + DEFAULT_REPLICATION_POLICY.configure(Collections.emptyMap()); } @Test diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java index 39aea181b6674..2e88977d93cea 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/CheckpointStore.java @@ -30,6 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -96,7 +97,7 @@ public void update(String group, Map newCheckpoints) public Map get(String group) { Map result = checkpointsPerConsumerGroup.get(group); - return result == null ? null : Map.copyOf(result); + return result == null ? null : Collections.unmodifiableMap(result); } public Map> computeConvertedUpstreamOffset() { diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java index ded82a8571c7f..f88ed4e704661 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/DefaultGroupFilter.java @@ -65,13 +65,11 @@ static class GroupFilterConfig extends AbstractConfig { .define(GROUPS_INCLUDE_CONFIG, Type.LIST, GROUPS_INCLUDE_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, GROUPS_INCLUDE_DOC) .define(GROUPS_EXCLUDE_CONFIG, Type.LIST, GROUPS_EXCLUDE_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, GROUPS_EXCLUDE_DOC); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java index b7625da619ddf..3fb2859d2dd46 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfig.java @@ -194,14 +194,12 @@ private static ConfigDef defineCheckpointConfig(ConfigDef baseConfig) { GROUPS, ConfigDef.Type.LIST, GROUPS_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, GROUPS_DOC) .define( GROUPS_EXCLUDE, ConfigDef.Type.LIST, GROUPS_EXCLUDE_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, GROUPS_EXCLUDE_DOC) .define( diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java index 9f8472c9c35b6..218c64e85a478 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnector.java @@ -17,9 +17,8 @@ package org.apache.kafka.connect.mirror; import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.GroupListing; +import org.apache.kafka.clients.admin.ConsumerGroupListing; import org.apache.kafka.clients.admin.ListConsumerGroupOffsetsSpec; -import org.apache.kafka.clients.admin.ListGroupsOptions; import org.apache.kafka.clients.consumer.OffsetAndMetadata; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.Config; @@ -38,6 +37,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -135,7 +135,7 @@ public List> taskConfigs(int maxTasks) { // If the replication is disabled or checkpoint emission is disabled by setting 'emit.checkpoints.enabled' to false, // the interval of checkpoint emission will be negative and no 'MirrorCheckpointTask' will be created. if (!config.enabled() || config.emitCheckpointsInterval().isNegative()) { - return List.of(); + return Collections.emptyList(); } if (knownConsumerGroups == null) { @@ -147,7 +147,7 @@ public List> taskConfigs(int maxTasks) { // If the consumer group is empty, no 'MirrorCheckpointTask' will be created. if (knownConsumerGroups.isEmpty()) { - return List.of(); + return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownConsumerGroups.size()); @@ -198,7 +198,7 @@ private void refreshConsumerGroups() throws InterruptedException, ExecutionException { // If loadInitialConsumerGroups fails for any reason(e.g., timeout), knownConsumerGroups may be null. // We still want this method to recover gracefully in such cases. - Set knownConsumerGroups = this.knownConsumerGroups == null ? Set.of() : this.knownConsumerGroups; + Set knownConsumerGroups = this.knownConsumerGroups == null ? Collections.emptySet() : this.knownConsumerGroups; Set consumerGroups = findConsumerGroups(); Set newConsumerGroups = new HashSet<>(consumerGroups); newConsumerGroups.removeAll(knownConsumerGroups); @@ -225,7 +225,7 @@ private void loadInitialConsumerGroups() Set findConsumerGroups() throws InterruptedException, ExecutionException { List filteredGroups = listConsumerGroups().stream() - .map(GroupListing::groupId) + .map(ConsumerGroupListing::groupId) .filter(this::shouldReplicateByGroupFilter) .collect(Collectors.toList()); @@ -252,10 +252,10 @@ Set findConsumerGroups() return checkpointGroups; } - Collection listConsumerGroups() + Collection listConsumerGroups() throws InterruptedException, ExecutionException { return adminCall( - () -> sourceAdminClient.listGroups(ListGroupsOptions.forConsumerGroups()).valid().get(), + () -> sourceAdminClient.listConsumerGroups().valid().get(), () -> "list consumer groups on " + config.sourceClusterAlias() + " cluster" ); } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java index 71e3edebf5b63..8ace7d1fc3bdb 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointMetrics.java @@ -26,7 +26,9 @@ import org.apache.kafka.common.metrics.stats.Min; import org.apache.kafka.common.metrics.stats.Value; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; @@ -36,7 +38,7 @@ class MirrorCheckpointMetrics implements AutoCloseable { private static final String CHECKPOINT_CONNECTOR_GROUP = MirrorCheckpointConnector.class.getSimpleName(); - private static final Set GROUP_TAGS = Set.of("source", "target", "group", "topic", "partition"); + private static final Set GROUP_TAGS = new HashSet<>(Arrays.asList("source", "target", "group", "topic", "partition")); private static final MetricNameTemplate CHECKPOINT_LATENCY = new MetricNameTemplate( "checkpoint-latency-ms", CHECKPOINT_CONNECTOR_GROUP, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java index db86fbdb40be7..6ce91baf123b6 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTask.java @@ -35,6 +35,7 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -45,6 +46,7 @@ import java.util.concurrent.ExecutionException; import java.util.function.Function; import java.util.stream.Collectors; +import java.util.stream.Stream; import static org.apache.kafka.connect.mirror.MirrorUtils.adminCall; @@ -185,7 +187,7 @@ List sourceRecordsForGroup(String group) throws InterruptedExcepti .collect(Collectors.toList()); } catch (ExecutionException e) { log.error("Error querying offsets for consumer group {} on cluster {}.", group, sourceClusterAlias, e); - return List.of(); + return Collections.emptyList(); } } @@ -194,7 +196,7 @@ Map checkpointsForGroup(Map shouldCheckpointTopic(x.getKey().topic())) // Only perform relevant checkpoints filtered by "topic filter" .map(x -> checkpoint(group, x.getKey(), x.getValue())) - .flatMap(Optional::stream) // do not emit checkpoints for partitions that don't have offset-syncs + .flatMap(o -> o.map(Stream::of).orElseGet(Stream::empty)) // do not emit checkpoints for partitions that don't have offset-syncs .filter(x -> x.downstreamOffset() >= 0) // ignore offsets we cannot translate accurately .filter(this::checkpointIsMoreRecent) // do not emit checkpoints for partitions that have a later checkpoint .collect(Collectors.toMap(Checkpoint::topicPartition, Function.identity())); @@ -233,7 +235,7 @@ private Map listConsumerGroupOffsets(String g throws InterruptedException, ExecutionException { if (stopping) { // short circuit if stopping - return Map.of(); + return Collections.emptyMap(); } return adminCall( () -> sourceAdminClient.listConsumerGroupOffsets(group).partitionsToOffsetAndMetadata().get(), @@ -371,7 +373,7 @@ Map> syncGroupOffset() throws Exe offsetToSync.put(topicPartition, convertedOffset); } - if (offsetToSync.isEmpty()) { + if (offsetToSync.size() == 0) { log.trace("skip syncing the offset for consumer group: {}", consumerGroupId); continue; } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java index 3d2cfda6dcc9a..a8db4989b297c 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskConfig.java @@ -18,7 +18,9 @@ import org.apache.kafka.common.config.ConfigDef; +import java.util.Collections; import java.util.HashSet; +import java.util.List; import java.util.Map; import java.util.Set; @@ -31,7 +33,11 @@ public MirrorCheckpointTaskConfig(Map props) { } Set taskConsumerGroups() { - return new HashSet<>(getList(TASK_CONSUMER_GROUPS)); + List fields = getList(TASK_CONSUMER_GROUPS); + if (fields == null || fields.isEmpty()) { + return Collections.emptySet(); + } + return new HashSet<>(fields); } MirrorCheckpointMetrics metrics() { @@ -49,8 +55,7 @@ String entityLabel() { .define( TASK_CONSUMER_GROUPS, ConfigDef.Type.LIST, - ConfigDef.NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + null, ConfigDef.Importance.LOW, TASK_CONSUMER_GROUPS_DOC) .define(TASK_INDEX, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java index 9baf7c1f35cb5..920f1d93d0406 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorConnectorConfig.java @@ -311,7 +311,6 @@ String entityLabel() { CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, ConfigDef.Type.LIST, JmxReporter.class.getName(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define( @@ -321,12 +320,6 @@ String entityLabel() { in(Utils.enumOptions(SecurityProtocol.class)), ConfigDef.Importance.MEDIUM, CommonClientConfigs.SECURITY_PROTOCOL_DOC) - .define(CONFIG_PROVIDERS_CONFIG, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.LOW, - CONFIG_PROVIDERS_DOC) .withClientSslSupport() .withClientSaslSupport(); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java index 201339229379c..f9a844fecfa13 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatConnector.java @@ -24,6 +24,7 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.source.SourceConnector; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -72,10 +73,10 @@ public List> taskConfigs(int maxTasks) { // if the heartbeats emission is disabled by setting `emit.heartbeats.enabled` to `false`, // the interval heartbeat emission will be negative and no `MirrorHeartbeatTask` will be created if (config.emitHeartbeatsInterval().isNegative()) { - return List.of(); + return Collections.emptyList(); } // just need a single task - return List.of(config.originalsStrings()); + return Collections.singletonList(config.originalsStrings()); } @Override diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java index 0a4a1374ca2a2..35c9c8feccb29 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTask.java @@ -22,6 +22,7 @@ import org.apache.kafka.connect.source.SourceTask; import java.time.Duration; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; @@ -75,7 +76,7 @@ public List poll() throws InterruptedException { Schema.BYTES_SCHEMA, heartbeat.recordKey(), Schema.BYTES_SCHEMA, heartbeat.recordValue(), timestamp); - return List.of(record); + return Collections.singletonList(record); } @Override diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java index f0aab090bb247..3bc7aed02b36e 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMaker.java @@ -52,18 +52,17 @@ import org.slf4j.LoggerFactory; import java.io.File; -import java.net.InetAddress; -import java.net.URI; +import java.io.UnsupportedEncodingException; import java.net.URLEncoder; -import java.net.UnknownHostException; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Properties; import java.util.Set; -import java.util.UUID; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; @@ -103,14 +102,18 @@ public class MirrorMaker { private static final long SHUTDOWN_TIMEOUT_SECONDS = 60L; - public static final List> CONNECTOR_CLASSES = List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class, MirrorCheckpointConnector.class); + public static final List> CONNECTOR_CLASSES = Collections.unmodifiableList( + Arrays.asList( + MirrorSourceConnector.class, + MirrorHeartbeatConnector.class, + MirrorCheckpointConnector.class)); private final Map herders = new HashMap<>(); private CountDownLatch startLatch; private CountDownLatch stopLatch; private final AtomicBoolean shutdown = new AtomicBoolean(false); private final ShutdownHook shutdownHook; - private final URI advertisedUrl; + private final String advertisedUrl; private final Time time; private final MirrorMakerConfig config; private final Set clusters; @@ -131,11 +134,11 @@ public MirrorMaker(MirrorMakerConfig config, List clusters, Time time) { this.restClient = new RestClient(config); internalServer = new MirrorRestServer(config.originals(), restClient); internalServer.initializeServer(); - this.advertisedUrl = internalServer.advertisedUrl(); + this.advertisedUrl = internalServer.advertisedUrl().toString(); } else { internalServer = null; restClient = null; - this.advertisedUrl = URI.create("NOTUSED"); + this.advertisedUrl = "NOTUSED"; } this.config = config; if (clusters != null && !clusters.isEmpty()) { @@ -231,12 +234,17 @@ private void checkHerder(SourceAndTarget sourceAndTarget) { } private void addHerder(SourceAndTarget sourceAndTarget) { - log.info("creating herder for {}", sourceAndTarget.toString()); + log.info("creating herder for " + sourceAndTarget.toString()); Map workerProps = config.workerConfig(sourceAndTarget); - String encodedSource = encodePath(sourceAndTarget.source()); - String encodedTarget = encodePath(sourceAndTarget.target()); - List restNamespace = List.of(encodedSource, encodedTarget); - String workerId = generateWorkerId(sourceAndTarget); + List restNamespace; + try { + String encodedSource = encodePath(sourceAndTarget.source()); + String encodedTarget = encodePath(sourceAndTarget.target()); + restNamespace = Arrays.asList(encodedSource, encodedTarget); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException("Unable to create encoded URL paths for source and target using UTF-8", e); + } + String workerId = sourceAndTarget.toString(); Plugins plugins = new Plugins(workerProps); plugins.compareAndSwapWithDelegatingLoader(); DistributedConfig distributedConfig = new DistributedConfig(workerProps); @@ -249,7 +257,7 @@ private void addHerder(SourceAndTarget sourceAndTarget) { SharedTopicAdmin sharedAdmin = new SharedTopicAdmin(adminProps); KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin, () -> clientIdBase, plugins.newInternalConverter(true, JsonConverter.class.getName(), - Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(distributedConfig); ConnectorClientConfigOverridePolicy clientConfigOverridePolicy = new AllConnectorClientConfigOverridePolicy(); clientConfigOverridePolicy.configure(config.originals()); @@ -269,13 +277,13 @@ private void addHerder(SourceAndTarget sourceAndTarget) { // tracking the various shared admin objects in this class. Herder herder = new MirrorHerder(config, sourceAndTarget, distributedConfig, time, worker, kafkaClusterId, statusBackingStore, configBackingStore, - advertisedUrl.toString(), restClient, clientConfigOverridePolicy, + advertisedUrl, restClient, clientConfigOverridePolicy, restNamespace, sharedAdmin); herders.put(sourceAndTarget, herder); } - private static String encodePath(String rawPath) { - return URLEncoder.encode(rawPath, StandardCharsets.UTF_8) + private static String encodePath(String rawPath) throws UnsupportedEncodingException { + return URLEncoder.encode(rawPath, StandardCharsets.UTF_8.name()) // Java's out-of-the-box URL encoder encodes spaces (' ') as pluses ('+'), // and pluses as '%2B' // But Jetty doesn't decode pluses at all and leaves them as-are in decoded @@ -287,18 +295,6 @@ private static String encodePath(String rawPath) { .replaceAll("\\+", "%20"); } - private String generateWorkerId(SourceAndTarget sourceAndTarget) { - if (config.enableInternalRest()) { - return advertisedUrl.getHost() + ":" + advertisedUrl.getPort() + "/" + sourceAndTarget.toString(); - } - try { - //UUID to make sure it is unique even if multiple workers running on the same host - return InetAddress.getLocalHost().getCanonicalHostName() + "/" + sourceAndTarget.toString() + "/" + UUID.randomUUID(); - } catch (UnknownHostException e) { - return sourceAndTarget.toString() + "/" + UUID.randomUUID(); - } - } - private class ShutdownHook extends Thread { @Override public void run() { diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java index 33fc2641a394c..aba62cf8464ff 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorMakerConfig.java @@ -31,6 +31,8 @@ import org.apache.kafka.connect.runtime.rest.RestServerConfig; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -213,9 +215,14 @@ public Map workerConfig(SourceAndTarget sourceAndTarget) { Set allConfigNames() { Set allNames = new HashSet<>(); - allNames.addAll(MirrorCheckpointConfig.CONNECTOR_CONFIG_DEF.names()); - allNames.addAll(MirrorSourceConfig.CONNECTOR_CONFIG_DEF.names()); - allNames.addAll(MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF.names()); + List connectorConfigDefs = Arrays.asList( + MirrorCheckpointConfig.CONNECTOR_CONFIG_DEF, + MirrorSourceConfig.CONNECTOR_CONFIG_DEF, + MirrorHeartbeatConfig.CONNECTOR_CONFIG_DEF + ); + for (ConfigDef cd : connectorConfigDefs) { + allNames.addAll(cd.names()); + } return allNames; } @@ -277,11 +284,11 @@ Map transform(Map props) { return transformed; } - private static ConfigDef config() { + protected static ConfigDef config() { ConfigDef result = new ConfigDef() - .define(CLUSTERS_CONFIG, Type.LIST, ConfigDef.NO_DEFAULT_VALUE, ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.HIGH, CLUSTERS_DOC) + .define(CLUSTERS_CONFIG, Type.LIST, Importance.HIGH, CLUSTERS_DOC) .define(ENABLE_INTERNAL_REST_CONFIG, Type.BOOLEAN, false, Importance.HIGH, ENABLE_INTERNAL_REST_DOC) - .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, List.of(), ConfigDef.ValidList.anyNonDuplicateValues(true, false), Importance.LOW, CONFIG_PROVIDERS_DOC) + .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, Collections.emptyList(), Importance.LOW, CONFIG_PROVIDERS_DOC) // security support .define(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, Type.STRING, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java index 2ec663ad2fc32..dc0da5382338c 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConfig.java @@ -209,21 +209,18 @@ private static ConfigDef defineSourceConfig(ConfigDef baseConfig) { TOPICS, ConfigDef.Type.LIST, TOPICS_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_DOC) .define( TOPICS_EXCLUDE, ConfigDef.Type.LIST, TOPICS_EXCLUDE_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_EXCLUDE_DOC) .define( CONFIG_PROPERTIES_EXCLUDE, ConfigDef.Type.LIST, CONFIG_PROPERTIES_EXCLUDE_DEFAULT, - ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, CONFIG_PROPERTIES_EXCLUDE_DOC) .define( diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java index a9d7779673ed7..f65899dac6e3a 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceConnector.java @@ -55,6 +55,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -98,8 +99,8 @@ public class MirrorSourceConnector extends SourceConnector { private String connectorName; private TopicFilter topicFilter; private ConfigPropertyFilter configPropertyFilter; - private List knownSourceTopicPartitions = List.of(); - private List knownTargetTopicPartitions = List.of(); + private List knownSourceTopicPartitions = Collections.emptyList(); + private List knownTargetTopicPartitions = Collections.emptyList(); private ReplicationPolicy replicationPolicy; private int replicationFactor; private Admin sourceAdminClient; @@ -201,7 +202,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { if (!config.enabled() || knownSourceTopicPartitions.isEmpty()) { - return List.of(); + return Collections.emptyList(); } int numTasks = Math.min(maxTasks, knownSourceTopicPartitions.size()); List> roundRobinByTask = new ArrayList<>(numTasks); @@ -419,7 +420,7 @@ private Set toTopics(Collection tps) { void syncTopicAcls() throws InterruptedException, ExecutionException { Optional> rawBindings = listTopicAclBindings(); - if (rawBindings.isEmpty()) + if (!rawBindings.isPresent()) return; List filteredBindings = rawBindings.get().stream() .filter(x -> x.pattern().resourceType() == ResourceType.TOPIC) diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java index c297c4c5fcf3d..7e33967c9f1f0 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceMetrics.java @@ -27,6 +27,8 @@ import org.apache.kafka.common.metrics.stats.Min; import org.apache.kafka.common.metrics.stats.Value; +import java.util.Arrays; +import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; import java.util.Set; @@ -60,7 +62,7 @@ class MirrorSourceMetrics implements AutoCloseable { this.source = taskConfig.sourceClusterAlias(); this.metrics = new Metrics(); - Set partitionTags = Set.of("source", "target", "topic", "partition"); + Set partitionTags = new HashSet<>(Arrays.asList("source", "target", "topic", "partition")); recordCount = new MetricNameTemplate( "record-count", SOURCE_CONNECTOR_GROUP, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTask.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTask.java index 6ab65bebdca2e..879984e86aabc 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTask.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTask.java @@ -22,6 +22,8 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.TopicExistsException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.errors.WakeupException; import org.apache.kafka.common.header.Header; import org.apache.kafka.common.utils.Utils; @@ -36,6 +38,7 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; @@ -56,6 +59,10 @@ public class MirrorSourceTask extends SourceTask { private Semaphore consumerAccess; private OffsetSyncWriter offsetSyncWriter; + // Enhanced MirrorMaker 2 features for fault tolerance + private Map previousOffsets = new HashMap<>(); + private Map topicResetDetected = new HashMap<>(); + public MirrorSourceTask() {} // for testing @@ -132,7 +139,14 @@ public List poll() { return null; } try { + // Enhanced MirrorMaker 2: Check for topic resets before polling + detectAndHandleTopicResets(); + ConsumerRecords records = consumer.poll(pollTimeout); + + // Enhanced MirrorMaker 2: Detect log truncation + detectLogTruncation(records); + List sourceRecords = new ArrayList<>(records.count()); for (ConsumerRecord record : records) { SourceRecord converted = convertRecord(record); @@ -140,6 +154,10 @@ public List poll() { TopicPartition topicPartition = new TopicPartition(converted.topic(), converted.kafkaPartition()); metrics.recordAge(topicPartition, System.currentTimeMillis() - record.timestamp()); metrics.recordBytes(topicPartition, byteSize(record.value())); + + // Enhanced MirrorMaker 2: Track offsets for truncation detection + TopicPartition sourceTopicPartition = new TopicPartition(record.topic(), record.partition()); + previousOffsets.put(sourceTopicPartition, record.offset()); } if (sourceRecords.isEmpty()) { // WorkerSourceTasks expects non-zero batch size @@ -150,6 +168,10 @@ public List poll() { } } catch (WakeupException e) { return null; + } catch (UnknownTopicOrPartitionException e) { + log.warn("Topic or partition not found, attempting recovery: {}", e.getMessage()); + handleTopicRecovery(e); + return null; } catch (KafkaException e) { log.warn("Failure during poll.", e); return null; @@ -255,4 +277,121 @@ private static int byteSize(byte[] bytes) { private boolean isUncommitted(Long offset) { return offset == null || offset < 0; } + + /** + * Enhanced MirrorMaker 2: Detect log truncation by checking for offset gaps + * When aggressive retention policies purge messages before replication completes, + * this creates undetectable gaps in the replicated data stream. + */ + private void detectLogTruncation(ConsumerRecords records) { + for (ConsumerRecord record : records) { + TopicPartition sourceTopicPartition = new TopicPartition(record.topic(), record.partition()); + Long lastKnownOffset = previousOffsets.get(sourceTopicPartition); + + if (lastKnownOffset != null && record.offset() > lastKnownOffset + 1) { + long gapSize = record.offset() - lastKnownOffset - 1; + String errorMessage = String.format( + "CRITICAL: Log truncation detected for %s. Expected offset %d, but got %d. " + + "Gap of %d messages detected, indicating potential data loss due to retention policies. " + + "Timestamp: %d, Topic: %s, Partition: %d", + sourceTopicPartition, lastKnownOffset + 1, record.offset(), gapSize, + System.currentTimeMillis(), record.topic(), record.partition() + ); + + log.error(errorMessage); + + // Fail-fast: Throw exception to stop replication immediately + throw new KafkaException("Log truncation detected: " + errorMessage); + } + } + } + + /** + * Enhanced MirrorMaker 2: Detect and handle topic resets (deletion/recreation) + * When topics are deleted and recreated, offsets reset to 0, causing replication failures. + * This method detects such scenarios and automatically recovers. + */ + private void detectAndHandleTopicResets() { + try { + Set assignedPartitions = consumer.assignment(); + + for (TopicPartition tp : assignedPartitions) { + if (topicResetDetected.getOrDefault(tp, false)) { + continue; // Already handled this reset + } + + try { + // Try to get current position - this will fail if topic was reset + long currentPosition = consumer.position(tp); + Long lastKnownOffset = previousOffsets.get(tp); + + // If we had a previous offset but current position is 0, likely a topic reset + if (lastKnownOffset != null && lastKnownOffset > 0 && currentPosition == 0) { + log.warn("Topic reset detected for {}. Last known offset: {}, current position: {}. " + + "Timestamp: {}", + tp, lastKnownOffset, currentPosition, System.currentTimeMillis()); + + handleTopicReset(tp); + } + } catch (Exception e) { + log.debug("Error checking position for {}: {}", tp, e.getMessage()); + // This might indicate a topic reset, attempt recovery + handleTopicReset(tp); + } + } + } catch (Exception e) { + log.warn("Error during topic reset detection: {}", e.getMessage()); + } + } + + /** + * Enhanced MirrorMaker 2: Handle topic reset by resubscribing from beginning + */ + private void handleTopicReset(TopicPartition topicPartition) { + try { + log.info("Handling topic reset for {}. Seeking to beginning offset. Timestamp: {}", + topicPartition, System.currentTimeMillis()); + + // Mark this topic as reset to avoid repeated handling + topicResetDetected.put(topicPartition, true); + + // Seek to beginning to restart replication from the start + consumer.seekToBeginning(List.of(topicPartition)); + + // Clear previous offset tracking for this partition + previousOffsets.remove(topicPartition); + + log.info("Successfully recovered from topic reset for {}. Replication will restart from beginning.", + topicPartition); + + } catch (Exception e) { + log.error("Failed to handle topic reset for {}: {}", topicPartition, e.getMessage(), e); + // Re-throw to let Connect framework handle the error + throw new KafkaException("Failed to recover from topic reset for " + topicPartition, e); + } + } + + /** + * Enhanced MirrorMaker 2: Handle topic recovery for unknown topics/partitions + */ + private void handleTopicRecovery(UnknownTopicOrPartitionException e) { + try { + log.info("Attempting to recover from unknown topic/partition error: {}. Timestamp: {}", + e.getMessage(), System.currentTimeMillis()); + + // Wait a bit for topic to be available again + Thread.sleep(5000); + + // Clear reset detection state to allow re-detection + topicResetDetected.clear(); + + log.info("Topic recovery attempt completed. Will retry on next poll."); + + } catch (InterruptedException ie) { + Thread.currentThread().interrupt(); + log.warn("Topic recovery interrupted"); + } catch (Exception ex) { + log.error("Failed during topic recovery: {}", ex.getMessage(), ex); + } + } } diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java index aa5d300c00ab9..f0c562bbcbb08 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorSourceTaskConfig.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigDef; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; @@ -34,6 +35,9 @@ public MirrorSourceTaskConfig(Map props) { Set taskTopicPartitions() { List fields = getList(TASK_TOPIC_PARTITIONS); + if (fields == null || fields.isEmpty()) { + return Collections.emptySet(); + } return fields.stream() .map(MirrorUtils::decodeTopicPartition) .collect(Collectors.toSet()); @@ -54,8 +58,7 @@ String entityLabel() { .define( TASK_TOPIC_PARTITIONS, ConfigDef.Type.LIST, - ConfigDef.NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + null, ConfigDef.Importance.LOW, TASK_TOPIC_PARTITIONS_DOC) .define(TASK_INDEX, diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java index fdf091c106fe2..d8cbba184a48c 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/MirrorUtils.java @@ -38,16 +38,19 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.Callable; import java.util.concurrent.ExecutionException; import java.util.function.Supplier; import java.util.regex.Pattern; +import static java.util.Collections.singleton; + /** Internal utility methods. */ public final class MirrorUtils { @@ -81,7 +84,7 @@ static Map wrapPartition(TopicPartition topicPartition, String s } public static Map wrapOffset(long offset) { - return Map.of(OFFSET_KEY, offset); + return Collections.singletonMap(OFFSET_KEY, offset); } public static TopicPartition unwrapPartition(Map wrapped) { @@ -262,7 +265,7 @@ static Pattern compilePatternList(List fields) { } static Pattern compilePatternList(String fields) { - return compilePatternList(List.of(fields.split("\\W*,\\W*"))); + return compilePatternList(Arrays.asList(fields.split("\\W*,\\W*"))); } static void createCompactedTopic(String topicName, short partitions, short replicationFactor, Admin admin) { @@ -274,7 +277,7 @@ static void createCompactedTopic(String topicName, short partitions, short repli CreateTopicsOptions args = new CreateTopicsOptions().validateOnly(false); try { - admin.createTopics(Set.of(topicDescription), args).values().get(topicName).get(); + admin.createTopics(singleton(topicDescription), args).values().get(topicName).get(); log.info("Created topic '{}'", topicName); } catch (InterruptedException e) { Thread.interrupted(); diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java index 6e366573cfef1..c46aac634fba5 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSync.java @@ -25,7 +25,7 @@ import java.nio.ByteBuffer; -public record OffsetSync(TopicPartition topicPartition, long upstreamOffset, long downstreamOffset) { +public class OffsetSync { public static final String TOPIC_KEY = "topic"; public static final String PARTITION_KEY = "partition"; public static final String UPSTREAM_OFFSET_KEY = "upstreamOffset"; @@ -39,6 +39,28 @@ public record OffsetSync(TopicPartition topicPartition, long upstreamOffset, lon new Field(TOPIC_KEY, Type.STRING), new Field(PARTITION_KEY, Type.INT32)); + private final TopicPartition topicPartition; + private final long upstreamOffset; + private final long downstreamOffset; + + public OffsetSync(TopicPartition topicPartition, long upstreamOffset, long downstreamOffset) { + this.topicPartition = topicPartition; + this.upstreamOffset = upstreamOffset; + this.downstreamOffset = downstreamOffset; + } + + public TopicPartition topicPartition() { + return topicPartition; + } + + public long upstreamOffset() { + return upstreamOffset; + } + + public long downstreamOffset() { + return downstreamOffset; + } + @Override public String toString() { return String.format("OffsetSync{topicPartition=%s, upstreamOffset=%d, downstreamOffset=%d}", diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java index 1a5ef6cc4583a..75ce230218366 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/OffsetSyncWriter.java @@ -186,7 +186,8 @@ boolean update(long upstreamOffset, long downstreamOffset) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof PartitionState that)) return false; + if (!(o instanceof PartitionState)) return false; + PartitionState that = (PartitionState) o; return previousUpstreamOffset == that.previousUpstreamOffset && previousDownstreamOffset == that.previousDownstreamOffset && lastSyncDownstreamOffset == that.lastSyncDownstreamOffset && diff --git a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java index 3cc50819e2c94..f09cb12b0f060 100644 --- a/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java +++ b/connect/mirror/src/main/java/org/apache/kafka/connect/mirror/rest/MirrorRestServer.java @@ -28,7 +28,7 @@ import org.glassfish.jersey.server.ResourceConfig; import java.util.Collection; -import java.util.List; +import java.util.Collections; import java.util.Map; public class MirrorRestServer extends RestServer { @@ -48,12 +48,14 @@ public void initializeInternalResources(Map herders) { @Override protected Collection> regularResources() { - return List.of(InternalMirrorResource.class); + return Collections.singletonList( + InternalMirrorResource.class + ); } @Override protected Collection> adminResources() { - return List.of(); + return Collections.emptyList(); } @Override @@ -68,4 +70,5 @@ protected void configure() { bind(restClient).to(RestClient.class); } } + } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java index fb65a1162e2bf..476fbcceaef82 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/CheckpointStoreTest.java @@ -23,7 +23,9 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Map; import java.util.Set; @@ -37,7 +39,8 @@ public class CheckpointStoreTest { @Test public void testReadCheckpointsTopic() { - Set consumerGroups = Set.of("group1"); + Set consumerGroups = new HashSet<>(); + consumerGroups.add("group1"); MirrorCheckpointTaskConfig config = mock(MirrorCheckpointTaskConfig.class); when(config.checkpointsTopic()).thenReturn("checkpoint.topic"); @@ -60,7 +63,7 @@ void readCheckpointsImpl(MirrorCheckpointTaskConfig config, Callback> expected = new HashMap<>(); - expected.put("group1", Map.of(new TopicPartition("t1", 0), + expected.put("group1", Collections.singletonMap(new TopicPartition("t1", 0), new Checkpoint("group1", new TopicPartition("t1", 0), 1, 1, ""))); assertEquals(expected, store.checkpointsPerConsumerGroup); } @@ -68,7 +71,8 @@ void readCheckpointsImpl(MirrorCheckpointTaskConfig config, Callback consumerGroups = Set.of("group1"); + Set consumerGroups = new HashSet<>(); + consumerGroups.add("group1"); MirrorCheckpointTaskConfig config = mock(MirrorCheckpointTaskConfig.class); when(config.checkpointsTopic()).thenReturn("checkpoint.topic"); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java index 1ee27ba0ffe40..ccd381ceadbc8 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConfigTest.java @@ -20,6 +20,8 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -35,7 +37,7 @@ public class MirrorCheckpointConfigTest { @Test public void testTaskConfigConsumerGroups() { - List groups = List.of("consumer-1", "consumer-2", "consumer-3"); + List groups = Arrays.asList("consumer-1", "consumer-2", "consumer-3"); MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); Map props = config.taskConfigForConsumerGroups(groups, 1); MirrorCheckpointTaskConfig taskConfig = new MirrorCheckpointTaskConfig(props); @@ -116,7 +118,7 @@ public void testValidateIfConnectorEnabled() { Map configValues = MirrorCheckpointConfig.validate(makeProps( MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "false", MirrorCheckpointConfig.SYNC_GROUP_OFFSETS_ENABLED, "false")); - assertEquals(configValues.keySet(), Set.of(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED)); + assertEquals(configValues.keySet(), Collections.singleton(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED)); configValues = MirrorCheckpointConfig.validate(makeProps(MirrorCheckpointConfig.EMIT_CHECKPOINTS_ENABLED, "true", MirrorCheckpointConfig.EMIT_OFFSET_SYNCS_ENABLED, "false")); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java index ecb07dc529d42..d726ee7c0ecb1 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointConnectorTest.java @@ -16,22 +16,21 @@ */ package org.apache.kafka.connect.mirror; -import org.apache.kafka.clients.admin.GroupListing; +import org.apache.kafka.clients.admin.ConsumerGroupListing; import org.apache.kafka.clients.consumer.OffsetAndMetadata; -import org.apache.kafka.clients.consumer.internals.ConsumerProtocol; -import org.apache.kafka.common.GroupType; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.RetriableException; import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.function.Function; import java.util.stream.Collectors; @@ -142,12 +141,12 @@ public void testReplicationEnabled() { @Test public void testFindConsumerGroups() throws Exception { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); - MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Set.of(), config); + MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Collections.emptySet(), config); connector = spy(connector); - List groups = List.of( - new GroupListing("g1", Optional.of(GroupType.CLASSIC), "", Optional.empty()), - new GroupListing("g2", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty())); + Collection groups = Arrays.asList( + new ConsumerGroupListing("g1", true), + new ConsumerGroupListing("g2", false)); Map offsets = new HashMap<>(); offsets.put(new TopicPartition("t1", 0), new OffsetAndMetadata(0)); doReturn(groups).when(connector).listConsumerGroups(); @@ -160,26 +159,26 @@ public void testFindConsumerGroups() throws Exception { doReturn(groupToOffsets).when(connector).listConsumerGroupOffsets(anyList()); Set groupFound = connector.findConsumerGroups(); - Set expectedGroups = groups.stream().map(GroupListing::groupId).collect(Collectors.toSet()); + Set expectedGroups = groups.stream().map(ConsumerGroupListing::groupId).collect(Collectors.toSet()); assertEquals(expectedGroups, groupFound, "Expected groups are not the same as findConsumerGroups"); doReturn(false).when(connector).shouldReplicateByTopicFilter(anyString()); Set topicFilterGroupFound = connector.findConsumerGroups(); - assertEquals(Set.of(), topicFilterGroupFound); + assertEquals(Collections.emptySet(), topicFilterGroupFound); } @Test public void testFindConsumerGroupsInCommonScenarios() throws Exception { MirrorCheckpointConfig config = new MirrorCheckpointConfig(makeProps()); - MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Set.of(), config); + MirrorCheckpointConnector connector = new MirrorCheckpointConnector(Collections.emptySet(), config); connector = spy(connector); - List groups = List.of( - new GroupListing("g1", Optional.of(GroupType.CLASSIC), "", Optional.empty()), - new GroupListing("g2", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty()), - new GroupListing("g3", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty()), - new GroupListing("g4", Optional.of(GroupType.CLASSIC), ConsumerProtocol.PROTOCOL_TYPE, Optional.empty())); + Collection groups = Arrays.asList( + new ConsumerGroupListing("g1", true), + new ConsumerGroupListing("g2", false), + new ConsumerGroupListing("g3", false), + new ConsumerGroupListing("g4", false)); Map offsetsForGroup1 = new HashMap<>(); Map offsetsForGroup2 = new HashMap<>(); Map offsetsForGroup3 = new HashMap<>(); @@ -201,7 +200,7 @@ public void testFindConsumerGroupsInCommonScenarios() throws Exception { groupToOffsets.put("g1", offsetsForGroup1); groupToOffsets.put("g2", offsetsForGroup2); groupToOffsets.put("g3", offsetsForGroup3); - doReturn(groupToOffsets).when(connector).listConsumerGroupOffsets(List.of("g1", "g2", "g3")); + doReturn(groupToOffsets).when(connector).listConsumerGroupOffsets(Arrays.asList("g1", "g2", "g3")); Set groupFound = connector.findConsumerGroups(); Set verifiedSet = new HashSet<>(); @@ -213,8 +212,8 @@ public void testFindConsumerGroupsInCommonScenarios() throws Exception { @Test public void testAlterOffsetsIncorrectPartitionKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( - Map.of("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( + Collections.singletonMap("unused_partition_key", "unused_partition_value"), SOURCE_OFFSET ))); @@ -229,7 +228,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( partition, SOURCE_OFFSET )); @@ -238,7 +237,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid assertTrue(alterOffsets.apply(validPartition)); - for (String key : List.of(CONSUMER_GROUP_ID_KEY, TOPIC_KEY, PARTITION_KEY)) { + for (String key : Arrays.asList(CONSUMER_GROUP_ID_KEY, TOPIC_KEY, PARTITION_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -250,7 +249,7 @@ public void testAlterOffsetsInvalidPartitionPartition() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); Map partition = sourcePartition("consumer-app-2", "t", 3); partition.put(PARTITION_KEY, "a string"); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( partition, SOURCE_OFFSET ))); @@ -274,9 +273,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("consumer-app-5", "t1", 2), - Map.of("unused_offset_key", 0) + Collections.singletonMap("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -285,7 +284,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( + Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( sourcePartition("consumer-app-6", "t", 5), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ -306,7 +305,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorCheckpointConnector connector = new MirrorCheckpointConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("consumer-app-7", "t2", 0), SOURCE_OFFSET ); @@ -315,7 +314,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Map.of())); + assertTrue(connector.alterOffsets(null, Collections.emptyMap())); } @Test @@ -335,8 +334,8 @@ public void testAlterOffsetsTombstones() { assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Map.of())); - assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); + assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String consumerGroupId, String topic, int partition) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java index 7ce554d5f663d..f4cc1e4ced6a4 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorCheckpointTaskTest.java @@ -22,12 +22,12 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalLong; -import java.util.Set; import java.util.concurrent.ExecutionException; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,8 +43,8 @@ public class MirrorCheckpointTaskTest { @Test public void testDownstreamTopicRenaming() { MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), null, Set.of(), Map.of(), - new CheckpointStore(Map.of())); + new DefaultReplicationPolicy(), null, Collections.emptySet(), Collections.emptyMap(), + new CheckpointStore(Collections.emptyMap())); assertEquals(new TopicPartition("source1.topic3", 4), mirrorCheckpointTask.renameTopicPartition(new TopicPartition("topic3", 4)), "Renaming source1.topic3 failed"); @@ -65,8 +65,8 @@ public void testCheckpoint() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), - Map.of(), new CheckpointStore(Map.of())); + new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), + Collections.emptyMap(), new CheckpointStore(Collections.emptyMap())); offsetSyncStore.sync(new TopicPartition("topic1", 2), t1UpstreamOffset, t1DownstreamOffset); offsetSyncStore.sync(new TopicPartition("target2.topic5", 6), t2UpstreamOffset, t2DownstreamOffset); Optional optionalCheckpoint1 = mirrorCheckpointTask.checkpoint("group9", new TopicPartition("topic1", 2), @@ -166,7 +166,7 @@ public void testSyncOffset() throws ExecutionException, InterruptedException { checkpointsPerConsumerGroup.put(consumer2, checkpointMapC2); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), null, Set.of(), idleConsumerGroupsOffset, + new DefaultReplicationPolicy(), null, Collections.emptySet(), idleConsumerGroupsOffset, new CheckpointStore(checkpointsPerConsumerGroup)); Map> output = mirrorCheckpointTask.syncGroupOffset(); @@ -197,7 +197,7 @@ public void testSyncOffsetForTargetGroupWithNullOffsetAndMetadata() throws Execu checkpointsPerConsumerGroup.put(consumer, checkpointMap); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source", "target", - new DefaultReplicationPolicy(), null, Set.of(), idleConsumerGroupsOffset, + new DefaultReplicationPolicy(), null, Collections.emptySet(), idleConsumerGroupsOffset, new CheckpointStore(checkpointsPerConsumerGroup)); Map> output = mirrorCheckpointTask.syncGroupOffset(); @@ -210,8 +210,8 @@ public void testNoCheckpointForTopicWithoutOffsetSyncs() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), - new CheckpointStore(Map.of())); + new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), + new CheckpointStore(Collections.emptyMap())); offsetSyncStore.sync(new TopicPartition("topic1", 0), 3L, 4L); Optional checkpoint1 = mirrorCheckpointTask.checkpoint("group9", new TopicPartition("topic1", 1), @@ -227,8 +227,8 @@ public void testNoCheckpointForTopicWithNullOffsetAndMetadata() { OffsetSyncStoreTest.FakeOffsetSyncStore offsetSyncStore = new OffsetSyncStoreTest.FakeOffsetSyncStore(); offsetSyncStore.start(true); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), - new CheckpointStore(Map.of())); + new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), + new CheckpointStore(Collections.emptyMap())); offsetSyncStore.sync(new TopicPartition("topic1", 0), 1L, 3L); Optional checkpoint = mirrorCheckpointTask.checkpoint("g1", new TopicPartition("topic1", 0), null); assertFalse(checkpoint.isPresent()); @@ -240,7 +240,7 @@ public void testCheckpointRecordsMonotonicIfStoreRewinds() { offsetSyncStore.start(true); Map> checkpointsPerConsumerGroup = new HashMap<>(); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), + new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), new CheckpointStore(checkpointsPerConsumerGroup)); TopicPartition tp = new TopicPartition("topic1", 0); TopicPartition targetTP = new TopicPartition("source1.topic1", 0); @@ -277,7 +277,7 @@ public void testCheckpointRecordsMonotonicIfStoreRewinds() { private Map assertCheckpointForTopic( MirrorCheckpointTask task, TopicPartition tp, TopicPartition remoteTp, long consumerGroupOffset, boolean truth ) { - Map consumerGroupOffsets = Map.of(tp, new OffsetAndMetadata(consumerGroupOffset)); + Map consumerGroupOffsets = Collections.singletonMap(tp, new OffsetAndMetadata(consumerGroupOffset)); Map checkpoints = task.checkpointsForGroup(consumerGroupOffsets, "g1"); assertEquals(truth, checkpoints.containsKey(remoteTp), "should" + (truth ? "" : " not") + " emit offset sync"); return checkpoints; @@ -299,8 +299,8 @@ void backingStoreStart() { offsetSyncStore.start(false); MirrorCheckpointTask mirrorCheckpointTask = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore, Set.of(), Map.of(), - new CheckpointStore(Map.of())); + new DefaultReplicationPolicy(), offsetSyncStore, Collections.emptySet(), Collections.emptyMap(), + new CheckpointStore(Collections.emptyMap())); // Generate a checkpoint for upstream offset 250, and assert it maps to downstream 201 // (as nearest mapping in OffsetSyncStore is 200->200) @@ -327,7 +327,7 @@ void backingStoreStart() { Map> checkpointsPerConsumerGroup = new HashMap<>(); checkpointsPerConsumerGroup.put("group1", checkpoints); MirrorCheckpointTask mirrorCheckpointTask2 = new MirrorCheckpointTask("source1", "target2", - new DefaultReplicationPolicy(), offsetSyncStore2, Set.of(), Map.of(), + new DefaultReplicationPolicy(), offsetSyncStore2, Collections.emptySet(), Collections.emptyMap(), new CheckpointStore(checkpointsPerConsumerGroup)); // Upstream offsets 250 and 370 now have the closest downstream value of 176, but this is @@ -354,14 +354,14 @@ public void testCheckpointStoreInitialized() throws InterruptedException { MirrorCheckpointTask task = new MirrorCheckpointTask("source1", "target2", new DefaultReplicationPolicy(), new OffsetSyncStoreTest.FakeOffsetSyncStore(), - Set.of("group"), - Map.of(), + Collections.singleton("group"), + Collections.emptyMap(), checkpointStore) { @Override List sourceRecordsForGroup(String group) { - SourceRecord sr = new SourceRecord(Map.of(), Map.of(), "", 0, null, null); - return List.of(sr); + SourceRecord sr = new SourceRecord(Collections.emptyMap(), Collections.emptyMap(), "", 0, null, null); + return Collections.singletonList(sr); } }; diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java index 8c5be805a936c..190f749d4e71b 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartBeatConnectorTest.java @@ -20,6 +20,7 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -66,8 +67,8 @@ public void testReplicationDisabled() { @Test public void testAlterOffsetsIncorrectPartitionKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( - Map.of("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( + Collections.singletonMap("unused_partition_key", "unused_partition_value"), SOURCE_OFFSET ))); @@ -82,7 +83,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( partition, SOURCE_OFFSET )); @@ -91,7 +92,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid assertTrue(alterOffsets.apply(validPartition)); - for (String key : List.of(SOURCE_CLUSTER_ALIAS_KEY, TARGET_CLUSTER_ALIAS_KEY)) { + for (String key : Arrays.asList(SOURCE_CLUSTER_ALIAS_KEY, TARGET_CLUSTER_ALIAS_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -116,9 +117,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("primary", "backup"), - Map.of("unused_offset_key", 0) + Collections.singletonMap("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -127,7 +128,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( + Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( sourcePartition("primary", "backup"), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ -148,7 +149,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorHeartbeatConnector connector = new MirrorHeartbeatConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("primary", "backup"), SOURCE_OFFSET ); @@ -157,7 +158,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Map.of())); + assertTrue(connector.alterOffsets(null, Collections.emptyMap())); } @Test @@ -177,8 +178,8 @@ public void testAlterOffsetsTombstones() { assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Map.of())); - assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); + assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String sourceClusterAlias, String targetClusterAlias) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java index 8faf52a12b6b1..0ffe2635d1491 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorHeartbeatTaskTest.java @@ -35,9 +35,9 @@ public void testPollCreatesRecords() throws InterruptedException { List records = heartbeatTask.poll(); assertEquals(1, records.size()); Map sourcePartition = records.iterator().next().sourcePartition(); - assertEquals("testSource", sourcePartition.get(Heartbeat.SOURCE_CLUSTER_ALIAS_KEY), + assertEquals(sourcePartition.get(Heartbeat.SOURCE_CLUSTER_ALIAS_KEY), "testSource", "sourcePartition's " + Heartbeat.SOURCE_CLUSTER_ALIAS_KEY + " record was not created"); - assertEquals("testTarget", sourcePartition.get(Heartbeat.TARGET_CLUSTER_ALIAS_KEY), + assertEquals(sourcePartition.get(Heartbeat.TARGET_CLUSTER_ALIAS_KEY), "testTarget", "sourcePartition's " + Heartbeat.TARGET_CLUSTER_ALIAS_KEY + " record was not created"); } } diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java index ddd22b0b8ad8e..638db3de370a3 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorMakerConfigTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Locale; @@ -105,7 +106,7 @@ public void testClientConfigProperties() { "replication.policy.separator is picked up in MirrorClientConfig"); assertEquals("b__topic1", aClientConfig.replicationPolicy().formatRemoteTopic("b", "topic1"), "replication.policy.separator is honored"); - assertEquals(Arrays.stream(clusterABootstrap.split(",")).map(String::trim).toList(), aClientConfig.adminConfig().get("bootstrap.servers"), + assertEquals(clusterABootstrap, aClientConfig.adminConfig().get("bootstrap.servers"), "client configs include bootstrap.servers"); try (ForwardingAdmin forwardingAdmin = aClientConfig.forwardingAdmin(aClientConfig.adminConfig())) { assertEquals(ForwardingAdmin.class.getName(), forwardingAdmin.getClass().getName(), @@ -151,11 +152,11 @@ public void testIncludesConnectorConfigProperties() { MirrorSourceConfig sourceConfig = new MirrorSourceConfig(connectorProps); assertEquals(100, (int) sourceConfig.getInt("tasks.max"), "Connector properties like tasks.max should be passed through to underlying Connectors."); - assertEquals(List.of("topic-1"), sourceConfig.getList("topics"), + assertEquals(Collections.singletonList("topic-1"), sourceConfig.getList("topics"), "Topics include should be passed through to underlying Connectors."); - assertEquals(List.of("property-3"), sourceConfig.getList("config.properties.exclude"), + assertEquals(Collections.singletonList("property-3"), sourceConfig.getList("config.properties.exclude"), "Config properties exclude should be passed through to underlying Connectors."); - assertEquals(List.of("FakeMetricsReporter"), sourceConfig.getList("metric.reporters"), + assertEquals(Collections.singletonList("FakeMetricsReporter"), sourceConfig.getList("metric.reporters"), "Metrics reporters should be passed through to underlying Connectors."); assertEquals("DefaultTopicFilter", sourceConfig.getClass("topic.filter.class").getSimpleName(), "Filters should be passed through to underlying Connectors."); @@ -165,7 +166,7 @@ public void testIncludesConnectorConfigProperties() { "Unknown properties should not be passed through to Connectors."); MirrorCheckpointConfig checkpointConfig = new MirrorCheckpointConfig(connectorProps); - assertEquals(List.of("group-2"), checkpointConfig.getList("groups"), + assertEquals(Collections.singletonList("group-2"), checkpointConfig.getList("groups"), "Groups include should be passed through to underlying Connectors."); } @@ -179,11 +180,11 @@ public void testIncludesTopicFilterProperties() { SourceAndTarget sourceAndTarget = new SourceAndTarget("source", "target"); Map connectorProps = mirrorConfig.connectorBaseConfig(sourceAndTarget, MirrorSourceConnector.class); - DefaultTopicFilter.TopicFilterConfig filterConfig = + DefaultTopicFilter.TopicFilterConfig filterConfig = new DefaultTopicFilter.TopicFilterConfig(connectorProps); - assertEquals(List.of("topic1", "topic2"), filterConfig.getList("topics"), + assertEquals(Arrays.asList("topic1", "topic2"), filterConfig.getList("topics"), "source->target.topics should be passed through to TopicFilters."); - assertEquals(List.of("topic3"), filterConfig.getList("topics.exclude"), + assertEquals(Collections.singletonList("topic3"), filterConfig.getList("topics.exclude"), "source->target.topics.exclude should be passed through to TopicFilters."); } @@ -317,10 +318,7 @@ public void testInvalidSecurityProtocol() { @Test public void testClientInvalidSecurityProtocol() { ConfigException ce = assertThrows(ConfigException.class, - () -> new MirrorClientConfig(makeProps( - CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc", - CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" - ))); + () -> new MirrorClientConfig(makeProps(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, "abc"))); assertTrue(ce.getMessage().contains(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -328,9 +326,7 @@ public void testClientInvalidSecurityProtocol() { public void testCaseInsensitiveSecurityProtocol() { final String saslSslLowerCase = SecurityProtocol.SASL_SSL.name.toLowerCase(Locale.ROOT); final MirrorClientConfig config = new MirrorClientConfig(makeProps( - CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase, - CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" - )); + CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, saslSslLowerCase)); assertEquals(saslSslLowerCase, config.originalsStrings().get(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG)); } @@ -371,7 +367,7 @@ public void testLazyConfigResolution() { public static class FakeConfigProvider implements ConfigProvider { - Map secrets = Map.of("password", "secret2"); + Map secrets = Collections.singletonMap("password", "secret2"); @Override public void configure(Map props) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java index c7f7f4e19a51c..ccdc7a878a505 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConfigTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -37,7 +38,7 @@ public class MirrorSourceConfigTest { @Test public void testTaskConfigTopicPartitions() { - List topicPartitions = List.of(new TopicPartition("topic-1", 2), + List topicPartitions = Arrays.asList(new TopicPartition("topic-1", 2), new TopicPartition("topic-3", 4), new TopicPartition("topic-5", 6)); MirrorSourceConfig config = new MirrorSourceConfig(makeProps()); Map props = config.taskConfigForTopicPartitions(topicPartitions, 1); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java index 1d106d6deaaa0..21bcc7cbad5e9 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceConnectorTest.java @@ -44,6 +44,7 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -54,6 +55,7 @@ import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.function.Function; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.ISOLATION_LEVEL_CONFIG; import static org.apache.kafka.connect.mirror.MirrorConnectorConfig.CONSUMER_CLIENT_PREFIX; @@ -104,7 +106,7 @@ public void testReplicatesHeartbeatsDespiteFilter() { assertTrue(connector.shouldReplicateTopic("heartbeats"), "should replicate heartbeats"); assertTrue(connector.shouldReplicateTopic("us-west.heartbeats"), "should replicate upstream heartbeats"); - Map configs = Map.of(DefaultReplicationPolicy.SEPARATOR_CONFIG, "_"); + Map configs = Collections.singletonMap(DefaultReplicationPolicy.SEPARATOR_CONFIG, "_"); defaultReplicationPolicy.configure(configs); assertTrue(connector.shouldReplicateTopic("heartbeats"), "should replicate heartbeats"); assertFalse(connector.shouldReplicateTopic("us-west.heartbeats"), "should not consider this topic as a heartbeats topic"); @@ -182,15 +184,15 @@ public void testAclTransformation() { String expectedRemoteTopicName = "source" + DefaultReplicationPolicy.SEPARATOR_DEFAULT + allowAllAclBinding.pattern().name(); assertEquals(expectedRemoteTopicName, processedAllowAllAclBinding.pattern().name(), "should change topic name"); - assertEquals(AclOperation.READ, processedAllowAllAclBinding.entry().operation(), "should change ALL to READ"); - assertEquals(AclPermissionType.ALLOW, processedAllowAllAclBinding.entry().permissionType(), "should not change ALLOW"); + assertEquals(processedAllowAllAclBinding.entry().operation(), AclOperation.READ, "should change ALL to READ"); + assertEquals(processedAllowAllAclBinding.entry().permissionType(), AclPermissionType.ALLOW, "should not change ALLOW"); AclBinding denyAllAclBinding = new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test_topic", PatternType.LITERAL), new AccessControlEntry("kafka", "", AclOperation.ALL, AclPermissionType.DENY)); AclBinding processedDenyAllAclBinding = connector.targetAclBinding(denyAllAclBinding); - assertEquals(AclOperation.ALL, processedDenyAllAclBinding.entry().operation(), "should not change ALL"); - assertEquals(AclPermissionType.DENY, processedDenyAllAclBinding.entry().permissionType(), "should not change DENY"); + assertEquals(processedDenyAllAclBinding.entry().operation(), AclOperation.ALL, "should not change ALL"); + assertEquals(processedDenyAllAclBinding.entry().permissionType(), AclPermissionType.DENY, "should not change DENY"); } @Test @@ -278,7 +280,7 @@ public void testConfigPropertyFiltering() { new DefaultReplicationPolicy(), x -> true, new DefaultConfigPropertyFilter()); ArrayList entries = new ArrayList<>(); entries.add(new ConfigEntry("name-1", "value-1")); - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, true); @@ -298,7 +300,7 @@ public void testConfigPropertyFilteringWithAlterConfigs() { List entries = new ArrayList<>(); entries.add(new ConfigEntry("name-1", "value-1")); // When "use.defaults.from" set to "target" by default, the config with default value should be excluded - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, false); @@ -313,7 +315,7 @@ public void testConfigPropertyFilteringWithAlterConfigs() { @Test @Deprecated public void testConfigPropertyFilteringWithAlterConfigsAndSourceDefault() { - Map filterConfig = Map.of(DefaultConfigPropertyFilter.USE_DEFAULTS_FROM, "source"); + Map filterConfig = Collections.singletonMap(DefaultConfigPropertyFilter.USE_DEFAULTS_FROM, "source"); DefaultConfigPropertyFilter filter = new DefaultConfigPropertyFilter(); filter.configure(filterConfig); @@ -322,7 +324,7 @@ public void testConfigPropertyFilteringWithAlterConfigsAndSourceDefault() { List entries = new ArrayList<>(); entries.add(new ConfigEntry("name-1", "value-1")); // When "use.defaults.from" explicitly set to "source", the config with default value should be replicated - entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, List.of(), ConfigEntry.ConfigType.STRING, "")); + entries.add(new ConfigEntry("name-2", "value-2", ConfigEntry.ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), ConfigEntry.ConfigType.STRING, "")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); Config targetConfig = connector.targetConfig(config, false); @@ -356,7 +358,7 @@ public void testNewTopicConfigs() throws Exception { entries.add(new ConfigEntry("exclude_param.param1", "value-param1")); entries.add(new ConfigEntry("min.insync.replicas", "2")); Config config = new Config(entries); - doReturn(Map.of(topic, config)).when(connector).describeTopicConfigs(any()); + doReturn(Collections.singletonMap(topic, config)).when(connector).describeTopicConfigs(any()); doAnswer(invocation -> { Map newTopics = invocation.getArgument(0); assertNotNull(newTopics.get("source." + topic)); @@ -373,7 +375,7 @@ public void testNewTopicConfigs() throws Exception { assertNull(targetConfig.get(prop2), "should not replicate excluded properties " + prop2); return null; }).when(connector).createNewTopics(any()); - connector.createNewTopics(Set.of(topic), Map.of(topic, 1L)); + connector.createNewTopics(Collections.singleton(topic), Collections.singletonMap(topic, 1L)); verify(connector).createNewTopics(any(), any()); } @@ -431,15 +433,15 @@ public void testRefreshTopicPartitions() throws Exception { connector.initialize(mock(ConnectorContext.class)); connector = spy(connector); - Config topicConfig = new Config(List.of( + Config topicConfig = new Config(Arrays.asList( new ConfigEntry("cleanup.policy", "compact"), new ConfigEntry("segment.bytes", "100"))); - Map configs = Map.of("topic", topicConfig); + Map configs = Collections.singletonMap("topic", topicConfig); - List sourceTopicPartitions = List.of(new TopicPartition("topic", 0)); + List sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); - doReturn(List.of()).when(connector).findTargetTopicPartitions(); - doReturn(configs).when(connector).describeTopicConfigs(Set.of("topic")); + doReturn(Collections.emptyList()).when(connector).findTargetTopicPartitions(); + doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("topic")); doNothing().when(connector).createNewTopics(any()); connector.refreshTopicPartitions(); @@ -458,7 +460,7 @@ public void testRefreshTopicPartitions() throws Exception { verify(connector, times(2)).createNewTopics(eq(expectedNewTopics)); verify(connector, times(0)).createNewPartitions(any()); - List targetTopicPartitions = List.of(new TopicPartition("source.topic", 0)); + List targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0)); doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions(); connector.refreshTopicPartitions(); @@ -473,17 +475,17 @@ public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception { connector.initialize(mock(ConnectorContext.class)); connector = spy(connector); - Config topicConfig = new Config(List.of( + Config topicConfig = new Config(Arrays.asList( new ConfigEntry("cleanup.policy", "compact"), new ConfigEntry("segment.bytes", "100"))); - Map configs = Map.of("source.topic", topicConfig); + Map configs = Collections.singletonMap("source.topic", topicConfig); - List sourceTopicPartitions = List.of(); - List targetTopicPartitions = List.of(new TopicPartition("source.topic", 0)); + List sourceTopicPartitions = Collections.emptyList(); + List targetTopicPartitions = Collections.singletonList(new TopicPartition("source.topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); doReturn(targetTopicPartitions).when(connector).findTargetTopicPartitions(); - doReturn(configs).when(connector).describeTopicConfigs(Set.of("source.topic")); - doReturn(Map.of()).when(connector).describeTopicConfigs(Set.of()); + doReturn(configs).when(connector).describeTopicConfigs(Collections.singleton("source.topic")); + doReturn(Collections.emptyMap()).when(connector).describeTopicConfigs(Collections.emptySet()); doNothing().when(connector).createNewTopics(any()); doNothing().when(connector).createNewPartitions(any()); @@ -492,7 +494,7 @@ public void testRefreshTopicPartitionsTopicOnTargetFirst() throws Exception { connector.refreshTopicPartitions(); verify(connector, times(0)).computeAndCreateTopicPartitions(); - sourceTopicPartitions = List.of(new TopicPartition("topic", 0)); + sourceTopicPartitions = Collections.singletonList(new TopicPartition("topic", 0)); doReturn(sourceTopicPartitions).when(connector).findSourceTopicPartitions(); // when partitions are added to the source cluster, reconfiguration is triggered @@ -618,7 +620,7 @@ private Optional validateProperty(String name, Map List results = new MirrorSourceConnector().validate(props) .configValues().stream() .filter(cv -> name.equals(cv.name())) - .toList(); + .collect(Collectors.toList()); assertTrue(results.size() <= 1, "Connector produced multiple config values for '" + name + "' property"); @@ -633,8 +635,8 @@ private Optional validateProperty(String name, Map @Test public void testAlterOffsetsIncorrectPartitionKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( - Map.of("unused_partition_key", "unused_partition_value"), + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( + Collections.singletonMap("unused_partition_key", "unused_partition_value"), MirrorUtils.wrapOffset(10) ))); @@ -649,7 +651,7 @@ public void testAlterOffsetsIncorrectPartitionKey() { public void testAlterOffsetsMissingPartitionKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Map.of( + Function, Boolean> alterOffsets = partition -> connector.alterOffsets(null, Collections.singletonMap( partition, MirrorUtils.wrapOffset(64) )); @@ -658,7 +660,7 @@ public void testAlterOffsetsMissingPartitionKey() { // Sanity check to make sure our valid partition is actually valid assertTrue(alterOffsets.apply(validPartition)); - for (String key : List.of(SOURCE_CLUSTER_KEY, TOPIC_KEY, PARTITION_KEY)) { + for (String key : Arrays.asList(SOURCE_CLUSTER_KEY, TOPIC_KEY, PARTITION_KEY)) { Map invalidPartition = new HashMap<>(validPartition); invalidPartition.remove(key); assertThrows(ConnectException.class, () -> alterOffsets.apply(invalidPartition)); @@ -670,7 +672,7 @@ public void testAlterOffsetsInvalidPartitionPartition() { MirrorSourceConnector connector = new MirrorSourceConnector(); Map partition = sourcePartition("t", 3, "us-west-2"); partition.put(PARTITION_KEY, "a string"); - assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Map.of( + assertThrows(ConnectException.class, () -> connector.alterOffsets(null, Collections.singletonMap( partition, MirrorUtils.wrapOffset(49) ))); @@ -694,9 +696,9 @@ public void testAlterOffsetsMultiplePartitions() { public void testAlterOffsetsIncorrectOffsetKey() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("t1", 2, "backup"), - Map.of("unused_offset_key", 0) + Collections.singletonMap("unused_offset_key", 0) ); assertThrows(ConnectException.class, () -> connector.alterOffsets(null, offsets)); } @@ -705,7 +707,7 @@ public void testAlterOffsetsIncorrectOffsetKey() { public void testAlterOffsetsOffsetValues() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Function alterOffsets = offset -> connector.alterOffsets(null, Map.of( + Function alterOffsets = offset -> connector.alterOffsets(null, Collections.singletonMap( sourcePartition("t", 5, "backup"), Collections.singletonMap(MirrorUtils.OFFSET_KEY, offset) )); @@ -726,7 +728,7 @@ public void testAlterOffsetsOffsetValues() { public void testSuccessfulAlterOffsets() { MirrorSourceConnector connector = new MirrorSourceConnector(); - Map, Map> offsets = Map.of( + Map, Map> offsets = Collections.singletonMap( sourcePartition("t2", 0, "backup"), MirrorUtils.wrapOffset(5) ); @@ -735,7 +737,7 @@ public void testSuccessfulAlterOffsets() { // since it could indicate that the offsets were reset previously or that no offsets have been committed yet // (for a reset operation) assertTrue(connector.alterOffsets(null, offsets)); - assertTrue(connector.alterOffsets(null, Map.of())); + assertTrue(connector.alterOffsets(null, Collections.emptyMap())); } @Test @@ -755,8 +757,8 @@ public void testAlterOffsetsTombstones() { assertTrue(() -> alterOffsets.apply(partition)); assertTrue(() -> alterOffsets.apply(null)); - assertTrue(() -> alterOffsets.apply(Map.of())); - assertTrue(() -> alterOffsets.apply(Map.of("unused_partition_key", "unused_partition_value"))); + assertTrue(() -> alterOffsets.apply(Collections.emptyMap())); + assertTrue(() -> alterOffsets.apply(Collections.singletonMap("unused_partition_key", "unused_partition_value"))); } private static Map sourcePartition(String topic, int partition, String sourceClusterAlias) { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java index 4a67685537824..335e5e327a9a6 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorSourceTaskTest.java @@ -20,6 +20,7 @@ import org.apache.kafka.clients.consumer.ConsumerRecords; import org.apache.kafka.clients.consumer.KafkaConsumer; import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.header.Header; @@ -35,7 +36,9 @@ import org.junit.jupiter.api.Test; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -222,14 +225,14 @@ public void testSeekBehaviorDuringStart() { OffsetStorageReader mockOffsetStorageReader = mock(OffsetStorageReader.class); when(mockSourceTaskContext.offsetStorageReader()).thenReturn(mockOffsetStorageReader); - Set topicPartitions = Set.of( + Set topicPartitions = new HashSet<>(Arrays.asList( new TopicPartition("previouslyReplicatedTopic", 8), new TopicPartition("previouslyReplicatedTopic1", 0), new TopicPartition("previouslyReplicatedTopic", 1), new TopicPartition("newTopicToReplicate1", 1), new TopicPartition("newTopicToReplicate1", 4), new TopicPartition("newTopicToReplicate2", 0) - ); + )); long arbitraryCommittedOffset = 4L; long offsetToSeek = arbitraryCommittedOffset + 1L; @@ -280,6 +283,8 @@ public void testCommitRecordWithNullMetadata() { @SuppressWarnings("unchecked") KafkaConsumer consumer = mock(KafkaConsumer.class); + @SuppressWarnings("unchecked") + KafkaProducer producer = mock(KafkaProducer.class); MirrorSourceMetrics metrics = mock(MirrorSourceMetrics.class); String sourceClusterName = "cluster1"; diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java index daa818e293e79..e6de8a58f7b26 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/MirrorUtilsTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.Map; import java.util.concurrent.ExecutionException; @@ -50,7 +51,7 @@ public class MirrorUtilsTest { @Test public void testCreateCompactedTopic() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenReturn(null); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -63,7 +64,7 @@ public void testCreateCompactedTopic() throws Exception { @Test public void testCreateCompactedTopicAlreadyExists() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TopicExistsException("topic exists"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -76,7 +77,7 @@ public void testCreateCompactedTopicAlreadyExists() throws Exception { @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithUnsupportedVersionException() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new UnsupportedVersionException("unsupported"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -89,7 +90,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithUnsupportedVersi @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthorizationException() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new ClusterAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -102,7 +103,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithClusterAuthoriza @Test public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithTopicAuthorizationException() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TopicAuthorizationException("not authorised"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -115,7 +116,7 @@ public void testCreateCompactedTopicAssumeTopicAlreadyExistsWithTopicAuthorizati @Test public void testCreateCompactedTopicFailsWithInvalidConfigurationException() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new InvalidConfigurationException("wrong config"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); @@ -129,7 +130,7 @@ public void testCreateCompactedTopicFailsWithInvalidConfigurationException() thr @Test public void testCreateCompactedTopicFailsWithTimeoutException() throws Exception { - Map> values = Map.of(TOPIC, future); + Map> values = Collections.singletonMap(TOPIC, future); when(future.get()).thenThrow(new ExecutionException(new TimeoutException("Timeout"))); when(ctr.values()).thenReturn(values); when(admin.createTopics(any(), any())).thenReturn(ctr); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java index 19c8d9d39524d..9a6b10920a058 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/OffsetSyncWriterTest.java @@ -55,11 +55,11 @@ public void testMaybeQueueOffsetSyncs() { offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 0, 1); assertFalse(offsetSyncWriter.getDelayedOffsetSyncs().containsKey(topicPartition)); assertTrue(offsetSyncWriter.getPendingOffsetSyncs().containsKey(topicPartition)); - assertEquals(1, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); + assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 1); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 1, 2); assertTrue(offsetSyncWriter.getDelayedOffsetSyncs().containsKey(topicPartition)); - assertEquals(1, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); + assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 1); } @Test @@ -71,7 +71,7 @@ public void testFirePendingOffsetSyncs() { OffsetSyncWriter offsetSyncWriter = new OffsetSyncWriter(producer, topicName, outstandingOffsetSyncs, maxOffsetLag); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 0, 100); - assertEquals(100, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); + assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 100); offsetSyncWriter.firePendingOffsetSyncs(); @@ -85,7 +85,7 @@ public void testFirePendingOffsetSyncs() { verify(producer, times(1)).send(any(), any()); offsetSyncWriter.maybeQueueOffsetSyncs(topicPartition, 2, 102); - assertEquals(102, offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset); + assertEquals(offsetSyncWriter.partitionStates().get(topicPartition).lastSyncDownstreamOffset, 102); offsetSyncWriter.firePendingOffsetSyncs(); // in-flight offset syncs; will not try to send remaining offset syncs immediately diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java index b55673810a427..1c08cbaf72ef9 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/clients/admin/FakeLocalMetadataStore.java @@ -66,7 +66,7 @@ public static void updatePartitionCount(String topic, int newPartitionCount) { */ public static void updateTopicConfig(String topic, Config newConfig) { ConcurrentHashMap topicConfigs = FakeLocalMetadataStore.ALL_TOPICS.getOrDefault(topic, new ConcurrentHashMap<>()); - newConfig.entries().forEach(configEntry -> { + newConfig.entries().stream().forEach(configEntry -> { if (configEntry.name() != null) { if (configEntry.value() != null) { log.debug("Topic '{}' update config '{}' to '{}'", topic, configEntry.name(), configEntry.value()); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java index 1d1dd0feea341..2ba4438bdba9b 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/DedicatedMirrorIntegrationTest.java @@ -45,6 +45,8 @@ import org.slf4j.LoggerFactory; import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -56,6 +58,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; +import java.util.stream.Collectors; import static org.apache.kafka.clients.consumer.ConsumerConfig.AUTO_OFFSET_RESET_CONFIG; import static org.apache.kafka.connect.mirror.MirrorMaker.CONNECTOR_CLASSES; @@ -142,7 +145,7 @@ public void testSingleNodeCluster() throws Exception { final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap<>() {{ + Map mmProps = new HashMap() {{ put("dedicated.mode.enable.internal.rest", "false"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -204,7 +207,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { final String ab = a + "->" + b; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap<>() {{ + Map mmProps = new HashMap() {{ put("dedicated.mode.enable.internal.rest", "false"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -227,7 +230,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { // Bring up a single-node cluster final MirrorMaker mm = startMirrorMaker("no-offset-syncing", mmProps); final SourceAndTarget sourceAndTarget = new SourceAndTarget(a, b); - awaitMirrorMakerStart(mm, sourceAndTarget, List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class)); + awaitMirrorMakerStart(mm, sourceAndTarget, Arrays.asList(MirrorSourceConnector.class, MirrorHeartbeatConnector.class)); // wait for mirror source and heartbeat connectors to start a task awaitConnectorTasksStart(mm, MirrorHeartbeatConnector.class, sourceAndTarget); @@ -253,7 +256,7 @@ public void testClusterWithEmitOffsetDisabled() throws Exception { .stream() .filter(Optional::isPresent) .map(Optional::get) - .toList(); + .collect(Collectors.toList()); assertTrue(offsetSyncTopic.isEmpty()); } @@ -288,7 +291,7 @@ public void testMultiNodeCluster() throws Exception { final String ba = b + "->" + a; final String testTopicPrefix = "test-topic-"; - Map mmProps = new HashMap<>() {{ + Map mmProps = new HashMap() {{ put("dedicated.mode.enable.internal.rest", "true"); put("listeners", "http://localhost:0"); // Refresh topics very frequently to quickly pick up on topics that are created @@ -448,8 +451,8 @@ private void awaitTaskConfigurations(MirrorMaker mm, } private void awaitTopicContent(EmbeddedKafkaCluster cluster, String clusterName, String topic, int numMessages) throws Exception { - try (Consumer consumer = cluster.createConsumer(Map.of(AUTO_OFFSET_RESET_CONFIG, "earliest"))) { - consumer.subscribe(Set.of(topic)); + try (Consumer consumer = cluster.createConsumer(Collections.singletonMap(AUTO_OFFSET_RESET_CONFIG, "earliest"))) { + consumer.subscribe(Collections.singleton(topic)); AtomicInteger messagesRead = new AtomicInteger(0); waitForCondition( () -> { diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java index 1d4339f397796..0a6ab4bab158c 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/IdentityReplicationIntegrationTest.java @@ -21,7 +21,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Tag; -import java.util.Map; +import java.util.HashMap; /** * Tests MM2 replication and failover logic for {@link IdentityReplicationPolicy}. @@ -36,10 +36,10 @@ public class IdentityReplicationIntegrationTest extends MirrorConnectorsIntegrat @BeforeEach public void startClusters() throws Exception { replicateBackupToPrimary = false; - super.startClusters(Map.of( - "replication.policy.class", IdentityReplicationPolicy.class.getName(), - "topics", "test-topic-.*" - )); + super.startClusters(new HashMap() {{ + put("replication.policy.class", IdentityReplicationPolicy.class.getName()); + put("topics", "test-topic-.*"); + }}); } /* diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java index 6d1d50f558bab..b278285e60651 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationBaseTest.java @@ -67,7 +67,9 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -125,7 +127,7 @@ public class MirrorConnectorsIntegrationBaseTest { protected static final Duration CONSUMER_POLL_TIMEOUT = Duration.ofMillis(500L); protected static final String PRIMARY_CLUSTER_ALIAS = "primary"; protected static final String BACKUP_CLUSTER_ALIAS = "backup"; - protected static final List> CONNECTOR_LIST = List.of( + protected static final List> CONNECTOR_LIST = Arrays.asList( MirrorSourceConnector.class, MirrorCheckpointConnector.class, MirrorHeartbeatConnector.class); @@ -152,7 +154,7 @@ public class MirrorConnectorsIntegrationBaseTest { @BeforeEach public void startClusters() throws Exception { - startClusters(new HashMap<>() {{ + startClusters(new HashMap() {{ put("topics", "test-topic-.*, primary.test-topic-.*, backup.test-topic-.*"); }}); } @@ -241,7 +243,7 @@ public void startClusters(Map additionalMM2Config) throws Except waitForTopicCreated(backup, "mm2-configs.primary.internal"); waitForTopicCreated(backup, "test-topic-1"); waitForTopicCreated(primary, "test-topic-1"); - warmUpConsumer(Map.of("group.id", "consumer-group-dummy")); + warmUpConsumer(Collections.singletonMap("group.id", "consumer-group-dummy")); log.info(PRIMARY_CLUSTER_ALIAS + " REST service: {}", primary.endpointForResource("connectors")); log.info(BACKUP_CLUSTER_ALIAS + " REST service: {}", backup.endpointForResource("connectors")); @@ -288,14 +290,14 @@ public void testReplication() throws Exception { } String reverseTopic1 = remoteTopicName("test-topic-1", BACKUP_CLUSTER_ALIAS); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // warm up consumers before starting the connectors, so we don't need to wait for discovery warmUpConsumer(consumerProps); mm2Config = new MirrorMakerConfig(mm2Props); waitUntilMirrorMakerIsRunning(backup, CONNECTOR_LIST, mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); - List> primaryConnectors = replicateBackupToPrimary ? CONNECTOR_LIST : List.of(MirrorHeartbeatConnector.class); + List> primaryConnectors = replicateBackupToPrimary ? CONNECTOR_LIST : Collections.singletonList(MirrorHeartbeatConnector.class); waitUntilMirrorMakerIsRunning(primary, primaryConnectors, mm2Config, BACKUP_CLUSTER_ALIAS, PRIMARY_CLUSTER_ALIAS); MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS)); @@ -368,7 +370,7 @@ public void testReplication() throws Exception { backupClient, consumerGroupName, PRIMARY_CLUSTER_ALIAS, backupTopic1); // Failover consumer group to backup cluster. - try (Consumer primaryConsumer = backup.kafka().createConsumer(Map.of("group.id", consumerGroupName))) { + try (Consumer primaryConsumer = backup.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) { primaryConsumer.assign(backupOffsets.keySet()); backupOffsets.forEach(primaryConsumer::seek); primaryConsumer.poll(CONSUMER_POLL_TIMEOUT); @@ -389,7 +391,7 @@ public void testReplication() throws Exception { primaryClient, consumerGroupName, BACKUP_CLUSTER_ALIAS, reverseTopic1); // Failback consumer group to primary cluster - try (Consumer primaryConsumer = primary.kafka().createConsumer(Map.of("group.id", consumerGroupName))) { + try (Consumer primaryConsumer = primary.kafka().createConsumer(Collections.singletonMap("group.id", consumerGroupName))) { primaryConsumer.assign(primaryOffsets.keySet()); primaryOffsets.forEach(primaryConsumer::seek); primaryConsumer.poll(CONSUMER_POLL_TIMEOUT); @@ -433,7 +435,7 @@ public void testReplication() throws Exception { @Test public void testReplicationWithEmptyPartition() throws Exception { String consumerGroupName = "consumer-group-testReplicationWithEmptyPartition"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // create topic String topic = "test-topic-with-empty-partition"; @@ -491,7 +493,7 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter produceMessages(primaryProducer, "test-topic-1"); String backupTopic1 = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); String consumerGroupName = "consumer-group-testOneWayReplicationWithAutoOffsetSync"; - Map consumerProps = new HashMap<>() {{ + Map consumerProps = new HashMap() {{ put("group.id", consumerGroupName); put("auto.offset.reset", "earliest"); }}; @@ -524,7 +526,7 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo( consumerProps, backupTopic1)) { - waitForConsumerGroupFullSync(backup, List.of(backupTopic1), + waitForConsumerGroupFullSync(backup, Collections.singletonList(backupTopic1), consumerGroupName, NUM_RECORDS_PRODUCED, offsetLagMax); assertDownstreamRedeliveriesBoundedByMaxLag(backupConsumer, offsetLagMax); } @@ -539,17 +541,17 @@ private void testOneWayReplicationWithOffsetSyncs(int offsetLagMax) throws Inter produceMessages(primaryProducer, "test-topic-2"); // create a consumer at primary cluster to consume the new topic - try (Consumer consumer1 = primary.kafka().createConsumerAndSubscribeTo(Map.of( + try (Consumer consumer1 = primary.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( "group.id", consumerGroupName), "test-topic-2")) { // we need to wait for consuming all the records for MM2 replicating the expected offsets waitForConsumingAllRecords(consumer1, NUM_RECORDS_PRODUCED); } // create a consumer at backup cluster with same consumer group ID to consume old and new topic - try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Map.of( + try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( "group.id", consumerGroupName), backupTopic1, remoteTopic2)) { - waitForConsumerGroupFullSync(backup, List.of(backupTopic1, remoteTopic2), + waitForConsumerGroupFullSync(backup, Arrays.asList(backupTopic1, remoteTopic2), consumerGroupName, NUM_RECORDS_PRODUCED, offsetLagMax); assertDownstreamRedeliveriesBoundedByMaxLag(backupConsumer, offsetLagMax); } @@ -565,7 +567,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro produceMessages(backupProducer, "test-topic-1"); } String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // warm up consumers before starting the connectors, so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -574,7 +576,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro mm2Config = new MirrorMakerConfig(mm2Props); - waitUntilMirrorMakerIsRunning(backup, List.of(MirrorSourceConnector.class, MirrorHeartbeatConnector.class), mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); + waitUntilMirrorMakerIsRunning(backup, Arrays.asList(MirrorSourceConnector.class, MirrorHeartbeatConnector.class), mm2Config, PRIMARY_CLUSTER_ALIAS, BACKUP_CLUSTER_ALIAS); MirrorClient primaryClient = new MirrorClient(mm2Config.clientConfig(PRIMARY_CLUSTER_ALIAS)); MirrorClient backupClient = new MirrorClient(mm2Config.clientConfig(BACKUP_CLUSTER_ALIAS)); @@ -593,7 +595,7 @@ public void testReplicationWithoutOffsetSyncWillNotCreateOffsetSyncsTopic() thro .stream() .filter(Optional::isPresent) .map(Optional::get) - .toList(); + .collect(Collectors.toList()); assertTrue(offsetSyncTopic.isEmpty()); primaryClient.close(); @@ -615,7 +617,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { waitForTopicCreated(backup, "mm2-offset-syncs." + PRIMARY_CLUSTER_ALIAS + ".internal"); String consumerGroupName = "consumer-group-syncs-on-target"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); produceMessages(primaryProducer, "test-topic-1"); @@ -624,7 +626,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); // Check offsets are pushed to the checkpoint topic - try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Map.of( + try (Consumer backupConsumer = backup.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( "auto.offset.reset", "earliest"), PRIMARY_CLUSTER_ALIAS + ".checkpoints.internal")) { waitForCondition(() -> { ConsumerRecords records = backupConsumer.poll(Duration.ofSeconds(1L)); @@ -653,7 +655,7 @@ public void testOffsetSyncsTopicsOnTarget() throws Exception { @Test public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedException { String consumerGroupName = "consumer-group-no-checkpoints"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // ensure there are some records in the topic on the source cluster produceMessages(primaryProducer, "test-topic-1"); @@ -674,7 +676,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio TopicPartition tp1 = new TopicPartition("test-topic-1", 0); TopicPartition tp2 = new TopicPartition("test-topic-no-checkpoints", 0); try (Consumer consumer = primary.kafka().createConsumer(consumerProps)) { - Collection tps = List.of(tp1, tp2); + Collection tps = Arrays.asList(tp1, tp2); Map endOffsets = consumer.endOffsets(tps); Map offsetsToCommit = endOffsets.entrySet().stream() .collect(Collectors.toMap( @@ -697,7 +699,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio produceMessages(primaryProducer, "test-topic-no-checkpoints"); try (Consumer consumer = primary.kafka().createConsumer(consumerProps)) { - Collection tps = List.of(tp1, tp2); + Collection tps = Arrays.asList(tp1, tp2); Map endOffsets = consumer.endOffsets(tps); Map offsetsToCommit = endOffsets.entrySet().stream() .collect(Collectors.toMap( @@ -720,7 +722,7 @@ public void testNoCheckpointsIfNoRecordsAreMirrored() throws InterruptedExceptio @Test public void testRestartReplication() throws InterruptedException { String consumerGroupName = "consumer-group-restart"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); warmUpConsumer(consumerProps); mm2Props.put("sync.group.offsets.enabled", "true"); @@ -732,7 +734,7 @@ public void testRestartReplication() throws InterruptedException { try (Consumer primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) { waitForConsumingAllRecords(primaryConsumer, NUM_RECORDS_PRODUCED); } - waitForConsumerGroupFullSync(backup, List.of(remoteTopic), consumerGroupName, NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); + waitForConsumerGroupFullSync(backup, Collections.singletonList(remoteTopic), consumerGroupName, NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); restartMirrorMakerConnectors(backup, CONNECTOR_LIST); assertMonotonicCheckpoints(backup, "primary.checkpoints.internal"); Thread.sleep(5000); @@ -740,14 +742,14 @@ public void testRestartReplication() throws InterruptedException { try (Consumer primaryConsumer = primary.kafka().createConsumerAndSubscribeTo(consumerProps, "test-topic-1")) { waitForConsumingAllRecords(primaryConsumer, NUM_RECORDS_PRODUCED); } - waitForConsumerGroupFullSync(backup, List.of(remoteTopic), consumerGroupName, 2 * NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); + waitForConsumerGroupFullSync(backup, Collections.singletonList(remoteTopic), consumerGroupName, 2 * NUM_RECORDS_PRODUCED, OFFSET_LAG_MAX); assertMonotonicCheckpoints(backup, "primary.checkpoints.internal"); } @Test public void testOffsetTranslationBehindReplicationFlow() throws InterruptedException { String consumerGroupName = "consumer-group-lagging-behind"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); String remoteTopic = remoteTopicName("test-topic-1", PRIMARY_CLUSTER_ALIAS); warmUpConsumer(consumerProps); mm2Props.put("sync.group.offsets.enabled", "true"); @@ -837,7 +839,7 @@ public void testSyncTopicConfigs() throws InterruptedException { Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("delete.retention.ms", "2000"), AlterConfigOp.OpType.SET)); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "2000"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); + Map> configOps = Collections.singletonMap(configResource, ops); // alter configs on target cluster backup.kafka().incrementalAlterConfigs(configOps); @@ -877,7 +879,7 @@ public void testReplicateSourceDefault() throws Exception { Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("delete.retention.ms", "2000"), AlterConfigOp.OpType.SET)); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "2000"), AlterConfigOp.OpType.SET)); - Map> configOps = Map.of(configResource, ops); + Map> configOps = Collections.singletonMap(configResource, ops); backup.kafka().incrementalAlterConfigs(configOps); waitForCondition(() -> { @@ -931,7 +933,7 @@ public void testReplicateTargetDefault() throws Exception { ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic); Collection ops = new ArrayList<>(); ops.add(new AlterConfigOp(new ConfigEntry("retention.bytes", "1000"), AlterConfigOp.OpType.DELETE)); - Map> configOps = Map.of(configResource, ops); + Map> configOps = Collections.singletonMap(configResource, ops); primary.kafka().incrementalAlterConfigs(configOps); waitForCondition(() -> { @@ -1099,7 +1101,7 @@ protected static void stopMirrorMakerConnectors(EmbeddedConnectCluster connectCl } protected static void alterMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, LongUnaryOperator alterOffset, String... topics) { - Set topicsSet = Set.of(topics); + Set topicsSet = new HashSet<>(Arrays.asList(topics)); String connectorName = MirrorSourceConnector.class.getSimpleName(); ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName); @@ -1129,7 +1131,7 @@ protected static void alterMirrorMakerSourceConnectorOffsets(EmbeddedConnectClus } protected static void resetSomeMirrorMakerSourceConnectorOffsets(EmbeddedConnectCluster connectCluster, String... topics) { - Set topicsSet = Set.of(topics); + Set topicsSet = new HashSet<>(Arrays.asList(topics)); String connectorName = MirrorSourceConnector.class.getSimpleName(); ConnectorOffsets currentOffsets = connectCluster.connectorOffsets(connectorName); @@ -1153,7 +1155,7 @@ protected static void resetAllMirrorMakerConnectorOffsets(EmbeddedConnectCluster String connectorName = connectorClass.getSimpleName(); connectCluster.resetConnectorOffsets(connectorName); assertEquals( - List.of(), + Collections.emptyList(), connectCluster.connectorOffsets(connectorName).offsets(), "Offsets for connector should be completely empty after full reset" ); @@ -1179,7 +1181,7 @@ protected static void waitForTopicCreated(EmbeddedConnectCluster cluster, String */ protected static String getTopicConfig(EmbeddedKafkaCluster cluster, String topic, String configName) throws Exception { try (Admin client = cluster.createAdminClient()) { - Collection cr = Set.of( + Collection cr = Collections.singleton( new ConfigResource(ConfigResource.Type.TOPIC, topic)); DescribeConfigsResult configsResult = client.describeConfigs(cr); @@ -1198,7 +1200,7 @@ protected void produceMessages(Producer producer, String topicNa protected Producer initializeProducer(EmbeddedConnectCluster cluster) { - return cluster.kafka().createProducer(Map.of()); + return cluster.kafka().createProducer(Collections.emptyMap()); } /** @@ -1222,7 +1224,7 @@ protected void produceMessages(Producer producer, List waitForCheckpointOnAllPartitions( MirrorClient client, String consumerGroupName, String remoteClusterAlias, String topicName ) throws InterruptedException { - return waitForNewCheckpointOnAllPartitions(client, consumerGroupName, remoteClusterAlias, topicName, Map.of()); + return waitForNewCheckpointOnAllPartitions(client, consumerGroupName, remoteClusterAlias, topicName, Collections.emptyMap()); } protected static Map waitForNewCheckpointOnAllPartitions( @@ -1316,7 +1318,7 @@ private static void waitForConsumerGroupFullSync( private static void assertMonotonicCheckpoints(EmbeddedConnectCluster cluster, String checkpointTopic) { TopicPartition checkpointTopicPartition = new TopicPartition(checkpointTopic, 0); - try (Consumer backupConsumer = cluster.kafka().createConsumerAndSubscribeTo(Map.of( + try (Consumer backupConsumer = cluster.kafka().createConsumerAndSubscribeTo(Collections.singletonMap( "auto.offset.reset", "earliest"), checkpointTopic)) { Map> checkpointsByGroup = new HashMap<>(); long deadline = System.currentTimeMillis() + CHECKPOINT_DURATION_MS; @@ -1388,11 +1390,11 @@ private static Map basicMM2Config() { private void createTopics() { // to verify topic config will be sync-ed across clusters - Map topicConfig = Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); - Map emptyMap = Map.of(); + Map topicConfig = Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); + Map emptyMap = Collections.emptyMap(); // increase admin client request timeout value to make the tests reliable. - Map adminClientConfig = Map.of( + Map adminClientConfig = Collections.singletonMap( AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, REQUEST_TIMEOUT_DURATION_MS); // create these topics before starting the connectors, so we don't need to wait for discovery @@ -1463,7 +1465,7 @@ public void onPartitionsAssigned(Collection partitions) { topicPartitionsPendingPosition.removeAll(topicPartitionsWithPosition); } assertEquals( - Set.of(), + Collections.emptySet(), topicPartitionsPendingPosition, "Failed to calculate consumer position for one or more partitions on cluster " + clusterName + " in time" ); @@ -1492,7 +1494,7 @@ private static void topicShouldNotBeCreated(EmbeddedConnectCluster cluster, Stri */ protected static void waitForTopicPartitionCreated(EmbeddedConnectCluster cluster, String topicName, int totalNumPartitions) throws InterruptedException { try (final Admin adminClient = cluster.kafka().createAdminClient()) { - waitForCondition(() -> adminClient.describeTopics(Set.of(topicName)).allTopicNames().get() + waitForCondition(() -> adminClient.describeTopics(Collections.singleton(topicName)).allTopicNames().get() .get(topicName).partitions().size() == totalNumPartitions, TOPIC_SYNC_DURATION_MS, "Topic: " + topicName + "'s partitions didn't get created on cluster: " + cluster.getName() ); diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java index 5578c2b28774c..e02cc4c02b332 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsIntegrationExactlyOnceTest.java @@ -24,7 +24,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; -import java.util.List; +import java.util.Arrays; import java.util.Properties; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -45,7 +45,7 @@ public void startClusters() throws Exception { BACKUP_CLUSTER_ALIAS + "." + DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, DistributedConfig.ExactlyOnceSourceSupport.ENABLED.toString() ); - for (Properties brokerProps : List.of(primaryBrokerProps, backupBrokerProps)) { + for (Properties brokerProps : Arrays.asList(primaryBrokerProps, backupBrokerProps)) { brokerProps.put("transaction.state.log.replication.factor", "1"); brokerProps.put("transaction.state.log.min.isr", "1"); } @@ -81,7 +81,7 @@ public void testReplication() throws Exception { assertEquals(expectedRecordsTopic2, backup.kafka().consume(expectedRecordsTopic2, RECORD_TRANSFER_DURATION_MS, backupTopic2).count(), "New topic was not re-replicated to backup cluster after altering offsets."); - @SuppressWarnings("unchecked") + @SuppressWarnings({"unchecked", "rawtypes"}) Class[] connectorsToReset = CONNECTOR_LIST.toArray(new Class[0]); stopMirrorMakerConnectors(backup, connectorsToReset); // Resetting the offsets for the heartbeat and checkpoint connectors doesn't have any effect diff --git a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java index 814a03d278b60..853cd02f13401 100644 --- a/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java +++ b/connect/mirror/src/test/java/org/apache/kafka/connect/mirror/integration/MirrorConnectorsWithCustomForwardingAdminIntegrationTest.java @@ -43,7 +43,9 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -151,7 +153,7 @@ public void startClusters() throws Exception { additionalBackupClusterClientsConfigs.putAll(superUserConfig()); backupWorkerProps.putAll(superUserConfig()); - Map additionalConfig = new HashMap<>(superUserConfig()) {{ + HashMap additionalConfig = new HashMap(superUserConfig()) {{ put(FORWARDING_ADMIN_CLASS, FakeForwardingAdminWithLocalMetadata.class.getName()); }}; @@ -170,7 +172,7 @@ public void startClusters() throws Exception { startClusters(additionalConfig); try (Admin adminClient = primary.kafka().createAdminClient()) { - adminClient.createAcls(List.of( + adminClient.createAcls(Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -178,7 +180,7 @@ public void startClusters() throws Exception { )).all().get(); } try (Admin adminClient = backup.kafka().createAdminClient()) { - adminClient.createAcls(List.of( + adminClient.createAcls(Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "*", PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -200,7 +202,7 @@ public void testReplicationIsCreatingTopicsUsingProvidedForwardingAdmin() throws produceMessages(primaryProducer, "test-topic-1"); produceMessages(backupProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -237,7 +239,7 @@ public void testCreatePartitionsUseProvidedForwardingAdmin() throws Exception { produceMessages(backupProducer, "test-topic-1"); produceMessages(primaryProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -253,7 +255,7 @@ public void testCreatePartitionsUseProvidedForwardingAdmin() throws Exception { waitForTopicToPersistInFakeLocalMetadataStore("primary.test-topic-1"); // increase number of partitions - Map newPartitions = Map.of("test-topic-1", NewPartitions.increaseTo(NUM_PARTITIONS + 1)); + Map newPartitions = Collections.singletonMap("test-topic-1", NewPartitions.increaseTo(NUM_PARTITIONS + 1)); try (Admin adminClient = primary.kafka().createAdminClient()) { adminClient.createPartitions(newPartitions).all().get(); } @@ -272,7 +274,7 @@ public void testSyncTopicConfigUseProvidedForwardingAdmin() throws Exception { produceMessages(backupProducer, "test-topic-1"); produceMessages(primaryProducer, "test-topic-1"); String consumerGroupName = "consumer-group-testReplication"; - Map consumerProps = Map.of("group.id", consumerGroupName); + Map consumerProps = Collections.singletonMap("group.id", consumerGroupName); // warm up consumers before starting the connectors so we don't need to wait for discovery warmUpConsumer(consumerProps); @@ -300,7 +302,7 @@ public void testSyncTopicACLsUseProvidedForwardingAdmin() throws Exception { mm2Props.put("sync.topic.acls.enabled", "true"); mm2Props.put("sync.topic.acls.interval.seconds", "1"); mm2Config = new MirrorMakerConfig(mm2Props); - List aclBindings = List.of( + List aclBindings = Collections.singletonList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, "test-topic-1", PatternType.LITERAL), new AccessControlEntry("User:dummy", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW) @@ -342,7 +344,7 @@ public void testSyncTopicACLsUseProvidedForwardingAdmin() throws Exception { ); // expect to use FakeForwardingAdminWithLocalMetadata to update topic ACLs in FakeLocalMetadataStore.allAcls - assertTrue(FakeLocalMetadataStore.aclBindings("dummy").containsAll(List.of(expectedACLOnBackupCluster, expectedACLOnPrimaryCluster))); + assertTrue(FakeLocalMetadataStore.aclBindings("dummy").containsAll(Arrays.asList(expectedACLOnBackupCluster, expectedACLOnPrimaryCluster))); } void waitForTopicToPersistInFakeLocalMetadataStore(String topicName) throws InterruptedException { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java index 8c0d30b1c992e..5cfa300bafc2b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/AbstractConnectCli.java @@ -34,7 +34,7 @@ import java.net.URI; import java.util.Arrays; -import java.util.List; +import java.util.Collections; import java.util.Map; /** @@ -82,7 +82,7 @@ protected abstract H createHerder(T config, String workerId, Plugins plugins, * Validate {@link #args}, process worker properties from the first CLI argument, and start {@link Connect} */ public void run() { - if (args.length < 1 || List.of(args).contains("--help")) { + if (args.length < 1 || Arrays.asList(args).contains("--help")) { log.info("Usage: {}", usage()); Exit.exit(1); } @@ -90,7 +90,7 @@ public void run() { try { String workerPropsFile = args[0]; Map workerProps = !workerPropsFile.isEmpty() ? - Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Map.of(); + Utils.propsToStringMap(Utils.loadProps(workerPropsFile)) : Collections.emptyMap(); String[] extraArgs = Arrays.copyOfRange(args, 1, args.length); Connect connect = startConnect(workerProps); processExtraArgs(connect, extraArgs); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java index 59b943ae91913..8763dd908a179 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectDistributed.java @@ -36,8 +36,8 @@ import org.apache.kafka.connect.util.ConnectUtils; import org.apache.kafka.connect.util.SharedTopicAdmin; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG; @@ -77,7 +77,7 @@ protected DistributedHerder createHerder(DistributedConfig config, String worker KafkaOffsetBackingStore offsetBackingStore = new KafkaOffsetBackingStore(sharedAdmin, () -> clientIdBase, plugins.newInternalConverter(true, JsonConverter.class.getName(), - Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(config); Worker worker = new Worker(workerId, Time.SYSTEM, plugins, config, offsetBackingStore, connectorClientConfigOverridePolicy); @@ -99,7 +99,7 @@ protected DistributedHerder createHerder(DistributedConfig config, String worker return new DistributedHerder(config, Time.SYSTEM, worker, kafkaClusterId, statusBackingStore, configBackingStore, restServer.advertisedUrl().toString(), restClient, connectorClientConfigOverridePolicy, - List.of(), sharedAdmin); + Collections.emptyList(), sharedAdmin); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java index ded4103c69c50..43af6b274b6ac 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/cli/ConnectStandalone.java @@ -49,6 +49,7 @@ import java.io.File; import java.io.IOException; import java.nio.file.Paths; +import java.util.Collections; import java.util.Map; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -162,7 +163,7 @@ protected StandaloneHerder createHerder(StandaloneConfig config, String workerId RestServer restServer, RestClient restClient) { OffsetBackingStore offsetBackingStore = new FileOffsetBackingStore(plugins.newInternalConverter( - true, JsonConverter.class.getName(), Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); + true, JsonConverter.class.getName(), Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"))); offsetBackingStore.configure(config); Worker worker = new Worker(workerId, Time.SYSTEM, plugins, config, offsetBackingStore, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java index 2e1e1d7318ed4..046dcc63e5a20 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractHerder.java @@ -26,7 +26,6 @@ import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.ConfigTransformer; import org.apache.kafka.common.config.ConfigValue; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.Connector; @@ -35,9 +34,7 @@ import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.NotFoundException; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; -import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.runtime.rest.entities.ActiveTopicsInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfo; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; @@ -66,13 +63,13 @@ import org.apache.kafka.connect.util.Stage; import org.apache.kafka.connect.util.TemporaryStage; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; -import org.apache.maven.artifact.versioning.VersionRange; +import org.apache.logging.log4j.Level; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.ByteArrayOutputStream; import java.io.PrintStream; +import java.io.UnsupportedEncodingException; import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Collection; @@ -88,6 +85,8 @@ import java.util.Objects; import java.util.Optional; import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.function.Function; @@ -95,15 +94,9 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; -import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; -import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_VERSION; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; -import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; -import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG; -import static org.apache.kafka.connect.runtime.ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG; - /** * Abstract Herder implementation which handles connector/task lifecycle tracking. Extensions @@ -140,12 +133,12 @@ public abstract class AbstractHerder implements Herder, TaskStatus.Listener, Con protected final StatusBackingStore statusBackingStore; protected final ConfigBackingStore configBackingStore; private volatile boolean ready = false; - private final Plugin connectorClientConfigOverridePolicyPlugin; + private final ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy; private final ExecutorService connectorExecutor; private final Time time; protected final Loggers loggers; - private final CachedConnectors cachedConnectors; + private final ConcurrentMap tempConnectors = new ConcurrentHashMap<>(); public AbstractHerder(Worker worker, String workerId, @@ -160,14 +153,10 @@ public AbstractHerder(Worker worker, this.kafkaClusterId = kafkaClusterId; this.statusBackingStore = statusBackingStore; this.configBackingStore = configBackingStore; - this.connectorClientConfigOverridePolicyPlugin = Plugin.wrapInstance( - connectorClientConfigOverridePolicy, - worker.metrics().metrics(), - WorkerConfig.CONNECTOR_CLIENT_POLICY_CLASS_CONFIG); + this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy; this.connectorExecutor = Executors.newCachedThreadPool(); this.time = time; - this.loggers = Loggers.newInstance(time); - this.cachedConnectors = new CachedConnectors(worker.getPlugins()); + this.loggers = new Loggers(time); } @Override @@ -188,7 +177,7 @@ protected void stopServices() { this.configBackingStore.stop(); this.worker.stop(); this.connectorExecutor.shutdown(); - Utils.closeQuietly(this.connectorClientConfigOverridePolicyPlugin, "connector client config override policy"); + Utils.closeQuietly(this.connectorClientConfigOverridePolicy, "connector client config override policy"); } protected void ready() { @@ -203,91 +192,83 @@ public boolean isReady() { @Override public void onStartup(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RUNNING, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } @Override public void onStop(String connector) { statusBackingStore.put(new ConnectorStatus(connector, AbstractStatus.State.STOPPED, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } @Override public void onPause(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.PAUSED, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } @Override public void onResume(String connector) { statusBackingStore.put(new ConnectorStatus(connector, TaskStatus.State.RUNNING, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } @Override public void onShutdown(String connector) { statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.UNASSIGNED, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } @Override public void onFailure(String connector, Throwable cause) { statusBackingStore.putSafe(new ConnectorStatus(connector, ConnectorStatus.State.FAILED, - trace(cause), workerId, generation(), worker.connectorVersion(connector))); + trace(cause), workerId, generation())); } @Override public void onStartup(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation())); } @Override public void onFailure(ConnectorTaskId id, Throwable cause) { - statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause), - worker.taskVersion(id))); + statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.FAILED, workerId, generation(), trace(cause))); } @Override public void onShutdown(ConnectorTaskId id) { - statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.putSafe(new TaskStatus(id, TaskStatus.State.UNASSIGNED, workerId, generation())); } @Override public void onResume(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RUNNING, workerId, generation())); } @Override public void onPause(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.PAUSED, workerId, generation())); } @Override public void onDeletion(String connector) { for (TaskStatus status : statusBackingStore.getAll(connector)) onDeletion(status.id()); - statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation(), - worker.connectorVersion(connector))); + statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.DESTROYED, workerId, generation())); } @Override public void onDeletion(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.DESTROYED, workerId, generation())); } public void onRestart(String connector) { statusBackingStore.put(new ConnectorStatus(connector, ConnectorStatus.State.RESTARTING, - workerId, generation(), worker.connectorVersion(connector))); + workerId, generation())); } public void onRestart(ConnectorTaskId id) { - statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RESTARTING, workerId, generation(), null, - worker.taskVersion(id))); + statusBackingStore.put(new TaskStatus(id, TaskStatus.State.RESTARTING, workerId, generation())); } @Override @@ -355,12 +336,12 @@ public ConnectorStateInfo connectorStatus(String connName) { Collection tasks = statusBackingStore.getAll(connName); ConnectorStateInfo.ConnectorState connectorState = new ConnectorStateInfo.ConnectorState( - connector.state().toString(), connector.workerId(), connector.trace(), connector.version()); + connector.state().toString(), connector.workerId(), connector.trace()); List taskStates = new ArrayList<>(); for (TaskStatus status : tasks) { taskStates.add(new ConnectorStateInfo.TaskState(status.id().task(), - status.state().toString(), status.workerId(), status.trace(), status.version())); + status.state().toString(), status.workerId(), status.trace())); } Collections.sort(taskStates); @@ -396,12 +377,7 @@ public ConnectorStateInfo.TaskState taskStatus(ConnectorTaskId id) { throw new NotFoundException("No status found for task " + id); return new ConnectorStateInfo.TaskState(id.task(), status.state().toString(), - status.workerId(), status.trace(), status.version()); - } - - @Override - public ConnectMetrics connectMetrics() { - return worker.metrics(); + status.workerId(), status.trace()); } protected Map validateSinkConnectorConfig(SinkConnector connector, ConfigDef configDef, Map config) { @@ -422,8 +398,6 @@ protected Map validateSourceConnectorConfig(SourceConnector * may be null, in which case no validation will be performed under the assumption that the * connector will use inherit the converter settings from the worker. Some errors encountered * during validation may be {@link ConfigValue#addErrorMessage(String) added} to this object - * @param pluginVersionValue the {@link ConfigValue} for the converter version property in the connector config; - * * @param pluginInterface the interface for the plugin type * (e.g., {@code org.apache.kafka.connect.storage.Converter.class}); * may not be null @@ -444,18 +418,14 @@ protected Map validateSourceConnectorConfig(SourceConnector * @param the plugin class to perform validation for */ - @SuppressWarnings("unchecked") private ConfigInfos validateConverterConfig( Map connectorConfig, ConfigValue pluginConfigValue, - ConfigValue pluginVersionValue, Class pluginInterface, Function configDefAccessor, String pluginName, String pluginProperty, - String pluginVersionProperty, Map defaultProperties, - ClassLoader connectorLoader, Function reportStage ) { Objects.requireNonNull(connectorConfig); @@ -463,15 +433,12 @@ private ConfigInfos validateConverterConfig( Objects.requireNonNull(configDefAccessor); Objects.requireNonNull(pluginName); Objects.requireNonNull(pluginProperty); - Objects.requireNonNull(pluginVersionProperty); String pluginClass = connectorConfig.get(pluginProperty); - String pluginVersion = connectorConfig.get(pluginVersionProperty); if (pluginClass == null || pluginConfigValue == null || !pluginConfigValue.errorMessages().isEmpty() - || !pluginVersionValue.errorMessages().isEmpty() ) { // Either no custom converter was specified, or one was specified but there's a problem with it. // No need to proceed any further. @@ -481,22 +448,11 @@ private ConfigInfos validateConverterConfig( T pluginInstance; String stageDescription = "instantiating the connector's " + pluginName + " for validation"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { - VersionRange range = PluginUtils.connectorVersionRequirement(pluginVersion); - pluginInstance = (T) plugins().newPlugin(pluginClass, range, connectorLoader); - } catch (VersionedPluginLoadingException e) { - log.error("Failed to load {} class {} with version {}", pluginName, pluginClass, pluginVersion, e); - pluginConfigValue.addErrorMessage(e.getMessage()); - pluginVersionValue.addErrorMessage(e.getMessage()); - return null; + pluginInstance = Utils.newInstance(pluginClass, pluginInterface); } catch (ClassNotFoundException | RuntimeException e) { log.error("Failed to instantiate {} class {}; this should have been caught by prior validation logic", pluginName, pluginClass, e); pluginConfigValue.addErrorMessage("Failed to load class " + pluginClass + (e.getMessage() != null ? ": " + e.getMessage() : "")); return null; - } catch (InvalidVersionSpecificationException e) { - // this should have been caught by prior validation logic - log.error("Invalid version range for {} class {} with version {}", pluginName, pluginClass, pluginVersion, e); - pluginVersionValue.addErrorMessage(e.getMessage()); - return null; } try { @@ -538,55 +494,55 @@ private ConfigInfos validateConverterConfig( } } - private ConfigInfos validateAllConverterConfigs( - Map connectorProps, - Map validatedConnectorConfig, - ClassLoader connectorLoader, + private ConfigInfos validateHeaderConverterConfig( + Map connectorConfig, + ConfigValue headerConverterConfigValue, Function reportStage ) { - String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); - // do custom converter-specific validation - ConfigInfos headerConverterConfigInfos = validateConverterConfig( - connectorProps, - validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), - validatedConnectorConfig.get(HEADER_CONVERTER_VERSION_CONFIG), + return validateConverterConfig( + connectorConfig, + headerConverterConfigValue, HeaderConverter.class, HeaderConverter::config, "header converter", HEADER_CONVERTER_CLASS_CONFIG, - HEADER_CONVERTER_VERSION_CONFIG, - Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), - connectorLoader, + Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()), reportStage ); - ConfigInfos keyConverterConfigInfos = validateConverterConfig( - connectorProps, - validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), - validatedConnectorConfig.get(KEY_CONVERTER_VERSION_CONFIG), + } + + private ConfigInfos validateKeyConverterConfig( + Map connectorConfig, + ConfigValue keyConverterConfigValue, + Function reportStage + ) { + return validateConverterConfig( + connectorConfig, + keyConverterConfigValue, Converter.class, Converter::config, "key converter", KEY_CONVERTER_CLASS_CONFIG, - KEY_CONVERTER_VERSION_CONFIG, - Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), - connectorLoader, + Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.KEY.getName()), reportStage ); + } - ConfigInfos valueConverterConfigInfos = validateConverterConfig( - connectorProps, - validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), - validatedConnectorConfig.get(VALUE_CONVERTER_VERSION_CONFIG), + private ConfigInfos validateValueConverterConfig( + Map connectorConfig, + ConfigValue valueConverterConfigValue, + Function reportStage + ) { + return validateConverterConfig( + connectorConfig, + valueConverterConfigValue, Converter.class, Converter::config, "value converter", VALUE_CONVERTER_CLASS_CONFIG, - VALUE_CONVERTER_VERSION_CONFIG, - Map.of(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), - connectorLoader, + Collections.singletonMap(ConverterConfig.TYPE_CONFIG, ConverterType.VALUE.getName()), reportStage ); - return mergeConfigInfos(connType, headerConverterConfigInfos, keyConverterConfigInfos, valueConverterConfigInfos); } @Override @@ -634,8 +590,7 @@ public Optional buildRestartPlan(RestartRequest request) { ConnectorStateInfo.ConnectorState connectorInfoState = new ConnectorStateInfo.ConnectorState( connectorState.toString(), connectorStatus.workerId(), - connectorStatus.trace(), - connectorStatus.version() + connectorStatus.trace() ); // Collect the task states, If requested, mark the task as restarting @@ -647,8 +602,7 @@ public Optional buildRestartPlan(RestartRequest request) { taskStatus.id().task(), taskState.toString(), taskStatus.workerId(), - taskStatus.trace(), - taskStatus.version() + taskStatus.trace() ); }) .collect(Collectors.toList()); @@ -680,146 +634,6 @@ protected boolean connectorUsesProducer(org.apache.kafka.connect.health.Connecto || SinkConnectorConfig.hasDlqTopicConfig(connProps); } - private ConfigInfos validateClientOverrides( - Map connectorProps, - org.apache.kafka.connect.health.ConnectorType connectorType, - Class connectorClass, - Function reportStage, - boolean doLog - ) { - if (connectorClass == null || connectorType == null) { - return null; - } - AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); - String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); - String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); - ConfigInfos producerConfigInfos = null; - ConfigInfos consumerConfigInfos = null; - ConfigInfos adminConfigInfos = null; - String stageDescription = null; - - if (connectorUsesProducer(connectorType, connectorProps)) { - stageDescription = "validating producer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - producerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, - connectorConfig, - ProducerConfig.configDef(), - connectorClass, - connectorType, - ConnectorClientConfigRequest.ClientType.PRODUCER, - connectorClientConfigOverridePolicyPlugin); - } - } - if (connectorUsesAdmin(connectorType, connectorProps)) { - stageDescription = "validating admin config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - adminConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, - connectorConfig, - AdminClientConfig.configDef(), - connectorClass, - connectorType, - ConnectorClientConfigRequest.ClientType.ADMIN, - connectorClientConfigOverridePolicyPlugin); - } - } - if (connectorUsesConsumer(connectorType, connectorProps)) { - stageDescription = "validating consumer config overrides for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - consumerConfigInfos = validateClientOverrides( - connName, - ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, - connectorConfig, - ConsumerConfig.configDef(), - connectorClass, - connectorType, - ConnectorClientConfigRequest.ClientType.CONSUMER, - connectorClientConfigOverridePolicyPlugin); - } - } - return mergeConfigInfos(connType, - producerConfigInfos, - consumerConfigInfos, - adminConfigInfos - ); - } - - private ConfigInfos validateConnectorPluginSpecifiedConfigs( - Map connectorProps, - Map validatedConnectorConfig, - ConfigDef enrichedConfigDef, - Connector connector, - Function reportStage - ) { - List configValues = new ArrayList<>(validatedConnectorConfig.values()); - Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); - Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); - - String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); - // do custom connector-specific validation - ConfigDef configDef; - String stageDescription = "retrieving the configuration definition from the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - configDef = connector.config(); - } - if (null == configDef) { - throw new BadRequestException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - connector.getClass().getName() - ) - ); - } - - Config config; - stageDescription = "performing multi-property validation for the connector"; - try (TemporaryStage stage = reportStage.apply(stageDescription)) { - config = connector.validate(connectorProps); - } - if (null == config) { - throw new BadRequestException( - String.format( - "%s.validate() must return a Config that is not null.", - connector.getClass().getName() - ) - ); - } - configKeys.putAll(configDef.configKeys()); - allGroups.addAll(configDef.groups()); - configValues.addAll(config.configValues()); - return generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); - } - - private void addNullValuedErrors(Map connectorProps, Map validatedConfig) { - connectorProps.entrySet().stream() - .filter(e -> e.getValue() == null) - .map(Map.Entry::getKey) - .forEach(prop -> - validatedConfig.computeIfAbsent(prop, ConfigValue::new) - .addErrorMessage("Null value can not be supplied as the configuration value.")); - } - - private ConfigInfos invalidVersionedConnectorValidation( - Map connectorProps, - VersionedPluginLoadingException e, - Function reportStage - ) { - String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); - ConfigDef configDef = ConnectorConfig.enrichedConfigDef(worker.getPlugins(), connType); - Map validatedConfig; - try (TemporaryStage stage = reportStage.apply("validating connector configuration")) { - validatedConfig = configDef.validateAll(connectorProps); - } - validatedConfig.get(CONNECTOR_CLASS_CONFIG).addErrorMessage(e.getMessage()); - validatedConfig.get(CONNECTOR_VERSION).addErrorMessage(e.getMessage()); - validatedConfig.get(CONNECTOR_VERSION).recommendedValues(e.availableVersions().stream().map(v -> (Object) v).collect(Collectors.toList())); - addNullValuedErrors(connectorProps, validatedConfig); - return generateResult(connType, configDef.configKeys(), new ArrayList<>(validatedConfig.values()), new ArrayList<>(configDef.groups())); - } - ConfigInfos validateConnectorConfig( Map connectorProps, Function reportStage, @@ -832,60 +646,150 @@ ConfigInfos validateConnectorConfig( connectorProps = worker.configTransformer().transform(connectorProps); } } - String connType = connectorProps.get(CONNECTOR_CLASS_CONFIG); - if (connType == null) { + String connType = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + if (connType == null) throw new BadRequestException("Connector config " + connectorProps + " contains no connector type"); - } - - VersionRange connVersion; - Connector connector; - ClassLoader connectorLoader; - try { - connVersion = PluginUtils.connectorVersionRequirement(connectorProps.get(CONNECTOR_VERSION)); - connector = cachedConnectors.getConnector(connType, connVersion); - connectorLoader = plugins().pluginLoader(connType, connVersion); - log.info("Validating connector {}, version {}", connType, connector.version()); - } catch (VersionedPluginLoadingException e) { - log.warn("Failed to load connector {} with version {}, skipping additional validations (connector, converters, transformations, client overrides) ", - connType, connectorProps.get(CONNECTOR_VERSION), e); - return invalidVersionedConnectorValidation(connectorProps, e, reportStage); - } catch (Exception e) { - throw new BadRequestException(e.getMessage(), e); - } + Connector connector = getConnector(connType); + ClassLoader connectorLoader = plugins().connectorLoader(connType); try (LoaderSwap loaderSwap = plugins().withClassLoader(connectorLoader)) { - + org.apache.kafka.connect.health.ConnectorType connectorType; ConfigDef enrichedConfigDef; Map validatedConnectorConfig; - org.apache.kafka.connect.health.ConnectorType connectorType; if (connector instanceof SourceConnector) { connectorType = org.apache.kafka.connect.health.ConnectorType.SOURCE; - enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SourceConnectorConfig.configDef(), connectorProps, false); stageDescription = "validating source connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSourceConnectorConfig((SourceConnector) connector, enrichedConfigDef, connectorProps); } } else { connectorType = org.apache.kafka.connect.health.ConnectorType.SINK; - enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.enrichedConfigDef(plugins(), connectorProps, worker.config()), connectorProps, false); + enrichedConfigDef = ConnectorConfig.enrich(plugins(), SinkConnectorConfig.configDef(), connectorProps, false); stageDescription = "validating sink connector-specific properties for the connector"; try (TemporaryStage stage = reportStage.apply(stageDescription)) { validatedConnectorConfig = validateSinkConnectorConfig((SinkConnector) connector, enrichedConfigDef, connectorProps); } } - addNullValuedErrors(connectorProps, validatedConnectorConfig); + connectorProps.entrySet().stream() + .filter(e -> e.getValue() == null) + .map(Map.Entry::getKey) + .forEach(prop -> + validatedConnectorConfig.computeIfAbsent(prop, ConfigValue::new) + .addErrorMessage("Null value can not be supplied as the configuration value.") + ); + + List configValues = new ArrayList<>(validatedConnectorConfig.values()); + Map configKeys = new LinkedHashMap<>(enrichedConfigDef.configKeys()); + Set allGroups = new LinkedHashSet<>(enrichedConfigDef.groups()); + + // do custom connector-specific validation + ConfigDef configDef; + stageDescription = "retrieving the configuration definition from the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + configDef = connector.config(); + } + if (null == configDef) { + throw new BadRequestException( + String.format( + "%s.config() must return a ConfigDef that is not null.", + connector.getClass().getName() + ) + ); + } + + Config config; + stageDescription = "performing multi-property validation for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + config = connector.validate(connectorProps); + } + if (null == config) { + throw new BadRequestException( + String.format( + "%s.validate() must return a Config that is not null.", + connector.getClass().getName() + ) + ); + } + configKeys.putAll(configDef.configKeys()); + allGroups.addAll(configDef.groups()); + configValues.addAll(config.configValues()); + + // do custom converter-specific validation + ConfigInfos headerConverterConfigInfos = validateHeaderConverterConfig( + connectorProps, + validatedConnectorConfig.get(HEADER_CONVERTER_CLASS_CONFIG), + reportStage + ); + ConfigInfos keyConverterConfigInfos = validateKeyConverterConfig( + connectorProps, + validatedConnectorConfig.get(KEY_CONVERTER_CLASS_CONFIG), + reportStage + ); + ConfigInfos valueConverterConfigInfos = validateValueConverterConfig( + connectorProps, + validatedConnectorConfig.get(VALUE_CONVERTER_CLASS_CONFIG), + reportStage + ); - // the order of operations here is important, converter validations can add error messages to the connector config - // which are collected and converted to ConfigInfos in validateConnectorPluginSpecifiedConfigs - ConfigInfos converterConfigInfo = validateAllConverterConfigs(connectorProps, validatedConnectorConfig, connectorLoader, reportStage); - ConfigInfos clientOverrideInfo = validateClientOverrides(connectorProps, connectorType, connector.getClass(), reportStage, doLog); - ConfigInfos connectorConfigInfo = validateConnectorPluginSpecifiedConfigs(connectorProps, validatedConnectorConfig, enrichedConfigDef, connector, reportStage); + ConfigInfos configInfos = generateResult(connType, configKeys, configValues, new ArrayList<>(allGroups)); + AbstractConfig connectorConfig = new AbstractConfig(new ConfigDef(), connectorProps, doLog); + String connName = connectorProps.get(ConnectorConfig.NAME_CONFIG); + ConfigInfos producerConfigInfos = null; + ConfigInfos consumerConfigInfos = null; + ConfigInfos adminConfigInfos = null; + if (connectorUsesProducer(connectorType, connectorProps)) { + stageDescription = "validating producer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + producerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX, + connectorConfig, + ProducerConfig.configDef(), + connector.getClass(), + connectorType, + ConnectorClientConfigRequest.ClientType.PRODUCER, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesAdmin(connectorType, connectorProps)) { + stageDescription = "validating admin config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + adminConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX, + connectorConfig, + AdminClientConfig.configDef(), + connector.getClass(), + connectorType, + ConnectorClientConfigRequest.ClientType.ADMIN, + connectorClientConfigOverridePolicy); + } + } + if (connectorUsesConsumer(connectorType, connectorProps)) { + stageDescription = "validating consumer config overrides for the connector"; + try (TemporaryStage stage = reportStage.apply(stageDescription)) { + consumerConfigInfos = validateClientOverrides( + connName, + ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX, + connectorConfig, + ConsumerConfig.configDef(), + connector.getClass(), + connectorType, + ConnectorClientConfigRequest.ClientType.CONSUMER, + connectorClientConfigOverridePolicy); + } + } return mergeConfigInfos(connType, - connectorConfigInfo, - clientOverrideInfo, - converterConfigInfo + configInfos, + producerConfigInfos, + consumerConfigInfos, + adminConfigInfos, + headerConverterConfigInfos, + keyConverterConfigInfos, + valueConverterConfigInfos ); } } @@ -897,7 +801,7 @@ private static ConfigInfos mergeConfigInfos(String connType, ConfigInfos... conf for (ConfigInfos configInfos : configInfosList) { if (configInfos != null) { errorCount += configInfos.errorCount(); - configInfoList.addAll(configInfos.configs()); + configInfoList.addAll(configInfos.values()); groups.addAll(configInfos.groups()); } } @@ -911,7 +815,7 @@ private static ConfigInfos validateClientOverrides(String connName, Class connectorClass, org.apache.kafka.connect.health.ConnectorType connectorType, ConnectorClientConfigRequest.ClientType clientType, - Plugin connectorClientConfigOverridePolicyPlugin) { + ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) { Map clientConfigs = new HashMap<>(); for (Map.Entry rawClientConfig : connectorConfig.originalsWithPrefix(prefix).entrySet()) { String configName = rawClientConfig.getKey(); @@ -924,7 +828,7 @@ private static ConfigInfos validateClientOverrides(String connName, } ConnectorClientConfigRequest connectorClientConfigRequest = new ConnectorClientConfigRequest( connName, connectorType, connectorClass, clientConfigs, clientType); - List configValues = connectorClientConfigOverridePolicyPlugin.get().validate(connectorClientConfigRequest); + List configValues = connectorClientConfigOverridePolicy.validate(connectorClientConfigRequest); return prefixedConfigInfos(configDef.configKeys(), configValues, prefix); } @@ -1032,6 +936,10 @@ private static ConfigValueInfo convertConfigValue(ConfigValue configValue, Type return new ConfigValueInfo(configValue.name(), value, recommendedValues, configValue.errorMessages(), configValue.visible()); } + protected Connector getConnector(String connType) { + return tempConnectors.computeIfAbsent(connType, k -> plugins().newConnector(k)); + } + /** * Retrieves ConnectorType for the class specified in the connector config * @param connConfig the connector config, may be null @@ -1042,14 +950,13 @@ public ConnectorType connectorType(Map connConfig) { if (connConfig == null) { return ConnectorType.UNKNOWN; } - String connClass = connConfig.get(CONNECTOR_CLASS_CONFIG); + String connClass = connConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); if (connClass == null) { return ConnectorType.UNKNOWN; } try { - VersionRange range = PluginUtils.connectorVersionRequirement(connConfig.get(CONNECTOR_VERSION)); - return ConnectorType.from(cachedConnectors.getConnector(connClass, range).getClass()); - } catch (Exception e) { + return ConnectorType.from(getConnector(connClass).getClass()); + } catch (ConnectException e) { log.warn("Unable to retrieve connector type", e); return ConnectorType.UNKNOWN; } @@ -1073,7 +980,7 @@ protected final boolean maybeAddConfigErrors( StringBuilder messages = new StringBuilder(); messages.append("Connector configuration is invalid and contains the following ") .append(errors).append(" error(s):"); - for (ConfigInfo configInfo : configInfos.configs()) { + for (ConfigInfo configInfo : configInfos.values()) { for (String msg : configInfo.configValue().errors()) { messages.append('\n').append(msg); } @@ -1091,8 +998,12 @@ protected final boolean maybeAddConfigErrors( private String trace(Throwable t) { ByteArrayOutputStream output = new ByteArrayOutputStream(); - t.printStackTrace(new PrintStream(output, false, StandardCharsets.UTF_8)); - return output.toString(StandardCharsets.UTF_8); + try { + t.printStackTrace(new PrintStream(output, false, StandardCharsets.UTF_8.name())); + return output.toString(StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + return null; + } } /* @@ -1167,33 +1078,25 @@ static Set keysWithVariableValues(Map rawConfig, Pattern @Override public List connectorPluginConfig(String pluginName) { - return connectorPluginConfig(pluginName, null); - } - - @Override - public List connectorPluginConfig(String pluginName, VersionRange range) { - Plugins p = plugins(); Class pluginClass; try { - pluginClass = p.pluginClass(pluginName, range); + pluginClass = p.pluginClass(pluginName); } catch (ClassNotFoundException cnfe) { throw new NotFoundException("Unknown plugin " + pluginName + "."); - } catch (VersionedPluginLoadingException e) { - throw new BadRequestException(e.getMessage(), e); } try (LoaderSwap loaderSwap = p.withClassLoader(pluginClass.getClassLoader())) { - Object plugin = p.newPlugin(pluginName, range); + Object plugin = p.newPlugin(pluginName); // Contains definitions coming from Connect framework ConfigDef baseConfigDefs = null; // Contains definitions specifically declared on the plugin ConfigDef pluginConfigDefs; if (plugin instanceof SinkConnector) { - baseConfigDefs = SinkConnectorConfig.enrichedConfigDef(p, pluginName); + baseConfigDefs = SinkConnectorConfig.configDef(); pluginConfigDefs = ((SinkConnector) plugin).config(); } else if (plugin instanceof SourceConnector) { - baseConfigDefs = SourceConnectorConfig.enrichedConfigDef(p, pluginName); + baseConfigDefs = SourceConnectorConfig.configDef(); pluginConfigDefs = ((SourceConnector) plugin).config(); } else if (plugin instanceof Converter) { pluginConfigDefs = ((Converter) plugin).config(); @@ -1211,9 +1114,8 @@ public List connectorPluginConfig(String pluginName, VersionRange // give precedence to the one defined by the plugin class // Preserve the ordering of properties as they're returned from each ConfigDef Map configsMap = new LinkedHashMap<>(pluginConfigDefs.configKeys()); - if (baseConfigDefs != null) { + if (baseConfigDefs != null) baseConfigDefs.configKeys().forEach(configsMap::putIfAbsent); - } List results = new ArrayList<>(); for (ConfigKey configKey : configsMap.values()) { @@ -1274,13 +1176,13 @@ public Map allLoggerLevels() { @Override public List setWorkerLoggerLevel(String namespace, String desiredLevelStr) { - String normalizedLevel = desiredLevelStr.toUpperCase(Locale.ROOT); + Level level = Level.toLevel(desiredLevelStr.toUpperCase(Locale.ROOT), null); - if (!loggers.isValidLevel(normalizedLevel)) { + if (level == null) { log.warn("Ignoring request to set invalid level '{}' for namespace {}", desiredLevelStr, namespace); - return List.of(); + return Collections.emptyList(); } - return loggers.setLevel(namespace, normalizedLevel); + return loggers.setLevel(namespace, level); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java index fc8bc7ca05061..76036d610d738 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractStatus.java @@ -34,29 +34,18 @@ public enum State { private final State state; private final String trace; private final String workerId; - private final String version; private final int generation; public AbstractStatus(T id, State state, String workerId, int generation, - String trace, - String version) { + String trace) { this.id = id; this.state = state; this.workerId = workerId; this.generation = generation; this.trace = trace; - this.version = version; - } - - public AbstractStatus(T id, - State state, - String workerId, - int generation, - String trace) { - this(id, state, workerId, generation, trace, null); } public T id() { @@ -79,17 +68,12 @@ public int generation() { return generation; } - public String version() { - return version; - } - @Override public String toString() { return "Status{" + "id=" + id + ", state=" + state + ", workerId='" + workerId + '\'' + - ", version='" + version + '\'' + ", generation=" + generation + '}'; } @@ -105,8 +89,7 @@ public boolean equals(Object o) { && Objects.equals(id, that.id) && state == that.state && Objects.equals(trace, that.trace) - && Objects.equals(workerId, that.workerId) - && Objects.equals(version, that.version); + && Objects.equals(workerId, that.workerId); } @Override @@ -115,7 +98,6 @@ public int hashCode() { result = 31 * result + (state != null ? state.hashCode() : 0); result = 31 * result + (trace != null ? trace.hashCode() : 0); result = 31 * result + (workerId != null ? workerId.hashCode() : 0); - result = 31 * result + (version != null ? version.hashCode() : 0); result = 31 * result + generation; return result; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java index 9a74d81770fa7..dfd1c0d06fdb1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTask.java @@ -24,7 +24,6 @@ import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeSum; @@ -43,12 +42,10 @@ import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.Stage; import org.apache.kafka.connect.runtime.errors.ToleranceType; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.source.SourceTaskContext; import org.apache.kafka.connect.storage.CloseableOffsetStorageReader; -import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConnectorOffsetBackingStore; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; @@ -71,7 +68,6 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Executor; import java.util.concurrent.TimeUnit; -import java.util.function.Function; import java.util.function.Supplier; import static org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG; @@ -188,14 +184,15 @@ protected abstract void producerSendFailed( protected final WorkerConfig workerConfig; + protected final WorkerSourceTaskContext sourceTaskContext; protected final ConnectorOffsetBackingStore offsetStore; protected final OffsetStorageWriter offsetWriter; protected final Producer producer; private final SourceTask task; - private final Plugin keyConverterPlugin; - private final Plugin valueConverterPlugin; - private final Plugin headerConverterPlugin; + private final Converter keyConverter; + private final Converter valueConverter; + private final HeaderConverter headerConverter; private final TopicAdmin admin; private final CloseableOffsetStorageReader offsetReader; private final SourceTaskMetricsGroup sourceTaskMetricsGroup; @@ -203,12 +200,10 @@ protected abstract void producerSendFailed( private final boolean topicTrackingEnabled; private final TopicCreation topicCreation; private final Executor closeExecutor; - private final String version; // Visible for testing List toSend; protected Map taskConfig; - protected WorkerSourceTaskContext sourceTaskContext; protected boolean started = false; private volatile boolean producerClosed = false; @@ -216,12 +211,11 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - ClusterConfigState configState, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, TransformationChain transformationChain, - WorkerTransactionContext workerTransactionContext, + WorkerSourceTaskContext sourceTaskContext, Producer producer, TopicAdmin admin, Map topicGroups, @@ -236,31 +230,28 @@ protected AbstractWorkerSourceTask(ConnectorTaskId id, RetryWithToleranceOperator retryWithToleranceOperator, StatusBackingStore statusBackingStore, Executor closeExecutor, - Supplier>> errorReportersSupplier, - TaskPluginsMetadata pluginsMetadata, - Function pluginLoaderSwapper) { + Supplier>> errorReportersSupplier) { super(id, statusListener, initialState, loader, connectMetrics, errorMetrics, retryWithToleranceOperator, transformationChain, errorReportersSupplier, - time, statusBackingStore, pluginsMetadata, pluginLoaderSwapper); + time, statusBackingStore); this.workerConfig = workerConfig; this.task = task; - this.keyConverterPlugin = keyConverterPlugin; - this.valueConverterPlugin = valueConverterPlugin; - this.headerConverterPlugin = headerConverterPlugin; + this.keyConverter = keyConverter; + this.valueConverter = valueConverter; + this.headerConverter = headerConverter; this.producer = producer; this.admin = admin; this.offsetReader = offsetReader; this.offsetWriter = offsetWriter; this.offsetStore = Objects.requireNonNull(offsetStore, "offset store cannot be null for source tasks"); this.closeExecutor = closeExecutor; - this.sourceTaskContext = new WorkerSourceTaskContext(offsetReader, id, configState, workerTransactionContext, pluginMetrics); + this.sourceTaskContext = sourceTaskContext; this.stopRequestedLatch = new CountDownLatch(1); this.sourceTaskMetricsGroup = new SourceTaskMetricsGroup(id, connectMetrics); this.topicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.topicCreation = TopicCreation.newTopicCreation(workerConfig, topicGroups); - this.version = task.version(); } @Override @@ -329,10 +320,7 @@ protected void close() { } Utils.closeQuietly(offsetReader, "offset reader"); Utils.closeQuietly(offsetStore::stop, "offset backing store"); - Utils.closeQuietly(headerConverterPlugin, "header converter"); - Utils.closeQuietly(keyConverterPlugin, "key converter"); - Utils.closeQuietly(valueConverterPlugin, "value converter"); - Utils.closeQuietly(pluginMetrics, "pluginMetrics"); + Utils.closeQuietly(headerConverter, "header converter"); } private void closeProducer(Duration duration) { @@ -394,11 +382,6 @@ public void execute() { finalOffsetCommit(false); } - @Override - public String taskVersion() { - return version; - } - /** * Try to send a batch of records. If a send fails and is retriable, this saves the remainder of the batch so it can * be retried after backing off. If a send fails and is not retriable, this will throw a ConnectException. @@ -500,19 +483,13 @@ protected ProducerRecord convertTransformedRecord(ProcessingCont return null; } - RecordHeaders headers = retryWithToleranceOperator.execute(context, () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverterPlugin.get().getClass()); + RecordHeaders headers = retryWithToleranceOperator.execute(context, () -> convertHeaderFor(record), Stage.HEADER_CONVERTER, headerConverter.getClass()); - byte[] key = retryWithToleranceOperator.execute(context, () -> { - try (LoaderSwap swap = pluginLoaderSwapper.apply(keyConverterPlugin.get().getClass().getClassLoader())) { - return keyConverterPlugin.get().fromConnectData(record.topic(), headers, record.keySchema(), record.key()); - } - }, Stage.KEY_CONVERTER, keyConverterPlugin.get().getClass()); + byte[] key = retryWithToleranceOperator.execute(context, () -> keyConverter.fromConnectData(record.topic(), headers, record.keySchema(), record.key()), + Stage.KEY_CONVERTER, keyConverter.getClass()); - byte[] value = retryWithToleranceOperator.execute(context, () -> { - try (LoaderSwap swap = pluginLoaderSwapper.apply(valueConverterPlugin.get().getClass().getClassLoader())) { - return valueConverterPlugin.get().fromConnectData(record.topic(), headers, record.valueSchema(), record.value()); - } - }, Stage.VALUE_CONVERTER, valueConverterPlugin.get().getClass()); + byte[] value = retryWithToleranceOperator.execute(context, () -> valueConverter.fromConnectData(record.topic(), headers, record.valueSchema(), record.value()), + Stage.VALUE_CONVERTER, valueConverter.getClass()); if (context.failed()) { return null; @@ -568,11 +545,8 @@ protected RecordHeaders convertHeaderFor(SourceRecord record) { String topic = record.topic(); for (Header header : headers) { String key = header.key(); - try (LoaderSwap swap = pluginLoaderSwapper.apply(headerConverterPlugin.get().getClass().getClassLoader())) { - byte[] rawHeader = headerConverterPlugin.get().fromConnectHeader(topic, key, header.schema(), header.value()); - result.add(key, rawHeader); - } - + byte[] rawHeader = headerConverter.fromConnectHeader(topic, key, header.schema(), header.value()); + result.add(key, rawHeader); } } return result; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java index 834086490002f..430cad52b8f6f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetrics.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Gauge; import org.apache.kafka.common.metrics.KafkaMetricsContext; import org.apache.kafka.common.metrics.MetricConfig; @@ -28,16 +27,9 @@ import org.apache.kafka.common.metrics.MetricsReporter; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.internals.MetricsUtils; -import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.utils.AppInfoParser; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; -import org.apache.kafka.connect.storage.Converter; -import org.apache.kafka.connect.storage.HeaderConverter; -import org.apache.kafka.connect.transforms.Transformation; -import org.apache.kafka.connect.transforms.predicates.Predicate; -import org.apache.kafka.connect.util.ConnectorTaskId; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -53,7 +45,6 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; -import java.util.function.Supplier; /** * The Connect metrics with configurable {@link MetricsReporter}s. @@ -175,74 +166,6 @@ public void stop() { AppInfoParser.unregisterAppInfo(JMX_PREFIX, workerId, metrics); } - public PluginMetricsImpl connectorPluginMetrics(String connectorId) { - return new PluginMetricsImpl(metrics, connectorPluginTags(connectorId)); - } - - private static Map connectorPluginTags(String connectorId) { - Map tags = new LinkedHashMap<>(); - tags.put("connector", connectorId); - return tags; - } - - PluginMetricsImpl taskPluginMetrics(ConnectorTaskId connectorTaskId) { - return new PluginMetricsImpl(metrics, taskPluginTags(connectorTaskId)); - } - - private static Map taskPluginTags(ConnectorTaskId connectorTaskId) { - Map tags = connectorPluginTags(connectorTaskId.connector()); - tags.put("task", String.valueOf(connectorTaskId.task())); - return tags; - } - - private static Supplier> converterPluginTags(ConnectorTaskId connectorTaskId, boolean isKey) { - return () -> { - Map tags = taskPluginTags(connectorTaskId); - tags.put("converter", isKey ? "key" : "value"); - return tags; - }; - } - - private static Supplier> headerConverterPluginTags(ConnectorTaskId connectorTaskId) { - return () -> { - Map tags = taskPluginTags(connectorTaskId); - tags.put("converter", "header"); - return tags; - }; - } - - private static Supplier> transformationPluginTags(ConnectorTaskId connectorTaskId, String transformationAlias) { - return () -> { - Map tags = taskPluginTags(connectorTaskId); - tags.put("transformation", transformationAlias); - return tags; - }; - } - - private static Supplier> predicatePluginTags(ConnectorTaskId connectorTaskId, String predicateAlias) { - return () -> { - Map tags = taskPluginTags(connectorTaskId); - tags.put("predicate", predicateAlias); - return tags; - }; - } - - public Plugin wrap(HeaderConverter headerConverter, ConnectorTaskId connectorTaskId) { - return Plugin.wrapInstance(headerConverter, metrics, headerConverterPluginTags(connectorTaskId)); - } - - public Plugin wrap(Converter converter, ConnectorTaskId connectorTaskId, boolean isKey) { - return Plugin.wrapInstance(converter, metrics, converterPluginTags(connectorTaskId, isKey)); - } - - public > Plugin> wrap(Transformation transformation, ConnectorTaskId connectorTaskId, String alias) { - return Plugin.wrapInstance(transformation, metrics, transformationPluginTags(connectorTaskId, alias)); - } - - public > Plugin> wrap(Predicate predicate, ConnectorTaskId connectorTaskId, String alias) { - return Plugin.wrapInstance(predicate, metrics, predicatePluginTags(connectorTaskId, alias)); - } - public static class MetricGroupId { private final String groupName; private final Map tags; @@ -253,7 +176,7 @@ public MetricGroupId(String groupName, Map tags) { Objects.requireNonNull(groupName); Objects.requireNonNull(tags); this.groupName = groupName; - this.tags = Collections.unmodifiableMap(new LinkedHashMap<>(tags)); // To ensure the order of insertion, we have to use Collections. + this.tags = Collections.unmodifiableMap(new LinkedHashMap<>(tags)); this.hc = Objects.hash(this.groupName, this.tags); StringBuilder sb = new StringBuilder(this.groupName); for (Map.Entry entry : this.tags.entrySet()) { @@ -299,7 +222,8 @@ public int hashCode() { public boolean equals(Object obj) { if (obj == this) return true; - if (obj instanceof MetricGroupId that) { + if (obj instanceof MetricGroupId) { + MetricGroupId that = (MetricGroupId) obj; return this.groupName.equals(that.groupName) && this.tags.equals(that.tags); } return false; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java index 7f879ea8f2aa8..1d144440f2c20 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectMetricsRegistry.java @@ -19,6 +19,8 @@ import org.apache.kafka.common.MetricNameTemplate; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; @@ -35,10 +37,6 @@ public class ConnectMetricsRegistry { public static final String WORKER_GROUP_NAME = "connect-worker-metrics"; public static final String WORKER_REBALANCE_GROUP_NAME = "connect-worker-rebalance-metrics"; public static final String TASK_ERROR_HANDLING_GROUP_NAME = "task-error-metrics"; - public static final String TRANSFORMS_GROUP = "connector-transform-metrics"; - public static final String PREDICATES_GROUP = "connector-predicate-metrics"; - public static final String TRANSFORM_TAG_NAME = "transform"; - public static final String PREDICATE_TAG_NAME = "predicate"; private final List allTemplates = new ArrayList<>(); public final MetricNameTemplate connectorStatus; @@ -61,17 +59,6 @@ public class ConnectMetricsRegistry { public final MetricNameTemplate taskBatchSizeAvg; public final MetricNameTemplate taskCommitFailurePercentage; public final MetricNameTemplate taskCommitSuccessPercentage; - public final MetricNameTemplate taskConnectorClass; - public final MetricNameTemplate taskConnectorClassVersion; - public final MetricNameTemplate taskConnectorType; - public final MetricNameTemplate taskClass; - public final MetricNameTemplate taskVersion; - public final MetricNameTemplate taskKeyConverterClass; - public final MetricNameTemplate taskValueConverterClass; - public final MetricNameTemplate taskKeyConverterVersion; - public final MetricNameTemplate taskValueConverterVersion; - public final MetricNameTemplate taskHeaderConverterClass; - public final MetricNameTemplate taskHeaderConverterVersion; public final MetricNameTemplate sourceRecordPollRate; public final MetricNameTemplate sourceRecordPollTotal; public final MetricNameTemplate sourceRecordWriteRate; @@ -128,12 +115,8 @@ public class ConnectMetricsRegistry { public final MetricNameTemplate transactionSizeMin; public final MetricNameTemplate transactionSizeMax; public final MetricNameTemplate transactionSizeAvg; - public final MetricNameTemplate transformClass; - public final MetricNameTemplate transformVersion; - public final MetricNameTemplate predicateClass; - public final MetricNameTemplate predicateVersion; - public final Map connectorStatusMetrics; + public Map connectorStatusMetrics; public ConnectMetricsRegistry() { this(new LinkedHashSet<>()); @@ -181,43 +164,6 @@ public ConnectMetricsRegistry(Set tags) { taskCommitSuccessPercentage = createTemplate("offset-commit-success-percentage", TASK_GROUP_NAME, "The average percentage of this task's offset commit attempts that succeeded.", workerTaskTags); - taskConnectorClass = createTemplate("connector-class", TASK_GROUP_NAME, "The name of the connector class.", workerTaskTags); - taskConnectorClassVersion = createTemplate("connector-version", TASK_GROUP_NAME, - "The version of the connector class, as reported by the connector.", workerTaskTags); - taskConnectorType = createTemplate("connector-type", TASK_GROUP_NAME, "The type of the connector. One of 'source' or 'sink'.", - workerTaskTags); - taskClass = createTemplate("task-class", TASK_GROUP_NAME, "The class name of the task.", workerTaskTags); - taskVersion = createTemplate("task-version", TASK_GROUP_NAME, "The version of the task.", workerTaskTags); - taskKeyConverterClass = createTemplate("key-converter-class", TASK_GROUP_NAME, - "The fully qualified class name from key.converter", workerTaskTags); - taskValueConverterClass = createTemplate("value-converter-class", TASK_GROUP_NAME, - "The fully qualified class name from value.converter", workerTaskTags); - taskKeyConverterVersion = createTemplate("key-converter-version", TASK_GROUP_NAME, - "The version instantiated for key.converter. May be undefined", workerTaskTags); - taskValueConverterVersion = createTemplate("value-converter-version", TASK_GROUP_NAME, - "The version instantiated for value.converter. May be undefined", workerTaskTags); - taskHeaderConverterClass = createTemplate("header-converter-class", TASK_GROUP_NAME, - "The fully qualified class name from header.converter", workerTaskTags); - taskHeaderConverterVersion = createTemplate("header-converter-version", TASK_GROUP_NAME, - "The version instantiated for header.converter. May be undefined", workerTaskTags); - - /* Transformation Metrics */ - Set transformTags = new LinkedHashSet<>(tags); - transformTags.addAll(workerTaskTags); - transformTags.add(TRANSFORM_TAG_NAME); - transformClass = createTemplate("transform-class", TRANSFORMS_GROUP, - "The class name of the transformation class", transformTags); - transformVersion = createTemplate("transform-version", TRANSFORMS_GROUP, - "The version of the transformation class", transformTags); - - /* Predicate Metrics */ - Set predicateTags = new LinkedHashSet<>(tags); - predicateTags.addAll(workerTaskTags); - predicateTags.add(PREDICATE_TAG_NAME); - predicateClass = createTemplate("predicate-class", PREDICATES_GROUP, - "The class name of the predicate class", predicateTags); - predicateVersion = createTemplate("predicate-version", PREDICATES_GROUP, - "The version of the predicate class", predicateTags); /* Source worker task level */ Set sourceTaskTags = new LinkedHashSet<>(tags); @@ -386,14 +332,14 @@ public ConnectMetricsRegistry(Set tags) { WORKER_GROUP_NAME, "The number of restarting tasks of the connector on the worker.", workerConnectorTags); - connectorStatusMetrics = Map.of( - connectorRunningTaskCount, TaskStatus.State.RUNNING, - connectorPausedTaskCount, TaskStatus.State.PAUSED, - connectorFailedTaskCount, TaskStatus.State.FAILED, - connectorUnassignedTaskCount, TaskStatus.State.UNASSIGNED, - connectorDestroyedTaskCount, TaskStatus.State.DESTROYED, - connectorRestartingTaskCount, TaskStatus.State.RESTARTING - ); + connectorStatusMetrics = new HashMap<>(); + connectorStatusMetrics.put(connectorRunningTaskCount, TaskStatus.State.RUNNING); + connectorStatusMetrics.put(connectorPausedTaskCount, TaskStatus.State.PAUSED); + connectorStatusMetrics.put(connectorFailedTaskCount, TaskStatus.State.FAILED); + connectorStatusMetrics.put(connectorUnassignedTaskCount, TaskStatus.State.UNASSIGNED); + connectorStatusMetrics.put(connectorDestroyedTaskCount, TaskStatus.State.DESTROYED); + connectorStatusMetrics.put(connectorRestartingTaskCount, TaskStatus.State.RESTARTING); + connectorStatusMetrics = Collections.unmodifiableMap(connectorStatusMetrics); /* Worker rebalance level */ Set rebalanceTags = new LinkedHashSet<>(tags); @@ -442,7 +388,7 @@ private MetricNameTemplate createTemplate(String name, String group, String doc, } public List getAllTemplates() { - return List.copyOf(allTemplates); + return Collections.unmodifiableList(allTemplates); } public String connectorTagName() { @@ -480,20 +426,4 @@ public String workerRebalanceGroupName() { public String taskErrorHandlingGroupName() { return TASK_ERROR_HANDLING_GROUP_NAME; } - - public String transformsGroupName() { - return TRANSFORMS_GROUP; - } - - public String transformsTagName() { - return TRANSFORM_TAG_NAME; - } - - public String predicatesGroupName() { - return PREDICATES_GROUP; - } - - public String predicateTagName() { - return PREDICATE_TAG_NAME; - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java index ff4d399db1a6f..cb604ad73eef5 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorConfig.java @@ -22,31 +22,24 @@ import org.apache.kafka.common.config.ConfigDef.Type; import org.apache.kafka.common.config.ConfigDef.Width; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.runtime.errors.ToleranceType; import org.apache.kafka.connect.runtime.isolation.PluginDesc; -import org.apache.kafka.connect.runtime.isolation.PluginType; -import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.PluginsRecommenders; -import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; import org.apache.kafka.connect.util.ConcreteSubClassValidator; -import org.apache.kafka.connect.util.ConnectorTaskId; import org.apache.kafka.connect.util.InstantiableClassValidator; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; -import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashSet; import java.util.List; @@ -89,11 +82,6 @@ public class ConnectorConfig extends AbstractConfig { " or use \"FileStreamSink\" or \"FileStreamSinkConnector\" to make the configuration a bit shorter"; private static final String CONNECTOR_CLASS_DISPLAY = "Connector class"; - public static final String CONNECTOR_VERSION = "connector." + WorkerConfig.PLUGIN_VERSION_SUFFIX; - private static final String CONNECTOR_VERSION_DOC = "Version of the connector."; - private static final String CONNECTOR_VERSION_DISPLAY = "Connector version"; - private static final ConfigDef.Validator CONNECTOR_VERSION_VALIDATOR = new PluginVersionValidator(); - public static final String KEY_CONVERTER_CLASS_CONFIG = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG; public static final String KEY_CONVERTER_CLASS_DOC = WorkerConfig.KEY_CONVERTER_CLASS_DOC; public static final String KEY_CONVERTER_CLASS_DISPLAY = "Key converter class"; @@ -102,12 +90,6 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); - public static final String KEY_CONVERTER_VERSION_CONFIG = WorkerConfig.KEY_CONVERTER_VERSION; - private static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; - private static final String KEY_CONVERTER_VERSION_DISPLAY = "Key converter version"; - private static final ConfigDef.Validator KEY_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); - - public static final String VALUE_CONVERTER_CLASS_CONFIG = WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG; public static final String VALUE_CONVERTER_CLASS_DOC = WorkerConfig.VALUE_CONVERTER_CLASS_DOC; public static final String VALUE_CONVERTER_CLASS_DISPLAY = "Value converter class"; @@ -116,24 +98,17 @@ public class ConnectorConfig extends AbstractConfig { new InstantiableClassValidator() ); - public static final String VALUE_CONVERTER_VERSION_CONFIG = WorkerConfig.VALUE_CONVERTER_VERSION; - private static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; - private static final String VALUE_CONVERTER_VERSION_DISPLAY = "Value converter version"; - private static final ConfigDef.Validator VALUE_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); - public static final String HEADER_CONVERTER_CLASS_CONFIG = WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG; public static final String HEADER_CONVERTER_CLASS_DOC = WorkerConfig.HEADER_CONVERTER_CLASS_DOC; public static final String HEADER_CONVERTER_CLASS_DISPLAY = "Header converter class"; + // The Connector config should not have a default for the header converter, since the absence of a config property means that + // the worker config settings should be used. Thus, we set the default to null here. + public static final String HEADER_CONVERTER_CLASS_DEFAULT = null; private static final ConfigDef.Validator HEADER_CONVERTER_CLASS_VALIDATOR = ConfigDef.CompositeValidator.of( ConcreteSubClassValidator.forSuperClass(HeaderConverter.class), new InstantiableClassValidator() ); - public static final String HEADER_CONVERTER_VERSION_CONFIG = WorkerConfig.HEADER_CONVERTER_VERSION; - private static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; - private static final String HEADER_CONVERTER_VERSION_DISPLAY = "Header converter version"; - private static final ConfigDef.Validator HEADER_CONVERTER_VERSION_VALIDATOR = new PluginVersionValidator(); - public static final String TASKS_MAX_CONFIG = "tasks.max"; private static final String TASKS_MAX_DOC = "Maximum number of tasks to use for this connector."; public static final int TASKS_MAX_DEFAULT = 1; @@ -212,11 +187,7 @@ public class ConnectorConfig extends AbstractConfig { public static final String CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX = "admin.override."; public static final String PREDICATES_PREFIX = "predicates."; - private static final PluginsRecommenders EMPTY_RECOMMENDER = new PluginsRecommenders(); - private static final ConverterDefaults CONVERTER_DEFAULTS = new ConverterDefaults(null, null); - - private final ConnectorConfig.EnrichedConnectorConfig enrichedConfig; - + private final EnrichedConnectorConfig enrichedConfig; private static class EnrichedConnectorConfig extends AbstractConfig { EnrichedConnectorConfig(ConfigDef configDef, Map props) { super(configDef, props); @@ -228,29 +199,19 @@ public Object get(String key) { } } - protected static ConfigDef configDef( - String defaultConnectorVersion, - ConverterDefaults keyConverterDefaults, - ConverterDefaults valueConverterDefaults, - ConverterDefaults headerConverterDefaults, - PluginsRecommenders recommender - ) { + public static ConfigDef configDef() { int orderInGroup = 0; int orderInErrorGroup = 0; return new ConfigDef() .define(NAME_CONFIG, Type.STRING, ConfigDef.NO_DEFAULT_VALUE, nonEmptyStringWithoutControlChars(), Importance.HIGH, NAME_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, NAME_DISPLAY) .define(CONNECTOR_CLASS_CONFIG, Type.STRING, Importance.HIGH, CONNECTOR_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.LONG, CONNECTOR_CLASS_DISPLAY) - .define(CONNECTOR_VERSION, Type.STRING, defaultConnectorVersion, CONNECTOR_VERSION_VALIDATOR, Importance.MEDIUM, CONNECTOR_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONNECTOR_VERSION_DISPLAY, recommender.connectorPluginVersionRecommender()) .define(TASKS_MAX_CONFIG, Type.INT, TASKS_MAX_DEFAULT, atLeast(TASKS_MIN_CONFIG), Importance.HIGH, TASKS_MAX_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASK_MAX_DISPLAY) .define(TASKS_MAX_ENFORCE_CONFIG, Type.BOOLEAN, TASKS_MAX_ENFORCE_DEFAULT, Importance.LOW, TASKS_MAX_ENFORCE_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, TASKS_MAX_ENFORCE_DISPLAY) - .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, keyConverterDefaults.type, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) - .define(KEY_CONVERTER_VERSION_CONFIG, Type.STRING, keyConverterDefaults.version, KEY_CONVERTER_VERSION_VALIDATOR, Importance.LOW, KEY_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_VERSION_DISPLAY, recommender.keyConverterPluginVersionRecommender()) - .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, valueConverterDefaults.type, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY, recommender.converterPluginRecommender()) - .define(VALUE_CONVERTER_VERSION_CONFIG, Type.STRING, valueConverterDefaults.version, VALUE_CONVERTER_VERSION_VALIDATOR, Importance.LOW, VALUE_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_VERSION_DISPLAY, recommender.valueConverterPluginVersionRecommender()) - .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, headerConverterDefaults.type, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY, recommender.headerConverterPluginRecommender()) - .define(HEADER_CONVERTER_VERSION_CONFIG, Type.STRING, headerConverterDefaults.version, HEADER_CONVERTER_VERSION_VALIDATOR, Importance.LOW, HEADER_CONVERTER_VERSION_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_VERSION_DISPLAY, recommender.headerConverterPluginVersionRecommender()) - .define(TRANSFORMS_CONFIG, Type.LIST, List.of(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) - .define(PREDICATES_CONFIG, Type.LIST, List.of(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) + .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, null, KEY_CONVERTER_CLASS_VALIDATOR, Importance.LOW, KEY_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, KEY_CONVERTER_CLASS_DISPLAY) + .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, null, VALUE_CONVERTER_CLASS_VALIDATOR, Importance.LOW, VALUE_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, VALUE_CONVERTER_CLASS_DISPLAY) + .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, HEADER_CONVERTER_CLASS_VALIDATOR, Importance.LOW, HEADER_CONVERTER_CLASS_DOC, COMMON_GROUP, ++orderInGroup, Width.SHORT, HEADER_CONVERTER_CLASS_DISPLAY) + .define(TRANSFORMS_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("transformation"), Importance.LOW, TRANSFORMS_DOC, TRANSFORMS_GROUP, ++orderInGroup, Width.LONG, TRANSFORMS_DISPLAY) + .define(PREDICATES_CONFIG, Type.LIST, Collections.emptyList(), aliasValidator("predicate"), Importance.LOW, PREDICATES_DOC, PREDICATES_GROUP, ++orderInGroup, Width.LONG, PREDICATES_DISPLAY) .define(CONFIG_RELOAD_ACTION_CONFIG, Type.STRING, CONFIG_RELOAD_ACTION_RESTART, in(CONFIG_RELOAD_ACTION_NONE, CONFIG_RELOAD_ACTION_RESTART), Importance.LOW, CONFIG_RELOAD_ACTION_DOC, COMMON_GROUP, ++orderInGroup, Width.MEDIUM, CONFIG_RELOAD_ACTION_DISPLAY) @@ -265,28 +226,6 @@ protected static ConfigDef configDef( ERRORS_LOG_ENABLE_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_ENABLE_DISPLAY) .define(ERRORS_LOG_INCLUDE_MESSAGES_CONFIG, Type.BOOLEAN, ERRORS_LOG_INCLUDE_MESSAGES_DEFAULT, Importance.MEDIUM, ERRORS_LOG_INCLUDE_MESSAGES_DOC, ERROR_GROUP, ++orderInErrorGroup, Width.SHORT, ERRORS_LOG_INCLUDE_MESSAGES_DISPLAY); - - } - - public static ConfigDef configDef() { - return configDef(null, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); - } - - // ConfigDef with additional defaults and recommenders - public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { - PluginsRecommenders recommender = new PluginsRecommenders(plugins); - ConverterDefaults keyConverterDefaults = converterDefaults(plugins, KEY_CONVERTER_CLASS_CONFIG, - WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION, connProps, workerConfig, PluginType.CONVERTER); - ConverterDefaults valueConverterDefaults = converterDefaults(plugins, VALUE_CONVERTER_CLASS_CONFIG, - WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION, connProps, workerConfig, PluginType.CONVERTER); - ConverterDefaults headerConverterDefaults = converterDefaults(plugins, HEADER_CONVERTER_CLASS_CONFIG, - WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION, connProps, workerConfig, PluginType.HEADER_CONVERTER); - return configDef(plugins.latestVersion(connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG), PluginType.SINK, PluginType.SOURCE), - keyConverterDefaults, valueConverterDefaults, headerConverterDefaults, recommender); - } - - public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { - return configDef(plugins.latestVersion(connectorClass, PluginType.SINK, PluginType.SOURCE), CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, CONVERTER_DEFAULTS, EMPTY_RECOMMENDER); } private static ConfigDef.CompositeValidator aliasValidator(String kind) { @@ -302,7 +241,7 @@ private static ConfigDef.CompositeValidator aliasValidator(String kind) { } public ConnectorConfig(Plugins plugins) { - this(plugins, Map.of()); + this(plugins, Collections.emptyMap()); } public ConnectorConfig(Plugins plugins, Map props) { @@ -332,7 +271,7 @@ public long errorMaxDelayInMillis() { public ToleranceType errorToleranceType() { String tolerance = getString(ERRORS_TOLERANCE_CONFIG); - for (ToleranceType type : ToleranceType.values()) { + for (ToleranceType type: ToleranceType.values()) { if (type.name().equalsIgnoreCase(tolerance)) { return type; } @@ -361,7 +300,7 @@ public boolean enforceTasksMax() { * {@link Transformation transformations} and {@link Predicate predicates} * as they are specified in the {@link #TRANSFORMS_CONFIG} and {@link #PREDICATES_CONFIG} */ - public > List> transformationStages(Plugins plugins, ConnectorTaskId connectorTaskId, ConnectMetrics metrics) { + public > List> transformationStages() { final List transformAliases = getList(TRANSFORMS_CONFIG); final List> transformations = new ArrayList<>(transformAliases.size()); @@ -369,38 +308,20 @@ public > List> transformationS final String prefix = TRANSFORMS_CONFIG + "." + alias + "."; try { - final String typeConfig = prefix + "type"; - final String versionConfig = prefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; - final Transformation transformation = getTransformationOrPredicate(plugins, typeConfig, versionConfig); + @SuppressWarnings("unchecked") + final Transformation transformation = Utils.newInstance(getClass(prefix + "type"), Transformation.class); Map configs = originalsWithPrefix(prefix); - String predicateAlias = (String) configs.remove(TransformationStage.PREDICATE_CONFIG); + Object predicateAlias = configs.remove(TransformationStage.PREDICATE_CONFIG); Object negate = configs.remove(TransformationStage.NEGATE_CONFIG); transformation.configure(configs); - Plugin> transformationPlugin = metrics.wrap(transformation, connectorTaskId, alias); if (predicateAlias != null) { String predicatePrefix = PREDICATES_PREFIX + predicateAlias + "."; - final String predicateTypeConfig = predicatePrefix + "type"; - final String predicateVersionConfig = predicatePrefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; - Predicate predicate = getTransformationOrPredicate(plugins, predicateTypeConfig, predicateVersionConfig); + @SuppressWarnings("unchecked") + Predicate predicate = Utils.newInstance(getClass(predicatePrefix + "type"), Predicate.class); predicate.configure(originalsWithPrefix(predicatePrefix)); - Plugin> predicatePlugin = metrics.wrap(predicate, connectorTaskId, predicateAlias); - transformations.add(new TransformationStage<>( - predicatePlugin, - predicateAlias, - plugins.pluginVersion(predicate.getClass().getName(), predicate.getClass().getClassLoader(), PluginType.PREDICATE), - negate != null && Boolean.parseBoolean(negate.toString()), - transformationPlugin, - alias, - plugins.pluginVersion(transformation.getClass().getName(), transformation.getClass().getClassLoader(), PluginType.TRANSFORMATION), - plugins.safeLoaderSwapper()) - ); + transformations.add(new TransformationStage<>(predicate, negate != null && Boolean.parseBoolean(negate.toString()), transformation)); } else { - transformations.add(new TransformationStage<>( - transformationPlugin, - alias, - plugins.pluginVersion(transformation.getClass().getName(), transformation.getClass().getClassLoader(), PluginType.TRANSFORMATION), - plugins.safeLoaderSwapper()) - ); + transformations.add(new TransformationStage<>(transformation)); } } catch (Exception e) { throw new ConnectException(e); @@ -410,25 +331,15 @@ public > List> transformationS return transformations; } - @SuppressWarnings("unchecked") - private T getTransformationOrPredicate(Plugins plugins, String classConfig, String versionConfig) { - try { - VersionRange range = PluginUtils.connectorVersionRequirement(getString(versionConfig)); - VersionRange connectorRange = PluginUtils.connectorVersionRequirement(getString(CONNECTOR_VERSION)); - return (T) plugins.newPlugin(getClass(classConfig).getName(), range, plugins.pluginLoader(getString(CONNECTOR_CLASS_CONFIG), connectorRange)); - } catch (Exception e) { - throw new ConnectException(e); - } - } - /** * Returns an enriched {@link ConfigDef} building upon the {@code ConfigDef}, using the current configuration specified in {@code props} as an input. *

          * {@code requireFullConfig} specifies whether required config values that are missing should cause an exception to be thrown. */ + @SuppressWarnings({"rawtypes", "unchecked"}) public static ConfigDef enrich(Plugins plugins, ConfigDef baseConfigDef, Map props, boolean requireFullConfig) { ConfigDef newDef = new ConfigDef(baseConfigDef); - new EnrichablePlugin>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP, PluginType.TRANSFORMATION, + new EnrichablePlugin>("Transformation", TRANSFORMS_CONFIG, TRANSFORMS_GROUP, (Class) Transformation.class, props, requireFullConfig) { @Override @@ -447,19 +358,19 @@ protected ConfigDef initialConfigDef() { } @Override - protected Stream> configDefsForClass(String typeConfig, String versionConfig, Plugins plugins) { - return super.configDefsForClass(typeConfig, versionConfig, plugins) - .filter(entry -> { - // The implicit parameters mask any from the transformer with the same name - if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) - || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { - log.warn("Transformer config {} is masked by implicit config of that name", - entry.getKey()); - return false; - } else { - return true; - } - }); + protected Stream> configDefsForClass(String typeConfig) { + return super.configDefsForClass(typeConfig) + .filter(entry -> { + // The implicit parameters mask any from the transformer with the same name + if (TransformationStage.PREDICATE_CONFIG.equals(entry.getKey()) + || TransformationStage.NEGATE_CONFIG.equals(entry.getKey())) { + log.warn("Transformer config {} is masked by implicit config of that name", + entry.getKey()); + return false; + } else { + return true; + } + }); } @Override @@ -477,16 +388,10 @@ protected void validateProps(String prefix) { "but there is no config '" + prefixedPredicate + "' defining a predicate to be negated."); } } - - @Override - protected ConfigDef.Recommender versionRecommender(String typeConfig) { - return new PluginsRecommenders(plugins).transformationPluginRecommender(typeConfig); - } - - }.enrich(newDef, plugins); + }.enrich(newDef); new EnrichablePlugin>("Predicate", PREDICATES_CONFIG, PREDICATES_GROUP, - PluginType.PREDICATE, props, requireFullConfig) { + (Class) Predicate.class, props, requireFullConfig) { @Override protected Set>> plugins() { return plugins.predicates(); @@ -496,87 +401,10 @@ protected Set>> plugins() { protected ConfigDef config(Predicate predicate) { return predicate.config(); } - - @Override - protected ConfigDef.Recommender versionRecommender(String typeConfig) { - return new PluginsRecommenders(plugins).predicatePluginRecommender(typeConfig); - } - - }.enrich(newDef, plugins); - + }.enrich(newDef); return newDef; } - private static ConverterDefaults converterDefaults( - Plugins plugins, - String connectorConverterConfig, - String workerConverterConfig, - String workerConverterVersionConfig, - Map connectorProps, - WorkerConfig workerConfig, - PluginType converterType - ) { - /* - if a converter is specified in the connector config it overrides the worker config for the corresponding converter - otherwise the worker config is used, hence if the converter is not provided in the connector config, the default - is the one provided in the worker config - - for converters which version is used depends on a several factors with multi-versioning support - A. If the converter class is provided as part of the connector properties - 1. if the version is not provided, - - if the converter is packaged with the connector then, the packaged version is used - - if the converter is not packaged with the connector, the latest version is used - 2. if the version is provided, the provided version is used - B. If the converter class is not provided as part of the connector properties, but provided as part of the worker properties - 1. if the version is not provided, the latest version is used - 2. if the version is provided, the provided version is used - C. If the converter class is not provided as part of the connector properties and not provided as part of the worker properties, - the converter to use is unknown hence no default version can be determined (null) - - Note: Connect when using service loading has an issue outlined in KAFKA-18119. The issue means that the above - logic does not hold currently for clusters using service loading when converters are defined in the connector. - However, the logic to determine the default should ideally follow the one outlined above, and the code here - should still show the correct default version regardless of the bug. - */ - final String connectorConverter = connectorProps.get(connectorConverterConfig); - // since header converter defines a default in the worker config we need to handle it separately - final String workerConverter = workerConverterConfig.equals(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG) ? - workerConfig.getClass(workerConverterConfig).getName() : workerConfig.originalsStrings().get(workerConverterConfig); - final String connectorClass = connectorProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - final String connectorVersion = connectorProps.get(ConnectorConfig.CONNECTOR_VERSION); - String type = null; - if (connectorClass == null || (connectorConverter == null && workerConverter == null)) { - return new ConverterDefaults(null, null); - } - // update the default of connector converter based on if the worker converter is provided - type = workerConverter; - - String version = null; - if (connectorConverter != null) { - version = fetchPluginVersion(plugins, connectorClass, connectorVersion, connectorConverter, converterType); - } else { - version = workerConfig.originalsStrings().get(workerConverterVersionConfig); - if (version == null) { - version = plugins.latestVersion(workerConverter, converterType); - } - } - return new ConverterDefaults(type, version); - } - - private static String fetchPluginVersion(Plugins plugins, String connectorClass, String connectorVersion, String pluginName, PluginType pluginType) { - if (pluginName == null || connectorClass == null) { - return null; - } - try { - VersionRange range = PluginUtils.connectorVersionRequirement(connectorVersion); - return plugins.pluginVersion(pluginName, plugins.pluginLoader(connectorClass, range), pluginType); - } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { - // these errors should be captured in other places, so we can ignore them here - log.warn("Failed to determine default plugin version for {}", connectorClass, e); - } - return null; - } - /** * An abstraction over "enrichable plugins" ({@link Transformation}s and {@link Predicate}s) used for computing the * contribution to a Connectors ConfigDef. @@ -591,27 +419,24 @@ abstract static class EnrichablePlugin { private final String aliasKind; private final String aliasConfig; private final String aliasGroup; - private final PluginType pluginType; private final Class baseClass; private final Map props; private final boolean requireFullConfig; - @SuppressWarnings("unchecked") public EnrichablePlugin( String aliasKind, - String aliasConfig, String aliasGroup, PluginType pluginType, + String aliasConfig, String aliasGroup, Class baseClass, Map props, boolean requireFullConfig) { this.aliasKind = aliasKind; this.aliasConfig = aliasConfig; this.aliasGroup = aliasGroup; - this.pluginType = pluginType; - this.baseClass = (Class) pluginType.superClass(); + this.baseClass = baseClass; this.props = props; this.requireFullConfig = requireFullConfig; } /** Add the configs for this alias to the given {@code ConfigDef}. */ - void enrich(ConfigDef newDef, Plugins plugins) { + void enrich(ConfigDef newDef) { Object aliases = ConfigDef.parseType(aliasConfig, props.get(aliasConfig), Type.LIST); if (!(aliases instanceof List)) { return; @@ -619,71 +444,49 @@ void enrich(ConfigDef newDef, Plugins plugins) { LinkedHashSet uniqueAliases = new LinkedHashSet<>((List) aliases); for (Object o : uniqueAliases) { - if (!(o instanceof String alias)) { + if (!(o instanceof String)) { throw new ConfigException("Item in " + aliasConfig + " property is not of " + "type String"); } + String alias = (String) o; final String prefix = aliasConfig + "." + alias + "."; final String group = aliasGroup + ": " + alias; int orderInGroup = 0; final String typeConfig = prefix + "type"; - final String versionConfig = prefix + WorkerConfig.PLUGIN_VERSION_SUFFIX; - final String defaultVersion = fetchPluginVersion(plugins, props.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG), - props.get(ConnectorConfig.CONNECTOR_VERSION), props.get(typeConfig), pluginType); - - // Add the class configuration final ConfigDef.Validator typeValidator = ConfigDef.LambdaValidator.with( - (String name, Object value) -> { - validateProps(prefix); - // The value will be null if the class couldn't be found; no point in performing follow-up validation - if (value != null) { - getConfigDefFromPlugin(typeConfig, ((Class) value).getName(), props.getOrDefault(versionConfig, defaultVersion), plugins); - } - }, - () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); + (String name, Object value) -> { + validateProps(prefix); + // The value will be null if the class couldn't be found; no point in performing follow-up validation + if (value != null) { + getConfigDefFromConfigProvidingClass(typeConfig, (Class) value); + } + }, + () -> "valid configs for " + alias + " " + aliasKind.toLowerCase(Locale.ENGLISH)); newDef.define(typeConfig, Type.CLASS, ConfigDef.NO_DEFAULT_VALUE, typeValidator, Importance.HIGH, "Class for the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, baseClass.getSimpleName() + " type for " + alias, - List.of(), new ClassRecommender()); - - // Add the version configuration - final ConfigDef.Validator versionValidator = (name, value) -> { - if (value != null) { - try { - getConfigDefFromPlugin(typeConfig, props.get(typeConfig), (String) value, plugins); - } catch (VersionedPluginLoadingException e) { - throw e; - } catch (Exception e) { - // ignore any other exception here as they are not related to version validation and - // will be captured in the validation of the class configuration - } - } - }; - newDef.define(versionConfig, Type.STRING, defaultVersion, versionValidator, Importance.HIGH, - "Version of the '" + alias + "' " + aliasKind.toLowerCase(Locale.ENGLISH) + ".", group, orderInGroup++, Width.LONG, - baseClass.getSimpleName() + " version for " + alias, - List.of(), versionRecommender(typeConfig)); - - final ConfigDef configDef = populateConfigDef(typeConfig, versionConfig, plugins); + Collections.emptyList(), new ClassRecommender()); + + final ConfigDef configDef = populateConfigDef(typeConfig); if (configDef == null) continue; newDef.embed(prefix, group, orderInGroup, configDef); } } /** Subclasses can add extra validation of the {@link #props}. */ - protected void validateProps(String prefix) { - } + protected void validateProps(String prefix) { } /** * Populates the ConfigDef according to the configs returned from {@code configs()} method of class * named in the {@code ...type} parameter of the {@code props}. */ - protected ConfigDef populateConfigDef(String typeConfig, String versionConfig, Plugins plugins) { + protected ConfigDef populateConfigDef(String typeConfig) { final ConfigDef configDef = initialConfigDef(); try { - configDefsForClass(typeConfig, versionConfig, plugins) + configDefsForClass(typeConfig) .forEach(entry -> configDef.define(entry.getValue())); + } catch (ConfigException e) { if (requireFullConfig) { throw e; @@ -698,11 +501,9 @@ protected ConfigDef populateConfigDef(String typeConfig, String versionConfig, P * Return a stream of configs provided by the {@code configs()} method of class * named in the {@code ...type} parameter of the {@code props}. */ - protected Stream> configDefsForClass(String typeConfig, String versionConfig, Plugins plugins) { - if (props.get(typeConfig) == null) { - throw new ConfigException(typeConfig, null, "Not a " + baseClass.getSimpleName()); - } - return getConfigDefFromPlugin(typeConfig, props.get(typeConfig), props.get(versionConfig), plugins) + protected Stream> configDefsForClass(String typeConfig) { + final Class cls = (Class) ConfigDef.parseType(typeConfig, props.get(typeConfig), Type.CLASS); + return getConfigDefFromConfigProvidingClass(typeConfig, cls) .configKeys().entrySet().stream(); } @@ -711,47 +512,31 @@ protected ConfigDef initialConfigDef() { return new ConfigDef(); } - @SuppressWarnings("unchecked") - ConfigDef getConfigDefFromPlugin(String key, String pluginClass, String version, Plugins plugins) { - String connectorClass = props.get(CONNECTOR_CLASS_CONFIG); - if (pluginClass == null || connectorClass == null) { - // if transformation class is null or connector class is null, we return empty as these validations are done in respective validators - return new ConfigDef(); - } - VersionRange connectorVersionRange; - try { - connectorVersionRange = PluginUtils.connectorVersionRequirement(props.get(CONNECTOR_VERSION)); - } catch (InvalidVersionSpecificationException e) { - // this should be caught in connector version validation - return new ConfigDef(); - } - - VersionRange pluginVersion; - try { - pluginVersion = PluginUtils.connectorVersionRequirement(version); - } catch (InvalidVersionSpecificationException e) { - throw new VersionedPluginLoadingException(e.getMessage()); + /** + * Return {@link ConfigDef} from {@code cls}, which is expected to be a non-null {@code Class}, + * by instantiating it and invoking {@link #config(T)}. + * @param key + * @param cls The subclass of the baseclass. + */ + ConfigDef getConfigDefFromConfigProvidingClass(String key, Class cls) { + if (cls == null) { + throw new ConfigException(key, null, "Not a " + baseClass.getSimpleName()); } - - // validate that the plugin class is a subclass of the base class - final Class cls = (Class) ConfigDef.parseType(key, props.get(key), Type.CLASS); Utils.ensureConcreteSubclass(baseClass, cls); - T plugin; + T pluginInstance; try { - plugin = (T) plugins.newPlugin(pluginClass, pluginVersion, plugins.pluginLoader(connectorClass, connectorVersionRange)); - } catch (VersionedPluginLoadingException e) { - throw e; + pluginInstance = Utils.newInstance(cls, baseClass); } catch (Exception e) { - throw new ConfigException(key, pluginClass, "Error getting config definition from " + baseClass.getSimpleName() + ": " + e.getMessage()); + throw new ConfigException(key, String.valueOf(cls), "Error getting config definition from " + baseClass.getSimpleName() + ": " + e.getMessage()); } - ConfigDef configDef = config(plugin); + ConfigDef configDef = config(pluginInstance); if (null == configDef) { throw new ConnectException( - String.format( - "%s.config() must return a ConfigDef that is not null.", - plugin.getClass().getName() - ) + String.format( + "%s.config() must return a ConfigDef that is not null.", + cls.getName() + ) ); } return configDef; @@ -770,8 +555,6 @@ ConfigDef getConfigDefFromPlugin(String key, String pluginClass, String version, */ protected abstract Set> plugins(); - protected abstract ConfigDef.Recommender versionRecommender(String typeConfig); - /** * Recommend bundled transformations or predicates. */ @@ -779,7 +562,11 @@ final class ClassRecommender implements ConfigDef.Recommender { @Override public List validValues(String name, Map parsedConfig) { - return plugins().stream().map(p -> (Object) p.pluginClass()).toList(); + List result = new ArrayList<>(); + for (PluginDesc plugin : plugins()) { + result.add(plugin.pluginClass()); + } + return Collections.unmodifiableList(result); } @Override @@ -789,19 +576,4 @@ public boolean visible(String name, Map parsedConfig) { } } - private record ConverterDefaults(String type, String version) { - } - - public static class PluginVersionValidator implements ConfigDef.Validator { - - @Override - public void ensureValid(String name, Object value) { - - try { - PluginUtils.connectorVersionRequirement((String) value); - } catch (InvalidVersionSpecificationException e) { - throw new VersionedPluginLoadingException(e.getMessage()); - } - } - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java index d704a3374e296..10ed188cdf883 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ConnectorStatus.java @@ -19,12 +19,8 @@ public class ConnectorStatus extends AbstractStatus { - public ConnectorStatus(String connector, State state, String msg, String workerUrl, int generation, String version) { - super(connector, state, workerUrl, generation, msg, version); - } - - public ConnectorStatus(String connector, State state, String workerUrl, int generation, String version) { - super(connector, state, workerUrl, generation, null, version); + public ConnectorStatus(String connector, State state, String msg, String workerUrl, int generation) { + super(connector, state, workerUrl, generation, msg); } public ConnectorStatus(String connector, State state, String workerUrl, int generation) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java index d6f4ffbd4b9be..d837776be3829 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTask.java @@ -20,7 +20,6 @@ import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; import org.apache.kafka.common.errors.InvalidProducerEpochException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; @@ -32,7 +31,6 @@ import org.apache.kafka.connect.runtime.errors.ErrorReporter; import org.apache.kafka.connect.runtime.errors.ProcessingContext; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.source.SourceTask.TransactionBoundary; @@ -58,7 +56,6 @@ import java.util.Optional; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import java.util.function.Supplier; @@ -81,9 +78,9 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, TransformationChain transformationChain, Producer producer, TopicAdmin admin, @@ -103,13 +100,11 @@ public ExactlyOnceWorkerSourceTask(ConnectorTaskId id, Executor closeExecutor, Runnable preProducerCheck, Runnable postProducerCheck, - Supplier>> errorReportersSupplier, - TaskPluginsMetadata pluginsMetadata, - Function pluginLoaderSwapper) { - super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, - buildTransactionContext(sourceConfig), + Supplier>> errorReportersSupplier) { + super(id, task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, + new WorkerSourceTaskContext(offsetReader, id, configState, buildTransactionContext(sourceConfig)), producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, - loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginsMetadata, pluginLoaderSwapper); + loader, time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier); this.transactionOpen = false; this.committableRecords = new LinkedHashMap<>(); @@ -429,22 +424,24 @@ private TransactionBoundaryManager buildTransactionManager( SourceConnectorConfig sourceConfig, WorkerTransactionContext transactionContext) { TransactionBoundary boundary = sourceConfig.transactionBoundary(); - return switch (boundary) { - case POLL -> new TransactionBoundaryManager() { - @Override - protected boolean shouldCommitTransactionForBatch(long currentTimeMs) { - return true; - } + switch (boundary) { + case POLL: + return new TransactionBoundaryManager() { + @Override + protected boolean shouldCommitTransactionForBatch(long currentTimeMs) { + return true; + } - @Override - protected boolean shouldCommitFinalTransaction() { - return true; - } - }; - case INTERVAL -> { + @Override + protected boolean shouldCommitFinalTransaction() { + return true; + } + }; + + case INTERVAL: long transactionBoundaryInterval = Optional.ofNullable(sourceConfig.transactionBoundaryInterval()) .orElse(workerConfig.offsetCommitInterval()); - yield new TransactionBoundaryManager() { + return new TransactionBoundaryManager() { private final long commitInterval = transactionBoundaryInterval; private long lastCommit; @@ -464,14 +461,14 @@ protected boolean shouldCommitTransactionForBatch(long currentTimeMs) { } @Override - protected boolean shouldCommitFinalTransaction() { + protected boolean shouldCommitFinalTransaction() { return true; } }; - } - case CONNECTOR -> { + + case CONNECTOR: Objects.requireNonNull(transactionContext, "Transaction context must be provided when using connector-defined transaction boundaries"); - yield new TransactionBoundaryManager() { + return new TransactionBoundaryManager() { @Override protected boolean shouldCommitFinalTransaction() { return shouldCommitTransactionForBatch(time.milliseconds()); @@ -512,8 +509,9 @@ private void maybeAbortTransaction() { transactionOpen = false; } }; - } - }; + default: + throw new IllegalArgumentException("Unrecognized transaction boundary: " + boundary); + } } TransactionMetricsGroup transactionMetricsGroup() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java index a1a1505c98320..52be401bbfaba 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Herder.java @@ -32,11 +32,10 @@ import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; -import org.apache.maven.artifact.versioning.VersionRange; - import java.util.Collection; import java.util.List; import java.util.Map; +import java.util.Objects; /** *

          @@ -323,8 +322,6 @@ default void validateConnectorConfig(Map connectorConfig, Callba */ List connectorPluginConfig(String pluginName); - List connectorPluginConfig(String pluginName, VersionRange version); - /** * Get the current offsets for a connector. * @param connName the name of the connector whose offsets are to be retrieved @@ -380,17 +377,40 @@ default void validateConnectorConfig(Map connectorConfig, Callba */ void setClusterLoggerLevel(String namespace, String level); - /** - * Get the ConnectMetrics from the worker for this herder - * @return the ConnectMetrics - */ - ConnectMetrics connectMetrics(); - enum ConfigReloadAction { NONE, RESTART } - record Created(boolean created, T result) { + class Created { + private final boolean created; + private final T result; + + public Created(boolean created, T result) { + this.created = created; + this.result = result; + } + + public boolean created() { + return created; + } + + public T result() { + return result; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + Created created1 = (Created) o; + return Objects.equals(created, created1.created) && + Objects.equals(result, created1.result); + } + + @Override + public int hashCode() { + return Objects.hash(created, result); + } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java index 8568e91f40088..d0a90064d5bd2 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/HerderConnectorContext.java @@ -16,9 +16,6 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.metrics.PluginMetrics; -import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; -import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.errors.ConnectException; import org.slf4j.Logger; @@ -33,13 +30,11 @@ public class HerderConnectorContext implements CloseableConnectorContext { private final AbstractHerder herder; private final String connectorName; - private final PluginMetricsImpl pluginMetrics; private volatile boolean closed; - public HerderConnectorContext(AbstractHerder herder, String connectorName, PluginMetricsImpl pluginMetrics) { + public HerderConnectorContext(AbstractHerder herder, String connectorName) { this.herder = herder; this.connectorName = connectorName; - this.pluginMetrics = pluginMetrics; this.closed = false; } @@ -68,14 +63,8 @@ public void raiseError(Exception e) { herder.onFailure(connectorName, e); } - @Override - public PluginMetrics pluginMetrics() { - return pluginMetrics; - } - @Override public void close() { - Utils.closeQuietly(pluginMetrics, "Plugin metrics for " + connectorName); closed = true; } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java index 1a79698ae9f56..1593e3708fdf0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Loggers.java @@ -34,7 +34,6 @@ import java.util.Map; import java.util.Objects; import java.util.TreeMap; -import java.util.concurrent.ConcurrentHashMap; import java.util.stream.Collectors; /** @@ -44,7 +43,7 @@ * This class is thread-safe; concurrent calls to all of its public methods from any number * of threads are permitted. */ -public abstract class Loggers { +public class Loggers { private static final Logger log = LoggerFactory.getLogger(Loggers.class); @@ -53,277 +52,184 @@ public abstract class Loggers { /** * Log4j uses "root" (case-insensitive) as name of the root logger. * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. - *

          + * * While log4j2 changed the root logger's name to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]), * for backward-compatibility purposes, we accept both empty string and "root" as valid root logger names. * This is why we have a dedicated definition that includes both values. - *

          */ private static final List VALID_ROOT_LOGGER_NAMES = List.of(LogManager.ROOT_LOGGER_NAME, ROOT_LOGGER_NAME); - final Time time; + private final Time time; /** * Maps logger names to their last modification timestamps. * Note: The logger name "root" refers to the actual root logger of log4j2. */ - final Map lastModifiedTimes; + private final Map lastModifiedTimes; - /** - * Creates a {@link Loggers} instance appropriate for the current environment. - * - * @param time A time source. - * @return A new {@link Loggers} instance, never {@link null}. - */ - public static Loggers newInstance(Time time) { - Objects.requireNonNull(time); - try { - return new Log4jLoggers(time); - } catch (ClassCastException | LinkageError e) { - log.info("No supported logging implementation found. Logging configuration endpoint will be disabled."); - return new NoOpLoggers(time); - } catch (Exception e) { - log.warn("A problem occurred, while initializing the logging controller. Logging configuration endpoint will be disabled.", e); - return new NoOpLoggers(time); - } - } - - private Loggers(Time time) { + public Loggers(Time time) { this.time = time; - this.lastModifiedTimes = new ConcurrentHashMap<>(); + this.lastModifiedTimes = new HashMap<>(); } /** * Retrieve the current level for a single logger. - * - * @param loggerName the name of the logger to retrieve the level for; may not be null + * @param logger the name of the logger to retrieve the level for; may not be null * @return the current level (falling back on the effective level if necessary) of the logger, * or null if no logger with the specified name exists */ - public abstract LoggerLevel level(String loggerName); + public synchronized LoggerLevel level(String logger) { + Objects.requireNonNull(logger, "Logger may not be null"); + + org.apache.logging.log4j.Logger foundLogger = null; + if (isValidRootLoggerName(logger)) { + foundLogger = rootLogger(); + } else { + var currentLoggers = currentLoggers().values(); + // search within existing loggers for the given name. + // using LogManger.getLogger() will create a logger if it doesn't exist + // (potential leak since these don't get cleaned up). + for (org.apache.logging.log4j.Logger currentLogger : currentLoggers) { + if (logger.equals(currentLogger.getName())) { + foundLogger = currentLogger; + break; + } + } + } + + if (foundLogger == null) { + log.warn("Unable to find level for logger {}", logger); + return null; + } + + return loggerLevel(foundLogger); + } /** * Retrieve the current levels of all known loggers - * * @return the levels of all known loggers; may be empty, but never null */ - public abstract Map allLevels(); + public synchronized Map allLevels() { + return currentLoggers() + .values() + .stream() + .filter(logger -> !logger.getLevel().equals(Level.OFF)) + .collect(Collectors.toMap( + this::getLoggerName, + this::loggerLevel, + (existing, replacing) -> replacing, + TreeMap::new) + ); + } /** * Set the level for the specified logger and all of its children - * * @param namespace the name of the logger to adjust along with its children; may not be null - * @param level the level to set for the logger and its children; may not be null + * @param level the level to set for the logger and its children; may not be null * @return all loggers that were affected by this action, sorted by their natural ordering; * may be empty, but never null */ - public abstract List setLevel(String namespace, String level); - - public abstract boolean isValidLevel(String level); - - static class Log4jLoggers extends Loggers { - - // package-private for testing - final LoggerContext loggerContext; - - // Package-private for testing - Log4jLoggers(Time time) { - super(time); - loggerContext = (LoggerContext) LogManager.getContext(false); - } - - @Override - public LoggerLevel level(String logger) { - Objects.requireNonNull(logger, "Logger may not be null"); - - org.apache.logging.log4j.Logger foundLogger = null; - if (isValidRootLoggerName(logger)) { - foundLogger = rootLogger(); - } else { - var currentLoggers = currentLoggers().values(); - // search within existing loggers for the given name. - // using LogManger.getLogger() will create a logger if it doesn't exist - // (potential leak since these don't get cleaned up). - for (org.apache.logging.log4j.Logger currentLogger : currentLoggers) { - if (logger.equals(currentLogger.getName())) { - foundLogger = currentLogger; - break; - } - } + public synchronized List setLevel(String namespace, Level level) { + Objects.requireNonNull(namespace, "Logging namespace may not be null"); + Objects.requireNonNull(level, "Level may not be null"); + String internalNameSpace = isValidRootLoggerName(namespace) ? LogManager.ROOT_LOGGER_NAME : namespace; + + log.info("Setting level of namespace {} and children to {}", internalNameSpace, level); + + var loggers = loggers(internalNameSpace); + var nameToLevel = allLevels(); + + List result = new ArrayList<>(); + Configurator.setAllLevels(internalNameSpace, level); + for (org.apache.logging.log4j.Logger logger : loggers) { + // We need to track level changes for each logger and record their update timestamps to ensure this method + // correctly returns only the loggers whose levels were actually modified. + String name = getLoggerName(logger); + String newLevel = logger.getLevel().name(); + String oldLevel = nameToLevel.getOrDefault(name, new LoggerLevel("", time.milliseconds())).level(); + if (!newLevel.equalsIgnoreCase(oldLevel)) { + lastModifiedTimes.put(name, time.milliseconds()); + result.add(name); } - - if (foundLogger == null) { - log.warn("Unable to find level for logger {}", logger); - return null; - } - - return loggerLevel(foundLogger); - } - - @Override - public Map allLevels() { - return currentLoggers() - .values() - .stream() - .filter(logger -> !logger.getLevel().equals(Level.OFF)) - .collect(Collectors.toMap( - this::getLoggerName, - this::loggerLevel, - (existing, replacing) -> replacing, - TreeMap::new) - ); } + Collections.sort(result); - @Override - public List setLevel(String namespace, String level) { - Objects.requireNonNull(namespace, "Logging namespace may not be null"); - Objects.requireNonNull(level, "Level may not be null"); - String internalNameSpace = isValidRootLoggerName(namespace) ? LogManager.ROOT_LOGGER_NAME : namespace; - - log.info("Setting level of namespace {} and children to {}", internalNameSpace, level); - - var loggers = loggers(internalNameSpace); - var nameToLevel = allLevels(); - - List result = new ArrayList<>(); - Configurator.setAllLevels(internalNameSpace, Level.valueOf(level)); - for (org.apache.logging.log4j.Logger logger : loggers) { - // We need to track level changes for each logger and record their update timestamps to ensure this method - // correctly returns only the loggers whose levels were actually modified. - String name = getLoggerName(logger); - String newLevel = logger.getLevel().name(); - String oldLevel = nameToLevel.getOrDefault(name, new LoggerLevel("", time.milliseconds())).level(); - if (!newLevel.equalsIgnoreCase(oldLevel)) { - lastModifiedTimes.put(name, time.milliseconds()); - result.add(name); - } - } - Collections.sort(result); + return result; + } - return result; - } + /** + * Retrieve all known loggers within a given namespace, creating an ancestor logger for that + * namespace if one does not already exist + * @param namespace the namespace that the loggers should fall under; may not be null + * @return all loggers that fall under the given namespace; never null, and will always contain + * at least one logger (the ancestor logger for the namespace) + */ + private synchronized Collection loggers(String namespace) { + Objects.requireNonNull(namespace, "Logging namespace may not be null"); - @Override - public boolean isValidLevel(String level) { - return !level.isEmpty() && Level.getLevel(level) != null; + if (isValidRootLoggerName(namespace)) { + return currentLoggers().values(); } - /** - * Retrieve all known loggers within a given namespace, creating an ancestor logger for that - * namespace if one does not already exist - * - * @param namespace the namespace that the loggers should fall under; may not be null - * @return all loggers that fall under the given namespace; never null, and will always contain - * at least one logger (the ancestor logger for the namespace) - */ - private Collection loggers(String namespace) { - Objects.requireNonNull(namespace, "Logging namespace may not be null"); - - if (isValidRootLoggerName(namespace)) { - return currentLoggers().values(); - } + var result = new ArrayList(); + var nameToLogger = currentLoggers(); + var ancestorLogger = lookupLogger(namespace); + var currentLoggers = nameToLogger.values(); - var result = new ArrayList(); - var nameToLogger = currentLoggers(); - var ancestorLogger = lookupLogger(namespace); - var currentLoggers = nameToLogger.values(); - - boolean present = false; - for (org.apache.logging.log4j.core.Logger currentLogger : currentLoggers) { - if (currentLogger.getName().startsWith(namespace)) { - result.add(currentLogger); - } - if (namespace.equals(currentLogger.getName())) { - present = true; - } + boolean present = false; + for (org.apache.logging.log4j.Logger currentLogger : currentLoggers) { + if (currentLogger.getName().startsWith(namespace)) { + result.add(currentLogger); } - - if (!present) { - result.add(ancestorLogger); + if (namespace.equals(currentLogger.getName())) { + present = true; } - - return result; } - // visible for testing - org.apache.logging.log4j.core.Logger lookupLogger(String logger) { - return loggerContext.getLogger(isValidRootLoggerName(logger) ? LogManager.ROOT_LOGGER_NAME : logger); - } - - // visible for testing - Map currentLoggers() { - LoggerContext context = (LoggerContext) LogManager.getContext(false); - var results = new HashMap(); - context.getConfiguration().getLoggers().forEach((name, logger) -> results.put(name, loggerContext.getLogger(name))); - context.getLoggers().forEach(logger -> results.put(logger.getName(), logger)); - return results; - } - - // visible for testing - org.apache.logging.log4j.Logger rootLogger() { - return LogManager.getRootLogger(); - } - - private LoggerLevel loggerLevel(org.apache.logging.log4j.Logger logger) { - Long lastModified = lastModifiedTimes.get(getLoggerName(logger)); - return new LoggerLevel(Objects.toString(logger.getLevel()), lastModified); - } - - private boolean isValidRootLoggerName(String namespace) { - return VALID_ROOT_LOGGER_NAMES.stream() - .anyMatch(rootLoggerNames -> rootLoggerNames.equalsIgnoreCase(namespace)); - } - - /** - * Converts logger name to ensure backward compatibility between Log4j 1 and Log4j 2. - * If the logger name is empty (Log4j 2 root logger representation), converts it to "root" (Log4j 1 style). - * Otherwise, returns the original logger name. - * - * @param loggerName The name of the logger. - * @return The logger name - returns "root" for empty string, otherwise returns the original logger name - */ - private String getLoggerName(String loggerName) { - return loggerName.equals(LogManager.ROOT_LOGGER_NAME) ? ROOT_LOGGER_NAME : loggerName; - } - - /** - * Converts logger name to ensure backward compatibility between Log4j 1 and Log4j 2. - * If the logger name is empty (Log4j 2 root logger representation), converts it to "root" (Log4j 1 style). - * Otherwise, returns the original logger name. - * - * @param logger The logger instance to get the name from - * @return The logger name - returns "root" for empty string, otherwise returns the original logger name - */ - private String getLoggerName(org.apache.logging.log4j.Logger logger) { - return getLoggerName(logger.getName()); + if (!present) { + result.add(ancestorLogger); } + return result; } - private static class NoOpLoggers extends Loggers { + // visible for testing + org.apache.logging.log4j.Logger lookupLogger(String logger) { + return LogManager.getLogger(isValidRootLoggerName(logger) ? LogManager.ROOT_LOGGER_NAME : logger); + } - private NoOpLoggers(Time time) { - super(time); - } + Map currentLoggers() { + LoggerContext context = (LoggerContext) LogManager.getContext(false); + var results = new HashMap(); + context.getConfiguration().getLoggers().forEach((name, logger) -> results.put(name, LogManager.getLogger(name))); + context.getLoggerRegistry().getLoggers().forEach(logger -> results.put(logger.getName(), logger)); + return results; + } - @Override - public LoggerLevel level(String loggerName) { - return new LoggerLevel("OFF", 0L); - } + // visible for testing + org.apache.logging.log4j.Logger rootLogger() { + return LogManager.getRootLogger(); + } - @Override - public Map allLevels() { - return Map.of(); - } + private LoggerLevel loggerLevel(org.apache.logging.log4j.Logger logger) { + Long lastModified = lastModifiedTimes.get(getLoggerName(logger)); + return new LoggerLevel(Objects.toString(logger.getLevel()), lastModified); + } - @Override - public List setLevel(String loggerName, String level) { - return List.of(); - } + private boolean isValidRootLoggerName(String namespace) { + return VALID_ROOT_LOGGER_NAMES.stream() + .anyMatch(rootLoggerNames -> rootLoggerNames.equalsIgnoreCase(namespace)); + } - @Override - public boolean isValidLevel(String level) { - return "OFF".equals(level); - } + /** + * Converts logger name to ensure backward compatibility between log4j and log4j2. + * If the logger name is empty (log4j2's root logger representation), converts it to "root" (log4j's style). + * Otherwise, returns the original logger name. + * + * @param logger The logger instance to get the name from + * @return The logger name - returns "root" for empty string, otherwise returns the original logger name + */ + private String getLoggerName(org.apache.logging.log4j.Logger logger) { + return logger.getName().equals(LogManager.ROOT_LOGGER_NAME) ? ROOT_LOGGER_NAME : logger.getName(); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartPlan.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartPlan.java index c263ae3d36f4c..0dcfd4d03e54f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartPlan.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartPlan.java @@ -22,7 +22,9 @@ import org.apache.kafka.connect.util.ConnectorTaskId; import java.util.Collection; +import java.util.Collections; import java.util.Objects; +import java.util.stream.Collectors; /** * An immutable restart plan per connector. @@ -43,10 +45,13 @@ public RestartPlan(RestartRequest request, ConnectorStateInfo restartStateInfo) this.request = Objects.requireNonNull(request, "RestartRequest name may not be null"); this.stateInfo = Objects.requireNonNull(restartStateInfo, "ConnectorStateInfo name may not be null"); // Collect the task IDs to stop and restart (may be none) - this.idsToRestart = stateInfo.tasks() - .stream() - .filter(this::isRestarting) - .map(taskState -> new ConnectorTaskId(request.connectorName(), taskState.id())).toList(); + this.idsToRestart = Collections.unmodifiableList( + stateInfo.tasks() + .stream() + .filter(this::isRestarting) + .map(taskState -> new ConnectorTaskId(request.connectorName(), taskState.id())) + .collect(Collectors.toList()) + ); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartRequest.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartRequest.java index d68caf787a747..8055004e4cfef 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartRequest.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/RestartRequest.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.runtime; import org.apache.kafka.connect.connector.Connector; +import org.apache.kafka.connect.connector.Task; import java.util.Objects; @@ -24,18 +25,53 @@ * A request to restart a connector and/or task instances. *

          * The natural order is based first upon the connector name and then requested restart behaviors. - * If two requests have the same connector name, then the requests are ordered based on the + * If two requests have the same connector name, then the requests are ordered based on the * probable number of tasks/connector this request is going to restart. - * @param connectorName the name of the connector; may not be null - * @param onlyFailed true if only failed instances should be restarted - * @param includeTasks true if tasks should be restarted, or false if only the connector should be restarted */ -public record RestartRequest(String connectorName, - boolean onlyFailed, - boolean includeTasks) implements Comparable { +public class RestartRequest implements Comparable { - public RestartRequest { - Objects.requireNonNull(connectorName, "Connector name may not be null"); + private final String connectorName; + private final boolean onlyFailed; + private final boolean includeTasks; + + /** + * Create a new request to restart a connector and optionally its tasks. + * + * @param connectorName the name of the connector; may not be null + * @param onlyFailed true if only failed instances should be restarted + * @param includeTasks true if tasks should be restarted, or false if only the connector should be restarted + */ + public RestartRequest(String connectorName, boolean onlyFailed, boolean includeTasks) { + this.connectorName = Objects.requireNonNull(connectorName, "Connector name may not be null"); + this.onlyFailed = onlyFailed; + this.includeTasks = includeTasks; + } + + /** + * Get the name of the connector. + * + * @return the connector name; never null + */ + public String connectorName() { + return connectorName; + } + + /** + * Determine whether only failed instances be restarted. + * + * @return true if only failed instances should be restarted, or false if all applicable instances should be restarted + */ + public boolean onlyFailed() { + return onlyFailed; + } + + /** + * Determine whether {@link Task} instances should also be restarted in addition to the {@link Connector} instance. + * + * @return true if the connector and task instances should be restarted, or false if just the connector should be restarted + */ + public boolean includeTasks() { + return includeTasks; } /** @@ -72,7 +108,6 @@ public int compareTo(RestartRequest o) { int result = connectorName.compareTo(o.connectorName); return result == 0 ? impactRank() - o.impactRank() : result; } - //calculates an internal rank for the restart request based on the probable number of tasks/connector this request is going to restart private int impactRank() { if (onlyFailed && !includeTasks) { //restarts only failed connector so least impactful @@ -86,6 +121,23 @@ private int impactRank() { return 3; } + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + RestartRequest that = (RestartRequest) o; + return onlyFailed == that.onlyFailed && includeTasks == that.includeTasks && Objects.equals(connectorName, that.connectorName); + } + + @Override + public int hashCode() { + return Objects.hash(connectorName, onlyFailed, includeTasks); + } + @Override public String toString() { return "restart request for {" + "connectorName='" + connectorName + "', onlyFailed=" + onlyFailed + ", includeTasks=" + includeTasks + '}'; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SessionKey.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SessionKey.java index 2403c7e653708..6b6facd0359bc 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SessionKey.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SessionKey.java @@ -22,12 +22,53 @@ /** * A session key, which can be used to validate internal REST requests between workers. - * @param key the actual cryptographic key to use for request validation; may not be null - * @param creationTimestamp the time at which the key was generated */ -public record SessionKey(SecretKey key, long creationTimestamp) { +public class SessionKey { - public SessionKey { - Objects.requireNonNull(key, "Key may not be null"); + private final SecretKey key; + private final long creationTimestamp; + + /** + * Create a new session key with the given key value and creation timestamp + * @param key the actual cryptographic key to use for request validation; may not be null + * @param creationTimestamp the time at which the key was generated + */ + public SessionKey(SecretKey key, long creationTimestamp) { + this.key = Objects.requireNonNull(key, "Key may not be null"); + this.creationTimestamp = creationTimestamp; + } + + /** + * Get the cryptographic key to use for request validation. + * + * @return the cryptographic key; may not be null + */ + public SecretKey key() { + return key; + } + + /** + * Get the time at which the key was generated. + * + * @return the time at which the key was generated + */ + public long creationTimestamp() { + return creationTimestamp; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + SessionKey that = (SessionKey) o; + return creationTimestamp == that.creationTimestamp + && key.equals(that.key); + } + + @Override + public int hashCode() { + return Objects.hash(key, creationTimestamp); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java index 43da717a78cf4..2ab7dfa089763 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SinkConnectorConfig.java @@ -27,6 +27,7 @@ import org.apache.kafka.connect.transforms.util.RegexValidator; import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -41,7 +42,7 @@ public class SinkConnectorConfig extends ConnectorConfig { public static final String TOPICS_CONFIG = SinkTask.TOPICS_CONFIG; private static final String TOPICS_DOC = "List of topics to consume, separated by commas"; - public static final List TOPICS_DEFAULT = List.of(); + public static final String TOPICS_DEFAULT = ""; private static final String TOPICS_DISPLAY = "Topics"; public static final String TOPICS_REGEX_CONFIG = SinkTask.TOPICS_REGEX_CONFIG; @@ -72,29 +73,19 @@ public class SinkConnectorConfig extends ConnectorConfig { "keys, all error context header keys will start with __connect.errors."; private static final String DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY = "Enable Error Context Headers"; - private static ConfigDef configDef(ConfigDef baseConfigs) { - return baseConfigs - .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.ValidList.anyNonDuplicateValues(true, false), ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) - .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) - .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) - .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) - .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); - } + static final ConfigDef CONFIG = ConnectorConfig.configDef() + .define(TOPICS_CONFIG, ConfigDef.Type.LIST, TOPICS_DEFAULT, ConfigDef.Importance.HIGH, TOPICS_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_DISPLAY) + .define(TOPICS_REGEX_CONFIG, ConfigDef.Type.STRING, TOPICS_REGEX_DEFAULT, new RegexValidator(), ConfigDef.Importance.HIGH, TOPICS_REGEX_DOC, COMMON_GROUP, 4, ConfigDef.Width.LONG, TOPICS_REGEX_DISPLAY) + .define(DLQ_TOPIC_NAME_CONFIG, ConfigDef.Type.STRING, DLQ_TOPIC_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_NAME_DOC, ERROR_GROUP, 6, ConfigDef.Width.MEDIUM, DLQ_TOPIC_DISPLAY) + .define(DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, ConfigDef.Type.SHORT, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DEFAULT, Importance.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DOC, ERROR_GROUP, 7, ConfigDef.Width.MEDIUM, DLQ_TOPIC_REPLICATION_FACTOR_CONFIG_DISPLAY) + .define(DLQ_CONTEXT_HEADERS_ENABLE_CONFIG, ConfigDef.Type.BOOLEAN, DLQ_CONTEXT_HEADERS_ENABLE_DEFAULT, Importance.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DOC, ERROR_GROUP, 8, ConfigDef.Width.MEDIUM, DLQ_CONTEXT_HEADERS_ENABLE_DISPLAY); public static ConfigDef configDef() { - return configDef(ConnectorConfig.configDef()); - } - - public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { - return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); - } - - public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { - return configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); + return CONFIG; } public SinkConnectorConfig(Plugins plugins, Map props) { - super(plugins, configDef(), props); + super(plugins, CONFIG, props); } /** @@ -168,7 +159,7 @@ public static void validate(Map props, Map private static void addErrorMessage(Map validatedConfig, String name, String value, String errorMessage) { validatedConfig.computeIfAbsent( name, - p -> new ConfigValue(name, value, List.of(), new ArrayList<>()) + p -> new ConfigValue(name, value, Collections.emptyList(), new ArrayList<>()) ).addErrorMessage( errorMessage ); @@ -188,7 +179,7 @@ public static boolean hasDlqTopicConfig(Map props) { public static List parseTopicsList(Map props) { List topics = (List) ConfigDef.parseType(TOPICS_CONFIG, props.get(TOPICS_CONFIG), Type.LIST); if (topics == null) { - return List.of(); + return Collections.emptyList(); } return topics .stream() @@ -215,6 +206,6 @@ public boolean enableErrantRecordReporter() { } public static void main(String[] args) { - System.out.println(configDef().toHtml(4, config -> "sinkconnectorconfigs_" + config)); + System.out.println(CONFIG.toHtml(4, config -> "sinkconnectorconfigs_" + config)); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java index e9913e81f4c72..bc797563b10dd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SourceConnectorConfig.java @@ -26,12 +26,12 @@ import org.slf4j.LoggerFactory; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Locale; import java.util.Map; -import java.util.Set; import java.util.stream.Collectors; import static org.apache.kafka.common.utils.Utils.enumOptions; @@ -125,14 +125,14 @@ private static class EnrichedSourceConnectorConfig extends ConnectorConfig { private final EnrichedSourceConnectorConfig enrichedSourceConfig; private final String offsetsTopic; - private static ConfigDef configDef(ConfigDef baseConfigDef) { + public static ConfigDef configDef() { ConfigDef.Validator atLeastZero = ConfigDef.Range.atLeast(0); int orderInGroup = 0; - return new ConfigDef(baseConfigDef) + return new ConfigDef(ConnectorConfig.configDef()) .define( TOPIC_CREATION_GROUPS_CONFIG, ConfigDef.Type.LIST, - List.of(), + Collections.emptyList(), ConfigDef.CompositeValidator.of( new ConfigDef.NonNullValidator(), ConfigDef.LambdaValidator.with( @@ -203,18 +203,6 @@ private static ConfigDef configDef(ConfigDef baseConfigDef) { OFFSETS_TOPIC_DISPLAY); } - public static ConfigDef configDef() { - return configDef(ConnectorConfig.configDef()); - } - - public static ConfigDef enrichedConfigDef(Plugins plugins, Map connProps, WorkerConfig workerConfig) { - return configDef(ConnectorConfig.enrichedConfigDef(plugins, connProps, workerConfig)); - } - - public static ConfigDef enrichedConfigDef(Plugins plugins, String connectorClass) { - return configDef(ConnectorConfig.enrichedConfigDef(plugins, connectorClass)); - } - public static ConfigDef embedDefaultGroup(ConfigDef baseConfigDef) { String defaultGroup = "default"; ConfigDef newDefaultDef = new ConfigDef(baseConfigDef); @@ -240,7 +228,7 @@ public static ConfigDef enrich(ConfigDef baseConfigDef, Map prop if (topicCreationGroups.contains(DEFAULT_TOPIC_CREATION_GROUP)) { log.warn("'{}' topic creation group always exists and does not need to be listed explicitly", DEFAULT_TOPIC_CREATION_GROUP); - topicCreationGroups.removeAll(Set.of(DEFAULT_TOPIC_CREATION_GROUP)); + topicCreationGroups.removeAll(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP)); } ConfigDef newDef = new ConfigDef(baseConfigDef); @@ -248,9 +236,10 @@ public static ConfigDef enrich(ConfigDef baseConfigDef, Map prop short defaultGroupReplicationFactor = defaultGroupConfig.getShort(defaultGroupPrefix + REPLICATION_FACTOR_CONFIG); int defaultGroupPartitions = defaultGroupConfig.getInt(defaultGroupPrefix + PARTITIONS_CONFIG); topicCreationGroups.stream().distinct().forEach(group -> { - if (!(group instanceof String alias)) { + if (!(group instanceof String)) { throw new ConfigException("Item in " + TOPIC_CREATION_GROUPS_CONFIG + " property is not of type String"); } + String alias = (String) group; String prefix = TOPIC_CREATION_PREFIX + alias + "."; String configGroup = TOPIC_CREATION_GROUP + ": " + alias; newDef.embed(prefix, configGroup, 0, @@ -332,7 +321,7 @@ public Integer topicCreationPartitions(String group) { public Map topicCreationOtherConfigs(String group) { if (enrichedSourceConfig == null) { - return Map.of(); + return Collections.emptyMap(); } return enrichedSourceConfig.originalsWithPrefix(TOPIC_CREATION_PREFIX + group + '.').entrySet().stream() .filter(e -> { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java index 86cec8080db3b..458413e372dbf 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/SubmittedRecords.java @@ -227,33 +227,81 @@ private Map offset() { /** * Contains a snapshot of offsets that can be committed for a source task and metadata for that offset commit * (such as the number of messages for which offsets can and cannot be committed). - * @param offsets the offsets that can be committed at the time of the snapshot - * @param numCommittableMessages the number of committable messages at the time of the snapshot, where a - * committable message is both acknowledged and not preceded by any unacknowledged - * messages in the deque for its source partition - * @param numUncommittableMessages the number of uncommittable messages at the time of the snapshot, where an - * uncommittable message is either unacknowledged, or preceded in the deque for its - * source partition by an unacknowledged message - * @param numDeques the number of non-empty deques tracking uncommittable messages at the time of the snapshot - * @param largestDequeSize the size of the largest deque at the time of the snapshot - * @param largestDequePartition the applicable partition, which may be null, or null if there are no uncommitted - * messages; it is the caller's responsibility to distinguish between these two cases - * via {@link #hasPending()} */ - record CommittableOffsets(Map, Map> offsets, - int numCommittableMessages, - int numUncommittableMessages, - int numDeques, - int largestDequeSize, - Map largestDequePartition) { + static class CommittableOffsets { /** * An "empty" snapshot that contains no offsets to commit and whose metadata contains no committable or uncommitable messages. */ - public static final CommittableOffsets EMPTY = new CommittableOffsets(Map.of(), 0, 0, 0, 0, null); + public static final CommittableOffsets EMPTY = new CommittableOffsets(Collections.emptyMap(), 0, 0, 0, 0, null); - CommittableOffsets { - offsets = Collections.unmodifiableMap(offsets); + private final Map, Map> offsets; + private final int numCommittableMessages; + private final int numUncommittableMessages; + private final int numDeques; + private final int largestDequeSize; + private final Map largestDequePartition; + + CommittableOffsets( + Map, Map> offsets, + int numCommittableMessages, + int numUncommittableMessages, + int numDeques, + int largestDequeSize, + Map largestDequePartition + ) { + this.offsets = offsets != null ? new HashMap<>(offsets) : Collections.emptyMap(); + this.numCommittableMessages = numCommittableMessages; + this.numUncommittableMessages = numUncommittableMessages; + this.numDeques = numDeques; + this.largestDequeSize = largestDequeSize; + this.largestDequePartition = largestDequePartition; + } + + /** + * @return the offsets that can be committed at the time of the snapshot + */ + public Map, Map> offsets() { + return Collections.unmodifiableMap(offsets); + } + + /** + * @return the number of committable messages at the time of the snapshot, where a committable message is both + * acknowledged and not preceded by any unacknowledged messages in the deque for its source partition + */ + public int numCommittableMessages() { + return numCommittableMessages; + } + + /** + * @return the number of uncommittable messages at the time of the snapshot, where an uncommittable message + * is either unacknowledged, or preceded in the deque for its source partition by an unacknowledged message + */ + public int numUncommittableMessages() { + return numUncommittableMessages; + } + + /** + * @return the number of non-empty deques tracking uncommittable messages at the time of the snapshot + */ + public int numDeques() { + return numDeques; + } + + /** + * @return the size of the largest deque at the time of the snapshot + */ + public int largestDequeSize() { + return largestDequeSize; + } + + /** + * Get the partition for the deque with the most uncommitted messages at the time of the snapshot. + * @return the applicable partition, which may be null, or null if there are no uncommitted messages; + * it is the caller's responsibility to distinguish between these two cases via {@link #hasPending()} + */ + public Map largestDequePartition() { + return largestDequePartition; } /** @@ -275,7 +323,6 @@ public boolean isEmpty() { * Offsets are combined (giving precedence to the newer snapshot in case of conflict), the total number of * committable messages is summed across the two snapshots, and the newer snapshot's information on pending * messages (num deques, largest deque size, etc.) is used. - * * @param newerOffsets the newer snapshot to combine with this snapshot * @return the new offset snapshot containing information from this snapshot and the newer snapshot; never null */ diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java index 45150ef7ef5a3..e35efcafe2e91 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TaskStatus.java @@ -20,8 +20,8 @@ public class TaskStatus extends AbstractStatus { - public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation, String trace, String version) { - super(id, state, workerUrl, generation, trace, version); + public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation, String trace) { + super(id, state, workerUrl, generation, trace); } public TaskStatus(ConnectorTaskId id, State state, String workerUrl, int generation) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java index 4339fd6f2364a..11c2ba9d37425 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicCreationConfig.java @@ -20,6 +20,7 @@ import org.apache.kafka.common.config.ConfigException; import org.apache.kafka.connect.util.TopicAdmin; +import java.util.Collections; import java.util.List; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; @@ -101,11 +102,11 @@ public static ConfigDef configDef(String group, short defaultReplicationFactor, int orderInGroup = 0; ConfigDef configDef = new ConfigDef(); configDef - .define(INCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), + .define(INCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), REGEX_VALIDATOR, ConfigDef.Importance.LOW, INCLUDE_REGEX_DOC, group, ++orderInGroup, ConfigDef.Width.LONG, "Inclusion Topic Pattern for " + group) - .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), + .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), REGEX_VALIDATOR, ConfigDef.Importance.LOW, EXCLUDE_REGEX_DOC, group, ++orderInGroup, ConfigDef.Width.LONG, "Exclusion Topic Pattern for " + group) @@ -128,7 +129,7 @@ public static ConfigDef defaultGroupConfigDef() { new ConfigDef.NonNullValidator(), ConfigDef.Importance.LOW, INCLUDE_REGEX_DOC, DEFAULT_TOPIC_CREATION_GROUP, ++orderInGroup, ConfigDef.Width.LONG, "Inclusion Topic Pattern for " + DEFAULT_TOPIC_CREATION_GROUP) - .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, List.of(), + .define(EXCLUDE_REGEX_CONFIG, ConfigDef.Type.LIST, Collections.emptyList(), new ConfigDef.NonNullValidator(), ConfigDef.Importance.LOW, EXCLUDE_REGEX_DOC, DEFAULT_TOPIC_CREATION_GROUP, ++orderInGroup, ConfigDef.Width.LONG, "Exclusion Topic Pattern for " + DEFAULT_TOPIC_CREATION_GROUP) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java index 3175f6dcd4023..16dcd80b43f84 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TopicStatus.java @@ -93,9 +93,10 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof TopicStatus that)) { + if (!(o instanceof TopicStatus)) { return false; } + TopicStatus that = (TopicStatus) o; return task == that.task && discoverTimestamp == that.discoverTimestamp && topic.equals(that.topic) && diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java index 68d52f2c1ca3d..f6b92697c443b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationChain.java @@ -28,7 +28,6 @@ import java.util.List; import java.util.Objects; import java.util.StringJoiner; -import java.util.stream.Collectors; /** * Represents a chain of {@link Transformation}s to be applied to a {@link ConnectRecord} serially. @@ -90,8 +89,4 @@ public String toString() { } return chain.toString(); } - - public List transformationChainInfo() { - return transformationStages.stream().map(TransformationStage::transformationStageInfo).collect(Collectors.toList()); - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java index 56293e0363206..3831730ad8f5c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/TransformationStage.java @@ -17,16 +17,11 @@ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.ConnectRecord; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; -import java.util.Objects; -import java.util.function.Function; - /** * Wrapper for a {@link Transformation} and corresponding optional {@link Predicate} * which applies the transformation when the {@link Predicate} is true (or false, according to {@code negate}). @@ -37,107 +32,45 @@ public class TransformationStage> implements AutoClos static final String PREDICATE_CONFIG = "predicate"; static final String NEGATE_CONFIG = "negate"; - private final Plugin> predicatePlugin; - private final Plugin> transformationPlugin; + private final Predicate predicate; + private final Transformation transformation; private final boolean negate; - private final String transformAlias; - private final String predicateAlias; - private final String transformVersion; - private final String predicateVersion; - private final Function pluginLoaderSwapper; - - TransformationStage( - Plugin> transformationPlugin, - String transformAlias, - String transformVersion, - Function pluginLoaderSwapper - ) { - this(null, null, null, false, transformationPlugin, transformAlias, transformVersion, pluginLoaderSwapper); + TransformationStage(Transformation transformation) { + this(null, false, transformation); } - TransformationStage( - Plugin> predicatePlugin, - String predicateAlias, - String predicateVersion, - boolean negate, - Plugin> transformationPlugin, - String transformAlias, - String transformVersion, - Function pluginLoaderSwapper - ) { - this.predicatePlugin = predicatePlugin; + TransformationStage(Predicate predicate, boolean negate, Transformation transformation) { + this.predicate = predicate; this.negate = negate; - this.transformationPlugin = transformationPlugin; - this.pluginLoaderSwapper = pluginLoaderSwapper; - this.transformAlias = transformAlias; - this.predicateAlias = predicateAlias; - this.transformVersion = transformVersion; - this.predicateVersion = predicateVersion; + this.transformation = transformation; } public Class> transformClass() { @SuppressWarnings("unchecked") - Class> transformClass = (Class>) transformationPlugin.get().getClass(); + Class> transformClass = (Class>) transformation.getClass(); return transformClass; } public R apply(R record) { - Predicate predicate = predicatePlugin != null ? predicatePlugin.get() : null; - boolean shouldTransform = predicate == null; - if (predicate != null) { - try (LoaderSwap swap = pluginLoaderSwapper.apply(predicate.getClass().getClassLoader())) { - shouldTransform = negate ^ predicate.test(record); - } - } - if (shouldTransform) { - try (LoaderSwap swap = pluginLoaderSwapper.apply(transformationPlugin.get().getClass().getClassLoader())) { - record = transformationPlugin.get().apply(record); - } + if (predicate == null || negate ^ predicate.test(record)) { + return transformation.apply(record); } return record; } @Override public void close() { - Utils.closeQuietly(transformationPlugin, "transformation"); - Utils.closeQuietly(predicatePlugin, "predicate"); + Utils.closeQuietly(transformation, "transformation"); + Utils.closeQuietly(predicate, "predicate"); } @Override public String toString() { return "TransformationStage{" + - "predicate=" + predicatePlugin.get() + - ", transformation=" + transformationPlugin.get() + + "predicate=" + predicate + + ", transformation=" + transformation + ", negate=" + negate + '}'; } - - public record AliasedPluginInfo(String alias, String className, String version) { - public AliasedPluginInfo { - Objects.requireNonNull(alias, "alias cannot be null"); - Objects.requireNonNull(className, "className cannot be null"); - } - } - - - public record StageInfo(AliasedPluginInfo transform, AliasedPluginInfo predicate) { - public StageInfo { - Objects.requireNonNull(transform, "transform cannot be null"); - } - } - - - public StageInfo transformationStageInfo() { - AliasedPluginInfo transformInfo = new AliasedPluginInfo( - transformAlias, - transformationPlugin.get().getClass().getName(), - transformVersion - ); - AliasedPluginInfo predicateInfo = predicatePlugin != null ? new AliasedPluginInfo( - predicateAlias, - predicatePlugin.get().getClass().getName(), predicateVersion - ) : null; - return new StageInfo(transformInfo, predicateInfo); - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java index 53cc40d7fd8b7..591e9816a7a50 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/Worker.java @@ -44,7 +44,6 @@ import org.apache.kafka.common.errors.GroupNotEmptyException; import org.apache.kafka.common.errors.GroupSubscribedToTopicException; import org.apache.kafka.common.errors.UnknownMemberIdException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.ThreadUtils; import org.apache.kafka.common.utils.Time; import org.apache.kafka.common.utils.Timer; @@ -67,10 +66,8 @@ import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; -import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.isolation.Plugins; import org.apache.kafka.connect.runtime.isolation.Plugins.ClassLoaderUsage; -import org.apache.kafka.connect.runtime.isolation.VersionedPluginLoadingException; import org.apache.kafka.connect.runtime.rest.RestServer; import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffset; import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets; @@ -101,13 +98,13 @@ import org.apache.kafka.connect.util.TopicAdmin; import org.apache.kafka.connect.util.TopicCreationGroup; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.Duration; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -196,7 +193,7 @@ public Worker( this.connectorClientConfigOverridePolicy = connectorClientConfigOverridePolicy; this.workerMetricsGroup = new WorkerMetricsGroup(this.connectors, this.tasks, metrics); - Map internalConverterConfig = Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"); + Map internalConverterConfig = Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"); this.internalKeyConverter = plugins.newInternalConverter(true, JsonConverter.class.getName(), internalConverterConfig); this.internalValueConverter = plugins.newInternalConverter(false, JsonConverter.class.getName(), internalConverterConfig); @@ -279,12 +276,6 @@ public void stop() { workerConfigTransformer.close(); ThreadUtils.shutdownExecutorServiceQuietly(executor, EXECUTOR_SHUTDOWN_TERMINATION_TIMEOUT_MS, TimeUnit.MILLISECONDS); - Utils.closeQuietly(internalKeyConverter, "internal key converter"); - Utils.closeQuietly(internalValueConverter, "internal value converter"); - } - - public WorkerConfig config() { - return config; } /** @@ -316,38 +307,32 @@ public void startConnector( final WorkerConnector workerConnector; final String connClass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - final ClassLoader connectorLoader; - - try { - connectorLoader = connectorClassLoader(connProps); - try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { - log.info("Creating connector {} of type {}", connName, connClass); - final Connector connector = instantiateConnector(connProps); - - final ConnectorConfig connConfig; - final CloseableOffsetStorageReader offsetReader; - final ConnectorOffsetBackingStore offsetStore; - - if (ConnectUtils.isSinkConnector(connector)) { - connConfig = new SinkConnectorConfig(plugins, connProps); - offsetReader = null; - offsetStore = null; - } else { - SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connProps, config.topicCreationEnable()); - connConfig = sourceConfig; + ClassLoader connectorLoader = plugins.connectorLoader(connClass); + try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { + log.info("Creating connector {} of type {}", connName, connClass); + final Connector connector = plugins.newConnector(connClass); + final ConnectorConfig connConfig; + final CloseableOffsetStorageReader offsetReader; + final ConnectorOffsetBackingStore offsetStore; + if (ConnectUtils.isSinkConnector(connector)) { + connConfig = new SinkConnectorConfig(plugins, connProps); + offsetReader = null; + offsetStore = null; + } else { + SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connProps, config.topicCreationEnable()); + connConfig = sourceConfig; - // Set up the offset backing store for this connector instance - offsetStore = config.exactlyOnceSourceEnabled() + // Set up the offset backing store for this connector instance + offsetStore = config.exactlyOnceSourceEnabled() ? offsetStoreForExactlyOnceSourceConnector(sourceConfig, connName, connector, null) : offsetStoreForRegularSourceConnector(sourceConfig, connName, connector, null); - offsetStore.configure(config); - offsetReader = new OffsetStorageReaderImpl(offsetStore, connName, internalKeyConverter, internalValueConverter); - } - workerConnector = new WorkerConnector( - connName, connector, connConfig, ctx, metrics, connectorStatusListener, offsetReader, offsetStore, connectorLoader); - log.info("Instantiated connector {} with version {} of type {}", connName, workerConnector.connectorVersion(), connector.getClass()); - workerConnector.transitionTo(initialState, onConnectorStateChange); + offsetStore.configure(config); + offsetReader = new OffsetStorageReaderImpl(offsetStore, connName, internalKeyConverter, internalValueConverter); } + workerConnector = new WorkerConnector( + connName, connector, connConfig, ctx, metrics, connectorStatusListener, offsetReader, offsetStore, connectorLoader); + log.info("Instantiated connector {} with version {} of type {}", connName, connector.version(), connector.getClass()); + workerConnector.transitionTo(initialState, onConnectorStateChange); } catch (Throwable t) { log.error("Failed to start connector {}", connName, t); connectorStatusListener.onFailure(connName, t); @@ -538,7 +523,7 @@ public void stopAndAwaitConnectors(Collection ids) { */ public void stopAndAwaitConnector(String connName) { stopConnector(connName); - awaitStopConnectors(List.of(connName)); + awaitStopConnectors(Collections.singletonList(connName)); } /** @@ -561,22 +546,6 @@ public boolean isRunning(String connName) { return workerConnector != null && workerConnector.isRunning(); } - public String connectorVersion(String connName) { - WorkerConnector conn = connectors.get(connName); - if (conn == null) { - return null; - } - return conn.connectorVersion(); - } - - public String taskVersion(ConnectorTaskId taskId) { - WorkerTask task = tasks.get(taskId); - if (task == null) { - return null; - } - return task.taskVersion(); - } - /** * Start a sink task managed by this worker. * @@ -679,61 +648,60 @@ private boolean startTask( throw new ConnectException("Task already exists in this worker: " + id); connectorStatusMetricsGroup.recordTaskAdded(id); + String connType = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + ClassLoader connectorLoader = plugins.connectorLoader(connType); + + try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { + final ConnectorConfig connConfig = new ConnectorConfig(plugins, connProps); + + int maxTasks = connConfig.tasksMax(); + int numTasks = configState.taskCount(id.connector()); + checkTasksMax(id.connector(), numTasks, maxTasks, connConfig.enforceTasksMax()); + + final TaskConfig taskConfig = new TaskConfig(taskProps); + final Class taskClass = taskConfig.getClass(TaskConfig.TASK_CLASS_CONFIG).asSubclass(Task.class); + final Task task = plugins.newTask(taskClass); + log.info("Instantiated task {} with version {} of type {}", id, task.version(), taskClass.getName()); + + // By maintaining connector's specific class loader for this thread here, we first + // search for converters within the connector dependencies. + // If any of these aren't found, that means the connector didn't configure specific converters, + // so we should instantiate based upon the worker configuration + Converter keyConverter = plugins.newConverter(connConfig, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, ClassLoaderUsage + .CURRENT_CLASSLOADER); + Converter valueConverter = plugins.newConverter(connConfig, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.CURRENT_CLASSLOADER); + HeaderConverter headerConverter = plugins.newHeaderConverter(connConfig, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, + ClassLoaderUsage.CURRENT_CLASSLOADER); + if (keyConverter == null) { + keyConverter = plugins.newConverter(config, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS); + log.info("Set up the key converter {} for task {} using the worker config", keyConverter.getClass(), id); + } else { + log.info("Set up the key converter {} for task {} using the connector config", keyConverter.getClass(), id); + } + if (valueConverter == null) { + valueConverter = plugins.newConverter(config, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ClassLoaderUsage.PLUGINS); + log.info("Set up the value converter {} for task {} using the worker config", valueConverter.getClass(), id); + } else { + log.info("Set up the value converter {} for task {} using the connector config", valueConverter.getClass(), id); + } + if (headerConverter == null) { + headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, ClassLoaderUsage + .PLUGINS); + log.info("Set up the header converter {} for task {} using the worker config", headerConverter.getClass(), id); + } else { + log.info("Set up the header converter {} for task {} using the connector config", headerConverter.getClass(), id); + } - final ClassLoader connectorLoader; - try { - connectorLoader = connectorClassLoader(connProps); - try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { - final ConnectorConfig connConfig = new ConnectorConfig(plugins, connProps); - - int maxTasks = connConfig.tasksMax(); - int numTasks = configState.taskCount(id.connector()); - checkTasksMax(id.connector(), numTasks, maxTasks, connConfig.enforceTasksMax()); - - final TaskConfig taskConfig = new TaskConfig(taskProps); - final Class taskClass = taskConfig.getClass(TaskConfig.TASK_CLASS_CONFIG).asSubclass(Task.class); - final Task task = plugins.newTask(taskClass); - log.info("Instantiated task {} with version {} of type {}", id, task.version(), taskClass.getName()); - - - // By maintaining connector's specific class loader for this thread here, we first - // search for converters within the connector dependencies. - // If any of these aren't found, that means the connector didn't configure specific converters, - // so we should instantiate based upon the worker configuration - Converter keyConverter = plugins.newConverter(connConfig, ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - Converter valueConverter = plugins.newConverter(connConfig, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - HeaderConverter headerConverter = plugins.newHeaderConverter(connConfig, ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG, ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG); - - if (keyConverter == null) { - keyConverter = plugins.newConverter(config, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION); - log.info("Set up the key converter {} for task {} using the worker config", keyConverter.getClass(), id); - } else { - log.info("Set up the key converter {} for task {} using the connector config", keyConverter.getClass(), id); - } - if (valueConverter == null) { - valueConverter = plugins.newConverter(config, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION); - log.info("Set up the value converter {} for task {} using the worker config", valueConverter.getClass(), id); - } else { - log.info("Set up the value converter {} for task {} using the connector config", valueConverter.getClass(), id); - } - if (headerConverter == null) { - headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, WorkerConfig.HEADER_CONVERTER_VERSION); - log.info("Set up the header converter {} for task {} using the worker config", headerConverter.getClass(), id); - } else { - log.info("Set up the header converter {} for task {} using the connector config", headerConverter.getClass(), id); - } - - workerTask = taskBuilder + workerTask = taskBuilder .withTask(task) .withConnectorConfig(connConfig) - .withKeyConverterPlugin(metrics.wrap(keyConverter, id, true)) - .withValueConverterPlugin(metrics.wrap(valueConverter, id, false)) - .withHeaderConverterPlugin(metrics.wrap(headerConverter, id)) - .withClassLoader(connectorLoader) + .withKeyConverter(keyConverter) + .withValueConverter(valueConverter) + .withHeaderConverter(headerConverter) + .withClassloader(connectorLoader) .build(); - workerTask.initialize(taskConfig); - } + workerTask.initialize(taskConfig); } catch (Throwable t) { log.error("Failed to start task {}", id, t); connectorStatusMetricsGroup.recordTaskRemoved(id); @@ -764,17 +732,19 @@ private boolean startTask( public KafkaFuture fenceZombies(String connName, int numTasks, Map connProps) { log.debug("Fencing out {} task producers for source connector {}", numTasks, connName); try (LoggingContext loggingContext = LoggingContext.forConnector(connName)) { - Class connectorClass = connectorClass(connProps); - ClassLoader classLoader = connectorClassLoader(connProps); - try (LoaderSwap loaderSwap = plugins.withClassLoader(classLoader)) { + String connType = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + ClassLoader connectorLoader = plugins.connectorLoader(connType); + try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { final SourceConnectorConfig connConfig = new SourceConnectorConfig(plugins, connProps, config.topicCreationEnable()); + final Class connClass = plugins.connectorClass( + connConfig.getString(ConnectorConfig.CONNECTOR_CLASS_CONFIG)); Map adminConfig = adminConfigs( connName, "connector-worker-adminclient-" + connName, config, connConfig, - connectorClass, + connClass, connectorClientConfigOverridePolicy, kafkaClusterId, ConnectorType.SOURCE); @@ -997,7 +967,7 @@ private static Map connectorClientConfigOverrides(String connNam ); List configValues = connectorClientConfigOverridePolicy.validate(connectorClientConfigRequest); List errorConfigs = configValues.stream(). - filter(configValue -> configValue.errorMessages().size() > 0).toList(); + filter(configValue -> configValue.errorMessages().size() > 0).collect(Collectors.toList()); // These should be caught when the herder validates the connector configuration, but just in case if (errorConfigs.size() > 0) { throw new ConnectException("Client Config Overrides not allowed " + errorConfigs); @@ -1034,7 +1004,7 @@ private List>> sinkTaskReporters(Co if (topic != null && !topic.isEmpty()) { Map producerProps = baseProducerConfigs(id.connector(), "connector-dlq-producer-" + id, config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId); - Map adminProps = adminConfigs(id.connector(), "connector-dlq-adminclient-" + id, config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId, ConnectorType.SINK); + Map adminProps = adminConfigs(id.connector(), "connector-dlq-adminclient-", config, connConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId, ConnectorType.SINK); DeadLetterQueueReporter reporter = DeadLetterQueueReporter.createAndSetup(adminProps, id, connConfig, producerProps, errorHandlingMetrics); reporters.add(reporter); @@ -1148,7 +1118,7 @@ public void stopAndAwaitTasks(Collection ids) { */ public void stopAndAwaitTask(ConnectorTaskId taskId) { stopTask(taskId); - awaitStopTasks(List.of(taskId)); + awaitStopTasks(Collections.singletonList(taskId)); } /** @@ -1158,6 +1128,10 @@ public Set taskIds() { return tasks.keySet(); } + public Converter getInternalKeyConverter() { + return internalKeyConverter; + } + public Converter getInternalValueConverter() { return internalValueConverter; } @@ -1217,9 +1191,11 @@ public void setTargetState(String connName, TargetState state, Callback connectorConfig, Callback cb) { - Connector connector = instantiateConnector(connectorConfig); - ClassLoader connectorLoader = connectorClassLoader(connectorConfig); + String connectorClassOrAlias = connectorConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + ClassLoader connectorLoader = plugins.connectorLoader(connectorClassOrAlias); + try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { + Connector connector = plugins.newConnector(connectorClassOrAlias); if (ConnectUtils.isSinkConnector(connector)) { log.debug("Fetching offsets for sink connector: {}", connName); sinkConnectorOffsets(connName, connector, connectorConfig, cb); @@ -1230,43 +1206,6 @@ public void connectorOffsets(String connName, Map connectorConfi } } - private Connector instantiateConnector(Map connProps) throws ConnectException { - - final String klass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - final String version = connProps.get(ConnectorConfig.CONNECTOR_VERSION); - - try { - return plugins.newConnector(klass, PluginUtils.connectorVersionRequirement(version)); - } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { - throw new ConnectException( - String.format("Failed to instantiate class for connector %s, class %s", klass, connProps.get(ConnectorConfig.NAME_CONFIG)), e); - } - } - - private ClassLoader connectorClassLoader(Map connProps) throws ConnectException { - final String klass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - final String version = connProps.get(ConnectorConfig.CONNECTOR_VERSION); - - try { - return plugins.pluginLoader(klass, PluginUtils.connectorVersionRequirement(version)); - } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { - throw new ConnectException( - String.format("Failed to get class loader for connector %s, class %s", klass, connProps.get(ConnectorConfig.NAME_CONFIG)), e); - } - } - - private Class connectorClass(Map connProps) throws ConnectException { - final String klass = connProps.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); - final String version = connProps.get(ConnectorConfig.CONNECTOR_VERSION); - - try { - return plugins.connectorClass(klass, PluginUtils.connectorVersionRequirement(version)); - } catch (InvalidVersionSpecificationException | VersionedPluginLoadingException e) { - throw new ConnectException( - String.format("Failed to get class for connector %s, class %s", klass, connProps.get(ConnectorConfig.NAME_CONFIG)), e); - } - } - /** * Get the current consumer group offsets for a sink connector. *

          @@ -1365,10 +1304,12 @@ void sourceConnectorOffsets(String connName, ConnectorOffsetBackingStore offsetS */ public void modifyConnectorOffsets(String connName, Map connectorConfig, Map, Map> offsets, Callback cb) { + String connectorClassOrAlias = connectorConfig.get(ConnectorConfig.CONNECTOR_CLASS_CONFIG); + ClassLoader connectorLoader = plugins.connectorLoader(connectorClassOrAlias); + Connector connector; - final Connector connector = instantiateConnector(connectorConfig); - ClassLoader connectorLoader = connectorClassLoader(connectorConfig); try (LoaderSwap loaderSwap = plugins.withClassLoader(connectorLoader)) { + connector = plugins.newConnector(connectorClassOrAlias); if (ConnectUtils.isSinkConnector(connector)) { log.debug("Modifying offsets for sink connector: {}", connName); modifySinkConnectorOffsets(connName, connector, connectorConfig, offsets, connectorLoader, cb); @@ -1561,7 +1502,7 @@ private void alterSinkConnectorOffsets(String connName, String groupId, Admin ad private void resetSinkConnectorOffsets(String connName, String groupId, Admin admin, Callback cb, boolean alterOffsetsResult, Timer timer) { DeleteConsumerGroupsOptions deleteConsumerGroupsOptions = new DeleteConsumerGroupsOptions().timeoutMs((int) timer.remainingMs()); - admin.deleteConsumerGroups(Set.of(groupId), deleteConsumerGroupsOptions) + admin.deleteConsumerGroups(Collections.singleton(groupId), deleteConsumerGroupsOptions) .all() .whenComplete((ignored, error) -> { // We treat GroupIdNotFoundException as a non-error here because resetting a connector's offsets is expected to be an idempotent operation @@ -1789,9 +1730,9 @@ abstract class TaskBuilder> { private Task task = null; private ConnectorConfig connectorConfig = null; - private Plugin keyConverterPlugin = null; - private Plugin valueConverterPlugin = null; - private Plugin headerConverterPlugin = null; + private Converter keyConverter = null; + private Converter valueConverter = null; + private HeaderConverter headerConverter = null; private ClassLoader classLoader = null; public TaskBuilder(ConnectorTaskId id, @@ -1814,51 +1755,48 @@ public TaskBuilder withConnectorConfig(ConnectorConfig connectorConfig) { return this; } - public TaskBuilder withKeyConverterPlugin(Plugin keyConverterPlugin) { - this.keyConverterPlugin = keyConverterPlugin; + public TaskBuilder withKeyConverter(Converter keyConverter) { + this.keyConverter = keyConverter; return this; } - public TaskBuilder withValueConverterPlugin(Plugin valueConverterPlugin) { - this.valueConverterPlugin = valueConverterPlugin; + public TaskBuilder withValueConverter(Converter valueConverter) { + this.valueConverter = valueConverter; return this; } - public TaskBuilder withHeaderConverterPlugin(Plugin headerConverterPlugin) { - this.headerConverterPlugin = headerConverterPlugin; + public TaskBuilder withHeaderConverter(HeaderConverter headerConverter) { + this.headerConverter = headerConverter; return this; } - public TaskBuilder withClassLoader(ClassLoader classLoader) { + public TaskBuilder withClassloader(ClassLoader classLoader) { this.classLoader = classLoader; return this; } - public WorkerTask build() { Objects.requireNonNull(task, "Task cannot be null"); Objects.requireNonNull(connectorConfig, "Connector config used by task cannot be null"); - Objects.requireNonNull(keyConverterPlugin.get(), "Key converter used by task cannot be null"); - Objects.requireNonNull(valueConverterPlugin.get(), "Value converter used by task cannot be null"); - Objects.requireNonNull(headerConverterPlugin.get(), "Header converter used by task cannot be null"); + Objects.requireNonNull(keyConverter, "Key converter used by task cannot be null"); + Objects.requireNonNull(valueConverter, "Value converter used by task cannot be null"); + Objects.requireNonNull(headerConverter, "Header converter used by task cannot be null"); Objects.requireNonNull(classLoader, "Classloader used by task cannot be null"); ErrorHandlingMetrics errorHandlingMetrics = errorHandlingMetrics(id); - final Class connectorClass = connectorClass(connectorConfig.originalsStrings()); + final Class connectorClass = plugins.connectorClass( + connectorConfig.getString(ConnectorConfig.CONNECTOR_CLASS_CONFIG)); RetryWithToleranceOperator retryWithToleranceOperator = new RetryWithToleranceOperator<>(connectorConfig.errorRetryTimeout(), connectorConfig.errorMaxDelayInMillis(), connectorConfig.errorToleranceType(), Time.SYSTEM, errorHandlingMetrics); - TransformationChain transformationChain = new TransformationChain<>(connectorConfig.transformationStages(plugins, id, metrics), retryWithToleranceOperator); + TransformationChain transformationChain = new TransformationChain<>(connectorConfig.transformationStages(), retryWithToleranceOperator); log.info("Initializing: {}", transformationChain); - TaskPluginsMetadata taskPluginsMetadata = new TaskPluginsMetadata( - connectorClass, task, keyConverterPlugin.get(), valueConverterPlugin.get(), headerConverterPlugin.get(), transformationChain.transformationChainInfo(), plugins); - return doBuild(task, id, configState, statusListener, initialState, - connectorConfig, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, classLoader, - retryWithToleranceOperator, transformationChain, - errorHandlingMetrics, connectorClass, taskPluginsMetadata); + connectorConfig, keyConverter, valueConverter, headerConverter, classLoader, + retryWithToleranceOperator, transformationChain, + errorHandlingMetrics, connectorClass); } abstract WorkerTask doBuild( @@ -1868,15 +1806,14 @@ abstract WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass, - TaskPluginsMetadata pluginsMetadata + Class connectorClass ); } @@ -1897,29 +1834,28 @@ public WorkerTask, SinkRecord> doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, ClassLoader classLoader, RetryWithToleranceOperator> retryWithToleranceOperator, TransformationChain, SinkRecord> transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass, - TaskPluginsMetadata taskPluginsMetadata + Class connectorClass ) { SinkConnectorConfig sinkConfig = new SinkConnectorConfig(plugins, connectorConfig.originalsStrings()); WorkerErrantRecordReporter workerErrantRecordReporter = createWorkerErrantRecordReporter(sinkConfig, retryWithToleranceOperator, - keyConverterPlugin.get(), valueConverterPlugin.get(), headerConverterPlugin.get()); + keyConverter, valueConverter, headerConverter); Map consumerProps = baseConsumerConfigs( id.connector(), "connector-consumer-" + id, config, connectorConfig, connectorClass, connectorClientConfigOverridePolicy, kafkaClusterId, ConnectorType.SINK); KafkaConsumer consumer = new KafkaConsumer<>(consumerProps); - return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverterPlugin, - valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, consumer, classLoader, time, + return new WorkerSinkTask(id, (SinkTask) task, statusListener, initialState, config, configState, metrics, keyConverter, + valueConverter, errorHandlingMetrics, headerConverter, transformationChain, consumer, classLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, herder.statusBackingStore(), - () -> sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass), taskPluginsMetadata, plugins.safeLoaderSwapper()); + () -> sinkTaskReporters(id, sinkConfig, errorHandlingMetrics, connectorClass)); } } @@ -1939,15 +1875,14 @@ public WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass, - TaskPluginsMetadata pluginsMetadata + Class connectorClass ) { SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connectorConfig.originalsStrings(), config.topicCreationEnable()); @@ -1977,10 +1912,10 @@ public WorkerTask doBuild( OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, id.connector(), internalKeyConverter, internalValueConverter); // Note we pass the configState as it performs dynamic transformations under the covers - return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, - headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, + return new WorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, errorHandlingMetrics, + headerConverter, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, classLoader, time, - retryWithToleranceOperator, herder.statusBackingStore(), executor, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), pluginsMetadata, plugins.safeLoaderSwapper()); + retryWithToleranceOperator, herder.statusBackingStore(), executor, () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics)); } } @@ -2007,15 +1942,14 @@ public WorkerTask doBuild( TaskStatus.Listener statusListener, TargetState initialState, ConnectorConfig connectorConfig, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, - Plugin headerConverterPlugin, + Converter keyConverter, + Converter valueConverter, + HeaderConverter headerConverter, ClassLoader classLoader, RetryWithToleranceOperator retryWithToleranceOperator, TransformationChain transformationChain, ErrorHandlingMetrics errorHandlingMetrics, - Class connectorClass, - TaskPluginsMetadata pluginsMetadata + Class connectorClass ) { SourceConnectorConfig sourceConfig = new SourceConnectorConfig(plugins, connectorConfig.originalsStrings(), config.topicCreationEnable()); @@ -2042,11 +1976,11 @@ public WorkerTask doBuild( OffsetStorageWriter offsetWriter = new OffsetStorageWriter(offsetStore, id.connector(), internalKeyConverter, internalValueConverter); // Note we pass the configState as it performs dynamic transformations under the covers - return new ExactlyOnceWorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, - headerConverterPlugin, transformationChain, producer, topicAdmin, topicCreationGroups, + return new ExactlyOnceWorkerSourceTask(id, (SourceTask) task, statusListener, initialState, keyConverter, valueConverter, + headerConverter, transformationChain, producer, topicAdmin, topicCreationGroups, offsetReader, offsetWriter, offsetStore, config, configState, metrics, errorHandlingMetrics, classLoader, time, retryWithToleranceOperator, herder.statusBackingStore(), sourceConfig, executor, preProducerCheck, postProducerCheck, - () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics), pluginsMetadata, plugins.safeLoaderSwapper()); + () -> sourceTaskReporters(id, sourceConfig, errorHandlingMetrics)); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java index 8d953d7ded35b..ca188ffd97af7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConfig.java @@ -37,6 +37,7 @@ import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.List; import java.util.Locale; import java.util.Map; @@ -58,18 +59,17 @@ public class WorkerConfig extends AbstractConfig { private static final Logger log = LoggerFactory.getLogger(WorkerConfig.class); public static final String BOOTSTRAP_SERVERS_CONFIG = "bootstrap.servers"; - public static final String BOOTSTRAP_SERVERS_DOC = + public static final String BOOTSTRAP_SERVERS_DOC = "A list of host/port pairs used to establish the initial connection to the Kafka cluster. " + "Clients use this list to bootstrap and discover the full set of Kafka brokers. " + "While the order of servers in the list does not matter, we recommend including more than one server to ensure resilience if any servers are down. " + "This list does not need to contain the entire set of brokers, as Kafka clients automatically manage and update connections to the cluster efficiently. " + "This list must be in the form host1:port1,host2:port2,...."; + public static final String BOOTSTRAP_SERVERS_DEFAULT = "localhost:9092"; public static final String CLIENT_DNS_LOOKUP_CONFIG = CommonClientConfigs.CLIENT_DNS_LOOKUP_CONFIG; public static final String CLIENT_DNS_LOOKUP_DOC = CommonClientConfigs.CLIENT_DNS_LOOKUP_DOC; - public static final String PLUGIN_VERSION_SUFFIX = "plugin.version"; - public static final String KEY_CONVERTER_CLASS_CONFIG = "key.converter"; public static final String KEY_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -77,10 +77,6 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; - public static final String KEY_CONVERTER_VERSION = "key.converter." + PLUGIN_VERSION_SUFFIX; - public static final String KEY_CONVERTER_VERSION_DEFAULT = null; - public static final String KEY_CONVERTER_VERSION_DOC = "Version of the key converter."; - public static final String VALUE_CONVERTER_CLASS_CONFIG = "value.converter"; public static final String VALUE_CONVERTER_CLASS_DOC = "Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -88,10 +84,6 @@ public class WorkerConfig extends AbstractConfig { " independent of connectors it allows any connector to work with any serialization format." + " Examples of common formats include JSON and Avro."; - public static final String VALUE_CONVERTER_VERSION = "value.converter." + PLUGIN_VERSION_SUFFIX; - public static final String VALUE_CONVERTER_VERSION_DEFAULT = null; - public static final String VALUE_CONVERTER_VERSION_DOC = "Version of the value converter."; - public static final String HEADER_CONVERTER_CLASS_CONFIG = "header.converter"; public static final String HEADER_CONVERTER_CLASS_DOC = "HeaderConverter class used to convert between Kafka Connect format and the serialized form that is written to Kafka." + @@ -101,10 +93,6 @@ public class WorkerConfig extends AbstractConfig { " header values to strings and deserialize them by inferring the schemas."; public static final String HEADER_CONVERTER_CLASS_DEFAULT = SimpleHeaderConverter.class.getName(); - public static final String HEADER_CONVERTER_VERSION = "header.converter." + PLUGIN_VERSION_SUFFIX; - public static final String HEADER_CONVERTER_VERSION_DEFAULT = null; - public static final String HEADER_CONVERTER_VERSION_DOC = "Version of the header converter."; - public static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG = "task.shutdown.graceful.timeout.ms"; private static final String TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC = @@ -135,7 +123,7 @@ public class WorkerConfig extends AbstractConfig { + "plugins and their dependencies\n" + "Note: symlinks will be followed to discover dependencies or plugins.\n" + "Examples: plugin.path=/usr/local/share/java,/usr/local/share/kafka/plugins," - + "/opt/connectors\n" + + "/opt/connectors\n" + "Do not use config provider variables in this property, since the raw path is used " + "by the worker's scanner before config providers are initialized and used to " + "replace variables."; @@ -152,15 +140,19 @@ public class WorkerConfig extends AbstractConfig { + "* " + SERVICE_LOAD + ": Discover plugins only by ServiceLoader. Faster startup than other modes. " + "Plugins which are not discoverable by ServiceLoader may not be usable."; - public static final String CONFIG_PROVIDERS_CONFIG = AbstractConfig.CONFIG_PROVIDERS_CONFIG; - protected static final String CONFIG_PROVIDERS_DOC = AbstractConfig.CONFIG_PROVIDERS_DOC; + public static final String CONFIG_PROVIDERS_CONFIG = "config.providers"; + protected static final String CONFIG_PROVIDERS_DOC = + "Comma-separated names of ConfigProvider classes, loaded and used " + + "in the order specified. Implementing the interface " + + "ConfigProvider allows you to replace variable references in connector configurations, " + + "such as for externalized secrets. "; public static final String CONNECTOR_CLIENT_POLICY_CLASS_CONFIG = "connector.client.config.override.policy"; public static final String CONNECTOR_CLIENT_POLICY_CLASS_DOC = "Class name or alias of implementation of ConnectorClientConfigOverridePolicy. Defines what client configurations can be " - + "overridden by the connector. The default implementation is All, meaning connector configurations can override all client properties. " - + "The other possible policies in the framework include None to disallow connectors from overriding client properties, " - + "and Principal to allow connectors to override only client principals."; + + "overridden by the connector. The default implementation is `All`, meaning connector configurations can override all client properties. " + + "The other possible policies in the framework include `None` to disallow connectors from overriding client properties, " + + "and `Principal` to allow connectors to override only client principals."; public static final String CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT = "All"; @@ -185,7 +177,7 @@ public class WorkerConfig extends AbstractConfig { public static final String TOPIC_CREATION_ENABLE_CONFIG = "topic.creation.enable"; protected static final String TOPIC_CREATION_ENABLE_DOC = "Whether to allow " + "automatic creation of topics used by source connectors, when source connectors " - + "are configured with " + TOPIC_CREATION_PREFIX + " properties. Each task will use an " + + "are configured with `" + TOPIC_CREATION_PREFIX + "` properties. Each task will use an " + "admin client to create its topics and will not depend on the Kafka brokers " + "to create topics automatically."; protected static final boolean TOPIC_CREATION_ENABLE_DEFAULT = true; @@ -197,8 +189,7 @@ public class WorkerConfig extends AbstractConfig { */ protected static ConfigDef baseConfigDef() { ConfigDef result = new ConfigDef() - .define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, ConfigDef.NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + .define(BOOTSTRAP_SERVERS_CONFIG, Type.LIST, BOOTSTRAP_SERVERS_DEFAULT, Importance.HIGH, BOOTSTRAP_SERVERS_DOC) .define(CLIENT_DNS_LOOKUP_CONFIG, Type.STRING, @@ -209,12 +200,8 @@ protected static ConfigDef baseConfigDef() { CLIENT_DNS_LOOKUP_DOC) .define(KEY_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, KEY_CONVERTER_CLASS_DOC) - .define(KEY_CONVERTER_VERSION, Type.STRING, - KEY_CONVERTER_VERSION_DEFAULT, Importance.LOW, KEY_CONVERTER_VERSION_DOC) .define(VALUE_CONVERTER_CLASS_CONFIG, Type.CLASS, Importance.HIGH, VALUE_CONVERTER_CLASS_DOC) - .define(VALUE_CONVERTER_VERSION, Type.STRING, - VALUE_CONVERTER_VERSION_DEFAULT, Importance.LOW, VALUE_CONVERTER_VERSION_DOC) .define(TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_CONFIG, Type.LONG, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DEFAULT, Importance.LOW, TASK_SHUTDOWN_GRACEFUL_TIMEOUT_MS_DOC) @@ -225,7 +212,6 @@ protected static ConfigDef baseConfigDef() { .define(PLUGIN_PATH_CONFIG, Type.LIST, null, - ConfigDef.ValidList.anyNonDuplicateValues(false, true), Importance.LOW, PLUGIN_PATH_DOC) .define(PLUGIN_DISCOVERY_CONFIG, @@ -246,19 +232,13 @@ protected static ConfigDef baseConfigDef() { Importance.LOW, CommonClientConfigs.METRICS_RECORDING_LEVEL_DOC) .define(METRIC_REPORTER_CLASSES_CONFIG, Type.LIST, - JmxReporter.class.getName(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - Importance.LOW, + JmxReporter.class.getName(), Importance.LOW, CommonClientConfigs.METRIC_REPORTER_CLASSES_DOC) .define(HEADER_CONVERTER_CLASS_CONFIG, Type.CLASS, HEADER_CONVERTER_CLASS_DEFAULT, Importance.LOW, HEADER_CONVERTER_CLASS_DOC) - .define(HEADER_CONVERTER_VERSION, Type.STRING, - HEADER_CONVERTER_VERSION_DEFAULT, Importance.LOW, HEADER_CONVERTER_VERSION_DOC) - .define(CONFIG_PROVIDERS_CONFIG, - Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), + .define(CONFIG_PROVIDERS_CONFIG, Type.LIST, + Collections.emptyList(), Importance.LOW, CONFIG_PROVIDERS_DOC) .define(CONNECTOR_CLIENT_POLICY_CLASS_CONFIG, Type.STRING, CONNECTOR_CLIENT_POLICY_CLASS_DEFAULT, Importance.MEDIUM, CONNECTOR_CLIENT_POLICY_CLASS_DOC) @@ -320,7 +300,7 @@ static String lookupKafkaClusterId(Admin adminClient) { private void logInternalConverterRemovalWarnings(Map props) { List removedProperties = new ArrayList<>(); - for (String property : List.of("internal.key.converter", "internal.value.converter")) { + for (String property : Arrays.asList("internal.key.converter", "internal.value.converter")) { if (props.containsKey(property)) { removedProperties.add(property); } @@ -329,7 +309,12 @@ private void logInternalConverterRemovalWarnings(Map props) { if (!removedProperties.isEmpty()) { log.warn( "The worker has been configured with one or more internal converter properties ({}). " - + "These properties have been removed since version 3.0 and an instance of the JsonConverter with schemas.enable set to false will be used instead.", + + "Support for these properties was deprecated in version 2.0 and removed in version 3.0, " + + "and specifying them will have no effect. " + + "Instead, an instance of the JsonConverter with schemas.enable " + + "set to false will be used. For more information, please visit " + + "https://kafka.apache.org/documentation/#upgrade and consult the upgrade notes" + + "for the 3.0 release.", removedProperties); } } @@ -342,8 +327,8 @@ private void logPluginPathConfigProviderWarning(Map rawOriginals if (!Objects.equals(rawPluginPath, transformedPluginPath)) { log.warn( "Variables cannot be used in the 'plugin.path' property, since the property is " - + "used by plugin scanning before the config providers that replace the " - + "variables are initialized. The raw value '{}' was used for plugin scanning, as " + + "used by plugin scanning before the config providers that replace the " + + "variables are initialized. The raw value '{}' was used for plugin scanning, as " + "opposed to the transformed value '{}', and this may cause unexpected results.", rawPluginPath, transformedPluginPath diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java index 3faf70f898c7c..8bb541c018519 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerConnector.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.connector.Connector; import org.apache.kafka.connect.connector.ConnectorContext; @@ -78,7 +77,6 @@ private enum State { private volatile Throwable externalFailure; private volatile boolean stopping; // indicates whether the Worker has asked the connector to stop private volatile boolean cancelled; // indicates whether the Worker has cancelled the connector (e.g. because of slow shutdown) - private final String version; private State state; private final CloseableOffsetStorageReader offsetStorageReader; @@ -88,7 +86,7 @@ public WorkerConnector(String connName, Connector connector, ConnectorConfig connectorConfig, CloseableConnectorContext ctx, - ConnectMetrics connectMetrics, + ConnectMetrics metrics, ConnectorStatus.Listener statusListener, CloseableOffsetStorageReader offsetStorageReader, ConnectorOffsetBackingStore offsetStore, @@ -98,9 +96,8 @@ public WorkerConnector(String connName, this.loader = loader; this.ctx = ctx; this.connector = connector; - this.version = connector.version(); this.state = State.INIT; - this.metrics = new ConnectorMetricsGroup(connectMetrics, AbstractStatus.State.UNASSIGNED, this.version, statusListener); + this.metrics = new ConnectorMetricsGroup(metrics, AbstractStatus.State.UNASSIGNED, statusListener); this.statusListener = this.metrics; this.offsetStorageReader = offsetStorageReader; this.offsetStore = offsetStore; @@ -420,10 +417,6 @@ public final boolean isSourceConnector() { return ConnectUtils.isSourceConnector(connector); } - public String connectorVersion() { - return version; - } - protected final String connectorType() { if (isSinkConnector()) return "sink"; @@ -456,12 +449,7 @@ class ConnectorMetricsGroup implements ConnectorStatus.Listener, AutoCloseable { private final MetricGroup metricGroup; private final ConnectorStatus.Listener delegate; - public ConnectorMetricsGroup( - ConnectMetrics connectMetrics, - AbstractStatus.State initialState, - String connectorVersion, - ConnectorStatus.Listener delegate - ) { + public ConnectorMetricsGroup(ConnectMetrics connectMetrics, AbstractStatus.State initialState, ConnectorStatus.Listener delegate) { Objects.requireNonNull(connectMetrics); Objects.requireNonNull(connector); Objects.requireNonNull(initialState); @@ -476,7 +464,7 @@ public ConnectorMetricsGroup( metricGroup.addImmutableValueMetric(registry.connectorType, connectorType()); metricGroup.addImmutableValueMetric(registry.connectorClass, connector.getClass().getName()); - metricGroup.addImmutableValueMetric(registry.connectorVersion, connectorVersion); + metricGroup.addImmutableValueMetric(registry.connectorVersion, connector.version()); metricGroup.addValueMetric(registry.connectorStatus, now -> state.toString().toLowerCase(Locale.getDefault())); } @@ -594,11 +582,6 @@ public void raiseError(Exception e) { onFailure(e); WorkerConnector.this.ctx.raiseError(e); } - - @Override - public PluginMetrics pluginMetrics() { - return WorkerConnector.this.ctx.pluginMetrics(); - } } private class WorkerSinkConnectorContext extends WorkerConnectorContext implements SinkConnectorContext { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java index 1de9ff2d9a56e..424de8f3de5b1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTask.java @@ -25,7 +25,6 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.errors.WakeupException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Sensor; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.CumulativeSum; @@ -47,7 +46,6 @@ import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.Stage; import org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.sink.SinkTask; import org.apache.kafka.connect.storage.ClusterConfigState; @@ -63,15 +61,15 @@ import java.time.Duration; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.function.Function; import java.util.function.Supplier; import java.util.regex.Pattern; import java.util.stream.Collectors; +import static java.util.Collections.singleton; import static org.apache.kafka.connect.runtime.WorkerConfig.TOPIC_TRACKING_ENABLE_CONFIG; /** @@ -84,9 +82,9 @@ class WorkerSinkTask extends WorkerTask, SinkReco private final SinkTask task; private final ClusterConfigState configState; private Map taskConfig; - private final Plugin keyConverterPlugin; - private final Plugin valueConverterPlugin; - private final Plugin headerConverterPlugin; + private final Converter keyConverter; + private final Converter valueConverter; + private final HeaderConverter headerConverter; private final SinkTaskMetricsGroup sinkTaskMetricsGroup; private final boolean isTopicTrackingEnabled; private final Consumer consumer; @@ -104,7 +102,6 @@ class WorkerSinkTask extends WorkerTask, SinkReco private boolean committing; private boolean taskStopped; private final WorkerErrantRecordReporter workerErrantRecordReporter; - private final String version; public WorkerSinkTask(ConnectorTaskId id, SinkTask task, @@ -113,10 +110,10 @@ public WorkerSinkTask(ConnectorTaskId id, WorkerConfig workerConfig, ClusterConfigState configState, ConnectMetrics connectMetrics, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, + Converter keyConverter, + Converter valueConverter, ErrorHandlingMetrics errorMetrics, - Plugin headerConverterPlugin, + HeaderConverter headerConverter, TransformationChain, SinkRecord> transformationChain, Consumer consumer, ClassLoader loader, @@ -124,18 +121,16 @@ public WorkerSinkTask(ConnectorTaskId id, RetryWithToleranceOperator> retryWithToleranceOperator, WorkerErrantRecordReporter workerErrantRecordReporter, StatusBackingStore statusBackingStore, - Supplier>>> errorReportersSupplier, - TaskPluginsMetadata pluginsMetadata, - Function pluginLoaderSwapper) { + Supplier>>> errorReportersSupplier) { super(id, statusListener, initialState, loader, connectMetrics, errorMetrics, - retryWithToleranceOperator, transformationChain, errorReportersSupplier, time, statusBackingStore, pluginsMetadata, pluginLoaderSwapper); + retryWithToleranceOperator, transformationChain, errorReportersSupplier, time, statusBackingStore); this.workerConfig = workerConfig; this.task = task; this.configState = configState; - this.keyConverterPlugin = keyConverterPlugin; - this.valueConverterPlugin = valueConverterPlugin; - this.headerConverterPlugin = headerConverterPlugin; + this.keyConverter = keyConverter; + this.valueConverter = valueConverter; + this.headerConverter = headerConverter; this.messageBatch = new ArrayList<>(); this.lastCommittedOffsets = new HashMap<>(); this.currentOffsets = new HashMap<>(); @@ -154,7 +149,6 @@ public WorkerSinkTask(ConnectorTaskId id, this.isTopicTrackingEnabled = workerConfig.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.taskStopped = false; this.workerErrantRecordReporter = workerErrantRecordReporter; - this.version = task.version(); } @Override @@ -186,10 +180,7 @@ protected void close() { } taskStopped = true; Utils.closeQuietly(consumer, "consumer"); - Utils.closeQuietly(headerConverterPlugin, "header converter"); - Utils.closeQuietly(keyConverterPlugin, "key converter"); - Utils.closeQuietly(valueConverterPlugin, "value converter"); - Utils.closeQuietly(pluginMetrics, "plugin metrics"); + Utils.closeQuietly(headerConverter, "header converter"); /* Setting partition count explicitly to 0 to handle the case, when the task fails, which would cause its consumer to leave the group. @@ -229,11 +220,6 @@ public void execute() { } } - @Override - public String taskVersion() { - return version; - } - protected void iteration() { final long offsetCommitIntervalMs = workerConfig.getLong(WorkerConfig.OFFSET_COMMIT_INTERVAL_MS_CONFIG); @@ -366,12 +352,12 @@ boolean isCommitting() { //VisibleForTesting Map lastCommittedOffsets() { - return Map.copyOf(lastCommittedOffsets); + return Collections.unmodifiableMap(lastCommittedOffsets); } //VisibleForTesting Map currentOffsets() { - return Map.copyOf(currentOffsets); + return Collections.unmodifiableMap(currentOffsets); } private void doCommitSync(Map offsets, int seqno) { @@ -549,19 +535,13 @@ private void convertMessages(ConsumerRecords msgs) { } private SinkRecord convertAndTransformRecord(ProcessingContext> context, final ConsumerRecord msg) { - SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(context, () -> { - try (LoaderSwap swap = pluginLoaderSwapper.apply(keyConverterPlugin.get().getClass().getClassLoader())) { - return keyConverterPlugin.get().toConnectData(msg.topic(), msg.headers(), msg.key()); - } - }, Stage.KEY_CONVERTER, keyConverterPlugin.get().getClass()); + SchemaAndValue keyAndSchema = retryWithToleranceOperator.execute(context, () -> keyConverter.toConnectData(msg.topic(), msg.headers(), msg.key()), + Stage.KEY_CONVERTER, keyConverter.getClass()); - SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(context, () -> { - try (LoaderSwap swap = pluginLoaderSwapper.apply(valueConverterPlugin.get().getClass().getClassLoader())) { - return valueConverterPlugin.get().toConnectData(msg.topic(), msg.headers(), msg.value()); - } - }, Stage.VALUE_CONVERTER, valueConverterPlugin.get().getClass()); + SchemaAndValue valueAndSchema = retryWithToleranceOperator.execute(context, () -> valueConverter.toConnectData(msg.topic(), msg.headers(), msg.value()), + Stage.VALUE_CONVERTER, valueConverter.getClass()); - Headers headers = retryWithToleranceOperator.execute(context, () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverterPlugin.get().getClass()); + Headers headers = retryWithToleranceOperator.execute(context, () -> convertHeadersFor(msg), Stage.HEADER_CONVERTER, headerConverter.getClass()); if (context.failed()) { return null; @@ -596,10 +576,8 @@ private Headers convertHeadersFor(ConsumerRecord record) { if (recordHeaders != null) { String topic = record.topic(); for (org.apache.kafka.common.header.Header recordHeader : recordHeaders) { - try (LoaderSwap swap = pluginLoaderSwapper.apply(headerConverterPlugin.get().getClass().getClassLoader())) { - SchemaAndValue schemaAndValue = headerConverterPlugin.get().toConnectHeader(topic, recordHeader.key(), recordHeader.value()); - result.add(recordHeader.key(), schemaAndValue); - } + SchemaAndValue schemaAndValue = headerConverter.toConnectHeader(topic, recordHeader.key(), recordHeader.value()); + result.add(recordHeader.key(), schemaAndValue); } } return result; @@ -612,7 +590,7 @@ protected WorkerErrantRecordReporter workerErrantRecordReporter() { private void resumeAll() { for (TopicPartition tp : consumer.assignment()) if (!context.pausedPartitions().contains(tp)) - consumer.resume(Set.of(tp)); + consumer.resume(singleton(tp)); } private void pauseAll() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java index 9b31b6e35d95c..e767c7640b47a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSinkTaskContext.java @@ -18,7 +18,6 @@ import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.errors.IllegalWorkerStateException; import org.apache.kafka.connect.sink.ErrantRecordReporter; import org.apache.kafka.connect.sink.SinkTaskContext; @@ -27,10 +26,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; @@ -117,7 +116,7 @@ public void pause(TopicPartition... partitions) { if (sinkTask.shouldPause()) { log.debug("{} Connector is paused, so not pausing consumer's partitions {}", this, partitions); } else { - consumer.pause(List.of(partitions)); + consumer.pause(Arrays.asList(partitions)); log.debug("{} Pausing partitions {}. Connector is not paused.", this, partitions); } } catch (IllegalStateException e) { @@ -131,13 +130,12 @@ public void resume(TopicPartition... partitions) { throw new IllegalWorkerStateException("SinkTaskContext may not be used to resume consumption until the task is initialized"); } try { - List partitionList = List.of(partitions); - partitionList.forEach(pausedPartitions::remove); + pausedPartitions.removeAll(Arrays.asList(partitions)); if (sinkTask.shouldPause()) { - log.debug("{} Connector is paused, so not resuming consumer's partitions {}", this, partitionList); + log.debug("{} Connector is paused, so not resuming consumer's partitions {}", this, partitions); } else { - consumer.resume(partitionList); - log.debug("{} Resuming partitions: {}", this, partitionList); + consumer.resume(Arrays.asList(partitions)); + log.debug("{} Resuming partitions: {}", this, partitions); } } catch (IllegalStateException e) { throw new IllegalWorkerStateException("SinkTasks may not resume partitions that are not currently assigned to them.", e); @@ -167,11 +165,6 @@ public ErrantRecordReporter errantRecordReporter() { return sinkTask.workerErrantRecordReporter(); } - @Override - public PluginMetrics pluginMetrics() { - return sinkTask.pluginMetrics(); - } - @Override public String toString() { return "WorkerSinkTaskContext{" + diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java index 3ccd530be3900..55cc097083d02 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTask.java @@ -19,7 +19,6 @@ import org.apache.kafka.clients.producer.Producer; import org.apache.kafka.clients.producer.ProducerRecord; import org.apache.kafka.clients.producer.RecordMetadata; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; @@ -28,7 +27,6 @@ import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.Stage; import org.apache.kafka.connect.runtime.errors.ToleranceType; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.storage.CloseableOffsetStorageReader; @@ -54,7 +52,6 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicReference; -import java.util.function.Function; import java.util.function.Supplier; import static org.apache.kafka.connect.runtime.SubmittedRecords.CommittableOffsets; @@ -74,10 +71,10 @@ public WorkerSourceTask(ConnectorTaskId id, SourceTask task, TaskStatus.Listener statusListener, TargetState initialState, - Plugin keyConverterPlugin, - Plugin valueConverterPlugin, + Converter keyConverter, + Converter valueConverter, ErrorHandlingMetrics errorMetrics, - Plugin headerConverterPlugin, + HeaderConverter headerConverter, TransformationChain transformationChain, Producer producer, TopicAdmin admin, @@ -93,14 +90,12 @@ public WorkerSourceTask(ConnectorTaskId id, RetryWithToleranceOperator retryWithToleranceOperator, StatusBackingStore statusBackingStore, Executor closeExecutor, - Supplier>> errorReportersSupplier, - TaskPluginsMetadata pluginsMetadata, - Function pluginLoaderSwapper) { + Supplier>> errorReportersSupplier) { - super(id, task, statusListener, initialState, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, - null, producer, + super(id, task, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, + new WorkerSourceTaskContext(offsetReader, id, configState, null), producer, admin, topicGroups, offsetReader, offsetWriter, offsetStore, workerConfig, connectMetrics, errorMetrics, loader, - time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier, pluginsMetadata, pluginLoaderSwapper); + time, retryWithToleranceOperator, statusBackingStore, closeExecutor, errorReportersSupplier); this.committableOffsets = CommittableOffsets.EMPTY; this.submittedRecords = new SubmittedRecords(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java index e1061d70a93af..cb5af463ce771 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerSourceTaskContext.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.connect.source.SourceTaskContext; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.OffsetStorageReader; @@ -30,18 +29,15 @@ public class WorkerSourceTaskContext implements SourceTaskContext { private final ConnectorTaskId id; private final ClusterConfigState configState; private final WorkerTransactionContext transactionContext; - private final PluginMetrics pluginMetrics; public WorkerSourceTaskContext(OffsetStorageReader reader, ConnectorTaskId id, ClusterConfigState configState, - WorkerTransactionContext transactionContext, - PluginMetrics pluginMetrics) { + WorkerTransactionContext transactionContext) { this.reader = reader; this.id = id; this.configState = configState; this.transactionContext = transactionContext; - this.pluginMetrics = pluginMetrics; } @Override @@ -58,9 +54,4 @@ public OffsetStorageReader offsetStorageReader() { public WorkerTransactionContext transactionContext() { return transactionContext; } - - @Override - public PluginMetrics pluginMetrics() { - return pluginMetrics; - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java index 1661d710a8659..9b70572fe24a7 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/WorkerTask.java @@ -19,9 +19,7 @@ import org.apache.kafka.common.MetricName; import org.apache.kafka.common.MetricNameTemplate; import org.apache.kafka.common.metrics.Gauge; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; -import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Frequencies; import org.apache.kafka.common.metrics.stats.Max; @@ -33,7 +31,6 @@ import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; -import org.apache.kafka.connect.runtime.isolation.LoaderSwap; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.util.ConnectorTaskId; import org.apache.kafka.connect.util.LoggingContext; @@ -41,12 +38,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; import java.util.List; import java.util.Locale; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import java.util.function.Function; import java.util.function.Supplier; /** @@ -81,8 +76,6 @@ abstract class WorkerTask> implements Runnable { protected final RetryWithToleranceOperator retryWithToleranceOperator; protected final TransformationChain transformationChain; private final Supplier>> errorReportersSupplier; - protected final Function pluginLoaderSwapper; - protected final PluginMetricsImpl pluginMetrics; public WorkerTask(ConnectorTaskId id, TaskStatus.Listener statusListener, @@ -94,11 +87,9 @@ public WorkerTask(ConnectorTaskId id, TransformationChain transformationChain, Supplier>> errorReportersSupplier, Time time, - StatusBackingStore statusBackingStore, - TaskPluginsMetadata pluginsMetadata, - Function pluginLoaderSwapper) { + StatusBackingStore statusBackingStore) { this.id = id; - this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener, pluginsMetadata); + this.taskMetricsGroup = new TaskMetricsGroup(this.id, connectMetrics, statusListener); this.errorMetrics = errorMetrics; this.statusListener = taskMetricsGroup; this.loader = loader; @@ -112,8 +103,6 @@ public WorkerTask(ConnectorTaskId id, this.errorReportersSupplier = errorReportersSupplier; this.time = time; this.statusBackingStore = statusBackingStore; - this.pluginLoaderSwapper = pluginLoaderSwapper; - this.pluginMetrics = connectMetrics.taskPluginMetrics(id); } public ConnectorTaskId id() { @@ -124,10 +113,6 @@ public ClassLoader loader() { return loader; } - public PluginMetrics pluginMetrics() { - return pluginMetrics; - } - /** * Initialize the task for execution. * @@ -198,8 +183,6 @@ void doStart() { protected abstract void close(); - protected abstract String taskVersion(); - protected boolean isFailed() { return failed; } @@ -401,25 +384,14 @@ TaskMetricsGroup taskMetricsGroup() { static class TaskMetricsGroup implements TaskStatus.Listener { private final TaskStatus.Listener delegateListener; private final MetricGroup metricGroup; - private final List transformationGroups = new ArrayList<>(); - private final List predicateGroups = new ArrayList<>(); private final Time time; private final StateTracker taskStateTimer; private final Sensor commitTime; private final Sensor batchSize; private final Sensor commitAttempts; - private final ConnectMetrics connectMetrics; - private final ConnectorTaskId id; public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskStatus.Listener statusListener) { - this(id, connectMetrics, statusListener, null); - } - - public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskStatus.Listener statusListener, TaskPluginsMetadata pluginsMetadata) { delegateListener = statusListener; - this.connectMetrics = connectMetrics; - this.id = id; - time = connectMetrics.time(); taskStateTimer = new StateTracker(); ConnectMetricsRegistry registry = connectMetrics.registry(); @@ -449,7 +421,6 @@ public TaskMetricsGroup(ConnectorTaskId id, ConnectMetrics connectMetrics, TaskS Frequencies commitFrequencies = Frequencies.forBooleanValues(offsetCommitFailures, offsetCommitSucceeds); commitAttempts = metricGroup.sensor("offset-commit-completion"); commitAttempts.add(commitFrequencies); - addPluginInfoMetric(pluginsMetadata); } private void addRatioMetric(final State matchingState, MetricNameTemplate template) { @@ -458,52 +429,8 @@ private void addRatioMetric(final State matchingState, MetricNameTemplate templa taskStateTimer.durationRatio(matchingState, now)); } - private void addPluginInfoMetric(TaskPluginsMetadata pluginsMetadata) { - if (pluginsMetadata == null) { - return; - } - ConnectMetricsRegistry registry = connectMetrics.registry(); - metricGroup.addValueMetric(registry.taskConnectorClass, now -> pluginsMetadata.connectorClass()); - metricGroup.addValueMetric(registry.taskConnectorClassVersion, now -> pluginsMetadata.connectorVersion()); - metricGroup.addValueMetric(registry.taskConnectorType, now -> pluginsMetadata.connectorType()); - metricGroup.addValueMetric(registry.taskClass, now -> pluginsMetadata.taskClass()); - metricGroup.addValueMetric(registry.taskVersion, now -> pluginsMetadata.taskVersion()); - metricGroup.addValueMetric(registry.taskKeyConverterClass, now -> pluginsMetadata.keyConverterClass()); - metricGroup.addValueMetric(registry.taskKeyConverterVersion, now -> pluginsMetadata.keyConverterVersion()); - metricGroup.addValueMetric(registry.taskValueConverterClass, now -> pluginsMetadata.valueConverterClass()); - metricGroup.addValueMetric(registry.taskValueConverterVersion, now -> pluginsMetadata.valueConverterVersion()); - metricGroup.addValueMetric(registry.taskHeaderConverterClass, now -> pluginsMetadata.headerConverterClass()); - metricGroup.addValueMetric(registry.taskHeaderConverterVersion, now -> pluginsMetadata.headerConverterVersion()); - - if (!pluginsMetadata.transformations().isEmpty()) { - for (TransformationStage.AliasedPluginInfo entry : pluginsMetadata.transformations()) { - MetricGroup transformationGroup = connectMetrics.group(registry.transformsGroupName(), - registry.connectorTagName(), id.connector(), - registry.taskTagName(), Integer.toString(id.task()), - registry.transformsTagName(), entry.alias()); - transformationGroup.addValueMetric(registry.transformClass, now -> entry.className()); - transformationGroup.addValueMetric(registry.transformVersion, now -> entry.version()); - this.transformationGroups.add(transformationGroup); - } - } - - if (!pluginsMetadata.predicates().isEmpty()) { - for (TransformationStage.AliasedPluginInfo entry : pluginsMetadata.predicates()) { - MetricGroup predicateGroup = connectMetrics.group(registry.predicatesGroupName(), - registry.connectorTagName(), id.connector(), - registry.taskTagName(), Integer.toString(id.task()), - registry.predicateTagName(), entry.alias()); - predicateGroup.addValueMetric(registry.predicateClass, now -> entry.className()); - predicateGroup.addValueMetric(registry.predicateVersion, now -> entry.version()); - this.predicateGroups.add(predicateGroup); - } - } - } - void close() { metricGroup.close(); - transformationGroups.forEach(MetricGroup::close); - predicateGroups.forEach(MetricGroup::close); } void recordCommit(long duration, boolean success) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java index d91e1fec85f46..1436460d1a913 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectAssignor.java @@ -32,12 +32,12 @@ public interface ConnectAssignor { * method computes an assignment of connectors and tasks among the members of the worker group. * * @param leaderId the leader of the group - * @param protocol the protocol type + * @param protocol the protocol type; for Connect assignors this is "eager", "compatible", or "sessioned" * @param allMemberMetadata the metadata of all the active workers of the group * @param coordinator the worker coordinator that runs this assignor * @return the assignment of connectors and tasks to workers */ - Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, + Map performAssignment(String leaderId, String protocol, List allMemberMetadata, WorkerCoordinator coordinator); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java index 6b29598ab1014..2644e105d4dcd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocol.java @@ -27,12 +27,12 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Objects; -import java.util.Set; import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol; import static org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocolCollection; @@ -142,7 +142,7 @@ public static ByteBuffer serializeMetadata(WorkerState workerState) { * @return the collection of Connect protocol metadata */ public static JoinGroupRequestProtocolCollection metadataRequest(WorkerState workerState) { - return new JoinGroupRequestProtocolCollection(Set.of( + return new JoinGroupRequestProtocolCollection(Collections.singleton( new JoinGroupRequestProtocol() .setName(EAGER.protocol()) .setMetadata(ConnectProtocol.serializeMetadata(workerState).array())) diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibility.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibility.java index 84ec2f0a392d8..a1b661fce0808 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibility.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibility.java @@ -99,12 +99,16 @@ public static ConnectProtocolCompatibility compatibility(String name) { * @return the enum that corresponds to the protocol compatibility mode */ public static ConnectProtocolCompatibility fromProtocolVersion(short protocolVersion) { - return switch (protocolVersion) { - case CONNECT_PROTOCOL_V0 -> EAGER; - case CONNECT_PROTOCOL_V1 -> COMPATIBLE; - case CONNECT_PROTOCOL_V2 -> SESSIONED; - default -> throw new IllegalArgumentException("Unknown Connect protocol version: " + protocolVersion); - }; + switch (protocolVersion) { + case CONNECT_PROTOCOL_V0: + return EAGER; + case CONNECT_PROTOCOL_V1: + return COMPATIBLE; + case CONNECT_PROTOCOL_V2: + return SESSIONED; + default: + throw new IllegalArgumentException("Unknown Connect protocol version: " + protocolVersion); + } } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java index ccf33926bf93f..16ab0d47a3c72 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedConfig.java @@ -35,6 +35,7 @@ import java.security.Security; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Locale; @@ -119,7 +120,7 @@ public final class DistributedConfig extends WorkerConfig { */ public static final String WORKER_UNSYNC_BACKOFF_MS_CONFIG = "worker.unsync.backoff.ms"; private static final String WORKER_UNSYNC_BACKOFF_MS_DOC = "When the worker is out of sync with other workers and " + - " fails to catch up within the worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining."; + " fails to catch up within worker.sync.timeout.ms, leave the Connect cluster for this long before rejoining."; public static final int WORKER_UNSYNC_BACKOFF_MS_DEFAULT = 5 * 60 * 1000; public static final String CONFIG_STORAGE_PREFIX = "config.storage."; @@ -206,20 +207,20 @@ public final class DistributedConfig extends WorkerConfig { public static final Long INTER_WORKER_KEY_SIZE_DEFAULT = null; public static final String INTER_WORKER_KEY_TTL_MS_CONFIG = "inter.worker.key.ttl.ms"; - public static final String INTER_WORKER_KEY_TTL_MS_DOC = "The TTL of generated session keys used for " + public static final String INTER_WORKER_KEY_TTL_MS_MS_DOC = "The TTL of generated session keys used for " + "internal request validation (in milliseconds)"; - public static final int INTER_WORKER_KEY_TTL_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1)); + public static final int INTER_WORKER_KEY_TTL_MS_MS_DEFAULT = Math.toIntExact(TimeUnit.HOURS.toMillis(1)); public static final String INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG = "inter.worker.signature.algorithm"; public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT = "HmacSHA256"; - public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests. " - + "The algorithm '" + INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT + "' will be used as a default on JVMs that support it; " + public static final String INTER_WORKER_SIGNATURE_ALGORITHM_DOC = "The algorithm used to sign internal requests" + + "The algorithm '" + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + "' will be used as a default on JVMs that support it; " + "on other JVMs, no default is used and a value for this property must be manually specified in the worker config."; public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG = "inter.worker.verification.algorithms"; - public static final List INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = List.of(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT); + public static final List INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT = Collections.singletonList(INTER_WORKER_SIGNATURE_ALGORITHM_DEFAULT); public static final String INTER_WORKER_VERIFICATION_ALGORITHMS_DOC = "A list of permitted algorithms for verifying internal requests, " - + "which must include the algorithm used for the " + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + " property. " + + "which must include the algorithm used for the " + INTER_WORKER_SIGNATURE_ALGORITHM_CONFIG + " property. " + "The algorithm(s) '" + INTER_WORKER_VERIFICATION_ALGORITHMS_DEFAULT + "' will be used as a default on JVMs that provide them; " + "on other JVMs, no default is used and a value for this property must be manually specified in the worker config."; private final Crypto crypto; @@ -490,10 +491,10 @@ private static ConfigDef config(Crypto crypto) { SCHEDULED_REBALANCE_MAX_DELAY_MS_DOC) .define(INTER_WORKER_KEY_TTL_MS_CONFIG, ConfigDef.Type.INT, - INTER_WORKER_KEY_TTL_MS_DEFAULT, + INTER_WORKER_KEY_TTL_MS_MS_DEFAULT, between(0, Integer.MAX_VALUE), ConfigDef.Importance.LOW, - INTER_WORKER_KEY_TTL_MS_DOC) + INTER_WORKER_KEY_TTL_MS_MS_DOC) .define(INTER_WORKER_KEY_GENERATION_ALGORITHM_CONFIG, ConfigDef.Type.STRING, defaultKeyGenerationAlgorithm(crypto), diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java index 6c4bed311d36e..0d2a664cbc251 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/DistributedHerder.java @@ -82,7 +82,9 @@ import org.slf4j.Logger; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -303,7 +305,7 @@ public DistributedHerder(DistributedConfig config, this.restClient = restClient; this.isTopicTrackingEnabled = config.getBoolean(TOPIC_TRACKING_ENABLE_CONFIG); this.restNamespace = Objects.requireNonNull(restNamespace); - this.uponShutdown = List.of(uponShutdown); + this.uponShutdown = Arrays.asList(uponShutdown); String clientIdConfig = config.getString(CommonClientConfigs.CLIENT_ID_CONFIG); String clientId = clientIdConfig.isEmpty() ? "connect-" + workerId : clientIdConfig; @@ -711,7 +713,7 @@ private synchronized boolean updateConfigsWithIncrementalCooperative(AtomicRefer private void processConnectorConfigUpdates(Set connectorConfigUpdates) { // If we only have connector config updates, we can just bounce the updated connectors that are // currently assigned to this worker. - Set localConnectors = assignment == null ? Set.of() : new HashSet<>(assignment.connectors()); + Set localConnectors = assignment == null ? Collections.emptySet() : new HashSet<>(assignment.connectors()); Collection> connectorsToStart = new ArrayList<>(); log.trace("Processing connector config updates; " + "currently-owned connectors are {}, and to-be-updated connectors are {}", @@ -767,7 +769,7 @@ private void processTargetStateChanges(Set connectorTargetStateChanges) private void processTaskConfigUpdatesWithIncrementalCooperative(Set taskConfigUpdates) { Set localTasks = assignment == null - ? Set.of() + ? Collections.emptySet() : new HashSet<>(assignment.tasks()); log.trace("Processing task config updates with incremental cooperative rebalance protocol; " + "currently-owned tasks are {}, and to-be-updated tasks are {}", @@ -779,7 +781,7 @@ private void processTaskConfigUpdatesWithIncrementalCooperative(Set connectors) { Set localTasks = assignment == null - ? Set.of() + ? Collections.emptySet() : new HashSet<>(assignment.tasks()); List tasksToStop = localTasks.stream() @@ -964,7 +966,7 @@ private void validateSinkConnectorGroupId(Map config, Map new ConfigValue(overriddenConsumerGroupIdConfig, consumerGroupId, List.of(), new ArrayList<>()) + p -> new ConfigValue(overriddenConsumerGroupIdConfig, consumerGroupId, Collections.emptyList(), new ArrayList<>()) ); if (workerGroupId.equals(consumerGroupId)) { validatedGroupId.addErrorMessage("Consumer group " + consumerGroupId + @@ -1193,7 +1195,7 @@ public void stopConnector(final String connName, final Callback callback) // if the connector is reassigned during the ensuing rebalance, it is likely that it will immediately generate // a non-empty set of task configs). A STOPPED connector with a non-empty set of tasks is less acceptable // and likely to confuse users. - writeTaskConfigs(connName, List.of()); + writeTaskConfigs(connName, Collections.emptyList()); String stageDescription = "writing the STOPPED target stage for connector " + connName + " to the config topic"; try (TickThreadStage stage = new TickThreadStage(stageDescription)) { configBackingStore.putTargetState(connName, TargetState.STOPPED); @@ -1543,7 +1545,7 @@ void processRestartRequests() { try (TickThreadStage stage = new TickThreadStage(stageDescription)) { doRestartConnectorAndTasks(restartRequest); } catch (Exception e) { - log.warn("Unexpected error while trying to process {}, the restart request will be skipped.", restartRequest, e); + log.warn("Unexpected error while trying to process " + restartRequest + ", the restart request will be skipped.", e); } }); } @@ -2077,7 +2079,7 @@ private Callable getTaskStoppingCallable(final ConnectorTaskId taskId) { private void startConnector(String connectorName, Callback callback) { log.info("Starting connector {}", connectorName); final Map configProps = configState.connectorConfig(connectorName); - final CloseableConnectorContext ctx = new HerderConnectorContext(this, connectorName, worker.metrics().connectorPluginMetrics(connectorName)); + final CloseableConnectorContext ctx = new HerderConnectorContext(this, connectorName); final TargetState initialState = configState.targetState(connectorName); final Callback onInitialStateChange = (error, newState) -> { if (error != null) { @@ -2111,11 +2113,11 @@ private Callable getConnectorStartingCallable(final String connectorName) try { startConnector(connectorName, (error, result) -> { if (error != null) { - log.error("Failed to start connector '{}'", connectorName, error); + log.error("Failed to start connector '" + connectorName + "'", error); } }); } catch (Throwable t) { - log.error("Unexpected error while trying to start connector {}", connectorName, t); + log.error("Unexpected error while trying to start connector " + connectorName, t); onFailure(connectorName, t); } return null; @@ -2127,7 +2129,7 @@ private Callable getConnectorStoppingCallable(final String connectorName) try { worker.stopAndAwaitConnector(connectorName); } catch (Throwable t) { - log.error("Failed to shut down connector {}", connectorName, t); + log.error("Failed to shut down connector " + connectorName, t); } return null; }; @@ -2191,7 +2193,8 @@ private void reconfigureConnectorTasksWithExponentialBackoffRetries(long initial } boolean isPossibleExpiredKeyException(long initialRequestTime, Throwable error) { - if (error instanceof ConnectRestException connectError) { + if (error instanceof ConnectRestException) { + ConnectRestException connectError = (ConnectRestException) error; return connectError.statusCode() == Response.Status.FORBIDDEN.getStatusCode() && initialRequestTime + TimeUnit.MINUTES.toMillis(1) >= time.milliseconds(); } @@ -2562,8 +2565,9 @@ public int compareTo(DistributedHerderRequest o) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof DistributedHerderRequest other)) + if (!(o instanceof DistributedHerderRequest)) return false; + DistributedHerderRequest other = (DistributedHerderRequest) o; return compareTo(other) == 0; } @@ -2574,10 +2578,11 @@ public int hashCode() { } private static Callback forwardErrorAndTickThreadStages(final Callback callback) { - return callback.chainStaging((error, result) -> { + Callback cb = callback.chainStaging((error, result) -> { if (error != null) callback.onCompletion(error, null); }); + return cb; } private void updateDeletedConnectorStatus() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java index 1004382e1028d..0663d9e571052 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/EagerAssignor.java @@ -52,7 +52,7 @@ public EagerAssignor(LogContext logContext) { } @Override - public Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, + public Map performAssignment(String leaderId, String protocol, List allMemberMetadata, WorkerCoordinator coordinator) { log.debug("Performing task assignment"); @@ -132,13 +132,13 @@ private Map fillAssignmentsAndSerialize(Collection m Map groupAssignment = new HashMap<>(); for (String member : members) { - Collection connectors = connectorAssignments.getOrDefault(member, List.of()); + Collection connectors = connectorAssignments.get(member); if (connectors == null) { - connectors = List.of(); + connectors = Collections.emptyList(); } - Collection tasks = taskAssignments.getOrDefault(member, List.of()); + Collection tasks = taskAssignments.get(member); if (tasks == null) { - tasks = List.of(); + tasks = Collections.emptyList(); } Assignment assignment = new Assignment(error, leaderId, leaderUrl, maxOffset, connectors, tasks); log.debug("Assignment: {} -> {}", member, assignment); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java index 3c1f483b1f108..d99b38349a88a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/ExtendedAssignment.java @@ -21,6 +21,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.LinkedHashSet; import java.util.List; @@ -55,7 +56,7 @@ public class ExtendedAssignment extends ConnectProtocol.Assignment { private static final ExtendedAssignment EMPTY = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, null, null, -1, - new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), new ArrayList<>(), 0); + Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), Collections.emptyList(), 0); /** * Create an assignment indicating responsibility for the given connector instances and task Ids. @@ -166,7 +167,7 @@ private Map> revokedAsMap() { // Using LinkedHashMap preserves the ordering, which is helpful for tests and debugging Map> taskMap = new LinkedHashMap<>(); Optional.ofNullable(revokedConnectorIds) - .orElseGet(List::of) + .orElseGet(Collections::emptyList) .stream() .distinct() .forEachOrdered(connectorId -> { @@ -176,7 +177,7 @@ private Map> revokedAsMap() { }); Optional.ofNullable(revokedTaskIds) - .orElseGet(List::of) + .orElseGet(Collections::emptyList) .forEach(taskId -> { String connectorId = taskId.connector(); Collection connectorTasks = @@ -243,7 +244,7 @@ private static Collection extractConnectors(Struct struct, String key) { Object[] connectors = struct.getArray(key); if (connectors == null) { - return List.of(); + return Collections.emptyList(); } List connectorIds = new ArrayList<>(); for (Object structObj : connectors) { @@ -264,7 +265,7 @@ private static Collection extractTasks(Struct struct, String ke Object[] tasks = struct.getArray(key); if (tasks == null) { - return List.of(); + return Collections.emptyList(); } List tasksIds = new ArrayList<>(); for (Object structObj : tasks) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java index 2b8f87c81c71f..676ae78753002 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignor.java @@ -29,7 +29,9 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -87,7 +89,7 @@ public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxD this.candidateWorkersForReassignment = new LinkedHashSet<>(); this.delay = 0; this.previousGenerationId = -1; - this.previousMembers = Set.of(); + this.previousMembers = Collections.emptySet(); this.numSuccessiveRevokingRebalances = 0; // By default, initial interval is 1. The only corner case is when the user has set maxDelay to 0 // in which case, the exponential backoff delay should be 0 which would return the backoff delay to be 0 always @@ -95,7 +97,7 @@ public IncrementalCooperativeAssignor(LogContext logContext, Time time, int maxD } @Override - public Map performAssignment(String leaderId, ConnectProtocolCompatibility protocol, + public Map performAssignment(String leaderId, String protocol, List allMemberMetadata, WorkerCoordinator coordinator) { log.debug("Performing task assignment"); @@ -115,7 +117,7 @@ public Map performAssignment(String leaderId, ConnectProtoco log.debug("Max config offset root: {}, local snapshot config offsets root: {}", maxOffset, coordinator.configSnapshot().offset()); - short protocolVersion = protocol.protocolVersion(); + short protocolVersion = ConnectProtocolCompatibility.fromProtocol(protocol).protocolVersion(); Long leaderOffset = ensureLeaderConfig(maxOffset, coordinator); if (leaderOffset == null) { @@ -446,7 +448,8 @@ protected void handleLostAssignments(ConnectorsAndTasks lostAssignments, } final long now = time.milliseconds(); - log.debug("Found the following connectors and tasks missing from previous assignments: {}", lostAssignments); + log.debug("Found the following connectors and tasks missing from previous assignments: " + + lostAssignments); Set activeMembers = completeWorkerAssignment.stream() .map(WorkerLoad::worker) @@ -470,7 +473,7 @@ protected void handleLostAssignments(ConnectorsAndTasks lostAssignments, if (scheduledRebalance > 0 && now >= scheduledRebalance) { // delayed rebalance expired and it's time to assign resources log.debug("Delayed rebalance expired. Reassigning lost tasks"); - List candidateWorkerLoad = List.of(); + List candidateWorkerLoad = Collections.emptyList(); if (!candidateWorkersForReassignment.isEmpty()) { candidateWorkerLoad = pickCandidateWorkerForReassignment(completeWorkerAssignment); } @@ -604,7 +607,7 @@ private static Map> diff(Map> ba Map> incremental = new HashMap<>(); for (Map.Entry> entry : base.entrySet()) { List values = new ArrayList<>(entry.getValue()); - values.removeAll(toSubtract.getOrDefault(entry.getKey(), Set.of())); + values.removeAll(toSubtract.getOrDefault(entry.getKey(), Collections.emptySet())); incremental.put(entry.getKey(), values); } return incremental; @@ -641,11 +644,11 @@ private Map performLoadBalancingRevocations( log.trace("No load-balancing revocations required; all workers are either new " + "or will have all currently-assigned connectors and tasks revoked during this round" ); - return Map.of(); + return Collections.emptyMap(); } if (configured.isEmpty()) { log.trace("No load-balancing revocations required; no connectors are currently configured on this cluster"); - return Map.of(); + return Collections.emptyMap(); } Map result = new HashMap<>(); @@ -703,7 +706,7 @@ private Map> loadBalancingRevocations( allocatedResourceName, allocatedResourceName ); - return Map.of(); + return Collections.emptyMap(); } Map> result = new HashMap<>(); @@ -885,12 +888,12 @@ static class ClusterAssignment { private final Set allWorkers; public static final ClusterAssignment EMPTY = new ClusterAssignment( - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of() + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap() ); public ClusterAssignment( @@ -908,7 +911,7 @@ public ClusterAssignment( this.allAssignedConnectors = allAssignedConnectors; this.allAssignedTasks = allAssignedTasks; this.allWorkers = combineCollections( - List.of(newlyAssignedConnectors, newlyAssignedTasks, newlyRevokedConnectors, newlyRevokedTasks, allAssignedConnectors, allAssignedTasks), + Arrays.asList(newlyAssignedConnectors, newlyAssignedTasks, newlyRevokedConnectors, newlyRevokedTasks, allAssignedConnectors, allAssignedTasks), Map::keySet, Collectors.toSet() ); @@ -919,7 +922,7 @@ public Map> newlyAssignedConnectors() { } public Collection newlyAssignedConnectors(String worker) { - return newlyAssignedConnectors.getOrDefault(worker, Set.of()); + return newlyAssignedConnectors.getOrDefault(worker, Collections.emptySet()); } public Map> newlyAssignedTasks() { @@ -927,7 +930,7 @@ public Map> newlyAssignedTasks() { } public Collection newlyAssignedTasks(String worker) { - return newlyAssignedTasks.getOrDefault(worker, Set.of()); + return newlyAssignedTasks.getOrDefault(worker, Collections.emptySet()); } public Map> newlyRevokedConnectors() { @@ -935,7 +938,7 @@ public Map> newlyRevokedConnectors() { } public Collection newlyRevokedConnectors(String worker) { - return newlyRevokedConnectors.getOrDefault(worker, Set.of()); + return newlyRevokedConnectors.getOrDefault(worker, Collections.emptySet()); } public Map> newlyRevokedTasks() { @@ -943,7 +946,7 @@ public Map> newlyRevokedTasks() { } public Collection newlyRevokedTasks(String worker) { - return newlyRevokedTasks.getOrDefault(worker, Set.of()); + return newlyRevokedTasks.getOrDefault(worker, Collections.emptySet()); } public Map> allAssignedConnectors() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java index edb174ab52bed..138bf9fc51473 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinator.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.runtime.distributed; import org.apache.kafka.clients.GroupRebalanceConfig; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.internals.AbstractCoordinator; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient; import org.apache.kafka.common.metrics.Measurable; @@ -36,6 +35,7 @@ import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.LinkedHashSet; @@ -183,11 +183,16 @@ public JoinGroupRequestProtocolCollection metadata() { configSnapshot = configStorage.snapshot(); final ExtendedAssignment localAssignmentSnapshot = assignmentSnapshot; ExtendedWorkerState workerState = new ExtendedWorkerState(restUrl, configSnapshot.offset(), localAssignmentSnapshot); - return switch (protocolCompatibility) { - case EAGER -> ConnectProtocol.metadataRequest(workerState); - case COMPATIBLE -> IncrementalCooperativeConnectProtocol.metadataRequest(workerState, false); - case SESSIONED -> IncrementalCooperativeConnectProtocol.metadataRequest(workerState, true); - }; + switch (protocolCompatibility) { + case EAGER: + return ConnectProtocol.metadataRequest(workerState); + case COMPATIBLE: + return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, false); + case SESSIONED: + return IncrementalCooperativeConnectProtocol.metadataRequest(workerState, true); + default: + throw new IllegalStateException("Unknown Connect protocol compatibility mode " + protocolCompatibility); + } } @Override @@ -228,10 +233,9 @@ protected Map onLeaderElected(String leaderId, if (skipAssignment) throw new IllegalStateException("Can't skip assignment because Connect does not support static membership."); - ConnectProtocolCompatibility protocolCompatibility = ConnectProtocolCompatibility.fromProtocol(protocol); - return protocolCompatibility == EAGER - ? eagerAssignor.performAssignment(leaderId, protocolCompatibility, allMemberMetadata, this) - : incrementalAssignor.performAssignment(leaderId, protocolCompatibility, allMemberMetadata, this); + return ConnectProtocolCompatibility.fromProtocol(protocol) == EAGER + ? eagerAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this) + : incrementalAssignor.performAssignment(leaderId, protocol, allMemberMetadata, this); } @Override @@ -267,7 +271,7 @@ public String memberId() { @Override protected void handlePollTimeoutExpiry() { listener.onPollTimeoutExpiry(); - maybeLeaveGroup(CloseOptions.GroupMembershipOperation.DEFAULT, "worker poll timeout has expired."); + maybeLeaveGroup("worker poll timeout has expired."); } /** @@ -436,7 +440,8 @@ private String ownerUrl(String connector) { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof LeaderState that)) return false; + if (!(o instanceof LeaderState)) return false; + LeaderState that = (LeaderState) o; return Objects.equals(allMembers, that.allMembers) && Objects.equals(connectorOwners, that.connectorOwners) && Objects.equals(taskOwners, that.taskOwners); @@ -459,7 +464,7 @@ public String toString() { public static class ConnectorsAndTasks { public static final ConnectorsAndTasks EMPTY = - new ConnectorsAndTasks(List.of(), List.of()); + new ConnectorsAndTasks(Collections.emptyList(), Collections.emptyList()); private final Collection connectors; private final Collection tasks; @@ -639,9 +644,10 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof WorkerLoad that)) { + if (!(o instanceof WorkerLoad)) { return false; } + WorkerLoad that = (WorkerLoad) o; return worker.equals(that.worker) && connectors.equals(that.connectors) && tasks.equals(that.tasks); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java index a283ce7cf3878..c89eb33082fbe 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMember.java @@ -23,7 +23,6 @@ import org.apache.kafka.clients.Metadata; import org.apache.kafka.clients.MetadataRecoveryStrategy; import org.apache.kafka.clients.NetworkClient; -import org.apache.kafka.clients.consumer.CloseOptions; import org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.internals.ClusterResourceListeners; @@ -199,7 +198,7 @@ public void requestRejoin() { } public void maybeLeaveGroup(String leaveReason) { - coordinator.maybeLeaveGroup(CloseOptions.GroupMembershipOperation.LEAVE_GROUP, leaveReason); + coordinator.maybeLeaveGroup(leaveReason); } public String ownerUrl(String connector) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java index bb240af82d79c..6f7b1dc66ca52 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/DeadLetterQueueReporter.java @@ -38,11 +38,12 @@ import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.Objects; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; +import static java.util.Collections.singleton; + /** * Write the original consumed record into a dead letter queue. The dead letter queue is a Kafka topic located * on the same cluster used by the worker to maintain internal topics. Each connector is typically configured @@ -84,7 +85,7 @@ public static DeadLetterQueueReporter createAndSetup(Map adminPr if (!admin.listTopics().names().get().contains(topic)) { log.error("Topic {} doesn't exist. Will attempt to create topic.", topic); NewTopic schemaTopicRequest = new NewTopic(topic, DLQ_NUM_DESIRED_PARTITIONS, sinkConfig.dlqTopicReplicationFactor()); - admin.createTopics(Set.of(schemaTopicRequest)).all().get(); + admin.createTopics(singleton(schemaTopicRequest)).all().get(); } } catch (InterruptedException e) { throw new ConnectException("Could not initialize dead letter queue with topic=" + topic, e); @@ -151,7 +152,7 @@ public Future report(ProcessingContext { if (exception != null) { - log.error("Could not produce message to dead letter queue. topic={}", dlqTopicName, exception); + log.error("Could not produce message to dead letter queue. topic=" + dlqTopicName, exception); errorHandlingMetrics.recordDeadLetterQueueProduceFailed(); } }); @@ -183,7 +184,7 @@ void populateContextHeaders(ProducerRecord producerRecord, Proce private byte[] stacktrace(Throwable error) { ByteArrayOutputStream bos = new ByteArrayOutputStream(); try { - PrintStream stream = new PrintStream(bos, true, StandardCharsets.UTF_8); + PrintStream stream = new PrintStream(bos, true, StandardCharsets.UTF_8.name()); error.printStackTrace(stream); bos.close(); return bos.toByteArray(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java index b8b74f12e5f1f..2b9ba9fc5b7b9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperator.java @@ -26,6 +26,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -100,7 +101,7 @@ public RetryWithToleranceOperator(long errorRetryTimeout, long errorMaxDelayInMi this.errorHandlingMetrics = errorHandlingMetrics; this.stopRequestedLatch = stopRequestedLatch; this.stopping = false; - this.reporters = List.of(); + this.reporters = Collections.emptyList(); } /** @@ -136,7 +137,7 @@ public Future executeFailed(ProcessingContext context, Stage stage, Cla // Visible for testing synchronized Future report(ProcessingContext context) { if (reporters.size() == 1) { - return new WorkerErrantRecordReporter.ErrantRecordFuture(List.of(reporters.get(0).report(context))); + return new WorkerErrantRecordReporter.ErrantRecordFuture(Collections.singletonList(reporters.iterator().next().report(context))); } List> futures = reporters.stream() .map(r -> r.report(context)) @@ -310,7 +311,7 @@ void backoff(int attempt, long deadline) { try { stopRequestedLatch.await(delay, TimeUnit.MILLISECONDS); } catch (InterruptedException e) { - // ignore + return; } } @@ -356,7 +357,7 @@ public synchronized void close() { e.addSuppressed(t); } } - reporters = List.of(); + reporters = Collections.emptyList(); if (e != null) { throw e; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/health/ConnectClusterDetailsImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/health/ConnectClusterDetailsImpl.java index 7d0537b19f454..09f09bd7d383c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/health/ConnectClusterDetailsImpl.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/health/ConnectClusterDetailsImpl.java @@ -19,5 +19,16 @@ import org.apache.kafka.connect.health.ConnectClusterDetails; -public record ConnectClusterDetailsImpl(String kafkaClusterId) implements ConnectClusterDetails { +public class ConnectClusterDetailsImpl implements ConnectClusterDetails { + + private final String kafkaClusterId; + + public ConnectClusterDetailsImpl(String kafkaClusterId) { + this.kafkaClusterId = kafkaClusterId; + } + + @Override + public String kafkaClusterId() { + return kafkaClusterId; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java index f89b1f03a75fd..fdbadef7b6939 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoader.java @@ -16,22 +16,17 @@ */ package org.apache.kafka.connect.runtime.isolation; -import org.apache.maven.artifact.versioning.DefaultArtifactVersion; -import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.net.URL; import java.net.URLClassLoader; import java.util.HashMap; -import java.util.List; import java.util.Map; -import java.util.Set; import java.util.SortedMap; import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; -import java.util.stream.Collectors; /** * A custom classloader dedicated to loading Connect plugin classes in classloading isolation. @@ -74,117 +69,36 @@ public DelegatingClassLoader() { /** * Retrieve the PluginClassLoader associated with a plugin class - * * @param name The fully qualified class name of the plugin * @return the PluginClassLoader that should be used to load this, or null if the plugin is not isolated. */ // VisibleForTesting - PluginClassLoader pluginClassLoader(String name, VersionRange range) { + PluginClassLoader pluginClassLoader(String name) { if (!PluginUtils.shouldLoadInIsolation(name)) { return null; } - SortedMap, ClassLoader> inner = pluginLoaders.get(name); if (inner == null) { return null; } - - - ClassLoader pluginLoader = findPluginLoader(inner, name, range); + ClassLoader pluginLoader = inner.get(inner.lastKey()); return pluginLoader instanceof PluginClassLoader - ? (PluginClassLoader) pluginLoader - : null; + ? (PluginClassLoader) pluginLoader + : null; } - PluginClassLoader pluginClassLoader(String name) { - return pluginClassLoader(name, null); - } - - ClassLoader loader(String classOrAlias, VersionRange range) { - String fullName = aliases.getOrDefault(classOrAlias, classOrAlias); - ClassLoader classLoader = pluginClassLoader(fullName, range); - if (classLoader == null) { - classLoader = this; - } + ClassLoader connectorLoader(String connectorClassOrAlias) { + String fullName = aliases.getOrDefault(connectorClassOrAlias, connectorClassOrAlias); + ClassLoader classLoader = pluginClassLoader(fullName); + if (classLoader == null) classLoader = this; log.debug( - "Got plugin class loader: '{}' for connector: {}", - classLoader, - classOrAlias + "Getting plugin class loader: '{}' for connector: {}", + classLoader, + connectorClassOrAlias ); return classLoader; } - ClassLoader loader(String classOrAlias) { - return loader(classOrAlias, null); - } - - ClassLoader connectorLoader(String connectorClassOrAlias) { - return loader(connectorClassOrAlias); - } - - String resolveFullClassName(String classOrAlias) { - return aliases.getOrDefault(classOrAlias, classOrAlias); - } - - PluginDesc pluginDesc(String classOrAlias, String preferredLocation, Set allowedTypes) { - if (classOrAlias == null) { - return null; - } - String fullName = aliases.getOrDefault(classOrAlias, classOrAlias); - SortedMap, ClassLoader> inner = pluginLoaders.get(fullName); - if (inner == null) { - return null; - } - PluginDesc result = null; - for (Map.Entry, ClassLoader> entry : inner.entrySet()) { - if (!allowedTypes.contains(entry.getKey().type())) { - continue; - } - result = entry.getKey(); - if (result.location().equals(preferredLocation)) { - return result; - } - } - return result; - } - - private ClassLoader findPluginLoader( - SortedMap, ClassLoader> loaders, - String pluginName, - VersionRange range - ) { - - if (range != null) { - - if (null != range.getRecommendedVersion()) { - throw new VersionedPluginLoadingException(String.format("A soft version range is not supported for plugin loading, " - + "this is an internal error as connect should automatically convert soft ranges to hard ranges. " - + "Provided soft version: %s ", range)); - } - - ClassLoader loader = null; - for (Map.Entry, ClassLoader> entry : loaders.entrySet()) { - // the entries should be in sorted order of versions so this should end up picking the latest version which matches the range - if (range.containsVersion(entry.getKey().encodedVersion())) { - loader = entry.getValue(); - } - } - - if (loader == null) { - List availableVersions = loaders.keySet().stream().map(PluginDesc::version).collect(Collectors.toList()); - throw new VersionedPluginLoadingException(String.format( - "Plugin %s not found that matches the version range %s, available versions: %s", - pluginName, - range, - availableVersions - ), availableVersions); - } - return loader; - } - - return loaders.get(loaders.lastKey()); - } - public void installDiscoveredPlugins(PluginScanResult scanResult) { pluginLoaders.putAll(computePluginLoaders(scanResult)); for (String pluginClassName : pluginLoaders.keySet()) { @@ -198,76 +112,21 @@ public void installDiscoveredPlugins(PluginScanResult scanResult) { @Override protected Class loadClass(String name, boolean resolve) throws ClassNotFoundException { - return loadVersionedPluginClass(name, null, resolve); - } - - protected Class loadVersionedPluginClass( - String name, - VersionRange range, - boolean resolve - ) throws VersionedPluginLoadingException, ClassNotFoundException { - String fullName = aliases.getOrDefault(name, name); - PluginClassLoader pluginLoader = pluginClassLoader(fullName, range); - Class plugin; + PluginClassLoader pluginLoader = pluginClassLoader(fullName); if (pluginLoader != null) { - log.trace("Retrieving loaded class '{}' from '{}'", name, pluginLoader); - plugin = pluginLoader.loadClass(fullName, resolve); - } else { - plugin = super.loadClass(fullName, resolve); - if (range == null) { - return plugin; - } - verifyClasspathVersionedPlugin(fullName, plugin, range); - } - return plugin; - } - - private void verifyClasspathVersionedPlugin(String fullName, Class plugin, VersionRange range) throws VersionedPluginLoadingException { - String pluginVersion; - SortedMap, ClassLoader> scannedPlugin = pluginLoaders.get(fullName); - - if (scannedPlugin == null) { - throw new VersionedPluginLoadingException(String.format( - "Plugin %s is not part of Connect's plugin loading mechanism (ClassPath or Plugin Path)", - fullName - )); + log.trace("Retrieving loaded class '{}' from '{}'", fullName, pluginLoader); + return pluginLoader.loadClass(fullName, resolve); } - // if a plugin implements two interfaces (like JsonConverter implements both converter and header converter) - // it will have two entries under classpath, one for each scan. Hence, we count distinct by version. - List classpathPlugins = scannedPlugin.keySet().stream() - .filter(pluginDesc -> pluginDesc.location().equals("classpath")) - .map(PluginDesc::version) - .distinct() - .toList(); - - if (classpathPlugins.size() > 1) { - throw new VersionedPluginLoadingException(String.format( - "Plugin %s has multiple versions specified in class path, " - + "only one version is allowed in class path for loading a plugin with version range", - fullName - )); - } else if (classpathPlugins.isEmpty()) { - throw new VersionedPluginLoadingException("Invalid plugin found in classpath"); - } else { - pluginVersion = classpathPlugins.get(0); - if (!range.containsVersion(new DefaultArtifactVersion(pluginVersion))) { - throw new VersionedPluginLoadingException(String.format( - "Plugin %s has version %s which does not match the required version range %s", - fullName, - pluginVersion, - range - ), List.of(pluginVersion)); - } - } + return super.loadClass(fullName, resolve); } private static Map, ClassLoader>> computePluginLoaders(PluginScanResult plugins) { Map, ClassLoader>> pluginLoaders = new HashMap<>(); plugins.forEach(pluginDesc -> - pluginLoaders.computeIfAbsent(pluginDesc.className(), k -> new TreeMap<>()) - .put(pluginDesc, pluginDesc.loader())); + pluginLoaders.computeIfAbsent(pluginDesc.className(), k -> new TreeMap<>()) + .put(pluginDesc, pluginDesc.loader())); return pluginLoaders; } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java index d1829b731dc7d..693972c1989c5 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginClassLoader.java @@ -22,11 +22,9 @@ import java.io.IOException; import java.net.URL; import java.net.URLClassLoader; -import java.util.ArrayList; -import java.util.Collections; import java.util.Enumeration; -import java.util.List; import java.util.Objects; +import java.util.Vector; /** * A custom classloader dedicated to loading Connect plugin classes in classloading isolation. @@ -89,7 +87,7 @@ public URL getResource(String name) { @Override public Enumeration getResources(String name) throws IOException { Objects.requireNonNull(name); - List resources = new ArrayList<>(); + Vector resources = new Vector<>(); for (Enumeration foundLocally = findResources(name); foundLocally.hasMoreElements();) { URL url = foundLocally.nextElement(); if (url != null) @@ -101,7 +99,7 @@ public Enumeration getResources(String name) throws IOException { if (url != null) resources.add(url); } - return Collections.enumeration(resources); + return resources.elements(); } // This method needs to be thread-safe because it is supposed to be called by multiple diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java index b480124c6dc5f..a58aef7ceca74 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginDesc.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.runtime.isolation; -import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; import org.apache.maven.artifact.versioning.DefaultArtifactVersion; @@ -61,11 +60,6 @@ public String toString() { '}'; } - @JsonIgnore - public DefaultArtifactVersion encodedVersion() { - return encodedVersion; - } - public Class pluginClass() { return klass; } @@ -103,9 +97,10 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof PluginDesc that)) { + if (!(o instanceof PluginDesc)) { return false; } + PluginDesc that = (PluginDesc) o; return Objects.equals(klass, that.klass) && Objects.equals(version, that.version) && type == that.type; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java index 04be35f2d2603..7d5105012b1a0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanResult.java @@ -26,6 +26,7 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; +import java.util.Arrays; import java.util.List; import java.util.SortedSet; import java.util.TreeSet; @@ -66,7 +67,7 @@ public PluginScanResult( this.restExtensions = restExtensions; this.connectorClientConfigPolicies = connectorClientConfigPolicies; this.allPlugins = - List.of(sinkConnectors, sourceConnectors, converters, headerConverters, transformations, predicates, + Arrays.asList(sinkConnectors, sourceConnectors, converters, headerConverters, transformations, predicates, configProviders, restExtensions, connectorClientConfigPolicies); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanner.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanner.java index dc3eedd02138f..004f78fe1c0db 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanner.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginScanner.java @@ -198,7 +198,7 @@ protected static String versionFor(T pluginImpl) { return ((Versioned) pluginImpl).version(); } } catch (Throwable t) { - log.error("Failed to get plugin version for {}", pluginImpl.getClass(), t); + log.error("Failed to get plugin version for " + pluginImpl.getClass(), t); } return PluginDesc.UNDEFINED_VERSION; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginSource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginSource.java index 9dcfe30cb7f6e..c61e2cd87228c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginSource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginSource.java @@ -21,15 +21,40 @@ import java.util.Arrays; import java.util.Objects; -public record PluginSource(Path location, - org.apache.kafka.connect.runtime.isolation.PluginSource.Type type, - ClassLoader loader, - URL[] urls) { +public class PluginSource { public enum Type { CLASSPATH, MULTI_JAR, SINGLE_JAR, CLASS_HIERARCHY } + private final Path location; + private final Type type; + private final ClassLoader loader; + private final URL[] urls; + + public PluginSource(Path location, Type type, ClassLoader loader, URL[] urls) { + this.location = location; + this.type = type; + this.loader = loader; + this.urls = urls; + } + + public Path location() { + return location; + } + + public Type type() { + return type; + } + + public ClassLoader loader() { + return loader; + } + + public URL[] urls() { + return urls; + } + public boolean isolated() { return location != null; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java index 729074d508e75..932e87395f728 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/PluginUtils.java @@ -16,8 +16,6 @@ */ package org.apache.kafka.connect.runtime.isolation; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; -import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,7 +32,9 @@ import java.nio.file.Path; import java.nio.file.Paths; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -202,7 +202,7 @@ public static boolean isClassFile(Path path) { public static Set pluginLocations(String pluginPath, boolean failFast) { if (pluginPath == null) { - return Set.of(); + return Collections.emptySet(); } String[] pluginPathElements = COMMA_WITH_WHITESPACE.split(pluginPath.trim(), -1); Set pluginLocations = new LinkedHashSet<>(); @@ -264,7 +264,7 @@ public static List pluginUrls(Path topPath) throws IOException { Set visited = new HashSet<>(); if (isArchive(topPath)) { - return List.of(topPath); + return Collections.singletonList(topPath); } DirectoryStream topListing = Files.newDirectoryStream( @@ -333,12 +333,12 @@ public static List pluginUrls(Path topPath) throws IOException { if (containsClassFiles) { if (archives.isEmpty()) { - return List.of(topPath); + return Collections.singletonList(topPath); } log.warn("Plugin path contains both java archives and class files. Returning only the" + " archives"); } - return List.copyOf(archives); + return Arrays.asList(archives.toArray(new Path[0])); } public static Set pluginSources(Set pluginLocations, ClassLoader classLoader, PluginClassLoaderFactory factory) { @@ -408,10 +408,13 @@ public static String simpleName(PluginDesc plugin) { */ public static String prunedName(PluginDesc plugin) { // It's currently simpler to switch on type than do pattern matching. - return switch (plugin.type()) { - case SOURCE, SINK -> prunePluginName(plugin, "Connector"); - default -> prunePluginName(plugin, plugin.type().simpleName()); - }; + switch (plugin.type()) { + case SOURCE: + case SINK: + return prunePluginName(plugin, "Connector"); + default: + return prunePluginName(plugin, plugin.type().simpleName()); + } } private static String prunePluginName(PluginDesc plugin, String suffix) { @@ -466,21 +469,21 @@ private static Collection forJavaClassPath() { } return distinctUrls(urls); } - + private static Collection forClassLoader(ClassLoader classLoader) { final Collection result = new ArrayList<>(); while (classLoader != null) { if (classLoader instanceof URLClassLoader) { URL[] urls = ((URLClassLoader) classLoader).getURLs(); if (urls != null) { - result.addAll(new HashSet<>(List.of(urls))); + result.addAll(new HashSet<>(Arrays.asList(urls))); } } classLoader = classLoader.getParent(); } return distinctUrls(result); } - + private static Collection distinctUrls(Collection urls) { Map distinct = new HashMap<>(urls.size()); for (URL url : urls) { @@ -488,21 +491,4 @@ private static Collection distinctUrls(Collection urls) { } return distinct.values(); } - - public static VersionRange connectorVersionRequirement(String version) throws InvalidVersionSpecificationException { - if (version == null || version.equals("latest")) { - return null; - } - version = version.trim(); - - // check first if the given version is valid - VersionRange range = VersionRange.createFromVersionSpec(version); - - if (range.hasRestrictions()) { - return range; - } - // now if the version is not enclosed we consider it as a hard requirement and enclose it in [] - version = "[" + version + "]"; - return VersionRange.createFromVersionSpec(version); - } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java index daf9f2199922b..816f870157e49 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/isolation/Plugins.java @@ -35,20 +35,19 @@ import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; -import org.apache.maven.artifact.versioning.VersionRange; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.nio.file.Path; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedSet; import java.util.TreeSet; -import java.util.function.Function; import java.util.stream.Collectors; public class Plugins { @@ -78,7 +77,7 @@ public Plugins(Map props) { } public PluginScanResult initLoaders(Set pluginSources, PluginDiscoveryMode discoveryMode) { - PluginScanResult empty = new PluginScanResult(List.of()); + PluginScanResult empty = new PluginScanResult(Collections.emptyList()); PluginScanResult serviceLoadingScanResult; try { serviceLoadingScanResult = discoveryMode.serviceLoad() ? @@ -91,7 +90,7 @@ public PluginScanResult initLoaders(Set pluginSources, PluginDisco } PluginScanResult reflectiveScanResult = discoveryMode.reflectivelyScan() ? new ReflectionScanner().discoverPlugins(pluginSources) : empty; - PluginScanResult scanResult = new PluginScanResult(List.of(reflectiveScanResult, serviceLoadingScanResult)); + PluginScanResult scanResult = new PluginScanResult(Arrays.asList(reflectiveScanResult, serviceLoadingScanResult)); maybeReportHybridDiscoveryIssue(discoveryMode, serviceLoadingScanResult, scanResult); delegatingLoader.installDiscoveredPlugins(scanResult); return scanResult; @@ -163,25 +162,17 @@ protected Class pluginClassFromConfig( ); } + @SuppressWarnings("unchecked") protected static Class pluginClass( DelegatingClassLoader loader, String classOrAlias, Class pluginClass ) throws ClassNotFoundException { - return pluginClass(loader, classOrAlias, pluginClass, null); - } - - @SuppressWarnings("unchecked") - protected static Class pluginClass( - DelegatingClassLoader loader, - String classOrAlias, - Class pluginClass, - VersionRange range - ) throws VersionedPluginLoadingException, ClassNotFoundException { - Class klass = loader.loadVersionedPluginClass(classOrAlias, range, false); + Class klass = loader.loadClass(classOrAlias, false); if (pluginClass.isAssignableFrom(klass)) { return (Class) klass; } + throw new ClassNotFoundException( "Requested class: " + classOrAlias @@ -193,10 +184,6 @@ public Class pluginClass(String classOrAlias) throws ClassNotFoundException { return pluginClass(delegatingLoader, classOrAlias, Object.class); } - public Class pluginClass(String classOrAlias, VersionRange range) throws VersionedPluginLoadingException, ClassNotFoundException { - return pluginClass(delegatingLoader, classOrAlias, Object.class, range); - } - public static ClassLoader compareAndSwapLoaders(ClassLoader loader) { ClassLoader current = Thread.currentThread().getContextClassLoader(); if (!current.equals(loader)) { @@ -253,46 +240,14 @@ public Runnable withClassLoader(ClassLoader classLoader, Runnable operation) { }; } - public Function safeLoaderSwapper() { - return loader -> { - if (!(loader instanceof PluginClassLoader)) { - loader = delegatingLoader; - } - return withClassLoader(loader); - }; - } - - public String latestVersion(String classOrAlias, PluginType... allowedTypes) { - return pluginVersion(classOrAlias, null, allowedTypes); - } - - public String pluginVersion(String classOrAlias, ClassLoader sourceLoader, PluginType... allowedTypes) { - String location = (sourceLoader instanceof PluginClassLoader) ? ((PluginClassLoader) sourceLoader).location() : null; - PluginDesc desc = delegatingLoader.pluginDesc(classOrAlias, location, Set.of(allowedTypes)); - if (desc != null) { - return desc.version(); - } - return null; - } - public DelegatingClassLoader delegatingLoader() { return delegatingLoader; } - // kept for compatibility public ClassLoader connectorLoader(String connectorClassOrAlias) { - return delegatingLoader.loader(connectorClassOrAlias); - } - - public ClassLoader pluginLoader(String classOrAlias, VersionRange range) { - return delegatingLoader.loader(classOrAlias, range); - } - - public ClassLoader pluginLoader(String classOrAlias) { - return delegatingLoader.loader(classOrAlias); + return delegatingLoader.connectorLoader(connectorClassOrAlias); } - @SuppressWarnings({"unchecked", "rawtypes"}) public Set> connectors() { Set> connectors = new TreeSet<>((Set) sinkConnectors()); @@ -304,96 +259,48 @@ public Set> sinkConnectors() { return scanResult.sinkConnectors(); } - Set> sinkConnectors(String connectorClassOrAlias) { - return pluginsOfClass(connectorClassOrAlias, scanResult.sinkConnectors()); - } - public Set> sourceConnectors() { return scanResult.sourceConnectors(); } - Set> sourceConnectors(String connectorClassOrAlias) { - return pluginsOfClass(connectorClassOrAlias, scanResult.sourceConnectors()); - } - public Set> converters() { return scanResult.converters(); } - Set> converters(String converterClassOrAlias) { - return pluginsOfClass(converterClassOrAlias, scanResult.converters()); - } - public Set> headerConverters() { return scanResult.headerConverters(); } - Set> headerConverters(String headerConverterClassOrAlias) { - return pluginsOfClass(headerConverterClassOrAlias, scanResult.headerConverters()); - } - public Set>> transformations() { return scanResult.transformations(); } - Set>> transformations(String transformationClassOrAlias) { - return pluginsOfClass(transformationClassOrAlias, scanResult.transformations()); - } - public Set>> predicates() { return scanResult.predicates(); } - Set>> predicates(String predicateClassOrAlias) { - return pluginsOfClass(predicateClassOrAlias, scanResult.predicates()); - } - public Set> connectorClientConfigPolicies() { return scanResult.connectorClientConfigPolicies(); } - private Set> pluginsOfClass(String classNameOrAlias, Set> allPluginsOfType) { - String className = delegatingLoader.resolveFullClassName(classNameOrAlias); - Set> plugins = new TreeSet<>(); - for (PluginDesc desc : allPluginsOfType) { - if (desc.className().equals(className)) { - plugins.add(desc); - } - } - return plugins; - } - public Object newPlugin(String classOrAlias) throws ClassNotFoundException { Class klass = pluginClass(delegatingLoader, classOrAlias, Object.class); return newPlugin(klass); } - public Object newPlugin(String classOrAlias, VersionRange range) throws VersionedPluginLoadingException, ClassNotFoundException { - Class klass = pluginClass(delegatingLoader, classOrAlias, Object.class, range); - return newPlugin(klass); - } - - public Object newPlugin(String classOrAlias, VersionRange range, ClassLoader sourceLoader) throws ClassNotFoundException { - if (range == null && sourceLoader instanceof PluginClassLoader) { - return newPlugin(sourceLoader.loadClass(classOrAlias)); - } - return newPlugin(classOrAlias, range); - } - public Connector newConnector(String connectorClassOrAlias) { Class klass = connectorClass(connectorClassOrAlias); return newPlugin(klass); } - public Connector newConnector(String connectorClassOrAlias, VersionRange range) throws VersionedPluginLoadingException { - Class klass = connectorClass(connectorClassOrAlias, range); - return newPlugin(klass); - } - - public Class connectorClass(String connectorClassOrAlias, VersionRange range) throws VersionedPluginLoadingException { + public Class connectorClass(String connectorClassOrAlias) { Class klass; try { - klass = pluginClass(delegatingLoader, connectorClassOrAlias, Connector.class, range); + klass = pluginClass( + delegatingLoader, + connectorClassOrAlias, + Connector.class + ); } catch (ClassNotFoundException e) { List> matches = new ArrayList<>(); Set> connectors = connectors(); @@ -429,10 +336,6 @@ public Class connectorClass(String connectorClassOrAlias, V return klass; } - public Class connectorClass(String connectorClassOrAlias) { - return connectorClass(connectorClassOrAlias, null); - } - public Task newTask(Class taskClass) { return newPlugin(taskClass); } @@ -447,49 +350,54 @@ public Task newTask(Class taskClass) { * @throws ConnectException if the {@link Converter} implementation class could not be found */ public Converter newConverter(AbstractConfig config, String classPropertyName, ClassLoaderUsage classLoaderUsage) { - return newConverter(config, classPropertyName, null, classLoaderUsage); - } - - /** - * Used to get a versioned converter. If the version is specified, it will always use the plugins classloader. - * - * @param config the configuration containing the {@link Converter}'s configuration; may not be null - * @param classPropertyName the name of the property that contains the name of the {@link Converter} class; may not be null - * @param versionPropertyName the name of the property that contains the version of the {@link Converter} class; may not be null - * @return the instantiated and configured {@link Converter}; null if the configuration did not define the specified property - * @throws ConnectException if the {@link Converter} implementation class could not be found, - * @throws VersionedPluginLoadingException if the version requested is not found - */ - public Converter newConverter(AbstractConfig config, String classPropertyName, String versionPropertyName) { - ClassLoaderUsage classLoader = config.getString(versionPropertyName) == null ? ClassLoaderUsage.CURRENT_CLASSLOADER : ClassLoaderUsage.PLUGINS; - return newConverter(config, classPropertyName, versionPropertyName, classLoader); - } - - private Converter newConverter(AbstractConfig config, String classPropertyName, String versionPropertyName, ClassLoaderUsage classLoaderUsage) { if (!config.originals().containsKey(classPropertyName)) { // This configuration does not define the converter via the specified property name return null; } + + Class klass = null; + switch (classLoaderUsage) { + case CURRENT_CLASSLOADER: + // Attempt to load first with the current classloader, and plugins as a fallback. + // Note: we can't use config.getConfiguredInstance because Converter doesn't implement Configurable, and even if it did + // we have to remove the property prefixes before calling config(...) and we still always want to call Converter.config. + klass = pluginClassFromConfig(config, classPropertyName, Converter.class, scanResult.converters()); + break; + case PLUGINS: + // Attempt to load with the plugin class loader, which uses the current classloader as a fallback + String converterClassOrAlias = config.getClass(classPropertyName).getName(); + try { + klass = pluginClass(delegatingLoader, converterClassOrAlias, Converter.class); + } catch (ClassNotFoundException e) { + throw new ConnectException( + "Failed to find any class that implements Converter and which name matches " + + converterClassOrAlias + ", available converters are: " + + pluginNames(scanResult.converters()) + ); + } + break; + } + if (klass == null) { + throw new ConnectException("Unable to initialize the Converter specified in '" + classPropertyName + "'"); + } + // Determine whether this is a key or value converter based upon the supplied property name ... final boolean isKeyConverter = WorkerConfig.KEY_CONVERTER_CLASS_CONFIG.equals(classPropertyName); // Configure the Converter using only the old configuration mechanism ... String configPrefix = classPropertyName + "."; Map converterConfig = config.originalsWithPrefix(configPrefix); - log.debug("Configuring the {} converter with configuration keys:{}{}", - isKeyConverter ? "key" : "value", System.lineSeparator(), converterConfig.keySet()); + isKeyConverter ? "key" : "value", System.lineSeparator(), converterConfig.keySet()); - Converter plugin = newVersionedPlugin(config, classPropertyName, versionPropertyName, - Converter.class, classLoaderUsage, scanResult.converters()); - try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { + Converter plugin; + try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { + plugin = newPlugin(klass); plugin.configure(converterConfig, isKeyConverter); } return plugin; } - - /** * Load an internal converter, used by the worker for (de)serializing data in internal topics. * @@ -519,124 +427,99 @@ public Converter newInternalConverter(boolean isKey, String className, Map klass = null; + switch (classLoaderUsage) { + case CURRENT_CLASSLOADER: + if (!config.originals().containsKey(classPropertyName)) { + // This connector configuration does not define the header converter via the specified property name + return null; + } + // Attempt to load first with the current classloader, and plugins as a fallback. + // Note: we can't use config.getConfiguredInstance because we have to remove the property prefixes + // before calling config(...) + klass = pluginClassFromConfig(config, classPropertyName, HeaderConverter.class, scanResult.headerConverters()); + break; + case PLUGINS: + // Attempt to load with the plugin class loader, which uses the current classloader as a fallback. + // Note that there will always be at least a default header converter for the worker + String converterClassOrAlias = config.getClass(classPropertyName).getName(); + try { + klass = pluginClass( + delegatingLoader, + converterClassOrAlias, + HeaderConverter.class + ); + } catch (ClassNotFoundException e) { + throw new ConnectException( + "Failed to find any class that implements HeaderConverter and which name matches " + + converterClassOrAlias + + ", available header converters are: " + + pluginNames(scanResult.headerConverters()) + ); + } + } + if (klass == null) { + throw new ConnectException("Unable to initialize the HeaderConverter specified in '" + classPropertyName + "'"); } - HeaderConverter plugin = newVersionedPlugin(config, classPropertyName, versionPropertyName, - HeaderConverter.class, classLoaderUsage, scanResult.headerConverters()); String configPrefix = classPropertyName + "."; Map converterConfig = config.originalsWithPrefix(configPrefix); converterConfig.put(ConverterConfig.TYPE_CONFIG, ConverterType.HEADER.getName()); log.debug("Configuring the header converter with configuration keys:{}{}", System.lineSeparator(), converterConfig.keySet()); - try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { + HeaderConverter plugin; + try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { + plugin = newPlugin(klass); plugin.configure(converterConfig); } return plugin; } - @SuppressWarnings({"unchecked", "rawtypes"}) - private U newVersionedPlugin( - AbstractConfig config, - String classPropertyName, - String versionPropertyName, - Class basePluginClass, - ClassLoaderUsage classLoaderUsage, - SortedSet> availablePlugins - ) { - - String version = versionPropertyName == null ? null : config.getString(versionPropertyName); - VersionRange range = null; - if (version != null) { - try { - range = PluginUtils.connectorVersionRequirement(version); - } catch (InvalidVersionSpecificationException e) { - throw new ConnectException(String.format("Invalid version range for %s: %s", classPropertyName, version), e); - } + public ConfigProvider newConfigProvider(AbstractConfig config, String providerPrefix, ClassLoaderUsage classLoaderUsage) { + String classPropertyName = providerPrefix + ".class"; + Map originalConfig = config.originalsStrings(); + if (!originalConfig.containsKey(classPropertyName)) { + // This configuration does not define the config provider via the specified property name + return null; } - - assert range == null || classLoaderUsage == ClassLoaderUsage.PLUGINS; - - Class klass = null; - String basePluginClassName = basePluginClass.getSimpleName(); + Class klass = null; switch (classLoaderUsage) { case CURRENT_CLASSLOADER: // Attempt to load first with the current classloader, and plugins as a fallback. - klass = pluginClassFromConfig(config, classPropertyName, basePluginClass, availablePlugins); + klass = pluginClassFromConfig(config, classPropertyName, ConfigProvider.class, scanResult.configProviders()); break; case PLUGINS: // Attempt to load with the plugin class loader, which uses the current classloader as a fallback - - // if the config specifies the class name, use it, otherwise use the default which we can get from config.getClass - String classOrAlias = config.originalsStrings().get(classPropertyName); - if (classOrAlias == null) { - classOrAlias = config.getClass(classPropertyName) == null ? null : config.getClass(classPropertyName).getName(); - } + String configProviderClassOrAlias = originalConfig.get(classPropertyName); try { - klass = pluginClass(delegatingLoader, classOrAlias, basePluginClass, range); + klass = pluginClass(delegatingLoader, configProviderClassOrAlias, ConfigProvider.class); } catch (ClassNotFoundException e) { throw new ConnectException( - "Failed to find any class that implements " + basePluginClassName + " and which name matches " - + classOrAlias + ", available plugins are: " - + pluginNames(availablePlugins) + "Failed to find any class that implements ConfigProvider and which name matches " + + configProviderClassOrAlias + ", available ConfigProviders are: " + + pluginNames(scanResult.configProviders()) ); } break; } if (klass == null) { - throw new ConnectException("Unable to initialize the " + basePluginClassName - + " specified in " + classPropertyName); + throw new ConnectException("Unable to initialize the ConfigProvider specified in '" + classPropertyName + "'"); } - U plugin; - try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { - plugin = newPlugin(klass); - } - return plugin; - } - - public ConfigProvider newConfigProvider(AbstractConfig config, String providerPrefix, ClassLoaderUsage classLoaderUsage) { - String classPropertyName = providerPrefix + ".class"; - Map originalConfig = config.originalsStrings(); - if (!originalConfig.containsKey(classPropertyName)) { - // This configuration does not define the config provider via the specified property name - return null; - } - - ConfigProvider plugin = newVersionedPlugin(config, classPropertyName, null, ConfigProvider.class, classLoaderUsage, scanResult.configProviders()); - // Configure the ConfigProvider String configPrefix = providerPrefix + ".param."; Map configProviderConfig = config.originalsWithPrefix(configPrefix); - try (LoaderSwap loaderSwap = safeLoaderSwapper().apply(plugin.getClass().getClassLoader())) { + + ConfigProvider plugin; + try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { + plugin = newPlugin(klass); plugin.configure(configProviderConfig); } return plugin; @@ -674,7 +557,8 @@ public T newPlugin(String klassName, AbstractConfig config, Class pluginK } try (LoaderSwap loaderSwap = withClassLoader(klass.getClassLoader())) { plugin = newPlugin(klass); - if (plugin instanceof Versioned versionedPlugin) { + if (plugin instanceof Versioned) { + Versioned versionedPlugin = (Versioned) plugin; if (Utils.isBlank(versionedPlugin.version())) { throw new ConnectException("Version not defined for '" + klassName + "'"); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java index 794dbfad1e5c1..1990ebdf36926 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestExtensionContextImpl.java @@ -22,6 +22,26 @@ import jakarta.ws.rs.core.Configurable; -public record ConnectRestExtensionContextImpl(Configurable> configurable, - ConnectClusterState clusterState) implements ConnectRestExtensionContext { +public class ConnectRestExtensionContextImpl implements ConnectRestExtensionContext { + + private final Configurable> configurable; + private final ConnectClusterState clusterState; + + public ConnectRestExtensionContextImpl( + Configurable> configurable, + ConnectClusterState clusterState + ) { + this.configurable = configurable; + this.clusterState = clusterState; + } + + @Override + public Configurable> configurable() { + return configurable; + } + + @Override + public ConnectClusterState clusterState() { + return clusterState; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java index db660f1651f74..f5de82dab7385 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/ConnectRestServer.java @@ -26,8 +26,9 @@ import org.glassfish.hk2.utilities.binding.AbstractBinder; import org.glassfish.jersey.server.ResourceConfig; +import java.util.Arrays; import java.util.Collection; -import java.util.List; +import java.util.Collections; import java.util.Map; public class ConnectRestServer extends RestServer { @@ -47,7 +48,7 @@ public void initializeResources(Herder herder) { @Override protected Collection> regularResources() { - return List.of( + return Arrays.asList( RootResource.class, ConnectorsResource.class, InternalConnectResource.class, @@ -57,7 +58,9 @@ protected Collection> regularResources() { @Override protected Collection> adminResources() { - return List.of(LoggingResource.class); + return Collections.singletonList( + LoggingResource.class + ); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java index b3262e689eb02..4dedc7289b8f4 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/HerderRequestHandler.java @@ -141,7 +141,7 @@ public T completeOrForwardRequest(FutureCallback cb, String path, String public void completeOrForwardRequest(FutureCallback cb, String path, String method, HttpHeaders headers, Object body, Boolean forward) throws Throwable { - completeOrForwardRequest(cb, path, method, headers, body, new TypeReference<>() { }, new IdentityTranslator<>(), forward); + completeOrForwardRequest(cb, path, method, headers, body, new TypeReference() { }, new IdentityTranslator<>(), forward); } public interface Translator { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java index b576827bc0252..511f7f9f2c7a2 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestClient.java @@ -254,6 +254,27 @@ private static Map convertHttpFieldsToMap(HttpFields httpFields) return headers; } - public record HttpResponse(int status, Map headers, T body) { + public static class HttpResponse { + private final int status; + private final Map headers; + private final T body; + + public HttpResponse(int status, Map headers, T body) { + this.status = status; + this.headers = headers; + this.body = body; + } + + public int status() { + return status; + } + + public Map headers() { + return headers; + } + + public T body() { + return body; + } } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java index 5bbc3312aa791..b6c7690a51d79 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServer.java @@ -17,7 +17,6 @@ package org.apache.kafka.connect.runtime.rest; import org.apache.kafka.common.config.ConfigException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.health.ConnectClusterDetails; @@ -57,6 +56,7 @@ import java.net.URI; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.EnumSet; import java.util.List; import java.util.Locale; @@ -96,7 +96,7 @@ public abstract class RestServer { private final Server jettyServer; private final RequestTimeout requestTimeout; - private List> connectRestExtensionPlugins = List.of(); + private List connectRestExtensions = Collections.emptyList(); /** * Create a REST server for this herder using the specified configs. @@ -217,10 +217,10 @@ public void initializeServer() { throw new ConnectException("Unable to initialize REST server", e); } - log.info("REST server listening at {}, advertising URL {}", jettyServer.getURI(), advertisedUrl()); + log.info("REST server listening at " + jettyServer.getURI() + ", advertising URL " + advertisedUrl()); URI adminUrl = adminUrl(); if (adminUrl != null) - log.info("REST admin endpoints at {}", adminUrl); + log.info("REST admin endpoints at " + adminUrl); } protected final void initializeResources() { @@ -370,11 +370,11 @@ public void stop() { } } } - for (Plugin connectRestExtensionPlugin : connectRestExtensionPlugins) { + for (ConnectRestExtension connectRestExtension : connectRestExtensions) { try { - connectRestExtensionPlugin.close(); + connectRestExtension.close(); } catch (IOException e) { - log.warn("Error while invoking close on {}", connectRestExtensionPlugin.get().getClass(), e); + log.warn("Error while invoking close on " + connectRestExtension.getClass(), e); } } jettyServer.stop(); @@ -504,14 +504,9 @@ ServerConnector findConnector(String protocol) { } protected final void registerRestExtensions(Herder herder, ResourceConfig resourceConfig) { - connectRestExtensionPlugins = Plugin.wrapInstances( - herder.plugins().newPlugins( - config.restExtensions(), - config, - ConnectRestExtension.class - ), - herder.connectMetrics().metrics(), - RestServerConfig.REST_EXTENSION_CLASSES_CONFIG); + connectRestExtensions = herder.plugins().newPlugins( + config.restExtensions(), + config, ConnectRestExtension.class); long herderRequestTimeoutMs = DEFAULT_REST_REQUEST_TIMEOUT_MS; @@ -530,8 +525,8 @@ protected final void registerRestExtensions(Herder herder, ResourceConfig resour new ConnectRestConfigurable(resourceConfig), new ConnectClusterStateImpl(herderRequestTimeoutMs, connectClusterDetails, herder) ); - for (Plugin connectRestExtensionPlugin : connectRestExtensionPlugins) { - connectRestExtensionPlugin.get().register(connectRestExtensionContext); + for (ConnectRestExtension connectRestExtension : connectRestExtensions) { + connectRestExtension.register(connectRestExtensionContext); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java index 1a08a7eb123d4..fd732cc9f1e2c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/RestServerConfig.java @@ -28,6 +28,7 @@ import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -53,7 +54,7 @@ public abstract class RestServerConfig extends AbstractConfig { " Leave hostname empty to bind to default interface.\n" + " Examples of legal listener lists: HTTP://myhost:8083,HTTPS://myhost:8084"; // Visible for testing - static final List LISTENERS_DEFAULT = List.of("http://:8083"); + static final List LISTENERS_DEFAULT = Collections.singletonList("http://:8083"); public static final String REST_ADVERTISED_HOST_NAME_CONFIG = "rest.advertised.host.name"; private static final String REST_ADVERTISED_HOST_NAME_DOC @@ -85,8 +86,7 @@ public abstract class RestServerConfig extends AbstractConfig { private static final String ADMIN_LISTENERS_DOC = "List of comma-separated URIs the Admin REST API will listen on." + " The supported protocols are HTTP and HTTPS." + " An empty or blank string will disable this feature." + - " The default behavior is to use the regular listener (specified by the 'listeners' property)." + - " A comma-separated list of valid URLs, e.g., http://localhost:8080,https://localhost:8443."; + " The default behavior is to use the regular listener (specified by the 'listeners' property)."; public static final String ADMIN_LISTENERS_HTTPS_CONFIGS_PREFIX = "admin.listeners.https."; public static final String REST_EXTENSION_CLASSES_CONFIG = "rest.extension.classes"; @@ -102,7 +102,9 @@ public abstract class RestServerConfig extends AbstractConfig { static final String RESPONSE_HTTP_HEADERS_DOC = "Rules for REST API HTTP response headers"; // Visible for testing static final String RESPONSE_HTTP_HEADERS_DEFAULT = ""; - private static final Collection HEADER_ACTIONS = List.of("set", "add", "setDate", "addDate"); + private static final Collection HEADER_ACTIONS = Collections.unmodifiableList( + Arrays.asList("set", "add", "setDate", "addDate") + ); /** @@ -140,15 +142,15 @@ public abstract class RestServerConfig extends AbstractConfig { public static void addPublicConfig(ConfigDef configDef) { addInternalConfig(configDef); configDef - .define(REST_EXTENSION_CLASSES_CONFIG, + .define( + REST_EXTENSION_CLASSES_CONFIG, ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.LOW, REST_EXTENSION_CLASSES_DOC) - .define(ADMIN_LISTENERS_CONFIG, + "", + ConfigDef.Importance.LOW, REST_EXTENSION_CLASSES_DOC + ).define(ADMIN_LISTENERS_CONFIG, ConfigDef.Type.LIST, null, - ConfigDef.ValidList.anyNonDuplicateValues(true, true), + new AdminListenersValidator(), ConfigDef.Importance.LOW, ADMIN_LISTENERS_DOC); } @@ -305,10 +307,11 @@ static void validateHeaderConfigAction(String action) { private static class ListenersValidator implements ConfigDef.Validator { @Override public void ensureValid(String name, Object value) { - if (!(value instanceof List items)) { + if (!(value instanceof List)) { throw new ConfigException("Invalid value type for listeners (expected list of URLs , ex: http://localhost:8080,https://localhost:8443)."); } + List items = (List) value; if (items.isEmpty()) { throw new ConfigException("Invalid value for listeners, at least one URL is expected, ex: http://localhost:8080,https://localhost:8443."); } @@ -329,6 +332,38 @@ public String toString() { } } + private static class AdminListenersValidator implements ConfigDef.Validator { + @Override + public void ensureValid(String name, Object value) { + if (value == null) { + return; + } + + if (!(value instanceof List)) { + throw new ConfigException("Invalid value type for admin.listeners (expected list)."); + } + + List items = (List) value; + if (items.isEmpty()) { + return; + } + + for (Object item : items) { + if (!(item instanceof String)) { + throw new ConfigException("Invalid type for admin.listeners (expected String)."); + } + if (Utils.isBlank((String) item)) { + throw new ConfigException("Empty URL found when parsing admin.listeners list."); + } + } + } + + @Override + public String toString() { + return "List of comma-separated URLs, ex: http://localhost:8080,https://localhost:8443."; + } + } + private static class ResponseHttpHeadersValidator implements ConfigDef.Validator { @Override public void ensureValid(String name, Object value) { @@ -360,7 +395,7 @@ private static ConfigDef config() { @Override public List adminListeners() { // Disable admin resources (such as the logging resource) - return List.of(); + return Collections.emptyList(); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java index 71d0d18cb861a..b011fba993403 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfo.java @@ -16,10 +16,50 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; -public record ConfigInfo( - @JsonProperty("definition") ConfigKeyInfo configKey, - @JsonProperty("value") ConfigValueInfo configValue -) { +import java.util.Objects; + +public class ConfigInfo { + + private final ConfigKeyInfo configKey; + private final ConfigValueInfo configValue; + + @JsonCreator + public ConfigInfo( + @JsonProperty("definition") ConfigKeyInfo configKey, + @JsonProperty("value") ConfigValueInfo configValue) { + this.configKey = configKey; + this.configValue = configValue; + } + + @JsonProperty("definition") + public ConfigKeyInfo configKey() { + return configKey; + } + + @JsonProperty("value") + public ConfigValueInfo configValue() { + return configValue; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConfigInfo that = (ConfigInfo) o; + return Objects.equals(configKey, that.configKey) && + Objects.equals(configValue, that.configValue); + } + + @Override + public int hashCode() { + return Objects.hash(configKey, configValue); + } + + @Override + public String toString() { + return "[" + configKey + "," + configValue + "]"; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java index 28171d8404278..dd075b5f90df8 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigInfos.java @@ -16,14 +16,84 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; +import java.util.Objects; -public record ConfigInfos( - @JsonProperty("name") String name, - @JsonProperty("error_count") int errorCount, - @JsonProperty("groups") List groups, - @JsonProperty("configs") List configs -) { -} \ No newline at end of file +public class ConfigInfos { + + @JsonProperty("name") + private final String name; + + @JsonProperty("error_count") + private final int errorCount; + + @JsonProperty("groups") + private final List groups; + + @JsonProperty("configs") + private final List configs; + + @JsonCreator + public ConfigInfos(@JsonProperty("name") String name, + @JsonProperty("error_count") int errorCount, + @JsonProperty("groups") List groups, + @JsonProperty("configs") List configs) { + this.name = name; + this.groups = groups; + this.errorCount = errorCount; + this.configs = configs; + } + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public List groups() { + return groups; + } + + @JsonProperty("error_count") + public int errorCount() { + return errorCount; + } + + @JsonProperty("configs") + public List values() { + return configs; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConfigInfos that = (ConfigInfos) o; + return Objects.equals(name, that.name) && + Objects.equals(errorCount, that.errorCount) && + Objects.equals(groups, that.groups) && + Objects.equals(configs, that.configs); + } + + @Override + public int hashCode() { + return Objects.hash(name, errorCount, groups, configs); + } + + @Override + public String toString() { + return "[" + + name + + "," + + errorCount + + "," + + groups + + "," + + configs + + "]"; + } + +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java index 2d3a3f93be151..0b1a41c212ed9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigKeyInfo.java @@ -16,21 +16,153 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; +import java.util.Objects; -public record ConfigKeyInfo( - @JsonProperty("name") String name, - @JsonProperty("type") String type, - @JsonProperty("required") boolean required, - @JsonProperty("default_value") String defaultValue, - @JsonProperty("importance") String importance, - @JsonProperty("documentation") String documentation, - @JsonProperty("group") String group, - @JsonProperty("order_in_group") int orderInGroup, - @JsonProperty("width") String width, - @JsonProperty("display_name") String displayName, - @JsonProperty("dependents") List dependents -) { +public class ConfigKeyInfo { + + private final String name; + private final String type; + private final boolean required; + private final String defaultValue; + private final String importance; + private final String documentation; + private final String group; + private final int orderInGroup; + private final String width; + private final String displayName; + private final List dependents; + + @JsonCreator + public ConfigKeyInfo(@JsonProperty("name") String name, + @JsonProperty("type") String type, + @JsonProperty("required") boolean required, + @JsonProperty("default_value") String defaultValue, + @JsonProperty("importance") String importance, + @JsonProperty("documentation") String documentation, + @JsonProperty("group") String group, + @JsonProperty("order_in_group") int orderInGroup, + @JsonProperty("width") String width, + @JsonProperty("display_name") String displayName, + @JsonProperty("dependents") List dependents) { + this.name = name; + this.type = type; + this.required = required; + this.defaultValue = defaultValue; + this.importance = importance; + this.documentation = documentation; + this.group = group; + this.orderInGroup = orderInGroup; + this.width = width; + this.displayName = displayName; + this.dependents = dependents; + } + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public String type() { + return type; + } + + @JsonProperty + public boolean required() { + return required; + } + + @JsonProperty("default_value") + public String defaultValue() { + return defaultValue; + } + + @JsonProperty + public String documentation() { + return documentation; + } + + @JsonProperty + public String group() { + return group; + } + + @JsonProperty("order") + public int orderInGroup() { + return orderInGroup; + } + + @JsonProperty + public String width() { + return width; + } + + @JsonProperty + public String importance() { + return importance; + } + + @JsonProperty("display_name") + public String displayName() { + return displayName; + } + + @JsonProperty + public List dependents() { + return dependents; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConfigKeyInfo that = (ConfigKeyInfo) o; + return Objects.equals(name, that.name) && + Objects.equals(type, that.type) && + Objects.equals(required, that.required) && + Objects.equals(defaultValue, that.defaultValue) && + Objects.equals(importance, that.importance) && + Objects.equals(documentation, that.documentation) && + Objects.equals(group, that.group) && + Objects.equals(orderInGroup, that.orderInGroup) && + Objects.equals(width, that.width) && + Objects.equals(displayName, that.displayName) && + Objects.equals(dependents, that.dependents); + } + + @Override + public int hashCode() { + return Objects.hash(name, type, required, defaultValue, importance, documentation, group, orderInGroup, width, displayName, dependents); + } + + @Override + public String toString() { + return "[" + + name + + "," + + type + + "," + + required + + "," + + defaultValue + + "," + + importance + + "," + + documentation + + "," + + group + + "," + + orderInGroup + + "," + + width + + "," + + displayName + + "," + + dependents + + "]"; + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java index 7eb5c71d3b3a8..a5528730e22fc 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConfigValueInfo.java @@ -16,15 +16,88 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; +import java.util.Objects; -public record ConfigValueInfo( - @JsonProperty("name") String name, - @JsonProperty("value") String value, - @JsonProperty("recommended_values") List recommendedValues, - @JsonProperty("errors") List errors, - @JsonProperty("visible") boolean visible -) { -} \ No newline at end of file +public class ConfigValueInfo { + private final String name; + private final String value; + private final List recommendedValues; + private final List errors; + private final boolean visible; + + @JsonCreator + public ConfigValueInfo( + @JsonProperty("name") String name, + @JsonProperty("value") String value, + @JsonProperty("recommended_values") List recommendedValues, + @JsonProperty("errors") List errors, + @JsonProperty("visible") boolean visible) { + this.name = name; + this.value = value; + this.recommendedValues = recommendedValues; + this.errors = errors; + this.visible = visible; + } + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public String value() { + return value; + } + + @JsonProperty("recommended_values") + public List recommendedValues() { + return recommendedValues; + } + + @JsonProperty + public List errors() { + return errors; + } + + @JsonProperty + public boolean visible() { + return visible; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConfigValueInfo that = (ConfigValueInfo) o; + return Objects.equals(name, that.name) && + Objects.equals(value, that.value) && + Objects.equals(recommendedValues, that.recommendedValues) && + Objects.equals(errors, that.errors) && + Objects.equals(visible, that.visible); + } + + @Override + public int hashCode() { + return Objects.hash(name, value, recommendedValues, errors, visible); + } + + @Override + public String toString() { + return "[" + + name + + "," + + value + + "," + + recommendedValues + + "," + + errors + + "," + + visible + + "]"; + } + +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java index 775268677e540..cb9b26ff040e9 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorInfo.java @@ -18,15 +18,66 @@ import org.apache.kafka.connect.util.ConnectorTaskId; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.List; import java.util.Map; +import java.util.Objects; -public record ConnectorInfo( - @JsonProperty("name") String name, - @JsonProperty("config") Map config, - @JsonProperty("tasks") List tasks, - @JsonProperty("type") ConnectorType type -) { -} \ No newline at end of file +public class ConnectorInfo { + + private final String name; + private final Map config; + private final List tasks; + private final ConnectorType type; + + @JsonCreator + public ConnectorInfo(@JsonProperty("name") String name, + @JsonProperty("config") Map config, + @JsonProperty("tasks") List tasks, + @JsonProperty("type") ConnectorType type) { + this.name = name; + this.config = config; + this.tasks = tasks; + this.type = type; + } + + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public ConnectorType type() { + return type; + } + + @JsonProperty + public Map config() { + return config; + } + + @JsonProperty + public List tasks() { + return tasks; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ConnectorInfo that = (ConnectorInfo) o; + return Objects.equals(name, that.name) && + Objects.equals(config, that.config) && + Objects.equals(tasks, that.tasks) && + Objects.equals(type, that.type); + } + + @Override + public int hashCode() { + return Objects.hash(name, config, tasks, type); + } + +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java index 2420c99177987..2813a65c53c41 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffset.java @@ -16,9 +16,11 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Map; +import java.util.Objects; /** * Represents a single {partition, offset} pair for either a sink connector or a source connector. For source connectors, @@ -36,15 +38,50 @@ * } * */ -public record ConnectorOffset( - @JsonProperty("partition") Map partition, - @JsonProperty("offset") Map offset -) { +public class ConnectorOffset { + + private final Map partition; + private final Map offset; + + @JsonCreator + public ConnectorOffset(@JsonProperty("partition") Map partition, @JsonProperty("offset") Map offset) { + this.partition = partition; + this.offset = offset; + } + + @JsonProperty + public Map partition() { + return partition; + } + + @JsonProperty + public Map offset() { + return offset; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof ConnectorOffset)) { + return false; + } + ConnectorOffset that = (ConnectorOffset) obj; + return Objects.equals(this.partition, that.partition) && + Objects.equals(this.offset, that.offset); + } + + @Override + public int hashCode() { + return Objects.hash(partition, offset); + } + @Override public String toString() { return "{" + - "partition=" + partition + - ", offset=" + offset + - '}'; + "partition=" + partition + + ", offset=" + offset + + '}'; } -} \ No newline at end of file +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java index c0e6b33e0dc37..d37138a82ceb1 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsets.java @@ -18,6 +18,7 @@ import org.apache.kafka.connect.runtime.rest.resources.ConnectorsResource; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.HashMap; @@ -50,9 +51,19 @@ * @see ConnectorsResource#getOffsets * @see ConnectorsResource#alterConnectorOffsets */ -public record ConnectorOffsets( - @JsonProperty("offsets") List offsets -) { +public class ConnectorOffsets { + private final List offsets; + + @JsonCreator + public ConnectorOffsets(@JsonProperty("offsets") List offsets) { + this.offsets = offsets; + } + + @JsonProperty + public List offsets() { + return offsets; + } + public Map, Map> toMap() { Map, Map> partitionOffsetMap = new HashMap<>(); for (ConnectorOffset offset : offsets) { @@ -61,8 +72,25 @@ public record ConnectorOffsets( return partitionOffsetMap; } + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof ConnectorOffsets)) { + return false; + } + ConnectorOffsets that = (ConnectorOffsets) obj; + return Objects.equals(this.offsets, that.offsets); + } + + @Override + public int hashCode() { + return Objects.hashCode(offsets); + } + @Override public String toString() { return Objects.toString(offsets); } -} \ No newline at end of file +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java index fe53e65e37953..6280473af964d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorStateInfo.java @@ -23,24 +23,53 @@ import java.util.List; import java.util.Objects; -public record ConnectorStateInfo( - @JsonProperty String name, - @JsonProperty ConnectorState connector, - @JsonProperty List tasks, - @JsonProperty ConnectorType type -) { +public class ConnectorStateInfo { + + private final String name; + private final ConnectorState connector; + private final List tasks; + private final ConnectorType type; + + @JsonCreator + public ConnectorStateInfo(@JsonProperty("name") String name, + @JsonProperty("connector") ConnectorState connector, + @JsonProperty("tasks") List tasks, + @JsonProperty("type") ConnectorType type) { + this.name = name; + this.connector = connector; + this.tasks = tasks; + this.type = type; + } + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public ConnectorState connector() { + return connector; + } + + @JsonProperty + public List tasks() { + return tasks; + } + + @JsonProperty + public ConnectorType type() { + return type; + } public abstract static class AbstractState { private final String state; private final String trace; private final String workerId; - private final String version; - public AbstractState(String state, String workerId, String trace, String version) { + public AbstractState(String state, String workerId, String trace) { this.state = state; this.workerId = workerId; this.trace = trace; - this.version = version; } @JsonProperty @@ -58,21 +87,14 @@ public String workerId() { public String trace() { return trace; } - - @JsonProperty - @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = PluginInfo.NoVersionFilter.class) - public String version() { - return version; - } } public static class ConnectorState extends AbstractState { @JsonCreator public ConnectorState(@JsonProperty("state") String state, @JsonProperty("worker_id") String worker, - @JsonProperty("msg") String msg, - @JsonProperty("version") String version) { - super(state, worker, msg, version); + @JsonProperty("msg") String msg) { + super(state, worker, msg); } } @@ -83,9 +105,8 @@ public static class TaskState extends AbstractState implements Comparable config; + private final InitialState initialState; + + @JsonCreator + public CreateConnectorRequest(@JsonProperty("name") String name, @JsonProperty("config") Map config, + @JsonProperty("initial_state") InitialState initialState) { + this.name = name; + this.config = config; + this.initialState = initialState; + } + + @JsonProperty + public String name() { + return name; + } + + @JsonProperty + public Map config() { + return config; + } + + @JsonProperty("initial_state") + public InitialState initialState() { + return initialState; + } -public record CreateConnectorRequest( - @JsonProperty("name") String name, - @JsonProperty("config") Map config, - @JsonProperty("initial_state") InitialState initialState -) { public TargetState initialTargetState() { - return initialState != null ? initialState.toTargetState() : null; + if (initialState != null) { + return initialState.toTargetState(); + } else { + return null; + } + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + CreateConnectorRequest that = (CreateConnectorRequest) o; + return Objects.equals(name, that.name) && + Objects.equals(config, that.config) && + Objects.equals(initialState, that.initialState); + } + + @Override + public int hashCode() { + return Objects.hash(name, config, initialState); } public enum InitialState { @@ -44,11 +87,16 @@ public static InitialState forValue(String value) { } public TargetState toTargetState() { - return switch (this) { - case RUNNING -> TargetState.STARTED; - case PAUSED -> TargetState.PAUSED; - case STOPPED -> TargetState.STOPPED; - }; + switch (this) { + case RUNNING: + return TargetState.STARTED; + case PAUSED: + return TargetState.PAUSED; + case STOPPED: + return TargetState.STOPPED; + default: + throw new IllegalArgumentException("Unknown initial state: " + this); + } } } -} \ No newline at end of file +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java index 67012ebece7ed..ecc4de56cd4bb 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/ErrorMessage.java @@ -16,15 +16,47 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * Standard error format for all REST API failures. These are generated automatically by * {@link org.apache.kafka.connect.runtime.rest.errors.ConnectExceptionMapper} in response to uncaught * {@link org.apache.kafka.connect.errors.ConnectException}s. */ -public record ErrorMessage( - @JsonProperty("error_code") int errorCode, - @JsonProperty String message -) { -} \ No newline at end of file +public class ErrorMessage { + private final int errorCode; + private final String message; + + @JsonCreator + public ErrorMessage(@JsonProperty("error_code") int errorCode, @JsonProperty("message") String message) { + this.errorCode = errorCode; + this.message = message; + } + + @JsonProperty("error_code") + public int errorCode() { + return errorCode; + } + + @JsonProperty + public String message() { + return message; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + ErrorMessage that = (ErrorMessage) o; + return Objects.equals(errorCode, that.errorCode) && + Objects.equals(message, that.message); + } + + @Override + public int hashCode() { + return Objects.hash(errorCode, message); + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java index 0f3ce9a7c38e6..4a9a6be32e539 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/LoggerLevel.java @@ -20,11 +20,49 @@ import java.util.Objects; -public record LoggerLevel( - @JsonProperty String level, - @JsonProperty("last_modified") Long lastModified -) { - public LoggerLevel { - Objects.requireNonNull(level, "level may not be null"); +public class LoggerLevel { + + private final String level; + private final Long lastModified; + + public LoggerLevel( + @JsonProperty("level") String level, + @JsonProperty("last_modified") Long lastModified + ) { + this.level = Objects.requireNonNull(level, "level may not be null"); + this.lastModified = lastModified; + } + + @JsonProperty + public String level() { + return level; + } + + @JsonProperty("last_modified") + public Long lastModified() { + return lastModified; + } + + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + LoggerLevel that = (LoggerLevel) o; + return level.equals(that.level) && Objects.equals(lastModified, that.lastModified); + } + + @Override + public int hashCode() { + return Objects.hash(level, lastModified); + } + + @Override + public String toString() { + return "LoggerLevel{" + + "level='" + level + '\'' + + ", lastModified=" + lastModified + + '}'; } -} \ No newline at end of file +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java index 3c84b44b1d57a..e4dc8fd0b6f67 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/Message.java @@ -16,8 +16,11 @@ */ package org.apache.kafka.connect.runtime.rest.entities; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; +import java.util.Objects; + /** * Standard format for regular successful REST API responses that look like: *

          @@ -26,5 +29,33 @@
            *     }
            * 
          */ -public record Message(@JsonProperty String message) { +public class Message { + private final String message; + + @JsonCreator + public Message(@JsonProperty("message") String message) { + this.message = message; + } + + @JsonProperty + public String message() { + return message; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (!(obj instanceof Message)) { + return false; + } + Message that = (Message) obj; + return Objects.equals(this.message, that.message); + } + + @Override + public int hashCode() { + return message.hashCode(); + } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java index 2f21bf9abd4d1..cd8f3c614a74e 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/PluginInfo.java @@ -19,23 +19,77 @@ import org.apache.kafka.connect.runtime.isolation.PluginDesc; import org.apache.kafka.connect.runtime.isolation.PluginType; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonInclude; import com.fasterxml.jackson.annotation.JsonProperty; -public record PluginInfo( - @JsonProperty("class") String className, - @JsonProperty("type") PluginType type, - @JsonProperty("version") - @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = NoVersionFilter.class) - String version -) { +import java.util.Objects; + +public class PluginInfo { + private final String className; + private final PluginType type; + private final String version; + + @JsonCreator + public PluginInfo( + @JsonProperty("class") String className, + @JsonProperty("type") PluginType type, + @JsonProperty("version") String version + ) { + this.className = className; + this.type = type; + this.version = version; + } + public PluginInfo(PluginDesc plugin) { this(plugin.className(), plugin.type(), plugin.version()); } + @JsonProperty("class") + public String className() { + return className; + } + + @JsonProperty("type") + public String type() { + return type.toString(); + } + + @JsonProperty("version") + @JsonInclude(value = JsonInclude.Include.CUSTOM, valueFilter = NoVersionFilter.class) + public String version() { + return version; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + PluginInfo that = (PluginInfo) o; + return Objects.equals(className, that.className) && + Objects.equals(type, that.type) && + Objects.equals(version, that.version); + } + + @Override + public int hashCode() { + return Objects.hash(className, type, version); + } + + @Override + public String toString() { + return "PluginInfo{" + "className='" + className + '\'' + + ", type=" + type.toString() + + ", version='" + version + '\'' + + '}'; + } + public static final class NoVersionFilter { - // Used by Jackson to filter out undefined versions - @Override + // This method is used by Jackson to filter the version field for plugins that don't have a version public boolean equals(Object obj) { return PluginDesc.UNDEFINED_VERSION.equals(obj); } @@ -46,4 +100,4 @@ public int hashCode() { return super.hashCode(); } } -} \ No newline at end of file +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java index b4d78b7ee8921..cc5ae3577f0d3 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/entities/TaskInfo.java @@ -18,12 +18,43 @@ import org.apache.kafka.connect.util.ConnectorTaskId; +import com.fasterxml.jackson.annotation.JsonCreator; import com.fasterxml.jackson.annotation.JsonProperty; import java.util.Map; +import java.util.Objects; -public record TaskInfo( - @JsonProperty("id") ConnectorTaskId id, - @JsonProperty("config") Map config -) { -} \ No newline at end of file +public class TaskInfo { + private final ConnectorTaskId id; + private final Map config; + + @JsonCreator + public TaskInfo(@JsonProperty("id") ConnectorTaskId id, @JsonProperty("config") Map config) { + this.id = id; + this.config = config; + } + + @JsonProperty + public ConnectorTaskId id() { + return id; + } + + @JsonProperty + public Map config() { + return config; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TaskInfo taskInfo = (TaskInfo) o; + return Objects.equals(id, taskInfo.id) && + Objects.equals(config, taskInfo.config); + } + + @Override + public int hashCode() { + return Objects.hash(id, config); + } +} diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java index 65053151b4fad..91c337c234b99 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/errors/ConnectExceptionMapper.java @@ -42,7 +42,8 @@ public class ConnectExceptionMapper implements ExceptionMapper { public Response toResponse(Exception exception) { log.debug("Uncaught exception in REST call to /{}", uriInfo.getPath(), exception); - if (exception instanceof ConnectRestException restException) { + if (exception instanceof ConnectRestException) { + ConnectRestException restException = (ConnectRestException) exception; return Response.status(restException.statusCode()) .entity(new ErrorMessage(restException.errorCode(), restException.getMessage())) .build(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java index 8637e79087cab..800a8b2c1a3d2 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResource.java @@ -20,7 +20,6 @@ import org.apache.kafka.connect.runtime.Herder; import org.apache.kafka.connect.runtime.isolation.PluginDesc; import org.apache.kafka.connect.runtime.isolation.PluginType; -import org.apache.kafka.connect.runtime.isolation.PluginUtils; import org.apache.kafka.connect.runtime.rest.RestRequestTimeout; import org.apache.kafka.connect.runtime.rest.entities.ConfigInfos; import org.apache.kafka.connect.runtime.rest.entities.ConfigKeyInfo; @@ -30,17 +29,17 @@ import org.apache.kafka.connect.util.Stage; import org.apache.kafka.connect.util.StagedTimeoutException; -import org.apache.maven.artifact.versioning.InvalidVersionSpecificationException; -import org.apache.maven.artifact.versioning.VersionRange; - import java.time.Instant; +import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import java.util.stream.Collectors; import io.swagger.v3.oas.annotations.Operation; import io.swagger.v3.oas.annotations.Parameter; @@ -142,11 +141,11 @@ public List listConnectorPlugins( ) { synchronized (this) { if (connectorsOnly) { - return connectorPlugins.stream() - .filter(p -> p.type() == PluginType.SINK || p.type() == PluginType.SOURCE) - .toList(); + return Collections.unmodifiableList(connectorPlugins.stream() + .filter(p -> PluginType.SINK.toString().equals(p.type()) || PluginType.SOURCE.toString().equals(p.type())) + .collect(Collectors.toList())); } else { - return List.copyOf(connectorPlugins); + return Collections.unmodifiableList(new ArrayList<>(connectorPlugins)); } } } @@ -154,18 +153,9 @@ public List listConnectorPlugins( @GET @Path("/{pluginName}/config") @Operation(summary = "Get the configuration definition for the specified pluginName") - public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName, - final @QueryParam("version") @DefaultValue("latest") String version) { - - VersionRange range; - try { - range = PluginUtils.connectorVersionRequirement(version); - } catch (InvalidVersionSpecificationException e) { - throw new BadRequestException("Invalid version specification: " + version, e); - } - + public List getConnectorConfigDef(final @PathParam("pluginName") String pluginName) { synchronized (this) { - return herder.connectorPluginConfig(pluginName, range); + return herder.connectorPluginConfig(pluginName); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java index 148e96a4cee13..efbf39d790bef 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResource.java @@ -41,6 +41,7 @@ import org.slf4j.LoggerFactory; import java.net.URI; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -150,7 +151,7 @@ public Response createConnector(final @Parameter(hidden = true) @QueryParam("for FutureCallback> cb = new FutureCallback<>(); herder.putConnectorConfig(name, configs, createRequest.initialTargetState(), false, cb); Herder.Created info = requestHandler.completeOrForwardRequest(cb, "/connectors", "POST", headers, createRequest, - new TypeReference<>() { }, new CreatedConnectorInfoTranslator(), forward); + new TypeReference() { }, new CreatedConnectorInfoTranslator(), forward); URI location = UriBuilder.fromUri("/connectors").path(name).build(); return Response.created(location).entity(info.result()).build(); @@ -190,7 +191,7 @@ public Response getConnectorActiveTopics(final @PathParam("connector") String co "Topic tracking is disabled."); } ActiveTopicsInfo info = herder.connectorActiveTopics(connector); - return Response.ok(Map.of(info.connector(), info)).build(); + return Response.ok(Collections.singletonMap(info.connector(), info)).build(); } @PUT @@ -221,7 +222,7 @@ public Response putConnectorConfig(final @PathParam("connector") String connecto herder.putConnectorConfig(connector, connectorConfig, true, cb); Herder.Created createdInfo = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/config", - "PUT", headers, connectorConfig, new TypeReference<>() { }, new CreatedConnectorInfoTranslator(), forward); + "PUT", headers, connectorConfig, new TypeReference() { }, new CreatedConnectorInfoTranslator(), forward); Response.ResponseBuilder response; if (createdInfo.created()) { URI location = UriBuilder.fromUri("/connectors").path(connector).build(); @@ -241,7 +242,7 @@ public Response patchConnectorConfig(final @PathParam("connector") String connec FutureCallback> cb = new FutureCallback<>(); herder.patchConnectorConfig(connector, connectorConfigPatch, cb); Herder.Created createdInfo = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/config", - "PATCH", headers, connectorConfigPatch, new TypeReference<>() { }, new CreatedConnectorInfoTranslator(), forward); + "PATCH", headers, connectorConfigPatch, new TypeReference() { }, new CreatedConnectorInfoTranslator(), forward); return Response.ok().entity(createdInfo.result()).build(); } @@ -269,7 +270,7 @@ public Response restartConnector(final @PathParam("connector") String connector, Map queryParameters = new HashMap<>(); queryParameters.put("includeTasks", includeTasks.toString()); queryParameters.put("onlyFailed", onlyFailed.toString()); - ConnectorStateInfo stateInfo = requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, queryParameters, null, new TypeReference<>() { + ConnectorStateInfo stateInfo = requestHandler.completeOrForwardRequest(cb, forwardingPath, "POST", headers, queryParameters, null, new TypeReference() { }, new IdentityTranslator<>(), forward); return Response.accepted().entity(stateInfo).build(); } @@ -333,7 +334,7 @@ public void restartTask(final @PathParam("connector") String connector, FutureCallback cb = new FutureCallback<>(); ConnectorTaskId taskId = new ConnectorTaskId(connector, task); herder.restartTask(taskId, cb); - requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks/" + task + "/restart", "POST", headers, null, new TypeReference<>() { }, forward); + requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/tasks/" + task + "/restart", "POST", headers, null, new TypeReference() { }, forward); } @DELETE @@ -344,7 +345,7 @@ public void destroyConnector(final @PathParam("connector") String connector, final @Parameter(hidden = true) @QueryParam("forward") Boolean forward) throws Throwable { FutureCallback> cb = new FutureCallback<>(); herder.deleteConnectorConfig(connector, cb); - requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", headers, null, new TypeReference<>() { }, forward); + requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector, "DELETE", headers, null, new TypeReference>() { }, forward); } @GET @@ -369,7 +370,7 @@ public Response alterConnectorOffsets(final @Parameter(hidden = true) @QueryPara FutureCallback cb = new FutureCallback<>(); herder.alterConnectorOffsets(connector, offsets.toMap(), cb); Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "PATCH", headers, offsets, - new TypeReference<>() { }, new IdentityTranslator<>(), forward); + new TypeReference() { }, new IdentityTranslator<>(), forward); return Response.ok().entity(msg).build(); } @@ -381,7 +382,7 @@ public Response resetConnectorOffsets(final @Parameter(hidden = true) @QueryPara FutureCallback cb = new FutureCallback<>(); herder.resetConnectorOffsets(connector, cb); Message msg = requestHandler.completeOrForwardRequest(cb, "/connectors/" + connector + "/offsets", "DELETE", headers, null, - new TypeReference<>() { }, new IdentityTranslator<>(), forward); + new TypeReference() { }, new IdentityTranslator<>(), forward); return Response.ok().entity(msg).build(); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java index b91772bb6ba49..8ffec431f36de 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/InternalClusterResource.java @@ -51,7 +51,7 @@ public abstract class InternalClusterResource { private static final TypeReference>> TASK_CONFIGS_TYPE = - new TypeReference<>() { }; + new TypeReference>>() { }; private final HerderRequestHandler requestHandler; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java index c5bad9fd78a57..dbbfb46375dfd 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResource.java @@ -21,6 +21,7 @@ import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; +import org.apache.logging.log4j.Level; import org.slf4j.LoggerFactory; import java.util.List; @@ -118,7 +119,8 @@ public Response setLevel(final @PathParam("logger") String namespace, } // Make sure that this is a valid level - if (org.apache.logging.log4j.Level.getLevel(levelString) == null) { + Level level = Level.toLevel(levelString.toUpperCase(Locale.ROOT), null); + if (level == null) { throw new NotFoundException("invalid log level '" + levelString + "'."); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java index 83a175e8d5fff..8f51b6e1b942c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/rest/util/SSLUtils.java @@ -25,6 +25,7 @@ import org.eclipse.jetty.util.ssl.SslContextFactory; +import java.util.Arrays; import java.util.List; import java.util.Map; import java.util.regex.Pattern; @@ -122,10 +123,8 @@ protected static void configureSslContextFactoryTrustStore(SslContextFactory ssl */ @SuppressWarnings("unchecked") protected static void configureSslContextFactoryAlgorithms(SslContextFactory ssl, Map sslConfigValues) { - List sslEnabledProtocols = (List) getOrDefault(sslConfigValues, SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, List.of(COMMA_WITH_WHITESPACE.split(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS))); - - if (!sslEnabledProtocols.isEmpty()) - ssl.setIncludeProtocols(sslEnabledProtocols.toArray(new String[0])); + List sslEnabledProtocols = (List) getOrDefault(sslConfigValues, SslConfigs.SSL_ENABLED_PROTOCOLS_CONFIG, Arrays.asList(COMMA_WITH_WHITESPACE.split(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS))); + ssl.setIncludeProtocols(sslEnabledProtocols.toArray(new String[0])); String sslProvider = (String) sslConfigValues.get(SslConfigs.SSL_PROVIDER_CONFIG); if (sslProvider != null) @@ -134,8 +133,7 @@ protected static void configureSslContextFactoryAlgorithms(SslContextFactory ssl ssl.setProtocol((String) getOrDefault(sslConfigValues, SslConfigs.SSL_PROTOCOL_CONFIG, SslConfigs.DEFAULT_SSL_PROTOCOL)); List sslCipherSuites = (List) sslConfigValues.get(SslConfigs.SSL_CIPHER_SUITES_CONFIG); - - if (!sslCipherSuites.isEmpty()) + if (sslCipherSuites != null) ssl.setIncludeCipherSuites(sslCipherSuites.toArray(new String[0])); ssl.setKeyManagerFactoryAlgorithm((String) getOrDefault(sslConfigValues, SslConfigs.SSL_KEYMANAGER_ALGORITHM_CONFIG, SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM)); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java index e654ded2239e7..dee293b0c4ce0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerder.java @@ -378,7 +378,7 @@ public synchronized void restartConnectorAndTasks(RestartRequest request, Callba } Optional maybePlan = buildRestartPlan(request); - if (maybePlan.isEmpty()) { + if (!maybePlan.isPresent()) { cb.onCompletion(new NotFoundException("Status for connector " + connectorName + " not found", null), null); return; } @@ -463,7 +463,7 @@ public void setClusterLoggerLevel(String namespace, String level) { private void startConnector(String connName, Callback onStart) { Map connConfigs = configState.connectorConfig(connName); TargetState targetState = configState.targetState(connName); - worker.startConnector(connName, connConfigs, new HerderConnectorContext(this, connName, worker.metrics().connectorPluginMetrics(connName)), this, targetState, onStart); + worker.startConnector(connName, connConfigs, new HerderConnectorContext(this, connName), this, targetState, onStart); } private List> recomputeTaskConfigs(String connName) { @@ -489,26 +489,28 @@ private void createConnectorTasks(String connName, Collection t } private boolean startTask(ConnectorTaskId taskId, Map connProps) { - return switch (connectorType(connProps)) { - case SINK -> worker.startSinkTask( - taskId, - configState, - connProps, - configState.taskConfig(taskId), - this, - configState.targetState(taskId.connector()) - ); - case SOURCE -> worker.startSourceTask( - taskId, - configState, - connProps, - configState.taskConfig(taskId), - this, - configState.targetState(taskId.connector()) - ); - default -> - throw new ConnectException("Failed to start task " + taskId + " since it is not a recognizable type (source or sink)"); - }; + switch (connectorType(connProps)) { + case SINK: + return worker.startSinkTask( + taskId, + configState, + connProps, + configState.taskConfig(taskId), + this, + configState.targetState(taskId.connector()) + ); + case SOURCE: + return worker.startSourceTask( + taskId, + configState, + connProps, + configState.taskConfig(taskId), + this, + configState.targetState(taskId.connector()) + ); + default: + throw new ConnectException("Failed to start task " + taskId + " since it is not a recognizable type (source or sink)"); + } } private void removeConnectorTasks(String connName) { @@ -622,8 +624,9 @@ public void cancel() { @Override public boolean equals(Object o) { if (this == o) return true; - if (!(o instanceof StandaloneHerderRequest other)) + if (!(o instanceof StandaloneHerderRequest)) return false; + StandaloneHerderRequest other = (StandaloneHerderRequest) o; return seq == other.seq; } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java index 5626fbc809db5..fc2327a1bf7c6 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ClusterConfigState.java @@ -23,6 +23,7 @@ import org.apache.kafka.connect.util.ConnectorTaskId; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -36,15 +37,15 @@ public class ClusterConfigState { public static final ClusterConfigState EMPTY = new ClusterConfigState( NO_OFFSET, null, - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet()); private final long offset; private final SessionKey sessionKey; @@ -231,12 +232,12 @@ public boolean pendingFencing(String connectorName) { */ public List tasks(String connectorName) { if (inconsistentConnectors.contains(connectorName)) { - return List.of(); + return Collections.emptyList(); } Integer numTasks = connectorTaskCounts.get(connectorName); if (numTasks == null) { - return List.of(); + return Collections.emptyList(); } List taskIds = new ArrayList<>(numTasks); @@ -244,7 +245,7 @@ public List tasks(String connectorName) { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, taskIndex); taskIds.add(taskId); } - return List.copyOf(taskIds); + return Collections.unmodifiableList(taskIds); } /** diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java index 99e7f94fc5725..3c79a7817481a 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStore.java @@ -28,6 +28,7 @@ import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -209,7 +210,7 @@ public Future> get(Collection keys) { Future> workerGetFuture = getFromStore(workerStore, keys); Future> connectorGetFuture = getFromStore(connectorStore, keys); - return new Future<>() { + return new Future>() { @Override public boolean cancel(boolean mayInterruptIfRunning) { // Note the use of | instead of || here; this causes cancel to be invoked on both futures, @@ -397,7 +398,7 @@ private LoggingContext loggingContext() { } private static Future> getFromStore(Optional store, Collection keys) { - return store.map(s -> s.get(keys)).orElseGet(() -> CompletableFuture.completedFuture(Map.of())); + return store.map(s -> s.get(keys)).orElseGet(() -> CompletableFuture.completedFuture(Collections.emptyMap())); } private class ChainedOffsetWriteFuture implements Future { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java index 200e5e0b48f90..59caa61266048 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/FileOffsetBackingStore.java @@ -31,6 +31,7 @@ import java.nio.ByteBuffer; import java.nio.file.Files; import java.nio.file.NoSuchFileException; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -111,6 +112,6 @@ protected void save() { @Override public Set> connectorPartitions(String connectorName) { - return connectorPartitions.getOrDefault(connectorName, Set.of()); + return connectorPartitions.getOrDefault(connectorName, Collections.emptySet()); } } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java index 0e425301c1111..63b33a792cfed 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaConfigBackingStore.java @@ -62,6 +62,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Base64; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -544,7 +545,7 @@ public void removeConnectorConfig(String connector) { log.debug("Removing connector configuration for connector '{}'", connector); try { Timer timer = time.timer(READ_WRITE_TOTAL_TIMEOUT_MS); - List keyValues = List.of( + List keyValues = Arrays.asList( new ProducerKeyValue(CONNECTOR_KEY(connector), null), new ProducerKeyValue(TARGET_STATE_KEY(connector), null) ); @@ -791,7 +792,7 @@ KafkaBasedLog setupAndCreateKafkaBasedLog(String topic, final Wo Map topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).configStorageTopicSettings() - : Map.of(); + : Collections.emptyMap(); NewTopic topicDescription = TopicAdmin.defineTopic(topic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -810,7 +811,7 @@ KafkaBasedLog setupAndCreateKafkaBasedLog(String topic, final Wo * @param timer Timer bounding how long this method can block. The timer is updated before the method returns. */ private void sendPrivileged(String key, byte[] value, Timer timer) throws ExecutionException, InterruptedException, TimeoutException { - sendPrivileged(List.of(new ProducerKeyValue(key, value)), timer); + sendPrivileged(Collections.singletonList(new ProducerKeyValue(key, value)), timer); } /** @@ -853,7 +854,14 @@ private void sendPrivileged(List keyValues, Timer timer) throw } } - private record ProducerKeyValue(String key, byte[] value) { + private static class ProducerKeyValue { + final String key; + final byte[] value; + + ProducerKeyValue(String key, byte[] value) { + this.key = key; + this.value = value; + } } private void relinquishWritePrivileges() { @@ -1250,7 +1258,7 @@ private void processLoggerLevelRecord(String namespace, SchemaAndValue value) { } else { // TRACE level since there may be many of these records in the config topic log.trace( - "Ignoring old logging level {} for namespace {} that was written to the config topic before this worker completed startup", + "Ignoring old logging level {} for namespace {} that was writen to the config topic before this worker completed startup", level, namespace ); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java index 7920b3d6e0c0a..96da411a27f22 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStore.java @@ -45,6 +45,7 @@ import java.nio.ByteBuffer; import java.time.Duration; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Objects; @@ -217,7 +218,7 @@ public void configure(final WorkerConfig config) { protected NewTopic newTopicDescription(final String topic, final WorkerConfig config) { Map topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).offsetStorageTopicSettings() - : Map.of(); + : Collections.emptyMap(); return TopicAdmin.defineTopic(topic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -264,7 +265,7 @@ public void stop() { @Override public Future> get(final Collection keys) { - ConvertingFutureCallback> future = new ConvertingFutureCallback<>() { + ConvertingFutureCallback> future = new ConvertingFutureCallback>() { @Override public Map convert(Void result) { Map values = new HashMap<>(); @@ -296,7 +297,7 @@ public Future set(final Map values, final Callback @Override public Set> connectorPartitions(String connectorName) { - return connectorPartitions.getOrDefault(connectorName, Set.of()); + return connectorPartitions.getOrDefault(connectorName, Collections.emptySet()); } protected final Callback> consumedCallback = (error, record) -> { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java index 8de8d9ee18a81..0a9e383700605 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/KafkaStatusBackingStore.java @@ -53,6 +53,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -100,7 +101,6 @@ public class KafkaStatusBackingStore extends KafkaTopicBasedBackingStore impleme public static final String TRACE_KEY_NAME = "trace"; public static final String WORKER_ID_KEY_NAME = "worker_id"; public static final String GENERATION_KEY_NAME = "generation"; - public static final String VERSION_KEY_NAME = "version"; public static final String TOPIC_STATE_KEY = "topic"; public static final String TOPIC_NAME_KEY = "name"; @@ -113,7 +113,6 @@ public class KafkaStatusBackingStore extends KafkaTopicBasedBackingStore impleme .field(TRACE_KEY_NAME, SchemaBuilder.string().optional().build()) .field(WORKER_ID_KEY_NAME, Schema.STRING_SCHEMA) .field(GENERATION_KEY_NAME, Schema.INT32_SCHEMA) - .field(VERSION_KEY_NAME, Schema.OPTIONAL_STRING_SCHEMA) .build(); private static final Schema TOPIC_STATUS_VALUE_SCHEMA_V0 = SchemaBuilder.struct() @@ -198,7 +197,7 @@ public void configure(final WorkerConfig config) { Map topicSettings = config instanceof DistributedConfig ? ((DistributedConfig) config).statusStorageTopicSettings() - : Map.of(); + : Collections.emptyMap(); NewTopic topicDescription = TopicAdmin.defineTopic(statusTopic) .config(topicSettings) // first so that we override user-supplied settings as needed .compacted() @@ -401,8 +400,8 @@ public TopicStatus getTopic(String connector, String topic) { public Collection getAllTopics(String connector) { ConcurrentMap activeTopics = topics.get(Objects.requireNonNull(connector)); return activeTopics != null - ? Set.copyOf(Objects.requireNonNull(activeTopics.values())) - : Set.of(); + ? Collections.unmodifiableCollection(Objects.requireNonNull(activeTopics.values())) + : Collections.emptySet(); } @Override @@ -429,8 +428,7 @@ private ConnectorStatus parseConnectorStatus(String connector, byte[] data) { String trace = (String) statusMap.get(TRACE_KEY_NAME); String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME); int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue(); - String version = (String) statusMap.get(VERSION_KEY_NAME); - return new ConnectorStatus(connector, state, trace, workerUrl, generation, version); + return new ConnectorStatus(connector, state, trace, workerUrl, generation); } catch (Exception e) { log.error("Failed to deserialize connector status", e); return null; @@ -450,8 +448,7 @@ private TaskStatus parseTaskStatus(ConnectorTaskId taskId, byte[] data) { String trace = (String) statusMap.get(TRACE_KEY_NAME); String workerUrl = (String) statusMap.get(WORKER_ID_KEY_NAME); int generation = ((Long) statusMap.get(GENERATION_KEY_NAME)).intValue(); - String version = (String) statusMap.get(VERSION_KEY_NAME); - return new TaskStatus(taskId, state, workerUrl, generation, trace, version); + return new TaskStatus(taskId, state, workerUrl, generation, trace); } catch (Exception e) { log.error("Failed to deserialize task status", e); return null; @@ -490,7 +487,6 @@ private byte[] serialize(AbstractStatus status) { struct.put(TRACE_KEY_NAME, status.trace()); struct.put(WORKER_ID_KEY_NAME, status.workerId()); struct.put(GENERATION_KEY_NAME, status.generation()); - struct.put(VERSION_KEY_NAME, status.version()); return converter.fromConnectData(statusTopic, STATUS_SCHEMA_V0, struct); } @@ -508,7 +504,7 @@ protected byte[] serializeTopicStatus(TopicStatus status) { return converter.fromConnectData( statusTopic, TOPIC_STATUS_SCHEMA_V0, - Map.of(TOPIC_STATE_KEY, struct)); + Collections.singletonMap(TOPIC_STATE_KEY, struct)); } private String parseConnectorStatusKey(String key) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java index 254aaf89584a6..42c002c988e41 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryConfigBackingStore.java @@ -22,11 +22,14 @@ import org.apache.kafka.connect.runtime.WorkerConfigTransformer; import org.apache.kafka.connect.util.ConnectorTaskId; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.TreeMap; import java.util.concurrent.TimeUnit; @@ -36,6 +39,8 @@ */ public class MemoryConfigBackingStore implements ConfigBackingStore { + private static final Logger log = LoggerFactory.getLogger(MemoryConfigBackingStore.class); + private final Map connectors = new HashMap<>(); private UpdateListener updateListener; private WorkerConfigTransformer configTransformer; @@ -82,11 +87,11 @@ public synchronized ClusterConfigState snapshot() { connectorConfigs, connectorTargetStates, taskConfigs, - Map.of(), - Map.of(), + Collections.emptyMap(), + Collections.emptyMap(), appliedConnectorConfigs, - Set.of(), - Set.of(), + Collections.emptySet(), + Collections.emptySet(), configTransformer ); } diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java index a465bea968937..a51a405d3de0f 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/MemoryStatusBackingStore.java @@ -24,6 +24,7 @@ import org.apache.kafka.connect.util.Table; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -118,8 +119,8 @@ public TopicStatus getTopic(String connector, String topic) { public Collection getAllTopics(String connector) { ConcurrentMap activeTopics = topics.get(Objects.requireNonNull(connector)); return activeTopics != null - ? Set.copyOf(activeTopics.values()) - : Set.of(); + ? Collections.unmodifiableCollection(activeTopics.values()) + : Collections.emptySet(); } @Override diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java index c17d2fb099ca0..d9776e05dd3db 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/OffsetStorageReaderImpl.java @@ -23,10 +23,11 @@ import org.slf4j.LoggerFactory; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.CancellationException; @@ -60,7 +61,7 @@ public OffsetStorageReaderImpl(OffsetBackingStore backingStore, String namespace @Override public Map offset(Map partition) { - return offsets(List.of(partition)).get(partition); + return offsets(Collections.singletonList(partition)).get(partition); } @Override @@ -72,7 +73,7 @@ public Map, Map> offsets(Collection partition = (Map) keyList.get(1); connectorPartitions.computeIfAbsent(connectorName, ignored -> new HashSet<>()); if (offsetValue == null) { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/PrivilegedWriteException.java b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/PrivilegedWriteException.java index d9a112a1c7140..e4900fa9b0e59 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/storage/PrivilegedWriteException.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/storage/PrivilegedWriteException.java @@ -22,6 +22,9 @@ * Used when a write that requires {@link ConfigBackingStore#claimWritePrivileges() special privileges} fails */ public class PrivilegedWriteException extends ConnectException { + public PrivilegedWriteException(String message) { + super(message); + } public PrivilegedWriteException(String message, Throwable cause) { super(message, cause); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java index e1415bed57531..56f559dc245e0 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/PredicateDoc.java @@ -20,9 +20,10 @@ import org.apache.kafka.connect.runtime.isolation.Plugins; import org.apache.kafka.connect.transforms.predicates.Predicate; +import java.util.Collections; import java.util.Comparator; import java.util.List; -import java.util.Map; +import java.util.stream.Collectors; public class PredicateDoc { @@ -38,7 +39,7 @@ private

          > DocInfo(Class

          predicateClass, String overvie } } - private static final List PREDICATES = new Plugins(Map.of()).predicates().stream() + private static final List PREDICATES = new Plugins(Collections.emptyMap()).predicates().stream() .map(p -> { try { String overviewDoc = (String) p.pluginClass().getDeclaredField("OVERVIEW_DOC").get(null); @@ -49,7 +50,7 @@ private

          > DocInfo(Class

          predicateClass, String overvie } }) .sorted(Comparator.comparing(docInfo -> docInfo.predicateName)) - .toList(); + .collect(Collectors.toList()); private static String toHtml() { StringBuilder b = new StringBuilder(); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java index e3e9ad063d2f7..100f938bd9b5d 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/tools/TransformationDoc.java @@ -34,14 +34,24 @@ import org.apache.kafka.connect.transforms.TimestampRouter; import org.apache.kafka.connect.transforms.ValueToKey; +import java.util.Arrays; import java.util.List; public class TransformationDoc { - private record DocInfo(String transformationName, String overview, ConfigDef configDef) { + private static final class DocInfo { + final String transformationName; + final String overview; + final ConfigDef configDef; + + private DocInfo(String transformationName, String overview, ConfigDef configDef) { + this.transformationName = transformationName; + this.overview = overview; + this.configDef = configDef; + } } - private static final List TRANSFORMATIONS = List.of( + private static final List TRANSFORMATIONS = Arrays.asList( new DocInfo(Cast.class.getName(), Cast.OVERVIEW_DOC, Cast.CONFIG_DEF), new DocInfo(DropHeaders.class.getName(), DropHeaders.OVERVIEW_DOC, DropHeaders.CONFIG_DEF), new DocInfo(ExtractField.class.getName(), ExtractField.OVERVIEW_DOC, ExtractField.CONFIG_DEF), diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java index f628e8222741c..fd62fc172f4cf 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/Callback.java @@ -19,7 +19,6 @@ /** * Generic interface for callbacks */ -@FunctionalInterface public interface Callback { /** * Invoked upon completion of the operation. diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/ConnectorTaskId.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/ConnectorTaskId.java index 613caf1dbad64..1b69bd0179550 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/ConnectorTaskId.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/ConnectorTaskId.java @@ -20,30 +20,54 @@ import com.fasterxml.jackson.annotation.JsonProperty; import java.io.Serializable; +import java.util.Objects; /** * Unique ID for a single task. It includes a unique connector ID and a task ID that is unique within * the connector. */ -public record ConnectorTaskId(String connector, int task) implements Serializable, Comparable { +public class ConnectorTaskId implements Serializable, Comparable { + private final String connector; + private final int task; + @JsonCreator public ConnectorTaskId(@JsonProperty("connector") String connector, @JsonProperty("task") int task) { this.connector = connector; this.task = task; } - @Override @JsonProperty public String connector() { return connector; } - @Override @JsonProperty public int task() { return task; } + @Override + public boolean equals(Object o) { + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; + + ConnectorTaskId that = (ConnectorTaskId) o; + + if (task != that.task) + return false; + + return Objects.equals(connector, that.connector); + } + + @Override + public int hashCode() { + int result = connector != null ? connector.hashCode() : 0; + result = 31 * result + task; + return result; + } + @Override public String toString() { return connector + '-' + task; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java index 5452ee9e1ee18..e36df1b7dbc57 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/KafkaBasedLog.java @@ -44,6 +44,7 @@ import java.time.Duration; import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -184,8 +185,8 @@ public static KafkaBasedLog withExistingClients(String topic, Objects.requireNonNull(topicAdmin); Objects.requireNonNull(readTopicPartition); return new KafkaBasedLog<>(topic, - Map.of(), - Map.of(), + Collections.emptyMap(), + Collections.emptyMap(), () -> topicAdmin, consumedCallback, time, diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java index a83c515e73adf..fa6a0b9cccd91 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/LoggingContext.java @@ -19,9 +19,9 @@ import org.slf4j.MDC; import java.util.Collection; +import java.util.Collections; import java.util.Map; import java.util.Objects; -import java.util.Set; /** * A utility for defining Mapped Diagnostic Context (MDC) for SLF4J logs. @@ -49,7 +49,7 @@ public final class LoggingContext implements AutoCloseable { */ public static final String CONNECTOR_CONTEXT = "connector.context"; - public static final Collection ALL_CONTEXTS = Set.of(CONNECTOR_CONTEXT); + public static final Collection ALL_CONTEXTS = Collections.singleton(CONNECTOR_CONTEXT); /** * The Scope values used by Connect when specifying the context. diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java index 620eec2f13933..70bcf8c427e6b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/SinkUtils.java @@ -23,6 +23,7 @@ import org.apache.kafka.connect.runtime.rest.errors.BadRequestException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -47,7 +48,7 @@ public static ConnectorOffsets consumerGroupOffsetsToConnectorOffsets(Map row(R row) { Map columns = table.get(row); if (columns == null) - return Map.of(); - return Map.copyOf(columns); + return Collections.emptyMap(); + return Collections.unmodifiableMap(columns); } public boolean isEmpty() { diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java index 67285c1c197cc..348beb002330c 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicAdmin.java @@ -56,6 +56,7 @@ import java.time.Duration; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -72,8 +73,8 @@ */ public class TopicAdmin implements AutoCloseable { - public static final TopicCreationResponse EMPTY_CREATION = new TopicCreationResponse(Set.of(), Set.of()); - private static final List> CAUSES_TO_RETRY_TOPIC_CREATION = List.of( + public static final TopicCreationResponse EMPTY_CREATION = new TopicCreationResponse(Collections.emptySet(), Collections.emptySet()); + private static final List> CAUSES_TO_RETRY_TOPIC_CREATION = Arrays.asList( InvalidReplicationFactorException.class, TimeoutException.class); @@ -83,8 +84,8 @@ public static class TopicCreationResponse { private final Set existing; public TopicCreationResponse(Set createdTopicNames, Set existingTopicNames) { - this.created = Set.copyOf(createdTopicNames); - this.existing = Set.copyOf(existingTopicNames); + this.created = Collections.unmodifiableSet(createdTopicNames); + this.existing = Collections.unmodifiableSet(existingTopicNames); } public Set createdTopics() { @@ -472,12 +473,12 @@ public TopicCreationResponse createOrFindTopics(NewTopic... topics) { */ public Map describeTopics(String... topics) { if (topics == null) { - return Map.of(); + return Collections.emptyMap(); } String topicNameList = String.join(", ", topics); Map> newResults = - admin.describeTopics(List.of(topics), new DescribeTopicsOptions()).topicNameValues(); + admin.describeTopics(Arrays.asList(topics), new DescribeTopicsOptions()).topicNameValues(); // Iterate over each future so that we can handle individual failures like when some topics don't exist Map existingTopics = new HashMap<>(); @@ -535,7 +536,7 @@ public boolean verifyTopicCleanupPolicyOnlyCompact(String topic, String workerTo + "describe topic configurations.", topic, TopicConfig.CLEANUP_POLICY_COMPACT); return false; } - Set expectedPolicies = Set.of(TopicConfig.CLEANUP_POLICY_COMPACT); + Set expectedPolicies = Collections.singleton(TopicConfig.CLEANUP_POLICY_COMPACT); if (!cleanupPolicies.equals(expectedPolicies)) { String expectedPolicyStr = String.join(",", expectedPolicies); String cleanupPolicyStr = String.join(",", cleanupPolicies); @@ -565,7 +566,7 @@ public Set topicCleanupPolicy(String topic) { if (topicConfig == null) { // The topic must not exist log.debug("Unable to find topic '{}' when getting cleanup policy", topic); - return Set.of(); + return Collections.emptySet(); } ConfigEntry entry = topicConfig.get(CLEANUP_POLICY_CONFIG); if (entry != null && entry.value() != null) { @@ -580,7 +581,7 @@ public Set topicCleanupPolicy(String topic) { // This is unexpected, as the topic config should include the cleanup.policy even if // the topic settings don't override the broker's log.cleanup.policy. But just to be safe. log.debug("Found no cleanup.policy for topic '{}'", topic); - return Set.of(); + return Collections.emptySet(); } /** @@ -619,7 +620,7 @@ public Config describeTopicConfig(String topic) { */ public Map describeTopicConfigs(String... topicNames) { if (topicNames == null) { - return Map.of(); + return Collections.emptyMap(); } Collection topics = Arrays.stream(topicNames) .filter(Objects::nonNull) @@ -627,7 +628,7 @@ public Map describeTopicConfigs(String... topicNames) { .filter(s -> !s.isEmpty()) .collect(Collectors.toList()); if (topics.isEmpty()) { - return Map.of(); + return Collections.emptyMap(); } String topicNameList = String.join(", ", topics); Collection resources = topics.stream() @@ -685,7 +686,7 @@ public Map describeTopicConfigs(String... topicNames) { */ public Map endOffsets(Set partitions) { if (partitions == null || partitions.isEmpty()) { - return Map.of(); + return Collections.emptyMap(); } Map offsetSpecMap = partitions.stream().collect(Collectors.toMap(Function.identity(), tp -> OffsetSpec.latest())); ListOffsetsResult resultFuture = admin.listOffsets(offsetSpecMap, new ListOffsetsOptions(IsolationLevel.READ_UNCOMMITTED)); diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java index f98d1afa5b239..45c12aa292a39 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreation.java @@ -18,6 +18,7 @@ import org.apache.kafka.connect.runtime.WorkerConfig; +import java.util.Collections; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.Map; @@ -31,7 +32,7 @@ */ public class TopicCreation { private static final TopicCreation EMPTY = - new TopicCreation(false, null, Map.of(), Set.of()); + new TopicCreation(false, null, Collections.emptyMap(), Collections.emptySet()); private final boolean isTopicCreationEnabled; private final TopicCreationGroup defaultTopicGroup; diff --git a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java index e5694c944d4c8..fb007314c798b 100644 --- a/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java +++ b/connect/runtime/src/main/java/org/apache/kafka/connect/util/TopicCreationGroup.java @@ -20,6 +20,7 @@ import org.apache.kafka.connect.runtime.SourceConnectorConfig; import org.apache.kafka.connect.runtime.TopicCreationConfig; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -63,7 +64,7 @@ protected TopicCreationGroup(String group, SourceConnectorConfig config) { */ public static Map configuredGroups(SourceConnectorConfig config) { if (!config.usesTopicCreation()) { - return Map.of(); + return Collections.emptyMap(); } List groupNames = config.getList(TOPIC_CREATION_GROUPS_CONFIG); Map groups = new LinkedHashMap<>(); @@ -121,9 +122,10 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof TopicCreationGroup that)) { + if (!(o instanceof TopicCreationGroup)) { return false; } + TopicCreationGroup that = (TopicCreationGroup) o; return Objects.equals(name, that.name) && numPartitions == that.numPartitions && replicationFactor == that.replicationFactor diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java index 286a8c212fe8e..82d67254f504b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/NoneConnectorClientConfigOverridePolicyTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -31,7 +32,7 @@ public class NoneConnectorClientConfigOverridePolicyTest extends BaseConnectorCl @Test public void testNoOverrides() { - testValidOverride(Map.of()); + testValidOverride(Collections.emptyMap()); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java index 1b566a6de7747..94567f960f4ee 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/connector/policy/PrincipalConnectorClientConfigOverridePolicyTest.java @@ -22,6 +22,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -31,7 +32,8 @@ public class PrincipalConnectorClientConfigOverridePolicyTest extends BaseConnec @Test public void testPrincipalOnly() { - testValidOverride(Map.of(SaslConfigs.SASL_JAAS_CONFIG, "test")); + Map clientConfig = Collections.singletonMap(SaslConfigs.SASL_JAAS_CONFIG, "test"); + testValidOverride(clientConfig); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java index 89456699e6933..eac691ab06761 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/BooleanConverterTest.java @@ -25,7 +25,7 @@ import org.junit.jupiter.api.Test; import java.nio.charset.StandardCharsets; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -42,7 +42,7 @@ public class BooleanConverterTest { @BeforeEach public void setUp() { - converter.configure(Map.of(), false); + converter.configure(Collections.emptyMap(), false); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java index 0d926e42dd6f6..7386360f4fa82 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/converters/ByteArrayConverterTest.java @@ -27,7 +27,7 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -42,7 +42,7 @@ public class ByteArrayConverterTest { @BeforeEach public void setUp() { - converter.configure(Map.of(), false); + converter.configure(Collections.emptyMap(), false); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java index 87608fa07df57..17135a6936613 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/BlockingConnectorTest.java @@ -48,6 +48,7 @@ import org.slf4j.LoggerFactory; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -322,9 +323,9 @@ private void createNormalConnector() { normalConnectorHandle.expectedCommits(NUM_RECORDS_PRODUCED); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); - props.put(TestableSourceConnector.TOPIC_CONFIG, TEST_TOPIC); + props.put(MonitorableSourceConnector.TOPIC_CONFIG, TEST_TOPIC); log.info("Creating normal connector"); try { connect.configureConnector(NORMAL_CONNECTOR_NAME, props); @@ -576,7 +577,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { block.maybeBlockOn(CONNECTOR_TASK_CONFIGS); - return List.of(Map.of()); + return Collections.singletonList(Collections.emptyMap()); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java index d46d76c3606ef..079887c361d24 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectWorkerIntegrationTest.java @@ -43,6 +43,7 @@ import org.apache.kafka.connect.util.SinkUtils; import org.apache.kafka.connect.util.clusters.EmbeddedConnectCluster; import org.apache.kafka.connect.util.clusters.WorkerHandle; +import org.apache.kafka.network.SocketServerConfigs; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; @@ -56,8 +57,11 @@ import java.io.File; import java.io.FileOutputStream; +import java.io.IOException; +import java.net.ServerSocket; import java.nio.file.Path; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -78,7 +82,7 @@ import static org.apache.kafka.common.config.TopicConfig.DELETE_RETENTION_MS_CONFIG; import static org.apache.kafka.common.config.TopicConfig.SEGMENT_MS_CONFIG; import static org.apache.kafka.connect.integration.BlockingConnectorTest.TASK_STOP; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_PRODUCER_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; @@ -228,7 +232,7 @@ public void testRestartFailedTask() throws Exception { // Restart the failed task String taskRestartEndpoint = connect.endpointForResource( String.format("connectors/%s/tasks/0/restart", CONNECTOR_NAME)); - connect.requestPost(taskRestartEndpoint, "", Map.of()); + connect.requestPost(taskRestartEndpoint, "", Collections.emptyMap()); // Ensure the task started successfully this time connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(CONNECTOR_NAME, numTasks, @@ -243,6 +247,8 @@ public void testBrokerCoordinator() throws Exception { ConnectorHandle connectorHandle = RuntimeHandles.get().connectorHandle(CONNECTOR_NAME); workerProps.put(DistributedConfig.SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, String.valueOf(5000)); + useFixedBrokerPort(); + // start the clusters connect = connectBuilder.build(); connect.start(); @@ -325,7 +331,7 @@ public void testTaskStatuses() throws Exception { // base connector props Map props = defaultSourceConnectorProps(TOPIC_NAME); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); // start the connector with only one task int initialNumTasks = 1; @@ -373,7 +379,7 @@ public void testSourceTaskNotBlockedOnShutdownWithNonExistentTopic() throws Exce NUM_TASKS, "Connector tasks did not start in time"); connector.awaitRecords(TimeUnit.MINUTES.toMillis(1)); - // Then, if we delete the connector, it and each of its tasks should be stopped by the framework + // Then if we delete the connector, it and each of its tasks should be stopped by the framework // even though the producer is blocked because there is no topic StartAndStopLatch stopCounter = connector.expectedStops(1); connect.deleteConnector(CONNECTOR_NAME); @@ -433,8 +439,8 @@ public void testPauseStopResume() throws Exception { "Connector did not stop in time" ); // If the connector is truly stopped, we should also see an empty set of tasks and task configs - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Transition to RUNNING connect.resumeConnector(CONNECTOR_NAME); @@ -462,8 +468,8 @@ public void testPauseStopResume() throws Exception { CONNECTOR_NAME, "Connector did not stop in time" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Transition to PAUSED connect.pauseConnector(CONNECTOR_NAME); @@ -519,8 +525,8 @@ public void testStoppedState() throws Exception { "Connector did not stop in time" ); // If the connector is truly stopped, we should also see an empty set of tasks and task configs - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Can resume a connector after its Connector has failed before shutdown after receiving a stop request props.remove("connector.start.inject.error"); @@ -541,8 +547,8 @@ public void testStoppedState() throws Exception { CONNECTOR_NAME, "Connector did not stop in time" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Can resume a connector after its Connector has failed during shutdown after receiving a stop request connect.resumeConnector(CONNECTOR_NAME); @@ -579,8 +585,8 @@ public void testCreateConnectorWithPausedInitialState() throws Exception { 0, "Connector was not created in a paused state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the PAUSED state can be resumed successfully connect.resumeConnector(CONNECTOR_NAME); @@ -614,16 +620,16 @@ public void testCreateSourceConnectorWithStoppedInitialStateAndModifyOffsets() t CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that the offsets can be modified for a source connector created in the STOPPED state // Alter the offsets so that only 5 messages are produced connect.alterSourceConnectorOffset( CONNECTOR_NAME, - Map.of("task.id", CONNECTOR_NAME + "-0"), - Map.of("saved", 5L) + Collections.singletonMap("task.id", CONNECTOR_NAME + "-0"), + Collections.singletonMap("saved", 5L) ); // Verify that a connector created in the STOPPED state can be resumed successfully @@ -668,8 +674,8 @@ public void testCreateSinkConnectorWithStoppedInitialStateAndModifyOffsets() thr CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that the offsets can be modified for a sink connector created in the STOPPED state @@ -725,8 +731,8 @@ public void testDeleteConnectorCreatedWithPausedOrStoppedInitialState() throws E 0, "Connector was not created in a paused state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the PAUSED state can be deleted successfully connect.deleteConnector(CONNECTOR_NAME); @@ -746,8 +752,8 @@ public void testDeleteConnectorCreatedWithPausedOrStoppedInitialState() throws E CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the STOPPED state can be deleted successfully connect.deleteConnector(CONNECTOR_NAME); @@ -791,7 +797,7 @@ public void testPatchConnectorConfig() throws Exception { private Map defaultSinkConnectorProps(String topics) { // setup props for the sink connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, topics); @@ -807,6 +813,8 @@ public void testRequestTimeouts() throws Exception { workerProps.put(SCHEDULED_REBALANCE_MAX_DELAY_MS_CONFIG, "0"); workerProps.put(METADATA_RECOVERY_STRATEGY_CONFIG, MetadataRecoveryStrategy.NONE.name); + useFixedBrokerPort(); + connect = connectBuilder .numWorkers(1) .build(); @@ -986,7 +994,7 @@ public void testTasksMaxEnforcement() throws Exception { int maxTasks = 1; connectorProps.put(TASKS_MAX_CONFIG, Integer.toString(maxTasks)); int numTasks = 2; - connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector that generates excessive tasks will be failed with an expected error message @@ -1013,12 +1021,12 @@ public void testTasksMaxEnforcement() throws Exception { // an existing set of task configs that was written before the cluster was upgraded try (JsonConverter converter = new JsonConverter()) { converter.configure( - Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), + Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), false ); for (int i = 0; i < numTasks; i++) { - Map taskConfig = TestableSourceConnector.taskConfig( + Map taskConfig = MonitorableSourceConnector.taskConfig( connectorProps, CONNECTOR_NAME, i @@ -1069,7 +1077,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks++; - connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector will be allowed to generate excessive tasks when tasks.max.enforce is set to false @@ -1080,7 +1088,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks = maxTasks; - connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connectorProps.put(TASKS_MAX_ENFORCE_CONFIG, "true"); connect.configureConnector(CONNECTOR_NAME, connectorProps); @@ -1091,7 +1099,7 @@ public void testTasksMaxEnforcement() throws Exception { ); numTasks = maxTasks + 1; - connectorProps.put(TestableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); + connectorProps.put(MonitorableSourceConnector.NUM_TASKS, Integer.toString(numTasks)); connect.configureConnector(CONNECTOR_NAME, connectorProps); // A connector that generates excessive tasks after being reconfigured will be failed, but its existing tasks will continue running @@ -1325,7 +1333,7 @@ public void testRuntimePropertyReconfiguration() throws Exception { "Connector did not start or task did not fail in time" ); assertEquals( - new ConnectorOffsets(List.of()), + new ConnectorOffsets(Collections.emptyList()), connect.connectorOffsets(CONNECTOR_NAME), "Connector should not have any committed offsets when only task fails on first record" ); @@ -1345,9 +1353,9 @@ public void testRuntimePropertyReconfiguration() throws Exception { Map expectedOffsetKey = new HashMap<>(); expectedOffsetKey.put(SinkUtils.KAFKA_TOPIC_KEY, topic); expectedOffsetKey.put(SinkUtils.KAFKA_PARTITION_KEY, 0); - Map expectedOffsetValue = Map.of(SinkUtils.KAFKA_OFFSET_KEY, 1); + Map expectedOffsetValue = Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 1); ConnectorOffset expectedOffset = new ConnectorOffset(expectedOffsetKey, expectedOffsetValue); - ConnectorOffsets expectedOffsets = new ConnectorOffsets(List.of(expectedOffset)); + ConnectorOffsets expectedOffsets = new ConnectorOffsets(Collections.singletonList(expectedOffset)); // Wait for it to commit offsets, signaling that it has successfully processed the record we produced earlier waitForCondition( @@ -1385,7 +1393,7 @@ public void testPluginAliases() throws Exception { final String sourceConnectorName = "plugins-alias-test-source"; Map sourceConnectorConfig = new HashMap<>(baseConnectorConfig); // Aliased source connector class - sourceConnectorConfig.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + sourceConnectorConfig.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); // Connector-specific properties sourceConnectorConfig.put(TOPIC_CONFIG, topic); sourceConnectorConfig.put("throughput", "10"); @@ -1399,7 +1407,7 @@ public void testPluginAliases() throws Exception { final String sinkConnectorName = "plugins-alias-test-sink"; Map sinkConnectorConfig = new HashMap<>(baseConnectorConfig); // Aliased sink connector class - sinkConnectorConfig.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + sinkConnectorConfig.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); // Connector-specific properties sinkConnectorConfig.put(TOPICS_CONFIG, topic); // Create the connector and ensure it and its tasks can start @@ -1411,7 +1419,7 @@ public void testPluginAliases() throws Exception { private Map defaultSourceConnectorProps(String topic) { // setup props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", "10"); @@ -1423,6 +1431,23 @@ private Map defaultSourceConnectorProps(String topic) { return props; } + private void useFixedBrokerPort() throws IOException { + // Find a free port and use it in the Kafka broker's listeners config. We can't use port 0 in the listeners + // config to get a random free port because in this test we want to stop the Kafka broker and then bring it + // back up and listening on the same port in order to verify that the Connect cluster can re-connect to Kafka + // and continue functioning normally. If we were to use port 0 here, the Kafka broker would most likely listen + // on a different random free port the second time it is started. Note that we can only use the static port + // because we have a single broker setup in this test. + int listenerPort; + try (ServerSocket s = new ServerSocket(0)) { + listenerPort = s.getLocalPort(); + } + brokerProps.put(SocketServerConfigs.LISTENERS_CONFIG, String.format("EXTERNAL://localhost:%d,CONTROLLER://localhost:0", listenerPort)); + connectBuilder + .numBrokers(1) + .brokerProps(brokerProps); + } + public static class EmptyTaskConfigsConnector extends SinkConnector { @Override public String version() { @@ -1442,7 +1467,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { return IntStream.range(0, maxTasks) - .mapToObj(i -> Map.of()) + .mapToObj(i -> Collections.emptyMap()) .collect(Collectors.toList()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java index c42402eea2ef1..ef55e0b3258b5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorClientPolicyIntegrationTest.java @@ -148,7 +148,7 @@ private void assertPassCreateConnector(String policy, Map props) public Map basicConnectorConfig() { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, "test-topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java index 7faf2311c133d..074c6eb91fb25 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorHandle.java @@ -23,12 +23,14 @@ import org.slf4j.LoggerFactory; import java.util.Collection; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import java.util.stream.Collectors; import java.util.stream.IntStream; /** @@ -281,8 +283,8 @@ public StartAndStopLatch expectedStarts(int expectedStarts, boolean includeTasks List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStarts(expectedStarts)) - .toList() - : List.of(); + .collect(Collectors.toList()) + : Collections.emptyList(); return startAndStopCounter.expectedStarts(expectedStarts, taskLatches); } @@ -290,8 +292,8 @@ public StartAndStopLatch expectedStarts(int expectedStarts, Map List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStarts(expectedTasksStarts.get(task.taskId()))) - .toList() - : List.of(); + .collect(Collectors.toList()) + : Collections.emptyList(); return startAndStopCounter.expectedStarts(expectedStarts, taskLatches); } @@ -343,8 +345,8 @@ public StartAndStopLatch expectedStops(int expectedStops, boolean includeTasks) List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStops(expectedStops)) - .toList() - : List.of(); + .collect(Collectors.toList()) + : Collections.emptyList(); return startAndStopCounter.expectedStops(expectedStops, taskLatches); } @@ -352,8 +354,8 @@ public StartAndStopLatch expectedStops(int expectedStops, Map e List taskLatches = includeTasks ? taskHandles.values().stream() .map(task -> task.expectedStops(expectedTasksStops.get(task.taskId()))) - .toList() - : List.of(); + .collect(Collectors.toList()) + : Collections.emptyList(); return startAndStopCounter.expectedStops(expectedStops, taskLatches); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java index 2859a1c71f063..9b76bf2ce64cb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorRestartApiIntegrationTest.java @@ -30,6 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -41,7 +42,7 @@ import jakarta.ws.rs.core.Response; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -130,7 +131,7 @@ public void testRestartUnknownConnectorNoParams() { // Call the Restart API String restartEndpoint = connect.endpointForResource( String.format("connectors/%s/restart", connectorName)); - Response response = connect.requestPost(restartEndpoint, "", Map.of()); + Response response = connect.requestPost(restartEndpoint, "", Collections.emptyMap()); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); } @@ -151,7 +152,7 @@ private void restartUnknownConnector(boolean onlyFailed, boolean includeTasks) { // Call the Restart API String restartEndpoint = connect.endpointForResource( String.format("connectors/%s/restart?onlyFailed=" + onlyFailed + "&includeTasks=" + includeTasks, connectorName)); - Response response = connect.requestPost(restartEndpoint, "", Map.of()); + Response response = connect.requestPost(restartEndpoint, "", Collections.emptyMap()); assertEquals(Response.Status.NOT_FOUND.getStatusCode(), response.getStatus()); } @@ -212,7 +213,7 @@ public void testFailedTasksRestartBothConnectorAndTasks() throws Exception { @Test public void testOneFailedTasksRestartOnlyOneTasks() throws Exception { - Set tasksToFail = Set.of(taskId(1)); + Set tasksToFail = Collections.singleton(taskId(1)); failedTasksRestart(true, true, 0, buildExpectedTasksRestarts(tasksToFail), tasksToFail, false); } @@ -414,7 +415,7 @@ private String taskId(int i) { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", "10"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java index d8572ef3ba957..fb4bbcdf408ec 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorTopicsIntegrationTest.java @@ -34,6 +34,8 @@ import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; @@ -41,9 +43,10 @@ import java.util.Objects; import java.util.Properties; import java.util.Set; +import java.util.stream.Collectors; import java.util.stream.StreamSupport; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -112,7 +115,7 @@ public void testGetActiveTopics() throws InterruptedException { connect.kafka().createTopic(FOO_TOPIC, NUM_TOPIC_PARTITIONS); connect.kafka().createTopic(BAR_TOPIC, NUM_TOPIC_PARTITIONS); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), "Active topic set is not empty for connector: " + FOO_CONNECTOR); // start a source connector @@ -121,8 +124,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(FOO_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(FOO_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); // start another source connector connect.configureConnector(BAR_CONNECTOR, defaultSourceConnectorProps(BAR_TOPIC)); @@ -130,8 +133,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(BAR_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, List.of(BAR_TOPIC), - "Active topic set is not: " + List.of(BAR_TOPIC) + " for connector: " + BAR_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, Collections.singletonList(BAR_TOPIC), + "Active topic set is not: " + Collections.singletonList(BAR_TOPIC) + " for connector: " + BAR_CONNECTOR); // start a sink connector connect.configureConnector(SINK_CONNECTOR, defaultSinkConnectorProps(FOO_TOPIC, BAR_TOPIC)); @@ -139,8 +142,8 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(SINK_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC, BAR_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC, BAR_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Arrays.asList(FOO_TOPIC, BAR_TOPIC), + "Active topic set is not: " + Arrays.asList(FOO_TOPIC, BAR_TOPIC) + " for connector: " + SINK_CONNECTOR); // deleting a connector resets its active topics connect.deleteConnector(BAR_CONNECTOR); @@ -148,7 +151,7 @@ public void testGetActiveTopics() throws InterruptedException { connect.assertions().assertConnectorDoesNotExist(BAR_CONNECTOR, "Connector wasn't deleted in time."); - connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, List.of(), + connect.assertions().assertConnectorActiveTopics(BAR_CONNECTOR, Collections.emptyList(), "Active topic set is not empty for deleted connector: " + BAR_CONNECTOR); // Unfortunately there's currently no easy way to know when the consumer caught up with @@ -159,8 +162,8 @@ public void testGetActiveTopics() throws InterruptedException { // reset active topics for the sink connector after one of the topics has become idle connect.resetConnectorTopics(SINK_CONNECTOR); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); } @Test @@ -174,7 +177,7 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.kafka().createTopic(FOO_TOPIC, NUM_TOPIC_PARTITIONS); connect.kafka().createTopic(BAR_TOPIC, NUM_TOPIC_PARTITIONS); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), "Active topic set is not empty for connector: " + FOO_CONNECTOR); // start a source connector @@ -183,8 +186,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(FOO_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(FOO_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + FOO_CONNECTOR); // start a sink connector connect.configureConnector(SINK_CONNECTOR, defaultSinkConnectorProps(FOO_TOPIC)); @@ -192,8 +195,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorAndAtLeastNumTasksAreRunning(SINK_CONNECTOR, NUM_TASKS, "Connector tasks did not start in time."); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); // deleting a connector resets its active topics connect.deleteConnector(FOO_CONNECTOR); @@ -201,7 +204,7 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { connect.assertions().assertConnectorDoesNotExist(FOO_CONNECTOR, "Connector wasn't deleted in time."); - connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, List.of(), + connect.assertions().assertConnectorActiveTopics(FOO_CONNECTOR, Collections.emptyList(), "Active topic set is not empty for deleted connector: " + FOO_CONNECTOR); // Unfortunately there's currently no easy way to know when the consumer caught up with @@ -213,8 +216,8 @@ public void testTopicTrackingResetIsDisabled() throws InterruptedException { Exception e = assertThrows(ConnectRestException.class, () -> connect.resetConnectorTopics(SINK_CONNECTOR)); assertTrue(e.getMessage().contains("Topic tracking reset is disabled.")); - connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, List.of(FOO_TOPIC), - "Active topic set is not: " + List.of(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); + connect.assertions().assertConnectorActiveTopics(SINK_CONNECTOR, Collections.singletonList(FOO_TOPIC), + "Active topic set is not: " + Collections.singletonList(FOO_TOPIC) + " for connector: " + SINK_CONNECTOR); } @Test @@ -249,7 +252,7 @@ public void testTopicTrackingIsDisabled() throws InterruptedException { public void assertNoTopicStatusInStatusTopic() { String statusTopic = workerProps.get(DistributedConfig.STATUS_STORAGE_TOPIC_CONFIG); Consumer verifiableConsumer = connect.kafka().createConsumer( - Map.of("group.id", "verifiable-consumer-group-0")); + Collections.singletonMap("group.id", "verifiable-consumer-group-0")); List partitionInfos = verifiableConsumer.partitionsFor(statusTopic); if (partitionInfos.isEmpty()) { @@ -257,7 +260,7 @@ public void assertNoTopicStatusInStatusTopic() { } List partitions = partitionInfos.stream() .map(info -> new TopicPartition(info.topic(), info.partition())) - .toList(); + .collect(Collectors.toList()); verifiableConsumer.assign(partitions); // Based on the implementation of {@link org.apache.kafka.connect.util.KafkaBasedLog#readToLogEnd} @@ -293,7 +296,7 @@ public void assertNoTopicStatusInStatusTopic() { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); @@ -308,7 +311,7 @@ private Map defaultSourceConnectorProps(String topic) { private Map defaultSinkConnectorProps(String... topics) { // setup up props for the sink connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, String.join(",", topics)); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java index eb8b59de015d6..2805504e360d9 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ConnectorValidationIntegrationTest.java @@ -39,7 +39,7 @@ import java.util.Map; import static org.apache.kafka.clients.consumer.ConsumerConfig.GROUP_ID_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG; @@ -228,7 +228,7 @@ public void testConnectorHasInvalidTransformClass() throws InterruptedException Map config = defaultSinkConnectorProps(); String transformName = "t"; config.put(TRANSFORMS_CONFIG, transformName); - config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", TestableSinkConnector.class.getName()); + config.put(TRANSFORMS_CONFIG + "." + transformName + ".type", MonitorableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -289,7 +289,7 @@ public void testConnectorHasInvalidPredicateClass() throws InterruptedException Map config = defaultSinkConnectorProps(); String predicateName = "p"; config.put(PREDICATES_CONFIG, predicateName); - config.put(PREDICATES_CONFIG + "." + predicateName + ".type", TestableSinkConnector.class.getName()); + config.put(PREDICATES_CONFIG + "." + predicateName + ".type", MonitorableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -315,7 +315,7 @@ public void testConnectorHasMissingConverterClass() throws InterruptedException @Test public void testConnectorHasInvalidConverterClassType() throws InterruptedException { Map config = defaultSinkConnectorProps(); - config.put(KEY_CONVERTER_CLASS_CONFIG, TestableSinkConnector.class.getName()); + config.put(KEY_CONVERTER_CLASS_CONFIG, MonitorableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -413,7 +413,7 @@ public void testConnectorHasMissingHeaderConverterClass() throws InterruptedExce @Test public void testConnectorHasInvalidHeaderConverterClassType() throws InterruptedException { Map config = defaultSinkConnectorProps(); - config.put(HEADER_CONVERTER_CLASS_CONFIG, TestableSinkConnector.class.getName()); + config.put(HEADER_CONVERTER_CLASS_CONFIG, MonitorableSinkConnector.class.getName()); connect.assertions().assertExactlyNumErrorsOnConnectorConfigValidation( config.get(CONNECTOR_CLASS_CONFIG), config, @@ -560,7 +560,7 @@ private Map defaultSourceConnectorProps() { // setup up props for the source connector Map props = new HashMap<>(); props.put(NAME_CONFIG, "source-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, "t1"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -572,7 +572,7 @@ private Map defaultSinkConnectorProps() { // setup up props for the sink connector Map props = new HashMap<>(); props.put(NAME_CONFIG, "sink-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPICS_CONFIG, "t1"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java index c87b854d4bbb8..bcac3505f5de3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrantRecordSinkConnector.java @@ -30,14 +30,14 @@ import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; -public class ErrantRecordSinkConnector extends TestableSinkConnector { +public class ErrantRecordSinkConnector extends MonitorableSinkConnector { @Override public Class taskClass() { return ErrantRecordSinkTask.class; } - public static class ErrantRecordSinkTask extends TestableSinkTask { + public static class ErrantRecordSinkTask extends MonitorableSinkTask { private ErrantRecordReporter reporter; private ExecutorService executorService; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java index 58caffa2b2f53..6f386267e21fc 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ErrorHandlingIntegrationTest.java @@ -110,7 +110,7 @@ public void testSkipRetryAndDLQWithHeaders() throws Exception { // setup connector config Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, "test-topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java index 44f895b8a513b..494af3358e0dd 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExactlyOnceSourceIntegrationTest.java @@ -67,6 +67,7 @@ import java.io.Closeable; import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -83,11 +84,11 @@ import static org.apache.kafka.clients.producer.ProducerConfig.CLIENT_ID_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG; import static org.apache.kafka.clients.producer.ProducerConfig.TRANSACTIONAL_ID_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.CUSTOM_TRANSACTION_BOUNDARIES_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.MAX_MESSAGES_PER_SECOND_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.MESSAGES_PER_POLL_CONFIG; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.CUSTOM_TRANSACTION_BOUNDARIES_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.MAX_MESSAGES_PER_SECOND_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.MESSAGES_PER_POLL_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -182,7 +183,7 @@ public void testPreflightValidation() { startConnect(); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, "topic"); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -193,8 +194,8 @@ public void testPreflightValidation() { props.put(EXACTLY_ONCE_SUPPORT_CONFIG, "required"); // Connector will return null from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_NULL); - ConfigInfos validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_NULL); + ConfigInfos validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); ConfigInfo propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); @@ -202,56 +203,56 @@ public void testPreflightValidation() { "Preflight validation for exactly-once support property should have at least one error message"); // Connector will return UNSUPPORTED from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_UNSUPPORTED); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_UNSUPPORTED); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for exactly-once support property should have at least one error message"); // Connector will throw an exception from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_FAIL); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_FAIL); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(EXACTLY_ONCE_SUPPORT_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for exactly-once support property should have at least one error message"); // Connector will return SUPPORTED from SourceConnector::exactlyOnceSupport - props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, TestableSourceConnector.EXACTLY_ONCE_SUPPORTED); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, MonitorableSourceConnector.EXACTLY_ONCE_SUPPORTED); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(0, validation.errorCount(), "Preflight validation should have zero errors"); // Test out the transaction boundary definition property props.put(TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString()); // Connector will return null from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_NULL); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_NULL); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will return UNSUPPORTED from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_UNSUPPORTED); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_UNSUPPORTED); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will throw an exception from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_FAIL); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_FAIL); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(1, validation.errorCount(), "Preflight validation should have exactly one error"); propertyValidation = findConfigInfo(TRANSACTION_BOUNDARY_CONFIG, validation); assertFalse(propertyValidation.configValue().errors().isEmpty(), "Preflight validation for transaction boundary property should have at least one error message"); // Connector will return SUPPORTED from SourceConnector::canDefineTransactionBoundaries - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); - validation = connect.validateConnectorConfig(TestableSourceConnector.class.getSimpleName(), props); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); + validation = connect.validateConnectorConfig(MonitorableSourceConnector.class.getSimpleName(), props); assertEquals(0, validation.errorCount(), "Preflight validation should have zero errors"); } @@ -273,7 +274,7 @@ public void testPollBoundary() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -305,7 +306,7 @@ public void testPollBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -332,7 +333,7 @@ public void testIntervalBoundary() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -365,7 +366,7 @@ public void testIntervalBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -392,14 +393,14 @@ public void testConnectorBoundary() throws Exception { connect.kafka().createTopic(topic, 3); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, "1"); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(NAME_CONFIG, CONNECTOR_NAME); props.put(TRANSACTION_BOUNDARY_CONFIG, CONNECTOR.toString()); - props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TestableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); + props.put(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, MonitorableSourceConnector.TRANSACTION_BOUNDARIES_SUPPORTED); props.put(MESSAGES_PER_POLL_CONFIG, MESSAGES_PER_POLL); props.put(MAX_MESSAGES_PER_SECOND_CONFIG, MESSAGES_PER_SECOND); @@ -426,7 +427,7 @@ public void testConnectorBoundary() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords sourceRecords = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -494,7 +495,7 @@ public void testFencedLeaderRecovery() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -537,7 +538,7 @@ public void testFencedLeaderRecovery() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -561,7 +562,7 @@ public void testConnectorReconfiguration() throws Exception { connect.kafka().createTopic(topic, 3); Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -600,7 +601,7 @@ public void testConnectorReconfiguration() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords records = connect.kafka().consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -663,12 +664,12 @@ public void testTasksFailOnInabilityToFence() throws Exception { String topic = "test-topic"; try (Admin admin = connect.kafka().createAdminClient()) { - admin.createTopics(Set.of(new NewTopic(topic, 3, (short) 1))).all().get(); + admin.createTopics(Collections.singleton(new NewTopic(topic, 3, (short) 1))).all().get(); } Map props = new HashMap<>(); int tasksMax = 2; // Use two tasks since single-task connectors don't require zombie fencing - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -689,7 +690,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { // Grant the connector's admin permissions to access the topics for its records and offsets // Intentionally leave out permissions required for fencing try (Admin admin = connect.kafka().createAdminClient()) { - admin.createAcls(List.of( + admin.createAcls(Arrays.asList( new AclBinding( new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -736,7 +737,7 @@ public void testTasksFailOnInabilityToFence() throws Exception { // Now grant the necessary permissions for fencing to the connector's admin try (Admin admin = connect.kafka().createAdminClient()) { - admin.createAcls(List.of( + admin.createAcls(Arrays.asList( new AclBinding( new ResourcePattern(ResourceType.TRANSACTIONAL_ID, Worker.taskTransactionalId(CLUSTER_GROUP_ID, CONNECTOR_NAME, 0), PatternType.LITERAL), new AccessControlEntry("User:connector", "*", AclOperation.ALL, AclPermissionType.ALLOW) @@ -823,7 +824,7 @@ public void testSeparateOffsetsTopic() throws Exception { int numTasks = 1; Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getName()); props.put(TASKS_MAX_CONFIG, Integer.toString(numTasks)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -863,7 +864,7 @@ public void testSeparateOffsetsTopic() throws Exception { .consume( MINIMUM_MESSAGES, TimeUnit.MINUTES.toMillis(1), - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), "test-topic") .count(); assertTrue(recordNum >= MINIMUM_MESSAGES, @@ -873,7 +874,7 @@ public void testSeparateOffsetsTopic() throws Exception { ConsumerRecords offsetRecords = connectorTargetedCluster .consumeAll( TimeUnit.MINUTES.toMillis(1), - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, offsetsTopic ); @@ -929,7 +930,7 @@ public void testSeparateOffsetsTopic() throws Exception { // consume all records from the source topic or fail, to ensure that they were correctly produced ConsumerRecords sourceRecords = connectorTargetedCluster.consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, topic ); @@ -938,7 +939,7 @@ public void testSeparateOffsetsTopic() throws Exception { // also have to check which offsets have actually been committed, since we no longer have exactly-once semantics offsetRecords = connectorTargetedCluster.consumeAll( CONSUME_RECORDS_TIMEOUT_MS, - Map.of(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), + Collections.singletonMap(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed"), null, offsetsTopic ); @@ -990,7 +991,7 @@ public void testPotentialDeadlockWhenProducingToOffsetsTopic() throws Exception } private ConfigInfo findConfigInfo(String property, ConfigInfos validationResult) { - return validationResult.configs().stream() + return validationResult.values().stream() .filter(info -> property.equals(info.configKey().name())) .findAny() .orElseThrow(() -> new AssertionError("Failed to find configuration validation result for property '" + property + "'")); @@ -998,13 +999,13 @@ private ConfigInfo findConfigInfo(String property, ConfigInfos validationResult) private List parseAndAssertOffsetsForSingleTask(ConsumerRecords offsetRecords) { Map> parsedOffsets = parseOffsetForTasks(offsetRecords); - assertEquals(Set.of(0), parsedOffsets.keySet(), "Expected records to only be produced from a single task"); + assertEquals(Collections.singleton(0), parsedOffsets.keySet(), "Expected records to only be produced from a single task"); return parsedOffsets.get(0); } private List parseAndAssertValuesForSingleTask(ConsumerRecords sourceRecords) { Map> parsedValues = parseValuesForTasks(sourceRecords); - assertEquals(Set.of(0), parsedValues.keySet(), "Expected records to only be produced from a single task"); + assertEquals(Collections.singleton(0), parsedValues.keySet(), "Expected records to only be produced from a single task"); return parsedValues.get(0); } @@ -1023,7 +1024,7 @@ private void assertAtLeastOnceSeqnos(ConsumerRecords sourceRecor parsedValues.replaceAll((task, values) -> { Long committedValue = lastCommittedValues.get(task); assertNotNull(committedValue, "No committed offset found for task " + task); - return values.stream().filter(v -> v <= committedValue).toList(); + return values.stream().filter(v -> v <= committedValue).collect(Collectors.toList()); }); assertSeqnos(parsedValues, numTasks); } @@ -1101,7 +1102,7 @@ private Map> parseOffsetForTasks(ConsumerRecords> result = new HashMap<>(); for (ConsumerRecord offsetRecord : offsetRecords) { @@ -1122,7 +1123,7 @@ private Map> parseOffsetForTasks(ConsumerRecords partition = assertAndCast(key.get(1), Map.class, "Key[1]"); Object taskIdObject = partition.get("task.id"); - assertNotNull(taskIdObject, "Serialized source partition should contain 'task.id' field from TestableSourceConnector"); + assertNotNull(taskIdObject, "Serialized source partition should contain 'task.id' field from MonitorableSourceConnector"); String taskId = assertAndCast(taskIdObject, String.class, "task ID"); assertTrue(taskId.startsWith(CONNECTOR_NAME + "-"), "task ID should match pattern '-"); String taskIdRemainder = taskId.substring(CONNECTOR_NAME.length() + 1); @@ -1137,7 +1138,7 @@ private Map> parseOffsetForTasks(ConsumerRecords value = assertAndCast(valueObject, Map.class, "Value"); Object seqnoObject = value.get("saved"); - assertNotNull(seqnoObject, "Serialized source offset should contain 'seqno' field from TestableSourceConnector"); + assertNotNull(seqnoObject, "Serialized source offset should contain 'seqno' field from MonitorableSourceConnector"); long seqno = assertAndCast(seqnoObject, Long.class, "Seqno offset field"); result.computeIfAbsent(taskNum, t -> new ArrayList<>()).add(seqno); @@ -1162,7 +1163,7 @@ private static T assertAndCast(Object o, Class klass, String objectDescri private StartAndStopLatch connectorAndTaskStart(int numTasks) { connectorHandle.clearTasks(); IntStream.range(0, numTasks) - .mapToObj(i -> TestableSourceConnector.taskId(CONNECTOR_NAME, i)) + .mapToObj(i -> MonitorableSourceConnector.taskId(CONNECTOR_NAME, i)) .forEach(connectorHandle::taskHandle); return connectorHandle.expectedStarts(1, true); } @@ -1198,7 +1199,7 @@ private void assertProducersAreFencedOnReconfiguration( .mapToObj(i -> transactionalProducer( "simulated-task-producer-" + CONNECTOR_NAME + "-" + i, Worker.taskTransactionalId(CLUSTER_GROUP_ID, CONNECTOR_NAME, i) - )).toList(); + )).collect(Collectors.toList()); producers.forEach(KafkaProducer::initTransactions); @@ -1283,7 +1284,7 @@ public List poll() { // Request a read to the end of the offsets topic context.offsetStorageReader().offset(Collections.singletonMap("", null)); // Produce a record to the offsets topic - return List.of(new SourceRecord(null, null, topic, null, "", null, null)); + return Collections.singletonList(new SourceRecord(null, null, topic, null, "", null, null)); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java index 6263c8ab96cc1..d131fd4efc632 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/ExampleConnectIntegrationTest.java @@ -63,8 +63,8 @@ public class ExampleConnectIntegrationTest { private static final int NUM_TASKS = 3; private static final int NUM_WORKERS = 3; private static final String CONNECTOR_NAME = "simple-conn"; - private static final String SINK_CONNECTOR_CLASS_NAME = TestableSinkConnector.class.getSimpleName(); - private static final String SOURCE_CONNECTOR_CLASS_NAME = TestableSourceConnector.class.getSimpleName(); + private static final String SINK_CONNECTOR_CLASS_NAME = MonitorableSinkConnector.class.getSimpleName(); + private static final String SOURCE_CONNECTOR_CLASS_NAME = MonitorableSourceConnector.class.getSimpleName(); private EmbeddedConnectCluster connect; private ConnectorHandle connectorHandle; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java index d0841b26941f0..d85ac9a440cb4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/InternalTopicsIntegrationTest.java @@ -28,6 +28,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; @@ -285,15 +286,15 @@ public void testStartWhenInternalTopicsCreatedManuallyWithCompactForBrokersDefau } protected Map compactCleanupPolicy() { - return Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); + return Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT); } protected Map deleteCleanupPolicy() { - return Map.of(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); + return Collections.singletonMap(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE); } protected Map noTopicSettings() { - return Map.of(); + return Collections.emptyMap(); } protected Map compactAndDeleteCleanupPolicy() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java index f86fabca7159a..1084ddf6732c3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSinkConnector.java @@ -16,28 +16,48 @@ */ package org.apache.kafka.connect.integration; -import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.metrics.Gauge; -import org.apache.kafka.common.metrics.Measurable; -import org.apache.kafka.common.metrics.PluginMetrics; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.runtime.SampleSinkConnector; import org.apache.kafka.connect.sink.SinkRecord; +import org.apache.kafka.connect.sink.SinkTask; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; import java.util.Collection; -import java.util.LinkedHashMap; +import java.util.HashMap; +import java.util.List; import java.util.Map; -public class MonitorableSinkConnector extends TestableSinkConnector { +/** + * A sink connector that is used in Apache Kafka integration tests to verify the behavior of the + * Connect framework, but that can be used in other integration tests as a simple connector that + * consumes and counts records. This class provides methods to find task instances + * which are initiated by the embedded connector, and wait for them to consume a desired number of + * messages. + */ +public class MonitorableSinkConnector extends SampleSinkConnector { + + private static final Logger log = LoggerFactory.getLogger(MonitorableSinkConnector.class); - public static final String VALUE = "started"; - public static MetricName metricsName = null; + // Boolean valued configuration that determines whether MonitorableSinkConnector::alterOffsets should return true or false + public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; + + private String connectorName; + private Map commonConfigs; + private ConnectorHandle connectorHandle; @Override public void start(Map props) { - super.start(props); - PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("start", "description", new LinkedHashMap<>()); - pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); + connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); + connectorName = props.get("name"); + commonConfigs = props; + log.info("Starting connector {}", props.get("name")); + connectorHandle.recordConnectorStart(); } @Override @@ -45,24 +65,103 @@ public Class taskClass() { return MonitorableSinkTask.class; } - public static class MonitorableSinkTask extends TestableSinkTask { + @Override + public List> taskConfigs(int maxTasks) { + List> configs = new ArrayList<>(); + for (int i = 0; i < maxTasks; i++) { + Map config = new HashMap<>(commonConfigs); + config.put("connector.name", connectorName); + config.put("task.id", connectorName + "-" + i); + configs.add(config); + } + return configs; + } + + @Override + public void stop() { + log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStop(); + } + + @Override + public ConfigDef config() { + return new ConfigDef(); + } + + @Override + public boolean alterOffsets(Map connectorConfig, Map offsets) { + return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); + } + + public static class MonitorableSinkTask extends SinkTask { - public static MetricName metricsName = null; - private int count = 0; + private String taskId; + TaskHandle taskHandle; + Map committedOffsets; + Map> cachedTopicPartitions; + + public MonitorableSinkTask() { + this.committedOffsets = new HashMap<>(); + this.cachedTopicPartitions = new HashMap<>(); + } + + @Override + public String version() { + return "unknown"; + } @Override public void start(Map props) { - super.start(props); - PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("put", "description", new LinkedHashMap<>()); - pluginMetrics.addMetric(metricsName, (Measurable) (config, now) -> count); + taskId = props.get("task.id"); + String connectorName = props.get("connector.name"); + taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); + log.debug("Starting task {}", taskId); + taskHandle.recordTaskStart(); + } + + @Override + public void open(Collection partitions) { + log.debug("Opening partitions {}", partitions); + taskHandle.partitionsAssigned(partitions); + } + + @Override + public void close(Collection partitions) { + log.debug("Closing partitions {}", partitions); + taskHandle.partitionsRevoked(partitions); + partitions.forEach(committedOffsets::remove); } @Override public void put(Collection records) { - super.put(records); - count += records.size(); + for (SinkRecord rec : records) { + taskHandle.record(rec); + TopicPartition tp = cachedTopicPartitions + .computeIfAbsent(rec.topic(), v -> new HashMap<>()) + .computeIfAbsent(rec.kafkaPartition(), v -> new TopicPartition(rec.topic(), rec.kafkaPartition())); + committedOffsets.put(tp, committedOffsets.getOrDefault(tp, 0) + 1); + log.trace("Task {} obtained record (key='{}' value='{}')", taskId, rec.key(), rec.value()); + } + } + + @Override + public Map preCommit(Map offsets) { + taskHandle.partitionsCommitted(offsets.keySet()); + offsets.forEach((tp, offset) -> { + int recordsSinceLastCommit = committedOffsets.getOrDefault(tp, 0); + if (recordsSinceLastCommit != 0) { + taskHandle.commit(recordsSinceLastCommit); + log.debug("Forwarding to framework request to commit {} records for {}", recordsSinceLastCommit, tp); + committedOffsets.put(tp, 0); + } + }); + return offsets; } + @Override + public void stop() { + log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); + taskHandle.recordTaskStop(); + } } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java index 07b7155b92543..7387c81c599be 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/MonitorableSourceConnector.java @@ -16,28 +16,76 @@ */ package org.apache.kafka.connect.integration; -import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.metrics.Gauge; -import org.apache.kafka.common.metrics.Measurable; -import org.apache.kafka.common.metrics.PluginMetrics; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.connect.connector.Task; +import org.apache.kafka.connect.data.Schema; +import org.apache.kafka.connect.errors.ConnectException; +import org.apache.kafka.connect.header.ConnectHeaders; +import org.apache.kafka.connect.runtime.SampleSourceConnector; +import org.apache.kafka.connect.source.ConnectorTransactionBoundaries; +import org.apache.kafka.connect.source.ExactlyOnceSupport; import org.apache.kafka.connect.source.SourceRecord; +import org.apache.kafka.connect.source.SourceTask; +import org.apache.kafka.server.util.ThroughputThrottler; -import java.util.LinkedHashMap; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Locale; import java.util.Map; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.LongStream; + +/** + * A source connector that is used in Apache Kafka integration tests to verify the behavior of + * the Connect framework, but that can be used in other integration tests as a simple connector + * that generates records of a fixed structure. The rate of record production can be adjusted + * through the configs 'throughput' and 'messages.per.poll' + */ +public class MonitorableSourceConnector extends SampleSourceConnector { + private static final Logger log = LoggerFactory.getLogger(MonitorableSourceConnector.class); + + public static final String TOPIC_CONFIG = "topic"; + public static final String NUM_TASKS = "num.tasks"; + public static final String MESSAGES_PER_POLL_CONFIG = "messages.per.poll"; + public static final String MAX_MESSAGES_PER_SECOND_CONFIG = "throughput"; + public static final String MAX_MESSAGES_PRODUCED_CONFIG = "max.messages"; -public class MonitorableSourceConnector extends TestableSourceConnector { + public static final String CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG = "custom.exactly.once.support"; + public static final String EXACTLY_ONCE_SUPPORTED = "supported"; + public static final String EXACTLY_ONCE_UNSUPPORTED = "unsupported"; + public static final String EXACTLY_ONCE_NULL = "null"; + public static final String EXACTLY_ONCE_FAIL = "fail"; - public static MetricName metricsName = null; - public static final String VALUE = "started"; + public static final String CUSTOM_TRANSACTION_BOUNDARIES_CONFIG = "custom.transaction.boundaries"; + public static final String TRANSACTION_BOUNDARIES_SUPPORTED = "supported"; + public static final String TRANSACTION_BOUNDARIES_UNSUPPORTED = "unsupported"; + public static final String TRANSACTION_BOUNDARIES_NULL = "null"; + public static final String TRANSACTION_BOUNDARIES_FAIL = "fail"; + + // Boolean valued configuration that determines whether MonitorableSourceConnector::alterOffsets should return true or false + public static final String ALTER_OFFSETS_RESULT = "alter.offsets.result"; + + private String connectorName; + private ConnectorHandle connectorHandle; + private Map commonConfigs; @Override public void start(Map props) { - super.start(props); - PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("start", "description", new LinkedHashMap<>()); - pluginMetrics.addMetric(metricsName, (Gauge) (config, now) -> VALUE); + connectorHandle = RuntimeHandles.get().connectorHandle(props.get("name")); + connectorName = connectorHandle.name(); + commonConfigs = props; + log.info("Started {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStart(); + if (Boolean.parseBoolean(props.getOrDefault("connector.start.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during connector start"); + } } @Override @@ -45,27 +93,222 @@ public Class taskClass() { return MonitorableSourceTask.class; } - public static class MonitorableSourceTask extends TestableSourceTask { + @Override + public List> taskConfigs(int maxTasks) { + String numTasksProp = commonConfigs.get(NUM_TASKS); + int numTasks = numTasksProp != null ? Integer.parseInt(numTasksProp) : maxTasks; + List> configs = new ArrayList<>(); + for (int i = 0; i < numTasks; i++) { + Map config = taskConfig(commonConfigs, connectorName, i); + configs.add(config); + } + return configs; + } + + public static Map taskConfig( + Map connectorProps, + String connectorName, + int taskNum + ) { + Map result = new HashMap<>(connectorProps); + result.put("connector.name", connectorName); + result.put("task.id", taskId(connectorName, taskNum)); + return result; + } - public static MetricName metricsName = null; - private int count = 0; + @Override + public void stop() { + log.info("Stopped {} connector {}", this.getClass().getSimpleName(), connectorName); + connectorHandle.recordConnectorStop(); + if (Boolean.parseBoolean(commonConfigs.getOrDefault("connector.stop.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during connector stop"); + } + } + + @Override + public ConfigDef config() { + log.info("Configured {} connector {}", this.getClass().getSimpleName(), connectorName); + return new ConfigDef(); + } + + @Override + public ExactlyOnceSupport exactlyOnceSupport(Map connectorConfig) { + String supportLevel = connectorConfig.getOrDefault(CUSTOM_EXACTLY_ONCE_SUPPORT_CONFIG, "null").toLowerCase(Locale.ROOT); + switch (supportLevel) { + case EXACTLY_ONCE_SUPPORTED: + return ExactlyOnceSupport.SUPPORTED; + case EXACTLY_ONCE_UNSUPPORTED: + return ExactlyOnceSupport.UNSUPPORTED; + case EXACTLY_ONCE_FAIL: + throw new ConnectException("oops"); + default: + case EXACTLY_ONCE_NULL: + return null; + } + } + + @Override + public ConnectorTransactionBoundaries canDefineTransactionBoundaries(Map connectorConfig) { + String supportLevel = connectorConfig.getOrDefault(CUSTOM_TRANSACTION_BOUNDARIES_CONFIG, TRANSACTION_BOUNDARIES_UNSUPPORTED).toLowerCase(Locale.ROOT); + switch (supportLevel) { + case TRANSACTION_BOUNDARIES_SUPPORTED: + return ConnectorTransactionBoundaries.SUPPORTED; + case TRANSACTION_BOUNDARIES_FAIL: + throw new ConnectException("oh no :("); + case TRANSACTION_BOUNDARIES_NULL: + return null; + default: + case TRANSACTION_BOUNDARIES_UNSUPPORTED: + return ConnectorTransactionBoundaries.UNSUPPORTED; + } + } + + @Override + public boolean alterOffsets(Map connectorConfig, Map, Map> offsets) { + return Boolean.parseBoolean(connectorConfig.get(ALTER_OFFSETS_RESULT)); + } + + public static String taskId(String connectorName, int taskId) { + return connectorName + "-" + taskId; + } + + public static class MonitorableSourceTask extends SourceTask { + private String taskId; + private String topicName; + private TaskHandle taskHandle; + private volatile boolean stopped; + private long startingSeqno; + private long seqno; + private int batchSize; + private ThroughputThrottler throttler; + private long maxMessages; + + private long priorTransactionBoundary; + private long nextTransactionBoundary; + + @Override + public String version() { + return "unknown"; + } @Override public void start(Map props) { - super.start(props); - PluginMetrics pluginMetrics = context.pluginMetrics(); - metricsName = pluginMetrics.metricName("poll", "description", new LinkedHashMap<>()); - pluginMetrics.addMetric(metricsName, (Measurable) (config, now) -> count); + taskId = props.get("task.id"); + String connectorName = props.get("connector.name"); + topicName = props.getOrDefault(TOPIC_CONFIG, "sequential-topic"); + batchSize = Integer.parseInt(props.getOrDefault(MESSAGES_PER_POLL_CONFIG, "1")); + taskHandle = RuntimeHandles.get().connectorHandle(connectorName).taskHandle(taskId); + Map offset = Optional.ofNullable( + context.offsetStorageReader().offset(sourcePartition(taskId))) + .orElse(Collections.emptyMap()); + startingSeqno = Optional.ofNullable((Long) offset.get("saved")).orElse(0L); + seqno = startingSeqno; + log.info("Started {} task {} with properties {}", this.getClass().getSimpleName(), taskId, props); + throttler = new ThroughputThrottler(Long.parseLong(props.getOrDefault(MAX_MESSAGES_PER_SECOND_CONFIG, "-1")), System.currentTimeMillis()); + maxMessages = Long.parseLong(props.getOrDefault(MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(Long.MAX_VALUE))); + taskHandle.recordTaskStart(); + priorTransactionBoundary = 0; + nextTransactionBoundary = 1; + if (Boolean.parseBoolean(props.getOrDefault("task-" + taskId + ".start.inject.error", "false"))) { + throw new RuntimeException("Injecting errors during task start"); + } + calculateNextBoundary(); } @Override public List poll() { - List records = super.poll(); - if (records != null) { - count += records.size(); + if (!stopped) { + // Don't return any more records since we've already produced the configured maximum number. + if (seqno >= maxMessages) { + return null; + } + if (throttler.shouldThrottle(seqno - startingSeqno, System.currentTimeMillis())) { + throttler.throttle(); + } + int currentBatchSize = (int) Math.min(maxMessages - seqno, batchSize); + taskHandle.record(currentBatchSize); + log.trace("Returning batch of {} records", currentBatchSize); + return LongStream.range(0, currentBatchSize) + .mapToObj(i -> { + seqno++; + SourceRecord record = new SourceRecord( + sourcePartition(taskId), + sourceOffset(seqno), + topicName, + null, + Schema.STRING_SCHEMA, + "key-" + taskId + "-" + seqno, + Schema.STRING_SCHEMA, + "value-" + taskId + "-" + seqno, + null, + new ConnectHeaders().addLong("header-" + seqno, seqno)); + maybeDefineTransactionBoundary(record); + return record; + }) + .collect(Collectors.toList()); + } + return null; + } + + @Override + public void commit() { + log.info("Task {} committing offsets", taskId); + //TODO: save progress outside the offset topic, potentially in the task handle + } + + @Override + public void commitRecord(SourceRecord record, RecordMetadata metadata) { + log.trace("Committing record: {}", record); + taskHandle.commit(); + } + + @Override + public void stop() { + log.info("Stopped {} task {}", this.getClass().getSimpleName(), taskId); + stopped = true; + taskHandle.recordTaskStop(); + } + + /** + * Calculate the next transaction boundary, i.e., the seqno whose corresponding source record should be used to + * either {@link org.apache.kafka.connect.source.TransactionContext#commitTransaction(SourceRecord) commit} + * or {@link org.apache.kafka.connect.source.TransactionContext#abortTransaction(SourceRecord) abort} the next transaction. + *

          + * This connector defines transactions whose size correspond to successive elements of the Fibonacci sequence, + * where transactions with an even number of records are aborted, and those with an odd number of records are committed. + */ + private void calculateNextBoundary() { + while (nextTransactionBoundary <= seqno) { + nextTransactionBoundary += priorTransactionBoundary; + priorTransactionBoundary = nextTransactionBoundary - priorTransactionBoundary; + } + } + + private void maybeDefineTransactionBoundary(SourceRecord record) { + if (context.transactionContext() == null || seqno != nextTransactionBoundary) { + return; + } + long transactionSize = nextTransactionBoundary - priorTransactionBoundary; + + // If the transaction boundary ends on an even-numbered offset, abort it + // Otherwise, commit + boolean abort = nextTransactionBoundary % 2 == 0; + calculateNextBoundary(); + if (abort) { + log.info("Aborting transaction of {} records", transactionSize); + context.transactionContext().abortTransaction(record); + } else { + log.info("Committing transaction of {} records", transactionSize); + context.transactionContext().commitTransaction(record); } - return records; } + } + + public static Map sourcePartition(String taskId) { + return Collections.singletonMap("task.id", taskId); + } + public static Map sourceOffset(long seqno) { + return Collections.singletonMap("saved", seqno); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java index c9edd6093bdf4..e0f395f442508 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/OffsetsApiIntegrationTest.java @@ -18,8 +18,7 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.admin.Admin; -import org.apache.kafka.clients.admin.GroupListing; -import org.apache.kafka.clients.admin.ListGroupsOptions; +import org.apache.kafka.clients.admin.ConsumerGroupListing; import org.apache.kafka.common.test.api.Flaky; import org.apache.kafka.connect.runtime.ConnectorConfig; import org.apache.kafka.connect.runtime.SourceConnectorConfig; @@ -44,6 +43,7 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -58,7 +58,7 @@ import jakarta.ws.rs.core.Response; import static jakarta.ws.rs.core.Response.Status.INTERNAL_SERVER_ERROR; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; import static org.apache.kafka.connect.runtime.SinkConnectorConfig.TOPICS_CONFIG; @@ -106,7 +106,7 @@ public void tearDown() { } try { assertEquals( - Set.of(), + Collections.emptySet(), remainingConnectors, "Some connectors were not properly cleaned up after this test" ); @@ -149,11 +149,11 @@ private static EmbeddedConnectCluster createOrReuseConnectWithWorkerProps(Map workerProps = Map.of( + Map workerProps = Collections.singletonMap( DistributedConfig.EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled" ); @@ -184,7 +184,7 @@ public void testGetSinkConnectorOffsetsOverriddenConsumerGroupId() throws Except // Ensure that the overridden consumer group ID was the one actually used try (Admin admin = connect.kafka().createAdminClient()) { - Collection consumerGroups = admin.listGroups(ListGroupsOptions.forConsumerGroups()).all().get(); + Collection consumerGroups = admin.listConsumerGroups().all().get(); assertTrue(consumerGroups.stream().anyMatch(consumerGroupListing -> overriddenGroupId.equals(consumerGroupListing.groupId()))); assertTrue(consumerGroups.stream().noneMatch(consumerGroupListing -> SinkUtils.consumerGroupId(connectorName).equals(consumerGroupListing.groupId()))); } @@ -277,7 +277,7 @@ private void getAndVerifySourceConnectorOffsets(Map connectorCon "Source connector offsets should reflect the expected number of records produced"); // Each task should produce more records - connectorConfigs.put(TestableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(2 * NUM_RECORDS_PER_PARTITION)); + connectorConfigs.put(MonitorableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(2 * NUM_RECORDS_PER_PARTITION)); connect.configureConnector(connectorName, connectorConfigs); verifyExpectedSourceConnectorOffsets(connectorName, NUM_TASKS, 2 * NUM_RECORDS_PER_PARTITION, @@ -287,8 +287,8 @@ private void getAndVerifySourceConnectorOffsets(Map connectorCon @Test public void testAlterOffsetsNonExistentConnector() { ConnectRestException e = assertThrows(ConnectRestException.class, - () -> connect.alterConnectorOffsets("non-existent-connector", new ConnectorOffsets(List.of( - new ConnectorOffset(Map.of(), Map.of()))))); + () -> connect.alterConnectorOffsets("non-existent-connector", new ConnectorOffsets(Collections.singletonList( + new ConnectorOffset(Collections.emptyMap(), Collections.emptyMap()))))); assertEquals(404, e.errorCode()); } @@ -300,10 +300,11 @@ public void testAlterOffsetsNonStoppedConnector() throws Exception { "Connector tasks did not start in time."); List offsets = new ArrayList<>(); - // The TestableSourceConnector has a source partition per task + // The MonitorableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsets.add( - new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 5)) + new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), + Collections.singletonMap("saved", 5)) ); } @@ -342,7 +343,7 @@ public void testAlterSinkConnectorOffsetsOverriddenConsumerGroupId() throws Exce alterAndVerifySinkConnectorOffsets(connectorConfigs, connect.kafka()); // Ensure that the overridden consumer group ID was the one actually used try (Admin admin = connect.kafka().createAdminClient()) { - Collection consumerGroups = admin.listGroups(ListGroupsOptions.forConsumerGroups()).all().get(); + Collection consumerGroups = admin.listConsumerGroups().all().get(); assertTrue(consumerGroups.stream().anyMatch(consumerGroupListing -> overriddenGroupId.equals(consumerGroupListing.groupId()))); assertTrue(consumerGroups.stream().noneMatch(consumerGroupListing -> SinkUtils.consumerGroupId(connectorName).equals(consumerGroupListing.groupId()))); } @@ -401,7 +402,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, i); - offsetsToAlter.add(new ConnectorOffset(partition, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 5))); + offsetsToAlter.add(new ConnectorOffset(partition, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 5))); } // Alter the sink connector's offsets, with retry logic (since we just stopped the connector) @@ -414,7 +415,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon "Sink connector consumer group offsets should reflect the altered offsets"); // Update the connector's configs; this time expect SinkConnector::alterOffsets to return true - connectorConfigs.put(TestableSinkConnector.ALTER_OFFSETS_RESULT, "true"); + connectorConfigs.put(MonitorableSinkConnector.ALTER_OFFSETS_RESULT, "true"); connect.configureConnector(connectorName, connectorConfigs); // Alter offsets again while the connector is still in a stopped state @@ -423,7 +424,7 @@ private void alterAndVerifySinkConnectorOffsets(Map connectorCon partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, i); - offsetsToAlter.add(new ConnectorOffset(partition, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 3))); + offsetsToAlter.add(new ConnectorOffset(partition, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 3))); } response = connect.alterConnectorOffsets(connectorName, new ConnectorOffsets(offsetsToAlter)); @@ -473,7 +474,7 @@ public void testAlterSinkConnectorOffsetsZombieSinkTasks() throws Exception { Map partition = new HashMap<>(); partition.put(SinkUtils.KAFKA_TOPIC_KEY, topic); partition.put(SinkUtils.KAFKA_PARTITION_KEY, 0); - List offsetsToAlter = List.of(new ConnectorOffset(partition, null)); + List offsetsToAlter = Collections.singletonList(new ConnectorOffset(partition, null)); ConnectRestException e = assertThrows(ConnectRestException.class, () -> connect.alterConnectorOffsets(connectorName, new ConnectorOffsets(offsetsToAlter))); @@ -597,10 +598,11 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo ); List offsetsToAlter = new ArrayList<>(); - // The TestableSourceConnector has a source partition per task + // The MonitorableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( - new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 5)) + new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), + Collections.singletonMap("saved", 5)) ); } @@ -612,15 +614,16 @@ public void alterAndVerifySourceConnectorOffsets(Map connectorCo "Source connector offsets should reflect the altered offsets"); // Update the connector's configs; this time expect SourceConnector::alterOffsets to return true - connectorConfigs.put(TestableSourceConnector.ALTER_OFFSETS_RESULT, "true"); + connectorConfigs.put(MonitorableSourceConnector.ALTER_OFFSETS_RESULT, "true"); connect.configureConnector(connectorName, connectorConfigs); // Alter offsets again while connector is in stopped state offsetsToAlter = new ArrayList<>(); - // The TestableSourceConnector has a source partition per task + // The MonitorableSourceConnector has a source partition per task for (int i = 0; i < NUM_TASKS; i++) { offsetsToAlter.add( - new ConnectorOffset(Map.of("task.id", connectorName + "-" + i), Map.of("saved", 7)) + new ConnectorOffset(Collections.singletonMap("task.id", connectorName + "-" + i), + Collections.singletonMap("saved", 7)) ); } @@ -721,7 +724,7 @@ public void testResetSinkConnectorOffsetsOverriddenConsumerGroupId() throws Exce resetAndVerifySinkConnectorOffsets(connectorConfigs, connect.kafka()); // Ensure that the overridden consumer group ID was the one actually used try (Admin admin = connect.kafka().createAdminClient()) { - Collection consumerGroups = admin.listGroups(ListGroupsOptions.forConsumerGroups()).all().get(); + Collection consumerGroups = admin.listConsumerGroups().all().get(); assertTrue(consumerGroups.stream().anyMatch(consumerGroupListing -> overriddenGroupId.equals(consumerGroupListing.groupId()))); assertTrue(consumerGroups.stream().noneMatch(consumerGroupListing -> SinkUtils.consumerGroupId(connectorName).equals(consumerGroupListing.groupId()))); } @@ -905,7 +908,7 @@ public void resetAndVerifySourceConnectorOffsets(Map connectorCo private Map baseSinkConnectorConfigs() { Map configs = new HashMap<>(); - configs.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + configs.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); configs.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); configs.put(TOPICS_CONFIG, topic); configs.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -915,11 +918,11 @@ private Map baseSinkConnectorConfigs() { private Map baseSourceConnectorConfigs() { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); - props.put(TestableSourceConnector.MESSAGES_PER_POLL_CONFIG, "3"); - props.put(TestableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(NUM_RECORDS_PER_PARTITION)); + props.put(MonitorableSourceConnector.MESSAGES_PER_POLL_CONFIG, "3"); + props.put(MonitorableSourceConnector.MAX_MESSAGES_PRODUCED_CONFIG, String.valueOf(NUM_RECORDS_PER_PARTITION)); props.put(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); props.put(DEFAULT_TOPIC_CREATION_PREFIX + REPLICATION_FACTOR_CONFIG, "1"); @@ -1041,7 +1044,7 @@ private void verifyExpectedSinkConnectorOffsets(String connectorName, String exp * Verify whether the actual offsets for a source connector match the expected offsets. The verification is done using the * GET /connectors/{connector}/offsets REST API which is repeatedly queried until the offsets match * or the {@link #OFFSET_READ_TIMEOUT_MS timeout} is reached. Note that this assumes that the source connector is a - * {@link TestableSourceConnector} + * {@link MonitorableSourceConnector} * * @param connectorName the name of the source connector whose offsets are to be verified * @param numTasks the number of tasks for the source connector @@ -1054,7 +1057,7 @@ private void verifyExpectedSourceConnectorOffsets(String connectorName, int numT int expectedOffset, String conditionDetails) throws InterruptedException { waitForCondition(() -> { ConnectorOffsets offsets = connect.connectorOffsets(connectorName); - // The TestableSourceConnector has a source partition per task + // The MonitorableSourceConnector has a source partition per task if (offsets.offsets().size() != numTasks) { return false; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java index 513e064ddb682..ff028928c2576 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RebalanceSourceConnectorsIntegrationTest.java @@ -30,14 +30,15 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Properties; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import java.util.stream.IntStream; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -297,7 +298,7 @@ public void testMultipleWorkersRejoining() throws Exception { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); @@ -332,10 +333,10 @@ private boolean assertConnectorAndTasksAreUniqueAndBalanced() { assertNotEquals(0, maxConnectors, "Found no connectors running!"); assertNotEquals(0, maxTasks, "Found no tasks running!"); - assertEquals(connectors.size(), + assertEquals(connectors.values().size(), connectors.values().stream().distinct().count(), "Connector assignments are not unique: " + connectors); - assertEquals(tasks.size(), + assertEquals(tasks.values().size(), tasks.values().stream().distinct().count(), "Task assignments are not unique: " + tasks); assertTrue(maxConnectors - minConnectors < 2, "Connectors are imbalanced: " + formatAssignment(connectors)); @@ -349,8 +350,9 @@ private boolean assertConnectorAndTasksAreUniqueAndBalanced() { private static String formatAssignment(Map> assignment) { StringBuilder result = new StringBuilder(); - for (String worker : assignment.keySet().stream().sorted().toList()) { - result.append(String.format("\n%s=%s", worker, assignment.getOrDefault(worker, List.of()))); + for (String worker : assignment.keySet().stream().sorted().collect(Collectors.toList())) { + result.append(String.format("\n%s=%s", worker, assignment.getOrDefault(worker, + Collections.emptyList()))); } return result.toString(); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java index 0e0cfa6a1ce9c..1af52dba59f89 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestExtensionIntegrationTest.java @@ -31,6 +31,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.concurrent.TimeUnit; @@ -91,7 +92,7 @@ public void testRestExtensionApi() throws InterruptedException { try { // setup up props for the connector Map connectorProps = new HashMap<>(); - connectorProps.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + connectorProps.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); connectorProps.put(TASKS_MAX_CONFIG, String.valueOf(1)); connectorProps.put(TOPICS_CONFIG, "test-topic"); @@ -111,7 +112,7 @@ public void testRestExtensionApi() throws InterruptedException { workerId, null ), - Map.of( + Collections.singletonMap( 0, new TaskState(0, "RUNNING", workerId, null) ), diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java index 3831eb8f24685..81b18d03442ff 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/RestForwardingIntegrationTest.java @@ -21,7 +21,6 @@ import org.apache.kafka.common.network.ConnectionMode; import org.apache.kafka.common.utils.Utils; import org.apache.kafka.connect.runtime.Herder; -import org.apache.kafka.connect.runtime.MockConnectMetrics; import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.distributed.DistributedConfig; import org.apache.kafka.connect.runtime.distributed.NotLeaderException; @@ -63,6 +62,7 @@ import java.security.GeneralSecurityException; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -75,7 +75,6 @@ import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.doAnswer; -import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.when; @ExtendWith(MockitoExtension.class) @@ -168,7 +167,6 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b followerServer = new ConnectRestServer(null, followerClient, followerConfig.originals()); followerServer.initializeServer(); when(followerHerder.plugins()).thenReturn(plugins); - doReturn(new MockConnectMetrics()).when(followerHerder).connectMetrics(); followerServer.initializeResources(followerHerder); // Leader worker setup @@ -176,7 +174,6 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b leaderServer = new ConnectRestServer(null, leaderClient, leaderConfig.originals()); leaderServer.initializeServer(); when(leaderHerder.plugins()).thenReturn(plugins); - doReturn(new MockConnectMetrics()).when(leaderHerder).connectMetrics(); leaderServer.initializeResources(leaderHerder); // External client setup @@ -198,7 +195,7 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b .putConnectorConfig(any(), any(), isNull(), anyBoolean(), followerCallbackCaptor.capture()); // Leader will reply - ConnectorInfo connectorInfo = new ConnectorInfo("blah", Map.of(), List.of(), ConnectorType.SOURCE); + ConnectorInfo connectorInfo = new ConnectorInfo("blah", Collections.emptyMap(), Collections.emptyList(), ConnectorType.SOURCE); Herder.Created leaderAnswer = new Herder.Created<>(true, connectorInfo); ArgumentCaptor>> leaderCallbackCaptor = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -214,7 +211,7 @@ public void testRestForwardToLeader(boolean dualListener, boolean followerSsl, b "\"name\": \"blah\"," + "\"config\": {}" + "}"; - StringEntity entity = new StringEntity(jsonBody, StandardCharsets.UTF_8); + StringEntity entity = new StringEntity(jsonBody, StandardCharsets.UTF_8.name()); entity.setContentType("application/json"); request.setEntity(entity); HttpResponse httpResponse = executeRequest(followerUrl, request); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java index 27506aaaedb21..86473ffe613b4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SessionedProtocolIntegrationTest.java @@ -122,7 +122,7 @@ public void ensureInternalEndpointIsSecured() throws Throwable { // Create the connector now // setup up props for the sink connector Map connectorProps = new HashMap<>(); - connectorProps.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + connectorProps.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); connectorProps.put(TASKS_MAX_CONFIG, String.valueOf(1)); connectorProps.put(TOPICS_CONFIG, "test-topic"); connectorProps.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java index 2b27f790f7eed..961eeb70f995c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SinkConnectorsIntegrationTest.java @@ -28,10 +28,10 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Objects; import java.util.Properties; @@ -98,10 +98,10 @@ public void close() { public void testEagerConsumerPartitionAssignment() throws Exception { final String topic1 = "topic1", topic2 = "topic2", topic3 = "topic3"; final TopicPartition tp1 = new TopicPartition(topic1, 0), tp2 = new TopicPartition(topic2, 0), tp3 = new TopicPartition(topic3, 0); - final Collection topics = List.of(topic1, topic2, topic3); + final Collection topics = Arrays.asList(topic1, topic2, topic3); Map connectorProps = baseSinkConnectorProps(String.join(",", topics)); - // Need an eager assignor here; round-robin is as good as any + // Need an eager assignor here; round robin is as good as any connectorProps.put( CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX + PARTITION_ASSIGNMENT_STRATEGY_CONFIG, RoundRobinAssignor.class.getName()); @@ -205,7 +205,7 @@ public void testEagerConsumerPartitionAssignment() throws Exception { public void testCooperativeConsumerPartitionAssignment() throws Exception { final String topic1 = "topic1", topic2 = "topic2", topic3 = "topic3"; final TopicPartition tp1 = new TopicPartition(topic1, 0), tp2 = new TopicPartition(topic2, 0), tp3 = new TopicPartition(topic3, 0); - final Collection topics = List.of(topic1, topic2, topic3); + final Collection topics = Arrays.asList(topic1, topic2, topic3); Map connectorProps = baseSinkConnectorProps(String.join(",", topics)); connectorProps.put( @@ -309,7 +309,7 @@ public void testCooperativeConsumerPartitionAssignment() throws Exception { private Map baseSinkConnectorProps(String topics) { Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSinkConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSinkConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPICS_CONFIG, topics); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java index 1fbdfa70b8e32..aa1dc6bcf94f2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/SourceConnectorsIntegrationTest.java @@ -25,12 +25,13 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Properties; import java.util.stream.IntStream; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -149,7 +150,7 @@ public void testSwitchingToTopicCreationEnabled() throws InterruptedException { // start the clusters connect.start(); - connect.kafka().createTopic(BAR_TOPIC, DEFAULT_PARTITIONS, DEFAULT_REPLICATION_FACTOR, Map.of()); + connect.kafka().createTopic(BAR_TOPIC, DEFAULT_PARTITIONS, DEFAULT_REPLICATION_FACTOR, Collections.emptyMap()); connect.assertions().assertTopicsExist(BAR_TOPIC); connect.assertions().assertTopicSettings(BAR_TOPIC, DEFAULT_REPLICATION_FACTOR, @@ -207,7 +208,7 @@ public void testSwitchingToTopicCreationEnabled() throws InterruptedException { private Map defaultSourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put("throughput", String.valueOf(10)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java index fd4438e750ff4..7a48660629518 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StandaloneWorkerIntegrationTest.java @@ -30,6 +30,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -37,13 +38,14 @@ import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; import jakarta.ws.rs.core.Response; import static org.apache.kafka.connect.integration.BlockingConnectorTest.Block.BLOCK_CONFIG; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_START; import static org.apache.kafka.connect.integration.BlockingConnectorTest.CONNECTOR_TASK_CONFIGS; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.NAME_CONFIG; @@ -100,7 +102,7 @@ public void testDynamicLogging() { StandaloneWorkerIntegrationTest::isModified ); assertEquals( - Map.of(), + Collections.emptyMap(), invalidModifiedLoggers, "No loggers should have a non-null last-modified timestamp" ); @@ -153,9 +155,9 @@ private Map testSetLoggingLevel( assertTrue(affectedLoggers.contains(namespace)); List invalidAffectedLoggers = affectedLoggers.stream() .filter(l -> !l.startsWith(namespace)) - .toList(); + .collect(Collectors.toList()); assertEquals( - List.of(), + Collections.emptyList(), invalidAffectedLoggers, "No loggers outside the namespace '" + namespace + "' should have been included in the response for a request to modify that namespace" @@ -186,7 +188,7 @@ private Map testSetLoggingLevel( ) ); assertEquals( - Map.of(), + Collections.emptyMap(), invalidAffectedLoggerLevels, "At least one logger in the affected namespace '" + namespace + "' does not have the expected level of '" + level @@ -197,7 +199,7 @@ private Map testSetLoggingLevel( Set droppedLoggers = Utils.diff(HashSet::new, initialLevels.keySet(), newLevels.keySet()); assertEquals( - Set.of(), + Collections.emptySet(), droppedLoggers, "At least one logger was present in the listing of all loggers " + "before the logging level for namespace '" + namespace @@ -210,7 +212,7 @@ private Map testSetLoggingLevel( e -> !hasNamespace(e, namespace) && !e.getValue().equals(initialLevels.get(e.getKey())) ); assertEquals( - Map.of(), + Collections.emptyMap(), invalidUnaffectedLoggerLevels, "At least one logger outside of the affected namespace '" + namespace + "' has a different logging level or last-modified timestamp than it did " @@ -254,8 +256,8 @@ public void testCreateConnectorWithStoppedInitialState() throws Exception { CONNECTOR_NAME, "Connector was not created in a stopped state" ); - assertEquals(List.of(), connect.connectorInfo(CONNECTOR_NAME).tasks()); - assertEquals(List.of(), connect.taskConfigs(CONNECTOR_NAME)); + assertEquals(Collections.emptyList(), connect.connectorInfo(CONNECTOR_NAME).tasks()); + assertEquals(Collections.emptyList(), connect.taskConfigs(CONNECTOR_NAME)); // Verify that a connector created in the STOPPED state can be resumed successfully connect.resumeConnector(CONNECTOR_NAME); @@ -368,7 +370,7 @@ private Map defaultSourceConnectorProps(String topic) { // setup props for the source connector Map props = new HashMap<>(); props.put(NAME_CONFIG, CONNECTOR_NAME); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(NUM_TASKS)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java index 55a2d5c6d497f..b864cc5759cf6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartAndStopLatchTest.java @@ -25,6 +25,7 @@ import org.junit.jupiter.api.Tag; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -91,7 +92,7 @@ public void shouldReturnTrueWhenAwaitingForStartAndStopToComplete() throws Throw @Test public void shouldReturnFalseWhenAwaitingForDependentLatchToComplete() throws Throwable { StartAndStopLatch depLatch = new StartAndStopLatch(1, 1, this::complete, null, clock); - dependents = List.of(depLatch); + dependents = Collections.singletonList(depLatch); latch = new StartAndStopLatch(1, 1, this::complete, dependents, clock); future = asyncAwait(100); @@ -105,7 +106,7 @@ public void shouldReturnFalseWhenAwaitingForDependentLatchToComplete() throws Th @Test public void shouldReturnTrueWhenAwaitingForStartAndStopAndDependentLatch() throws Throwable { StartAndStopLatch depLatch = new StartAndStopLatch(1, 1, this::complete, null, clock); - dependents = List.of(depLatch); + dependents = Collections.singletonList(depLatch); latch = new StartAndStopLatch(1, 1, this::complete, dependents, clock); future = asyncAwait(100); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartsAndStops.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartsAndStops.java index 9dfabc8fb86f1..25bc74802880d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartsAndStops.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/StartsAndStops.java @@ -17,5 +17,21 @@ package org.apache.kafka.connect.integration; -public record StartsAndStops(int starts, int stops) { +public class StartsAndStops { + private final int starts; + private final int stops; + + public StartsAndStops(int starts, int stops) { + this.starts = starts; + this.stops = stops; + } + + public int starts() { + return starts; + } + + public int stops() { + return stops; + } + } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java index 5eca2c24e8401..50c7d829a4a9e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/integration/TransformationIntegrationTest.java @@ -34,6 +34,7 @@ import java.util.Properties; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.PREDICATES_CONFIG; @@ -62,8 +63,8 @@ public class TransformationIntegrationTest { private static final int NUM_TASKS = 1; private static final int NUM_WORKERS = 3; private static final String CONNECTOR_NAME = "simple-conn"; - private static final String SINK_CONNECTOR_CLASS_NAME = TestableSinkConnector.class.getSimpleName(); - private static final String SOURCE_CONNECTOR_CLASS_NAME = TestableSourceConnector.class.getSimpleName(); + private static final String SINK_CONNECTOR_CLASS_NAME = MonitorableSinkConnector.class.getSimpleName(); + private static final String SOURCE_CONNECTOR_CLASS_NAME = MonitorableSourceConnector.class.getSimpleName(); private EmbeddedConnectCluster connect; private ConnectorHandle connectorHandle; @@ -172,7 +173,7 @@ public void testFilterOnTopicNameWithSinkConnector() throws Exception { connectorHandle.awaitCommits(RECORD_TRANSFER_DURATION_MS); // Assert that we didn't see any baz - Map expectedRecordCounts = Map.of(fooTopic, (long) numFooRecords); + Map expectedRecordCounts = singletonMap(fooTopic, (long) numFooRecords); assertObservedRecords(observedRecords, expectedRecordCounts); // delete connector @@ -252,7 +253,7 @@ public void testFilterOnTombstonesWithSinkConnector() throws Exception { // wait for the connector tasks to commit all records. connectorHandle.awaitCommits(RECORD_TRANSFER_DURATION_MS); - Map expectedRecordCounts = Map.of(topic, (long) (numRecords / 2)); + Map expectedRecordCounts = singletonMap(topic, (long) (numRecords / 2)); assertObservedRecords(observedRecords, expectedRecordCounts); // delete connector diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java index 8c32f2d33be2f..aa715667d24c4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractHerderTest.java @@ -18,16 +18,11 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.clients.producer.ProducerConfig; -import org.apache.kafka.common.MetricName; import org.apache.kafka.common.config.ConfigDef; import org.apache.kafka.common.config.ConfigTransformer; import org.apache.kafka.common.config.ConfigValue; import org.apache.kafka.common.config.SaslConfigs; import org.apache.kafka.common.config.provider.DirectoryConfigProvider; -import org.apache.kafka.common.metrics.KafkaMetric; -import org.apache.kafka.common.metrics.Measurable; -import org.apache.kafka.common.metrics.Monitorable; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.security.oauthbearer.internals.unsecured.OAuthBearerUnsecuredLoginCallbackHandler; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.connector.Connector; @@ -55,7 +50,6 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConfigBackingStore; -import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; @@ -67,16 +61,16 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; @@ -154,27 +148,27 @@ public class AbstractHerderTest { private static final ClusterConfigState SNAPSHOT = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); private static final ClusterConfigState SNAPSHOT_NO_TASKS = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), - Map.of(), - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); private final String workerId = "workerId"; private final String kafkaClusterId = "I4ZmrWqfT2e-upky_4fdPA"; @@ -183,7 +177,6 @@ public class AbstractHerderTest { private final ConnectorClientConfigOverridePolicy noneConnectorClientConfigOverridePolicy = new NoneConnectorClientConfigOverridePolicy(); @Mock private Worker worker; - @Mock private WorkerConfig workerConfig; @Mock private WorkerConfigTransformer transformer; @Mock private ConfigBackingStore configStore; @Mock private StatusBackingStore statusStore; @@ -196,7 +189,7 @@ public void testConnectors() { AbstractHerder herder = testHerder(); when(configStore.snapshot()).thenReturn(SNAPSHOT); - assertEquals(Set.of(CONN1), Set.copyOf(herder.connectors())); + assertEquals(Collections.singleton(CONN1), new HashSet<>(herder.connectors())); } @Test @@ -213,12 +206,12 @@ public void testConnectorClientConfigOverridePolicyClose() { public void testConnectorStatus() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); - when(worker.getPlugins()).thenReturn(plugins); - AbstractHerder herder = testHerder(); - when(herder.rawConfig(connectorName)).thenReturn(Map.of( + when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); + when(herder.plugins()).thenReturn(plugins); + + when(herder.rawConfig(connectorName)).thenReturn(Collections.singletonMap( ConnectorConfig.CONNECTOR_CLASS_CONFIG, SampleSourceConnector.class.getName() )); @@ -226,7 +219,7 @@ public void testConnectorStatus() { .thenReturn(new ConnectorStatus(connectorName, AbstractStatus.State.RUNNING, workerId, generation)); when(statusStore.getAll(connectorName)) - .thenReturn(List.of( + .thenReturn(Collections.singletonList( new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation))); ConnectorStateInfo state = herder.connectorStatus(connectorName); @@ -247,19 +240,19 @@ public void testConnectorStatus() { public void testConnectorStatusMissingPlugin() { ConnectorTaskId taskId = new ConnectorTaskId(connectorName, 0); - when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("Unable to find class")); - when(worker.getPlugins()).thenReturn(plugins); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString())).thenThrow(new ConnectException("Unable to find class")); + when(herder.plugins()).thenReturn(plugins); + when(herder.rawConfig(connectorName)) - .thenReturn(Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); + .thenReturn(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, "missing")); when(statusStore.get(connectorName)) .thenReturn(new ConnectorStatus(connectorName, AbstractStatus.State.RUNNING, workerId, generation)); when(statusStore.getAll(connectorName)) - .thenReturn(List.of( + .thenReturn(Collections.singletonList( new TaskStatus(taskId, AbstractStatus.State.UNASSIGNED, workerId, generation))); ConnectorStateInfo state = herder.connectorStatus(connectorName); @@ -278,19 +271,18 @@ public void testConnectorStatusMissingPlugin() { @Test public void testConnectorInfo() { - - when(plugins.newConnector(anyString(), any())).thenReturn(new SampleSourceConnector()); - when(worker.getPlugins()).thenReturn(plugins); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString())).thenReturn(new SampleSourceConnector()); + when(herder.plugins()).thenReturn(plugins); + when(configStore.snapshot()).thenReturn(SNAPSHOT); ConnectorInfo info = herder.connectorInfo(CONN1); assertEquals(CONN1, info.name()); assertEquals(CONN1_CONFIG, info.config()); - assertEquals(List.of(TASK0, TASK1, TASK2), info.tasks()); + assertEquals(Arrays.asList(TASK0, TASK1, TASK2), info.tasks()); assertEquals(ConnectorType.SOURCE, info.type()); } @@ -318,19 +310,18 @@ public void testResumeConnector() { @Test public void testConnectorInfoMissingPlugin() { - - when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); - when(worker.getPlugins()).thenReturn(plugins); - AbstractHerder herder = testHerder(); + when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); + when(herder.plugins()).thenReturn(plugins); + when(configStore.snapshot()).thenReturn(SNAPSHOT); ConnectorInfo info = herder.connectorInfo(CONN1); assertEquals(CONN1, info.name()); assertEquals(CONN1_CONFIG, info.config()); - assertEquals(List.of(TASK0, TASK1, TASK2), info.tasks()); + assertEquals(Arrays.asList(TASK0, TASK1, TASK2), info.tasks()); assertEquals(ConnectorType.UNKNOWN, info.type()); } @@ -466,8 +457,8 @@ public void testBuildRestartPlanForNoRestart() { public void testConfigValidationEmptyConfig() { AbstractHerder herder = createConfigValidationHerder(SampleSourceConnector.class, noneConnectorClientConfigOverridePolicy, 0); - assertThrows(BadRequestException.class, () -> herder.validateConnectorConfig(Map.of(), s -> null, false)); - verify(transformer).transform(Map.of()); + assertThrows(BadRequestException.class, () -> herder.validateConnectorConfig(Collections.emptyMap(), s -> null, false)); + verify(transformer).transform(Collections.emptyMap()); assertEquals(worker.getPlugins(), plugins); } @@ -476,21 +467,21 @@ public void testConfigValidationMissingName() { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); - Map config = Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); + Map config = Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); // We expect there to be errors due to the missing name and .... Note that these assertions depend heavily on // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(connectorClass.getName(), result.name()); - assertEquals(List.of(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, + assertEquals(Arrays.asList(ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, ConnectorConfig.ERROR_GROUP, SourceConnectorConfig.TOPIC_CREATION_GROUP, SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_GROUP, SourceConnectorConfig.OFFSETS_TOPIC_GROUP), result.groups()); assertEquals(2, result.errorCount()); - Map infos = result.configs().stream() + Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); // Base connector config has 15 fields, connector's configs add 7 - assertEquals(26, infos.size()); + assertEquals(22, infos.size()); // Missing name should generate an error assertEquals(ConnectorConfig.NAME_CONFIG, infos.get(ConnectorConfig.NAME_CONFIG).configValue().name()); @@ -564,14 +555,12 @@ public void testConfigValidationTopicsRegexWithDlq() { } @Test - @SuppressWarnings("rawtypes") - public void testConfigValidationTransformsExtendResults() throws ClassNotFoundException { + public void testConfigValidationTransformsExtendResults() { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); // 2 transform aliases defined -> 2 plugin lookups - Mockito.lenient().when(plugins.transformations()).thenReturn(Set.of(transformationPluginDesc())); - Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); + when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); // Define 2 transformations. One has a class defined and so can get embedded configs, the other is missing // class info that should generate an error. @@ -582,14 +571,13 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx config.put(ConnectorConfig.TRANSFORMS_CONFIG + ".xformA.type", SampleTransformation.class.getName()); config.put("required", "value"); // connector required config ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); - - assertEquals(ConnectorType.SOURCE, herder.connectorType(config)); + assertEquals(herder.connectorType(config), ConnectorType.SOURCE); // We expect there to be errors due to the missing name and .... Note that these assertions depend heavily on // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(connectorClass.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = List.of( + List expectedGroups = Arrays.asList( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -602,9 +590,9 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - Map infos = result.configs().stream() + Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(33, infos.size()); + assertEquals(27, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); @@ -619,15 +607,12 @@ public void testConfigValidationTransformsExtendResults() throws ClassNotFoundEx } @Test - @SuppressWarnings("rawtypes") - public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundException { + public void testConfigValidationPredicatesExtendResults() { final Class connectorClass = SampleSourceConnector.class; AbstractHerder herder = createConfigValidationHerder(connectorClass, noneConnectorClientConfigOverridePolicy); - Mockito.lenient().when(plugins.transformations()).thenReturn(Set.of(transformationPluginDesc())); - Mockito.lenient().when(plugins.predicates()).thenReturn(Set.of(predicatePluginDesc())); - Mockito.lenient().when(plugins.newPlugin(SampleTransformation.class.getName(), null, classLoader)).thenReturn(new SampleTransformation()); - Mockito.lenient().when(plugins.newPlugin(SamplePredicate.class.getName(), null, classLoader)).thenReturn(new SamplePredicate()); + when(plugins.transformations()).thenReturn(Collections.singleton(transformationPluginDesc())); + when(plugins.predicates()).thenReturn(Collections.singleton(predicatePluginDesc())); // Define 2 predicates. One has a class defined and so can get embedded configs, the other is missing // class info that should generate an error. @@ -648,7 +633,7 @@ public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundEx // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(connectorClass.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = List.of( + List expectedGroups = Arrays.asList( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -662,9 +647,9 @@ public void testConfigValidationPredicatesExtendResults() throws ClassNotFoundEx ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - Map infos = result.configs().stream() + Map infos = result.values().stream() .collect(Collectors.toMap(info -> info.configKey().name(), Function.identity())); - assertEquals(36, infos.size()); + assertEquals(29, infos.size()); // Should get 2 type fields from the transforms, first adds its own config since it has a valid class assertEquals("transforms.xformA.type", infos.get("transforms.xformA.type").configValue().name()); assertTrue(infos.get("transforms.xformA.type").configValue().errors().isEmpty()); @@ -708,13 +693,13 @@ public void testConfigValidationPrincipalOnlyOverride() { config.put(saslConfigKey, "jaas_config"); ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); - assertEquals(ConnectorType.SOURCE, herder.connectorType(config)); + assertEquals(herder.connectorType(config), ConnectorType.SOURCE); // We expect there to be errors due to now allowed override policy for ACKS.... Note that these assertions depend heavily on // the config fields for SourceConnectorConfig, but we expect these to change rarely. assertEquals(SampleSourceConnector.class.getName(), result.name()); // Each transform also gets its own group - List expectedGroups = List.of( + List expectedGroups = Arrays.asList( ConnectorConfig.COMMON_GROUP, ConnectorConfig.TRANSFORMS_GROUP, ConnectorConfig.PREDICATES_GROUP, @@ -725,11 +710,11 @@ public void testConfigValidationPrincipalOnlyOverride() { ); assertEquals(expectedGroups, result.groups()); assertEquals(1, result.errorCount()); - // Base connector config has 19 fields, connector's configs add 7, and 2 producer overrides - assertEquals(28, result.configs().size()); - assertTrue(result.configs().stream().anyMatch( + // Base connector config has 15 fields, connector's configs add 7, and 2 producer overrides + assertEquals(24, result.values().size()); + assertTrue(result.values().stream().anyMatch( configInfo -> ackConfigKey.equals(configInfo.configValue().name()) && !configInfo.configValue().errors().isEmpty())); - assertTrue(result.configs().stream().anyMatch( + assertTrue(result.values().stream().anyMatch( configInfo -> saslConfigKey.equals(configInfo.configValue().name()) && configInfo.configValue().errors().isEmpty())); verifyValidationIsolation(); @@ -767,10 +752,10 @@ public void testConfigValidationAllOverride() { overriddenClientConfigs.add(loginCallbackHandlerConfigKey); ConfigInfos result = herder.validateConnectorConfig(config, s -> null, false); - assertEquals(ConnectorType.SOURCE, herder.connectorType(config)); + assertEquals(herder.connectorType(config), ConnectorType.SOURCE); Map validatedOverriddenClientConfigs = new HashMap<>(); - for (ConfigInfo configInfo : result.configs()) { + for (ConfigInfo configInfo : result.values()) { String configName = configInfo.configKey().name(); if (overriddenClientConfigs.contains(configName)) { validatedOverriddenClientConfigs.put(configName, configInfo.configValue().value()); @@ -785,51 +770,6 @@ public void testConfigValidationAllOverride() { verifyValidationIsolation(); } - static final class TestClientConfigOverridePolicy extends AllConnectorClientConfigOverridePolicy implements Monitorable { - - private static MetricName metricName = null; - private int count = 0; - - @Override - protected boolean isAllowed(ConfigValue configValue) { - count++; - return super.isAllowed(configValue); - } - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - metricName = metrics.metricName("name", "description", new LinkedHashMap<>()); - metrics.addMetric(metricName, (Measurable) (config, now) -> count); - } - } - - @Test - public void testClientConfigOverridePolicyWithMetrics() { - final Class connectorClass = SampleSourceConnector.class; - AbstractHerder herder = createConfigValidationHerder(connectorClass, new TestClientConfigOverridePolicy()); - - Map config = new HashMap<>(); - config.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connectorClass.getName()); - config.put(ConnectorConfig.NAME_CONFIG, "connector-name"); - config.put("required", "value"); - - Map overrides = Map.of( - producerOverrideKey(ProducerConfig.MAX_REQUEST_SIZE_CONFIG), "420", - producerOverrideKey(ProducerConfig.MAX_BLOCK_MS_CONFIG), "28980", - producerOverrideKey(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG), "true"); - config.putAll(overrides); - - herder.validateConnectorConfig(config, s -> null, false); - - Map metrics = herder.worker.metrics().metrics().metrics(); - assertTrue(metrics.containsKey(TestClientConfigOverridePolicy.metricName)); - assertEquals((double) overrides.size(), metrics.get(TestClientConfigOverridePolicy.metricName).metricValue()); - - herder.stopServices(); - metrics = herder.worker.metrics().metrics().metrics(); - assertFalse(metrics.containsKey(TestClientConfigOverridePolicy.metricName)); - } - @Test public void testReverseTransformConfigs() { // Construct a task config with constant values for TEST_KEY and TEST_KEY2 @@ -854,12 +794,12 @@ public void testReverseTransformConfigs() { } private void assertErrorForKey(ConfigInfos configInfos, String testKey) { - final List errorsForKey = configInfos.configs().stream() + final List errorsForKey = configInfos.values().stream() .map(ConfigInfo::configValue) .filter(configValue -> configValue.name().equals(testKey)) .map(ConfigValueInfo::errors) .flatMap(Collection::stream) - .toList(); + .collect(Collectors.toList()); assertEquals(1, errorsForKey.size()); } @@ -889,7 +829,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithNoErrors( addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = List.of("groupB", "group C"); + List groups = Arrays.asList("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); addValue(values, "config.b1", "value.b1"); @@ -899,7 +839,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithNoErrors( ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.configs().size()); + assertEquals(values.size(), infos.values().size()); assertEquals(0, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -920,7 +860,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithSomeError addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = List.of("groupB", "group C"); + List groups = Arrays.asList("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); addValue(values, "config.b1", "value.b1"); @@ -930,7 +870,7 @@ public void testGenerateResultWithConfigValuesAllUsingConfigKeysAndWithSomeError ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.configs().size()); + assertEquals(values.size(), infos.values().size()); assertEquals(1, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -951,7 +891,7 @@ public void testGenerateResultWithConfigValuesMoreThanConfigKeysAndWithSomeError addConfigKey(keys, "config.b2", "group B"); addConfigKey(keys, "config.c1", "group C"); - List groups = List.of("groupB", "group C"); + List groups = Arrays.asList("groupB", "group C"); List values = new ArrayList<>(); addValue(values, "config.a1", "value.a1"); addValue(values, "config.b1", "value.b1"); @@ -963,7 +903,7 @@ public void testGenerateResultWithConfigValuesMoreThanConfigKeysAndWithSomeError ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.configs().size()); + assertEquals(values.size(), infos.values().size()); assertEquals(2, infos.errorCount()); assertInfoKey(infos, "config.a1", null); assertInfoKey(infos, "config.b1", "group B"); @@ -996,7 +936,7 @@ public void testGenerateResultWithConfigValuesWithNoConfigKeysAndWithSomeErrors( ConfigInfos infos = AbstractHerder.generateResult(name, keys, values, groups); assertEquals(name, infos.name()); assertEquals(groups, infos.groups()); - assertEquals(values.size(), infos.configs().size()); + assertEquals(values.size(), infos.values().size()); assertEquals(2, infos.errorCount()); assertNoInfoKey(infos, "config.a1"); assertNoInfoKey(infos, "config.b1"); @@ -1100,8 +1040,8 @@ private void testConnectorPluginConfig( ) throws ClassNotFoundException { AbstractHerder herder = testHerder(); - when(plugins.pluginClass(pluginName, null)).then(invocation -> newPluginInstance.get().getClass()); - when(plugins.newPlugin(anyString(), any())).then(invocation -> newPluginInstance.get()); + when(plugins.pluginClass(pluginName)).then(invocation -> newPluginInstance.get().getClass()); + when(plugins.newPlugin(anyString())).then(invocation -> newPluginInstance.get()); when(herder.plugins()).thenReturn(plugins); List configs = herder.connectorPluginConfig(pluginName); @@ -1120,7 +1060,7 @@ public void testGetConnectorConfigDefWithBadName() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString(), any())).thenThrow(new ClassNotFoundException()); + when(plugins.pluginClass(anyString())).thenThrow(new ClassNotFoundException()); assertThrows(NotFoundException.class, () -> herder.connectorPluginConfig(connName)); } @@ -1130,18 +1070,18 @@ public void testGetConnectorConfigDefWithInvalidPluginType() throws Exception { String connName = "AnotherPlugin"; AbstractHerder herder = testHerder(); when(worker.getPlugins()).thenReturn(plugins); - when(plugins.pluginClass(anyString(), any())).thenReturn((Class) Object.class); - when(plugins.newPlugin(anyString(), any())).thenReturn(new DirectoryConfigProvider()); + when(plugins.pluginClass(anyString())).thenReturn((Class) Object.class); + when(plugins.newPlugin(anyString())).thenReturn(new DirectoryConfigProvider()); assertThrows(BadRequestException.class, () -> herder.connectorPluginConfig(connName)); } @Test public void testGetConnectorTypeWithMissingPlugin() { String connName = "AnotherPlugin"; - when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString(), any())).thenThrow(new ConnectException("No class found")); AbstractHerder herder = testHerder(); - assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Map.of(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); + when(worker.getPlugins()).thenReturn(plugins); + when(plugins.newConnector(anyString())).thenThrow(new ConnectException("No class found")); + assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.singletonMap(ConnectorConfig.CONNECTOR_CLASS_CONFIG, connName))); } @Test @@ -1153,7 +1093,7 @@ public void testGetConnectorTypeWithNullConfig() { @Test public void testGetConnectorTypeWithEmptyConfig() { AbstractHerder herder = testHerder(); - assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Map.of())); + assertEquals(ConnectorType.UNKNOWN, herder.connectorType(Collections.emptyMap())); } @Test @@ -1168,9 +1108,9 @@ public void testConnectorOffsetsConnectorNotFound() { @Test public void testConnectorOffsets() throws Exception { - ConnectorOffsets offsets = new ConnectorOffsets(List.of( - new ConnectorOffset(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), - new ConnectorOffset(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")) + ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList( + new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")) )); @SuppressWarnings("unchecked") ArgumentCaptor> workerCallback = ArgumentCaptor.forClass(Callback.class); @@ -1201,7 +1141,7 @@ public void testTaskConfigComparison() { when(snapshot.taskCount(CONN1)).thenReturn(TASK_CONFIG.size()); List> alteredTaskConfigs = new ArrayList<>(TASK_CONFIGS); - alteredTaskConfigs.set(alteredTaskConfigs.size() - 1, Map.of()); + alteredTaskConfigs.set(alteredTaskConfigs.size() - 1, Collections.emptyMap()); // Last task config is different; should report a change assertTrue(AbstractHerder.taskConfigsChanged(snapshot, CONN1, alteredTaskConfigs)); @@ -1218,15 +1158,15 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { ClusterConfigState snapshotWithNoAppliedConfig = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of() + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet() ); assertTrue(AbstractHerder.taskConfigsChanged(snapshotWithNoAppliedConfig, CONN1, TASK_CONFIGS)); @@ -1236,15 +1176,15 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { ClusterConfigState snapshotWithDifferentAppliedConfig = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(appliedConfig)), - Set.of(), - Set.of() + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(appliedConfig)), + Collections.emptySet(), + Collections.emptySet() ); assertTrue(AbstractHerder.taskConfigsChanged(snapshotWithDifferentAppliedConfig, CONN1, TASK_CONFIGS)); } @@ -1252,12 +1192,12 @@ public void testTaskConfigsChangedWhenAppliedConnectorConfigDiffers() { protected void addConfigKey(Map keys, String name, String group) { ConfigDef configDef = new ConfigDef().define(name, ConfigDef.Type.STRING, null, null, ConfigDef.Importance.HIGH, "doc", group, 10, - ConfigDef.Width.MEDIUM, "display name", List.of(), null, null); + ConfigDef.Width.MEDIUM, "display name", Collections.emptyList(), null, null); keys.putAll(configDef.configKeys()); } protected void addValue(List values, String name, String value, String... errors) { - values.add(new ConfigValue(name, value, new ArrayList<>(), List.of(errors))); + values.add(new ConfigValue(name, value, new ArrayList<>(), Arrays.asList(errors))); } protected void assertInfoKey(ConfigInfos infos, String name, String group) { @@ -1275,11 +1215,11 @@ protected void assertInfoValue(ConfigInfos infos, String name, String value, Str ConfigValueInfo info = findInfo(infos, name).configValue(); assertEquals(name, info.name()); assertEquals(value, info.value()); - assertEquals(List.of(errors), info.errors()); + assertEquals(Arrays.asList(errors), info.errors()); } protected ConfigInfo findInfo(ConfigInfos infos, String name) { - return infos.configs() + return infos.values() .stream() .filter(i -> i.configValue().name().equals(name)) .findFirst() @@ -1291,7 +1231,7 @@ private void testConfigProviderRegex(String rawConnConfig) { } private void testConfigProviderRegex(String rawConnConfig, boolean expected) { - Set keys = keysWithVariableValues(Map.of("key", rawConnConfig), ConfigTransformer.DEFAULT_PATTERN); + Set keys = keysWithVariableValues(Collections.singletonMap("key", rawConnConfig), ConfigTransformer.DEFAULT_PATTERN); boolean actual = !keys.isEmpty() && keys.contains("key"); assertEquals(expected, actual, String.format("%s should have matched regex", rawConnConfig)); } @@ -1304,14 +1244,15 @@ private AbstractHerder createConfigValidationHerder(Class c private AbstractHerder createConfigValidationHerder(Class connectorClass, ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy, int countOfCallingNewConnector) { + + AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); + // Call to validateConnectorConfig when(worker.configTransformer()).thenReturn(transformer); @SuppressWarnings("unchecked") final ArgumentCaptor> mapArgumentCaptor = ArgumentCaptor.forClass(Map.class); when(transformer.transform(mapArgumentCaptor.capture())).thenAnswer(invocation -> mapArgumentCaptor.getValue()); when(worker.getPlugins()).thenReturn(plugins); - - AbstractHerder herder = testHerder(connectorClientConfigOverridePolicy); final Connector connector; try { connector = connectorClass.getConstructor().newInstance(); @@ -1329,24 +1270,19 @@ private AbstractHerder testHerder() { } private AbstractHerder testHerder(ConnectorClientConfigOverridePolicy connectorClientConfigOverridePolicy) { - ConnectMetrics connectMetrics = new MockConnectMetrics(); - when(worker.metrics()).thenReturn(connectMetrics); return mock(AbstractHerder.class, withSettings() .useConstructor(worker, workerId, kafkaClusterId, statusStore, configStore, connectorClientConfigOverridePolicy, Time.SYSTEM) .defaultAnswer(CALLS_REAL_METHODS)); } - @SuppressWarnings({"unchecked", "rawtypes"}) private void mockValidationIsolation(String connectorClass, Connector connector) { - when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); - when(worker.config()).thenReturn(workerConfig); - when(plugins.newConnector(anyString(), any())).thenReturn(connector); - when(plugins.pluginLoader(connectorClass, null)).thenReturn(classLoader); + when(plugins.newConnector(connectorClass)).thenReturn(connector); + when(plugins.connectorLoader(connectorClass)).thenReturn(classLoader); when(plugins.withClassLoader(classLoader)).thenReturn(loaderSwap); } private void verifyValidationIsolation() { - verify(plugins).newConnector(anyString(), any()); + verify(plugins).newConnector(anyString()); verify(plugins).withClassLoader(classLoader); verify(loaderSwap).close(); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java index d5b15dde76f22..f33e9bc514b6c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/AbstractWorkerSourceTaskTest.java @@ -29,26 +29,23 @@ import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.errors.ConnectException; import org.apache.kafka.connect.errors.RetriableException; import org.apache.kafka.connect.header.ConnectHeaders; -import org.apache.kafka.connect.integration.TestableSourceConnector; +import org.apache.kafka.connect.integration.MonitorableSourceConnector; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; import org.apache.kafka.connect.runtime.errors.ProcessingContext; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; import org.apache.kafka.connect.storage.CloseableOffsetStorageReader; -import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.ConnectorOffsetBackingStore; import org.apache.kafka.connect.storage.Converter; import org.apache.kafka.connect.storage.HeaderConverter; @@ -73,6 +70,8 @@ import java.nio.ByteBuffer; import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -83,7 +82,7 @@ import java.util.stream.Collectors; import java.util.stream.Stream; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -119,8 +118,8 @@ public class AbstractWorkerSourceTaskTest { private static final String TOPIC = "topic"; private static final String OTHER_TOPIC = "other-topic"; - private static final Map PARTITION = Map.of("key", "partition".getBytes()); - private static final Map OFFSET = Map.of("key", 12); + private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); + private static final Map OFFSET = Collections.singletonMap("key", 12); // Connect-format data private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA; @@ -132,7 +131,8 @@ public class AbstractWorkerSourceTaskTest { private static final byte[] SERIALIZED_KEY = "converted-key".getBytes(); private static final byte[] SERIALIZED_RECORD = "converted-record".getBytes(); - @Mock private SourceTask sourceTask; + @Mock + private SourceTask sourceTask; @Mock private TopicAdmin admin; @Mock private KafkaProducer producer; @Mock private Converter keyConverter; @@ -143,9 +143,8 @@ public class AbstractWorkerSourceTaskTest { @Mock private OffsetStorageWriter offsetWriter; @Mock private ConnectorOffsetBackingStore offsetStore; @Mock private StatusBackingStore statusBackingStore; - @Mock private WorkerTransactionContext workerTransactionContext; + @Mock private WorkerSourceTaskContext sourceTaskContext; @Mock private TaskStatus.Listener statusListener; - @Mock private ClusterConfigState configState; private final ConnectorTaskId taskId = new ConnectorTaskId("job", 0); private final ConnectorTaskId taskId1 = new ConnectorTaskId("job", 1); @@ -169,7 +168,6 @@ public void setup() { private Map workerProps() { Map props = new HashMap<>(); - props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -181,7 +179,7 @@ private Map sourceConnectorPropsWithGroups() { // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, TOPIC); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -252,7 +250,7 @@ public void testSendRecordsConvertsData() { createWorkerTask(); // Can just use the same record for key and value - List records = List.of( + List records = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD) ); @@ -281,7 +279,7 @@ public void testSendRecordsPropagatesTimestamp() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = List.of( + workerTask.toSend = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); workerTask.sendRecords(); @@ -301,7 +299,7 @@ public void testSendRecordsCorruptTimestamp() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); expectApplyTransformationChain(); - workerTask.toSend = List.of( + workerTask.toSend = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); assertThrows(InvalidRecordException.class, workerTask::sendRecords); @@ -318,7 +316,7 @@ public void testSendRecordsNoTimestamp() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = List.of( + workerTask.toSend = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, timestamp) ); workerTask.sendRecords(); @@ -344,7 +342,7 @@ public void testHeaders() { expectApplyTransformationChain(); expectTopicCreation(TOPIC); - workerTask.toSend = List.of( + workerTask.toSend = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD, null, connectHeaders) ); @@ -366,7 +364,7 @@ public void testHeadersWithCustomConverter() throws Exception { SampleConverterWithHeaders testConverter = new SampleConverterWithHeaders(); createWorkerTask(stringConverter, testConverter, stringConverter, RetryWithToleranceOperatorTest.noneOperator(), - List::of, transformationChain); + Collections::emptyList, transformationChain); expectSendRecord(null); expectApplyTransformationChain(); @@ -382,7 +380,7 @@ public void testHeadersWithCustomConverter() throws Exception { org.apache.kafka.connect.header.Headers headersB = new ConnectHeaders() .addString("encoding", encodingB); - workerTask.toSend = List.of( + workerTask.toSend = Arrays.asList( new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "a", Schema.STRING_SCHEMA, stringA, null, headersA), new SourceRecord(PARTITION, OFFSET, "topic", null, Schema.STRING_SCHEMA, "b", @@ -425,13 +423,13 @@ public void testTopicCreateWhenTopicExists() { expectPreliminaryCalls(TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); expectSendRecord(emptyHeaders()); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); verifySendRecord(2); @@ -451,11 +449,11 @@ public void testSendRecordsTopicDescribeRetries() { when(admin.describeTopics(TOPIC)) .thenThrow(new RetriableException(new TimeoutException("timeout"))) - .thenReturn(Map.of()); + .thenReturn(Collections.emptyMap()); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); - assertEquals(List.of(record1, record2), workerTask.toSend); + assertEquals(Arrays.asList(record1, record2), workerTask.toSend); verify(admin, never()).createOrFindTopics(any(NewTopic.class)); verifyNoMoreInteractions(admin); @@ -476,16 +474,16 @@ public void testSendRecordsTopicCreateRetries() { expectPreliminaryCalls(TOPIC); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))) // First call to create the topic times out .thenThrow(new RetriableException(new TimeoutException("timeout"))) // Next attempt succeeds .thenReturn(createdTopic(TOPIC)); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); - assertEquals(List.of(record1, record2), workerTask.toSend); + assertEquals(Arrays.asList(record1, record2), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -508,9 +506,9 @@ public void testSendRecordsTopicDescribeRetriesMidway() { expectPreliminaryCalls(OTHER_TOPIC); when(admin.describeTopics(anyString())) - .thenReturn(Map.of()) + .thenReturn(Collections.emptyMap()) .thenThrow(new RetriableException(new TimeoutException("timeout"))) - .thenReturn(Map.of()); + .thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenAnswer( (Answer) invocation -> { NewTopic newTopic = invocation.getArgument(0); @@ -518,9 +516,9 @@ public void testSendRecordsTopicDescribeRetriesMidway() { }); // Try to send 3, make first pass, second fail. Should save last record - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); workerTask.sendRecords(); - assertEquals(List.of(record3), workerTask.toSend); + assertEquals(Collections.singletonList(record3), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -531,10 +529,10 @@ public void testSendRecordsTopicDescribeRetriesMidway() { ArgumentCaptor newTopicCaptor = ArgumentCaptor.forClass(NewTopic.class); verify(admin, times(2)).createOrFindTopics(newTopicCaptor.capture()); - assertEquals(List.of(TOPIC, OTHER_TOPIC), newTopicCaptor.getAllValues() + assertEquals(Arrays.asList(TOPIC, OTHER_TOPIC), newTopicCaptor.getAllValues() .stream() .map(NewTopic::name) - .toList()); + .collect(Collectors.toList())); } @Test @@ -549,16 +547,16 @@ public void testSendRecordsTopicCreateRetriesMidway() { expectPreliminaryCalls(TOPIC); expectPreliminaryCalls(OTHER_TOPIC); - when(admin.describeTopics(anyString())).thenReturn(Map.of()); + when(admin.describeTopics(anyString())).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))) .thenReturn(createdTopic(TOPIC)) .thenThrow(new RetriableException(new TimeoutException("timeout"))) .thenReturn(createdTopic(OTHER_TOPIC)); // Try to send 3, make first pass, second fail. Should save last record - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); workerTask.sendRecords(); - assertEquals(List.of(record3), workerTask.toSend); + assertEquals(Collections.singletonList(record3), workerTask.toSend); verifyTopicCreation(2, TOPIC, OTHER_TOPIC); // Second call to createOrFindTopics will throw // Next they all succeed @@ -580,7 +578,7 @@ public void testTopicDescribeFails() { new ConnectException(new TopicAuthorizationException("unauthorized")) ); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); } @@ -592,12 +590,12 @@ public void testTopicCreateFails() { SourceRecord record2 = new SourceRecord(PARTITION, OFFSET, TOPIC, 2, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD); expectPreliminaryCalls(TOPIC); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenThrow( new ConnectException(new TopicAuthorizationException("unauthorized")) ); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(admin).createOrFindTopics(any()); @@ -613,10 +611,10 @@ public void testTopicCreateFailsWithExceptionWhenCreateReturnsTopicNotCreatedOrF expectPreliminaryCalls(TOPIC); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(TopicAdmin.EMPTY_CREATION); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(admin).createOrFindTopics(any()); @@ -633,10 +631,10 @@ public void testTopicCreateSucceedsWhenCreateReturnsExistingTopicFound() { expectSendRecord(emptyHeaders()); expectApplyTransformationChain(); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(foundTopic(TOPIC)); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); ArgumentCaptor> sent = verifySendRecord(2); @@ -658,10 +656,10 @@ public void testTopicCreateSucceedsWhenCreateReturnsNewTopicFound() { expectSendRecord(emptyHeaders()); expectApplyTransformationChain(); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of()); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(createdTopic(TOPIC)); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); ArgumentCaptor> sent = verifySendRecord(2); @@ -687,13 +685,13 @@ public void testSendRecordsRetriableException() { when(transformationChain.apply(any(), eq(record2))).thenReturn(null); when(transformationChain.apply(any(), eq(record3))).thenReturn(record3); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); when(producer.send(any(), any())).thenThrow(new RetriableException("Retriable exception")).thenReturn(null); - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); // The first two records are filtered out / dropped by the transformation chain; only the third record will be attempted to be sent. // The producer throws a RetriableException the first time we try to send the third record @@ -720,11 +718,11 @@ public void testSendRecordsFailedTransformationErrorToleranceNone() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); - workerTask.toSend = List.of(record1); + workerTask.toSend = Arrays.asList(record1); // The transformation errored out so the error should be re-raised by sendRecords with error tolerance None Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); @@ -751,7 +749,10 @@ public void testSendRecordsFailedTransformationErrorToleranceAll() { expectConvertHeadersAndKeyValue(emptyHeaders(), TOPIC); - workerTask.toSend = List.of(record1); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + + workerTask.toSend = Arrays.asList(record1); // The transformation errored out so the error should be ignored & the record skipped with error tolerance all assertTrue(workerTask.sendRecords()); @@ -777,11 +778,11 @@ public void testSendRecordsConversionExceptionErrorToleranceNone() { // When we try to convert the key/value of each record, throw an exception throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, List.of(), List.of()); - TopicDescription topicDesc = new TopicDescription(TOPIC, false, List.of(topicPartitionInfo)); - when(admin.describeTopics(TOPIC)).thenReturn(Map.of(TOPIC, topicDesc)); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, null, Collections.emptyList(), Collections.emptyList()); + TopicDescription topicDesc = new TopicDescription(TOPIC, false, Collections.singletonList(topicPartitionInfo)); + when(admin.describeTopics(TOPIC)).thenReturn(Collections.singletonMap(TOPIC, topicDesc)); - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); // Send records should fail when errors.tolerance is none and the conversion call fails Exception exception = assertThrows(ConnectException.class, workerTask::sendRecords); @@ -812,9 +813,9 @@ public void testSendRecordsConversionExceptionErrorToleranceAll() { // When we try to convert the key/value of each record, throw an exception throwExceptionWhenConvertKey(emptyHeaders(), TOPIC); - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); - // With errors.tolerance to all, the failed conversion should simply skip the record, and record successful batch + // With errors.tolerance to all, the faiiled conversion should simply skip the record, and record successful batch assertTrue(workerTask.sendRecords()); } @@ -882,15 +883,15 @@ private void verifyTopicCreation(int times, String... topics) { @SuppressWarnings("SameParameterValue") private TopicAdmin.TopicCreationResponse createdTopic(String topic) { - Set created = Set.of(topic); - Set existing = Set.of(); + Set created = Collections.singleton(topic); + Set existing = Collections.emptySet(); return new TopicAdmin.TopicCreationResponse(created, existing); } @SuppressWarnings("SameParameterValue") private TopicAdmin.TopicCreationResponse foundTopic(String topic) { - Set created = Set.of(); - Set existing = Set.of(topic); + Set created = Collections.emptySet(); + Set existing = Collections.singleton(topic); return new TopicAdmin.TopicCreationResponse(created, existing); } @@ -914,8 +915,8 @@ private void expectConvertHeadersAndKeyValue(Headers headers, String topic) { when(valueConverter.fromConnectData(eq(topic), any(Headers.class), eq(RECORD_SCHEMA), eq(RECORD))) .thenReturn(SERIALIZED_RECORD); - assertEquals(SERIALIZED_KEY, keyConverter.fromConnectData(topic, headers, KEY_SCHEMA, KEY)); - assertEquals(SERIALIZED_RECORD, valueConverter.fromConnectData(topic, headers, RECORD_SCHEMA, RECORD)); + assertEquals(keyConverter.fromConnectData(topic, headers, KEY_SCHEMA, KEY), SERIALIZED_KEY); + assertEquals(valueConverter.fromConnectData(topic, headers, RECORD_SCHEMA, RECORD), SERIALIZED_RECORD); } private void throwExceptionWhenConvertKey(Headers headers, String topic) { @@ -944,26 +945,23 @@ private RecordHeaders emptyHeaders() { } private void createWorkerTask(TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { - createWorkerTask(keyConverter, valueConverter, headerConverter, toleranceOperator, List::of, + createWorkerTask(keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, transformationChain); } private void createWorkerTask() { createWorkerTask( - keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), List::of, transformationChain); + keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); } private void createWorkerTask(Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, RetryWithToleranceOperator retryWithToleranceOperator, Supplier>> errorReportersSupplier, - TransformationChain transformationChain) { - Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); + TransformationChain transformationChain) { workerTask = new AbstractWorkerSourceTask( - taskId, sourceTask, statusListener, TargetState.STARTED, configState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, transformationChain, - workerTransactionContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, + taskId, sourceTask, statusListener, TargetState.STARTED, keyConverter, valueConverter, headerConverter, transformationChain, + sourceTaskContext, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, metrics, errorHandlingMetrics, plugins.delegatingLoader(), Time.SYSTEM, retryWithToleranceOperator, - statusBackingStore, Runnable::run, errorReportersSupplier, null, TestPlugins.noOpLoaderSwap()) { + statusBackingStore, Runnable::run, errorReportersSupplier) { @Override protected void prepareToInitializeTask() { } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java index 8ba0316e20c0d..58924d79ecf65 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectMetricsTest.java @@ -18,36 +18,19 @@ import org.apache.kafka.clients.CommonClientConfigs; import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.metrics.KafkaMetric; -import org.apache.kafka.common.metrics.Measurable; -import org.apache.kafka.common.metrics.Monitorable; -import org.apache.kafka.common.metrics.PluginMetrics; import org.apache.kafka.common.metrics.Sensor; -import org.apache.kafka.common.metrics.internals.PluginMetricsImpl; import org.apache.kafka.common.metrics.stats.Avg; import org.apache.kafka.common.metrics.stats.Max; import org.apache.kafka.common.utils.MockTime; -import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroupId; -import org.apache.kafka.connect.source.SourceRecord; -import org.apache.kafka.connect.storage.Converter; -import org.apache.kafka.connect.storage.HeaderConverter; -import org.apache.kafka.connect.transforms.Transformation; -import org.apache.kafka.connect.transforms.predicates.Predicate; -import org.apache.kafka.connect.util.ConnectorTaskId; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.ValueSource; -import java.io.IOException; +import java.util.Collections; import java.util.HashMap; -import java.util.LinkedHashMap; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -60,19 +43,15 @@ public class ConnectMetricsTest { - private static final Map DEFAULT_WORKER_CONFIG = Map.of( - WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter", - WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter", - WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092" - ); - private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("connector", 0); - private static final LinkedHashMap TAGS = new LinkedHashMap<>(); - private ConnectMetrics metrics; + private static final Map DEFAULT_WORKER_CONFIG = new HashMap<>(); static { - TAGS.put("t1", "v1"); + DEFAULT_WORKER_CONFIG.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); + DEFAULT_WORKER_CONFIG.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); } + private ConnectMetrics metrics; + @BeforeEach public void setUp() { metrics = new ConnectMetrics("worker1", new WorkerConfig(WorkerConfig.baseConfigDef(), DEFAULT_WORKER_CONFIG), new MockTime(), "cluster-1"); @@ -192,221 +171,6 @@ public void testExplicitlyEnableJmxReporter() { cm.stop(); } - @Test - public void testConnectorPluginMetrics() throws Exception { - try (PluginMetricsImpl pluginMetrics = metrics.connectorPluginMetrics(CONNECTOR_TASK_ID.connector())) { - MetricName metricName = pluginMetrics.metricName("name", "description", TAGS); - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - } - } - - @Test - public void testTaskPluginMetrics() throws Exception { - try (PluginMetricsImpl pluginMetrics = metrics.taskPluginMetrics(CONNECTOR_TASK_ID)) { - MetricName metricName = pluginMetrics.metricName("name", "description", TAGS); - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - } - } - - static final class MonitorableConverter implements Converter, HeaderConverter, Monitorable { - - private int calls = 0; - private PluginMetrics pluginMetrics = null; - private MetricName metricName = null; - - @Override - public void withPluginMetrics(PluginMetrics pluginMetrics) { - this.pluginMetrics = pluginMetrics; - metricName = pluginMetrics.metricName("name", "description", TAGS); - pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); - } - - @Override - public void configure(Map configs, boolean isKey) { } - - @Override - public byte[] fromConnectData(String topic, Schema schema, Object value) { - calls++; - return new byte[0]; - } - - @Override - public SchemaAndValue toConnectData(String topic, byte[] value) { - calls++; - return null; - } - - @Override - public ConfigDef config() { - return Converter.super.config(); - } - - @Override - public void configure(Map configs) { } - - @Override - public SchemaAndValue toConnectHeader(String topic, String headerKey, byte[] value) { - calls++; - return null; - } - - @Override - public byte[] fromConnectHeader(String topic, String headerKey, Schema schema, Object value) { - calls++; - return new byte[0]; - } - } - - @ParameterizedTest - @ValueSource(booleans = {true, false}) - public void testWrapConverter(boolean isKey) throws IOException { - try (MonitorableConverter converter = new MonitorableConverter()) { - metrics.wrap(converter, CONNECTOR_TASK_ID, isKey); - assertNotNull(converter.pluginMetrics); - MetricName metricName = converter.metricName; - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); - expectedTags.put("converter", isKey ? "key" : "value"); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - KafkaMetric metric = metrics.metrics().metrics().get(metricName); - assertEquals(0.0, (double) metric.metricValue()); - converter.toConnectData("topic", new byte[]{}); - assertEquals(1.0, (double) metric.metricValue()); - converter.fromConnectData("topic", null, null); - assertEquals(2.0, (double) metric.metricValue()); - } - } - - @Test - public void testWrapHeaderConverter() throws IOException { - try (MonitorableConverter converter = new MonitorableConverter()) { - metrics.wrap(converter, CONNECTOR_TASK_ID); - assertNotNull(converter.pluginMetrics); - MetricName metricName = converter.metricName; - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); - expectedTags.put("converter", "header"); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - KafkaMetric metric = metrics.metrics().metrics().get(metricName); - assertEquals(0.0, (double) metric.metricValue()); - converter.toConnectHeader("topic", "header", new byte[]{}); - assertEquals(1.0, (double) metric.metricValue()); - converter.fromConnectHeader("topic", "header", null, null); - assertEquals(2.0, (double) metric.metricValue()); - } - } - - static final class MonitorableTransformation implements Transformation, Monitorable { - - private int calls = 0; - private PluginMetrics pluginMetrics = null; - private MetricName metricName = null; - - @Override - public void withPluginMetrics(PluginMetrics pluginMetrics) { - this.pluginMetrics = pluginMetrics; - metricName = pluginMetrics.metricName("name", "description", TAGS); - pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); - } - - @Override - public void configure(Map configs) { } - - @Override - public SourceRecord apply(SourceRecord record) { - calls++; - return null; - } - - @Override - public ConfigDef config() { - return null; - } - - @Override - public void close() { } - } - - @Test - public void testWrapTransformation() { - try (MonitorableTransformation transformation = new MonitorableTransformation()) { - metrics.wrap(transformation, CONNECTOR_TASK_ID, "alias"); - assertNotNull(transformation.pluginMetrics); - MetricName metricName = transformation.metricName; - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); - expectedTags.put("transformation", "alias"); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - KafkaMetric metric = metrics.metrics().metrics().get(metricName); - assertEquals(0.0, (double) metric.metricValue()); - transformation.apply(null); - assertEquals(1.0, (double) metric.metricValue()); - } - } - - static final class MonitorablePredicate implements Predicate, Monitorable { - - private int calls = 0; - private PluginMetrics pluginMetrics = null; - private MetricName metricName = null; - - @Override - public void withPluginMetrics(PluginMetrics pluginMetrics) { - this.pluginMetrics = pluginMetrics; - metricName = pluginMetrics.metricName("name", "description", TAGS); - pluginMetrics.addMetric(metricName, (Measurable) (config, now) -> calls); - } - - @Override - public void configure(Map configs) { } - - @Override - public ConfigDef config() { - return null; - } - - @Override - public boolean test(SourceRecord record) { - calls++; - return false; - } - - @Override - public void close() { } - } - - @Test - public void testWrapPredicate() { - try (MonitorablePredicate predicate = new MonitorablePredicate()) { - metrics.wrap(predicate, CONNECTOR_TASK_ID, "alias"); - assertNotNull(predicate.pluginMetrics); - MetricName metricName = predicate.metricName; - Map expectedTags = new LinkedHashMap<>(); - expectedTags.put("connector", CONNECTOR_TASK_ID.connector()); - expectedTags.put("task", String.valueOf(CONNECTOR_TASK_ID.task())); - expectedTags.put("predicate", "alias"); - expectedTags.putAll(TAGS); - assertEquals(expectedTags, metricName.tags()); - KafkaMetric metric = metrics.metrics().metrics().get(metricName); - assertEquals(0.0, (double) metric.metricValue()); - predicate.test(null); - assertEquals(1.0, (double) metric.metricValue()); - } - } - private Sensor addToGroup(ConnectMetrics connectMetrics, boolean shouldClose) { ConnectMetricsRegistry registry = connectMetrics.registry(); ConnectMetrics.MetricGroup metricGroup = connectMetrics.group(registry.taskGroupName(), @@ -424,6 +188,6 @@ private Sensor addToGroup(ConnectMetrics connectMetrics, boolean shouldClose) { } static MetricName metricName(String name) { - return new MetricName(name, "test_group", "metrics for testing", Map.of()); + return new MetricName(name, "test_group", "metrics for testing", Collections.emptyMap()); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java index 65b378921434d..6092f8ca7bdc7 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ConnectorConfigTest.java @@ -26,10 +26,10 @@ import org.apache.kafka.connect.sink.SinkRecord; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; -import org.apache.kafka.connect.util.ConnectorTaskId; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,18 +41,13 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.junit.jupiter.api.Assertions.fail; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class ConnectorConfigTest> { - private static final ConnectMetrics METRICS = new MockConnectMetrics(); - private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("test", 0); - public static final Plugins MOCK_PLUGINS = new Plugins(new HashMap<>()) { @Override public Set>> transformations() { - return Set.of(); + return Collections.emptySet(); } }; @@ -162,7 +157,7 @@ public void singleTransform() { props.put("transforms.a.type", SimpleTransformation.class.getName()); props.put("transforms.a.magic.number", "42"); final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS); + final List> transformationStages = config.transformationStages(); assertEquals(1, transformationStages.size()); final TransformationStage stage = transformationStages.get(0); assertEquals(SimpleTransformation.class, stage.transformClass()); @@ -191,7 +186,7 @@ public void multipleTransforms() { props.put("transforms.b.type", SimpleTransformation.class.getName()); props.put("transforms.b.magic.number", "84"); final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS); + final List> transformationStages = config.transformationStages(); assertEquals(2, transformationStages.size()); assertEquals(42, transformationStages.get(0).apply(DUMMY_RECORD).kafkaPartition().intValue()); assertEquals(84, transformationStages.get(1).apply(DUMMY_RECORD).kafkaPartition().intValue()); @@ -292,7 +287,7 @@ public void abstractPredicate() { private void assertTransformationStageWithPredicate(Map props, boolean expectedNegated) { final ConnectorConfig config = new ConnectorConfig(MOCK_PLUGINS, props); - final List> transformationStages = config.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS); + final List> transformationStages = config.transformationStages(); assertEquals(1, transformationStages.size()); TransformationStage stage = transformationStages.get(0); @@ -460,19 +455,13 @@ public static class Value> extends AbstractKeyValueTr } @Test - @SuppressWarnings("rawtypes") - public void testEnrichedConfigDef() throws ClassNotFoundException { + public void testEnrichedConfigDef() { String alias = "hdt"; String prefix = ConnectorConfig.TRANSFORMS_CONFIG + "." + alias + "."; Map props = new HashMap<>(); props.put(ConnectorConfig.TRANSFORMS_CONFIG, alias); - props.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, TestConnector.class.getName()); props.put(prefix + "type", HasDuplicateConfigTransformation.class.getName()); - Plugins mockPlugins = mock(Plugins.class); - when(mockPlugins.newPlugin(HasDuplicateConfigTransformation.class.getName(), - null, (ClassLoader) null)).thenReturn(new HasDuplicateConfigTransformation()); - when(mockPlugins.transformations()).thenReturn(Set.of()); - ConfigDef def = ConnectorConfig.enrich(mockPlugins, new ConfigDef(), props, false); + ConfigDef def = ConnectorConfig.enrich(MOCK_PLUGINS, new ConfigDef(), props, false); assertEnrichedConfigDef(def, prefix, HasDuplicateConfigTransformation.MUST_EXIST_KEY, ConfigDef.Type.BOOLEAN); assertEnrichedConfigDef(def, prefix, TransformationStage.PREDICATE_CONFIG, ConfigDef.Type.STRING); assertEnrichedConfigDef(def, prefix, TransformationStage.NEGATE_CONFIG, ConfigDef.Type.BOOLEAN); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java index a8e001544b35c..f4374d18500ea 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ErrorHandlingTaskTest.java @@ -25,7 +25,6 @@ import org.apache.kafka.clients.producer.KafkaProducer; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.config.ConfigDef; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.components.Versioned; @@ -34,7 +33,7 @@ import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.errors.RetriableException; -import org.apache.kafka.connect.integration.TestableSourceConnector; +import org.apache.kafka.connect.integration.MonitorableSourceConnector; import org.apache.kafka.connect.json.JsonConverter; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; @@ -44,7 +43,6 @@ import org.apache.kafka.connect.runtime.errors.WorkerErrantRecordReporter; import org.apache.kafka.connect.runtime.isolation.PluginClassLoader; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.sink.SinkConnector; import org.apache.kafka.connect.sink.SinkRecord; @@ -76,13 +74,16 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Set; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonList; import static org.apache.kafka.common.utils.Time.SYSTEM; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -184,7 +185,6 @@ public void setup(boolean enableTopicCreation) { workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put(TOPIC_CREATION_ENABLE_CONFIG, String.valueOf(enableTopicCreation)); workerConfig = new StandaloneConfig(workerProps); sourceConfig = new SourceConnectorConfig(plugins, sourceConnectorProps(TOPIC), true); @@ -195,7 +195,7 @@ private Map sourceConnectorProps(String topic) { // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -224,7 +224,7 @@ public void testErrorHandlingInSinkTasks(boolean enableTopicCreation) { LogReporter> reporter = new LogReporter.Sink(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator> retryWithToleranceOperator = operator(); - createSinkTask(initialState, retryWithToleranceOperator, List.of(reporter)); + createSinkTask(initialState, retryWithToleranceOperator, singletonList(reporter)); // valid json ConsumerRecord record1 = new ConsumerRecord<>( @@ -276,14 +276,14 @@ public void testErrorHandlingInSourceTasks(boolean enableTopicCreation) throws E LogReporter reporter = new LogReporter.Source(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator retryWithToleranceOperator = operator(); - createSourceTask(initialState, retryWithToleranceOperator, List.of(reporter)); + createSourceTask(initialState, retryWithToleranceOperator, singletonList(reporter)); // valid json Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build(); Struct struct1 = new Struct(valSchema).put("val", 1234); - SourceRecord record1 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct1); + SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1); Struct struct2 = new Struct(valSchema).put("val", 6789); - SourceRecord record2 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct2); + SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2); when(workerSourceTask.isStopping()) .thenReturn(false) @@ -293,8 +293,8 @@ public void testErrorHandlingInSourceTasks(boolean enableTopicCreation) throws E doReturn(true).when(workerSourceTask).commitOffsets(); when(sourceTask.poll()) - .thenReturn(List.of(record1)) - .thenReturn(List.of(record2)); + .thenReturn(singletonList(record1)) + .thenReturn(singletonList(record2)); expectTopicCreation(TOPIC); @@ -338,14 +338,14 @@ public void testErrorHandlingInSourceTasksWithBadConverter(boolean enableTopicCr LogReporter reporter = new LogReporter.Source(taskId, connConfig(reportProps), errorHandlingMetrics); RetryWithToleranceOperator retryWithToleranceOperator = operator(); - createSourceTask(initialState, retryWithToleranceOperator, List.of(reporter), badConverter()); + createSourceTask(initialState, retryWithToleranceOperator, singletonList(reporter), badConverter()); // valid json Schema valSchema = SchemaBuilder.struct().field("val", Schema.INT32_SCHEMA).build(); Struct struct1 = new Struct(valSchema).put("val", 1234); - SourceRecord record1 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct1); + SourceRecord record1 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct1); Struct struct2 = new Struct(valSchema).put("val", 6789); - SourceRecord record2 = new SourceRecord(Map.of(), Map.of(), TOPIC, PARTITION1, valSchema, struct2); + SourceRecord record2 = new SourceRecord(emptyMap(), emptyMap(), TOPIC, PARTITION1, valSchema, struct2); when(workerSourceTask.isStopping()) .thenReturn(false) @@ -355,8 +355,8 @@ public void testErrorHandlingInSourceTasksWithBadConverter(boolean enableTopicCr doReturn(true).when(workerSourceTask).commitOffsets(); when(sourceTask.poll()) - .thenReturn(List.of(record1)) - .thenReturn(List.of(record2)); + .thenReturn(singletonList(record1)) + .thenReturn(singletonList(record2)); expectTopicCreation(TOPIC); workerSourceTask.initialize(TASK_CONFIG); workerSourceTask.initializeAndStart(); @@ -390,7 +390,7 @@ private void assertSinkMetricValue(String name, double expected) { private void verifyInitializeSink() { verify(sinkTask).start(TASK_PROPS); verify(sinkTask).initialize(any(WorkerSinkTaskContext.class)); - verify(consumer).subscribe(eq(List.of(TOPIC)), + verify(consumer).subscribe(eq(singletonList(TOPIC)), any(ConsumerRebalanceListener.class)); } @@ -408,9 +408,9 @@ private void assertErrorHandlingMetricValue(String name, double expected) { private void expectTopicCreation(String topic) { if (enableTopicCreation) { - when(admin.describeTopics(topic)).thenReturn(Map.of()); - Set created = Set.of(topic); - Set existing = Set.of(); + when(admin.describeTopics(topic)).thenReturn(Collections.emptyMap()); + Set created = Collections.singleton(topic); + Set existing = Collections.emptySet(); TopicAdmin.TopicCreationResponse response = new TopicAdmin.TopicCreationResponse(created, existing); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(response); } @@ -424,19 +424,15 @@ private void createSinkTask(TargetState initialState, RetryWithToleranceOperator oo.put("schemas.enable", "false"); converter.configure(oo); - Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, "test"); TransformationChain, SinkRecord> sinkTransforms = - new TransformationChain<>(List.of(new TransformationStage<>(transformationPlugin, "test", null, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); + new TransformationChain<>(singletonList(new TransformationStage<>(new FaultyPassthrough())), retryWithToleranceOperator); - Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerSinkTask = new WorkerSinkTask( taskId, sinkTask, statusListener, initialState, workerConfig, - ClusterConfigState.EMPTY, metrics, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, - headerConverterPlugin, sinkTransforms, consumer, pluginLoader, time, + ClusterConfigState.EMPTY, metrics, converter, converter, errorHandlingMetrics, + headerConverter, sinkTransforms, consumer, pluginLoader, time, retryWithToleranceOperator, workerErrantRecordReporter, - statusBackingStore, () -> errorReporters, null, TestPlugins.noOpLoaderSwap()); + statusBackingStore, () -> errorReporters); } private void createSourceTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator, List> errorReporters) { @@ -460,22 +456,18 @@ private Converter badConverter() { private void createSourceTask(TargetState initialState, RetryWithToleranceOperator retryWithToleranceOperator, List> errorReporters, Converter converter) { - Plugin> transformationPlugin = metrics.wrap(new FaultyPassthrough(), taskId, "test"); - TransformationChain sourceTransforms = new TransformationChain<>(List.of( - new TransformationStage<>(transformationPlugin, "test", null, TestPlugins.noOpLoaderSwap())), retryWithToleranceOperator); + TransformationChain sourceTransforms = new TransformationChain<>(singletonList( + new TransformationStage<>(new FaultyPassthrough())), retryWithToleranceOperator); - Plugin keyConverterPlugin = metrics.wrap(converter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(converter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerSourceTask = spy(new WorkerSourceTask( - taskId, sourceTask, statusListener, initialState, keyConverterPlugin, - valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, + taskId, sourceTask, statusListener, initialState, converter, + converter, errorHandlingMetrics, headerConverter, sourceTransforms, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, workerConfig, ClusterConfigState.EMPTY, metrics, pluginLoader, time, retryWithToleranceOperator, - statusBackingStore, Runnable::run, () -> errorReporters, null, TestPlugins.noOpLoaderSwap())); + statusBackingStore, Runnable::run, () -> errorReporters)); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java index 4067f5aa59d72..4ee0f61572cdd 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/ExactlyOnceWorkerSourceTaskTest.java @@ -26,17 +26,15 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.integration.TestableSourceConnector; +import org.apache.kafka.connect.integration.MonitorableSourceConnector; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; @@ -68,6 +66,8 @@ import org.mockito.verification.VerificationMode; import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -82,7 +82,8 @@ import java.util.function.Consumer; import java.util.stream.Collectors; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static java.util.Collections.emptySet; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -116,7 +117,7 @@ @MockitoSettings(strictness = Strictness.WARN) public class ExactlyOnceWorkerSourceTaskTest { private static final String TOPIC = "topic"; - private static final Map PARTITION = Map.of("key", "partition".getBytes()); + private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); private static final Map OFFSET = offset(12); // Connect-format data @@ -166,7 +167,7 @@ public class ExactlyOnceWorkerSourceTaskTest { private static final SourceRecord SOURCE_RECORD_2 = new SourceRecord(PARTITION, OFFSET, TOPIC, null, KEY_SCHEMA, KEY, RECORD_SCHEMA, VALUE_2); - private static final List RECORDS = List.of(SOURCE_RECORD_1, SOURCE_RECORD_2); + private static final List RECORDS = Arrays.asList(SOURCE_RECORD_1, SOURCE_RECORD_2); private final AtomicReference pollLatch = new AtomicReference<>(new CountDownLatch(0)); private final AtomicReference> pollRecords = new AtomicReference<>(RECORDS); @@ -195,7 +196,6 @@ public void setup(boolean enableTopicCreation) throws Exception { Thread.sleep(10); return result; }); - when(sourceTask.version()).thenReturn(null); } @AfterEach @@ -220,8 +220,8 @@ public void teardown() throws Exception { } verify(statusBackingStore, MockitoUtils.anyTimes()).getTopic(any(), any()); + verify(offsetStore, MockitoUtils.anyTimes()).primaryOffsetsTopic(); - verify(sourceTask).version(); verifyNoMoreInteractions(statusListener, producer, sourceTask, admin, offsetWriter, statusBackingStore, offsetStore, preProducerCheck, postProducerCheck); if (metrics != null) metrics.stop(); @@ -229,7 +229,6 @@ public void teardown() throws Exception { private Map workerProps() { Map props = new HashMap<>(); - props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("internal.key.converter", "org.apache.kafka.connect.json.JsonConverter"); @@ -249,7 +248,7 @@ private Map sourceConnectorProps(SourceTask.TransactionBoundary // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, TOPIC); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -265,7 +264,7 @@ private Map sourceConnectorProps(SourceTask.TransactionBoundary } private static Map offset(int n) { - return Map.of("key", n); + return Collections.singletonMap("key", n); } private void createWorkerTask() { @@ -277,13 +276,10 @@ private void createWorkerTask(TargetState initialState) { } private void createWorkerTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { - Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); - workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, headerConverterPlugin, + workerTask = new ExactlyOnceWorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverter, valueConverter, headerConverter, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, clusterConfigState, metrics, errorHandlingMetrics, plugins.delegatingLoader(), time, RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, - sourceConfig, Runnable::run, preProducerCheck, postProducerCheck, List::of, null, TestPlugins.noOpLoaderSwap()); + sourceConfig, Runnable::run, preProducerCheck, postProducerCheck, Collections::emptyList); } @ParameterizedTest @@ -294,7 +290,7 @@ public void testRemoveMetrics(boolean enableTopicCreation) throws Exception { workerTask.removeMetrics(); - assertEquals(Set.of(), filterToTaskMetrics(metrics.metrics().metrics().keySet())); + assertEquals(emptySet(), filterToTaskMetrics(metrics.metrics().metrics().keySet())); } private Set filterToTaskMetrics(Set metricNames) { @@ -560,7 +556,7 @@ public void testPollReturnsNoRecords(boolean enableTopicCreation) throws Excepti createWorkerTask(); // Make sure the task returns empty batches from poll before we start polling it - pollRecords.set(List.of()); + pollRecords.set(Collections.emptyList()); when(offsetWriter.beginFlush()).thenReturn(false); @@ -636,7 +632,7 @@ public void testIntervalBasedCommit(boolean enableTopicCreation) throws Exceptio time.sleep(commitInterval * 2); awaitPolls(2); - assertEquals(2, flushCount(), + assertEquals(2, flushCount(), "Two flushes should have taken place after offset commit interval has elapsed again"); awaitShutdown(); @@ -954,7 +950,7 @@ public void testSendRecordsRetries(boolean enableTopicCreation) throws Exception expectConvertHeadersAndKeyValue(); // We're trying to send three records - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); OngoingStubbing> producerSend = when(producer.send(any(), any())); // The first one is sent successfully producerSend = expectSuccessfulSend(producerSend); @@ -964,7 +960,7 @@ public void testSendRecordsRetries(boolean enableTopicCreation) throws Exception expectSuccessfulSend(producerSend); assertFalse(workerTask.sendRecords()); - assertEquals(List.of(record2, record3), workerTask.toSend); + assertEquals(Arrays.asList(record2, record3), workerTask.toSend); verify(producer).beginTransaction(); // When using poll-based transaction boundaries, we do not commit transactions while retrying delivery for a batch verify(producer, never()).commitTransaction(); @@ -999,7 +995,7 @@ public void testSendRecordsProducerSendFailsImmediately(boolean enableTopicCreat when(producer.send(any(), any())) .thenThrow(new KafkaException("Producer closed while send in progress", new InvalidTopicException(TOPIC))); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, workerTask::sendRecords); verify(producer).beginTransaction(); @@ -1078,7 +1074,7 @@ private void awaitPolls(int minimum, List records) { } private void awaitEmptyPolls(int minimum) { - awaitPolls(minimum, List.of()); + awaitPolls(minimum, Collections.emptyList()); } private void awaitPolls(int minimum) { @@ -1166,8 +1162,8 @@ private void expectTaskGetTopic() { private void expectPossibleTopicCreation() { if (config.topicCreationEnable()) { - Set created = Set.of(TOPIC); - Set existing = Set.of(); + Set created = Collections.singleton(TOPIC); + Set existing = Collections.emptySet(); TopicAdmin.TopicCreationResponse creationResponse = new TopicAdmin.TopicCreationResponse(created, existing); when(admin.createOrFindTopics(any())).thenReturn(creationResponse); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java index 18edc9c6f8431..f76c05a005169 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/InternalSinkRecordTest.java @@ -27,7 +27,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.List; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -49,7 +49,7 @@ public void testNewRecordHeaders() { assertTrue(sinkRecord.headers().isEmpty()); SinkRecord newRecord = internalSinkRecord.newRecord(TOPIC, 0, null, null, null, - null, null, List.of(mock(Header.class))); + null, null, Collections.singletonList(mock(Header.class))); assertEquals(1, newRecord.headers().size()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java index f2b034c08d391..a965a061f82b5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/LoggersTest.java @@ -20,9 +20,9 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.runtime.rest.entities.LoggerLevel; +import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; -import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -41,20 +41,14 @@ public class LoggersTest { private static final long INITIAL_TIME = 1696951712135L; - private Loggers.Log4jLoggers loggers; + private final LoggerContext context = (LoggerContext) LogManager.getContext(false); + private Loggers loggers; private Time time; @BeforeEach public void setup() { time = new MockTime(0, INITIAL_TIME, 0); - loggers = (Loggers.Log4jLoggers) Loggers.newInstance(time); - } - - @AfterEach - public void tearDown() { - // Reset LoggerContext to its initial configuration. - // This ensures any log level changes made in a test do not leak into subsequent tests. - LoggerContext.getContext(false).reconfigure(); + loggers = new Loggers(time); } @Test @@ -74,7 +68,7 @@ public void testLevelWithValidRootLoggerNames() { @Test public void testLevelWithExistLoggerName() { - loggers.setLevel("foo", DEBUG.name()); + loggers.setLevel("foo", DEBUG); assertEquals(new LoggerLevel(DEBUG.name(), INITIAL_TIME), loggers.level("foo") ); @@ -87,7 +81,7 @@ public void testLevelWithNonExistLoggerName() { @Test public void testLevelWithNewlyCreatedLogger() { - loggers.setLevel("dummy", ERROR.name()); + loggers.setLevel("dummy", ERROR); assertEquals( new LoggerLevel(ERROR.name(), time.milliseconds()), loggers.level("dummy"), @@ -97,8 +91,8 @@ public void testLevelWithNewlyCreatedLogger() { @Test public void testAllLevelsAfterCreatingNewLogger() { - loggers.setLevel("foo", WARN.name()); - loggers.setLevel("bar", ERROR.name()); + loggers.setLevel("foo", WARN); + loggers.setLevel("bar", ERROR); Map loggerToLevel = loggers.allLevels(); Map expectedLevels = Map.of( "foo", new LoggerLevel(WARN.name(), INITIAL_TIME), @@ -119,8 +113,8 @@ public void testSetLevelWithNullNameSpaceOrNullLevel() { @Test public void testSetLevelWithValidRootLoggerNames() { - loggers.setLevel("", ERROR.name()); - List setLevelResultWithRoot = loggers.setLevel("root", ERROR.name()); + loggers.setLevel("", ERROR); + List setLevelResultWithRoot = loggers.setLevel("root", ERROR); assertTrue(setLevelResultWithRoot.isEmpty(), "Setting level with empty string ('') and 'root' should affect the same set of loggers - " + "when setting the same level twice, second call should return empty list indicating no loggers were affected"); @@ -128,9 +122,9 @@ public void testSetLevelWithValidRootLoggerNames() { @Test public void testSetLevel() { - loggers.setLevel("a.b.c", DEBUG.name()); - loggers.setLevel("a.b", ERROR.name()); - loggers.setLevel("a", WARN.name()); + loggers.setLevel("a.b.c", DEBUG); + loggers.setLevel("a.b", ERROR); + loggers.setLevel("a", WARN); Map expected = Map.of( "a", new LoggerLevel(WARN.name(), INITIAL_TIME), "a.b", new LoggerLevel(WARN.name(), INITIAL_TIME), @@ -141,7 +135,7 @@ public void testSetLevel() { @Test public void testLookupLoggerAfterCreatingNewLogger() { - loggers.setLevel("dummy", INFO.name()); + loggers.setLevel("dummy", INFO); Logger logger = loggers.lookupLogger("dummy"); assertNotNull(logger); assertEquals(INFO, logger.getLevel()); @@ -150,9 +144,9 @@ public void testLookupLoggerAfterCreatingNewLogger() { @Test public void testSetLevelWithSameLevel() { String loggerName = "dummy"; - loggers.setLevel(loggerName, DEBUG.name()); + loggers.setLevel(loggerName, DEBUG); time.sleep(100); - loggers.setLevel(loggerName, DEBUG.name()); + loggers.setLevel(loggerName, DEBUG); assertEquals( new LoggerLevel(DEBUG.name(), INITIAL_TIME), loggers.allLevels().get(loggerName), @@ -163,9 +157,9 @@ public void testSetLevelWithSameLevel() { @Test public void testSetLevelWithDifferentLevels() { String loggerName = "dummy"; - loggers.setLevel(loggerName, DEBUG.name()); + loggers.setLevel(loggerName, DEBUG); time.sleep(100); - loggers.setLevel(loggerName, WARN.name()); + loggers.setLevel(loggerName, WARN); assertEquals( new LoggerLevel(WARN.name(), INITIAL_TIME + 100), loggers.allLevels().get(loggerName), diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java index b6548651418d6..c5f9f8314d9ef 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockConnectMetrics.java @@ -49,7 +49,6 @@ public class MockConnectMetrics extends ConnectMetrics { DEFAULT_WORKER_CONFIG.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); DEFAULT_WORKER_CONFIG.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); DEFAULT_WORKER_CONFIG.put(CommonClientConfigs.METRIC_REPORTER_CLASSES_CONFIG, MockMetricsReporter.class.getName()); - DEFAULT_WORKER_CONFIG.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); } public MockConnectMetrics() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java index 6d4692bae1053..3df5028461190 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/MockLoggersTest.java @@ -22,7 +22,7 @@ import org.apache.logging.log4j.Level; import org.apache.logging.log4j.LogManager; -import org.apache.logging.log4j.core.Logger; +import org.apache.logging.log4j.Logger; import org.apache.logging.log4j.core.LoggerContext; import org.apache.logging.log4j.core.config.Configuration; import org.apache.logging.log4j.core.config.Configurator; @@ -34,6 +34,8 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -49,6 +51,7 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class MockLoggersTest { private static final long INITIAL_TIME = 1696951712135L; + private final LoggerContext context = (LoggerContext) LogManager.getContext(false); private Time time; @BeforeEach @@ -70,7 +73,7 @@ public void testGetLoggersIgnoresNullLevels() { Loggers loggers = new TestLoggers(root, a, b); - Map expectedLevels = Map.of( + Map expectedLevels = Collections.singletonMap( "b", new LoggerLevel(Level.INFO.toString(), null) ); @@ -134,8 +137,8 @@ public void testSetLevel() { // one should be created by the Loggers instance when we set the level TestLoggers loggers = new TestLoggers(root, x, y, z, w); - List modified = loggers.setLevel("a.b.c.p", Level.WARN.name()); - assertEquals(List.of("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); + List modified = loggers.setLevel("a.b.c.p", Level.WARN); + assertEquals(Arrays.asList("a.b.c.p", "a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z"), modified); assertEquals(Level.WARN.toString(), loggers.level("a.b.c.p").level()); assertEquals(Level.WARN, x.getLevel()); assertEquals(Level.WARN, y.getLevel()); @@ -147,7 +150,7 @@ public void testSetLevel() { // Sleep a little and adjust the level of a leaf logger time.sleep(10); - loggers.setLevel("a.b.c.p.X", Level.ERROR.name()); + loggers.setLevel("a.b.c.p.X", Level.ERROR); expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); actualLevel = loggers.level("a.b.c.p.X"); assertEquals(expectedLevel, actualLevel); @@ -163,7 +166,7 @@ public void testSetLevel() { // Set the same level again, and verify that the last modified time hasn't been altered time.sleep(10); - loggers.setLevel("a.b.c.p.X", Level.ERROR.name()); + loggers.setLevel("a.b.c.p.X", Level.ERROR); expectedLevel = new LoggerLevel(Level.ERROR.toString(), INITIAL_TIME + 10); actualLevel = loggers.level("a.b.c.p.X"); assertEquals(expectedLevel, actualLevel); @@ -182,7 +185,7 @@ public void testSetRootLevel() { config.addLogger(rootLoggerName, rootConfig); loggerContext.updateLoggers(); - Logger root = loggerContext.getLogger(rootLoggerName); + Logger root = LogManager.getLogger(rootLoggerName); Configurator.setLevel(root, Level.ERROR); Logger p = loggerContext.getLogger("a.b.c.p"); @@ -198,8 +201,8 @@ public void testSetRootLevel() { Loggers loggers = new TestLoggers(root, x, y, z, w); - List modified = loggers.setLevel(rootLoggerName, Level.DEBUG.name()); - assertEquals(List.of("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", rootLoggerName), modified); + List modified = loggers.setLevel(rootLoggerName, Level.DEBUG); + assertEquals(Arrays.asList("a.b.c.p.X", "a.b.c.p.Y", "a.b.c.p.Z", "a.b.c.s.W", rootLoggerName), modified); assertEquals(Level.DEBUG, p.getLevel()); @@ -227,17 +230,17 @@ public void testSetLevelNullArguments() { LoggerContext loggerContext = (LoggerContext) LogManager.getContext(false); Logger root = loggerContext.getRootLogger(); Loggers loggers = new TestLoggers(root); - assertThrows(NullPointerException.class, () -> loggers.setLevel(null, Level.INFO.name())); + assertThrows(NullPointerException.class, () -> loggers.setLevel(null, Level.INFO)); assertThrows(NullPointerException.class, () -> loggers.setLevel("root", null)); } - private class TestLoggers extends Loggers.Log4jLoggers { + private class TestLoggers extends Loggers { private final Logger rootLogger; private final Map currentLoggers; public TestLoggers(Logger rootLogger, Logger... knownLoggers) { - super(MockLoggersTest.this.time); + super(time); this.rootLogger = rootLogger; this.currentLoggers = new HashMap<>(Stream.of(knownLoggers) .collect(Collectors.toMap( @@ -249,7 +252,7 @@ public TestLoggers(Logger rootLogger, Logger... knownLoggers) { @Override Logger lookupLogger(String logger) { - return currentLoggers.computeIfAbsent(logger, loggerContext::getLogger); + return currentLoggers.computeIfAbsent(logger, LogManager::getLogger); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java index d0f3f974c635f..8d6f54ce2581b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/RestartPlanTest.java @@ -35,15 +35,17 @@ public class RestartPlanTest { @Test public void testRestartPlan() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), "foo", null, null + AbstractStatus.State.RESTARTING.name(), + "foo", + null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); - tasks.add(new TaskState(3, AbstractStatus.State.RESTARTING.name(), "worker1", null, null)); - tasks.add(new TaskState(4, AbstractStatus.State.DESTROYED.name(), "worker1", null, null)); - tasks.add(new TaskState(5, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); - tasks.add(new TaskState(6, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); + tasks.add(new TaskState(3, AbstractStatus.State.RESTARTING.name(), "worker1", null)); + tasks.add(new TaskState(4, AbstractStatus.State.DESTROYED.name(), "worker1", null)); + tasks.add(new TaskState(5, AbstractStatus.State.RUNNING.name(), "worker1", null)); + tasks.add(new TaskState(6, AbstractStatus.State.RUNNING.name(), "worker1", null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); @@ -59,11 +61,13 @@ public void testRestartPlan() { @Test public void testNoRestartsPlan() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RUNNING.name(), "foo", null, null + AbstractStatus.State.RUNNING.name(), + "foo", + null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); RestartPlan restartPlan = new RestartPlan(restartRequest, connectorStateInfo); @@ -77,11 +81,13 @@ public void testNoRestartsPlan() { @Test public void testRestartsOnlyConnector() { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), "foo", null, null + AbstractStatus.State.RESTARTING.name(), + "foo", + null ); List tasks = new ArrayList<>(); - tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null, null)); - tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null, null)); + tasks.add(new TaskState(1, AbstractStatus.State.RUNNING.name(), "worker1", null)); + tasks.add(new TaskState(2, AbstractStatus.State.PAUSED.name(), "worker1", null)); ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, tasks, ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, false, true); RestartPlan restartPlan = new RestartPlan(restartRequest, connectorStateInfo); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java index 1324b9a22638c..cb91530439f3d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SampleSourceConnector.java @@ -27,7 +27,7 @@ public class SampleSourceConnector extends SourceConnector { - public static final String VERSION = "some great version"; + public static final String VERSION = "an entirely different version"; @Override public String version() { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java index 0b1f4efc85029..106659d0f8f46 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SourceTaskOffsetCommitterTest.java @@ -22,6 +22,7 @@ import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.util.ConnectorTaskId; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; @@ -40,6 +41,7 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; +import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -72,7 +74,6 @@ public class SourceTaskOffsetCommitterTest { @BeforeEach public void setup() { Map workerProps = new HashMap<>(); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -94,7 +95,7 @@ public void testSchedule() { committer.schedule(taskId, task); assertNotNull(taskWrapper.getValue()); - assertEquals(Map.of(taskId, commitFuture), committers); + assertEquals(singletonMap(taskId, commitFuture), committers); } @Test @@ -146,7 +147,7 @@ public void testRemoveCancelledTask() throws ExecutionException, InterruptedExce committers.put(taskId, taskFuture); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(SourceTaskOffsetCommitter.class)) { - logCaptureAppender.setClassLogger(SourceTaskOffsetCommitter.class, org.apache.logging.log4j.Level.TRACE); + logCaptureAppender.setClassLogger(SourceTaskOffsetCommitter.class, Level.TRACE); committer.remove(taskId); assertTrue(logCaptureAppender.getEvents().stream().anyMatch(e -> e.getLevel().equals("TRACE"))); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java index 043cf59b3f12e..6b8368e002c43 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/SubmittedRecordsTest.java @@ -24,7 +24,6 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; @@ -38,9 +37,9 @@ public class SubmittedRecordsTest { - private static final Map PARTITION1 = Map.of("subreddit", "apachekafka"); - private static final Map PARTITION2 = Map.of("subreddit", "adifferentvalue"); - private static final Map PARTITION3 = Map.of("subreddit", "asdfqweoicus"); + private static final Map PARTITION1 = Collections.singletonMap("subreddit", "apachekafka"); + private static final Map PARTITION2 = Collections.singletonMap("subreddit", "adifferentvalue"); + private static final Map PARTITION3 = Collections.singletonMap("subreddit", "asdfqweoicus"); private AtomicInteger offset; @@ -69,22 +68,22 @@ public void testNoRecords() { @Test public void testNoCommittedRecords() { for (int i = 0; i < 3; i++) { - for (Map partition : List.of(PARTITION1, PARTITION2, PARTITION3)) { + for (Map partition : Arrays.asList(PARTITION1, PARTITION2, PARTITION3)) { submittedRecords.submit(partition, newOffset()); } } CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); committableOffsets = submittedRecords.committableOffsets(); assertMetadata(committableOffsets, 0, 9, 3, 3, PARTITION1, PARTITION2, PARTITION3); - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); } @Test @@ -95,7 +94,7 @@ public void testSingleAck() { CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); // Record has been submitted but not yet acked; cannot commit offsets for it yet assertFalse(committableOffsets.isEmpty()); - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION1); assertNoEmptyDeques(); @@ -103,7 +102,7 @@ public void testSingleAck() { committableOffsets = submittedRecords.committableOffsets(); // Record has been acked; can commit offsets for it assertFalse(committableOffsets.isEmpty()); - assertEquals(Map.of(PARTITION1, offset), committableOffsets.offsets()); + assertEquals(Collections.singletonMap(PARTITION1, offset), committableOffsets.offsets()); assertMetadataNoPending(committableOffsets, 1); // Everything has been ack'd and consumed; make sure that it's been cleaned up to avoid memory leaks @@ -111,7 +110,7 @@ public void testSingleAck() { committableOffsets = submittedRecords.committableOffsets(); // Old offsets should be wiped - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertTrue(committableOffsets.isEmpty()); } @@ -129,27 +128,27 @@ public void testMultipleAcksAcrossMultiplePartitions() { CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); // No records ack'd yet; can't commit any offsets - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 4, 2, 2, PARTITION1, PARTITION2); assertNoEmptyDeques(); partition1Record2.ack(); committableOffsets = submittedRecords.committableOffsets(); // One record has been ack'd, but a record that comes before it and corresponds to the same source partition hasn't been - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 4, 2, 2, PARTITION1, PARTITION2); assertNoEmptyDeques(); partition2Record1.ack(); committableOffsets = submittedRecords.committableOffsets(); // We can commit the first offset for the second partition - assertEquals(Map.of(PARTITION2, partition2Offset1), committableOffsets.offsets()); + assertEquals(Collections.singletonMap(PARTITION2, partition2Offset1), committableOffsets.offsets()); assertMetadata(committableOffsets, 1, 3, 2, 2, PARTITION1); assertNoEmptyDeques(); committableOffsets = submittedRecords.committableOffsets(); // No new offsets to commit - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 3, 2, 2, PARTITION1); assertNoEmptyDeques(); @@ -177,7 +176,7 @@ public void testRemoveLastSubmittedRecord() { SubmittedRecord submittedRecord = submittedRecords.submit(PARTITION1, newOffset()); CommittableOffsets committableOffsets = submittedRecords.committableOffsets(); - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION1); assertTrue(submittedRecord.drop(), "First attempt to remove record from submitted queue should succeed"); @@ -209,7 +208,7 @@ public void testRemoveNotLastSubmittedRecord() { committableOffsets = submittedRecords.committableOffsets(); // Even if SubmittedRecords::remove is broken, we haven't ack'd anything yet, so there should be no committable offsets - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION2); assertNoEmptyDeques(); // The only record for this partition has been removed; we shouldn't be tracking a deque for it anymore @@ -218,14 +217,14 @@ public void testRemoveNotLastSubmittedRecord() { recordToRemove.ack(); committableOffsets = submittedRecords.committableOffsets(); // Even though the record has somehow been acknowledged, it should not be counted when collecting committable offsets - assertEquals(Map.of(), committableOffsets.offsets()); + assertEquals(Collections.emptyMap(), committableOffsets.offsets()); assertMetadata(committableOffsets, 0, 1, 1, 1, PARTITION2); assertNoEmptyDeques(); lastSubmittedRecord.ack(); committableOffsets = submittedRecords.committableOffsets(); // Now that the last-submitted record has been ack'd, we should be able to commit its offset - assertEquals(Map.of(PARTITION2, partition2Offset), committableOffsets.offsets()); + assertEquals(Collections.singletonMap(PARTITION2, partition2Offset), committableOffsets.offsets()); assertMetadata(committableOffsets, 1, 0, 0, 0, (Map) null); assertFalse(committableOffsets.hasPending()); @@ -339,7 +338,7 @@ public void testAwaitMessagesReturnsAfterAsynchronousAck() throws Exception { } private void assertNoRemainingDeques() { - assertEquals(Map.of(), submittedRecords.records, "Internal records map should be completely empty"); + assertEquals(Collections.emptyMap(), submittedRecords.records, "Internal records map should be completely empty"); } @SafeVarargs @@ -356,7 +355,7 @@ private void assertNoEmptyDeques() { } private Map newOffset() { - return Map.of("timestamp", offset.getAndIncrement()); + return Collections.singletonMap("timestamp", offset.getAndIncrement()); } private void assertMetadataNoPending(CommittableOffsets committableOffsets, int committableMessages) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java index ef7f17e1d09a2..2a6c0ed2b9d1d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationConfigTest.java @@ -29,16 +29,12 @@ import org.apache.kafka.connect.transforms.SetSchemaMetadata; import org.apache.kafka.connect.transforms.TimestampConverter; import org.apache.kafka.connect.transforms.TimestampRouter; -import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.ValueToKey; import org.junit.jupiter.api.Test; import java.util.HashMap; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - /** * Tests that transformations' configs can be composed with ConnectorConfig during its construction, ensuring no * conflicting fields or other issues. @@ -46,19 +42,8 @@ * This test appears here simply because it requires both connect-runtime and connect-transforms and connect-runtime * already depends on connect-transforms. */ -@SuppressWarnings("rawtypes") public class TransformationConfigTest { - private Plugins setupMockPlugins(Transformation transformation) { - Plugins plugins = mock(Plugins.class); - try { - when(plugins.newPlugin(transformation.getClass().getName(), null, (ClassLoader) null)).thenReturn(transformation); - } catch (ClassNotFoundException e) { - // Shouldn't happen since we're mocking the plugins - } - return plugins; - } - @Test public void testEmbeddedConfigCast() { // Validate that we can construct a Connector config containing the extended config for the transform @@ -69,7 +54,7 @@ public void testEmbeddedConfigCast() { connProps.put("transforms.example.type", Cast.Value.class.getName()); connProps.put("transforms.example.spec", "int8"); - Plugins plugins = setupMockPlugins(new Cast.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -83,7 +68,7 @@ public void testEmbeddedConfigExtractField() { connProps.put("transforms.example.type", ExtractField.Value.class.getName()); connProps.put("transforms.example.field", "field"); - Plugins plugins = setupMockPlugins(new ExtractField.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -96,7 +81,7 @@ public void testEmbeddedConfigFlatten() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", Flatten.Value.class.getName()); - Plugins plugins = setupMockPlugins(new Flatten.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -110,7 +95,7 @@ public void testEmbeddedConfigHoistField() { connProps.put("transforms.example.type", HoistField.Value.class.getName()); connProps.put("transforms.example.field", "field"); - Plugins plugins = setupMockPlugins(new HoistField.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -123,7 +108,7 @@ public void testEmbeddedConfigInsertField() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", InsertField.Value.class.getName()); - Plugins plugins = setupMockPlugins(new InsertField.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -138,7 +123,7 @@ public void testEmbeddedConfigMaskField() { connProps.put("transforms.example.fields", "field"); connProps.put("transforms.example.replacement", "nothing"); - Plugins plugins = setupMockPlugins(new MaskField.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -153,7 +138,7 @@ public void testEmbeddedConfigRegexRouter() { connProps.put("transforms.example.regex", "(.*)"); connProps.put("transforms.example.replacement", "prefix-$1"); - Plugins plugins = setupMockPlugins(new RegexRouter()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -166,7 +151,7 @@ public void testEmbeddedConfigReplaceField() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", ReplaceField.Value.class.getName()); - Plugins plugins = setupMockPlugins(new ReplaceField.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -179,7 +164,7 @@ public void testEmbeddedConfigSetSchemaMetadata() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", SetSchemaMetadata.Value.class.getName()); - Plugins plugins = setupMockPlugins(new SetSchemaMetadata.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -193,7 +178,7 @@ public void testEmbeddedConfigTimestampConverter() { connProps.put("transforms.example.type", TimestampConverter.Value.class.getName()); connProps.put("transforms.example.target.type", "unix"); - Plugins plugins = setupMockPlugins(new TimestampConverter.Value()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -206,7 +191,7 @@ public void testEmbeddedConfigTimestampRouter() { connProps.put("transforms", "example"); connProps.put("transforms.example.type", TimestampRouter.class.getName()); - Plugins plugins = setupMockPlugins(new TimestampRouter()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } @@ -220,7 +205,7 @@ public void testEmbeddedConfigValueToKey() { connProps.put("transforms.example.type", ValueToKey.class.getName()); connProps.put("transforms.example.fields", "field"); - Plugins plugins = setupMockPlugins(new ValueToKey()); + Plugins plugins = null; // Safe when we're only constructing the config new ConnectorConfig(plugins, connProps); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java index 4b3935c35f8d7..c3208515ac3a3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/TransformationStageTest.java @@ -16,8 +16,6 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.internals.Plugin; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.transforms.Transformation; import org.apache.kafka.connect.transforms.predicates.Predicate; @@ -26,8 +24,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; -import java.util.Map; - +import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; @@ -37,44 +34,37 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class TransformationStageTest { - private final SourceRecord initial = new SourceRecord(Map.of("initial", 1), null, null, null, null); - private final SourceRecord transformed = new SourceRecord(Map.of("transformed", 2), null, null, null, null); + private final SourceRecord initial = new SourceRecord(singletonMap("initial", 1), null, null, null, null); + private final SourceRecord transformed = new SourceRecord(singletonMap("transformed", 2), null, null, null, null); @Test - public void apply() throws Exception { + public void apply() { applyAndAssert(true, false, transformed); applyAndAssert(true, true, initial); applyAndAssert(false, false, initial); applyAndAssert(false, true, transformed); } - @SuppressWarnings("unchecked") - private void applyAndAssert(boolean predicateResult, boolean negate, SourceRecord expectedResult) throws Exception { - Plugin> predicatePlugin = mock(Plugin.class); + private void applyAndAssert(boolean predicateResult, boolean negate, + SourceRecord expectedResult) { + + @SuppressWarnings("unchecked") Predicate predicate = mock(Predicate.class); when(predicate.test(any())).thenReturn(predicateResult); - when(predicatePlugin.get()).thenReturn(predicate); - Plugin> transformationPlugin = mock(Plugin.class); + @SuppressWarnings("unchecked") Transformation transformation = mock(Transformation.class); if (expectedResult == transformed) { - when(transformationPlugin.get()).thenReturn(transformation); when(transformation.apply(any())).thenReturn(transformed); } TransformationStage stage = new TransformationStage<>( - predicatePlugin, - "testPredicate", - null, + predicate, negate, - transformationPlugin, - "testTransformation", - null, - TestPlugins.noOpLoaderSwap() - ); + transformation); assertEquals(expectedResult, stage.apply(initial)); stage.close(); - verify(predicatePlugin).close(); - verify(transformationPlugin).close(); + verify(predicate).close(); + verify(transformation).close(); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java index 07fa131694b77..4ad4c11ee89cd 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTest.java @@ -27,6 +27,7 @@ import org.mockito.MockedStatic; import org.mockito.internal.stubbing.answers.CallsRealMethods; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -58,7 +59,7 @@ public void teardown() { public void testLookupKafkaClusterId() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = List.of(broker1, broker2); + List cluster = Arrays.asList(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). brokers(cluster).build(); assertEquals(MockAdminClient.DEFAULT_CLUSTER_ID, WorkerConfig.lookupKafkaClusterId(adminClient)); @@ -68,7 +69,7 @@ public void testLookupKafkaClusterId() { public void testLookupNullKafkaClusterId() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = List.of(broker1, broker2); + List cluster = Arrays.asList(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). brokers(cluster).clusterId(null).build(); assertNull(WorkerConfig.lookupKafkaClusterId(adminClient)); @@ -78,7 +79,7 @@ public void testLookupNullKafkaClusterId() { public void testLookupKafkaClusterIdTimeout() { final Node broker1 = new Node(0, "dummyHost-1", 1234); final Node broker2 = new Node(1, "dummyHost-2", 1234); - List cluster = List.of(broker1, broker2); + List cluster = Arrays.asList(broker1, broker2); MockAdminClient adminClient = new MockAdminClient.Builder(). brokers(cluster).build(); adminClient.timeoutNextRequest(1); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java index 9aae6848d1940..c3a8f151750ec 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConfigTransformerTest.java @@ -27,6 +27,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Set; @@ -65,13 +66,13 @@ public class WorkerConfigTransformerTest { @BeforeEach public void setup() { - configTransformer = new WorkerConfigTransformer(worker, Map.of("test", new TestConfigProvider())); + configTransformer = new WorkerConfigTransformer(worker, Collections.singletonMap("test", new TestConfigProvider())); } @Test public void testReplaceVariable() { // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKey}")); + Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKey}")); // Assertions assertEquals(TEST_RESULT, result.get(MY_KEY)); @@ -96,7 +97,7 @@ public void testReplaceVariableWithTTLAndScheduleRestart() { when(herder.restartConnector(eq(1L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId); // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}")); + Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY)); @@ -111,14 +112,14 @@ public void testReplaceVariableWithTTLFirstCancelThenScheduleRestart() { when(herder.restartConnector(eq(10L), eq(MY_CONNECTOR), notNull())).thenReturn(requestId); // Execution - Map result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithTTL}")); + Map result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_TTL, result.get(MY_KEY)); verify(herder).restartConnector(eq(1L), eq(MY_CONNECTOR), notNull()); // Execution - result = configTransformer.transform(MY_CONNECTOR, Map.of(MY_KEY, "${test:testPath:testKeyWithLongerTTL}")); + result = configTransformer.transform(MY_CONNECTOR, Collections.singletonMap(MY_KEY, "${test:testPath:testKeyWithLongerTTL}")); // Assertions assertEquals(TEST_RESULT_WITH_LONGER_TTL, result.get(MY_KEY)); @@ -146,14 +147,14 @@ public ConfigData get(String path) { public ConfigData get(String path, Set keys) { if (path.equals(TEST_PATH)) { if (keys.contains(TEST_KEY)) { - return new ConfigData(Map.of(TEST_KEY, TEST_RESULT)); + return new ConfigData(Collections.singletonMap(TEST_KEY, TEST_RESULT)); } else if (keys.contains(TEST_KEY_WITH_TTL)) { - return new ConfigData(Map.of(TEST_KEY_WITH_TTL, TEST_RESULT_WITH_TTL), 1L); + return new ConfigData(Collections.singletonMap(TEST_KEY_WITH_TTL, TEST_RESULT_WITH_TTL), 1L); } else if (keys.contains(TEST_KEY_WITH_LONGER_TTL)) { - return new ConfigData(Map.of(TEST_KEY_WITH_LONGER_TTL, TEST_RESULT_WITH_LONGER_TTL), 10L); + return new ConfigData(Collections.singletonMap(TEST_KEY_WITH_LONGER_TTL, TEST_RESULT_WITH_LONGER_TTL), 10L); } } - return new ConfigData(Map.of()); + return new ConfigData(Collections.emptyMap()); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConnectorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConnectorTest.java index 1fa16713f150c..1fd45e2bf00ed 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConnectorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerConnectorTest.java @@ -596,11 +596,17 @@ protected void assertDestroyedMetric(WorkerConnector workerConnector) { } protected void assertInitializedMetric(WorkerConnector workerConnector) { - String expectedType = switch (connectorType) { - case SINK -> "sink"; - case SOURCE -> "source"; - default -> throw new IllegalStateException("Unexpected connector type: " + connectorType); - }; + String expectedType; + switch (connectorType) { + case SINK: + expectedType = "sink"; + break; + case SOURCE: + expectedType = "source"; + break; + default: + throw new IllegalStateException("Unexpected connector type: " + connectorType); + } assertInitializedMetric(workerConnector, expectedType); } @@ -615,7 +621,6 @@ protected void assertInitializedMetric(WorkerConnector workerConnector, String e String type = metrics.currentMetricValueAsString(metricGroup, "connector-type"); String clazz = metrics.currentMetricValueAsString(metricGroup, "connector-class"); String version = metrics.currentMetricValueAsString(metricGroup, "connector-version"); - assertEquals("unassigned", status); assertEquals(expectedType, type); assertNotNull(clazz); assertEquals(VERSION, version); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java index ce052dd243969..4e91183fd3125 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskTest.java @@ -16,7 +16,6 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.clients.consumer.Consumer; import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; import org.apache.kafka.clients.consumer.ConsumerRecord; import org.apache.kafka.clients.consumer.ConsumerRecords; @@ -31,7 +30,6 @@ import org.apache.kafka.common.header.Header; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.MockTime; @@ -48,7 +46,6 @@ import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; import org.apache.kafka.connect.runtime.isolation.PluginClassLoader; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.sink.SinkConnector; import org.apache.kafka.connect.sink.SinkRecord; @@ -74,7 +71,9 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; @@ -89,6 +88,8 @@ import java.util.regex.Pattern; import java.util.stream.Collectors; +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; import static org.apache.kafka.connect.runtime.WorkerTestUtils.getTransformationChain; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -131,7 +132,7 @@ public class WorkerSinkTaskTest { private static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); private static final Set INITIAL_ASSIGNMENT = - Set.of(TOPIC_PARTITION, TOPIC_PARTITION2); + new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); private static final Map TASK_PROPS = new HashMap<>(); @@ -179,7 +180,6 @@ public class WorkerSinkTaskTest { public void setUp() { time = new MockTime(); Map workerProps = new HashMap<>(); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -194,39 +194,22 @@ private void createTask(TargetState initialState) { } private void createTask(TargetState initialState, TransformationChain transformationChain, RetryWithToleranceOperator toleranceOperator) { - createTask(initialState, keyConverter, valueConverter, headerConverter, toleranceOperator, List::of, transformationChain); + createTask(initialState, keyConverter, valueConverter, headerConverter, toleranceOperator, Collections::emptyList, transformationChain); } private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter) { - createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), List::of, transformationChain); + createTask(initialState, keyConverter, valueConverter, headerConverter, RetryWithToleranceOperatorTest.noneOperator(), Collections::emptyList, transformationChain); } private void createTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, RetryWithToleranceOperator> retryWithToleranceOperator, Supplier>>> errorReportersSupplier, - TransformationChain, SinkRecord> transformationChain) { - createTask(taskId, sinkTask, statusListener, initialState, workerConfig, metrics, + TransformationChain transformationChain) { + workerTask = new WorkerSinkTask( + taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, consumer, pluginLoader, time, - retryWithToleranceOperator, statusBackingStore, errorReportersSupplier); - } - - private void createTask(ConnectorTaskId taskId, SinkTask task, TaskStatus.Listener statusListener, TargetState initialState, - WorkerConfig workerConfig, ConnectMetrics connectMetrics, Converter keyConverter, Converter valueConverter, - ErrorHandlingMetrics errorMetrics, HeaderConverter headerConverter, - TransformationChain, SinkRecord> transformationChain, - Consumer consumer, ClassLoader loader, Time time, - RetryWithToleranceOperator> retryWithToleranceOperator, - StatusBackingStore statusBackingStore, - Supplier>>> errorReportersSupplier) { - Plugin keyConverterPlugin = connectMetrics.wrap(keyConverter, taskId, true); - Plugin valueConverterPlugin = connectMetrics.wrap(valueConverter, taskId, false); - Plugin headerConverterPlugin = connectMetrics.wrap(headerConverter, taskId); - workerTask = new WorkerSinkTask( - taskId, task, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, connectMetrics, - keyConverterPlugin, valueConverterPlugin, errorMetrics, headerConverterPlugin, - transformationChain, consumer, loader, time, - retryWithToleranceOperator, null, statusBackingStore, errorReportersSupplier, null, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, null, statusBackingStore, errorReportersSupplier); } @AfterEach @@ -315,7 +298,7 @@ public void testPause() { verify(consumer).wakeup(); // Offset commit as requested when pausing; No records returned by consumer.poll() - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); workerTask.iteration(); // now paused time.sleep(30000L); @@ -337,7 +320,7 @@ public void testPause() { // And unpause verify(statusListener).onResume(taskId); verify(consumer, times(2)).wakeup(); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(singleton(tp))); verify(sinkTask, times(4)).put(anyList()); } @@ -360,7 +343,7 @@ public void testShutdown() throws Exception { sinkTaskContext.getValue().requestCommit(); // Force an offset commit // second iteration - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); workerTask.iteration(); verify(sinkTask, times(2)).put(anyList()); @@ -438,7 +421,7 @@ public void testPollRedelivery() { time.sleep(30000L); verify(sinkTask, times(3)).put(anyList()); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); assertSinkMetricValue("sink-record-read-total", 1.0); assertSinkMetricValue("sink-record-send-total", 1.0); @@ -480,14 +463,14 @@ public void testPollRedeliveryWithConsumerRebalance() { workerTask.initializeAndStart(); verifyInitializeTask(); - Set newAssignment = Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + Set newAssignment = new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)); when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT) .thenReturn(newAssignment, newAssignment, newAssignment) - .thenReturn(Set.of(TOPIC_PARTITION3), - Set.of(TOPIC_PARTITION3), - Set.of(TOPIC_PARTITION3)); + .thenReturn(Collections.singleton(TOPIC_PARTITION3), + Collections.singleton(TOPIC_PARTITION3), + Collections.singleton(TOPIC_PARTITION3)); INITIAL_ASSIGNMENT.forEach(tp -> when(consumer.position(tp)).thenReturn(FIRST_OFFSET)); when(consumer.position(TOPIC_PARTITION3)).thenReturn(FIRST_OFFSET); @@ -500,8 +483,8 @@ public void testPollRedeliveryWithConsumerRebalance() { .thenAnswer(expectConsumerPoll(1)) // Empty consumer poll (all partitions are paused) with rebalance; one new partition is assigned .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Set.of()); - rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); + rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) .thenAnswer(expectConsumerPoll(0)) @@ -510,7 +493,7 @@ public void testPollRedeliveryWithConsumerRebalance() { ConsumerRecord newRecord = new ConsumerRecord<>(TOPIC, PARTITION3, FIRST_OFFSET, RAW_KEY, RAW_VALUE); rebalanceListener.getValue().onPartitionsRevoked(INITIAL_ASSIGNMENT); - rebalanceListener.getValue().onPartitionsAssigned(List.of()); + rebalanceListener.getValue().onPartitionsAssigned(Collections.emptyList()); return new ConsumerRecords<>(Map.of(TOPIC_PARTITION3, List.of(newRecord)), Map.of(TOPIC_PARTITION3, new OffsetAndMetadata(FIRST_OFFSET + 1, Optional.empty(), ""))); }); @@ -531,7 +514,7 @@ public void testPollRedeliveryWithConsumerRebalance() { verify(consumer).pause(INITIAL_ASSIGNMENT); workerTask.iteration(); - verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); + verify(sinkTask).open(Collections.singleton(TOPIC_PARTITION3)); // All partitions are re-paused in order to pause any newly-assigned partitions so that redelivery efforts can continue verify(consumer).pause(newAssignment); @@ -540,13 +523,13 @@ public void testPollRedeliveryWithConsumerRebalance() { final Map offsets = INITIAL_ASSIGNMENT.stream() .collect(Collectors.toMap(Function.identity(), tp -> new OffsetAndMetadata(FIRST_OFFSET))); when(sinkTask.preCommit(offsets)).thenReturn(offsets); - newAssignment = Set.of(TOPIC_PARTITION3); + newAssignment = Collections.singleton(TOPIC_PARTITION3); workerTask.iteration(); verify(sinkTask).close(INITIAL_ASSIGNMENT); // All partitions are resumed, as all previously paused-for-redelivery partitions were revoked - newAssignment.forEach(tp -> verify(consumer).resume(Set.of(tp))); + newAssignment.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); } @Test @@ -633,10 +616,10 @@ public void testPartialRevocationAndAssignment() { when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT) - .thenReturn(Set.of(TOPIC_PARTITION2)) - .thenReturn(Set.of(TOPIC_PARTITION2)) - .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) - .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) + .thenReturn(Collections.singleton(TOPIC_PARTITION2)) + .thenReturn(Collections.singleton(TOPIC_PARTITION2)) + .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) + .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT) .thenReturn(INITIAL_ASSIGNMENT); @@ -653,18 +636,18 @@ public void testPartialRevocationAndAssignment() { return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Set.of(TOPIC_PARTITION)); - rebalanceListener.getValue().onPartitionsAssigned(Set.of()); + rebalanceListener.getValue().onPartitionsRevoked(singleton(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsAssigned(Collections.emptySet()); return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Set.of()); - rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); + rebalanceListener.getValue().onPartitionsAssigned(singleton(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) .thenAnswer((Answer>) invocation -> { - rebalanceListener.getValue().onPartitionsLost(Set.of(TOPIC_PARTITION3)); - rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsLost(singleton(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsAssigned(singleton(TOPIC_PARTITION)); return ConsumerRecords.empty(); }); @@ -680,19 +663,19 @@ public void testPartialRevocationAndAssignment() { // Second iteration--second call to poll, partial consumer revocation workerTask.iteration(); - verify(sinkTask).close(Set.of(TOPIC_PARTITION)); - verify(sinkTask, times(2)).put(List.of()); + verify(sinkTask).close(singleton(TOPIC_PARTITION)); + verify(sinkTask, times(2)).put(Collections.emptyList()); // Third iteration--third call to poll, partial consumer assignment workerTask.iteration(); - verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); - verify(sinkTask, times(3)).put(List.of()); + verify(sinkTask).open(singleton(TOPIC_PARTITION3)); + verify(sinkTask, times(3)).put(Collections.emptyList()); // Fourth iteration--fourth call to poll, one partition lost; can't commit offsets for it, one new partition assigned workerTask.iteration(); - verify(sinkTask).close(Set.of(TOPIC_PARTITION3)); - verify(sinkTask).open(Set.of(TOPIC_PARTITION)); - verify(sinkTask, times(4)).put(List.of()); + verify(sinkTask).close(singleton(TOPIC_PARTITION3)); + verify(sinkTask).open(singleton(TOPIC_PARTITION)); + verify(sinkTask, times(4)).put(Collections.emptyList()); } @Test @@ -707,12 +690,12 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { when(consumer.assignment()) .thenReturn(INITIAL_ASSIGNMENT, INITIAL_ASSIGNMENT) - .thenReturn(Set.of(TOPIC_PARTITION2)) - .thenReturn(Set.of(TOPIC_PARTITION2)) - .thenReturn(Set.of(TOPIC_PARTITION2)) - .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) - .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)) - .thenReturn(Set.of(TOPIC_PARTITION2, TOPIC_PARTITION3)); + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Collections.singletonList(TOPIC_PARTITION2))) + .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) + .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))) + .thenReturn(new HashSet<>(Arrays.asList(TOPIC_PARTITION2, TOPIC_PARTITION3))); INITIAL_ASSIGNMENT.forEach(tp -> when(consumer.position(tp)).thenReturn(FIRST_OFFSET)); when(consumer.position(TOPIC_PARTITION3)).thenReturn(FIRST_OFFSET); @@ -727,14 +710,14 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { .thenAnswer(expectConsumerPoll(1)) // Third poll; assignment changes to [TP2] .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Set.of(TOPIC_PARTITION)); - rebalanceListener.getValue().onPartitionsAssigned(Set.of()); + rebalanceListener.getValue().onPartitionsRevoked(Collections.singleton(TOPIC_PARTITION)); + rebalanceListener.getValue().onPartitionsAssigned(Collections.emptySet()); return ConsumerRecords.empty(); }) // Fourth poll; assignment changes to [TP2, TP3] .thenAnswer(invocation -> { - rebalanceListener.getValue().onPartitionsRevoked(Set.of()); - rebalanceListener.getValue().onPartitionsAssigned(Set.of(TOPIC_PARTITION3)); + rebalanceListener.getValue().onPartitionsRevoked(Collections.emptySet()); + rebalanceListener.getValue().onPartitionsAssigned(Collections.singleton(TOPIC_PARTITION3)); return ConsumerRecords.empty(); }) // Fifth poll; an offset commit takes place @@ -753,13 +736,13 @@ public void testPreCommitFailureAfterPartialRevocationAndAssignment() { doNothing().when(consumer).commitSync(offsets); workerTask.iteration(); - verify(sinkTask).close(Set.of(TOPIC_PARTITION)); - verify(sinkTask, times(2)).put(List.of()); + verify(sinkTask).close(Collections.singleton(TOPIC_PARTITION)); + verify(sinkTask, times(2)).put(Collections.emptyList()); // Fourth iteration--fourth call to poll, partial consumer assignment workerTask.iteration(); - verify(sinkTask).open(Set.of(TOPIC_PARTITION3)); + verify(sinkTask).open(Collections.singleton(TOPIC_PARTITION3)); final Map workerCurrentOffsets = new HashMap<>(); workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET)); @@ -816,7 +799,7 @@ public void testWakeupInCommitSyncCausesRetry() { verify(sinkTask).close(INITIAL_ASSIGNMENT); verify(sinkTask, times(2)).open(INITIAL_ASSIGNMENT); - INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Set.of(tp))); + INITIAL_ASSIGNMENT.forEach(tp -> verify(consumer).resume(Collections.singleton(tp))); verify(statusListener).onResume(taskId); @@ -902,7 +885,7 @@ public void testRaisesFailedRetriableExceptionFromConvert() { @Test public void testSkipsFailedRetriableExceptionFromConvert() { createTask(initialState, keyConverter, valueConverter, headerConverter, - RetryWithToleranceOperatorTest.allOperator(), List::of, transformationChain); + RetryWithToleranceOperatorTest.allOperator(), Collections::emptyList, transformationChain); workerTask.initialize(TASK_CONFIG); workerTask.initializeAndStart(); @@ -920,7 +903,7 @@ public void testSkipsFailedRetriableExceptionFromConvert() { workerTask.iteration(); workerTask.execute(); - verify(sinkTask, times(3)).put(List.of()); + verify(sinkTask, times(3)).put(Collections.emptyList()); } @Test @@ -971,7 +954,7 @@ public void testSkipsFailedRetriableExceptionFromTransform() { workerTask.iteration(); workerTask.execute(); - verify(sinkTask, times(3)).put(List.of()); + verify(sinkTask, times(3)).put(Collections.emptyList()); } @Test @@ -1270,7 +1253,7 @@ public void testSinkTasksHandleCloseErrors() { .when(sinkTask).put(anyList()); Throwable closeException = new RuntimeException(); - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); // Throw another exception while closing the task's assignment doThrow(closeException).when(sinkTask).close(any(Collection.class)); @@ -1307,7 +1290,7 @@ public void testSuppressCloseErrors() { .doThrow(putException) .when(sinkTask).put(anyList()); - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); // Throw another exception while closing the task's assignment doThrow(closeException).when(sinkTask).close(any(Collection.class)); @@ -1387,7 +1370,7 @@ public void testCommitWithOutOfOrderCallback() { workerCurrentOffsets.put(TOPIC_PARTITION2, new OffsetAndMetadata(FIRST_OFFSET)); final List originalPartitions = new ArrayList<>(INITIAL_ASSIGNMENT); - final List rebalancedPartitions = List.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + final List rebalancedPartitions = asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); final Map rebalanceOffsets = new HashMap<>(); rebalanceOffsets.put(TOPIC_PARTITION, workerCurrentOffsets.get(TOPIC_PARTITION)); rebalanceOffsets.put(TOPIC_PARTITION2, workerCurrentOffsets.get(TOPIC_PARTITION2)); @@ -1529,10 +1512,7 @@ public void testCommitWithOutOfOrderCallback() { assertEquals(rebalanceOffsets, workerTask.lastCommittedOffsets()); // onPartitionsRevoked - ArgumentCaptor> closeCaptor = ArgumentCaptor.forClass(Collection.class); - verify(sinkTask).close(closeCaptor.capture()); - Collection actualClosePartitions = closeCaptor.getValue(); - assertEquals(workerCurrentOffsets.keySet(), new HashSet<>(actualClosePartitions)); + verify(sinkTask).close(new ArrayList<>(workerCurrentOffsets.keySet())); verify(consumer).commitSync(anyMap()); // onPartitionsAssigned - step 2 @@ -1816,7 +1796,7 @@ public void testHeadersWithCustomConverter() { expectPollInitialAssignment() .thenAnswer((Answer>) invocation -> { - List> records = List.of( + List> records = Arrays.asList( new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 1, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, 0, 0, keyA.getBytes(), valueA.getBytes(encodingA), headersA, Optional.empty()), new ConsumerRecord<>(TOPIC, PARTITION, FIRST_OFFSET + recordsReturnedTp1 + 2, RecordBatch.NO_TIMESTAMP, TimestampType.NO_TIMESTAMP_TYPE, @@ -1877,10 +1857,11 @@ public void testOriginalTopicWithTopicMutatingTransformations() { public void testPartitionCountInCaseOfPartitionRevocation() { MockConsumer mockConsumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); // Setting up Worker Sink Task to check metrics - createTask(taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, metrics, + workerTask = new WorkerSinkTask( + taskId, sinkTask, statusListener, TargetState.PAUSED, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, mockConsumer, pluginLoader, time, - RetryWithToleranceOperatorTest.noneOperator(), statusBackingStore, List::of); + RetryWithToleranceOperatorTest.noneOperator(), null, statusBackingStore, Collections::emptyList); mockConsumer.updateBeginningOffsets( new HashMap<>() {{ put(TOPIC_PARTITION, 0L); @@ -1893,7 +1874,7 @@ public void testPartitionCountInCaseOfPartitionRevocation() { mockConsumer.rebalance(INITIAL_ASSIGNMENT); assertSinkMetricValue("partition-count", 2); // Revoked "TOPIC_PARTITION" and second re-balance with "TOPIC_PARTITION2" - mockConsumer.rebalance(Set.of(TOPIC_PARTITION2)); + mockConsumer.rebalance(Collections.singleton(TOPIC_PARTITION2)); assertSinkMetricValue("partition-count", 1); // Closing the Worker Sink Task which will update the partition count as 0. workerTask.close(); @@ -1901,12 +1882,12 @@ public void testPartitionCountInCaseOfPartitionRevocation() { } private void expectRebalanceRevocationError(RuntimeException e) { - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); doThrow(e).when(sinkTask).close(INITIAL_ASSIGNMENT); } private void expectRebalanceAssignmentError(RuntimeException e) { - when(sinkTask.preCommit(anyMap())).thenReturn(Map.of()); + when(sinkTask.preCommit(anyMap())).thenReturn(Collections.emptyMap()); when(consumer.position(TOPIC_PARTITION)).thenReturn(FIRST_OFFSET); when(consumer.position(TOPIC_PARTITION2)).thenReturn(FIRST_OFFSET); @@ -1914,7 +1895,7 @@ private void expectRebalanceAssignmentError(RuntimeException e) { } private void verifyInitializeTask() { - verify(consumer).subscribe(eq(List.of(TOPIC)), rebalanceListener.capture()); + verify(consumer).subscribe(eq(Collections.singletonList(TOPIC)), rebalanceListener.capture()); verify(sinkTask).initialize(sinkTaskContext.capture()); verify(sinkTask).start(TASK_PROPS); } @@ -1934,7 +1915,7 @@ private OngoingStubbing> expectPollInitialAssign private void verifyPollInitialAssignment() { verify(sinkTask).open(INITIAL_ASSIGNMENT); verify(consumer, atLeastOnce()).assignment(); - verify(sinkTask).put(List.of()); + verify(sinkTask).put(Collections.emptyList()); } private Answer> expectConsumerPoll(final int numMessages) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java index 729b5f0436c2b..2ed01a747a726 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSinkTaskThreadedTest.java @@ -24,7 +24,6 @@ import org.apache.kafka.clients.consumer.OffsetCommitCallback; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; @@ -35,7 +34,6 @@ import org.apache.kafka.connect.runtime.errors.ProcessingContext; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; import org.apache.kafka.connect.runtime.isolation.PluginClassLoader; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.sink.SinkConnector; import org.apache.kafka.connect.sink.SinkRecord; @@ -61,14 +59,18 @@ import java.io.IOException; import java.time.Duration; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; import java.util.function.Function; +import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -107,8 +109,8 @@ public class WorkerSinkTaskThreadedTest { private static final TopicPartition TOPIC_PARTITION2 = new TopicPartition(TOPIC, PARTITION2); private static final TopicPartition TOPIC_PARTITION3 = new TopicPartition(TOPIC, PARTITION3); private static final TopicPartition UNASSIGNED_TOPIC_PARTITION = new TopicPartition(TOPIC, 200); - private static final Set INITIAL_ASSIGNMENT = Set.of( - TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + private static final Set INITIAL_ASSIGNMENT = new HashSet<>(Arrays.asList( + TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)); private static final Map TASK_PROPS = new HashMap<>(); private static final long TIMESTAMP = 42L; @@ -171,16 +173,12 @@ public void setup() { workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); WorkerConfig workerConfig = new StandaloneConfig(workerProps); - Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); workerTask = new WorkerSinkTask( - taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverterPlugin, - valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, transformationChain, + taskId, sinkTask, statusListener, initialState, workerConfig, ClusterConfigState.EMPTY, metrics, keyConverter, + valueConverter, errorHandlingMetrics, headerConverter, transformationChain, consumer, pluginLoader, time, RetryWithToleranceOperatorTest.noneOperator(), null, statusBackingStore, - List::of, null, TestPlugins.noOpLoaderSwap()); + Collections::emptyList); recordsReturned = 0; } @@ -436,7 +434,7 @@ public void testAssignmentPauseResume() { doAnswer(invocation -> { return null; // initial assignment }).doAnswer(invocation -> { - assertEquals(Set.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3), sinkTaskContext.getValue().assignment()); + assertEquals(new HashSet<>(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3)), sinkTaskContext.getValue().assignment()); return null; }).doAnswer(invocation -> { try { @@ -458,11 +456,11 @@ public void testAssignmentPauseResume() { return null; }).when(sinkTask).put(any(Collection.class)); - doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).pause(List.of(UNASSIGNED_TOPIC_PARTITION)); - doAnswer(invocation -> null).when(consumer).pause(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); + doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).pause(singletonList(UNASSIGNED_TOPIC_PARTITION)); + doAnswer(invocation -> null).when(consumer).pause(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); - doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).resume(List.of(UNASSIGNED_TOPIC_PARTITION)); - doAnswer(invocation -> null).when(consumer).resume(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); + doThrow(new IllegalStateException("unassigned topic partition")).when(consumer).resume(singletonList(UNASSIGNED_TOPIC_PARTITION)); + doAnswer(invocation -> null).when(consumer).resume(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); workerTask.initialize(TASK_CONFIG); workerTask.initializeAndStart(); @@ -479,8 +477,8 @@ public void testAssignmentPauseResume() { verifyStopTask(); verifyTaskGetTopic(3); - verify(consumer, atLeastOnce()).pause(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); - verify(consumer, atLeastOnce()).resume(List.of(TOPIC_PARTITION, TOPIC_PARTITION2)); + verify(consumer, atLeastOnce()).pause(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); + verify(consumer, atLeastOnce()).resume(Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2)); } @Test @@ -555,7 +553,7 @@ public void testRewindOnRebalanceDuringPoll() { } private void verifyInitializeTask() { - verify(consumer).subscribe(eq(List.of(TOPIC)), rebalanceListener.capture()); + verify(consumer).subscribe(eq(singletonList(TOPIC)), rebalanceListener.capture()); verify(sinkTask).initialize(sinkTaskContext.capture()); verify(sinkTask).start(TASK_PROPS); } @@ -568,7 +566,7 @@ private void expectInitialAssignment() { private void verifyInitialAssignment() { verify(sinkTask).open(INITIAL_ASSIGNMENT); - verify(sinkTask).put(List.of()); + verify(sinkTask).put(Collections.emptyList()); } private void verifyStopTask() { @@ -612,7 +610,7 @@ private void expectPolls(final long pollDelayMs) { @SuppressWarnings("SameParameterValue") private void expectRebalanceDuringPoll(long startOffset) { - final List partitions = List.of(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); + final List partitions = Arrays.asList(TOPIC_PARTITION, TOPIC_PARTITION2, TOPIC_PARTITION3); final Map offsets = new HashMap<>(); offsets.put(TOPIC_PARTITION, startOffset); @@ -649,7 +647,7 @@ private void expectPreCommit(ExpectOffsetCommitCommand... commands) { @Override public Object answer(InvocationOnMock invocation) { ExpectOffsetCommitCommand commitCommand = commands[index++]; - // All assigned partitions will have offsets committed, but we've only processed messages/updated + // All assigned partitions will have offsets committed, but we've only processed messages/updated // offsets for one final Map offsetsToCommit = offsetsToCommitFn.apply(commitCommand.expectedMessages); @@ -662,7 +660,7 @@ public Object answer(InvocationOnMock invocation) { } }).when(sinkTask).preCommit(anyMap()); } - + private void expectOffsetCommit(ExpectOffsetCommitCommand... commands) { doAnswer(new Answer<>() { int index = 0; @@ -720,8 +718,19 @@ private RecordHeaders emptyHeaders() { private abstract static class TestSinkTask extends SinkTask { } - private record ExpectOffsetCommitCommand(long expectedMessages, RuntimeException error, - Exception consumerCommitError, long consumerCommitDelayMs, - boolean invokeCallback) { + private static class ExpectOffsetCommitCommand { + final long expectedMessages; + final RuntimeException error; + final Exception consumerCommitError; + final long consumerCommitDelayMs; + final boolean invokeCallback; + + private ExpectOffsetCommitCommand(long expectedMessages, RuntimeException error, Exception consumerCommitError, long consumerCommitDelayMs, boolean invokeCallback) { + this.expectedMessages = expectedMessages; + this.error = error; + this.consumerCommitError = consumerCommitError; + this.consumerCommitDelayMs = consumerCommitDelayMs; + this.invokeCallback = invokeCallback; + } } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java index 4fca3f8ac6153..a04b3bc7caa56 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerSourceTaskTest.java @@ -27,18 +27,16 @@ import org.apache.kafka.common.errors.TopicAuthorizationException; import org.apache.kafka.common.header.Headers; import org.apache.kafka.common.header.internals.RecordHeaders; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.utils.LogCaptureAppender; import org.apache.kafka.common.utils.Time; import org.apache.kafka.connect.data.Schema; import org.apache.kafka.connect.errors.ConnectException; -import org.apache.kafka.connect.integration.TestableSourceConnector; +import org.apache.kafka.connect.integration.MonitorableSourceConnector; import org.apache.kafka.connect.runtime.ConnectMetrics.MetricGroup; import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperatorTest; import org.apache.kafka.connect.runtime.isolation.Plugins; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.source.SourceTask; @@ -56,6 +54,7 @@ import org.apache.kafka.connect.util.TopicAdmin; import org.apache.kafka.connect.util.TopicCreationGroup; +import org.apache.logging.log4j.Level; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; @@ -71,6 +70,8 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.time.Duration; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedList; import java.util.List; @@ -85,7 +86,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.function.Supplier; -import static org.apache.kafka.connect.integration.TestableSourceConnector.TOPIC_CONFIG; +import static org.apache.kafka.connect.integration.MonitorableSourceConnector.TOPIC_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.TASKS_MAX_CONFIG; @@ -126,8 +127,8 @@ public class WorkerSourceTaskTest { public static final String POLL_TIMEOUT_MSG = "Timeout waiting for poll"; private static final String TOPIC = "topic"; - private static final Map PARTITION = Map.of("key", "partition".getBytes()); - private static final Map OFFSET = Map.of("key", 12); + private static final Map PARTITION = Collections.singletonMap("key", "partition".getBytes()); + private static final Map OFFSET = Collections.singletonMap("key", 12); // Connect-format data private static final Schema KEY_SCHEMA = Schema.INT32_SCHEMA; @@ -183,7 +184,7 @@ public class WorkerSourceTaskTest { private static final TaskConfig TASK_CONFIG = new TaskConfig(TASK_PROPS); - private static final List RECORDS = List.of( + private static final List RECORDS = Collections.singletonList( new SourceRecord(PARTITION, OFFSET, "topic", null, KEY_SCHEMA, KEY, RECORD_SCHEMA, RECORD) ); @@ -202,7 +203,6 @@ private Map workerProps(boolean enableTopicCreation) { props.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); props.put("offset.storage.file.filename", "/tmp/connect.offsets"); props.put(TOPIC_CREATION_ENABLE_CONFIG, String.valueOf(enableTopicCreation)); - props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); return props; } @@ -210,7 +210,7 @@ private Map sourceConnectorPropsWithGroups(String topic) { // setup up props for the source connector Map props = new HashMap<>(); props.put("name", "foo-connector"); - props.put(CONNECTOR_CLASS_CONFIG, TestableSourceConnector.class.getSimpleName()); + props.put(CONNECTOR_CLASS_CONFIG, MonitorableSourceConnector.class.getSimpleName()); props.put(TASKS_MAX_CONFIG, String.valueOf(1)); props.put(TOPIC_CONFIG, topic); props.put(KEY_CONVERTER_CLASS_CONFIG, StringConverter.class.getName()); @@ -248,13 +248,10 @@ private void createWorkerTask(TargetState initialState, RetryWithToleranceOperat private void createWorkerTask(TargetState initialState, Converter keyConverter, Converter valueConverter, HeaderConverter headerConverter, RetryWithToleranceOperator retryWithToleranceOperator) { - Plugin keyConverterPlugin = metrics.wrap(keyConverter, taskId, true); - Plugin valueConverterPlugin = metrics.wrap(valueConverter, taskId, false); - Plugin headerConverterPlugin = metrics.wrap(headerConverter, taskId); - workerTask = new WorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverterPlugin, valueConverterPlugin, errorHandlingMetrics, headerConverterPlugin, + workerTask = new WorkerSourceTask(taskId, sourceTask, statusListener, initialState, keyConverter, valueConverter, errorHandlingMetrics, headerConverter, transformationChain, producer, admin, TopicCreationGroup.configuredGroups(sourceConfig), offsetReader, offsetWriter, offsetStore, config, clusterConfigState, metrics, plugins.delegatingLoader(), Time.SYSTEM, - retryWithToleranceOperator, statusBackingStore, Runnable::run, List::of, null, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, statusBackingStore, Runnable::run, Collections::emptyList); } @ParameterizedTest @@ -503,7 +500,7 @@ public void testCommit(boolean enableTopicCreation) throws Exception { final CountDownLatch pollLatch = expectPolls(1); expectTopicCreation(TOPIC); - expectBeginFlush(List.of(true, false).iterator()::next); + expectBeginFlush(Arrays.asList(true, false).iterator()::next); expectOffsetFlush(true, true); workerTask.initialize(TASK_CONFIG); @@ -590,9 +587,9 @@ public void testSendRecordsRetries(boolean enableTopicCreation) { .thenAnswer(producerSendAnswer(true)); // Try to send 3, make first pass, second fail. Should save last two - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); workerTask.sendRecords(); - assertEquals(List.of(record2, record3), workerTask.toSend); + assertEquals(Arrays.asList(record2, record3), workerTask.toSend); // Next they all succeed workerTask.sendRecords(); @@ -612,7 +609,7 @@ public void testSendRecordsProducerCallbackFail(boolean enableTopicCreation) { expectSendRecordProducerCallbackFail(); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, () -> workerTask.sendRecords()); verify(transformationChain, times(2)).apply(any(), any(SourceRecord.class)); @@ -635,7 +632,7 @@ public void testSendRecordsProducerSendFailsImmediately(boolean enableTopicCreat when(producer.send(any(ProducerRecord.class), any(Callback.class))) .thenThrow(new KafkaException("Producer closed while send in progress", new InvalidTopicException(TOPIC))); - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); assertThrows(ConnectException.class, () -> workerTask.sendRecords()); } @@ -659,7 +656,7 @@ public void testSendRecordsTaskCommitRecordFail(boolean enableTopicCreation) thr .doNothing() .when(sourceTask).commitRecord(any(SourceRecord.class), any(RecordMetadata.class)); - workerTask.toSend = List.of(record1, record2, record3); + workerTask.toSend = Arrays.asList(record1, record2, record3); workerTask.sendRecords(); assertNull(workerTask.toSend); } @@ -672,7 +669,7 @@ public void testSourceTaskIgnoresProducerException(boolean enableTopicCreation) expectTopicCreation(TOPIC); //Use different offsets for each record, so we can verify all were committed - final Map offset2 = Map.of("key", 13); + final Map offset2 = Collections.singletonMap("key", 13); // send two records // record 1 will succeed @@ -689,7 +686,7 @@ public void testSourceTaskIgnoresProducerException(boolean enableTopicCreation) .thenAnswer(producerSendAnswer(false)); //Send records and then commit offsets and verify both were committed and no exception - workerTask.toSend = List.of(record1, record2); + workerTask.toSend = Arrays.asList(record1, record2); workerTask.sendRecords(); workerTask.updateCommittableOffsets(); workerTask.commitOffsets(); @@ -754,8 +751,8 @@ public void testCancel(boolean enableTopicCreation) { } private TopicAdmin.TopicCreationResponse createdTopic(String topic) { - Set created = Set.of(topic); - Set existing = Set.of(); + Set created = Collections.singleton(topic); + Set existing = Collections.emptySet(); return new TopicAdmin.TopicCreationResponse(created, existing); } @@ -773,7 +770,7 @@ private CountDownLatch expectEmptyPolls(final AtomicInteger count) throws Interr count.incrementAndGet(); latch.countDown(); Thread.sleep(10); - return List.of(); + return Collections.emptyList(); }); return latch; } @@ -895,7 +892,7 @@ private void expectOffsetFlush() throws Exception { private void expectOffsetFlush(Boolean... succeedList) throws Exception { Future flushFuture = mock(Future.class); when(offsetWriter.doFlush(any(org.apache.kafka.connect.util.Callback.class))).thenReturn(flushFuture); - LinkedList succeedQueue = new LinkedList<>(List.of(succeedList)); + LinkedList succeedQueue = new LinkedList<>(Arrays.asList(succeedList)); doAnswer(invocationOnMock -> { boolean succeed = succeedQueue.pop(); @@ -992,7 +989,7 @@ private void verifyClose() { private void expectTopicCreation(String topic) { if (config.topicCreationEnable()) { - when(admin.describeTopics(topic)).thenReturn(Map.of()); + when(admin.describeTopics(topic)).thenReturn(Collections.emptyMap()); when(admin.createOrFindTopics(any(NewTopic.class))).thenReturn(createdTopic(topic)); } } @@ -1014,10 +1011,10 @@ private void assertShouldSkipCommit() { try (LogCaptureAppender committerAppender = LogCaptureAppender.createAndRegister(SourceTaskOffsetCommitter.class); LogCaptureAppender taskAppender = LogCaptureAppender.createAndRegister(WorkerSourceTask.class)) { - committerAppender.setClassLogger(SourceTaskOffsetCommitter.class, org.apache.logging.log4j.Level.TRACE); - taskAppender.setClassLogger(WorkerSourceTask.class, org.apache.logging.log4j.Level.TRACE); + committerAppender.setClassLogger(SourceTaskOffsetCommitter.class, Level.TRACE); + taskAppender.setClassLogger(WorkerSourceTask.class, Level.TRACE); SourceTaskOffsetCommitter.commit(workerTask); - assertEquals(List.of(), taskAppender.getMessages()); + assertEquals(Collections.emptyList(), taskAppender.getMessages()); List committerMessages = committerAppender.getMessages(); assertEquals(1, committerMessages.size()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java index fa445454fd088..c8c8cc49d0565 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTaskTest.java @@ -24,7 +24,6 @@ import org.apache.kafka.connect.runtime.errors.ErrorHandlingMetrics; import org.apache.kafka.connect.runtime.errors.ErrorReporter; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.sink.SinkTask; import org.apache.kafka.connect.source.SourceRecord; import org.apache.kafka.connect.storage.StatusBackingStore; @@ -300,7 +299,7 @@ public TestWorkerTask(ConnectorTaskId id, Listener statusListener, TargetState i Supplier>> errorReporterSupplier, Time time, StatusBackingStore statusBackingStore) { super(id, statusListener, initialState, loader, connectMetrics, errorHandlingMetrics, - retryWithToleranceOperator, transformationChain, errorReporterSupplier, time, statusBackingStore, null, TestPlugins.noOpLoaderSwap()); + retryWithToleranceOperator, transformationChain, errorReporterSupplier, time, statusBackingStore); } @Override @@ -318,11 +317,6 @@ protected void execute() { @Override protected void close() { } - - @Override - protected String taskVersion() { - return null; - } } protected void assertFailedMetric(TaskMetricsGroup metricsGroup) { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java index e29eeebe88d60..65262983d9f8b 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTest.java @@ -63,7 +63,6 @@ import org.apache.kafka.connect.runtime.isolation.PluginClassLoader; import org.apache.kafka.connect.runtime.isolation.Plugins; import org.apache.kafka.connect.runtime.isolation.Plugins.ClassLoaderUsage; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.runtime.rest.RestServer; import org.apache.kafka.connect.runtime.rest.entities.ConnectorOffsets; import org.apache.kafka.connect.runtime.rest.entities.ConnectorStateInfo; @@ -90,7 +89,6 @@ import org.apache.kafka.connect.util.SinkUtils; import org.apache.kafka.connect.util.TopicAdmin; -import org.apache.maven.artifact.versioning.VersionRange; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; @@ -105,6 +103,7 @@ import org.mockito.quality.Strictness; import java.lang.management.ManagementFactory; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -249,7 +248,6 @@ public void setup(boolean enableTopicCreation) { .strictness(Strictness.STRICT_STUBS) .startMocking(); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -324,13 +322,13 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa // Create mockKafkaClusterId(); - mockVersionedConnectorIsolation(connectorClass, null, sourceConnector); + mockConnectorIsolation(connectorClass, sourceConnector); mockExecutorRealSubmit(WorkerConnector.class); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, noneConnectorClientConfigOverridePolicy); worker.start(); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); FutureCallback onFirstStart = new FutureCallback<>(); @@ -338,7 +336,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa // Wait for the connector to actually start assertEquals(TargetState.STARTED, onFirstStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); FutureCallback onSecondStart = new FutureCallback<>(); @@ -352,7 +350,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); @@ -360,7 +358,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa verifyKafkaClusterId(); - verifyVersionedConnectorIsolation(connectorClass, null, sourceConnector); + verifyConnectorIsolation(sourceConnector); verifyExecutorSubmit(); verify(sourceConnector).initialize(any(ConnectorContext.class)); verify(sourceConnector).start(connectorProps); @@ -373,7 +371,7 @@ public void testStartAndStopConnector(boolean enableTopicCreation) throws Throwa private void mockFileConfigProvider() { MockFileConfigProvider mockFileConfigProvider = new MockFileConfigProvider(); - mockFileConfigProvider.configure(Map.of("testId", mockFileProviderTestId)); + mockFileConfigProvider.configure(Collections.singletonMap("testId", mockFileProviderTestId)); when(plugins.newConfigProvider(any(AbstractConfig.class), eq("config.providers.file"), any(ClassLoaderUsage.class))) @@ -391,8 +389,7 @@ public void testStartConnectorFailure(boolean enableTopicCreation) throws Except mockKafkaClusterId(); mockGenericIsolation(); - when(plugins.pluginLoader(nonConnectorClass, null)).thenReturn(pluginLoader); - when(plugins.newConnector(nonConnectorClass, null)).thenThrow(exception); + when(plugins.newConnector(anyString())).thenThrow(exception); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, noneConnectorClientConfigOverridePolicy); worker.herder = herder; @@ -409,7 +406,7 @@ public void testStartConnectorFailure(boolean enableTopicCreation) throws Except } assertStartupStatistics(worker, 1, 1, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 1, 0, 0); @@ -417,7 +414,7 @@ public void testStartConnectorFailure(boolean enableTopicCreation) throws Except assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 1, 0, 0); - verify(plugins).newConnector(nonConnectorClass, null); + verify(plugins).newConnector(anyString()); verifyKafkaClusterId(); verifyGenericIsolation(); verify(connectorStatusListener).onFailure(eq(CONNECTOR_ID), any(ConnectException.class)); @@ -429,7 +426,7 @@ public void testAddConnectorByAlias(boolean enableTopicCreation) throws Throwabl setup(enableTopicCreation); final String connectorAlias = "SampleSourceConnector"; mockKafkaClusterId(); - mockVersionedConnectorIsolation(connectorAlias, null, sinkConnector); + mockConnectorIsolation(connectorAlias, sinkConnector); mockExecutorRealSubmit(WorkerConnector.class); connectorProps.put(CONNECTOR_CLASS_CONFIG, connectorAlias); @@ -440,26 +437,26 @@ public void testAddConnectorByAlias(boolean enableTopicCreation) throws Throwabl worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); FutureCallback onStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); assertStatistics(worker, 1, 0); assertStartupStatistics(worker, 1, 0, 0, 0); worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); verifyKafkaClusterId(); - verifyVersionedConnectorIsolation(connectorAlias, null, sinkConnector); + verifyConnectorIsolation(sinkConnector); verifyExecutorSubmit(); verify(sinkConnector).initialize(any(ConnectorContext.class)); verify(sinkConnector).start(connectorProps); @@ -475,7 +472,7 @@ public void testAddConnectorByShortAlias(boolean enableTopicCreation) throws Thr final String shortConnectorAlias = "WorkerTest"; mockKafkaClusterId(); - mockVersionedConnectorIsolation(shortConnectorAlias, null, sinkConnector); + mockConnectorIsolation(shortConnectorAlias, sinkConnector); mockExecutorRealSubmit(WorkerConnector.class); connectorProps.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, shortConnectorAlias); @@ -486,23 +483,23 @@ public void testAddConnectorByShortAlias(boolean enableTopicCreation) throws Thr worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); FutureCallback onStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onStart.get(1000, TimeUnit.MILLISECONDS)); - assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); assertStatistics(worker, 1, 0); worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); - verifyVersionedConnectorIsolation(shortConnectorAlias, null, sinkConnector); + verifyConnectorIsolation(sinkConnector); verify(sinkConnector).initialize(any(ConnectorContext.class)); verify(sinkConnector).start(connectorProps); verify(connectorStatusListener).onStartup(CONNECTOR_ID); @@ -534,11 +531,11 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th final String connectorClass = SampleSourceConnector.class.getName(); mockKafkaClusterId(); - mockVersionedConnectorIsolation(connectorClass, null, sinkConnector); + mockConnectorIsolation(connectorClass, sinkConnector); mockExecutorRealSubmit(WorkerConnector.class); - Map taskProps = Map.of("foo", "bar"); - when(sinkConnector.taskConfigs(2)).thenReturn(List.of(taskProps, taskProps)); + Map taskProps = Collections.singletonMap("foo", "bar"); + when(sinkConnector.taskConfigs(2)).thenReturn(Arrays.asList(taskProps, taskProps)); // Use doReturn().when() syntax due to when().thenReturn() not being able to return wildcard generic types doReturn(TestSourceTask.class).when(sinkConnector).taskClass(); @@ -552,13 +549,13 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); FutureCallback onFirstStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onFirstStart); // Wait for the connector to actually start assertEquals(TargetState.STARTED, onFirstStart.get(1000, TimeUnit.MILLISECONDS)); assertStatistics(worker, 1, 0); - assertEquals(Set.of(CONNECTOR_ID), worker.connectorNames()); + assertEquals(Collections.singleton(CONNECTOR_ID), worker.connectorNames()); FutureCallback onSecondStart = new FutureCallback<>(); worker.startConnector(CONNECTOR_ID, connectorProps, ctx, connectorStatusListener, TargetState.STARTED, onSecondStart); @@ -581,13 +578,13 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th worker.stopAndAwaitConnector(CONNECTOR_ID); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 1, 0, 0, 0); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); - verifyVersionedConnectorIsolation(connectorClass, null, sinkConnector); + verifyConnectorIsolation(sinkConnector); verifyExecutorSubmit(); verify(sinkConnector).initialize(any(ConnectorContext.class)); verify(sinkConnector).start(connectorProps); @@ -604,13 +601,13 @@ public void testReconfigureConnectorTasks(boolean enableTopicCreation) throws Th public void testAddRemoveSourceTask(boolean enableTopicCreation) { setup(enableTopicCreation); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, sourceConnector, task); - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); + mockTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, task); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -618,37 +615,37 @@ public void testAddRemoveSourceTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); ClusterConfigState configState = new ClusterConfigState( 0, null, - Map.of(CONNECTOR_ID, 1), - Map.of(CONNECTOR_ID, connectorConfigs), - Map.of(CONNECTOR_ID, TargetState.STARTED), - Map.of(TASK_ID, origProps), - Map.of(), - Map.of(), - Map.of(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_ID, 1), + Collections.singletonMap(CONNECTOR_ID, connectorConfigs), + Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), + Collections.singletonMap(TASK_ID, origProps), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), + Collections.emptySet(), + Collections.emptySet() ); assertTrue(worker.startSourceTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED)); assertStatistics(worker, 0, 1); - assertEquals(Set.of(TASK_ID), worker.taskIds()); + assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, task); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskHeaderConverterFromConnector(); + verifyTaskIsolation(task); + verifyTaskConverter(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG); + verifyTaskConverter(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG); + verifyTaskHeaderConverter(); verifyExecutorSubmit(); } @@ -660,13 +657,13 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { // Most of the other cases use source tasks; we make sure to get code coverage for sink tasks here as well SinkTask task = mock(TestSinkTask.class); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSinkConnector.class, TestSinkTask.class, null, sinkConnector, task); - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); + mockTaskIsolation(SampleSinkConnector.class, TestSinkTask.class, task); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -674,7 +671,7 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); connectorConfigs.put(TOPICS_CONFIG, "t1"); connectorConfigs.put(CONNECTOR_CLASS_CONFIG, SampleSinkConnector.class.getName()); @@ -682,31 +679,31 @@ public void testAddRemoveSinkTask(boolean enableTopicCreation) { ClusterConfigState configState = new ClusterConfigState( 0, null, - Map.of(CONNECTOR_ID, 1), - Map.of(CONNECTOR_ID, connectorConfigs), - Map.of(CONNECTOR_ID, TargetState.STARTED), - Map.of(TASK_ID, origProps), - Map.of(), - Map.of(), - Map.of(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_ID, 1), + Collections.singletonMap(CONNECTOR_ID, connectorConfigs), + Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), + Collections.singletonMap(TASK_ID, origProps), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), + Collections.emptySet(), + Collections.emptySet() ); assertTrue(worker.startSinkTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED)); assertStatistics(worker, 0, 1); - assertEquals(Set.of(TASK_ID), worker.taskIds()); + assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSinkConnector.class, TestSinkTask.class, null, task); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskHeaderConverterFromConnector(); + verifyTaskIsolation(task); + verifyTaskConverter(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG); + verifyTaskConverter(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG); + verifyTaskHeaderConverter(); verifyExecutorSubmit(); } @@ -732,16 +729,16 @@ public void testAddRemoveExactlyOnceSourceTask(boolean enableTopicCreation) { config = new DistributedConfig(workerProps); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, sourceConnector, task); - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); + mockTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, task); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); Runnable preProducer = mock(Runnable.class); Runnable postProducer = mock(Runnable.class); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -749,38 +746,38 @@ public void testAddRemoveExactlyOnceSourceTask(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); ClusterConfigState configState = new ClusterConfigState( 0, null, - Map.of(CONNECTOR_ID, 1), - Map.of(CONNECTOR_ID, connectorConfigs), - Map.of(CONNECTOR_ID, TargetState.STARTED), - Map.of(TASK_ID, origProps), - Map.of(), - Map.of(), - Map.of(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_ID, 1), + Collections.singletonMap(CONNECTOR_ID, connectorConfigs), + Collections.singletonMap(CONNECTOR_ID, TargetState.STARTED), + Collections.singletonMap(TASK_ID, origProps), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_ID, new AppliedConnectorConfig(connectorConfigs)), + Collections.emptySet(), + Collections.emptySet() ); assertTrue(worker.startExactlyOnceSourceTask(TASK_ID, configState, connectorConfigs, origProps, taskStatusListener, TargetState.STARTED, preProducer, postProducer)); assertStatistics(worker, 0, 1); - assertEquals(Set.of(TASK_ID), worker.taskIds()); + assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, task); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskHeaderConverterFromConnector(); + verifyTaskIsolation(task); + verifyTaskConverter(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG); + verifyTaskConverter(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG); + verifyTaskHeaderConverter(); verifyExecutorSubmit(); } @@ -792,25 +789,26 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { mockStorage(); mockFileConfigProvider(); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, sourceConnector, task); - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); + mockTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, task); + // Expect that the worker will create converters and will find them using the current classloader ... + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); // Each time we check the task metrics, the worker will call the herder when(herder.taskStatus(TASK_ID)).thenReturn( - new ConnectorStateInfo.TaskState(0, "RUNNING", "worker", "msg", null), - new ConnectorStateInfo.TaskState(0, "PAUSED", "worker", "msg", null), - new ConnectorStateInfo.TaskState(0, "FAILED", "worker", "msg", null), - new ConnectorStateInfo.TaskState(0, "DESTROYED", "worker", "msg", null), - new ConnectorStateInfo.TaskState(0, "UNASSIGNED", "worker", "msg", null) + new ConnectorStateInfo.TaskState(0, "RUNNING", "worker", "msg"), + new ConnectorStateInfo.TaskState(0, "PAUSED", "worker", "msg"), + new ConnectorStateInfo.TaskState(0, "FAILED", "worker", "msg"), + new ConnectorStateInfo.TaskState(0, "DESTROYED", "worker", "msg"), + new ConnectorStateInfo.TaskState(0, "UNASSIGNED", "worker", "msg") ); worker = new Worker(WORKER_ID, @@ -827,7 +825,7 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { worker.start(); assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 0, 0, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); worker.startSourceTask( TASK_ID, ClusterConfigState.EMPTY, @@ -853,7 +851,7 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { verify(instantiatedTask).initialize(taskConfig); verify(herder, times(5)).taskStatus(TASK_ID); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, task); + verifyTaskIsolation(task); verifyExecutorSubmit(); verify(instantiatedTask, atLeastOnce()).id(); verify(instantiatedTask).awaitStop(anyLong()); @@ -862,9 +860,9 @@ public void testTaskStatusMetricsStatuses(boolean enableTopicCreation) { // Called when we stop the worker verify(instantiatedTask).loader(); verify(instantiatedTask).stop(); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskHeaderConverterFromConnector(); + verifyTaskConverter(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG); + verifyTaskConverter(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG); + verifyTaskHeaderConverter(); } @ParameterizedTest @@ -905,11 +903,10 @@ public void testStartTaskFailure(boolean enableTopicCreation) { mockInternalConverters(); mockFileConfigProvider(); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, "missing.From.This.Workers.Classpath"); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, "missing.From.This.Workers.Classpath"); mockKafkaClusterId(); mockGenericIsolation(); - when(plugins.pluginLoader(SampleSourceConnector.class.getName(), null)).thenReturn(pluginLoader); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, noneConnectorClientConfigOverridePolicy); worker.herder = herder; @@ -922,7 +919,7 @@ public void testStartTaskFailure(boolean enableTopicCreation) { assertStatistics(worker, 0, 0); assertStartupStatistics(worker, 0, 0, 1, 1); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); verify(taskStatusListener).onFailure(eq(TASK_ID), any(ConfigException.class)); verifyKafkaClusterId(); @@ -938,17 +935,17 @@ public void testCleanupTasksOnStop(boolean enableTopicCreation) { mockFileConfigProvider(); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, sourceConnector, task); + mockTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, task); // Expect that the worker will create converters and will not initially find them using the current classloader ... - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, null); - mockVersionedTaskConverterFromWorker(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, null); - mockVersionedTaskConverterFromWorker(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(null); - mockVersionedTaskHeaderConverterFromWorker(taskHeaderConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, null); + mockTaskConverter(ClassLoaderUsage.PLUGINS, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, null); + mockTaskConverter(ClassLoaderUsage.PLUGINS, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, null); + mockTaskHeaderConverter(ClassLoaderUsage.PLUGINS, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); @@ -971,13 +968,7 @@ public void testCleanupTasksOnStop(boolean enableTopicCreation) { verify(constructedMockTask).awaitStop(anyLong()); verify(constructedMockTask).removeMetrics(); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, task); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromWorker(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromWorker(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION); - verifyVersionedTaskHeaderConverterFromConnector(); - verifyVersionedTaskHeaderConverterFromWorker(); + verifyTaskIsolation(task); verifyConverters(); verifyExecutorSubmit(); } @@ -990,18 +981,18 @@ public void testConverterOverrides(boolean enableTopicCreation) { mockStorage(); mockFileConfigProvider(); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSourceTask.class.getName()); TaskConfig taskConfig = new TaskConfig(origProps); mockKafkaClusterId(); - mockVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, sourceConnector, task); + mockTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, task); // Expect that the worker will create converters and will not initially find them using the current classloader ... - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, null); - mockVersionedTaskConverterFromWorker(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, null); - mockVersionedTaskConverterFromWorker(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(null); - mockVersionedTaskHeaderConverterFromWorker(taskHeaderConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, null); + mockTaskConverter(ClassLoaderUsage.PLUGINS, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, null); + mockTaskConverter(ClassLoaderUsage.PLUGINS, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, null); + mockTaskHeaderConverter(ClassLoaderUsage.PLUGINS, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, @@ -1009,16 +1000,16 @@ public void testConverterOverrides(boolean enableTopicCreation) { worker.herder = herder; worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); Map connProps = anyConnectorConfigMap(); connProps.put(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, SampleConverterWithHeaders.class.getName()); connProps.put(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, SampleConverterWithHeaders.class.getName()); worker.startSourceTask(TASK_ID, ClusterConfigState.EMPTY, connProps, origProps, taskStatusListener, TargetState.STARTED); assertStatistics(worker, 0, 1); - assertEquals(Set.of(TASK_ID), worker.taskIds()); + assertEquals(Collections.singleton(TASK_ID), worker.taskIds()); worker.stopAndAwaitTask(TASK_ID); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); // Nothing should be left, so this should effectively be a nop worker.stop(); assertStatistics(worker, 0, 0); @@ -1033,13 +1024,7 @@ public void testConverterOverrides(boolean enableTopicCreation) { verify(instantiatedTask).removeMetrics(); verifyKafkaClusterId(); - verifyVersionedTaskIsolation(SampleSourceConnector.class, TestSourceTask.class, null, task); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromWorker(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, WorkerConfig.KEY_CONVERTER_VERSION); - verifyVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG); - verifyVersionedTaskConverterFromWorker(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, WorkerConfig.VALUE_CONVERTER_VERSION); - verifyVersionedTaskHeaderConverterFromConnector(); - verifyVersionedTaskHeaderConverterFromWorker(); + verifyTaskIsolation(task); verifyExecutorSubmit(); verifyStorage(); } @@ -1206,7 +1191,7 @@ public void testAdminConfigsClientOverridesWithAllPolicy(boolean enableTopicCrea props.put("consumer.bootstrap.servers", "localhost:4761"); WorkerConfig configWithOverrides = new StandaloneConfig(props); - Map connConfig = Map.of("metadata.max.age.ms", "10000"); + Map connConfig = Collections.singletonMap("metadata.max.age.ms", "10000"); Map expectedConfigs = new HashMap<>(workerProps); expectedConfigs.remove(AbstractConfig.CONFIG_PROVIDERS_CONFIG); expectedConfigs.put("bootstrap.servers", "localhost:9092"); @@ -1231,7 +1216,7 @@ public void testAdminConfigsClientOverridesWithNonePolicy(boolean enableTopicCre props.put("admin.client.id", "testid"); props.put("admin.metadata.max.age.ms", "5000"); WorkerConfig configWithOverrides = new StandaloneConfig(props); - Map connConfig = Map.of("metadata.max.age.ms", "10000"); + Map connConfig = Collections.singletonMap("metadata.max.age.ms", "10000"); when(connectorConfig.originalsWithPrefix(CONNECTOR_CLIENT_ADMIN_OVERRIDES_PREFIX)).thenReturn(connConfig); @@ -1792,7 +1777,8 @@ public void testWorkerMetrics(boolean enableTopicCreation) throws Exception { List list = worker.metrics().metrics().reporters(); for (MetricsReporter reporter : list) { - if (reporter instanceof MockMetricsReporter mockMetricsReporter) { + if (reporter instanceof MockMetricsReporter) { + MockMetricsReporter mockMetricsReporter = (MockMetricsReporter) reporter; //verify connect cluster is set in MetricsContext assertEquals(CLUSTER_ID, mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID)); } @@ -1817,7 +1803,7 @@ public void testExecutorServiceShutdown(boolean enableTopicCreation) throws Inte noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); worker.stop(); verifyKafkaClusterId(); verify(executorService, times(1)).shutdown(); @@ -1839,7 +1825,7 @@ public void testExecutorServiceShutdownWhenTerminationFails(boolean enableTopicC noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); worker.stop(); verifyKafkaClusterId(); verify(executorService, times(1)).shutdown(); @@ -1862,7 +1848,7 @@ public void testExecutorServiceShutdownWhenTerminationThrowsException(boolean en noneConnectorClientConfigOverridePolicy, null); worker.start(); - assertEquals(Set.of(), worker.connectorNames()); + assertEquals(Collections.emptySet(), worker.connectorNames()); worker.stop(); // Clear the interrupted status so that the test infrastructure doesn't hit an unexpected interrupt. assertTrue(Thread.interrupted()); @@ -1875,7 +1861,7 @@ public void testExecutorServiceShutdownWhenTerminationThrowsException(boolean en @ParameterizedTest @ValueSource(booleans = {true, false}) - @SuppressWarnings({"unchecked", "rawtypes"}) + @SuppressWarnings("unchecked") public void testZombieFencing(boolean enableTopicCreation) { setup(enableTopicCreation); Admin admin = mock(Admin.class); @@ -1893,8 +1879,6 @@ public void testZombieFencing(boolean enableTopicCreation) { mockKafkaClusterId(); mockGenericIsolation(); - when(plugins.connectorClass(anyString(), any())).thenReturn((Class) sourceConnector.getClass()); - when(plugins.pluginLoader(SampleSourceConnector.class.getName(), null)).thenReturn(pluginLoader); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, allConnectorClientConfigOverridePolicy, mockAdminConstructor); @@ -1931,7 +1915,7 @@ public void testGetSinkConnectorOffsets(boolean enableTopicCreation) throws Exce worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, allConnectorClientConfigOverridePolicy, config -> admin); worker.start(); - mockAdminListConsumerGroupOffsets(admin, Map.of(new TopicPartition("test-topic", 0), new OffsetAndMetadata(10)), null); + mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(new TopicPartition("test-topic", 0), new OffsetAndMetadata(10)), null); FutureCallback cb = new FutureCallback<>(); worker.sinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, cb); @@ -2038,11 +2022,11 @@ public void testGetSourceConnectorOffsets(boolean enableTopicCreation) throws Ex worker.start(); Set> connectorPartitions = - Set.of(Map.of("partitionKey", "partitionValue")); + Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); - Map, Map> partitionOffsets = Map.of( - Map.of("partitionKey", "partitionValue"), - Map.of("offsetKey", "offsetValue") + Map, Map> partitionOffsets = Collections.singletonMap( + Collections.singletonMap("partitionKey", "partitionValue"), + Collections.singletonMap("offsetKey", "offsetValue") ); when(offsetStore.connectorPartitions(CONNECTOR_ID)).thenReturn(connectorPartitions); @@ -2104,15 +2088,14 @@ public void testAlterOffsetsConnectorDoesNotSupportOffsetAlteration(boolean enab worker.start(); mockGenericIsolation(); - when(plugins.newConnector(anyString(), any())).thenReturn(sourceConnector); - when(plugins.pluginLoader(SampleSourceConnector.class.getName(), null)).thenReturn(pluginLoader); + when(plugins.newConnector(anyString())).thenReturn(sourceConnector); when(plugins.withClassLoader(any(ClassLoader.class), any(Runnable.class))).thenAnswer(AdditionalAnswers.returnsSecondArg()); when(sourceConnector.alterOffsets(eq(connectorProps), anyMap())).thenThrow(new UnsupportedOperationException("This connector doesn't " + "support altering of offsets")); FutureCallback cb = new FutureCallback<>(); worker.modifyConnectorOffsets(CONNECTOR_ID, connectorProps, - Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), cb); ExecutionException e = assertThrows(ExecutionException.class, () -> cb.get(1000, TimeUnit.MILLISECONDS)); @@ -2142,8 +2125,8 @@ public void testAlterOffsetsSourceConnector(boolean enableTopicCreation) throws OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Map, Map> partitionOffsets = new HashMap<>(); - partitionOffsets.put(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")); - partitionOffsets.put(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")); + partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")); + partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, Callback.class).onCompletion(null, null); @@ -2181,8 +2164,8 @@ public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) { OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Map, Map> partitionOffsets = new HashMap<>(); - partitionOffsets.put(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")); - partitionOffsets.put(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")); + partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")); + partitionOffsets.put(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, Callback.class).onCompletion(new RuntimeException("Test exception"), null); @@ -2207,9 +2190,9 @@ public void testAlterOffsetsSourceConnectorError(boolean enableTopicCreation) { @ValueSource(booleans = {true, false}) public void testNormalizeSourceConnectorOffsets(boolean enableTopicCreation) { setup(enableTopicCreation); - Map, Map> offsets = Map.of( - Map.of("filename", "/path/to/filename"), - Map.of("position", 20) + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap("filename", "/path/to/filename"), + Collections.singletonMap("position", 20) ); assertInstanceOf(Integer.class, offsets.values().iterator().next().get("position")); @@ -2237,11 +2220,11 @@ public void testAlterOffsetsSinkConnectorNoDeletes(boolean enableTopicCreation) Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 500)); + partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 500)); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); - partitionOffsets.put(partition2, Map.of(SinkUtils.KAFKA_OFFSET_KEY, 100)); + partitionOffsets.put(partition2, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 100)); // A null value for deleteOffsetsSetCapture indicates that we don't expect any call to Admin::deleteConsumerGroupOffsets alterOffsetsSinkConnector(partitionOffsets, alterOffsetsMapCapture, null); @@ -2290,7 +2273,7 @@ public void testAlterOffsetsSinkConnectorAltersAndDeletes(boolean enableTopicCre Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); @@ -2301,7 +2284,7 @@ public void testAlterOffsetsSinkConnectorAltersAndDeletes(boolean enableTopicCre assertEquals(1, alterOffsetsMapCapture.getValue().size()); assertEquals(100, alterOffsetsMapCapture.getValue().get(new TopicPartition("test_topic", 10)).offset()); - Set expectedTopicPartitionsForOffsetDelete = Set.of(new TopicPartition("test_topic", 20)); + Set expectedTopicPartitionsForOffsetDelete = Collections.singleton(new TopicPartition("test_topic", 20)); assertEquals(expectedTopicPartitionsForOffsetDelete, deleteOffsetsSetCapture.getValue()); } @@ -2375,8 +2358,8 @@ public void testAlterOffsetsSinkConnectorAlterOffsetsError(boolean enableTopicCr Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - Map, Map> partitionOffsets = Map.of(partition1, - Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); + Map, Map> partitionOffsets = Collections.singletonMap(partition1, + Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); FutureCallback cb = new FutureCallback<>(); worker.modifySinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, partitionOffsets, @@ -2423,7 +2406,7 @@ public void testAlterOffsetsSinkConnectorDeleteOffsetsError(boolean enableTopicC Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); Map partition2 = new HashMap<>(); partition2.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition2.put(SinkUtils.KAFKA_PARTITION_KEY, "20"); @@ -2464,7 +2447,7 @@ public void testAlterOffsetsSinkConnectorSynchronousError(boolean enableTopicCre Map partition1 = new HashMap<>(); partition1.put(SinkUtils.KAFKA_TOPIC_KEY, "test_topic"); partition1.put(SinkUtils.KAFKA_PARTITION_KEY, "10"); - partitionOffsets.put(partition1, Map.of(SinkUtils.KAFKA_OFFSET_KEY, "100")); + partitionOffsets.put(partition1, Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, "100")); FutureCallback cb = new FutureCallback<>(); worker.modifySinkConnectorOffsets(CONNECTOR_ID, sinkConnector, connectorProps, partitionOffsets, @@ -2504,8 +2487,8 @@ public void testResetOffsetsSourceConnectorExactlyOnceSupportEnabled(boolean ena OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); Set> connectorPartitions = new HashSet<>(); - connectorPartitions.add(Map.of("partitionKey", "partitionValue1")); - connectorPartitions.add(Map.of("partitionKey", "partitionValue2")); + connectorPartitions.add(Collections.singletonMap("partitionKey", "partitionValue1")); + connectorPartitions.add(Collections.singletonMap("partitionKey", "partitionValue2")); when(offsetStore.connectorPartitions(eq(CONNECTOR_ID))).thenReturn(connectorPartitions); when(offsetWriter.doFlush(any())).thenAnswer(invocation -> { invocation.getArgument(0, Callback.class).onCompletion(null, null); @@ -2546,7 +2529,7 @@ public void testResetOffsetsSinkConnector(boolean enableTopicCreation) throws Ex when(plugins.withClassLoader(any(ClassLoader.class), any(Runnable.class))).thenAnswer(AdditionalAnswers.returnsSecondArg()); TopicPartition tp = new TopicPartition("test-topic", 0); - mockAdminListConsumerGroupOffsets(admin, Map.of(tp, new OffsetAndMetadata(10L)), null, time, 2000); + mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(tp, new OffsetAndMetadata(10L)), null, time, 2000); when(sinkConnector.alterOffsets(eq(connectorProps), eq(Collections.singletonMap(tp, null)))).thenAnswer(invocation -> { time.sleep(3000); return true; @@ -2588,7 +2571,7 @@ public void testResetOffsetsSinkConnectorDeleteConsumerGroupError(boolean enable when(plugins.withClassLoader(any(ClassLoader.class), any(Runnable.class))).thenAnswer(AdditionalAnswers.returnsSecondArg()); TopicPartition tp = new TopicPartition("test-topic", 0); - mockAdminListConsumerGroupOffsets(admin, Map.of(tp, new OffsetAndMetadata(10L)), null); + mockAdminListConsumerGroupOffsets(admin, Collections.singletonMap(tp, new OffsetAndMetadata(10L)), null); when(sinkConnector.alterOffsets(eq(connectorProps), eq(Collections.singletonMap(tp, null)))).thenReturn(true); DeleteConsumerGroupsResult deleteConsumerGroupsResult = mock(DeleteConsumerGroupsResult.class); @@ -2629,9 +2612,9 @@ public void testModifySourceConnectorOffsetsTimeout(boolean enableTopicCreation) KafkaProducer producer = mock(KafkaProducer.class); OffsetStorageWriter offsetWriter = mock(OffsetStorageWriter.class); - Map, Map> partitionOffsets = Map.of( - Map.of("partitionKey", "partitionValue"), - Map.of("offsetKey", "offsetValue")); + Map, Map> partitionOffsets = Collections.singletonMap( + Collections.singletonMap("partitionKey", "partitionValue"), + Collections.singletonMap("offsetKey", "offsetValue")); FutureCallback cb = new FutureCallback<>(); worker.modifySourceConnectorOffsets(CONNECTOR_ID, sourceConnector, connectorProps, partitionOffsets, offsetStore, producer, @@ -2697,7 +2680,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti String connectorClass = SampleSourceConnector.class.getName(); connectorProps.put(CONNECTOR_CLASS_CONFIG, connectorClass); connectorProps.put(TASKS_MAX_ENFORCE_CONFIG, Boolean.toString(enforced)); - mockVersionedConnectorIsolation(connectorClass, null, sourceConnector); + mockConnectorIsolation(connectorClass, sourceConnector); mockExecutorRealSubmit(WorkerConnector.class); @@ -2715,7 +2698,7 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti Map taskConfig = new HashMap<>(); // No warnings or exceptions when a connector generates an empty list of task configs - when(sourceConnector.taskConfigs(1)).thenReturn(List.of()); + when(sourceConnector.taskConfigs(1)).thenReturn(Collections.emptyList()); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); @@ -2724,9 +2707,9 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti } // No warnings or exceptions when a connector generates the maximum permitted number of task configs - when(sourceConnector.taskConfigs(1)).thenReturn(List.of(taskConfig)); - when(sourceConnector.taskConfigs(2)).thenReturn(List.of(taskConfig, taskConfig)); - when(sourceConnector.taskConfigs(3)).thenReturn(List.of(taskConfig, taskConfig, taskConfig)); + when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig)); + when(sourceConnector.taskConfigs(2)).thenReturn(Arrays.asList(taskConfig, taskConfig)); + when(sourceConnector.taskConfigs(3)).thenReturn(Arrays.asList(taskConfig, taskConfig, taskConfig)); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); @@ -2740,12 +2723,12 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); assertEquals(3, taskConfigs.size()); - assertEquals(List.of(), logCaptureAppender.getMessages("WARN")); - assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); + assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("WARN")); + assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); } // Warning/exception when a connector generates too many task configs - List> tooManyTaskConfigs = List.of(taskConfig, taskConfig, taskConfig, taskConfig); + List> tooManyTaskConfigs = Arrays.asList(taskConfig, taskConfig, taskConfig, taskConfig); when(sourceConnector.taskConfigs(1)).thenReturn(tooManyTaskConfigs); when(sourceConnector.taskConfigs(2)).thenReturn(tooManyTaskConfigs); when(sourceConnector.taskConfigs(3)).thenReturn(tooManyTaskConfigs); @@ -2780,19 +2763,19 @@ private void testConnectorGeneratesTooManyTasks(boolean enforced) throws Excepti ); // Regardless of enforcement, there should never be any error-level log messages - assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); + assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); } } // One last sanity check in case the connector is reconfigured and respects tasks.max - when(sourceConnector.taskConfigs(1)).thenReturn(List.of(taskConfig)); + when(sourceConnector.taskConfigs(1)).thenReturn(Collections.singletonList(taskConfig)); try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(Worker.class)) { connectorProps.put(TASKS_MAX_CONFIG, "1"); List> taskConfigs = worker.connectorTaskConfigs(CONNECTOR_ID, new ConnectorConfig(plugins, connectorProps)); assertEquals(1, taskConfigs.size()); - assertEquals(List.of(), logCaptureAppender.getMessages("WARN")); - assertEquals(List.of(), logCaptureAppender.getMessages("ERROR")); + assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("WARN")); + assertEquals(Collections.emptyList(), logCaptureAppender.getMessages("ERROR")); } worker.stop(); @@ -2816,7 +2799,7 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { SinkTask task = mock(TestSinkTask.class); mockKafkaClusterId(); - Map origProps = Map.of(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); + Map origProps = Collections.singletonMap(TaskConfig.TASK_CLASS_CONFIG, TestSinkTask.class.getName()); worker = new Worker(WORKER_ID, new MockTime(), plugins, config, offsetBackingStore, executorService, noneConnectorClientConfigOverridePolicy, null); @@ -2824,7 +2807,7 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { worker.start(); assertStatistics(worker, 0, 0); - assertEquals(Set.of(), worker.taskIds()); + assertEquals(Collections.emptySet(), worker.taskIds()); Map connectorConfigs = anyConnectorConfigMap(); connectorConfigs.put(TASKS_MAX_ENFORCE_CONFIG, Boolean.toString(enforced)); connectorConfigs.put(TOPICS_CONFIG, "t1"); @@ -2839,15 +2822,15 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { 0, null, // ... but it has generated two task configs - Map.of(connName, numTasks), - Map.of(connName, connectorConfigs), - Map.of(connName, TargetState.STARTED), - Map.of(TASK_ID, origProps), - Map.of(), - Map.of(), - Map.of(connName, new AppliedConnectorConfig(connectorConfigs)), - Set.of(), - Set.of() + Collections.singletonMap(connName, numTasks), + Collections.singletonMap(connName, connectorConfigs), + Collections.singletonMap(connName, TargetState.STARTED), + Collections.singletonMap(TASK_ID, origProps), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(connName, new AppliedConnectorConfig(connectorConfigs)), + Collections.emptySet(), + Collections.emptySet() ); String tasksMaxExceededMessage; @@ -2864,15 +2847,15 @@ private void testStartTaskWithTooManyTaskConfigs(boolean enforced) { ArgumentCaptor failureCaptor = ArgumentCaptor.forClass(Throwable.class); verify(taskStatusListener, times(1)).onFailure(eq(TASK_ID), failureCaptor.capture()); - assertInstanceOf(TooManyTasksException.class, failureCaptor.getValue(), + assertInstanceOf(TooManyTasksException.class, failureCaptor.getValue(), "Expected task start exception to be TooManyTasksException, but was " + failureCaptor.getValue().getClass() + " instead"); tasksMaxExceededMessage = failureCaptor.getValue().getMessage(); } else { - mockVersionedTaskIsolation(SampleSinkConnector.class, TestSinkTask.class, null, sinkConnector, task); - mockVersionedTaskConverterFromConnector(ConnectorConfig.KEY_CONVERTER_CLASS_CONFIG, ConnectorConfig.KEY_CONVERTER_VERSION_CONFIG, taskKeyConverter); - mockVersionedTaskConverterFromConnector(ConnectorConfig.VALUE_CONVERTER_CLASS_CONFIG, ConnectorConfig.VALUE_CONVERTER_VERSION_CONFIG, taskValueConverter); - mockVersionedTaskHeaderConverterFromConnector(taskHeaderConverter); + mockTaskIsolation(SampleSinkConnector.class, TestSinkTask.class, task); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, taskKeyConverter); + mockTaskConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, taskValueConverter); + mockTaskHeaderConverter(ClassLoaderUsage.CURRENT_CLASSLOADER, taskHeaderConverter); mockExecutorFakeSubmit(WorkerTask.class); assertTrue(worker.startSinkTask( @@ -2963,7 +2946,7 @@ private void verifyStorage() { private void mockInternalConverters() { JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Map.of(SCHEMAS_ENABLE_CONFIG, false), false); + jsonConverter.configure(Collections.singletonMap(SCHEMAS_ENABLE_CONFIG, false), false); when(plugins.newInternalConverter(eq(true), anyString(), anyMap())) .thenReturn(jsonConverter); @@ -2982,20 +2965,8 @@ private void mockTaskConverter(ClassLoaderUsage classLoaderUsage, String convert .thenReturn(returning); } - private void mockVersionedTaskConverterFromConnector(String converterClassConfig, String converterVersionConfig, Converter returning) { - when(plugins.newConverter(any(ConnectorConfig.class), eq(converterClassConfig), eq(converterVersionConfig))).thenReturn(returning); - } - - private void verifyVersionedTaskConverterFromConnector(String converterClassConfig, String converterVersionConfig) { - verify(plugins).newConverter(any(ConnectorConfig.class), eq(converterClassConfig), eq(converterVersionConfig)); - } - - private void mockVersionedTaskConverterFromWorker(String converterClassConfig, String converterVersionConfig, Converter returning) { - when(plugins.newConverter(any(WorkerConfig.class), eq(converterClassConfig), eq(converterVersionConfig))).thenReturn(returning); - } - - private void verifyVersionedTaskConverterFromWorker(String converterClassConfig, String converterVersionConfig) { - verify(plugins).newConverter(any(WorkerConfig.class), eq(converterClassConfig), eq(converterVersionConfig)); + private void verifyTaskConverter(String converterClassConfig) { + verify(plugins).newConverter(any(AbstractConfig.class), eq(converterClassConfig), eq(ClassLoaderUsage.CURRENT_CLASSLOADER)); } private void mockTaskHeaderConverter(ClassLoaderUsage classLoaderUsage, HeaderConverter returning) { @@ -3007,25 +2978,8 @@ private void verifyTaskHeaderConverter() { verify(plugins).newHeaderConverter(any(AbstractConfig.class), eq(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG), eq(ClassLoaderUsage.CURRENT_CLASSLOADER)); } - private void mockVersionedTaskHeaderConverterFromConnector(HeaderConverter returning) { - when(plugins.newHeaderConverter(any(ConnectorConfig.class), eq(ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG), eq(ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG))) - .thenReturn(returning); - } - - private void verifyVersionedTaskHeaderConverterFromConnector() { - verify(plugins).newHeaderConverter(any(ConnectorConfig.class), eq(ConnectorConfig.HEADER_CONVERTER_CLASS_CONFIG), eq(ConnectorConfig.HEADER_CONVERTER_VERSION_CONFIG)); - } - - private void mockVersionedTaskHeaderConverterFromWorker(HeaderConverter returning) { - when(plugins.newHeaderConverter(any(WorkerConfig.class), eq(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG), eq(WorkerConfig.HEADER_CONVERTER_VERSION))) - .thenReturn(returning); - } - - private void verifyVersionedTaskHeaderConverterFromWorker() { - verify(plugins).newHeaderConverter(any(WorkerConfig.class), eq(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG), eq(WorkerConfig.HEADER_CONVERTER_VERSION)); - } - private void mockGenericIsolation() { + when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); } @@ -3040,26 +2994,12 @@ private void mockConnectorIsolation(String connectorClass, Connector connector) when(connector.version()).thenReturn("1.0"); } - private void mockVersionedConnectorIsolation(String connectorClass, VersionRange range, Connector connector) { - mockGenericIsolation(); - when(plugins.pluginLoader(connectorClass, range)).thenReturn(pluginLoader); - when(plugins.newConnector(connectorClass, range)).thenReturn(connector); - when(connector.version()).thenReturn(range == null ? "unknown" : range.toString()); - } - private void verifyConnectorIsolation(Connector connector) { verifyGenericIsolation(); verify(plugins).newConnector(anyString()); verify(connector, atLeastOnce()).version(); } - private void verifyVersionedConnectorIsolation(String connectorClass, VersionRange range, Connector connector) { - verifyGenericIsolation(); - verify(plugins).pluginLoader(connectorClass, range); - verify(plugins).newConnector(connectorClass, range); - verify(connector, atLeastOnce()).version(); - } - private void mockTaskIsolation(Class connector, Class taskClass, Task task) { mockGenericIsolation(); doReturn(connector).when(plugins).connectorClass(connector.getName()); @@ -3067,16 +3007,6 @@ private void mockTaskIsolation(Class connector, Class connectorClass, Class taskClass, VersionRange range, Connector connector, Task task) { - mockGenericIsolation(); - when(plugins.pluginLoader(connectorClass.getName(), range)).thenReturn(pluginLoader); - when(plugins.connectorClass(connectorClass.getName(), range)).thenReturn((Class) connectorClass); - when(plugins.newTask(taskClass)).thenReturn(task); - when(plugins.safeLoaderSwapper()).thenReturn(TestPlugins.noOpLoaderSwap()); - when(task.version()).thenReturn(range == null ? "unknown" : range.toString()); - } - private void verifyTaskIsolation(Task task) { verifyGenericIsolation(); verify(plugins).connectorClass(anyString()); @@ -3084,14 +3014,6 @@ private void verifyTaskIsolation(Task task) { verify(task).version(); } - private void verifyVersionedTaskIsolation(Class connectorClass, Class taskClass, VersionRange range, Task task) { - verifyGenericIsolation(); - verify(plugins).pluginLoader(connectorClass.getName(), range); - verify(plugins).connectorClass(connectorClass.getName(), range); - verify(plugins).newTask(taskClass); - verify(task, times(2)).version(); - } - private void mockExecutorRealSubmit(Class runnableClass) { // This test expects the runnable to be executed, so have the isolated runnable pass-through. // Requires using the Worker constructor without the mocked executorService @@ -3161,12 +3083,16 @@ private static void workerTaskConstructor(WorkerTask mock, MockedConstruct */ private Object workerTaskMethod(InvocationOnMock invocation) { // provide implementations of three methods used during testing - return switch (invocation.getMethod().getName()) { - case "id" -> TASK_ID; - case "loader" -> pluginLoader; - case "awaitStop" -> true; - default -> null; - }; + switch (invocation.getMethod().getName()) { + case "id": + return TASK_ID; + case "loader": + return pluginLoader; + case "awaitStop": + return true; + default: + return null; + } } private static class TestSourceTask extends SourceTask { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java index 0546e3bb4e950..06c3a42b64f8d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/WorkerTestUtils.java @@ -16,11 +16,9 @@ */ package org.apache.kafka.connect.runtime; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.connect.connector.ConnectRecord; import org.apache.kafka.connect.runtime.distributed.ExtendedAssignment; import org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator; -import org.apache.kafka.connect.runtime.isolation.TestPlugins; import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.transforms.Transformation; @@ -31,10 +29,10 @@ import org.mockito.stubbing.OngoingStubbing; import java.util.AbstractMap.SimpleEntry; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.stream.Collectors; import java.util.stream.IntStream; @@ -62,11 +60,11 @@ public static ClusterConfigState clusterConfigState(long offset, connectorConfigs, connectorTargetStates(1, connectorNum, TargetState.STARTED), taskConfigs(0, connectorNum, connectorNum * taskNum), - Map.of(), - Map.of(), + Collections.emptyMap(), + Collections.emptyMap(), appliedConnectorConfigs, - Set.of(), - Set.of()); + Collections.emptySet(), + Collections.emptySet()); } public static Map connectorTaskCounts(int start, @@ -168,7 +166,6 @@ public static void assertAssignment(boolean expectFailed, "Wrong rebalance delay in " + assignment); } - @SuppressWarnings("unchecked") public static > TransformationChain getTransformationChain( RetryWithToleranceOperator toleranceOperator, List results) { @@ -184,26 +181,17 @@ public static > TransformationChain getTrans return buildTransformationChain(transformation, toleranceOperator); } - @SuppressWarnings("unchecked") public static > TransformationChain buildTransformationChain( Transformation transformation, RetryWithToleranceOperator toleranceOperator) { Predicate predicate = mock(Predicate.class); when(predicate.test(any())).thenReturn(true); - Plugin> predicatePlugin = mock(Plugin.class); - when(predicatePlugin.get()).thenReturn(predicate); - Plugin> transformationPlugin = mock(Plugin.class); - when(transformationPlugin.get()).thenReturn(transformation); - TransformationStage stage = new TransformationStage<>( - predicatePlugin, - "testPredicate", - null, + TransformationStage stage = new TransformationStage( + predicate, false, - transformationPlugin, - "testTransformation", - null, - TestPlugins.noOpLoaderSwap()); - TransformationChain realTransformationChainRetriableException = new TransformationChain<>(List.of(stage), toleranceOperator); - return Mockito.spy(realTransformationChainRetriableException); + transformation); + TransformationChain realTransformationChainRetriableException = new TransformationChain(List.of(stage), toleranceOperator); + TransformationChain transformationChainRetriableException = Mockito.spy(realTransformationChainRetriableException); + return transformationChainRetriableException; } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java index e9f6de400dd19..f966e12345ff3 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/ConnectProtocolCompatibilityTest.java @@ -21,8 +21,8 @@ import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import java.util.List; -import java.util.Set; +import java.util.Arrays; +import java.util.Collections; import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V1; import static org.apache.kafka.connect.runtime.distributed.IncrementalCooperativeConnectProtocol.CONNECT_PROTOCOL_V2; @@ -99,48 +99,48 @@ public void testEagerToCoopMetadata() { public void testEagerToEagerAssignment() { ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - List.of(connectorId1, connectorId3), List.of(taskId2x0)); + Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0)); ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); + assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - List.of(connectorId2), List.of(taskId1x0, taskId3x0)); + Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0)); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(memberBuf); assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testCoopToCoopAssignment() { ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - List.of(connectorId1, connectorId3), List.of(taskId2x0), - List.of(), List.of(), 0); + Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); + assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); ExtendedAssignment assignment2 = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - List.of(connectorId2), List.of(taskId1x0, taskId3x0), - List.of(), List.of(), 0); + Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = @@ -148,15 +148,15 @@ public void testCoopToCoopAssignment() { assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testEagerToCoopAssignment() { ConnectProtocol.Assignment assignment = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - List.of(connectorId1, connectorId3), List.of(taskId2x0)); + Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0)); ByteBuffer leaderBuf = ConnectProtocol.serializeAssignment(assignment); ConnectProtocol.Assignment leaderAssignment = @@ -164,12 +164,12 @@ public void testEagerToCoopAssignment() { assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); + assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment assignment2 = new ConnectProtocol.Assignment( ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - List.of(connectorId2), List.of(taskId1x0, taskId3x0)); + Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0)); ByteBuffer memberBuf = ConnectProtocol.serializeAssignment(assignment2); ConnectProtocol.Assignment memberAssignment = @@ -177,37 +177,37 @@ public void testEagerToCoopAssignment() { assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); } @Test public void testCoopToEagerAssignment() { ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "leader", LEADER_URL, 1L, - List.of(connectorId1, connectorId3), List.of(taskId2x0), - List.of(), List.of(), 0); + Arrays.asList(connectorId1, connectorId3), Collections.singletonList(taskId2x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer leaderBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); ConnectProtocol.Assignment leaderAssignment = ConnectProtocol.deserializeAssignment(leaderBuf); assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(1, leaderAssignment.offset()); - assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); + assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); ExtendedAssignment assignment2 = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ConnectProtocol.Assignment.NO_ERROR, "member", LEADER_URL, 1L, - List.of(connectorId2), List.of(taskId1x0, taskId3x0), - List.of(), List.of(), 0); + Collections.singletonList(connectorId2), Arrays.asList(taskId1x0, taskId3x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer memberBuf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment2, false); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(memberBuf); assertFalse(memberAssignment.failed()); assertEquals("member", memberAssignment.leader()); assertEquals(1, memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); } private ConnectProtocol.WorkerState emptyWorkerState() { @@ -221,10 +221,10 @@ private ExtendedWorkerState emptyExtendedWorkerState(short protocolVersion) { LEADER, LEADER_URL, CONFIG_OFFSET, - Set.of(), - Set.of(), - Set.of(), - Set.of(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), 0 ); return new ExtendedWorkerState(LEADER_URL, CONFIG_OFFSET, assignment); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java index da153572fdac4..af8aeab46a589 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedConfigTest.java @@ -29,6 +29,9 @@ import java.security.InvalidParameterException; import java.security.NoSuchAlgorithmException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -148,9 +151,9 @@ public void testSupportedKeyGeneratorAlgorithms() { private void testSupportedAlgorithms(String type, String... expectedAlgorithms) { Set supportedAlgorithms = DistributedConfig.supportedAlgorithms(type); - Set unsupportedAlgorithms = new HashSet<>(List.of(expectedAlgorithms)); + Set unsupportedAlgorithms = new HashSet<>(Arrays.asList(expectedAlgorithms)); unsupportedAlgorithms.removeAll(supportedAlgorithms); - assertEquals(Set.of(), unsupportedAlgorithms, type + " algorithms were found that should be supported by this JVM but are not"); + assertEquals(Collections.emptySet(), unsupportedAlgorithms, type + " algorithms were found that should be supported by this JVM but are not"); } @Test @@ -211,13 +214,13 @@ public void shouldFailWithInvalidKeySize() throws NoSuchAlgorithmException { @Test public void shouldValidateAllVerificationAlgorithms() { - List algorithms = List.of("HmacSHA1", "HmacSHA256", "HmacMD5", "bad-algorithm"); + List algorithms = + new ArrayList<>(Arrays.asList("HmacSHA1", "HmacSHA256", "HmacMD5", "bad-algorithm")); Map configs = configs(); for (int i = 0; i < algorithms.size(); i++) { configs.put(DistributedConfig.INTER_WORKER_VERIFICATION_ALGORITHMS_CONFIG, String.join(",", algorithms)); assertThrows(ConfigException.class, () -> new DistributedConfig(configs)); - // Rotate the algorithm list by creating a new list with rotated elements - algorithms = List.of(algorithms.get(1), algorithms.get(2), algorithms.get(3), algorithms.get(0)); + algorithms.add(algorithms.remove(0)); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java index 820de522f1293..2632360b7f6a4 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/DistributedHerderTest.java @@ -87,6 +87,7 @@ import org.mockito.stubbing.Answer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; import java.util.Collections; import java.util.HashMap; @@ -115,6 +116,7 @@ import static jakarta.ws.rs.core.Response.Status.FORBIDDEN; import static jakarta.ws.rs.core.Response.Status.SERVICE_UNAVAILABLE; +import static java.util.Collections.singletonList; import static org.apache.kafka.common.utils.Utils.UncheckedCloseable; import static org.apache.kafka.connect.runtime.AbstractStatus.State.FAILED; import static org.apache.kafka.connect.runtime.ConnectorConfig.CONNECTOR_CLIENT_CONSUMER_OVERRIDES_PREFIX; @@ -189,7 +191,7 @@ public class DistributedHerderTest { CONN1_CONFIG_UPDATED.put(SinkConnectorConfig.TOPICS_CONFIG, String.join(",", FOO_TOPIC, BAR_TOPIC, BAZ_TOPIC)); } private static final ConfigInfos CONN1_CONFIG_INFOS = - new ConfigInfos(CONN1, 0, List.of(), List.of()); + new ConfigInfos(CONN1, 0, Collections.emptyList(), Collections.emptyList()); private static final Map CONN2_CONFIG = new HashMap<>(); static { CONN2_CONFIG.put(ConnectorConfig.NAME_CONFIG, CONN2); @@ -198,9 +200,9 @@ public class DistributedHerderTest { CONN2_CONFIG.put(ConnectorConfig.CONNECTOR_CLASS_CONFIG, BogusSourceConnector.class.getName()); } private static final ConfigInfos CONN2_CONFIG_INFOS = - new ConfigInfos(CONN2, 0, List.of(), List.of()); + new ConfigInfos(CONN2, 0, Collections.emptyList(), Collections.emptyList()); private static final ConfigInfos CONN2_INVALID_CONFIG_INFOS = - new ConfigInfos(CONN2, 1, List.of(), List.of()); + new ConfigInfos(CONN2, 1, Collections.emptyList(), Collections.emptyList()); private static final Map TASK_CONFIG = new HashMap<>(); static { TASK_CONFIG.put(TaskConfig.TASK_CLASS_CONFIG, BogusSourceTask.class.getName()); @@ -220,64 +222,64 @@ public class DistributedHerderTest { private static final ClusterConfigState SNAPSHOT = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); private static final ClusterConfigState SNAPSHOT_PAUSED_CONN1 = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.PAUSED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.PAUSED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); private static final ClusterConfigState SNAPSHOT_STOPPED_CONN1 = new ClusterConfigState( 1, null, - Map.of(CONN1, 0), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STOPPED), - Map.of(), // Stopped connectors should have an empty set of task configs - Map.of(CONN1, 3), - Map.of(CONN1, 10), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(CONN1), - Set.of()); + Collections.singletonMap(CONN1, 0), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STOPPED), + Collections.emptyMap(), // Stopped connectors should have an empty set of task configs + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, 10), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.singleton(CONN1), + Collections.emptySet()); private static final ClusterConfigState SNAPSHOT_STOPPED_CONN1_FENCED = new ClusterConfigState( 1, null, - Map.of(CONN1, 0), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STOPPED), - Map.of(), - Map.of(CONN1, 0), - Map.of(CONN1, 11), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.singletonMap(CONN1, 0), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STOPPED), + Collections.emptyMap(), + Collections.singletonMap(CONN1, 0), + Collections.singletonMap(CONN1, 11), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); private static final ClusterConfigState SNAPSHOT_UPDATED_CONN1_CONFIG = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG_UPDATED), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG_UPDATED), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG_UPDATED)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG_UPDATED)), + Collections.emptySet(), + Collections.emptySet()); private static final String WORKER_ID = "localhost:8083"; private static final String KAFKA_CLUSTER_ID = "I4ZmrWqfT2e-upky_4fdPA"; @@ -310,7 +312,6 @@ public class DistributedHerderTest { public void setUp() throws Exception { time = new MockTime(); metrics = new MockConnectMetrics(time); - when(worker.metrics()).thenReturn(metrics); AutoCloseable uponShutdown = shutdownCalled::countDown; // Default to the old protocol unless specified otherwise @@ -318,8 +319,8 @@ public void setUp() throws Exception { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, List.of(), null, new AutoCloseable[]{uponShutdown})); - verify(worker).getPlugins(); + noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[]{uponShutdown})); + configUpdateListener = herder.new ConfigUpdateListener(); rebalanceListener = herder.new RebalanceListener(time); conn1SinkConfig = new SinkConnectorConfig(plugins, CONN1_CONFIG); @@ -342,7 +343,7 @@ public void testJoinAssignment() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(CONN1), List.of(TASK1)); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -368,7 +369,7 @@ public void testRebalance() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(CONN1), List.of(TASK1)); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -393,8 +394,8 @@ public void testRebalance() { verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED)); // Rebalance and get a new assignment - expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, - 1, List.of(CONN1), List.of()); + expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, + 1, singletonList(CONN1), Collections.emptyList()); herder.tick(); time.sleep(3000L); assertStatistics(3, 2, 100, 3000); @@ -416,7 +417,7 @@ public void testIncrementalCooperativeRebalanceForNewMember() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -427,9 +428,9 @@ public void testIncrementalCooperativeRebalanceForNewMember() { herder.tick(); // The new member got its assignment - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, - 1, List.of(CONN1), List.of(TASK1), 0); + 1, singletonList(CONN1), singletonList(TASK1), 0); // and the new assignment started ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -460,9 +461,9 @@ public void testIncrementalCooperativeRebalanceForExistingMember() { // Join group. First rebalance contains revocations because a new member joined. when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - expectRebalance(List.of(CONN1), List.of(TASK1), + expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, 1, - List.of(), List.of(), 0); + Collections.emptyList(), Collections.emptyList(), 0); doNothing().when(member).requestRejoin(); expectMemberPoll(); @@ -474,7 +475,7 @@ public void testIncrementalCooperativeRebalanceForExistingMember() { // In the second rebalance the new member gets its assignment and this member has no // assignments or revocations - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -497,9 +498,9 @@ public void testIncrementalCooperativeRebalanceWithDelay() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, - List.of(), List.of(TASK2), + Collections.emptyList(), singletonList(TASK2), rebalanceDelay); expectConfigRefreshAndSnapshot(SNAPSHOT); @@ -518,9 +519,9 @@ public void testIncrementalCooperativeRebalanceWithDelay() { herder.tick(); // The member got its assignment and revocation - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, - 1, List.of(CONN1), List.of(TASK1), 0); + 1, singletonList(CONN1), singletonList(TASK1), 0); // and the new assignment started ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -547,7 +548,7 @@ public void testRebalanceFailedConnector() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(CONN1), List.of(TASK1)); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1)); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -569,8 +570,8 @@ public void testRebalanceFailedConnector() { verify(worker).startSourceTask(eq(TASK1), any(), any(), any(), eq(herder), eq(TargetState.STARTED)); // Rebalance and get a new assignment - expectRebalance(List.of(CONN1), List.of(TASK1), ConnectProtocol.Assignment.NO_ERROR, - 1, List.of(CONN1), List.of()); + expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, + 1, singletonList(CONN1), Collections.emptyList()); // worker is not running, so we should see no call to connectorTaskConfigs() expectExecuteTaskReconfiguration(false, null, null); @@ -608,7 +609,7 @@ public void revokeAndReassign(boolean incompleteRebalance) { when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(connectProtocolVersion); // The lists need to be mutable because assignments might be removed - expectRebalance(configOffset, new ArrayList<>(List.of(CONN1)), new ArrayList<>(List.of(TASK1))); + expectRebalance(configOffset, new ArrayList<>(singletonList(CONN1)), new ArrayList<>(singletonList(TASK1))); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -629,7 +630,7 @@ public void revokeAndReassign(boolean incompleteRebalance) { // Perform a partial re-balance just prior to the revocation // bump the configOffset to trigger reading the config topic to the end configOffset++; - expectRebalance(configOffset, List.of(), List.of()); + expectRebalance(configOffset, Collections.emptyList(), Collections.emptyList()); // give it the wrong snapshot, as if we're out of sync/can't reach the broker expectConfigRefreshAndSnapshot(SNAPSHOT); doNothing().when(member).requestRejoin(); @@ -639,9 +640,9 @@ public void revokeAndReassign(boolean incompleteRebalance) { } // Revoke the connector in the next rebalance - expectRebalance(List.of(CONN1), List.of(), - ConnectProtocol.Assignment.NO_ERROR, configOffset, List.of(), - List.of()); + expectRebalance(singletonList(CONN1), Collections.emptyList(), + ConnectProtocol.Assignment.NO_ERROR, configOffset, Collections.emptyList(), + Collections.emptyList()); if (incompleteRebalance) { // Same as SNAPSHOT, except with an updated offset @@ -649,15 +650,15 @@ public void revokeAndReassign(boolean incompleteRebalance) { ClusterConfigState secondSnapshot = new ClusterConfigState( configOffset, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of() + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet() ); expectConfigRefreshAndSnapshot(secondSnapshot); } @@ -667,7 +668,7 @@ public void revokeAndReassign(boolean incompleteRebalance) { herder.tick(); // re-assign the connector back to the same worker to ensure state was cleaned up - expectRebalance(configOffset, List.of(CONN1), List.of()); + expectRebalance(configOffset, singletonList(CONN1), Collections.emptyList()); herder.tick(); @@ -702,10 +703,10 @@ public void testCreateConnector() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -740,12 +741,12 @@ public void testCreateConnector() { time.sleep(1000L); assertStatistics(3, 1, 100, 1000L); - ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, List.of(), ConnectorType.SOURCE); + ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, Collections.emptyList(), ConnectorType.SOURCE); verify(putConnectorCallback).onCompletion(isNull(), eq(new Herder.Created<>(true, info))); verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -758,10 +759,10 @@ public void testCreateConnectorWithInitialState() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -795,12 +796,12 @@ public void testCreateConnectorWithInitialState() { time.sleep(1000L); assertStatistics(3, 1, 100, 1000L); - ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, List.of(), ConnectorType.SOURCE); + ConnectorInfo info = new ConnectorInfo(CONN2, CONN2_CONFIG, Collections.emptyList(), ConnectorType.SOURCE); verify(putConnectorCallback).onCompletion(isNull(), eq(new Herder.Created<>(true, info))); verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -812,10 +813,10 @@ public void testCreateConnectorWithInitialState() { public void testCreateConnectorConfigBackingStoreError() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // Initial rebalance where this member becomes the leader @@ -853,7 +854,7 @@ public void testCreateConnectorConfigBackingStoreError() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "ensuring membership in the cluster", "writing a config for connector " + CONN2 + " to the config topic" ), @@ -865,10 +866,10 @@ public void testCreateConnectorConfigBackingStoreError() { public void testCreateConnectorFailedValidation() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); HashMap config = new HashMap<>(CONN2_CONFIG); @@ -899,7 +900,7 @@ public void testCreateConnectorFailedValidation() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic" @@ -922,7 +923,7 @@ public void testConnectorNameConflictsWithWorkerGroupId() { ConfigValue nameConfig = validatedConfigs.get(ConnectorConfig.NAME_CONFIG); assertEquals( - List.of("Consumer group for sink connector named test-group conflicts with Connect worker group connect-test-group"), + Collections.singletonList("Consumer group for sink connector named test-group conflicts with Connect worker group connect-test-group"), nameConfig.errorMessages()); } @@ -941,12 +942,12 @@ public void testConnectorGroupIdConflictsWithWorkerGroupId() { ConfigValue overriddenGroupIdConfig = validatedConfigs.get(overriddenGroupId); assertEquals( - List.of("Consumer group connect-test-group conflicts with Connect worker group connect-test-group"), + Collections.singletonList("Consumer group connect-test-group conflicts with Connect worker group connect-test-group"), overriddenGroupIdConfig.errorMessages()); ConfigValue nameConfig = validatedConfigs.get(ConnectorConfig.NAME_CONFIG); assertEquals( - List.of(), + Collections.emptyList(), nameConfig.errorMessages() ); } @@ -955,10 +956,10 @@ public void testConnectorGroupIdConflictsWithWorkerGroupId() { public void testCreateConnectorAlreadyExists() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // mock the actual validation since its asynchronous nature is difficult to test and should @@ -985,7 +986,7 @@ public void testCreateConnectorAlreadyExists() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic" @@ -1000,7 +1001,7 @@ public void testDestroyConnector() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // Start with one connector - expectRebalance(1, List.of(CONN1), List.of(), true); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -1010,7 +1011,7 @@ public void testDestroyConnector() { }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), eq(TargetState.STARTED), onStart.capture()); expectExecuteTaskReconfiguration(true, conn1SinkConfig, invocation -> TASK_CONFIGS); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // And delete the connector @@ -1029,13 +1030,13 @@ public void testDestroyConnector() { // tasks are revoked TopicStatus fooStatus = new TopicStatus(FOO_TOPIC, CONN1, 0, time.milliseconds()); TopicStatus barStatus = new TopicStatus(BAR_TOPIC, CONN1, 0, time.milliseconds()); - when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(Set.of(fooStatus, barStatus)); + when(statusBackingStore.getAllTopics(eq(CONN1))).thenReturn(new HashSet<>(Arrays.asList(fooStatus, barStatus))); doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(FOO_TOPIC)); doNothing().when(statusBackingStore).deleteTopic(eq(CONN1), eq(BAR_TOPIC)); - expectRebalance(List.of(CONN1), List.of(TASK1), + expectRebalance(singletonList(CONN1), singletonList(TASK1), ConnectProtocol.Assignment.NO_ERROR, 2, "leader", "leaderUrl", - List.of(), List.of(), 0, true); + Collections.emptyList(), Collections.emptyList(), 0, true); expectConfigRefreshAndSnapshot(ClusterConfigState.EMPTY); doNothing().when(member).requestRejoin(); @@ -1048,7 +1049,7 @@ public void testDestroyConnector() { verifyNoMoreInteractions(worker, member, configBackingStore, statusBackingStore, putConnectorCallback); assertEquals( - List.of( + Arrays.asList( "awaiting startup", "ensuring membership in the cluster", "reading to the end of the config topic", @@ -1066,10 +1067,10 @@ public void testRestartConnector() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, List.of(CONN1), List.of(), true); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -1102,10 +1103,10 @@ public void testRestartUnknownConnector() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1126,7 +1127,7 @@ public void testRestartConnectorRedirectToLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1150,10 +1151,10 @@ public void testRestartConnectorRedirectToOwner() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1188,10 +1189,10 @@ public void testRestartConnectorAndTasksUnknownConnector() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1214,7 +1215,7 @@ public void testRestartConnectorAndTasksNotLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1239,10 +1240,10 @@ public void testRestartConnectorAndTasksUnknownStatus() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1268,10 +1269,10 @@ public void testRestartConnectorAndTasksSuccess() throws Exception { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1309,7 +1310,7 @@ public void testDoRestartConnectorAndTasksNoAssignments() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); @@ -1325,13 +1326,13 @@ public void testDoRestartConnectorAndTasksOnlyConnector() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(List.of(CONN1)); - when(herder.assignment.tasks()).thenReturn(List.of()); + when(herder.assignment.connectors()).thenReturn(Collections.singletonList(CONN1)); + when(herder.assignment.tasks()).thenReturn(Collections.emptyList()); herder.configState = SNAPSHOT; @@ -1346,7 +1347,6 @@ public void testDoRestartConnectorAndTasksOnlyConnector() { return true; }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), any(), stateCallback.capture()); doNothing().when(member).wakeup(); - when(worker.connectorVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1359,25 +1359,24 @@ public void testDoRestartConnectorAndTasksOnlyTasks() { RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); // The connector has three tasks - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(TASK0, TASK1, TASK2)); + when(restartPlan.taskIdsToRestart()).thenReturn(Arrays.asList(TASK0, TASK1, TASK2)); when(restartPlan.totalTaskCount()).thenReturn(3); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(List.of()); + when(herder.assignment.connectors()).thenReturn(Collections.emptyList()); // But only one task is assigned to this worker - when(herder.assignment.tasks()).thenReturn(List.of(TASK0)); + when(herder.assignment.tasks()).thenReturn(Collections.singletonList(TASK0)); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); herder.configState = SNAPSHOT; - doNothing().when(worker).stopAndAwaitTasks(List.of(TASK0)); + doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(TASK0)); TaskStatus status = new TaskStatus(TASK0, AbstractStatus.State.RESTARTING, WORKER_ID, 0); doNothing().when(statusBackingStore).put(eq(status)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); - when(worker.taskVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1390,14 +1389,14 @@ public void testDoRestartConnectorAndTasksBoth() { RestartRequest restartRequest = new RestartRequest(CONN1, false, true); RestartPlan restartPlan = mock(RestartPlan.class); when(restartPlan.shouldRestartConnector()).thenReturn(true); - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); when(restartPlan.totalTaskCount()).thenReturn(1); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); herder.assignment = mock(ExtendedAssignment.class); - when(herder.assignment.connectors()).thenReturn(List.of(CONN1)); - when(herder.assignment.tasks()).thenReturn(List.of(taskId)); + when(herder.assignment.connectors()).thenReturn(Collections.singletonList(CONN1)); + when(herder.assignment.tasks()).thenReturn(Collections.singletonList(taskId)); herder.configState = SNAPSHOT; @@ -1413,14 +1412,12 @@ public void testDoRestartConnectorAndTasksBoth() { }).when(worker).startConnector(eq(CONN1), any(), any(), eq(herder), any(), stateCallback.capture()); doNothing().when(member).wakeup(); - doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); + doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); TaskStatus taskStatus = new TaskStatus(TASK0, AbstractStatus.State.RESTARTING, WORKER_ID, 0); doNothing().when(statusBackingStore).put(eq(taskStatus)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); - when(worker.taskVersion(any())).thenReturn(null); - when(worker.connectorVersion(any())).thenReturn(null); herder.doRestartConnectorAndTasks(restartRequest); @@ -1433,10 +1430,10 @@ public void testRestartTask() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(TASK0), true); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), any())).thenReturn(true); @@ -1461,7 +1458,7 @@ public void testRestartUnknownTask() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1483,7 +1480,7 @@ public void testRestartTaskRedirectToLeader() { // get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1506,10 +1503,10 @@ public void testRestartTaskRedirectToOwner() { // get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); @@ -1554,7 +1551,7 @@ public void testConnectorConfigAdded() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join, no configs so no need to catch up on config topic - expectRebalance(-1, List.of(), List.of()); + expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); expectMemberPoll(); herder.tick(); // join @@ -1568,8 +1565,8 @@ public void testConnectorConfigAdded() { herder.tick(); // apply config // Performs rebalance and gets new assignment - expectRebalance(List.of(), List.of(), - ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of()); + expectRebalance(Collections.emptyList(), Collections.emptyList(), + ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList()); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -1595,7 +1592,7 @@ public void testConnectorConfigDetectedAfterLeaderAlreadyAssigned(short protocol when(member.currentProtocolVersion()).thenReturn(protocolVersion); // join, no configs so no need to catch up on config topic - expectRebalance(-1, List.of(), List.of()); + expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); expectMemberPoll(); herder.tick(); // join @@ -1613,8 +1610,8 @@ public void testConnectorConfigDetectedAfterLeaderAlreadyAssigned(short protocol // Performs rebalance and gets new assignment // Important--we're simulating a scenario where the leader has already detected the new // connector, and assigns it to our herder at the top of its tick thread - expectRebalance(List.of(), List.of(), - ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of()); + expectRebalance(Collections.emptyList(), Collections.emptyList(), + ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList()); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -1639,7 +1636,7 @@ public void testConnectorConfigUpdate() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, List.of(CONN1), List.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1672,11 +1669,10 @@ public void testConnectorConfigUpdateFailedTransformation() { when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - when(worker.connectorVersion(CONN1)).thenReturn(null); WorkerConfigTransformer configTransformer = mock(WorkerConfigTransformer.class); // join - expectRebalance(1, List.of(CONN1), List.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1695,15 +1691,15 @@ public void testConnectorConfigUpdateFailedTransformation() { ClusterConfigState snapshotWithTransform = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet(), configTransformer ); when(configBackingStore.snapshot()).thenReturn(snapshotWithTransform); @@ -1733,7 +1729,7 @@ public void testConnectorPaused() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, List.of(CONN1), List.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1771,7 +1767,7 @@ public void testConnectorResumed() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // start with the connector paused - expectRebalance(1, List.of(CONN1), List.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1); expectMemberPoll(); @@ -1812,7 +1808,7 @@ public void testConnectorStopped() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // join - expectRebalance(1, List.of(CONN1), List.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1850,7 +1846,7 @@ public void testUnknownConnectorPaused() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join - expectRebalance(1, List.of(), List.of(TASK0)); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1875,10 +1871,10 @@ public void testStopConnector() throws Exception { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join as leader - expectRebalance(1, List.of(), List.of(TASK0), true); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -1888,7 +1884,7 @@ public void testStopConnector() throws Exception { // handle stop request expectMemberEnsureActive(); expectConfigRefreshAndSnapshot(SNAPSHOT); - doNothing().when(configBackingStore).putTaskConfigs(CONN1, List.of()); + doNothing().when(configBackingStore).putTaskConfigs(CONN1, Collections.emptyList()); doNothing().when(configBackingStore).putTargetState(CONN1, TargetState.STOPPED); FutureCallback cb = new FutureCallback<>(); @@ -1909,7 +1905,7 @@ public void testStopConnectorNotLeader() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join as member (non-leader) - expectRebalance(1, List.of(), List.of(TASK0)); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -1942,10 +1938,10 @@ public void testStopConnectorFailToWriteTaskConfigs() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join as leader - expectRebalance(1, List.of(), List.of(TASK0), true); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -1955,7 +1951,7 @@ public void testStopConnectorFailToWriteTaskConfigs() { ConnectException taskConfigsWriteException = new ConnectException("Could not write task configs to config topic"); // handle stop request expectMemberEnsureActive(); - doThrow(taskConfigsWriteException).when(configBackingStore).putTaskConfigs(CONN1, List.of()); + doThrow(taskConfigsWriteException).when(configBackingStore).putTaskConfigs(CONN1, Collections.emptyList()); // We do not expect configBackingStore::putTargetState to be invoked, which // is intentional since that call should only take place if we are first able to // successfully write the empty list of task configs @@ -1986,7 +1982,7 @@ public void testConnectorPausedRunningTaskOnly() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join - expectRebalance(1, List.of(), List.of(TASK0)); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2021,7 +2017,7 @@ public void testConnectorResumedRunningTaskOnly() { when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // join - expectRebalance(1, List.of(), List.of(TASK0)); + expectRebalance(1, Collections.emptyList(), singletonList(TASK0)); expectConfigRefreshAndSnapshot(SNAPSHOT_PAUSED_CONN1); expectMemberPoll(); @@ -2055,7 +2051,7 @@ public void testTaskConfigAdded() { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); // join - expectRebalance(-1, List.of(), List.of()); + expectRebalance(-1, Collections.emptyList(), Collections.emptyList()); expectMemberPoll(); herder.tick(); // join @@ -2065,13 +2061,13 @@ public void testTaskConfigAdded() { // Rebalance will be triggered when the new config is detected doNothing().when(member).requestRejoin(); - configUpdateListener.onTaskConfigUpdate(List.of(TASK0, TASK1, TASK2)); // read updated config + configUpdateListener.onTaskConfigUpdate(Arrays.asList(TASK0, TASK1, TASK2)); // read updated config herder.tick(); // apply config // Performs rebalance and gets new assignment - expectRebalance(List.of(), List.of(), - ConnectProtocol.Assignment.NO_ERROR, 1, List.of(), - List.of(TASK0)); + expectRebalance(Collections.emptyList(), Collections.emptyList(), + ConnectProtocol.Assignment.NO_ERROR, 1, Collections.emptyList(), + singletonList(TASK0)); when(worker.startSourceTask(eq(TASK0), any(), any(), any(), eq(herder), eq(TargetState.STARTED))).thenReturn(true); herder.tick(); // do rebalance @@ -2085,13 +2081,13 @@ public void testJoinLeaderCatchUpFails() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(List.of(), List.of(), - ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", List.of(), - List.of(), 0, true); + expectRebalance(Collections.emptyList(), Collections.emptyList(), + ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", Collections.emptyList(), + Collections.emptyList(), 0, true); // Reading to end of log times out doThrow(new TimeoutException()).when(configBackingStore).refresh(anyLong(), any(TimeUnit.class)); @@ -2110,7 +2106,7 @@ public void testJoinLeaderCatchUpFails() throws Exception { before = time.milliseconds(); // After backoff, restart the process and this time succeed - expectRebalance(1, List.of(CONN1), List.of(TASK1), true); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onStart = ArgumentCaptor.forClass(Callback.class); @@ -2129,7 +2125,7 @@ public void testJoinLeaderCatchUpFails() throws Exception { assertStatistics("leaderUrl", false, 3, 1, 100, 2000L); // one more tick, to make sure we don't keep trying to read to the config topic unnecessarily - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); // tick once more to ensure that the successful read to the end of the config topic was // tracked and no further unnecessary attempts were made @@ -2146,10 +2142,10 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep // Join group as leader when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(CONN1), List.of(TASK1), true); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2168,9 +2164,9 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep herder.tick(); // The leader gets the same assignment after a rebalance is triggered - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); + 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -2179,9 +2175,9 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep // Another rebalance is triggered but this time it fails to read to the max offset and // triggers a re-sync - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", - List.of(), List.of(), 0, true); + Collections.emptyList(), Collections.emptyList(), 0, true); // The leader will retry a few times to read to the end of the config log doNothing().when(member).requestRejoin(); @@ -2202,9 +2198,9 @@ public void testJoinLeaderCatchUpRetriesForIncrementalCooperative() throws Excep } // After a few retries succeed to read the log to the end - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); + 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); expectConfigRefreshAndSnapshot(SNAPSHOT); before = time.milliseconds(); @@ -2222,10 +2218,10 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti // Join group as leader when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V1); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(CONN1), List.of(TASK1), true); + expectRebalance(1, singletonList(CONN1), singletonList(TASK1), true); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2244,9 +2240,9 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti herder.tick(); // The leader gets the same assignment after a rebalance is triggered - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, - "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), 0, true); + "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); time.sleep(2000L); assertStatistics(3, 1, 100, 2000); @@ -2255,9 +2251,9 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti // Another rebalance is triggered but this time it fails to read to the max offset and // triggers a re-sync - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.CONFIG_MISMATCH, 1, "leader", "leaderUrl", - List.of(), List.of(), 0, true); + Collections.emptyList(), Collections.emptyList(), 0, true); // The leader will exhaust the retries while trying to read to the end of the config log doNothing().when(member).requestRejoin(); @@ -2283,14 +2279,14 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti herder.tick(); assertEquals(before, time.milliseconds()); - assertEquals(Set.of(CONN1), assignmentCapture.getValue().connectors()); - assertEquals(Set.of(TASK1), assignmentCapture.getValue().tasks()); + assertEquals(Collections.singleton(CONN1), assignmentCapture.getValue().connectors()); + assertEquals(Collections.singleton(TASK1), assignmentCapture.getValue().tasks()); // After a complete backoff and a revocation of running tasks rejoin and this time succeed // The worker gets back the assignment that had given up - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, - 1, "leader", "leaderUrl", List.of(CONN1), List.of(TASK1), + 1, "leader", "leaderUrl", singletonList(CONN1), singletonList(TASK1), 0, true); expectConfigRefreshAndSnapshot(SNAPSHOT); @@ -2303,10 +2299,10 @@ public void testJoinLeaderCatchUpFailsForIncrementalCooperative() throws Excepti public void testAccessors() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectMemberPoll(); @@ -2315,15 +2311,15 @@ public void testAccessors() throws Exception { ClusterConfigState snapshotWithTransform = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet(), configTransformer); expectConfigRefreshAndSnapshot(snapshotWithTransform); @@ -2340,15 +2336,15 @@ public void testAccessors() throws Exception { herder.tick(); assertTrue(listConnectorsCb.isDone()); - assertEquals(Set.of(CONN1), listConnectorsCb.get()); + assertEquals(Collections.singleton(CONN1), listConnectorsCb.get()); assertTrue(connectorInfoCb.isDone()); - ConnectorInfo info = new ConnectorInfo(CONN1, CONN1_CONFIG, List.of(TASK0, TASK1, TASK2), + ConnectorInfo info = new ConnectorInfo(CONN1, CONN1_CONFIG, Arrays.asList(TASK0, TASK1, TASK2), ConnectorType.SOURCE); assertEquals(info, connectorInfoCb.get()); assertTrue(connectorConfigCb.isDone()); assertEquals(CONN1_CONFIG, connectorConfigCb.get()); assertTrue(taskConfigsCb.isDone()); - assertEquals(List.of( + assertEquals(Arrays.asList( new TaskInfo(TASK0, TASK_CONFIG), new TaskInfo(TASK1, TASK_CONFIG), new TaskInfo(TASK2, TASK_CONFIG)), @@ -2364,8 +2360,8 @@ public void testPutConnectorConfig() throws Exception { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); when(member.memberId()).thenReturn("leader"); - expectRebalance(1, List.of(CONN1), List.of(), true); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectConfigRefreshAndSnapshot(SNAPSHOT); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); @@ -2417,7 +2413,7 @@ public void testPutConnectorConfig() throws Exception { herder.putConnectorConfig(CONN1, CONN1_CONFIG_UPDATED, true, putConfigCb); herder.tick(); assertTrue(putConfigCb.isDone()); - ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, List.of(TASK0, TASK1, TASK2), + ConnectorInfo updatedInfo = new ConnectorInfo(CONN1, CONN1_CONFIG_UPDATED, Arrays.asList(TASK0, TASK1, TASK2), ConnectorType.SOURCE); assertEquals(new Herder.Created<>(false, updatedInfo), putConfigCb.get()); @@ -2436,21 +2432,21 @@ public void testPutConnectorConfig() throws Exception { @Test public void testPatchConnectorConfigNotFound() { when(member.memberId()).thenReturn("leader"); - expectRebalance(0, List.of(), List.of(), true); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + expectRebalance(0, Collections.emptyList(), Collections.emptyList(), true); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); ClusterConfigState clusterConfigState = new ClusterConfigState( 0, null, - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet()); expectConfigRefreshAndSnapshot(clusterConfigState); Map connConfigPatch = new HashMap<>(); @@ -2472,21 +2468,21 @@ public void testPatchConnectorConfigNotALeader() { ClusterConfigState originalSnapshot = new ClusterConfigState( 1, null, - Map.of(CONN1, 0), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of()); + Collections.singletonMap(CONN1, 0), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet()); expectConfigRefreshAndSnapshot(originalSnapshot); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); // Patch the connector config. - expectRebalance(1, List.of(CONN1), List.of(), false); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), false); FutureCallback> patchCallback = new FutureCallback<>(); herder.patchConnectorConfig(CONN1, new HashMap<>(), patchCallback); @@ -2500,7 +2496,7 @@ public void testPatchConnectorConfigNotALeader() { public void testPatchConnectorConfig() throws Exception { when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); when(member.memberId()).thenReturn("leader"); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); Map originalConnConfig = new HashMap<>(CONN1_CONFIG); originalConnConfig.put("foo0", "unaffected"); @@ -2512,15 +2508,15 @@ public void testPatchConnectorConfig() throws Exception { ClusterConfigState originalSnapshot = new ClusterConfigState( 1, null, - Map.of(CONN1, 0), - Map.of(CONN1, originalConnConfig), - Map.of(CONN1, TargetState.STARTED), - Map.of(), - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of()); + Collections.singletonMap(CONN1, 0), + Collections.singletonMap(CONN1, originalConnConfig), + Collections.singletonMap(CONN1, TargetState.STARTED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet()); expectConfigRefreshAndSnapshot(originalSnapshot); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); @@ -2538,7 +2534,7 @@ public void testPatchConnectorConfig() throws Exception { patchedConnConfig.remove("foo2"); patchedConnConfig.put("foo3", "added"); - expectRebalance(1, List.of(CONN1), List.of(), true); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); ArgumentCaptor> validateCallback = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -2563,11 +2559,11 @@ public void testPatchConnectorConfig() throws Exception { @Test public void testKeyRotationWhenWorkerBecomesLeader() { - long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_DEFAULT; + long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_MS_DEFAULT; when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectConfigRefreshAndSnapshot(SNAPSHOT); expectMemberPoll(); @@ -2577,20 +2573,20 @@ public void testKeyRotationWhenWorkerBecomesLeader() { // First rebalance: poll indefinitely as no key has been read yet, so expiration doesn't come into play verify(member).poll(eq(Long.MAX_VALUE), any()); - expectRebalance(2, List.of(), List.of()); + expectRebalance(2, Collections.emptyList(), Collections.emptyList()); SessionKey initialKey = new SessionKey(mock(SecretKey.class), 0); ClusterConfigState snapshotWithKey = new ClusterConfigState( 2, initialKey, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); expectConfigRefreshAndSnapshot(snapshotWithKey); configUpdateListener.onSessionKeyUpdate(initialKey); @@ -2599,8 +2595,8 @@ public void testKeyRotationWhenWorkerBecomesLeader() { // Second rebalance: poll indefinitely as worker is follower, so expiration still doesn't come into play verify(member, times(2)).poll(eq(Long.MAX_VALUE), any()); - expectRebalance(2, List.of(), List.of(), "member", MEMBER_URL, true); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + expectRebalance(2, Collections.emptyList(), Collections.emptyList(), "member", MEMBER_URL, true); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); ArgumentCaptor updatedKey = ArgumentCaptor.forClass(SessionKey.class); doAnswer(invocation -> { configUpdateListener.onSessionKeyUpdate(updatedKey.getValue()); @@ -2616,12 +2612,12 @@ public void testKeyRotationWhenWorkerBecomesLeader() { @Test public void testKeyRotationDisabledWhenWorkerBecomesFollower() { - long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_DEFAULT; + long rotationTtlDelay = DistributedConfig.INTER_WORKER_KEY_TTL_MS_MS_DEFAULT; when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, List.of(), List.of(), "member", MEMBER_URL, true); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), "member", MEMBER_URL, true); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); SecretKey initialSecretKey = mock(SecretKey.class); when(initialSecretKey.getAlgorithm()).thenReturn(DistributedConfig.INTER_WORKER_KEY_GENERATION_ALGORITHM_DEFAULT); when(initialSecretKey.getEncoded()).thenReturn(new byte[32]); @@ -2629,15 +2625,15 @@ public void testKeyRotationDisabledWhenWorkerBecomesFollower() { ClusterConfigState snapshotWithKey = new ClusterConfigState( 1, initialKey, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); expectConfigRefreshAndSnapshot(snapshotWithKey); expectMemberPoll(); @@ -2647,7 +2643,7 @@ public void testKeyRotationDisabledWhenWorkerBecomesFollower() { // First rebalance: poll for a limited time as worker is leader and must wake up for key expiration verify(member).poll(leq(rotationTtlDelay), any()); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); herder.tick(); // Second rebalance: poll indefinitely as worker is no longer leader, so key expiration doesn't come into play @@ -2667,7 +2663,7 @@ public void testPutTaskConfigsSignatureNotRequiredV0() { verify(member).wakeup(); verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - List.of("awaiting startup"), + singletonList("awaiting startup"), stages ); } @@ -2684,7 +2680,7 @@ public void testPutTaskConfigsSignatureNotRequiredV1() { verify(member).wakeup(); verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - List.of("awaiting startup"), + singletonList("awaiting startup"), stages ); } @@ -2790,7 +2786,7 @@ public void testPutTaskConfigsValidRequiredSignature() { verifyNoMoreInteractions(member, taskConfigCb); assertEquals( - List.of("awaiting startup"), + singletonList("awaiting startup"), stages ); } @@ -2801,7 +2797,7 @@ public void testFailedToWriteSessionKey() { // session key to the config topic, and fail when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); doThrow(new ConnectException("Oh no!")).when(configBackingStore).putSessionKey(any(SessionKey.class)); @@ -2831,15 +2827,15 @@ public void testFailedToReadBackNewlyWrittenSessionKey() throws Exception { ClusterConfigState snapshotWithSessionKey = new ClusterConfigState( 1, sessionKey, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), - Map.of(), - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); // First tick -- after joining the group, we try to write a new session key to // the config topic, and fail (in this case, we're trying to simulate that we've @@ -2848,7 +2844,7 @@ public void testFailedToReadBackNewlyWrittenSessionKey() throws Exception { // to write the key) when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); doThrow(new ConnectException("Oh no!")).when(configBackingStore).putSessionKey(any(SessionKey.class)); @@ -2921,7 +2917,7 @@ private void testTaskRequestedZombieFencingForwardingToLeader(boolean succeed) t when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); expectConfigRefreshAndSnapshot(SNAPSHOT); - expectRebalance(1, List.of(), List.of()); + expectRebalance(1, Collections.emptyList(), Collections.emptyList()); expectMemberPoll(); doAnswer(invocation -> { @@ -2968,9 +2964,9 @@ public void testExternalZombieFencingRequestForAlreadyFencedConnector() throws E ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), TASK_CONFIGS_MAP, - Map.of(CONN1, 12), - Map.of(CONN1, 5), - Set.of() + Collections.singletonMap(CONN1, 12), + Collections.singletonMap(CONN1, 5), + Collections.emptySet() ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, false); } @@ -2980,10 +2976,10 @@ public void testExternalZombieFencingRequestForSingleTaskConnector() throws Exce when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), - Map.of(TASK1, TASK_CONFIG), - Map.of(CONN1, 1), - Map.of(CONN1, 5), - Set.of(CONN1) + Collections.singletonMap(TASK1, TASK_CONFIG), + Collections.singletonMap(CONN1, 1), + Collections.singletonMap(CONN1, 5), + Collections.singleton(CONN1) ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, true); } @@ -2994,9 +2990,9 @@ public void testExternalZombieFencingRequestForFreshConnector() throws Exception ClusterConfigState configState = exactlyOnceSnapshot( expectNewSessionKey(), TASK_CONFIGS_MAP, - Map.of(), - Map.of(CONN1, 5), - Set.of(CONN1) + Collections.emptyMap(), + Collections.singletonMap(CONN1, 5), + Collections.singleton(CONN1) ); testExternalZombieFencingRequestThatRequiresNoPhysicalFencing(configState, true); } @@ -3010,9 +3006,9 @@ private void testExternalZombieFencingRequestThatRequiresNoPhysicalFencing( when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); expectConfigRefreshAndSnapshot(configState); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); if (expectTaskCountRecord) { @@ -3045,19 +3041,19 @@ public void testExternalZombieFencingRequestImmediateCompletion() throws Excepti when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Map.of(CONN1, 2), - Map.of(CONN1, 5), - Set.of(CONN1) + Collections.singletonMap(CONN1, 2), + Collections.singletonMap(CONN1, 5), + Collections.singleton(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // The future returned by Worker::fenceZombies @@ -3105,19 +3101,19 @@ public void testExternalZombieFencingRequestSynchronousFailure() throws Exceptio when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Map.of(CONN1, 2), - Map.of(CONN1, 5), - Set.of(CONN1) + Collections.singletonMap(CONN1, 2), + Collections.singletonMap(CONN1, 5), + Collections.singleton(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); Exception fencingException = new KafkaException("whoops!"); @@ -3149,19 +3145,19 @@ public void testExternalZombieFencingRequestAsynchronousFailure() throws Excepti when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); SessionKey sessionKey = expectNewSessionKey(); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, - Map.of(CONN1, 2), - Map.of(CONN1, 5), - Set.of(CONN1) + Collections.singletonMap(CONN1, 2), + Collections.singletonMap(CONN1, 5), + Collections.singleton(CONN1) ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // The future returned by Worker::fenceZombies @@ -3219,7 +3215,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V2); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); SessionKey sessionKey = expectNewSessionKey(); Map taskCountRecords = new HashMap<>(); @@ -3230,7 +3226,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception taskConfigGenerations.put(CONN1, 3); taskConfigGenerations.put(CONN2, 4); taskConfigGenerations.put(conn3, 2); - Set pendingFencing = Set.of(CONN1, CONN2, conn3); + Set pendingFencing = new HashSet<>(Arrays.asList(CONN1, CONN2, conn3)); ClusterConfigState configState = exactlyOnceSnapshot( sessionKey, TASK_CONFIGS_MAP, @@ -3241,7 +3237,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception ); expectConfigRefreshAndSnapshot(configState); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); // The callbacks that the herder has accrued for outstanding fencing futures, which will be completed after @@ -3287,7 +3283,7 @@ public void testExternalZombieFencingRequestDelayedCompletion() throws Exception tasksPerConnector.forEach((connector, numStackedRequests) -> { List> connectorFencingRequests = IntStream.range(0, numStackedRequests) .mapToObj(i -> new FutureCallback()) - .toList(); + .collect(Collectors.toList()); connectorFencingRequests.forEach(fencing -> herder.fenceZombieSourceTasks(connector, fencing) @@ -3323,22 +3319,22 @@ public void testVerifyTaskGeneration() { herder.configState = new ClusterConfigState( 1, null, - Map.of(CONN1, 3), - Map.of(CONN1, CONN1_CONFIG), - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, 3), + Collections.singletonMap(CONN1, CONN1_CONFIG), + Collections.singletonMap(CONN1, TargetState.STARTED), TASK_CONFIGS_MAP, - Map.of(), + Collections.emptyMap(), taskConfigGenerations, - Map.of(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), - Set.of(), - Set.of()); + Collections.singletonMap(CONN1, new AppliedConnectorConfig(CONN1_CONFIG)), + Collections.emptySet(), + Collections.emptySet()); Callback verifyCallback = mock(Callback.class); herder.assignment = new ExtendedAssignment( (short) 2, (short) 0, "leader", "leaderUrl", 0, - Set.of(), Set.of(TASK1), - Set.of(), Set.of(), 0); + Collections.emptySet(), Collections.singleton(TASK1), + Collections.emptySet(), Collections.emptySet(), 0); assertThrows(ConnectException.class, () -> herder.verifyTaskGenerationAndOwnership(TASK1, 0, verifyCallback)); assertThrows(ConnectException.class, () -> herder.verifyTaskGenerationAndOwnership(TASK1, 1, verifyCallback)); @@ -3431,7 +3427,7 @@ public void testPollDurationOnSlowConnectorOperations() { when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); // Assign the connector to this worker, and have it start - expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); expectConfigRefreshAndSnapshot(SNAPSHOT); ArgumentCaptor> onFirstStart = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -3444,7 +3440,7 @@ public void testPollDurationOnSlowConnectorOperations() { herder.tick(); // Rebalance again due to config update - expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT_UPDATED_CONN1_CONFIG); doNothing().when(worker).stopAndAwaitConnector(CONN1); @@ -3460,7 +3456,7 @@ public void testPollDurationOnSlowConnectorOperations() { herder.tick(); // Third tick should resolve all outstanding requests - expectRebalance(List.of(), List.of(), ConnectProtocol.Assignment.NO_ERROR, 1, List.of(CONN1), List.of(), rebalanceDelayMs); + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, 1, singletonList(CONN1), Collections.emptyList(), rebalanceDelayMs); // which includes querying the connector task configs after the update expectExecuteTaskReconfiguration(true, conn1SinkConfigUpdated, invocation -> { time.sleep(operationDelayMs); @@ -3479,7 +3475,7 @@ public void testPollDurationOnSlowConnectorOperations() { public void shouldThrowWhenStartAndStopExecutorThrowsRejectedExecutionExceptionAndHerderNotStopping() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(CONN1), List.of(), true); + expectRebalance(1, singletonList(CONN1), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.startAndStopExecutor.shutdown(); @@ -3491,7 +3487,7 @@ public void testTaskReconfigurationRetriesWithConnectorTaskConfigsException() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3513,7 +3509,7 @@ public void testTaskReconfigurationNoRetryWithTooManyTasks() { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3553,14 +3549,14 @@ public void testTaskReconfigurationNoRetryWithTooManyTasks() { public void testTaskReconfigurationRetriesWithLeaderRequestForwardingException() { herder = mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(HERDER_CONFIG), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, List.of(), new MockSynchronousExecutor(), new AutoCloseable[]{})); - verify(worker, times(2)).getPlugins(); + noneConnectorClientConfigOverridePolicy, Collections.emptyList(), new MockSynchronousExecutor(), new AutoCloseable[]{})); + rebalanceListener = herder.new RebalanceListener(time); when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(worker.isSinkConnector(CONN1)).thenReturn(Boolean.TRUE); - expectRebalance(1, List.of(), List.of(), false); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), false); expectConfigRefreshAndSnapshot(SNAPSHOT); when(worker.isRunning(CONN1)).thenReturn(true); @@ -3676,7 +3672,7 @@ public void testExactlyOnceSourceSupportValidation() { connectorMock, SourceConnectorConfig.configDef(), config); List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); - assertEquals(List.of(), errors); + assertEquals(Collections.emptyList(), errors); } @Test @@ -3693,7 +3689,7 @@ public void testExactlyOnceSourceSupportValidationOnUnsupportedConnector() { List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); assertEquals( - List.of("The connector does not support exactly-once semantics with the provided configuration."), + Collections.singletonList("The connector does not support exactly-once semantics with the provided configuration."), errors); } @@ -3751,7 +3747,7 @@ public void testExactlyOnceSourceSupportValidationWhenExactlyOnceNotEnabledOnWor List errors = validatedConfigs.get(SourceConnectorConfig.EXACTLY_ONCE_SUPPORT_CONFIG).errorMessages(); assertEquals( - List.of("This worker does not have exactly-once source support enabled."), + Collections.singletonList("This worker does not have exactly-once source support enabled."), errors); } @@ -3788,7 +3784,7 @@ public void testConnectorTransactionBoundaryValidation() { connectorMock, SourceConnectorConfig.configDef(), config); List errors = validatedConfigs.get(SourceConnectorConfig.TRANSACTION_BOUNDARY_CONFIG).errorMessages(); - assertEquals(List.of(), errors); + assertEquals(Collections.emptyList(), errors); } @Test @@ -3856,18 +3852,18 @@ public void testConnectorTransactionBoundaryValidationHandlesInvalidValuesGracef public void testConnectorOffsets() throws Exception { when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); - when(statusBackingStore.connectors()).thenReturn(Set.of()); + when(statusBackingStore.connectors()).thenReturn(Collections.emptySet()); expectMemberPoll(); herder.tick(); when(configBackingStore.snapshot()).thenReturn(SNAPSHOT); - ConnectorOffsets offsets = new ConnectorOffsets(List.of(new ConnectorOffset( - Map.of("partitionKey", "partitionValue"), - Map.of("offsetKey", "offsetValue")))); + ConnectorOffsets offsets = new ConnectorOffsets(Collections.singletonList(new ConnectorOffset( + Collections.singletonMap("partitionKey", "partitionValue"), + Collections.singletonMap("offsetKey", "offsetValue")))); ArgumentCaptor> callbackCapture = ArgumentCaptor.forClass(Callback.class); doAnswer(invocation -> { @@ -3888,7 +3884,7 @@ public void testModifyConnectorOffsetsUnknownConnector() { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.tick(); @@ -3906,7 +3902,7 @@ public void testModifyOffsetsConnectorNotInStoppedState() { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT); herder.tick(); @@ -3924,7 +3920,7 @@ public void testModifyOffsetsNotLeader() { // Get the initial assignment when(member.memberId()).thenReturn("member"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), false); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), false); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); @@ -3943,15 +3939,15 @@ public void testModifyOffsetsSinkConnector() throws Exception { // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); // Now handle the alter connector offsets request - Map, Map> offsets = Map.of( - Map.of("partitionKey", "partitionValue"), - Map.of("offsetKey", "offsetValue")); + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap("partitionKey", "partitionValue"), + Collections.singletonMap("offsetKey", "offsetValue")); ArgumentCaptor> workerCallbackCapture = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); @@ -3973,7 +3969,7 @@ public void testModifyOffsetsSourceConnectorExactlyOnceDisabled() throws Excepti when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); when(herder.connectorType(anyMap())).thenReturn(ConnectorType.SOURCE); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); herder.tick(); @@ -3996,12 +3992,11 @@ public void testModifyOffsetsSourceConnectorExactlyOnceDisabled() throws Excepti public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exception { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); - verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); expectMemberPoll(); @@ -4026,9 +4021,9 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio ArgumentCaptor> workerCallbackCapture = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); - Map, Map> offsets = Map.of( - Map.of("partitionKey", "partitionValue"), - Map.of("offsetKey", "offsetValue")); + Map, Map> offsets = Collections.singletonMap( + Collections.singletonMap("partitionKey", "partitionValue"), + Collections.singletonMap("offsetKey", "offsetValue")); doAnswer(invocation -> { workerCallbackCapture.getValue().onCompletion(null, msg); return null; @@ -4062,13 +4057,12 @@ public void testModifyOffsetsSourceConnectorExactlyOnceEnabled() throws Exceptio public void testModifyOffsetsSourceConnectorExactlyOnceEnabledZombieFencingFailure() { // Setup herder with exactly-once support for source connectors enabled herder = exactlyOnceHerder(); - verify(worker, times(2)).getPlugins(); rebalanceListener = herder.new RebalanceListener(time); // Get the initial assignment when(member.memberId()).thenReturn("leader"); when(member.currentProtocolVersion()).thenReturn(CONNECT_PROTOCOL_V0); - expectRebalance(1, List.of(), List.of(), true); + expectRebalance(1, Collections.emptyList(), Collections.emptyList(), true); expectConfigRefreshAndSnapshot(SNAPSHOT_STOPPED_CONN1); expectMemberPoll(); @@ -4133,14 +4127,14 @@ private void expectRebalance(final long offset, final List assignedTasks, final boolean isLeader) { - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, offset, "leader", "leaderUrl", assignedConnectors, assignedTasks, 0, isLeader); } private void expectRebalance(final long offset, final List assignedConnectors, final List assignedTasks, String leader, String leaderUrl, boolean isLeader) { - expectRebalance(List.of(), List.of(), + expectRebalance(Collections.emptyList(), Collections.emptyList(), ConnectProtocol.Assignment.NO_ERROR, offset, leader, leaderUrl, assignedConnectors, assignedTasks, 0, isLeader); } @@ -4186,12 +4180,12 @@ private void expectRebalance(final Collection revokedConnectors, if (connectProtocolVersion == CONNECT_PROTOCOL_V0) { assignment = new ExtendedAssignment( connectProtocolVersion, error, leader, leaderUrl, offset, - new ArrayList<>(assignedConnectors), new ArrayList<>(assignedTasks), - new ArrayList<>(), new ArrayList<>(), 0); + assignedConnectors, assignedTasks, + Collections.emptyList(), Collections.emptyList(), 0); } else { assignment = new ExtendedAssignment( connectProtocolVersion, error, leader, leaderUrl, offset, - new ArrayList<>(assignedConnectors), new ArrayList<>(assignedTasks), + assignedConnectors, assignedTasks, new ArrayList<>(revokedConnectors), new ArrayList<>(revokedTasks), delay); } rebalanceListener.onAssigned(assignment, 3); @@ -4268,13 +4262,13 @@ private ClusterConfigState exactlyOnceSnapshot( sessionKey, taskCounts, connectorConfigs, - Map.of(CONN1, TargetState.STARTED), + Collections.singletonMap(CONN1, TargetState.STARTED), taskConfigs, taskCountRecords, taskConfigGenerations, appliedConnectorConfigs, pendingFencing, - Set.of()); + Collections.emptySet()); } private void expectExecuteTaskReconfiguration(boolean running, ConnectorConfig connectorConfig, Answer>> answer) { @@ -4423,7 +4417,7 @@ private DistributedHerder exactlyOnceHerder() { config.put(EXACTLY_ONCE_SOURCE_SUPPORT_CONFIG, "enabled"); return mock(DistributedHerder.class, withSettings().defaultAnswer(CALLS_REAL_METHODS).useConstructor(new DistributedConfig(config), worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, configBackingStore, member, MEMBER_URL, restClient, metrics, time, - noneConnectorClientConfigOverridePolicy, List.of(), null, new AutoCloseable[0])); + noneConnectorClientConfigOverridePolicy, Collections.emptyList(), null, new AutoCloseable[0])); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java index 84bb8b145ad0a..86bc897fafe23 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/IncrementalCooperativeAssignorTest.java @@ -37,7 +37,9 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -660,8 +662,8 @@ public void testAssignConnectorsWhenBalanced() { List expectedAssignment = existingAssignment.stream() .map(wl -> new WorkerLoad.Builder(wl.worker()).withCopies(wl.connectors(), wl.tasks()).build()) .collect(Collectors.toList()); - expectedAssignment.get(0).connectors().addAll(List.of("connector6", "connector9")); - expectedAssignment.get(1).connectors().addAll(List.of("connector7", "connector10")); + expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9")); + expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10")); expectedAssignment.get(2).connectors().add("connector8"); List newConnectors = newConnectors(6, 11); @@ -680,12 +682,12 @@ public void testAssignTasksWhenBalanced() { .map(wl -> new WorkerLoad.Builder(wl.worker()).withCopies(wl.connectors(), wl.tasks()).build()) .collect(Collectors.toList()); - expectedAssignment.get(0).connectors().addAll(List.of("connector6", "connector9")); - expectedAssignment.get(1).connectors().addAll(List.of("connector7", "connector10")); + expectedAssignment.get(0).connectors().addAll(Arrays.asList("connector6", "connector9")); + expectedAssignment.get(1).connectors().addAll(Arrays.asList("connector7", "connector10")); expectedAssignment.get(2).connectors().add("connector8"); - expectedAssignment.get(0).tasks().addAll(List.of(new ConnectorTaskId("task", 6), new ConnectorTaskId("task", 9))); - expectedAssignment.get(1).tasks().addAll(List.of(new ConnectorTaskId("task", 7), new ConnectorTaskId("task", 10))); + expectedAssignment.get(0).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 6), new ConnectorTaskId("task", 9))); + expectedAssignment.get(1).tasks().addAll(Arrays.asList(new ConnectorTaskId("task", 7), new ConnectorTaskId("task", 10))); expectedAssignment.get(2).tasks().add(new ConnectorTaskId("task", 8)); List newConnectors = newConnectors(6, 11); @@ -732,7 +734,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -749,7 +751,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -764,7 +766,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(flakyWorker), + assertEquals(Collections.singleton(flakyWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -785,7 +787,7 @@ public void testLostAssignmentHandlingWhenWorkerBounces() { .tasks() .containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -812,7 +814,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -829,7 +831,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -843,7 +845,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -859,7 +861,7 @@ public void testLostAssignmentHandlingWhenWorkerLeavesPermanently() { "Wrong assignment of lost connectors"); assertTrue(lostAssignmentsToReassign.build().tasks().containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -886,7 +888,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -906,7 +908,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(newWorker), + assertEquals(Collections.singleton(newWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -921,7 +923,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - Set expectedWorkers = Set.of(newWorker, flakyWorker); + Set expectedWorkers = new HashSet<>(Arrays.asList(newWorker, flakyWorker)); assertEquals(expectedWorkers, assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); @@ -954,7 +956,7 @@ public void testLostAssignmentHandlingWithMoreThanOneCandidates() { "Wrong assignment of lost connectors"); assertTrue(listOfTasksInLast2Workers.containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -981,7 +983,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -998,7 +1000,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -1013,7 +1015,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { assignor.handleLostAssignments(lostAssignments, new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(veryFlakyWorker), + assertEquals(Collections.singleton(veryFlakyWorker), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(time.milliseconds() + rebalanceDelay, assignor.scheduledRebalance); @@ -1032,7 +1034,7 @@ public void testLostAssignmentHandlingWhenWorkerBouncesBackButFinallyLeaves() { "Wrong assignment of lost connectors"); assertTrue(lostAssignmentsToReassign.build().tasks().containsAll(lostAssignments.tasks()), "Wrong assignment of lost tasks"); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1060,7 +1062,7 @@ public void testLostAssignmentHandlingWhenScheduledDelayIsDisabled() { new ConnectorsAndTasks.Builder(), new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1078,7 +1080,7 @@ public void testLostAssignmentHandlingWhenScheduledDelayIsDisabled() { assignor.handleLostAssignments(lostAssignments, lostAssignmentsToReassign, new ArrayList<>(configuredAssignment.values())); - assertEquals(Set.of(), + assertEquals(Collections.emptySet(), assignor.candidateWorkersForReassignment, "Wrong set of workers for reassignments"); assertEquals(0, assignor.scheduledRebalance); @@ -1219,10 +1221,10 @@ public void testProtocolV1() { leader, "followMe:618", CONFIG_OFFSET, - Set.of(), - Set.of(), - Set.of(), - Set.of(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), 0 ); ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment); @@ -1234,7 +1236,7 @@ public void testProtocolV1() { when(coordinator.configSnapshot()).thenReturn(configState()); Map serializedAssignments = assignor.performAssignment( leader, - ConnectProtocolCompatibility.COMPATIBLE, + ConnectProtocolCompatibility.COMPATIBLE.protocol(), memberMetadata, coordinator ); @@ -1260,10 +1262,10 @@ public void testProtocolV2() { leader, "followMe:618", CONFIG_OFFSET, - Set.of(), - Set.of(), - Set.of(), - Set.of(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), + Collections.emptySet(), 0 ); ExtendedWorkerState leaderState = new ExtendedWorkerState("followMe:618", CONFIG_OFFSET, leaderAssignment); @@ -1275,7 +1277,7 @@ public void testProtocolV2() { when(coordinator.configSnapshot()).thenReturn(configState()); Map serializedAssignments = assignor.performAssignment( leader, - ConnectProtocolCompatibility.SESSIONED, + ConnectProtocolCompatibility.SESSIONED.protocol(), memberMetadata, coordinator ); @@ -1326,7 +1328,7 @@ private void performRebalance(boolean assignmentFailure, boolean generationMisma private void addNewEmptyWorkers(String... workers) { for (String worker : workers) { - addNewWorker(worker, List.of(), List.of()); + addNewWorker(worker, Collections.emptyList(), Collections.emptyList()); } } @@ -1390,13 +1392,13 @@ private void removeConnector(String connector) { private ClusterConfigState configState() { Map taskCounts = new HashMap<>(connectors); - Map> connectorConfigs = transformValues(taskCounts, c -> Map.of()); + Map> connectorConfigs = transformValues(taskCounts, c -> Collections.emptyMap()); Map targetStates = transformValues(taskCounts, c -> TargetState.STARTED); Map> taskConfigs = taskCounts.entrySet().stream() .flatMap(e -> IntStream.range(0, e.getValue()).mapToObj(i -> new ConnectorTaskId(e.getKey(), i))) .collect(Collectors.toMap( Function.identity(), - connectorTaskId -> Map.of() + connectorTaskId -> Collections.emptyMap() )); Map appliedConnectorConfigs = connectorConfigs.entrySet().stream() .collect(Collectors.toMap( @@ -1410,11 +1412,11 @@ private ClusterConfigState configState() { connectorConfigs, targetStates, taskConfigs, - Map.of(), - Map.of(), + Collections.emptyMap(), + Collections.emptyMap(), appliedConnectorConfigs, - Set.of(), - Set.of()); + Collections.emptySet(), + Collections.emptySet()); } private void applyAssignments() { @@ -1438,22 +1440,22 @@ private void applyAssignments() { } private void assertEmptyAssignment() { - assertEquals(List.of(), + assertEquals(Collections.emptyList(), ConnectUtils.combineCollections(returnedAssignments.newlyAssignedConnectors().values()), "No connectors should have been newly assigned during this round"); - assertEquals(List.of(), + assertEquals(Collections.emptyList(), ConnectUtils.combineCollections(returnedAssignments.newlyAssignedTasks().values()), "No tasks should have been newly assigned during this round"); - assertEquals(List.of(), + assertEquals(Collections.emptyList(), ConnectUtils.combineCollections(returnedAssignments.newlyRevokedConnectors().values()), "No connectors should have been revoked during this round"); - assertEquals(List.of(), + assertEquals(Collections.emptyList(), ConnectUtils.combineCollections(returnedAssignments.newlyRevokedTasks().values()), "No tasks should have been revoked during this round"); } private void assertWorkers(String... workers) { - assertEquals(Set.of(workers), returnedAssignments.allWorkers(), "Wrong set of workers"); + assertEquals(new HashSet<>(Arrays.asList(workers)), returnedAssignments.allWorkers(), "Wrong set of workers"); } /** @@ -1498,14 +1500,14 @@ private List allocations(Function assertEquals( - Set.of(), + Collections.emptySet(), new HashSet<>(revocations), "Expected no revocations to take place during this round, but connector revocations were issued for worker " + worker ) ); returnedAssignments.newlyRevokedTasks().forEach((worker, revocations) -> assertEquals( - Set.of(), + Collections.emptySet(), new HashSet<>(revocations), "Expected no revocations to take place during this round, but task revocations were issued for worker " + worker ) @@ -1540,11 +1542,11 @@ private void assertNoRedundantAssignments() { ); existingConnectors.retainAll(newConnectors); - assertEquals(List.of(), + assertEquals(Collections.emptyList(), existingConnectors, "Found connectors in new assignment that already exist in current assignment"); existingTasks.retainAll(newTasks); - assertEquals(List.of(), + assertEquals(Collections.emptyList(), existingConnectors, "Found tasks in new assignment that already exist in current assignment"); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java index f867183a324f7..a8e7cd465529f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorIncrementalTest.java @@ -42,7 +42,9 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -124,7 +126,7 @@ public void init(ConnectProtocolCompatibility compatibility) { this.time = new MockTime(); this.metadata = new Metadata(0, 0, Long.MAX_VALUE, loggerFactory, new ClusterResourceListeners()); this.client = new MockClient(time, metadata); - this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Map.of("topic", 1))); + this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1))); this.node = metadata.fetch().nodes().get(0); this.consumerClient = new ConsumerNetworkClient(loggerFactory, client, metadata, time, retryBackoffMs, requestTimeoutMs, heartbeatIntervalMs); @@ -147,7 +149,6 @@ public void init(ConnectProtocolCompatibility compatibility) { heartbeatIntervalMs, groupId, Optional.empty(), - null, retryBackoffMs, retryBackoffMaxMs, true); @@ -208,8 +209,8 @@ public void testMetadataWithExistingAssignment(ConnectProtocolCompatibility comp ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ExtendedAssignment.NO_ERROR, leaderId, leaderUrl, configState1.offset(), - List.of(connectorId1), List.of(taskId1x0, taskId2x0), - List.of(), List.of(), 0); + Collections.singletonList(connectorId1), Arrays.asList(taskId1x0, taskId2x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer buf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); // Using onJoinComplete to register the protocol selection decided by the broker // coordinator as well as an existing previous assignment that the call to metadata will @@ -227,8 +228,8 @@ public void testMetadataWithExistingAssignment(ConnectProtocolCompatibility comp .deserializeMetadata(ByteBuffer.wrap(selectedMetadata.metadata())); assertEquals(offset, state.offset()); assertNotEquals(ExtendedAssignment.empty(), state.assignment()); - assertEquals(List.of(connectorId1), state.assignment().connectors()); - assertEquals(List.of(taskId1x0, taskId2x0), state.assignment().tasks()); + assertEquals(Collections.singletonList(connectorId1), state.assignment().connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId2x0), state.assignment().tasks()); verify(configStorage, times(1)).snapshot(); } @@ -241,8 +242,8 @@ public void testMetadataWithExistingAssignmentButOlderProtocolSelection(ConnectP ExtendedAssignment assignment = new ExtendedAssignment( CONNECT_PROTOCOL_V1, ExtendedAssignment.NO_ERROR, leaderId, leaderUrl, configState1.offset(), - List.of(connectorId1), List.of(taskId1x0, taskId2x0), - List.of(), List.of(), 0); + Collections.singletonList(connectorId1), Arrays.asList(taskId1x0, taskId2x0), + Collections.emptyList(), Collections.emptyList(), 0); ByteBuffer buf = IncrementalCooperativeConnectProtocol.serializeAssignment(assignment, false); // Using onJoinComplete to register the protocol selection decided by the broker // coordinator as well as an existing previous assignment that the call to metadata will @@ -281,14 +282,14 @@ public void testTaskAssignmentWhenWorkerJoins(ConnectProtocolCompatibility compa ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(connectorId1), 4, - List.of(), 0, + Collections.singletonList(connectorId1), 4, + Collections.emptyList(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(connectorId2), 4, - List.of(), 0, + Collections.singletonList(connectorId2), 4, + Collections.emptyList(), 0, memberAssignment); coordinator.metadata(); @@ -304,20 +305,20 @@ public void testTaskAssignmentWhenWorkerJoins(ConnectProtocolCompatibility compa //Equally distributing tasks across member leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 1, + Collections.emptyList(), 0, + Collections.emptyList(), 1, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 1, + Collections.emptyList(), 0, + Collections.emptyList(), 1, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, anotherMemberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -342,20 +343,20 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(connectorId1), 3, - List.of(), 0, + Collections.singletonList(connectorId1), 3, + Collections.emptyList(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(connectorId2), 3, - List.of(), 0, + Collections.singletonList(connectorId2), 3, + Collections.emptyList(), 0, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - List.of(), 2, - List.of(), 0, + Collections.emptyList(), 2, + Collections.emptyList(), 0, anotherMemberAssignment); // Second rebalance detects a worker is missing @@ -371,15 +372,15 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, memberAssignment); @@ -391,15 +392,15 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, memberAssignment); @@ -410,14 +411,14 @@ public void testTaskAssignmentWhenWorkerLeavesPermanently(ConnectProtocolCompati leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 1, - List.of(), 0, + Collections.emptyList(), 1, + Collections.emptyList(), 0, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 1, - List.of(), 0, + Collections.emptyList(), 1, + Collections.emptyList(), 0, memberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -442,20 +443,20 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com ExtendedAssignment leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(connectorId1), 3, - List.of(), 0, + Collections.singletonList(connectorId1), 3, + Collections.emptyList(), 0, leaderAssignment); ExtendedAssignment memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(connectorId2), 3, - List.of(), 0, + Collections.singletonList(connectorId2), 3, + Collections.emptyList(), 0, memberAssignment); ExtendedAssignment anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - List.of(), 2, - List.of(), 0, + Collections.emptyList(), 2, + Collections.emptyList(), 0, anotherMemberAssignment); // Second rebalance detects a worker is missing @@ -470,15 +471,15 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, memberAssignment); @@ -492,22 +493,22 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, memberAssignment); anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, rebalanceDelay, anotherMemberAssignment); @@ -518,20 +519,20 @@ public void testTaskAssignmentWhenWorkerBounces(ConnectProtocolCompatibility com // A rebalance after the delay expires re-assigns the lost tasks to the returning member leaderAssignment = deserializeAssignment(result, leaderId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, leaderAssignment); memberAssignment = deserializeAssignment(result, memberId); assertAssignment(leaderId, offset, - List.of(), 0, - List.of(), 0, + Collections.emptyList(), 0, + Collections.emptyList(), 0, memberAssignment); anotherMemberAssignment = deserializeAssignment(result, anotherMemberId); assertAssignment(leaderId, offset, - List.of(), 2, - List.of(), 0, + Collections.emptyList(), 2, + Collections.emptyList(), 0, anotherMemberAssignment); verify(configStorage, times(configStorageCalls)).snapshot(); @@ -541,8 +542,8 @@ private static class MockRebalanceListener implements WorkerRebalanceListener { public ExtendedAssignment assignment = null; public String revokedLeader; - public Collection revokedConnectors = List.of(); - public Collection revokedTasks = List.of(); + public Collection revokedConnectors = Collections.emptyList(); + public Collection revokedTasks = Collections.emptyList(); public int revokedCount = 0; public int assignedCount = 0; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java index c4bed410abb4a..4122578266aaf 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerCoordinatorTest.java @@ -54,13 +54,14 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.stream.Collectors; import java.util.stream.Stream; @@ -127,7 +128,7 @@ public void setup(ConnectProtocolCompatibility compatibility) { this.time = new MockTime(); this.metadata = new Metadata(0, 0, Long.MAX_VALUE, logContext, new ClusterResourceListeners()); this.client = new MockClient(time, metadata); - this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Map.of("topic", 1))); + this.client.updateMetadata(RequestTestUtils.metadataUpdateWith(1, Collections.singletonMap("topic", 1))); this.node = metadata.fetch().nodes().get(0); this.consumerClient = new ConsumerNetworkClient(logContext, client, metadata, time, 100, 1000, heartbeatIntervalMs); this.metrics = new Metrics(time); @@ -138,7 +139,6 @@ public void setup(ConnectProtocolCompatibility compatibility) { heartbeatIntervalMs, groupId, Optional.empty(), - null, retryBackoffMs, retryBackoffMaxMs, true); @@ -157,15 +157,15 @@ public void setup(ConnectProtocolCompatibility compatibility) { configState1 = new ClusterConfigState( 4L, null, - Map.of(connectorId1, 1), - Map.of(connectorId1, new HashMap<>()), - Map.of(connectorId1, TargetState.STARTED), - Map.of(taskId1x0, new HashMap<>()), - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of() + Collections.singletonMap(connectorId1, 1), + Collections.singletonMap(connectorId1, new HashMap<>()), + Collections.singletonMap(connectorId1, TargetState.STARTED), + Collections.singletonMap(taskId1x0, new HashMap<>()), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet() ); Map configState2ConnectorTaskCounts = new HashMap<>(); @@ -188,11 +188,11 @@ public void setup(ConnectProtocolCompatibility compatibility) { configState2ConnectorConfigs, configState2TargetStates, configState2TaskConfigs, - Map.of(), - Map.of(), - Map.of(), - Set.of(), - Set.of() + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptySet(), + Collections.emptySet() ); Map configStateSingleTaskConnectorsConnectorTaskCounts = new HashMap<>(); @@ -223,11 +223,11 @@ public void setup(ConnectProtocolCompatibility compatibility) { configStateSingleTaskConnectorsConnectorConfigs, configStateSingleTaskConnectorsTargetStates, configStateSingleTaskConnectorsTaskConfigs, - Map.of(), - Map.of(), + Collections.emptyMap(), + Collections.emptyMap(), appliedConnectorConfigs, - Set.of(), - Set.of() + Collections.emptySet(), + Collections.emptySet() ); } @@ -280,8 +280,8 @@ public void testNormalJoinGroupLeader(ConnectProtocolCompatibility compatibility return sync.data().memberId().equals(memberId) && sync.data().generationId() == 1 && sync.groupAssignments().containsKey(memberId); - }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(connectorId1), - List.of(), Errors.NONE)); + }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.singletonList(connectorId1), + Collections.emptyList(), Errors.NONE)); coordinator.ensureActiveGroup(); assertFalse(coordinator.rejoinNeededOrPending()); @@ -290,8 +290,8 @@ public void testNormalJoinGroupLeader(ConnectProtocolCompatibility compatibility assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); assertEquals("leader", rebalanceListener.assignment.leader()); - assertEquals(List.of(connectorId1), rebalanceListener.assignment.connectors()); - assertEquals(List.of(), rebalanceListener.assignment.tasks()); + assertEquals(Collections.singletonList(connectorId1), rebalanceListener.assignment.connectors()); + assertEquals(Collections.emptyList(), rebalanceListener.assignment.tasks()); verify(configStorage).snapshot(); } @@ -314,8 +314,8 @@ public void testNormalJoinGroupFollower(ConnectProtocolCompatibility compatibili return sync.data().memberId().equals(memberId) && sync.data().generationId() == 1 && sync.data().assignments().isEmpty(); - }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(), - List.of(taskId1x0), Errors.NONE)); + }, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.emptyList(), + Collections.singletonList(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); assertFalse(coordinator.rejoinNeededOrPending()); @@ -323,8 +323,8 @@ public void testNormalJoinGroupFollower(ConnectProtocolCompatibility compatibili assertEquals(1, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(List.of(), rebalanceListener.assignment.connectors()); - assertEquals(List.of(taskId1x0), rebalanceListener.assignment.tasks()); + assertEquals(Collections.emptyList(), rebalanceListener.assignment.connectors()); + assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.assignment.tasks()); verify(configStorage).snapshot(); } @@ -351,14 +351,14 @@ public void testJoinLeaderCannotAssign(ConnectProtocolCompatibility compatibilit sync.data().assignments().isEmpty(); }; client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.CONFIG_MISMATCH, "leader", configState2.offset(), - List.of(), List.of(), Errors.NONE)); + Collections.emptyList(), Collections.emptyList(), Errors.NONE)); // When the first round fails, we'll take an updated config snapshot when(configStorage.snapshot()).thenReturn(configState2); client.prepareResponse(joinGroupFollowerResponse(1, memberId, "leader", Errors.NONE)); client.prepareResponse(matcher, syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState2.offset(), - List.of(), List.of(taskId1x0), Errors.NONE)); + Collections.emptyList(), Collections.singletonList(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); verify(configStorage, times(2)).snapshot(); @@ -375,32 +375,32 @@ public void testRejoinGroup(ConnectProtocolCompatibility compatibility) { // join the group once client.prepareResponse(joinGroupFollowerResponse(1, "member", "leader", Errors.NONE)); - client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(), - List.of(taskId1x0), Errors.NONE)); + client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.emptyList(), + Collections.singletonList(taskId1x0), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(0, rebalanceListener.revokedCount); assertEquals(1, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(List.of(), rebalanceListener.assignment.connectors()); - assertEquals(List.of(taskId1x0), rebalanceListener.assignment.tasks()); + assertEquals(Collections.emptyList(), rebalanceListener.assignment.connectors()); + assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.assignment.tasks()); // and join the group again coordinator.requestRejoin("test"); client.prepareResponse(joinGroupFollowerResponse(1, "member", "leader", Errors.NONE)); - client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), List.of(connectorId1), - List.of(), Errors.NONE)); + client.prepareResponse(syncGroupResponse(ConnectProtocol.Assignment.NO_ERROR, "leader", configState1.offset(), Collections.singletonList(connectorId1), + Collections.emptyList(), Errors.NONE)); coordinator.ensureActiveGroup(); assertEquals(1, rebalanceListener.revokedCount); - assertEquals(List.of(), rebalanceListener.revokedConnectors); - assertEquals(List.of(taskId1x0), rebalanceListener.revokedTasks); + assertEquals(Collections.emptyList(), rebalanceListener.revokedConnectors); + assertEquals(Collections.singletonList(taskId1x0), rebalanceListener.revokedTasks); assertEquals(2, rebalanceListener.assignedCount); assertFalse(rebalanceListener.assignment.failed()); assertEquals(configState1.offset(), rebalanceListener.assignment.offset()); - assertEquals(List.of(connectorId1), rebalanceListener.assignment.connectors()); - assertEquals(List.of(), rebalanceListener.assignment.tasks()); + assertEquals(Collections.singletonList(connectorId1), rebalanceListener.assignment.connectors()); + assertEquals(Collections.emptyList(), rebalanceListener.assignment.tasks()); verify(configStorage, times(2)).snapshot(); } @@ -434,15 +434,15 @@ public void testLeaderPerformAssignment1(ConnectProtocolCompatibility compatibil assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configState1.offset(), leaderAssignment.offset()); - assertEquals(List.of(connectorId1), leaderAssignment.connectors()); - assertEquals(List.of(), leaderAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); + assertEquals(Collections.emptyList(), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configState1.offset(), memberAssignment.offset()); - assertEquals(List.of(), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0), memberAssignment.tasks()); + assertEquals(Collections.emptyList(), memberAssignment.connectors()); + assertEquals(Collections.singletonList(taskId1x0), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -477,15 +477,15 @@ public void testLeaderPerformAssignment2(ConnectProtocolCompatibility compatibil assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configState2.offset(), leaderAssignment.offset()); - assertEquals(List.of(connectorId1), leaderAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId2x0), leaderAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId1), leaderAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configState2.offset(), memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x1), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Collections.singletonList(taskId1x1), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -521,15 +521,15 @@ public void testLeaderPerformAssignmentSingleTaskConnectors(ConnectProtocolCompa assertFalse(leaderAssignment.failed()); assertEquals("leader", leaderAssignment.leader()); assertEquals(configStateSingleTaskConnectors.offset(), leaderAssignment.offset()); - assertEquals(List.of(connectorId1, connectorId3), leaderAssignment.connectors()); - assertEquals(List.of(taskId2x0), leaderAssignment.tasks()); + assertEquals(Arrays.asList(connectorId1, connectorId3), leaderAssignment.connectors()); + assertEquals(Collections.singletonList(taskId2x0), leaderAssignment.tasks()); ConnectProtocol.Assignment memberAssignment = ConnectProtocol.deserializeAssignment(result.get("member")); assertFalse(memberAssignment.failed()); assertEquals("leader", memberAssignment.leader()); assertEquals(configStateSingleTaskConnectors.offset(), memberAssignment.offset()); - assertEquals(List.of(connectorId2), memberAssignment.connectors()); - assertEquals(List.of(taskId1x0, taskId3x0), memberAssignment.tasks()); + assertEquals(Collections.singletonList(connectorId2), memberAssignment.connectors()); + assertEquals(Arrays.asList(taskId1x0, taskId3x0), memberAssignment.tasks()); verify(configStorage).snapshot(); } @@ -546,7 +546,7 @@ public void testSkippingAssignmentFails(ConnectProtocolCompatibility compatibili coordinator.metadata(); assertThrows(IllegalStateException.class, - () -> coordinator.onLeaderElected("leader", EAGER.protocol(), List.of(), true)); + () -> coordinator.onLeaderElected("leader", EAGER.protocol(), Collections.emptyList(), true)); verify(configStorage).snapshot(); } @@ -582,7 +582,7 @@ private JoinGroupResponse joinGroupFollowerResponse(int generationId, String mem .setProtocolName(EAGER.protocol()) .setLeader(leaderId) .setMemberId(memberId) - .setMembers(List.of()), + .setMembers(Collections.emptyList()), ApiKeys.JOIN_GROUP.latestVersion() ); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java index 4886431869c1e..d85c2246dff85 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/distributed/WorkerGroupMemberTest.java @@ -59,7 +59,6 @@ public class WorkerGroupMemberTest { public void testMetrics() throws Exception { WorkerGroupMember member; Map workerProps = new HashMap<>(); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("offset.storage.file.filename", "/tmp/connect.offsets"); @@ -79,8 +78,9 @@ public void testMetrics() throws Exception { boolean foundJmxReporter = false; assertEquals(2, member.metrics().reporters().size()); for (MetricsReporter reporter : member.metrics().reporters()) { - if (reporter instanceof MockConnectMetrics.MockMetricsReporter mockMetricsReporter) { + if (reporter instanceof MockConnectMetrics.MockMetricsReporter) { foundMockReporter = true; + MockConnectMetrics.MockMetricsReporter mockMetricsReporter = (MockConnectMetrics.MockMetricsReporter) reporter; assertEquals("cluster-1", mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_KAFKA_CLUSTER_ID)); assertEquals("group-1", mockMetricsReporter.getMetricsContext().contextLabels().get(WorkerConfig.CONNECT_GROUP_ID)); } @@ -103,7 +103,6 @@ public void testMetrics() throws Exception { public void testDisableJmxReporter() { WorkerGroupMember member; Map workerProps = new HashMap<>(); - workerProps.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); workerProps.put("key.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("value.converter", "org.apache.kafka.connect.json.JsonConverter"); workerProps.put("group.id", "group-1"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java index ce0188ecf8aa0..e1b222730b838 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/ErrorReporterTest.java @@ -46,6 +46,8 @@ import java.util.concurrent.CompletableFuture; import java.util.concurrent.Future; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_CONNECTOR_NAME; import static org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_EXCEPTION; import static org.apache.kafka.connect.runtime.errors.DeadLetterQueueReporter.ERROR_HEADER_EXCEPTION_MESSAGE; @@ -103,13 +105,13 @@ public void tearDown() { @Test public void initializeDLQWithNullMetrics() { - assertThrows(NullPointerException.class, () -> new DeadLetterQueueReporter(producer, config(Map.of()), TASK_ID, null)); + assertThrows(NullPointerException.class, () -> new DeadLetterQueueReporter(producer, config(emptyMap()), TASK_ID, null)); } @Test public void testDLQConfigWithEmptyTopicName() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(Map.of()), TASK_ID, errorHandlingMetrics); + producer, config(emptyMap()), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -122,7 +124,7 @@ public void testDLQConfigWithEmptyTopicName() { @Test public void testDLQConfigWithValidTopicName() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -136,7 +138,7 @@ public void testDLQConfigWithValidTopicName() { @Test public void testReportDLQTwice() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -151,7 +153,7 @@ public void testReportDLQTwice() { @Test public void testCloseDLQ() { DeadLetterQueueReporter deadLetterQueueReporter = new DeadLetterQueueReporter( - producer, config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); + producer, config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)), TASK_ID, errorHandlingMetrics); deadLetterQueueReporter.close(); verify(producer).close(); @@ -159,7 +161,7 @@ public void testCloseDLQ() { @Test public void testLogOnDisabledLogReporter() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of()), errorHandlingMetrics); + LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(emptyMap()), errorHandlingMetrics); ProcessingContext> context = processingContext(); context.error(new RuntimeException()); @@ -171,7 +173,7 @@ public void testLogOnDisabledLogReporter() { @Test public void testLogOnEnabledLogReporter() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); + LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(singletonMap(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); ProcessingContext> context = processingContext(); context.error(new RuntimeException()); @@ -183,7 +185,7 @@ public void testLogOnEnabledLogReporter() { @Test public void testLogMessageWithNoRecords() { - LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(Map.of(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); + LogReporter> logReporter = new LogReporter.Sink(TASK_ID, config(singletonMap(ConnectorConfig.ERRORS_LOG_ENABLE_CONFIG, "true")), errorHandlingMetrics); ProcessingContext> context = processingContext(); @@ -229,11 +231,11 @@ public void testLogReportAndReturnFuture() { @Test public void testSetDLQConfigs() { - SinkConnectorConfig configuration = config(Map.of(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)); - assertEquals(DLQ_TOPIC, configuration.dlqTopicName()); + SinkConnectorConfig configuration = config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_NAME_CONFIG, DLQ_TOPIC)); + assertEquals(configuration.dlqTopicName(), DLQ_TOPIC); - configuration = config(Map.of(SinkConnectorConfig.DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "7")); - assertEquals(7, configuration.dlqTopicReplicationFactor()); + configuration = config(singletonMap(SinkConnectorConfig.DLQ_TOPIC_REPLICATION_FACTOR_CONFIG, "7")); + assertEquals(configuration.dlqTopicReplicationFactor(), 7); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java index f9e47afd0b31f..23c4bc25553c6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/RetryWithToleranceOperatorTest.java @@ -44,6 +44,8 @@ import org.mockito.quality.Strictness; import org.mockito.stubbing.OngoingStubbing; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -52,8 +54,11 @@ import java.util.concurrent.CountDownLatch; import java.util.concurrent.Future; import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; import java.util.stream.IntStream; +import static java.util.Collections.emptyMap; +import static java.util.Collections.singletonMap; import static org.apache.kafka.common.utils.Time.SYSTEM; import static org.apache.kafka.connect.runtime.ConnectorConfig.ERRORS_RETRY_MAX_DELAY_CONFIG; import static org.apache.kafka.connect.runtime.ConnectorConfig.ERRORS_RETRY_MAX_DELAY_DEFAULT; @@ -88,7 +93,6 @@ public class RetryWithToleranceOperatorTest { put(CommonClientConfigs.METRICS_RECORDING_LEVEL_CONFIG, Sensor.RecordingLevel.INFO.toString()); // define required properties - put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); }}; @@ -228,22 +232,22 @@ private RetryWithToleranceOperator setupExecutor(ToleranceType toleranceT @Test public void testExecAndHandleRetriableErrorOnce() throws Exception { - execAndHandleRetriableError(6000, 1, List.of(300L), new RetriableException("Test"), true); + execAndHandleRetriableError(6000, 1, Collections.singletonList(300L), new RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorThrice() throws Exception { - execAndHandleRetriableError(6000, 3, List.of(300L, 600L, 1200L), new RetriableException("Test"), true); + execAndHandleRetriableError(6000, 3, Arrays.asList(300L, 600L, 1200L), new RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorWithInfiniteRetries() throws Exception { - execAndHandleRetriableError(-1, 8, List.of(300L, 600L, 1200L, 2400L, 4800L, 9600L, 19200L, 38400L), new RetriableException("Test"), true); + execAndHandleRetriableError(-1, 8, Arrays.asList(300L, 600L, 1200L, 2400L, 4800L, 9600L, 19200L, 38400L), new RetriableException("Test"), true); } @Test public void testExecAndHandleRetriableErrorWithMaxRetriesExceeded() throws Exception { - execAndHandleRetriableError(6000, 6, List.of(300L, 600L, 1200L, 2400L, 1500L), new RetriableException("Test"), false); + execAndHandleRetriableError(6000, 6, Arrays.asList(300L, 600L, 1200L, 2400L, 1500L), new RetriableException("Test"), false); } public void execAndHandleRetriableError(long errorRetryTimeout, int numRetriableExceptionsThrown, List expectedWaits, Exception e, boolean successExpected) throws Exception { @@ -392,10 +396,10 @@ public void testToleranceLimit() { @Test public void testDefaultConfigs() { - ConnectorConfig configuration = config(Map.of()); - assertEquals(ERRORS_RETRY_TIMEOUT_DEFAULT, configuration.errorRetryTimeout()); - assertEquals(ERRORS_RETRY_MAX_DELAY_DEFAULT, configuration.errorMaxDelayInMillis()); - assertEquals(ERRORS_TOLERANCE_DEFAULT, configuration.errorToleranceType()); + ConnectorConfig configuration = config(emptyMap()); + assertEquals(configuration.errorRetryTimeout(), ERRORS_RETRY_TIMEOUT_DEFAULT); + assertEquals(configuration.errorMaxDelayInMillis(), ERRORS_RETRY_MAX_DELAY_DEFAULT); + assertEquals(configuration.errorToleranceType(), ERRORS_TOLERANCE_DEFAULT); } ConnectorConfig config(Map connProps) { @@ -409,14 +413,14 @@ ConnectorConfig config(Map connProps) { @Test public void testSetConfigs() { ConnectorConfig configuration; - configuration = config(Map.of(ERRORS_RETRY_TIMEOUT_CONFIG, "100")); - assertEquals(100, configuration.errorRetryTimeout()); + configuration = config(singletonMap(ERRORS_RETRY_TIMEOUT_CONFIG, "100")); + assertEquals(configuration.errorRetryTimeout(), 100); - configuration = config(Map.of(ERRORS_RETRY_MAX_DELAY_CONFIG, "100")); - assertEquals(100, configuration.errorMaxDelayInMillis()); + configuration = config(singletonMap(ERRORS_RETRY_MAX_DELAY_CONFIG, "100")); + assertEquals(configuration.errorMaxDelayInMillis(), 100); - configuration = config(Map.of(ERRORS_TOLERANCE_CONFIG, "none")); - assertEquals(ToleranceType.NONE, configuration.errorToleranceType()); + configuration = config(singletonMap(ERRORS_TOLERANCE_CONFIG, "none")); + assertEquals(configuration.errorToleranceType(), ToleranceType.NONE); } @Test @@ -434,8 +438,8 @@ private void testReport(int numberOfReports) { CountDownLatch exitLatch = mock(CountDownLatch.class); RetryWithToleranceOperator> retryWithToleranceOperator = new RetryWithToleranceOperator<>(-1, ERRORS_RETRY_MAX_DELAY_DEFAULT, ALL, time, errorHandlingMetrics, exitLatch); ConsumerRecord consumerRecord = new ConsumerRecord<>("t", 0, 0, null, null); - List> fs = IntStream.range(0, numberOfReports).mapToObj(i -> new CompletableFuture()).toList(); - List>> reporters = IntStream.range(0, numberOfReports).mapToObj(i -> (ErrorReporter>) c -> fs.get(i)).toList(); + List> fs = IntStream.range(0, numberOfReports).mapToObj(i -> new CompletableFuture()).collect(Collectors.toList()); + List>> reporters = IntStream.range(0, numberOfReports).mapToObj(i -> (ErrorReporter>) c -> fs.get(i)).collect(Collectors.toList()); retryWithToleranceOperator.reporters(reporters); ProcessingContext> context = new ProcessingContext<>(consumerRecord); Future result = retryWithToleranceOperator.report(context); @@ -454,7 +458,7 @@ public void testCloseErrorReporters() { RetryWithToleranceOperator retryWithToleranceOperator = allOperator(); - retryWithToleranceOperator.reporters(List.of(reporterA, reporterB)); + retryWithToleranceOperator.reporters(Arrays.asList(reporterA, reporterB)); // Even though the reporters throw exceptions, they should both still be closed. @@ -471,7 +475,7 @@ public void testCloseErrorReportersExceptionPropagation() { RetryWithToleranceOperator retryWithToleranceOperator = allOperator(); - retryWithToleranceOperator.reporters(List.of(reporterA, reporterB)); + retryWithToleranceOperator.reporters(Arrays.asList(reporterA, reporterB)); // Even though the reporters throw exceptions, they should both still be closed. doThrow(new RuntimeException()).when(reporterA).close(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java index 10b715fe18d24..7783f267dfea0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/errors/WorkerErrantRecordReporterTest.java @@ -35,7 +35,7 @@ import java.util.ArrayList; import java.util.Collection; -import java.util.List; +import java.util.Collections; import java.util.concurrent.CompletableFuture; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -66,7 +66,7 @@ public void testGetFutures() { for (int i = 0; i < 4; i++) { TopicPartition topicPartition = new TopicPartition("topic", i); topicPartitions.add(topicPartition); - reporter.futures.put(topicPartition, List.of(CompletableFuture.completedFuture(null))); + reporter.futures.put(topicPartition, Collections.singletonList(CompletableFuture.completedFuture(null))); } assertFalse(reporter.futures.isEmpty()); reporter.awaitFutures(topicPartitions); @@ -105,7 +105,7 @@ private void initializeReporter(boolean errorsTolerated) { Time.SYSTEM, errorHandlingMetrics ); - retryWithToleranceOperator.reporters(List.of(errorReporter)); + retryWithToleranceOperator.reporters(Collections.singletonList(errorReporter)); reporter = new WorkerErrantRecordReporter( retryWithToleranceOperator, converter, diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java index 65e5b15128d93..79d5788ff436e 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/health/ConnectClusterStateImplTest.java @@ -29,8 +29,9 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Arrays; import java.util.Collection; -import java.util.List; +import java.util.Collections; import java.util.Map; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; @@ -54,7 +55,7 @@ public class ConnectClusterStateImplTest { @BeforeEach public void setUp() { - expectedConnectors = List.of("sink1", "source1", "source2"); + expectedConnectors = Arrays.asList("sink1", "source1", "source2"); connectClusterState = new ConnectClusterStateImpl( herderRequestTimeoutMs, new ConnectClusterDetailsImpl(KAFKA_CLUSTER_ID), @@ -77,7 +78,7 @@ public void connectors() { @Test public void connectorConfig() { final String connName = "sink6"; - final Map expectedConfig = Map.of("key", "value"); + final Map expectedConfig = Collections.singletonMap("key", "value"); @SuppressWarnings("unchecked") ArgumentCaptor>> callback = ArgumentCaptor.forClass(Callback.class); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java index fd97935933a07..749acb3e5b0b2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/DelegatingClassLoaderTest.java @@ -27,6 +27,7 @@ import java.net.MalformedURLException; import java.net.URL; +import java.util.Collections; import java.util.SortedSet; import java.util.TreeSet; @@ -72,15 +73,15 @@ public void setUp() { assertTrue(PluginUtils.shouldLoadInIsolation(pluginDesc.className())); sinkConnectors.add(pluginDesc); scanResult = new PluginScanResult( - sinkConnectors, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + sinkConnectors, + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java index b253405204d2f..ca099976444e9 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginScannerTest.java @@ -26,6 +26,7 @@ import java.io.File; import java.nio.file.Files; import java.nio.file.Path; +import java.util.Collections; import java.util.HashSet; import java.util.Optional; import java.util.Set; @@ -49,7 +50,7 @@ static Stream parameters() { @ParameterizedTest @MethodSource("parameters") public void testScanningEmptyPluginPath(PluginScanner scanner) { - PluginScanResult result = scan(scanner, Set.of()); + PluginScanResult result = scan(scanner, Collections.emptySet()); assertTrue(result.isEmpty()); } @@ -68,7 +69,7 @@ public void testScanningPluginClasses(PluginScanner scanner) { public void testScanningInvalidUberJar(PluginScanner scanner) throws Exception { File newFile = new File(pluginDir, "invalid.jar"); newFile.createNewFile(); - PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -80,14 +81,14 @@ public void testScanningPluginDirContainsInvalidJarsOnly(PluginScanner scanner) newFile = new File(newFile, "invalid.jar"); newFile.createNewFile(); - PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); assertTrue(result.isEmpty()); } @ParameterizedTest @MethodSource("parameters") public void testScanningNoPlugins(PluginScanner scanner) { - PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -97,7 +98,7 @@ public void testScanningPluginDirEmpty(PluginScanner scanner) { File newFile = new File(pluginDir, "my-plugin"); newFile.mkdir(); - PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); assertTrue(result.isEmpty()); } @@ -115,7 +116,7 @@ public void testScanningMixOfValidAndInvalidPlugins(PluginScanner scanner) throw Files.copy(source, pluginPath.resolve(source.getFileName())); } - PluginScanResult result = scan(scanner, Set.of(pluginDir.toPath())); + PluginScanResult result = scan(scanner, Collections.singleton(pluginDir.toPath())); Set classes = new HashSet<>(); result.forEach(pluginDesc -> classes.add(pluginDesc.className())); Set expectedClasses = new HashSet<>(TestPlugins.pluginClasses()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java index 24ef0d535b8a6..23041f9c31937 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginUtilsTest.java @@ -113,7 +113,7 @@ public void testKafkaDependencyClasses() { @Test public void testConnectApiClasses() { - List apiClasses = List.of( + List apiClasses = Arrays.asList( // Enumerate all packages and classes "org.apache.kafka.connect.", "org.apache.kafka.connect.components.", @@ -201,7 +201,7 @@ public void testConnectApiClasses() { @Test public void testConnectRuntimeClasses() { // Only list packages, because there are too many classes. - List runtimeClasses = List.of( + List runtimeClasses = Arrays.asList( "org.apache.kafka.connect.cli.", //"org.apache.kafka.connect.connector.policy.", isolated by default //"org.apache.kafka.connect.converters.", isolated by default @@ -229,7 +229,7 @@ public void testConnectRuntimeClasses() { @Test public void testAllowedRuntimeClasses() { - List jsonConverterClasses = List.of( + List jsonConverterClasses = Arrays.asList( "org.apache.kafka.connect.connector.policy.", "org.apache.kafka.connect.connector.policy.AbstractConnectorClientConfigOverridePolicy", "org.apache.kafka.connect.connector.policy.AllConnectorClientConfigOverridePolicy", @@ -256,7 +256,7 @@ public void testAllowedRuntimeClasses() { @Test public void testTransformsClasses() { - List transformsClasses = List.of( + List transformsClasses = Arrays.asList( "org.apache.kafka.connect.transforms.", "org.apache.kafka.connect.transforms.util.", "org.apache.kafka.connect.transforms.util.NonEmptyListValidator", @@ -309,7 +309,7 @@ public void testTransformsClasses() { @Test public void testAllowedJsonConverterClasses() { - List jsonConverterClasses = List.of( + List jsonConverterClasses = Arrays.asList( "org.apache.kafka.connect.json.", "org.apache.kafka.connect.json.DecimalFormat", "org.apache.kafka.connect.json.JsonConverter", @@ -326,7 +326,7 @@ public void testAllowedJsonConverterClasses() { @Test public void testAllowedFileConnectors() { - List jsonConverterClasses = List.of( + List jsonConverterClasses = Arrays.asList( "org.apache.kafka.connect.file.", "org.apache.kafka.connect.file.FileStreamSinkConnector", "org.apache.kafka.connect.file.FileStreamSinkTask", @@ -341,7 +341,7 @@ public void testAllowedFileConnectors() { @Test public void testAllowedBasicAuthExtensionClasses() { - List basicAuthExtensionClasses = List.of( + List basicAuthExtensionClasses = Arrays.asList( "org.apache.kafka.connect.rest.basic.auth.extension.BasicAuthSecurityRestExtension" //"org.apache.kafka.connect.rest.basic.auth.extension.JaasBasicAuthFilter", TODO fix? //"org.apache.kafka.connect.rest.basic.auth.extension.PropertyFileLoginModule" TODO fix? @@ -377,13 +377,13 @@ public void testClientConfigProvider() { @Test public void testEmptyPluginUrls() throws Exception { - assertEquals(List.of(), PluginUtils.pluginUrls(pluginPath)); + assertEquals(Collections.emptyList(), PluginUtils.pluginUrls(pluginPath)); } @Test public void testEmptyStructurePluginUrls() throws Exception { createBasicDirectoryLayout(); - assertEquals(List.of(), PluginUtils.pluginUrls(pluginPath)); + assertEquals(Collections.emptyList(), PluginUtils.pluginUrls(pluginPath)); } @Test @@ -511,12 +511,12 @@ public void testNonCollidingAliases() { sinkConnectors, sourceConnectors, converters, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -538,14 +538,14 @@ public void testMultiVersionAlias() { assertEquals(2, sinkConnectors.size()); PluginScanResult result = new PluginScanResult( sinkConnectors, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -561,15 +561,15 @@ public void testCollidingPrunedAlias() { SortedSet> headerConverters = new TreeSet<>(); headerConverters.add(new PluginDesc<>(CollidingHeaderConverter.class, null, PluginType.HEADER_CONVERTER, CollidingHeaderConverter.class.getClassLoader())); PluginScanResult result = new PluginScanResult( - new TreeSet<>(), - new TreeSet<>(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), converters, headerConverters, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); @@ -586,15 +586,15 @@ public void testCollidingSimpleAlias() { SortedSet>> transformations = new TreeSet<>(); transformations.add(new PluginDesc<>((Class>) (Class) Colliding.class, null, PluginType.TRANSFORMATION, Colliding.class.getClassLoader())); PluginScanResult result = new PluginScanResult( - new TreeSet<>(), - new TreeSet<>(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), converters, - new TreeSet<>(), + Collections.emptySortedSet(), transformations, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); Map actualAliases = PluginUtils.computeAliases(result); Map expectedAliases = new HashMap<>(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java index 9492f9f7ea22e..55a3445a3318c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/PluginsTest.java @@ -55,19 +55,21 @@ import java.net.URL; import java.net.URLClassLoader; import java.nio.file.Path; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.SortedSet; -import java.util.TreeSet; import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertSame; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; @@ -92,7 +94,6 @@ public void setup() { pluginProps.put(WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined()); plugins = new Plugins(pluginProps); props = new HashMap<>(pluginProps); - props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); props.put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); props.put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, TestConverter.class.getName()); props.put("key.converter." + JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "true"); @@ -106,17 +107,17 @@ public void setup() { SortedSet> sinkConnectors = (SortedSet>) plugins.sinkConnectors(); missingPluginClass = sinkConnectors.first().className(); nonEmpty = new PluginScanResult( - sinkConnectors, - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>(), - new TreeSet<>() + sinkConnectors, + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet(), + Collections.emptySortedSet() ); - empty = new PluginScanResult(List.of()); + empty = new PluginScanResult(Collections.emptyList()); createConfig(); } @@ -140,7 +141,7 @@ public void shouldInstantiateAndConfigureConverters() { @Test public void shouldInstantiateAndConfigureInternalConverters() { - instantiateAndConfigureInternalConverter(true, Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false")); + instantiateAndConfigureInternalConverter(true, Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false")); // Validate schemas.enable is set to false assertEquals("false", internalConverter.configs.get(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG)); } @@ -196,9 +197,16 @@ public void shouldInstantiateAndConfigureDefaultHeaderConverter() { props.remove(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG); createConfig(); + // Because it's not explicitly set on the supplied configuration, the logic to use the current classloader for the connector + // will exit immediately, and so this method always returns null HeaderConverter headerConverter = plugins.newHeaderConverter(config, + WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, + ClassLoaderUsage.CURRENT_CLASSLOADER); + assertNull(headerConverter); + // But we should always find it (or the worker's default) when using the plugins classloader ... + headerConverter = plugins.newHeaderConverter(config, WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG, - ClassLoaderUsage.CURRENT_CLASSLOADER); + ClassLoaderUsage.PLUGINS); assertNotNull(headerConverter); assertInstanceOf(SimpleHeaderConverter.class, headerConverter); } @@ -207,7 +215,7 @@ public void shouldInstantiateAndConfigureDefaultHeaderConverter() { public void shouldThrowIfPluginThrows() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.ALWAYS_THROW_EXCEPTION.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class )); } @@ -216,7 +224,7 @@ public void shouldThrowIfPluginThrows() { public void shouldFindCoLocatedPluginIfBadPackaging() { Converter converter = plugins.newPlugin( TestPlugin.BAD_PACKAGING_CO_LOCATED.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); assertNotNull(converter); @@ -226,7 +234,7 @@ public void shouldFindCoLocatedPluginIfBadPackaging() { public void shouldThrowIfPluginMissingSuperclass() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.BAD_PACKAGING_MISSING_SUPERCLASS.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class )); } @@ -242,7 +250,7 @@ public void shouldThrowIfStaticInitializerThrows() { public void shouldThrowIfStaticInitializerThrowsServiceLoader() { assertThrows(ConnectException.class, () -> plugins.newPlugin( TestPlugin.BAD_PACKAGING_STATIC_INITIALIZER_THROWS_REST_EXTENSION.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), ConnectRestExtension.class )); } @@ -300,7 +308,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { // Plugins are not isolated from other instances of their own class. Converter firstPlugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); @@ -308,7 +316,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { Converter secondPlugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); @@ -323,7 +331,7 @@ public void shouldShareStaticValuesBetweenSamePlugin() { public void newPluginShouldServiceLoadWithPluginClassLoader() { Converter plugin = plugins.newPlugin( TestPlugin.SERVICE_LOADER.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); @@ -339,7 +347,7 @@ public void newPluginShouldServiceLoadWithPluginClassLoader() { public void newPluginShouldInstantiateWithPluginClassLoader() { Converter plugin = plugins.newPlugin( TestPlugin.ALIASED_STATIC_FIELD.className(), - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); @@ -428,7 +436,7 @@ public void newConnectorShouldInstantiateWithPluginClassLoader() { @Test public void newPluginsShouldConfigureWithPluginClassLoader() { List configurables = plugins.newPlugins( - List.of(TestPlugin.SAMPLING_CONFIGURABLE.className()), + Collections.singletonList(TestPlugin.SAMPLING_CONFIGURABLE.className()), config, Configurable.class ); @@ -596,7 +604,7 @@ public void testAliasesInConverters() throws ClassNotFoundException { String alias = "SamplingConverter"; assertTrue(TestPlugin.SAMPLING_CONVERTER.className().contains(alias)); ConfigDef def = new ConfigDef().define(configKey, ConfigDef.Type.CLASS, ConfigDef.Importance.HIGH, "docstring"); - AbstractConfig config = new AbstractConfig(def, Map.of(configKey, alias)); + AbstractConfig config = new AbstractConfig(def, Collections.singletonMap(configKey, alias)); assertNotNull(config.getClass(configKey)); assertNotNull(config.getConfiguredInstance(configKey, Converter.class)); @@ -625,7 +633,7 @@ private void assertClassLoaderReadsVersionFromResource( // Initialize Plugins object with parent class loader in the class loader tree. This is // to simulate the situation where jars exist on both system classpath and plugin path. - Map pluginProps = Map.of( + Map pluginProps = Collections.singletonMap( WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined(childResource) ); @@ -638,14 +646,14 @@ private void assertClassLoaderReadsVersionFromResource( Converter converter = plugins.newPlugin( className, - new AbstractConfig(new ConfigDef(), Map.of()), + new AbstractConfig(new ConfigDef(), Collections.emptyMap()), Converter.class ); // Verify the version was read from the correct resource assertEquals(expectedVersions[0], new String(converter.fromConnectData(null, null, null))); // When requesting multiple resources, they should be listed in the correct order - assertEquals(List.of(expectedVersions), + assertEquals(Arrays.asList(expectedVersions), converter.toConnectData(null, null).value()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java index b4e5c578e836b..a99235a1d1404 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SamplingTestPlugin.java @@ -17,6 +17,7 @@ package org.apache.kafka.connect.runtime.isolation; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -41,7 +42,7 @@ public interface SamplingTestPlugin { * @return All known instances of this class, including this instance. */ default List allInstances() { - return List.of(this); + return Collections.singletonList(this); } /** @@ -49,7 +50,7 @@ default List allInstances() { * This should only return direct children, and not reference this instance directly */ default Map otherSamples() { - return Map.of(); + return Collections.emptyMap(); } /** diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java index bc326c02ee37a..04501e0763856 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/SynchronizationTest.java @@ -27,7 +27,6 @@ import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.storage.Converter; -import org.apache.maven.artifact.versioning.VersionRange; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; @@ -43,6 +42,7 @@ import java.lang.management.ThreadInfo; import java.net.URL; import java.util.Arrays; +import java.util.Collections; import java.util.Map; import java.util.Objects; import java.util.concurrent.BrokenBarrierException; @@ -70,7 +70,7 @@ public class SynchronizationTest { @BeforeEach public void setup(TestInfo testInfo) { - Map pluginProps = Map.of( + Map pluginProps = Collections.singletonMap( WorkerConfig.PLUGIN_PATH_CONFIG, TestPlugins.pluginPathJoined() ); @@ -190,10 +190,10 @@ public SynchronizedDelegatingClassLoader(ClassLoader parent, Breakpoint } @Override - public PluginClassLoader pluginClassLoader(String name, VersionRange range) { + public PluginClassLoader pluginClassLoader(String name) { dclBreakpoint.await(name); dclBreakpoint.await(name); - return super.pluginClassLoader(name, range); + return super.pluginClassLoader(name); } } @@ -240,7 +240,7 @@ public void testSimultaneousUpwardAndDownwardDelegating() throws Exception { // 4. Load the isolated plugin class and return new AbstractConfig( new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""), - Map.of("a.class", t1Class)); + Collections.singletonMap("a.class", t1Class)); } }; @@ -258,7 +258,7 @@ public void testSimultaneousUpwardAndDownwardDelegating() throws Exception { // 3. Enter the DelegatingClassLoader // 4. Load the non-isolated class and return new AbstractConfig(new ConfigDef().define("a.class", Type.CLASS, Importance.HIGH, ""), - Map.of("a.class", t2Class)); + Collections.singletonMap("a.class", t2Class)); } }; @@ -456,6 +456,7 @@ private static void printStacktrace(ThreadInfo info, StringBuilder sb) { } } + @SuppressWarnings("removal") private static ThreadFactory threadFactoryWithNamedThreads(String threadPrefix) { AtomicInteger threadNumber = new AtomicInteger(1); return r -> { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java index 5a86ddd7b5f35..adb2c2418d5fb 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/isolation/TestPlugins.java @@ -39,7 +39,6 @@ import java.util.Map; import java.util.Objects; import java.util.Set; -import java.util.function.Function; import java.util.function.Predicate; import java.util.jar.Attributes; import java.util.jar.JarEntry; @@ -296,9 +295,9 @@ public boolean includeByDefault() { try { for (TestPackage testPackage : TestPackage.values()) { if (pluginJars.containsKey(testPackage)) { - log.debug("Skipping recompilation of {}", testPackage.resourceDir()); + log.debug("Skipping recompilation of " + testPackage.resourceDir()); } - pluginJars.put(testPackage, createPluginJar(testPackage.resourceDir(), testPackage.removeRuntimeClasses(), Map.of())); + pluginJars.put(testPackage, createPluginJar(testPackage.resourceDir(), testPackage.removeRuntimeClasses())); } } catch (Throwable e) { log.error("Could not set up plugin test jars", e); @@ -372,11 +371,7 @@ public static List pluginClasses(TestPlugin... plugins) { .filter(Objects::nonNull) .map(TestPlugin::className) .distinct() - .toList(); - } - - public static Function noOpLoaderSwap() { - return classLoader -> new LoaderSwap(Thread.currentThread().getContextClassLoader()); + .collect(Collectors.toList()); } private static TestPlugin[] defaultPlugins() { @@ -385,11 +380,10 @@ private static TestPlugin[] defaultPlugins() { .toArray(TestPlugin[]::new); } - - static Path createPluginJar(String resourceDir, Predicate removeRuntimeClasses, Map replacements) throws IOException { + private static Path createPluginJar(String resourceDir, Predicate removeRuntimeClasses) throws IOException { Path inputDir = resourceDirectoryPath("test-plugins/" + resourceDir); Path binDir = Files.createTempDirectory(resourceDir + ".bin."); - compileJavaSources(inputDir, binDir, replacements); + compileJavaSources(inputDir, binDir); Path jarFile = Files.createTempFile(resourceDir + ".", ".jar"); try (JarOutputStream jar = openJarFile(jarFile)) { writeJar(jar, inputDir, removeRuntimeClasses); @@ -429,7 +423,7 @@ private static void removeDirectory(Path binDir) throws IOException { classFiles = stream .sorted(Comparator.reverseOrder()) .map(Path::toFile) - .toList(); + .collect(Collectors.toList()); } for (File classFile : classFiles) { if (!classFile.delete()) { @@ -449,7 +443,7 @@ private static void removeDirectory(Path binDir) throws IOException { * @param sourceDir Directory containing java source files * @throws IOException if the files cannot be compiled */ - private static void compileJavaSources(Path sourceDir, Path binDir, Map replacements) throws IOException { + private static void compileJavaSources(Path sourceDir, Path binDir) throws IOException { JavaCompiler compiler = ToolProvider.getSystemJavaCompiler(); List sourceFiles; try (Stream stream = Files.walk(sourceDir)) { @@ -457,14 +451,13 @@ private static void compileJavaSources(Path sourceDir, Path binDir, Map file.getName().endsWith(".java")) - .map(file -> replacements.isEmpty() ? file : copyAndReplace(file, replacements)) - .toList(); + .collect(Collectors.toList()); } - StringWriter writer = new StringWriter(); - List options = List.of( + List options = Arrays.asList( "-d", binDir.toString() // Write class output to a different directory. ); + try (StandardJavaFileManager fileManager = compiler.getStandardFileManager(null, null, null)) { boolean success = compiler.getTask( writer, @@ -480,21 +473,6 @@ private static void compileJavaSources(Path sourceDir, Path binDir, Map replacements) throws RuntimeException { - try { - String content = Files.readString(source.toPath()); - for (Map.Entry entry : replacements.entrySet()) { - content = content.replace(entry.getKey(), entry.getValue()); - } - File tmpFile = new File(System.getProperty("java.io.tmpdir") + File.separator + source.getName()); - Files.writeString(tmpFile.toPath(), content); - tmpFile.deleteOnExit(); - return tmpFile; - } catch (IOException e) { - throw new RuntimeException("Could not copy and replace file: " + source, e); - } - } - private static void writeJar(JarOutputStream jar, Path inputDir, Predicate removeRuntimeClasses) throws IOException { List paths; try (Stream stream = Files.walk(inputDir)) { @@ -502,7 +480,7 @@ private static void writeJar(JarOutputStream jar, Path inputDir, Predicate !path.toFile().getName().endsWith(".java")) .filter(path -> !removeRuntimeClasses.test(path.toFile().getName())) - .toList(); + .collect(Collectors.toList()); } for (Path path : paths) { try (InputStream in = new BufferedInputStream(Files.newInputStream(path))) { @@ -520,4 +498,5 @@ private static void writeJar(JarOutputStream jar, Path inputDir, Predicate configMap = new HashMap<>(baseServerProps()); configMap.put(RestServerConfig.LISTENERS_CONFIG, "http://localhost:8080,https://localhost:8443"); @@ -211,7 +197,7 @@ public void checkCORSRequest(String corsDomain, String origin, String expectedHe doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(List.of("a", "b")).when(herder).connectors(); + doReturn(Arrays.asList("a", "b")).when(herder).connectors(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -254,7 +240,7 @@ public void testStandaloneConfig() throws IOException { doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(List.of("a", "b")).when(herder).connectors(); + doReturn(Arrays.asList("a", "b")).when(herder).connectors(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -276,8 +262,8 @@ public void testLoggerEndpointWithDefaults() throws IOException { doReturn(KAFKA_CLUSTER_ID).when(herder).kafkaClusterId(); doReturn(plugins).when(herder).plugins(); expectEmptyRestExtensions(); - doReturn(List.of()).when(herder).setWorkerLoggerLevel(logger, loggingLevel); - doReturn(Map.of(logger, new LoggerLevel(loggingLevel, lastModified))).when(herder).allLoggerLevels(); + doReturn(Collections.emptyList()).when(herder).setWorkerLoggerLevel(logger, loggingLevel); + doReturn(Collections.singletonMap(logger, new LoggerLevel(loggingLevel, lastModified))).when(herder).allLoggerLevels(); server = new ConnectRestServer(null, restClient, configMap); server.initializeServer(); @@ -294,7 +280,7 @@ public void testLoggerEndpointWithDefaults() throws IOException { Map expectedLogger = new HashMap<>(); expectedLogger.put("level", loggingLevel); expectedLogger.put("last_modified", lastModified); - Map> expectedLoggers = Map.of(logger, expectedLogger); + Map> expectedLoggers = Collections.singletonMap(logger, expectedLogger); Map> actualLoggers = mapper.readValue(responseStr, new TypeReference<>() { }); assertEquals(expectedLoggers, actualLoggers); } @@ -389,44 +375,6 @@ public void testDefaultCustomizedHttpResponseHeaders() throws IOException { checkCustomizedHttpResponseHeaders(headerConfig, expectedHeaders); } - static final class MonitorableConnectRestExtension extends PluginsTest.TestConnectRestExtension implements Monitorable { - - private boolean called = false; - private static MetricName metricName; - - @Override - public void register(ConnectRestExtensionContext restPluginContext) { - called = true; - } - - @Override - public void withPluginMetrics(PluginMetrics metrics) { - metricName = metrics.metricName("name", "description", new LinkedHashMap<>()); - metrics.addMetric(metricName, (Gauge) (config, now) -> called); - } - } - - @Test - public void testMonitorableConnectRestExtension() { - Map configMap = new HashMap<>(baseServerProps()); - configMap.put(RestServerConfig.REST_EXTENSION_CLASSES_CONFIG, MonitorableConnectRestExtension.class.getName()); - - doReturn(plugins).when(herder).plugins(); - doReturn(List.of(new MonitorableConnectRestExtension())).when(plugins).newPlugins(any(), any(), eq(ConnectRestExtension.class)); - - server = new ConnectRestServer(null, restClient, configMap); - server.initializeServer(); - server.initializeResources(herder); - - Map metrics = herder.connectMetrics().metrics().metrics(); - assertTrue(metrics.containsKey(MonitorableConnectRestExtension.metricName)); - assertTrue((boolean) metrics.get(MonitorableConnectRestExtension.metricName).metricValue()); - - server.stop(); - metrics = herder.connectMetrics().metrics().metrics(); - assertFalse(metrics.containsKey(MonitorableConnectRestExtension.metricName)); - } - private void checkCustomizedHttpResponseHeaders(String headerConfig, Map expectedHeaders) throws IOException { Map configMap = baseServerProps(); @@ -436,7 +384,7 @@ private void checkCustomizedHttpResponseHeaders(String headerConfig, Map OBJECT_MAPPER.writeValueAsString(obj)); } - private record TestDTO(String content) { + private static class TestDTO { + private final String content; + @JsonCreator private TestDTO(@JsonProperty(value = "content") String content) { this.content = content; } + public String getContent() { + return content; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + TestDTO testDTO = (TestDTO) o; + return content.equals(testDTO.content); + } + + @Override + public int hashCode() { + return Objects.hash(content); + } } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java index 58a2bd2e54294..e58444ccd4d77 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/RestServerConfigTest.java @@ -21,6 +21,8 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -33,7 +35,7 @@ public class RestServerConfigTest { - private static final List VALID_HEADER_CONFIGS = List.of( + private static final List VALID_HEADER_CONFIGS = Arrays.asList( "add \t Cache-Control: no-cache, no-store, must-revalidate", "add \r X-XSS-Protection: 1; mode=block", "\n add Strict-Transport-Security: max-age=31536000; includeSubDomains", @@ -46,7 +48,7 @@ public class RestServerConfigTest { "adDdate \n Last-Modified: \t 0" ); - private static final List INVALID_HEADER_CONFIGS = List.of( + private static final List INVALID_HEADER_CONFIGS = Arrays.asList( "set \t", "badaction \t X-Frame-Options:DENY", "set add X-XSS-Protection:1", @@ -68,11 +70,11 @@ public void testListenersConfigAllowedValues() { props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999"); config = RestServerConfig.forPublic(null, props); - assertEquals(List.of("http://a.b:9999"), config.listeners()); + assertEquals(Collections.singletonList("http://a.b:9999"), config.listeners()); props.put(RestServerConfig.LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812"); config = RestServerConfig.forPublic(null, props); - assertEquals(List.of("http://a.b:9999", "https://a.b:7812"), config.listeners()); + assertEquals(Arrays.asList("http://a.b:9999", "https://a.b:7812"), config.listeners()); } @Test @@ -111,7 +113,7 @@ public void testAdminListenersConfigAllowedValues() { props.put(RestServerConfig.ADMIN_LISTENERS_CONFIG, "http://a.b:9999, https://a.b:7812"); config = RestServerConfig.forPublic(null, props); - assertEquals(List.of("http://a.b:9999", "https://a.b:7812"), config.adminListeners()); + assertEquals(Arrays.asList("http://a.b:9999", "https://a.b:7812"), config.adminListeners()); RestServerConfig.forPublic(null, props); } @@ -122,7 +124,7 @@ public void testAdminListenersNotAllowingEmptyStrings() { props.put(RestServerConfig.ADMIN_LISTENERS_CONFIG, "http://a.b:9999,"); ConfigException ce = assertThrows(ConfigException.class, () -> RestServerConfig.forPublic(null, props)); - assertTrue(ce.getMessage().contains("admin.listeners")); + assertTrue(ce.getMessage().contains(" admin.listeners")); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java index 96ff0fc9a6240..9731dd6969713 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/ConnectorOffsetsTest.java @@ -18,8 +18,8 @@ import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -43,7 +43,7 @@ public void testConnectorOffsetsToMap() { offset2.put("offset", new byte[]{0x00, 0x1A}); ConnectorOffset connectorOffset2 = new ConnectorOffset(partition2, offset2); - ConnectorOffsets connectorOffsets = new ConnectorOffsets(List.of(connectorOffset1, connectorOffset2)); + ConnectorOffsets connectorOffsets = new ConnectorOffsets(Arrays.asList(connectorOffset1, connectorOffset2)); Map, Map> connectorOffsetsMap = connectorOffsets.toMap(); assertEquals(2, connectorOffsetsMap.size()); assertEquals(offset1, connectorOffsetsMap.get(partition1)); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java index 3d8241e378b9b..9c01f1d92a6f5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/entities/CreateConnectorRequestTest.java @@ -20,7 +20,7 @@ import org.junit.jupiter.api.Test; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -33,7 +33,7 @@ public void testToTargetState() { assertEquals(TargetState.PAUSED, CreateConnectorRequest.InitialState.PAUSED.toTargetState()); assertEquals(TargetState.STOPPED, CreateConnectorRequest.InitialState.STOPPED.toTargetState()); - CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest("test-name", Map.of(), null); + CreateConnectorRequest createConnectorRequest = new CreateConnectorRequest("test-name", Collections.emptyMap(), null); assertNull(createConnectorRequest.initialTargetState()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java index d510c3c475d1b..bec99d2d55c16 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorPluginsResourceTest.java @@ -71,6 +71,7 @@ import java.net.URL; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.Comparator; import java.util.HashMap; import java.util.HashSet; @@ -85,6 +86,7 @@ import jakarta.ws.rs.BadRequestException; +import static java.util.Arrays.asList; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_HEALTH_CHECK_TIMEOUT_MS; import static org.apache.kafka.connect.runtime.rest.RestServer.DEFAULT_REST_REQUEST_TIMEOUT_MS; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -165,37 +167,37 @@ public class ConnectorPluginsResourceTest { ConfigDef connectorConfigDef = ConnectorConfig.configDef(); List connectorConfigValues = connectorConfigDef.validate(PROPS); List partialConnectorConfigValues = connectorConfigDef.validate(PARTIAL_PROPS); - ConfigInfos result = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), connectorConfigValues, List.of()); - ConfigInfos partialResult = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), partialConnectorConfigValues, List.of()); - List configs = new LinkedList<>(result.configs()); - List partialConfigs = new LinkedList<>(partialResult.configs()); + ConfigInfos result = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), connectorConfigValues, Collections.emptyList()); + ConfigInfos partialResult = AbstractHerder.generateResult(ConnectorPluginsResourceTestConnector.class.getName(), connectorConfigDef.configKeys(), partialConnectorConfigValues, Collections.emptyList()); + List configs = new LinkedList<>(result.values()); + List partialConfigs = new LinkedList<>(partialResult.values()); - ConfigKeyInfo configKeyInfo = new ConfigKeyInfo("test.string.config", "STRING", true, null, "HIGH", "Test configuration for string type.", null, -1, "NONE", "test.string.config", List.of()); - ConfigValueInfo configValueInfo = new ConfigValueInfo("test.string.config", "testString", List.of(), List.of(), true); + ConfigKeyInfo configKeyInfo = new ConfigKeyInfo("test.string.config", "STRING", true, null, "HIGH", "Test configuration for string type.", null, -1, "NONE", "test.string.config", Collections.emptyList()); + ConfigValueInfo configValueInfo = new ConfigValueInfo("test.string.config", "testString", Collections.emptyList(), Collections.emptyList(), true); ConfigInfo configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.int.config", "INT", true, null, "MEDIUM", "Test configuration for integer type.", "Test", 1, "MEDIUM", "test.int.config", List.of()); - configValueInfo = new ConfigValueInfo("test.int.config", "1", List.of("1", "2", "3"), List.of(), true); + configKeyInfo = new ConfigKeyInfo("test.int.config", "INT", true, null, "MEDIUM", "Test configuration for integer type.", "Test", 1, "MEDIUM", "test.int.config", Collections.emptyList()); + configValueInfo = new ConfigValueInfo("test.int.config", "1", asList("1", "2", "3"), Collections.emptyList(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.string.config.default", "STRING", false, "", "LOW", "Test configuration with default value.", null, -1, "NONE", "test.string.config.default", List.of()); - configValueInfo = new ConfigValueInfo("test.string.config.default", "", List.of(), List.of(), true); + configKeyInfo = new ConfigKeyInfo("test.string.config.default", "STRING", false, "", "LOW", "Test configuration with default value.", null, -1, "NONE", "test.string.config.default", Collections.emptyList()); + configValueInfo = new ConfigValueInfo("test.string.config.default", "", Collections.emptyList(), Collections.emptyList(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - configKeyInfo = new ConfigKeyInfo("test.list.config", "LIST", true, null, "HIGH", "Test configuration for list type.", "Test", 2, "LONG", "test.list.config", List.of()); - configValueInfo = new ConfigValueInfo("test.list.config", "a,b", List.of("a", "b", "c"), List.of(), true); + configKeyInfo = new ConfigKeyInfo("test.list.config", "LIST", true, null, "HIGH", "Test configuration for list type.", "Test", 2, "LONG", "test.list.config", Collections.emptyList()); + configValueInfo = new ConfigValueInfo("test.list.config", "a,b", asList("a", "b", "c"), Collections.emptyList(), true); configInfo = new ConfigInfo(configKeyInfo, configValueInfo); configs.add(configInfo); partialConfigs.add(configInfo); - CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), ERROR_COUNT, List.of("Test"), configs); - PARTIAL_CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), PARTIAL_CONFIG_ERROR_COUNT, List.of("Test"), partialConfigs); + CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), ERROR_COUNT, Collections.singletonList("Test"), configs); + PARTIAL_CONFIG_INFOS = new ConfigInfos(ConnectorPluginsResourceTestConnector.class.getName(), PARTIAL_CONFIG_ERROR_COUNT, Collections.singletonList("Test"), partialConfigs); } private final Herder herder = mock(DistributedHerder.class); @@ -240,7 +242,7 @@ public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() th ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - List.of("Test") + Collections.singletonList("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -256,8 +258,8 @@ public void testValidateConfigWithSingleErrorDueToMissingConnectorClassname() th assertEquals(PARTIAL_CONFIG_INFOS.errorCount(), configInfos.errorCount()); assertEquals(PARTIAL_CONFIG_INFOS.groups(), configInfos.groups()); assertEquals( - new HashSet<>(PARTIAL_CONFIG_INFOS.configs()), - new HashSet<>(configInfos.configs()) + new HashSet<>(PARTIAL_CONFIG_INFOS.values()), + new HashSet<>(configInfos.values()) ); verify(herder).validateConnectorConfig(eq(PARTIAL_PROPS), any(), anyBoolean()); } @@ -284,7 +286,7 @@ public void testValidateConfigWithSimpleName() throws Throwable { ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - List.of("Test") + Collections.singletonList("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -298,7 +300,7 @@ public void testValidateConfigWithSimpleName() throws Throwable { assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); - assertEquals(new HashSet<>(CONFIG_INFOS.configs()), new HashSet<>(configInfos.configs())); + assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); verify(herder).validateConnectorConfig(eq(PROPS), any(), anyBoolean()); } @@ -324,7 +326,7 @@ public void testValidateConfigWithAlias() throws Throwable { ConnectorPluginsResourceTestConnector.class.getName(), resultConfigKeys, configValues, - List.of("Test") + Collections.singletonList("Test") ); configInfosCallback.getValue().onCompletion(null, configInfos); return null; @@ -338,7 +340,7 @@ public void testValidateConfigWithAlias() throws Throwable { assertEquals(CONFIG_INFOS.name(), configInfos.name()); assertEquals(0, configInfos.errorCount()); assertEquals(CONFIG_INFOS.groups(), configInfos.groups()); - assertEquals(new HashSet<>(CONFIG_INFOS.configs()), new HashSet<>(configInfos.configs())); + assertEquals(new HashSet<>(CONFIG_INFOS.values()), new HashSet<>(configInfos.values())); verify(herder).validateConnectorConfig(eq(PROPS), any(), anyBoolean()); } @@ -372,8 +374,8 @@ public void testConnectorPluginsIncludesClassTypeAndVersionInformation() throws ClassLoader classLoader = ConnectorPluginsResourceTest.class.getClassLoader(); PluginInfo sinkInfo = new PluginInfo(new PluginDesc<>(SampleSinkConnector.class, SampleSinkConnector.VERSION, PluginType.SINK, classLoader)); PluginInfo sourceInfo = new PluginInfo(new PluginDesc<>(SampleSourceConnector.class, SampleSourceConnector.VERSION, PluginType.SOURCE, classLoader)); - assertEquals(PluginType.SINK.toString(), sinkInfo.type().toString()); - assertEquals(PluginType.SOURCE.toString(), sourceInfo.type().toString()); + assertEquals(PluginType.SINK.toString(), sinkInfo.type()); + assertEquals(PluginType.SOURCE.toString(), sourceInfo.type()); assertEquals(SampleSinkConnector.VERSION, sinkInfo.version()); assertEquals(SampleSourceConnector.VERSION, sourceInfo.version()); assertEquals(SampleSinkConnector.class.getName(), sinkInfo.className()); @@ -426,14 +428,14 @@ public void testListAllPlugins() { @Test public void testGetConnectorConfigDef() { String connName = ConnectorPluginsResourceTestConnector.class.getName(); - when(herder.connectorPluginConfig(eq(connName), eq(null))).thenAnswer(answer -> { + when(herder.connectorPluginConfig(eq(connName))).thenAnswer(answer -> { List results = new ArrayList<>(); for (ConfigDef.ConfigKey configKey : ConnectorPluginsResourceTestConnector.CONFIG_DEF.configKeys().values()) { results.add(AbstractHerder.convertConfigKey(configKey)); } return results; }); - List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName, null); + List connectorConfigDef = connectorPluginsResource.getConnectorConfigDef(connName); assertEquals(ConnectorPluginsResourceTestConnector.CONFIG_DEF.names().size(), connectorConfigDef.size()); for (String config : ConnectorPluginsResourceTestConnector.CONFIG_DEF.names()) { Optional cki = connectorConfigDef.stream().filter(c -> c.name().equals(config)).findFirst(); @@ -491,7 +493,7 @@ private static class IntegerRecommender implements Recommender { @Override public List validValues(String name, Map parsedConfig) { - return List.of(1, 2, 3); + return asList(1, 2, 3); } @Override @@ -503,7 +505,7 @@ public boolean visible(String name, Map parsedConfig) { private static class ListRecommender implements Recommender { @Override public List validValues(String name, Map parsedConfig) { - return List.of("a", "b", "c"); + return asList("a", "b", "c"); } @Override diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java index 26a4665824802..9dfead77220f6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/ConnectorsResourceTest.java @@ -55,7 +55,9 @@ import java.net.URI; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -140,14 +142,14 @@ public class ConnectorsResourceTest { CONNECTOR_CONFIG_WITH_EMPTY_NAME.put(ConnectorConfig.NAME_CONFIG, ""); CONNECTOR_CONFIG_WITH_EMPTY_NAME.put("sample_config", "test_config"); } - private static final List CONNECTOR_TASK_NAMES = List.of( + private static final List CONNECTOR_TASK_NAMES = Arrays.asList( new ConnectorTaskId(CONNECTOR_NAME, 0), new ConnectorTaskId(CONNECTOR_NAME, 1) ); private static final List> TASK_CONFIGS = new ArrayList<>(); static { - TASK_CONFIGS.add(Map.of("config", "value")); - TASK_CONFIGS.add(Map.of("config", "other_value")); + TASK_CONFIGS.add(Collections.singletonMap("config", "value")); + TASK_CONFIGS.add(Collections.singletonMap("config", "other_value")); } private static final List TASK_INFOS = new ArrayList<>(); static { @@ -156,7 +158,7 @@ public class ConnectorsResourceTest { } private static final Set CONNECTOR_ACTIVE_TOPICS = new HashSet<>( - List.of("foo_topic", "bar_topic")); + Arrays.asList("foo_topic", "bar_topic")); private static final RestRequestTimeout REQUEST_TIMEOUT = RestRequestTimeout.constant( DEFAULT_REST_REQUEST_TIMEOUT_MS, @@ -194,16 +196,16 @@ public void testListConnectors() { MultivaluedMap queryParams = new MultivaluedHashMap<>(); queryParams.putSingle("forward", "true"); when(forward.getQueryParameters()).thenReturn(queryParams); - when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); Collection connectors = (Collection) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), new HashSet<>(connectors)); + assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), new HashSet<>(connectors)); } @Test public void testExpandConnectorsStatus() { - when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorStateInfo connector = mock(ConnectorStateInfo.class); ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class); when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2); @@ -216,14 +218,14 @@ public void testExpandConnectorsStatus() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); + assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); assertEquals(connector, expanded.get(CONNECTOR_NAME).get("status")); } @Test public void testExpandConnectorsInfo() { - when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorInfo connector = mock(ConnectorInfo.class); ConnectorInfo connector2 = mock(ConnectorInfo.class); when(herder.connectorInfo(CONNECTOR2_NAME)).thenReturn(connector2); @@ -236,14 +238,14 @@ public void testExpandConnectorsInfo() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); + assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("info")); assertEquals(connector, expanded.get(CONNECTOR_NAME).get("info")); } @Test public void testFullExpandConnectors() { - when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorInfo connectorInfo = mock(ConnectorInfo.class); ConnectorInfo connectorInfo2 = mock(ConnectorInfo.class); when(herder.connectorInfo(CONNECTOR2_NAME)).thenReturn(connectorInfo2); @@ -255,12 +257,12 @@ public void testFullExpandConnectors() { forward = mock(UriInfo.class); MultivaluedMap queryParams = new MultivaluedHashMap<>(); - queryParams.put("expand", List.of("info", "status")); + queryParams.put("expand", Arrays.asList("info", "status")); when(forward.getQueryParameters()).thenReturn(queryParams); Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Set.of(CONNECTOR_NAME, CONNECTOR2_NAME), expanded.keySet()); + assertEquals(new HashSet<>(Arrays.asList(CONNECTOR_NAME, CONNECTOR2_NAME)), expanded.keySet()); assertEquals(connectorInfo2, expanded.get(CONNECTOR2_NAME).get("info")); assertEquals(connectorInfo, expanded.get(CONNECTOR_NAME).get("info")); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); @@ -269,7 +271,7 @@ public void testFullExpandConnectors() { @Test public void testExpandConnectorsWithConnectorNotFound() { - when(herder.connectors()).thenReturn(List.of(CONNECTOR2_NAME, CONNECTOR_NAME)); + when(herder.connectors()).thenReturn(Arrays.asList(CONNECTOR2_NAME, CONNECTOR_NAME)); ConnectorStateInfo connector2 = mock(ConnectorStateInfo.class); when(herder.connectorStatus(CONNECTOR2_NAME)).thenReturn(connector2); doThrow(mock(NotFoundException.class)).when(herder).connectorStatus(CONNECTOR_NAME); @@ -281,7 +283,7 @@ public void testExpandConnectorsWithConnectorNotFound() { Map> expanded = (Map>) connectorsResource.listConnectors(forward, NULL_HEADERS).getEntity(); // Ordering isn't guaranteed, compare sets - assertEquals(Set.of(CONNECTOR2_NAME), expanded.keySet()); + assertEquals(Collections.singleton(CONNECTOR2_NAME), expanded.keySet()); assertEquals(connector2, expanded.get(CONNECTOR2_NAME).get("status")); } @@ -289,7 +291,7 @@ public void testExpandConnectorsWithConnectorNotFound() { @Test public void testCreateConnector() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -302,7 +304,7 @@ public void testCreateConnector() throws Throwable { @Test public void testCreateConnectorWithPausedInitialState() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.PAUSED); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.PAUSED); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -315,7 +317,7 @@ public void testCreateConnectorWithPausedInitialState() throws Throwable { @Test public void testCreateConnectorWithStoppedInitialState() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.STOPPED); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.STOPPED); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -328,7 +330,7 @@ public void testCreateConnectorWithStoppedInitialState() throws Throwable { @Test public void testCreateConnectorWithRunningInitialState() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.RUNNING); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), CreateConnectorRequest.InitialState.RUNNING); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME, CONNECTOR_CONFIG, @@ -341,7 +343,7 @@ public void testCreateConnectorWithRunningInitialState() throws Throwable { @Test public void testCreateConnectorNotLeader() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackNotLeaderException(cb).when(herder) @@ -355,7 +357,7 @@ public void testCreateConnectorNotLeader() throws Throwable { @Test public void testCreateConnectorWithHeaders() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); HttpHeaders httpHeaders = mock(HttpHeaders.class); expectAndCallbackNotLeaderException(cb) @@ -369,7 +371,7 @@ public void testCreateConnectorWithHeaders() throws Throwable { @Test public void testCreateConnectorExists() { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new AlreadyExistsException("already exists")) @@ -494,7 +496,7 @@ public void testGetTaskConfigs() throws Throwable { connectorTask0Configs.put("connector-task1-config0", "321"); connectorTask0Configs.put("connector-task1-config1", "654"); final ConnectorTaskId connector2Task0 = new ConnectorTaskId(CONNECTOR2_NAME, 0); - final Map connector2Task0Configs = Map.of("connector2-task0-config0", "789"); + final Map connector2Task0Configs = Collections.singletonMap("connector2-task0-config0", "789"); final List expectedTasksConnector = new ArrayList<>(); expectedTasksConnector.add(new TaskInfo(connectorTask0, connectorTask0Configs)); @@ -527,7 +529,7 @@ public void testPutConnectorConfig() throws Throwable { @Test public void testCreateConnectorWithSpecialCharsInName() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME_SPECIAL_CHARS, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_SPECIAL_CHARS), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_SPECIAL_CHARS), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME_SPECIAL_CHARS, CONNECTOR_CONFIG, @@ -542,7 +544,7 @@ public void testCreateConnectorWithSpecialCharsInName() throws Throwable { @Test public void testCreateConnectorWithControlSequenceInName() throws Throwable { CreateConnectorRequest body = new CreateConnectorRequest(CONNECTOR_NAME_CONTROL_SEQUENCES1, - Map.of(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_CONTROL_SEQUENCES1), null); + Collections.singletonMap(ConnectorConfig.NAME_CONFIG, CONNECTOR_NAME_CONTROL_SEQUENCES1), null); final ArgumentCaptor>> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackResult(cb, new Herder.Created<>(true, new ConnectorInfo(CONNECTOR_NAME_CONTROL_SEQUENCES1, CONNECTOR_CONFIG, @@ -686,9 +688,11 @@ public void testRestartConnectorAndTasksRebalanceNeeded() { @Test public void testRestartConnectorAndTasksRequestAccepted() throws Throwable { ConnectorStateInfo.ConnectorState state = new ConnectorStateInfo.ConnectorState( - AbstractStatus.State.RESTARTING.name(), "foo", null, null + AbstractStatus.State.RESTARTING.name(), + "foo", + null ); - ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, List.of(), ConnectorType.SOURCE); + ConnectorStateInfo connectorStateInfo = new ConnectorStateInfo(CONNECTOR_NAME, state, Collections.emptyList(), ConnectorType.SOURCE); RestartRequest restartRequest = new RestartRequest(CONNECTOR_NAME, true, false); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); @@ -857,9 +861,9 @@ public void testGetOffsetsConnectorNotFound() { @Test public void testGetOffsets() throws Throwable { final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); - ConnectorOffsets offsets = new ConnectorOffsets(List.of( - new ConnectorOffset(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), - new ConnectorOffset(Map.of("partitionKey", "partitionValue2"), Map.of("offsetKey", "offsetValue")) + ConnectorOffsets offsets = new ConnectorOffsets(Arrays.asList( + new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), + new ConnectorOffset(Collections.singletonMap("partitionKey", "partitionValue2"), Collections.singletonMap("offsetKey", "offsetValue")) )); expectAndCallbackResult(cb, offsets).when(herder).connectorOffsets(eq(CONNECTOR_NAME), cb.capture()); @@ -869,7 +873,7 @@ public void testGetOffsets() throws Throwable { @Test public void testAlterOffsetsEmptyOffsets() { assertThrows(BadRequestException.class, () -> connectorsResource.alterConnectorOffsets( - false, NULL_HEADERS, CONNECTOR_NAME, new ConnectorOffsets(List.of()))); + false, NULL_HEADERS, CONNECTOR_NAME, new ConnectorOffsets(Collections.emptyList()))); } @Test @@ -877,7 +881,7 @@ public void testAlterOffsetsNotLeader() throws Throwable { Map partition = new HashMap<>(); Map offset = new HashMap<>(); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackNotLeaderException(cb).when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture()); @@ -892,7 +896,7 @@ public void testAlterOffsetsConnectorNotFound() { Map partition = new HashMap<>(); Map offset = new HashMap<>(); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); expectAndCallbackException(cb, new NotFoundException("Connector not found")) .when(herder).alterConnectorOffsets(eq(CONNECTOR_NAME), eq(body.toMap()), cb.capture()); @@ -902,10 +906,10 @@ public void testAlterOffsetsConnectorNotFound() { @Test public void testAlterOffsets() throws Throwable { - Map partition = Map.of("partitionKey", "partitionValue"); - Map offset = Map.of("offsetKey", "offsetValue"); + Map partition = Collections.singletonMap("partitionKey", "partitionValue"); + Map offset = Collections.singletonMap("offsetKey", "offsetValue"); ConnectorOffset connectorOffset = new ConnectorOffset(partition, offset); - ConnectorOffsets body = new ConnectorOffsets(List.of(connectorOffset)); + ConnectorOffsets body = new ConnectorOffsets(Collections.singletonList(connectorOffset)); final ArgumentCaptor> cb = ArgumentCaptor.forClass(Callback.class); Message msg = new Message("The offsets for this connector have been altered successfully"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java index 6cbe164e26213..aee85a86c2ab2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/InternalConnectResourceTest.java @@ -39,6 +39,7 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Base64; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -65,8 +66,8 @@ public class InternalConnectResourceTest { private static final HttpHeaders NULL_HEADERS = null; private static final List> TASK_CONFIGS = new ArrayList<>(); static { - TASK_CONFIGS.add(Map.of("config", "value")); - TASK_CONFIGS.add(Map.of("config", "other_value")); + TASK_CONFIGS.add(Collections.singletonMap("config", "value")); + TASK_CONFIGS.add(Collections.singletonMap("config", "other_value")); } private static final String FENCE_PATH = "/connectors/" + CONNECTOR_NAME + "/fence"; private static final String TASK_CONFIGS_PATH = "/connectors/" + CONNECTOR_NAME + "/tasks"; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java index 67ccb519d05ef..c73bba8c84368 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/resources/LoggingResourceTest.java @@ -32,8 +32,8 @@ import org.slf4j.event.Level; import java.util.Arrays; +import java.util.Collections; import java.util.List; -import java.util.Map; import jakarta.ws.rs.core.Response; @@ -90,7 +90,7 @@ public void setLevelWithEmptyArgTest() { BadRequestException.class, () -> loggingResource.setLevel( "@root", - Map.of(), + Collections.emptyMap(), scope ) ); @@ -104,7 +104,7 @@ public void setLevelWithInvalidArgTest() { NotFoundException.class, () -> loggingResource.setLevel( "@root", - Map.of("level", "HIGH"), + Collections.singletonMap("level", "HIGH"), scope ) ); @@ -130,7 +130,7 @@ public void testSetLevelWorkerScope() { private void testSetLevelWorkerScope(String scope, boolean expectWarning) { final String logger = "org.apache.kafka.connect"; final String level = "TRACE"; - final List expectedLoggers = List.of( + final List expectedLoggers = Arrays.asList( "org.apache.kafka.connect", "org.apache.kafka.connect.runtime.distributed.DistributedHerder" ); @@ -138,7 +138,7 @@ private void testSetLevelWorkerScope(String scope, boolean expectWarning) { List actualLoggers; try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(LoggingResource.class)) { - Response response = loggingResource.setLevel(logger, Map.of("level", level), scope); + Response response = loggingResource.setLevel(logger, Collections.singletonMap("level", level), scope); assertEquals(Response.Status.OK.getStatusCode(), response.getStatus()); actualLoggers = (List) response.getEntity(); long warningMessages = logCaptureAppender.getEvents().stream() @@ -159,7 +159,7 @@ public void testSetLevelClusterScope() { final String logger = "org.apache.kafka.connect"; final String level = "TRACE"; - Response response = loggingResource.setLevel(logger, Map.of("level", level), "cluster"); + Response response = loggingResource.setLevel(logger, Collections.singletonMap("level", level), "cluster"); assertEquals(Response.Status.NO_CONTENT.getStatusCode(), response.getStatus()); assertNull(response.getEntity()); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java index c9eedccdc64ee..408f4cb886b29 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/rest/util/SSLUtilsTest.java @@ -25,8 +25,8 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; import java.util.HashMap; -import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -36,6 +36,7 @@ public class SSLUtilsTest { + private Map sslConfig; private String keystorePath; private String truststorePath; private Password keystorePassword; @@ -44,7 +45,7 @@ public class SSLUtilsTest { @BeforeEach public void before() throws Exception { CertStores serverCertStores = new CertStores(true, "localhost"); - Map sslConfig = serverCertStores.getUntrustingConfig(); + sslConfig = serverCertStores.getUntrustingConfig(); keystorePath = sslConfig.get("ssl.keystore.location").toString(); truststorePath = sslConfig.get("ssl.truststore.location").toString(); keystorePassword = (Password) sslConfig.get("ssl.keystore.password"); @@ -60,12 +61,12 @@ public void testGetOrDefault() { Map map = new HashMap<>(); map.put("exists", "value"); - assertEquals(value, SSLUtils.getOrDefault(map, existingKey, defaultValue)); - assertEquals(defaultValue, SSLUtils.getOrDefault(map, missingKey, defaultValue)); + assertEquals(SSLUtils.getOrDefault(map, existingKey, defaultValue), value); + assertEquals(SSLUtils.getOrDefault(map, missingKey, defaultValue), defaultValue); } @Test - public void testCreateServerSideSslContextFactory() { + public void testCreateServerSideSslContextFactory() throws Exception { Map configMap = new HashMap<>(); configMap.put("ssl.keystore.location", keystorePath); configMap.put("ssl.keystore.password", keystorePassword.value()); @@ -156,7 +157,7 @@ public void testCreateServerSideSslContextFactoryDefaultValues() { assertEquals(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ssl.getKeyStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ssl.getTrustStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, ssl.getProtocol()); - assertArrayEquals(List.of(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); + assertArrayEquals(Arrays.asList(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); assertEquals(SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ssl.getKeyManagerFactoryAlgorithm()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ssl.getTrustManagerFactoryAlgorithm()); assertFalse(ssl.getNeedClientAuth()); @@ -181,7 +182,7 @@ public void testCreateClientSideSslContextFactoryDefaultValues() { assertEquals(SslConfigs.DEFAULT_SSL_KEYSTORE_TYPE, ssl.getKeyStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTSTORE_TYPE, ssl.getTrustStoreType()); assertEquals(SslConfigs.DEFAULT_SSL_PROTOCOL, ssl.getProtocol()); - assertArrayEquals(List.of(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); + assertArrayEquals(Arrays.asList(SslConfigs.DEFAULT_SSL_ENABLED_PROTOCOLS.split("\\s*,\\s*")).toArray(), ssl.getIncludeProtocols()); assertEquals(SslConfigs.DEFAULT_SSL_KEYMANGER_ALGORITHM, ssl.getKeyManagerFactoryAlgorithm()); assertEquals(SslConfigs.DEFAULT_SSL_TRUSTMANAGER_ALGORITHM, ssl.getTrustManagerFactoryAlgorithm()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java index c4d52fe4b4e5b..4d8c25932fe42 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneConfigTest.java @@ -48,7 +48,6 @@ private Map sslProps() { private Map baseWorkerProps() { return new HashMap<>() { { - put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); put(WorkerConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); put(WorkerConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, "/tmp/foo"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java index dc6325bfa36bc..c220ca9c70d0c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/runtime/standalone/StandaloneHerderTest.java @@ -30,7 +30,6 @@ import org.apache.kafka.connect.runtime.ConnectorStatus; import org.apache.kafka.connect.runtime.Herder; import org.apache.kafka.connect.runtime.HerderConnectorContext; -import org.apache.kafka.connect.runtime.MockConnectMetrics; import org.apache.kafka.connect.runtime.RestartPlan; import org.apache.kafka.connect.runtime.RestartRequest; import org.apache.kafka.connect.runtime.SinkConnectorConfig; @@ -39,7 +38,6 @@ import org.apache.kafka.connect.runtime.TaskConfig; import org.apache.kafka.connect.runtime.TaskStatus; import org.apache.kafka.connect.runtime.Worker; -import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.WorkerConfigTransformer; import org.apache.kafka.connect.runtime.distributed.SampleConnectorClientConfigOverridePolicy; import org.apache.kafka.connect.runtime.isolation.LoaderSwap; @@ -58,7 +56,6 @@ import org.apache.kafka.connect.storage.AppliedConnectorConfig; import org.apache.kafka.connect.storage.ClusterConfigState; import org.apache.kafka.connect.storage.MemoryConfigBackingStore; -import org.apache.kafka.connect.storage.SimpleHeaderConverter; import org.apache.kafka.connect.storage.StatusBackingStore; import org.apache.kafka.connect.util.Callback; import org.apache.kafka.connect.util.ConnectorTaskId; @@ -75,16 +72,20 @@ import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; +import static java.util.Collections.emptyList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.runtime.TopicCreationConfig.DEFAULT_TOPIC_CREATION_PREFIX; import static org.apache.kafka.connect.runtime.TopicCreationConfig.PARTITIONS_CONFIG; import static org.apache.kafka.connect.runtime.TopicCreationConfig.REPLICATION_FACTOR_CONFIG; @@ -129,8 +130,6 @@ private enum SourceSink { @Mock protected Worker worker; @Mock - protected WorkerConfig workerConfig; - @Mock protected WorkerConfigTransformer transformer; @Mock private Plugins plugins; @@ -145,12 +144,9 @@ private enum SourceSink { noneConnectorClientConfigOverridePolicy = new SampleConnectorClientConfigOverridePolicy(); public void initialize(boolean mockTransform) { - when(worker.getPlugins()).thenReturn(plugins); - when(worker.metrics()).thenReturn(new MockConnectMetrics()); herder = mock(StandaloneHerder.class, withSettings() .useConstructor(worker, WORKER_ID, KAFKA_CLUSTER_ID, statusBackingStore, new MemoryConfigBackingStore(transformer), noneConnectorClientConfigOverridePolicy, new MockTime()) .defaultAnswer(CALLS_REAL_METHODS)); - verify(worker).getPlugins(); createCallback = new FutureCallback<>(); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); if (mockTransform) @@ -177,7 +173,6 @@ public void testCreateSourceConnector() throws Exception { } @Test - @SuppressWarnings("rawtypes") public void testCreateConnectorFailedValidation() { initialize(false); // Basic validation should be performed and return an error, but should still evaluate the connector's config @@ -190,15 +185,14 @@ public void testCreateConnectorFailedValidation() { final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); when(worker.getPlugins()).thenReturn(plugins); - when(worker.config()).thenReturn(workerConfig); - when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); - when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); - when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); + when(plugins.newConnector(anyString())).thenReturn(connectorMock); + when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); when(connectorMock.config()).thenReturn(new ConfigDef()); + ConfigValue validatedValue = new ConfigValue("foo.bar"); - when(connectorMock.validate(config)).thenReturn(new Config(List.of(validatedValue))); + when(connectorMock.validate(config)).thenReturn(new Config(singletonList(validatedValue))); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); @@ -261,7 +255,7 @@ public void testCreateConnectorWithStoppedInitialState() throws Exception { herder.putConnectorConfig(CONNECTOR_NAME, config, TargetState.STOPPED, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); assertEquals( - new ConnectorInfo(CONNECTOR_NAME, connectorConfig(SourceSink.SINK), List.of(), ConnectorType.SINK), + new ConnectorInfo(CONNECTOR_NAME, connectorConfig(SourceSink.SINK), Collections.emptyList(), ConnectorType.SINK), connectorInfo.result() ); verify(loaderSwap).close(); @@ -275,8 +269,7 @@ public void testDestroyConnector() throws Exception { Map config = connectorConfig(SourceSink.SOURCE); expectConfigValidation(SourceSink.SOURCE, config); - when(statusBackingStore.getAll(CONNECTOR_NAME)).thenReturn(List.of()); - when(worker.connectorVersion(CONNECTOR_NAME)).thenReturn(null); + when(statusBackingStore.getAll(CONNECTOR_NAME)).thenReturn(Collections.emptyList()); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); @@ -311,11 +304,11 @@ public void testRestartConnectorSameTaskConfigs() throws Exception { mockStartConnector(config, TargetState.STARTED, TargetState.STARTED, null); - when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); when(worker.getPlugins()).thenReturn(plugins); // same task configs as earlier, so don't expect a new set of tasks to be brought up when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, config, true))) - .thenReturn(List.of(taskConfig(SourceSink.SOURCE))); + .thenReturn(Collections.singletonList(taskConfig(SourceSink.SOURCE))); herder.putConnectorConfig(CONNECTOR_NAME, config, false, createCallback); Herder.Created connectorInfo = createCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); @@ -342,13 +335,13 @@ public void testRestartConnectorNewTaskConfigs() throws Exception { mockStartConnector(config, TargetState.STARTED, TargetState.STARTED, null); - when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); when(worker.getPlugins()).thenReturn(plugins); // changed task configs, expect a new set of tasks to be brought up (and the old ones to be stopped) Map taskConfigs = taskConfig(SourceSink.SOURCE); taskConfigs.put("k", "v"); when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, config, true))) - .thenReturn(List.of(taskConfigs)); + .thenReturn(Collections.singletonList(taskConfigs)); when(worker.startSourceTask(eq(new ConnectorTaskId(CONNECTOR_NAME, 0)), any(), eq(connectorConfig(SourceSink.SOURCE)), eq(taskConfigs), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -397,13 +390,13 @@ public void testRestartTask() throws Exception { ClusterConfigState configState = new ClusterConfigState( -1, null, - Map.of(CONNECTOR_NAME, 1), - Map.of(CONNECTOR_NAME, connectorConfig), - Map.of(CONNECTOR_NAME, TargetState.STARTED), - Map.of(taskId, taskConfig(SourceSink.SOURCE)), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.singletonMap(CONNECTOR_NAME, 1), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), + Collections.singletonMap(taskId, taskConfig(SourceSink.SOURCE)), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -432,13 +425,13 @@ public void testRestartTaskFailureOnStart() throws Exception { ClusterConfigState configState = new ClusterConfigState( -1, null, - Map.of(CONNECTOR_NAME, 1), - Map.of(CONNECTOR_NAME, connectorConfig), - Map.of(CONNECTOR_NAME, TargetState.STARTED), - Map.of(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.singletonMap(CONNECTOR_NAME, 1), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), + Collections.singletonMap(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -530,7 +523,6 @@ public void testRestartConnectorAndTasksOnlyConnector() throws Exception { expectConfigValidation(SourceSink.SINK, connectorConfig); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); - when(worker.connectorVersion(CONNECTOR_NAME)).thenReturn(null); mockStartConnector(connectorConfig, null, TargetState.STARTED, null); @@ -556,28 +548,27 @@ public void testRestartConnectorAndTasksOnlyTasks() throws Exception { when(restartPlan.shouldRestartTasks()).thenReturn(true); when(restartPlan.restartTaskCount()).thenReturn(1); when(restartPlan.totalTaskCount()).thenReturn(1); - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); when(restartPlan.restartConnectorStateInfo()).thenReturn(connectorStateInfo); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); expectAdd(SourceSink.SINK); - when(worker.taskVersion(any())).thenReturn(null); Map connectorConfig = connectorConfig(SourceSink.SINK); expectConfigValidation(SourceSink.SINK, connectorConfig); - doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); + doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); ClusterConfigState configState = new ClusterConfigState( -1, null, - Map.of(CONNECTOR_NAME, 1), - Map.of(CONNECTOR_NAME, connectorConfig), - Map.of(CONNECTOR_NAME, TargetState.STARTED), - Map.of(taskId, taskConfig(SourceSink.SINK)), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.singletonMap(CONNECTOR_NAME, 1), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), + Collections.singletonMap(taskId, taskConfig(SourceSink.SINK)), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -608,34 +599,32 @@ public void testRestartConnectorAndTasksBoth() throws Exception { when(restartPlan.shouldRestartTasks()).thenReturn(true); when(restartPlan.restartTaskCount()).thenReturn(1); when(restartPlan.totalTaskCount()).thenReturn(1); - when(restartPlan.taskIdsToRestart()).thenReturn(List.of(taskId)); + when(restartPlan.taskIdsToRestart()).thenReturn(Collections.singletonList(taskId)); when(restartPlan.restartConnectorStateInfo()).thenReturn(connectorStateInfo); doReturn(Optional.of(restartPlan)).when(herder).buildRestartPlan(restartRequest); ArgumentCaptor taskStatus = ArgumentCaptor.forClass(TaskStatus.class); expectAdd(SourceSink.SINK, false); - when(worker.connectorVersion(any())).thenReturn(null); - when(worker.taskVersion(any())).thenReturn(null); Map connectorConfig = connectorConfig(SourceSink.SINK); expectConfigValidation(SourceSink.SINK, connectorConfig); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); - doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); + doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); mockStartConnector(connectorConfig, null, TargetState.STARTED, null); ClusterConfigState configState = new ClusterConfigState( -1, null, - Map.of(CONNECTOR_NAME, 1), - Map.of(CONNECTOR_NAME, connectorConfig), - Map.of(CONNECTOR_NAME, TargetState.STARTED), - Map.of(taskId, taskConfig(SourceSink.SINK)), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.singletonMap(CONNECTOR_NAME, 1), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), + Collections.singletonMap(taskId, taskConfig(SourceSink.SINK)), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); @@ -690,7 +679,7 @@ public void testAccessors() throws Exception { Callback> taskConfigsCb = mock(Callback.class); // Check accessors with empty worker - doNothing().when(listConnectorsCb).onCompletion(null, Set.of()); + doNothing().when(listConnectorsCb).onCompletion(null, Collections.EMPTY_SET); doNothing().when(connectorInfoCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(taskConfigsCb).onCompletion(any(NotFoundException.class), isNull()); doNothing().when(connectorConfigCb).onCompletion(any(NotFoundException.class), isNull()); @@ -699,13 +688,13 @@ public void testAccessors() throws Exception { expectConfigValidation(SourceSink.SOURCE, connConfig); // Validate accessors with 1 connector - doNothing().when(listConnectorsCb).onCompletion(null, Set.of(CONNECTOR_NAME)); - ConnectorInfo connInfo = new ConnectorInfo(CONNECTOR_NAME, connConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), + doNothing().when(listConnectorsCb).onCompletion(null, singleton(CONNECTOR_NAME)); + ConnectorInfo connInfo = new ConnectorInfo(CONNECTOR_NAME, connConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); doNothing().when(connectorInfoCb).onCompletion(null, connInfo); TaskInfo taskInfo = new TaskInfo(new ConnectorTaskId(CONNECTOR_NAME, 0), taskConfig(SourceSink.SOURCE)); - doNothing().when(taskConfigsCb).onCompletion(null, List.of(taskInfo)); + doNothing().when(taskConfigsCb).onCompletion(null, singletonList(taskInfo)); // All operations are synchronous for StandaloneHerder, so we don't need to actually wait after making each call herder.connectors(listConnectorsCb); @@ -754,8 +743,8 @@ public void testPutConnectorConfig() throws Exception { // Generate same task config, but from different connector config, resulting // in task restarts when(worker.connectorTaskConfigs(CONNECTOR_NAME, new SourceConnectorConfig(plugins, newConnConfig, true))) - .thenReturn(List.of(taskConfig(SourceSink.SOURCE))); - doNothing().when(worker).stopAndAwaitTasks(List.of(taskId)); + .thenReturn(singletonList(taskConfig(SourceSink.SOURCE))); + doNothing().when(worker).stopAndAwaitTasks(Collections.singletonList(taskId)); doNothing().when(statusBackingStore).put(new TaskStatus(taskId, TaskStatus.State.DESTROYED, WORKER_ID, 0)); when(worker.startSourceTask(eq(taskId), any(), eq(newConnConfig), eq(taskConfig(SourceSink.SOURCE)), eq(herder), eq(TargetState.STARTED))).thenReturn(true); @@ -769,7 +758,7 @@ public void testPutConnectorConfig() throws Exception { doNothing().when(connectorConfigCb).onCompletion(null, newConnConfig); herder.putConnectorConfig(CONNECTOR_NAME, newConnConfig, true, reconfigureCallback); Herder.Created newConnectorInfo = reconfigureCallback.get(1000L, TimeUnit.SECONDS); - ConnectorInfo newConnInfo = new ConnectorInfo(CONNECTOR_NAME, newConnConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), + ConnectorInfo newConnInfo = new ConnectorInfo(CONNECTOR_NAME, newConnConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); assertEquals(newConnInfo, newConnectorInfo.result()); @@ -789,7 +778,7 @@ public void testPatchConnectorConfigNotFound() { ArgumentCaptor exceptionCaptor = ArgumentCaptor.forClass(NotFoundException.class); verify(patchCallback).onCompletion(exceptionCaptor.capture(), isNull()); - assertEquals("Connector " + CONNECTOR_NAME + " not found", exceptionCaptor.getValue().getMessage()); + assertEquals(exceptionCaptor.getValue().getMessage(), "Connector " + CONNECTOR_NAME + " not found"); } @Test @@ -848,7 +837,7 @@ private void expectConnectorStartingWithoutTasks(Map config, boo eq(herder), eq(TargetState.STARTED), onStart.capture()); ConnectorConfig connConfig = new SourceConnectorConfig(plugins, config, true); when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)) - .thenReturn(List.of()); + .thenReturn(emptyList()); } @Test @@ -857,11 +846,10 @@ public void testPutTaskConfigs() { Callback cb = mock(Callback.class); assertThrows(UnsupportedOperationException.class, () -> herder.putTaskConfigs(CONNECTOR_NAME, - List.of(Map.of("config", "value")), cb, null)); + singletonList(singletonMap("config", "value")), cb, null)); } @Test - @SuppressWarnings("rawtypes") public void testCorruptConfig() { initialize(false); Map config = new HashMap<>(); @@ -870,11 +858,11 @@ public void testCorruptConfig() { config.put(SinkConnectorConfig.TOPICS_CONFIG, TOPICS_LIST_STR); Connector connectorMock = mock(SinkConnector.class); String error = "This is an error in your config!"; - List errors = new ArrayList<>(List.of(error)); + List errors = new ArrayList<>(singletonList(error)); String key = "foo.invalid.key"; when(connectorMock.validate(config)).thenReturn( new Config( - List.of(new ConfigValue(key, null, List.of(), errors)) + singletonList(new ConfigValue(key, null, Collections.emptyList(), errors)) ) ); ConfigDef configDef = new ConfigDef(); @@ -882,12 +870,10 @@ public void testCorruptConfig() { when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); - when(worker.config()).thenReturn(workerConfig); when(worker.getPlugins()).thenReturn(plugins); - when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); - when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); + when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); - when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); + when(plugins.newConnector(anyString())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(configDef); herder.putConnectorConfig(CONNECTOR_NAME, config, true, createCallback); @@ -900,10 +886,10 @@ public void testCorruptConfig() { Throwable cause = e.getCause(); assertInstanceOf(BadRequestException.class, cause); assertEquals( - "Connector configuration is invalid and contains the following 1 error(s):\n" + - error + "\n" + - "You can also find the above list of errors at the endpoint `/connector-plugins/{connectorType}/config/validate`", - cause.getMessage() + cause.getMessage(), + "Connector configuration is invalid and contains the following 1 error(s):\n" + + error + "\n" + + "You can also find the above list of errors at the endpoint `/connector-plugins/{connectorType}/config/validate`" ); verify(loaderSwap).close(); } @@ -934,7 +920,7 @@ public void testTargetStates() throws Exception { verify(statusBackingStore).put(new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), AbstractStatus.State.DESTROYED, WORKER_ID, 0)); stopCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); herder.taskConfigs(CONNECTOR_NAME, taskConfigsCallback); - assertEquals(List.of(), taskConfigsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); + assertEquals(Collections.emptyList(), taskConfigsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); // herder.stop() should stop any running connectors and tasks even if destroyConnector was not invoked herder.stop(); @@ -949,7 +935,7 @@ public void testModifyConnectorOffsetsUnknownConnector() { initialize(false); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets("unknown-connector", - Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), alterOffsetsCallback); ExecutionException e = assertThrows(ExecutionException.class, () -> alterOffsetsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); assertInstanceOf(NotFoundException.class, e.getCause()); @@ -968,20 +954,20 @@ public void testModifyConnectorOffsetsConnectorNotInStoppedState() { herder.configState = new ClusterConfigState( 10, null, - Map.of(CONNECTOR_NAME, 3), - Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Map.of(CONNECTOR_NAME, TargetState.PAUSED), - Map.of(), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_NAME, 3), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Collections.singletonMap(CONNECTOR_NAME, TargetState.PAUSED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.emptySet(), + Collections.emptySet() ); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets(CONNECTOR_NAME, - Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), alterOffsetsCallback); ExecutionException e = assertThrows(ExecutionException.class, () -> alterOffsetsCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS)); assertInstanceOf(BadRequestException.class, e.getCause()); @@ -1007,19 +993,19 @@ public void testAlterConnectorOffsets() throws Exception { herder.configState = new ClusterConfigState( 10, null, - Map.of(CONNECTOR_NAME, 0), - Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Map.of(CONNECTOR_NAME, TargetState.STOPPED), - Map.of(), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_NAME, 0), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STOPPED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.emptySet(), + Collections.emptySet() ); FutureCallback alterOffsetsCallback = new FutureCallback<>(); herder.alterConnectorOffsets(CONNECTOR_NAME, - Map.of(Map.of("partitionKey", "partitionValue"), Map.of("offsetKey", "offsetValue")), + Collections.singletonMap(Collections.singletonMap("partitionKey", "partitionValue"), Collections.singletonMap("offsetKey", "offsetValue")), alterOffsetsCallback); assertEquals(msg, alterOffsetsCallback.get(1000, TimeUnit.MILLISECONDS)); } @@ -1040,15 +1026,15 @@ public void testResetConnectorOffsets() throws Exception { herder.configState = new ClusterConfigState( 10, null, - Map.of(CONNECTOR_NAME, 0), - Map.of(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), - Map.of(CONNECTOR_NAME, TargetState.STOPPED), - Map.of(), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), - Set.of(), - Set.of() + Collections.singletonMap(CONNECTOR_NAME, 0), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig(SourceSink.SOURCE)), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STOPPED), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.emptySet(), + Collections.emptySet() ); FutureCallback resetOffsetsCallback = new FutureCallback<>(); herder.resetConnectorOffsets(CONNECTOR_NAME, resetOffsetsCallback); @@ -1074,7 +1060,7 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { assertEquals(createdInfo(SourceSink.SOURCE), connectorInfo.result()); // Prepare for task config update - when(worker.connectorNames()).thenReturn(Set.of(CONNECTOR_NAME)); + when(worker.connectorNames()).thenReturn(Collections.singleton(CONNECTOR_NAME)); expectStop(); @@ -1086,8 +1072,8 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { updatedTaskConfig2.put("dummy-task-property", "2"); when(worker.connectorTaskConfigs(eq(CONNECTOR_NAME), any())) .thenReturn( - List.of(updatedTaskConfig1), - List.of(updatedTaskConfig2)); + Collections.singletonList(updatedTaskConfig1), + Collections.singletonList(updatedTaskConfig2)); // Set new config on the connector and tasks FutureCallback> reconfigureCallback = new FutureCallback<>(); @@ -1098,7 +1084,7 @@ public void testRequestTaskReconfigurationDoesNotDeadlock() throws Exception { // Wait on connector update Herder.Created updatedConnectorInfo = reconfigureCallback.get(WAIT_TIME_MS, TimeUnit.MILLISECONDS); - ConnectorInfo expectedConnectorInfo = new ConnectorInfo(CONNECTOR_NAME, newConfig, List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); + ConnectorInfo expectedConnectorInfo = new ConnectorInfo(CONNECTOR_NAME, newConfig, singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), ConnectorType.SOURCE); assertEquals(expectedConnectorInfo, updatedConnectorInfo.result()); verify(statusBackingStore, times(2)).put(new TaskStatus(new ConnectorTaskId(CONNECTOR_NAME, 0), TaskStatus.State.DESTROYED, WORKER_ID, 0)); @@ -1125,7 +1111,6 @@ private void expectAdd(SourceSink sourceSink, } when(worker.isRunning(CONNECTOR_NAME)).thenReturn(true); - if (sourceSink == SourceSink.SOURCE) { when(worker.isTopicCreationEnabled()).thenReturn(true); } @@ -1136,25 +1121,24 @@ private void expectAdd(SourceSink sourceSink, Map generatedTaskProps = taskConfig(sourceSink); if (mockConnectorTaskConfigs) { - when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)).thenReturn(List.of(generatedTaskProps)); + when(worker.connectorTaskConfigs(CONNECTOR_NAME, connConfig)).thenReturn(singletonList(generatedTaskProps)); } ClusterConfigState configState = new ClusterConfigState( -1, null, - Map.of(CONNECTOR_NAME, 1), - Map.of(CONNECTOR_NAME, connectorConfig), - Map.of(CONNECTOR_NAME, TargetState.STARTED), - Map.of(new ConnectorTaskId(CONNECTOR_NAME, 0), generatedTaskProps), - Map.of(), - Map.of(), - Map.of(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), + Collections.singletonMap(CONNECTOR_NAME, 1), + Collections.singletonMap(CONNECTOR_NAME, connectorConfig), + Collections.singletonMap(CONNECTOR_NAME, TargetState.STARTED), + Collections.singletonMap(new ConnectorTaskId(CONNECTOR_NAME, 0), generatedTaskProps), + Collections.emptyMap(), + Collections.emptyMap(), + Collections.singletonMap(CONNECTOR_NAME, new AppliedConnectorConfig(connectorConfig)), new HashSet<>(), new HashSet<>(), transformer); if (sourceSink.equals(SourceSink.SOURCE) && mockStartSourceTask) { - when(worker.taskVersion(any())).thenReturn(null); when(worker.startSourceTask(new ConnectorTaskId(CONNECTOR_NAME, 0), configState, connectorConfig(sourceSink), generatedTaskProps, herder, TargetState.STARTED)).thenReturn(true); } @@ -1188,13 +1172,13 @@ private void expectTargetState(String connector, TargetState state) { private ConnectorInfo createdInfo(SourceSink sourceSink) { return new ConnectorInfo(CONNECTOR_NAME, connectorConfig(sourceSink), - List.of(new ConnectorTaskId(CONNECTOR_NAME, 0)), + singletonList(new ConnectorTaskId(CONNECTOR_NAME, 0)), SourceSink.SOURCE == sourceSink ? ConnectorType.SOURCE : ConnectorType.SINK); } private void expectStop() { ConnectorTaskId task = new ConnectorTaskId(CONNECTOR_NAME, 0); - doNothing().when(worker).stopAndAwaitTasks(List.of(task)); + doNothing().when(worker).stopAndAwaitTasks(singletonList(task)); doNothing().when(worker).stopAndAwaitConnector(CONNECTOR_NAME); } @@ -1228,7 +1212,6 @@ private static Map taskConfig(SourceSink sourceSink) { return generatedTaskProps; } - @SuppressWarnings("rawtypes") private void expectConfigValidation( SourceSink sourceSink, Map... configs @@ -1238,18 +1221,18 @@ private void expectConfigValidation( when(worker.configTransformer()).thenReturn(transformer); final ArgumentCaptor> configCapture = ArgumentCaptor.forClass(Map.class); when(transformer.transform(configCapture.capture())).thenAnswer(invocation -> configCapture.getValue()); - when(worker.config()).thenReturn(workerConfig); - when(workerConfig.getClass(WorkerConfig.HEADER_CONVERTER_CLASS_CONFIG)).thenReturn((Class) SimpleHeaderConverter.class); - when(plugins.pluginLoader(anyString(), any())).thenReturn(pluginLoader); + when(worker.getPlugins()).thenReturn(plugins); + when(plugins.connectorLoader(anyString())).thenReturn(pluginLoader); when(plugins.withClassLoader(pluginLoader)).thenReturn(loaderSwap); + // Assume the connector should always be created when(worker.getPlugins()).thenReturn(plugins); - when(plugins.newConnector(anyString(), any())).thenReturn(connectorMock); + when(plugins.newConnector(anyString())).thenReturn(connectorMock); when(connectorMock.config()).thenReturn(new ConfigDef()); // Set up validation for each config for (Map config : configs) { - when(connectorMock.validate(config)).thenReturn(new Config(List.of())); + when(connectorMock.validate(config)).thenReturn(new Config(Collections.emptyList())); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java index b12658c35e399..f78ab54950f4a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/ConnectorOffsetBackingStoreTest.java @@ -40,8 +40,8 @@ import org.mockito.quality.Strictness; import java.nio.ByteBuffer; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -440,7 +440,7 @@ private MockConsumer createMockConsumer(String topic) { MockConsumer consumer = new MockConsumer<>(AutoOffsetResetStrategy.LATEST.name()); Node noNode = Node.noNode(); Node[] nodes = new Node[]{noNode}; - consumer.updatePartitions(topic, List.of(new PartitionInfo(topic, 0, noNode, nodes, nodes))); + consumer.updatePartitions(topic, Collections.singletonList(new PartitionInfo(topic, 0, noNode, nodes, nodes))); consumer.updateBeginningOffsets(mkMap(mkEntry(new TopicPartition(topic, 0), 100L))); return consumer; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java index 139369c0d560a..204fcc283bd94 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/FileOffsetBackingStoreTest.java @@ -19,7 +19,6 @@ import org.apache.kafka.connect.data.SchemaAndValue; import org.apache.kafka.connect.json.JsonConverter; import org.apache.kafka.connect.json.JsonConverterConfig; -import org.apache.kafka.connect.runtime.WorkerConfig; import org.apache.kafka.connect.runtime.standalone.StandaloneConfig; import org.apache.kafka.connect.util.Callback; @@ -35,6 +34,8 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.nio.file.Files; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -78,14 +79,13 @@ public void setup() { converter = mock(Converter.class); // This is only needed for storing deserialized connector partitions, which we don't test in most of the cases here when(converter.toConnectData(anyString(), any(byte[].class))).thenReturn(new SchemaAndValue(null, - List.of("connector", Map.of("partitionKey", "dummy")))); + Arrays.asList("connector", Collections.singletonMap("partitionKey", "dummy")))); store = new FileOffsetBackingStore(converter); tempFile = assertDoesNotThrow(() -> File.createTempFile("fileoffsetbackingstore", null)); Map props = new HashMap<>(); props.put(StandaloneConfig.OFFSET_STORAGE_FILE_FILENAME_CONFIG, tempFile.getAbsolutePath()); props.put(StandaloneConfig.KEY_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); props.put(StandaloneConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); - props.put(WorkerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); config = new StandaloneConfig(props); store.configure(config); store.start(); @@ -105,7 +105,7 @@ public void testGetSet() throws Exception { store.set(FIRST_SET, setCallback).get(); - Map values = store.get(List.of(buffer("key"), buffer("bad"))).get(); + Map values = store.get(Arrays.asList(buffer("key"), buffer("bad"))).get(); assertEquals(buffer("value"), values.get(buffer("key"))); assertNull(values.get(buffer("bad"))); verify(setCallback).onCompletion(isNull(), isNull()); @@ -123,7 +123,7 @@ public void testSaveRestore() throws Exception { FileOffsetBackingStore restore = new FileOffsetBackingStore(converter); restore.configure(config); restore.start(); - Map values = restore.get(List.of(buffer("key"))).get(); + Map values = restore.get(Collections.singletonList(buffer("key"))).get(); assertEquals(buffer("value"), values.get(buffer("key"))); verify(setCallback).onCompletion(isNull(), isNull()); } @@ -135,26 +135,26 @@ public void testConnectorPartitions() throws Exception { // This test actually requires the offset store to track deserialized source partitions, so we can't use the member variable mock converter JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + jsonConverter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); Map serializedPartitionOffsets = new HashMap<>(); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), - serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), + serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) ); store.set(serializedPartitionOffsets, setCallback).get(); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), - serialize(jsonConverter, Map.of("offsetKey", "offsetValue2")) + serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), + serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue2")) ); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue2")), - serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue2")), + serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) ); serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector2", Map.of("partitionKey", "partitionValue")), - serialize(jsonConverter, Map.of("offsetKey", "offsetValue")) + serializeKey(jsonConverter, "connector2", Collections.singletonMap("partitionKey", "partitionValue")), + serialize(jsonConverter, Collections.singletonMap("offsetKey", "offsetValue")) ); store.set(serializedPartitionOffsets, setCallback).get(); @@ -167,23 +167,23 @@ public void testConnectorPartitions() throws Exception { Set> connectorPartitions1 = restore.connectorPartitions("connector1"); Set> expectedConnectorPartition1 = new HashSet<>(); - expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue1")); - expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue2")); + expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue1")); + expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue2")); assertEquals(expectedConnectorPartition1, connectorPartitions1); Set> connectorPartitions2 = restore.connectorPartitions("connector2"); - Set> expectedConnectorPartition2 = Set.of(Map.of("partitionKey", "partitionValue")); + Set> expectedConnectorPartition2 = Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); assertEquals(expectedConnectorPartition2, connectorPartitions2); serializedPartitionOffsets.clear(); // Null valued offset for a partition key should remove that partition for the connector serializedPartitionOffsets.put( - serializeKey(jsonConverter, "connector1", Map.of("partitionKey", "partitionValue1")), + serializeKey(jsonConverter, "connector1", Collections.singletonMap("partitionKey", "partitionValue1")), null ); restore.set(serializedPartitionOffsets, setCallback).get(); connectorPartitions1 = restore.connectorPartitions("connector1"); - assertEquals(Set.of(Map.of("partitionKey", "partitionValue2")), connectorPartitions1); + assertEquals(Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue2")), connectorPartitions1); verify(setCallback, times(3)).onCompletion(isNull(), isNull()); } @@ -193,7 +193,7 @@ private static ByteBuffer buffer(String v) { } private static ByteBuffer serializeKey(Converter converter, String connectorName, Map sourcePartition) { - List nameAndPartition = List.of(connectorName, sourcePartition); + List nameAndPartition = Arrays.asList(connectorName, sourcePartition); return serialize(converter, nameAndPartition); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java index 98eaab7df4f10..4173d9a357c45 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaConfigBackingStoreTest.java @@ -61,13 +61,14 @@ import java.time.Duration; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.Future; @@ -124,41 +125,41 @@ public class KafkaConfigBackingStoreTest { DEFAULT_CONFIG_STORAGE_PROPS.put(DistributedConfig.VALUE_CONVERTER_CLASS_CONFIG, "org.apache.kafka.connect.json.JsonConverter"); } - private static final List CONNECTOR_IDS = List.of("connector1", "connector2"); - private static final List CONNECTOR_CONFIG_KEYS = List.of("connector-connector1", "connector-connector2"); - private static final List COMMIT_TASKS_CONFIG_KEYS = List.of("commit-connector1", "commit-connector2"); + private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); + private static final List CONNECTOR_CONFIG_KEYS = Arrays.asList("connector-connector1", "connector-connector2"); + private static final List COMMIT_TASKS_CONFIG_KEYS = Arrays.asList("commit-connector1", "commit-connector2"); - private static final List TARGET_STATE_KEYS = List.of("target-state-connector1", "target-state-connector2"); - private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = List.of("tasks-fencing-connector1", "tasks-fencing-connector2"); + private static final List TARGET_STATE_KEYS = Arrays.asList("target-state-connector1", "target-state-connector2"); + private static final List CONNECTOR_TASK_COUNT_RECORD_KEYS = Arrays.asList("tasks-fencing-connector1", "tasks-fencing-connector2"); private static final String CONNECTOR_1_NAME = "connector1"; private static final String CONNECTOR_2_NAME = "connector2"; - private static final List RESTART_CONNECTOR_KEYS = List.of(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); + private static final List RESTART_CONNECTOR_KEYS = Arrays.asList(RESTART_KEY(CONNECTOR_1_NAME), RESTART_KEY(CONNECTOR_2_NAME)); // Need a) connector with multiple tasks and b) multiple connectors - private static final List TASK_IDS = List.of( + private static final List TASK_IDS = Arrays.asList( new ConnectorTaskId("connector1", 0), new ConnectorTaskId("connector1", 1), new ConnectorTaskId("connector2", 0) ); - private static final List TASK_CONFIG_KEYS = List.of("task-connector1-0", "task-connector1-1", "task-connector2-0"); + private static final List TASK_CONFIG_KEYS = Arrays.asList("task-connector1-0", "task-connector1-1", "task-connector2-0"); // Need some placeholders -- the contents don't matter here, just that they are restored properly - private static final List> SAMPLE_CONFIGS = List.of( - Map.of("config-key-one", "config-value-one"), - Map.of("config-key-two", "config-value-two"), - Map.of("config-key-three", "config-value-three") + private static final List> SAMPLE_CONFIGS = Arrays.asList( + Collections.singletonMap("config-key-one", "config-value-one"), + Collections.singletonMap("config-key-two", "config-value-two"), + Collections.singletonMap("config-key-three", "config-value-three") ); - private static final List TASK_CONFIG_STRUCTS = List.of( + private static final List TASK_CONFIG_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), new Struct(KafkaConfigBackingStore.TASK_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)) ); private static final Struct ONLY_FAILED_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(INCLUDE_TASKS_FIELD_NAME, false); private static final Struct INCLUDE_TASKS_MISSING_STRUCT = new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true); - private static final List RESTART_REQUEST_STRUCTS = List.of( + private static final List RESTART_REQUEST_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.RESTART_REQUEST_V0).put(ONLY_FAILED_FIELD_NAME, true).put(INCLUDE_TASKS_FIELD_NAME, false), ONLY_FAILED_MISSING_STRUCT, INCLUDE_TASKS_MISSING_STRUCT); - private static final List CONNECTOR_CONFIG_STRUCTS = List.of( + private static final List CONNECTOR_CONFIG_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(0)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(1)), new Struct(KafkaConfigBackingStore.CONNECTOR_CONFIGURATION_V0).put("properties", SAMPLE_CONFIGS.get(2)) @@ -171,14 +172,13 @@ public class KafkaConfigBackingStoreTest { private static final Struct TARGET_STATE_STOPPED = new Struct(KafkaConfigBackingStore.TARGET_STATE_V1) .put("state", "PAUSED") .put("state.v2", "STOPPED"); - private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = List.of( + private static final List CONNECTOR_TASK_COUNT_RECORD_STRUCTS = Arrays.asList( new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 6), - new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9), - new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 2) + new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 9) ); // The exact format doesn't matter here since both conversions are mocked - private static final List CONFIGS_SERIALIZED = List.of( + private static final List CONFIGS_SERIALIZED = Arrays.asList( "config-bytes-1".getBytes(), "config-bytes-2".getBytes(), "config-bytes-3".getBytes(), "config-bytes-4".getBytes(), "config-bytes-5".getBytes(), "config-bytes-6".getBytes(), "config-bytes-7".getBytes(), "config-bytes-8".getBytes(), "config-bytes-9".getBytes() @@ -189,7 +189,7 @@ public class KafkaConfigBackingStoreTest { private static final Struct TASKS_COMMIT_STRUCT_ZERO_TASK_CONNECTOR = new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 0); - private static final List TARGET_STATES_SERIALIZED = List.of( + private static final List TARGET_STATES_SERIALIZED = Arrays.asList( "started".getBytes(), "paused".getBytes(), "stopped".getBytes() ); @Mock @@ -306,8 +306,8 @@ public void testPutConnectorConfig() throws Exception { String configKey = CONNECTOR_CONFIG_KEYS.get(1); String targetStateKey = TARGET_STATE_KEYS.get(1); - doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) + doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1)))) // Config deletion .doAnswer(expectReadToEnd(new LinkedHashMap<>() {{ put(configKey, null); @@ -424,7 +424,7 @@ public void testPutConnectorConfigProducerError() throws Exception { assertEquals(-1, configState.offset()); assertEquals(0, configState.connectors().size()); - Exception thrownException = new ExecutionException(new TopicAuthorizationException(Set.of("test"))); + Exception thrownException = new ExecutionException(new TopicAuthorizationException(Collections.singleton("test"))); when(producerFuture.get(anyLong(), any(TimeUnit.class))).thenThrow(thrownException); // verify that the producer exception from KafkaBasedLog::send is propagated @@ -508,8 +508,8 @@ public void testWritePrivileges() throws Exception { doReturn(fencableProducer).when(configStorage).createFencableProducer(); // And write the task count record successfully when(fencableProducer.send(any(ProducerRecord.class))).thenReturn(null); - doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) - .doAnswer(expectReadToEnd(Map.of(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) + doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + .doAnswer(expectReadToEnd(Collections.singletonMap(CONNECTOR_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(2)))) .when(configLog).readToEnd(); when(converter.toConnectData(TOPIC, CONFIGS_SERIALIZED.get(0))) .thenReturn(new SchemaAndValue(null, structToMap(CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(0)))); @@ -568,7 +568,7 @@ public void testWritePrivileges() throws Exception { @Test public void testRestoreTargetStateUnexpectedDeletion() { - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -597,7 +597,7 @@ public void testRestoreTargetStateUnexpectedDeletion() { // The target state deletion should reset the state to STARTED ClusterConfigState configState = configStorage.snapshot(); assertEquals(5, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); configStorage.stop(); @@ -606,7 +606,7 @@ public void testRestoreTargetStateUnexpectedDeletion() { @Test public void testRestoreTargetState() { - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -641,7 +641,7 @@ public void testRestoreTargetState() { // Should see a single connector with initial state paused ClusterConfigState configState = configStorage.snapshot(); assertEquals(6, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.PAUSED, configState.targetState(CONNECTOR_IDS.get(0))); assertEquals(TargetState.STOPPED, configState.targetState(CONNECTOR_IDS.get(1))); @@ -655,7 +655,7 @@ public void testRestore() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), @@ -697,18 +697,18 @@ public void testRestore() { // Should see a single connector and its config should be the last one seen anywhere in the log ClusterConfigState configState = configStorage.snapshot(); assertEquals(logOffset, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); // Should see 2 tasks for that connector. Only config updates before the root key update should be reflected - assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(1))); assertEquals(9, (int) configState.taskCountRecord(CONNECTOR_IDS.get(1))); - assertEquals(Set.of(), configState.inconsistentConnectors()); - assertEquals(Set.of("connector1"), configState.connectorsPendingFencing); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); + assertEquals(Collections.singleton("connector1"), configState.connectorsPendingFencing); // Shouldn't see any callbacks since this is during startup configStorage.stop(); @@ -721,7 +721,7 @@ public void testRestoreConnectorDeletion() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -767,7 +767,7 @@ public void testRestoreZeroTasks() { // that inconsistent state is ignored. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -805,69 +805,19 @@ public void testRestoreZeroTasks() { // Should see a single connector and its config should be the last one seen anywhere in the log ClusterConfigState configState = configStorage.snapshot(); assertEquals(8, configState.offset()); // Should always be next to be read, even if uncommitted - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); // CONNECTOR_CONFIG_STRUCTS[2] -> SAMPLE_CONFIGS[2] assertEquals(SAMPLE_CONFIGS.get(2), configState.connectorConfig(CONNECTOR_IDS.get(0))); // Should see 0 tasks for that connector. - assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] - assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); // Shouldn't see any callbacks since this is during startup configStorage.stop(); verify(configLog).stop(); } - @Test - public void testRestoreCompactedDeletedConnector() { - // When a connector is deleted, we emit a tombstone record for its config (with key - // "connector-") and its target state (with key "target-state-"), but not - // for its task configs - // As a result, we need to carefully handle the case where task configs are present in - // the config topic for a connector, but there is no accompanying config for the - // connector itself - - int offset = 0; - List> existingRecords = List.of( - new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, - TASK_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, - TASK_CONFIG_KEYS.get(1), CONFIGS_SERIALIZED.get(1), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, - COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(2), new RecordHeaders(), Optional.empty()), - new ConsumerRecord<>(TOPIC, 0, offset++, 0L, TimestampType.CREATE_TIME, 0, 0, - CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), CONFIGS_SERIALIZED.get(3), new RecordHeaders(), Optional.empty())); - LinkedHashMap deserialized = new LinkedHashMap<>(); - deserialized.put(CONFIGS_SERIALIZED.get(0), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(1), TASK_CONFIG_STRUCTS.get(0)); - deserialized.put(CONFIGS_SERIALIZED.get(2), TASKS_COMMIT_STRUCT_TWO_TASK_CONNECTOR); - deserialized.put(CONFIGS_SERIALIZED.get(3), CONNECTOR_TASK_COUNT_RECORD_STRUCTS.get(2)); - logOffset = offset; - expectStart(existingRecords, deserialized); - when(configLog.partitionCount()).thenReturn(1); - - configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); - verifyConfigure(); - configStorage.start(); - - // Should see no connectors and no task configs - ClusterConfigState configState = configStorage.snapshot(); - assertEquals(Set.of(), configState.connectors()); - assertEquals(0, configState.taskCount(CONNECTOR_1_NAME)); - assertNull(configState.rawTaskConfig(TASK_IDS.get(0))); - assertNull(configState.rawTaskConfig(TASK_IDS.get(1))); - - // Probe internal collections just to be sure - assertEquals(Map.of(), configState.connectorConfigs); - assertEquals(Map.of(), configState.taskConfigs); - assertEquals(Map.of(), configState.connectorTaskCounts); - - // Exception: we still include task count records, for the unlikely-but-possible case - // where there are still zombie instances of the tasks for this long-deleted connector - // running somewhere on the cluster - assertEquals(2, (int) configState.taskCountRecord(CONNECTOR_1_NAME)); - } - @Test public void testRecordToRestartRequest() { ConsumerRecord record = new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), @@ -1018,7 +968,7 @@ public void testConsumerPropertiesNotInsertedByDefaultWithoutExactlyOnceSourceEn @Test public void testBackgroundConnectorDeletion() throws Exception { // verify that we handle connector deletions correctly when they come up through the log - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -1070,7 +1020,7 @@ public void testBackgroundConnectorDeletion() throws Exception { assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); // Ensure that the deleted connector's deferred task updates have been cleaned up // in order to prevent unbounded growth of the map - assertEquals(Map.of(), configStorage.deferredTaskUpdates); + assertEquals(Collections.emptyMap(), configStorage.deferredTaskUpdates); configStorage.stop(); verify(configLog).stop(); @@ -1081,7 +1031,7 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // Test a case where a failure and compaction has left us in an inconsistent state when reading the log. // We start out by loading an initial configuration where we started to write a task update, and then // compaction cleaned up the earlier record. - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), // This is the record that has been compacted: @@ -1108,13 +1058,13 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // After reading the log, it should have been in an inconsistent state ClusterConfigState configState = configStorage.snapshot(); assertEquals(6, configState.offset()); // Should always be next to be read, not last committed - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); // Inconsistent data should leave us with no tasks listed for the connector and an entry in the inconsistent list - assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); // Both TASK_CONFIG_STRUCTS[0] -> SAMPLE_CONFIGS[0] assertNull(configState.taskConfig(TASK_IDS.get(0))); assertNull(configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Set.of(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); + assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configState.inconsistentConnectors()); // Records to be read by consumer as it reads to the end of the log LinkedHashMap serializedConfigs = new LinkedHashMap<>(); @@ -1134,20 +1084,20 @@ public void testPutTaskConfigsDoesNotResolveAllInconsistencies() { // Next, issue a write that has everything that is needed and it should be accepted. Note that in this case // we are going to shrink the number of tasks to 1 - configStorage.putTaskConfigs("connector1", List.of(SAMPLE_CONFIGS.get(0))); + configStorage.putTaskConfigs("connector1", Collections.singletonList(SAMPLE_CONFIGS.get(0))); // Validate updated config configState = configStorage.snapshot(); // This is only two more ahead of the last one because multiple calls fail, and so their configs are not written // to the topic. Only the last call with 1 task config + 1 commit actually gets written. assertEquals(8, configState.offset()); - assertEquals(List.of(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); - assertEquals(List.of(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(Collections.singletonList(CONNECTOR_IDS.get(0)), new ArrayList<>(configState.connectors())); + assertEquals(Collections.singletonList(TASK_IDS.get(0)), configState.tasks(CONNECTOR_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); - assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0))); + verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(0))); configStorage.stop(); verify(configLog).stop(); @@ -1166,7 +1116,7 @@ public void testPutRestartRequestOnlyFailedIncludingTasks() { } private void testPutRestartRequest(RestartRequest restartRequest) { - expectStart(List.of(), Map.of()); + expectStart(Collections.emptyList(), Collections.emptyMap()); when(configLog.partitionCount()).thenReturn(1); configStorage.setupAndCreateKafkaBasedLog(TOPIC, config); @@ -1202,7 +1152,7 @@ public void testRestoreRestartRequestInconsistentState() { // Restoring data should notify only of the latest values after loading is complete. This also validates // that inconsistent state doesn't prevent startup. // Overwrite each type at least once to ensure we see the latest data after loading - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, RESTART_CONNECTOR_KEYS.get(1), @@ -1248,7 +1198,7 @@ public void testPutTaskConfigsZeroTasks() { // Records to be read by consumer as it reads to the end of the log doAnswer(expectReadToEnd(new LinkedHashMap<>())). - doAnswer(expectReadToEnd(Map.of(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) + doAnswer(expectReadToEnd(Collections.singletonMap(COMMIT_TASKS_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0)))) .when(configLog).readToEnd(); expectConvertWriteRead( @@ -1256,7 +1206,7 @@ public void testPutTaskConfigsZeroTasks() { "tasks", 0); // We have 0 tasks // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); // Null before writing @@ -1265,19 +1215,19 @@ public void testPutTaskConfigsZeroTasks() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = List.of(); + List> taskConfigs = Collections.emptyList(); configStorage.putTaskConfigs("connector1", taskConfigs); // Validate root config by listing all connectors and tasks configState = configStorage.snapshot(); assertEquals(1, configState.offset()); String connectorName = CONNECTOR_IDS.get(0); - assertEquals(List.of(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(List.of(), configState.tasks(connectorName)); - assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(Collections.emptyList(), configState.tasks(connectorName)); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(List.of()); + verify(configUpdateListener).onTaskConfigUpdate(Collections.emptyList()); configStorage.stop(); verify(configLog).stop(); @@ -1286,7 +1236,7 @@ public void testPutTaskConfigsZeroTasks() { @Test public void testBackgroundUpdateTargetState() throws Exception { // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -1312,7 +1262,7 @@ public void testBackgroundUpdateTargetState() throws Exception { // Should see a single connector with initial state started ClusterConfigState configState = configStorage.snapshot(); - assertEquals(Set.of(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); + assertEquals(Collections.singleton(CONNECTOR_IDS.get(0)), configStorage.connectorTargetStates.keySet()); assertEquals(TargetState.STARTED, configState.targetState(CONNECTOR_IDS.get(0))); LinkedHashMap serializedAfterStartup = new LinkedHashMap<>(); @@ -1341,7 +1291,7 @@ public void testBackgroundUpdateTargetState() throws Exception { @Test public void testSameTargetState() { // verify that we handle target state changes correctly when they come up through the log - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, CONNECTOR_CONFIG_KEYS.get(0), CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty()), new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, TASK_CONFIG_KEYS.get(0), @@ -1393,7 +1343,7 @@ public void testPutLogLevel() throws Exception { // Pre-populate the config topic with a couple of logger level records; these should be ignored (i.e., // not reported to the update listener) - List> existingRecords = List.of( + List> existingRecords = Arrays.asList( new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, "logger-cluster-" + logger1, CONFIGS_SERIALIZED.get(0), new RecordHeaders(), Optional.empty() ), @@ -1480,7 +1430,7 @@ public void testTaskCountRecordsAndGenerations() { CONNECTOR_TASK_COUNT_RECORD_KEYS.get(0), KafkaConfigBackingStore.TASK_COUNT_RECORD_V0, CONFIGS_SERIALIZED.get(3), new Struct(KafkaConfigBackingStore.TASK_COUNT_RECORD_V0).put("task-count", 4)); - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); // Before anything is written String connectorName = CONNECTOR_IDS.get(0); @@ -1491,7 +1441,7 @@ public void testTaskCountRecordsAndGenerations() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); configState = configStorage.snapshot(); @@ -1509,7 +1459,7 @@ public void testTaskCountRecordsAndGenerations() { assertEquals(0, (long) configState.taskConfigGeneration(connectorName)); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); + verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); configStorage.stop(); verify(configLog).stop(); @@ -1543,7 +1493,7 @@ public void testPutTaskConfigs() { COMMIT_TASKS_CONFIG_KEYS.get(0), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(2), new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 2)); // Starts with 0 tasks, after update has 2 // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); // Null before writing ClusterConfigState configState = configStorage.snapshot(); @@ -1553,21 +1503,21 @@ public void testPutTaskConfigs() { // Writing task configs should block until all the writes have been performed and the root record update // has completed - List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); // Validate root config by listing all connectors and tasks configState = configStorage.snapshot(); assertEquals(3, configState.offset()); String connectorName = CONNECTOR_IDS.get(0); - assertEquals(List.of(connectorName), new ArrayList<>(configState.connectors())); - assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); + assertEquals(Collections.singletonList(connectorName), new ArrayList<>(configState.connectors())); + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName)); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); - assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); + verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); configStorage.stop(); verify(configLog).stop(); @@ -1614,8 +1564,8 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { assertNull(configState.taskConfig(TASK_IDS.get(1))); // Bootstrap as if we had already added the connector, but no tasks had been added yet - addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), List.of()); - List> taskConfigs = List.of(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); + addConnector(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), Collections.emptyList()); + List> taskConfigs = Arrays.asList(SAMPLE_CONFIGS.get(0), SAMPLE_CONFIGS.get(1)); configStorage.putTaskConfigs("connector1", taskConfigs); expectConvertWriteRead2( @@ -1625,8 +1575,8 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { COMMIT_TASKS_CONFIG_KEYS.get(1), KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0, CONFIGS_SERIALIZED.get(4), new Struct(KafkaConfigBackingStore.CONNECTOR_TASKS_COMMIT_V0).put("tasks", 1)); // Starts with 2 tasks, after update has 3 - addConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), List.of()); - taskConfigs = List.of(SAMPLE_CONFIGS.get(2)); + addConnector(CONNECTOR_IDS.get(1), SAMPLE_CONFIGS.get(1), Collections.emptyList()); + taskConfigs = Collections.singletonList(SAMPLE_CONFIGS.get(2)); configStorage.putTaskConfigs("connector2", taskConfigs); // Validate root config by listing all connectors and tasks @@ -1634,17 +1584,17 @@ public void testPutTaskConfigsStartsOnlyReconfiguredTasks() { assertEquals(5, configState.offset()); String connectorName1 = CONNECTOR_IDS.get(0); String connectorName2 = CONNECTOR_IDS.get(1); - assertEquals(List.of(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); - assertEquals(List.of(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); - assertEquals(List.of(TASK_IDS.get(2)), configState.tasks(connectorName2)); + assertEquals(Arrays.asList(connectorName1, connectorName2), new ArrayList<>(configState.connectors())); + assertEquals(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1)), configState.tasks(connectorName1)); + assertEquals(Collections.singletonList(TASK_IDS.get(2)), configState.tasks(connectorName2)); assertEquals(SAMPLE_CONFIGS.get(0), configState.taskConfig(TASK_IDS.get(0))); assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(TASK_IDS.get(1))); assertEquals(SAMPLE_CONFIGS.get(2), configState.taskConfig(TASK_IDS.get(2))); - assertEquals(Set.of(), configState.inconsistentConnectors()); + assertEquals(Collections.EMPTY_SET, configState.inconsistentConnectors()); // As soon as root is rewritten, we should see a callback notifying us that we reconfigured some tasks - verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(0), TASK_IDS.get(1))); - verify(configUpdateListener).onTaskConfigUpdate(List.of(TASK_IDS.get(2))); + verify(configUpdateListener).onTaskConfigUpdate(Arrays.asList(TASK_IDS.get(0), TASK_IDS.get(1))); + verify(configUpdateListener).onTaskConfigUpdate(Collections.singletonList(TASK_IDS.get(2))); configStorage.stop(); verify(configLog).stop(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java index 19aafabee7178..6a1969fe64fc6 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaOffsetBackingStoreTest.java @@ -45,9 +45,9 @@ import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Set; @@ -145,7 +145,7 @@ public void setup(Boolean mockKeyConverter) { if (mockKeyConverter) { when(keyConverter.toConnectData(any(), any())).thenReturn(new SchemaAndValue(null, - List.of("connector", Map.of("partitionKey", "dummy")))); + Arrays.asList("connector", Collections.singletonMap("partitionKey", "dummy")))); } store = spy(new KafkaOffsetBackingStore(adminSupplier, clientIdBase, keyConverter)); @@ -233,7 +233,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting from empty store should return nulls - Map offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + Map offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); // Since we didn't read them yet, these will be null assertNull(offsets.get(TP0_KEY)); assertNull(offsets.get(TP1_KEY)); @@ -270,7 +270,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertEquals(TP0_VALUE, offsets.get(TP0_KEY)); assertEquals(TP1_VALUE, offsets.get(TP1_KEY)); @@ -287,7 +287,7 @@ public void testGetSet() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(List.of(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(Arrays.asList(TP0_KEY, TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertEquals(TP0_VALUE_NEW, offsets.get(TP0_KEY)); assertEquals(TP1_VALUE_NEW, offsets.get(TP1_KEY)); @@ -363,7 +363,7 @@ public void testGetSetNull() throws Exception { }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Getting data should read to end of our published data and return it - offsets = store.get(List.of(TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); + offsets = store.get(Collections.singletonList(TP1_KEY)).get(10000, TimeUnit.MILLISECONDS); assertNull(offsets.get(TP1_KEY)); // Just verifying that KafkaOffsetBackingStore::get returns null isn't enough, we also need to verify that the mapping for the source partition key is removed. @@ -488,7 +488,7 @@ public void testClientIds() { @Test public void testConnectorPartitions() throws Exception { JsonConverter jsonConverter = new JsonConverter(); - jsonConverter.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + jsonConverter.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); store = spy(new KafkaOffsetBackingStore(() -> { fail("Should not attempt to instantiate admin in these tests"); return null; @@ -506,57 +506,57 @@ public void testConnectorPartitions() throws Exception { doAnswer(invocation -> { capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 0, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, List.of("connector1", - Map.of("partitionKey", "partitionValue1"))), TP0_VALUE.array(), + jsonConverter.fromConnectData("", null, Arrays.asList("connector1", + Collections.singletonMap("partitionKey", "partitionValue1"))), TP0_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 1, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, List.of("connector1", - Map.of("partitionKey", "partitionValue1"))), TP1_VALUE.array(), + jsonConverter.fromConnectData("", null, Arrays.asList("connector1", + Collections.singletonMap("partitionKey", "partitionValue1"))), TP1_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 2, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, List.of("connector1", - Map.of("partitionKey", "partitionValue2"))), TP2_VALUE.array(), + jsonConverter.fromConnectData("", null, Arrays.asList("connector1", + Collections.singletonMap("partitionKey", "partitionValue2"))), TP2_VALUE.array(), new RecordHeaders(), Optional.empty())); capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 3, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, List.of("connector2", - Map.of("partitionKey", "partitionValue"))), TP1_VALUE.array(), + jsonConverter.fromConnectData("", null, Arrays.asList("connector2", + Collections.singletonMap("partitionKey", "partitionValue"))), TP1_VALUE.array(), new RecordHeaders(), Optional.empty())); storeLogCallbackArgumentCaptor.getValue().onCompletion(null, null); return null; }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Trigger a read to the end of the log - store.get(List.of()).get(10000, TimeUnit.MILLISECONDS); + store.get(Collections.emptyList()).get(10000, TimeUnit.MILLISECONDS); Set> connectorPartitions1 = store.connectorPartitions("connector1"); Set> expectedConnectorPartition1 = new HashSet<>(); - expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue1")); - expectedConnectorPartition1.add(Map.of("partitionKey", "partitionValue2")); + expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue1")); + expectedConnectorPartition1.add(Collections.singletonMap("partitionKey", "partitionValue2")); assertEquals(expectedConnectorPartition1, connectorPartitions1); Set> connectorPartitions2 = store.connectorPartitions("connector2"); - Set> expectedConnectorPartition2 = Set.of(Map.of("partitionKey", "partitionValue")); + Set> expectedConnectorPartition2 = Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue")); assertEquals(expectedConnectorPartition2, connectorPartitions2); doAnswer(invocation -> { capturedConsumedCallback.getValue().onCompletion(null, new ConsumerRecord<>(TOPIC, 0, 4, 0L, TimestampType.CREATE_TIME, 0, 0, - jsonConverter.fromConnectData("", null, List.of("connector1", - Map.of("partitionKey", "partitionValue1"))), null, + jsonConverter.fromConnectData("", null, Arrays.asList("connector1", + Collections.singletonMap("partitionKey", "partitionValue1"))), null, new RecordHeaders(), Optional.empty())); storeLogCallbackArgumentCaptor.getValue().onCompletion(null, null); return null; }).when(storeLog).readToEnd(storeLogCallbackArgumentCaptor.capture()); // Trigger a read to the end of the log - store.get(List.of()).get(10000, TimeUnit.MILLISECONDS); + store.get(Collections.emptyList()).get(10000, TimeUnit.MILLISECONDS); // Null valued offset for a partition key should remove that partition for the connector connectorPartitions1 = store.connectorPartitions("connector1"); - assertEquals(Set.of(Map.of("partitionKey", "partitionValue2")), connectorPartitions1); + assertEquals(Collections.singleton(Collections.singletonMap("partitionKey", "partitionValue2")), connectorPartitions1); store.stop(); verify(storeLog).stop(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java index dadb3f4242315..83d9e953478e0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreFormatTest.java @@ -35,9 +35,9 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Arrays; +import java.util.Collections; import java.util.HashSet; -import java.util.Map; -import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import static org.apache.kafka.connect.json.JsonConverterConfig.SCHEMAS_ENABLE_CONFIG; @@ -75,7 +75,7 @@ public class KafkaStatusBackingStoreFormatTest { public void setup() { time = new MockTime(); JsonConverter converter = new JsonConverter(); - converter.configure(Map.of(SCHEMAS_ENABLE_CONFIG, false), false); + converter.configure(Collections.singletonMap(SCHEMAS_ENABLE_CONFIG, false), false); store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, () -> null, kafkaBasedLog); } @@ -182,7 +182,7 @@ public void deleteTopicStatus() { store.read(statusRecord); assertTrue(store.topics.containsKey("bar")); assertFalse(store.topics.get("bar").containsKey("foo")); - assertEquals(Map.of(), store.topics.get("bar")); + assertEquals(Collections.emptyMap(), store.topics.get("bar")); } @Test @@ -204,7 +204,7 @@ public void putTopicState() { ConsumerRecord statusRecord = new ConsumerRecord<>(STATUS_TOPIC, 0, 0, key, valueCaptor.getValue()); store.read(statusRecord); assertEquals(topicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC)); - assertEquals(Set.of(topicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); + assertEquals(Collections.singleton(topicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); } @Test @@ -277,7 +277,7 @@ public void putTopicStateShouldOverridePreviousState() { assertEquals(secondTopicStatus, store.parseTopicStatus(valueCaptor.getValue())); assertEquals(firstTopicStatus, store.getTopic(FOO_CONNECTOR, FOO_TOPIC)); assertEquals(secondTopicStatus, store.getTopic(FOO_CONNECTOR, BAR_TOPIC)); - assertEquals(Set.of(firstTopicStatus, secondTopicStatus), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); + assertEquals(new HashSet<>(Arrays.asList(firstTopicStatus, secondTopicStatus)), new HashSet<>(store.getAllTopics(FOO_CONNECTOR))); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java index 108dbbc45c3f1..a9ac5f483bef1 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/KafkaStatusBackingStoreTest.java @@ -43,11 +43,11 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.Map; import java.util.Optional; -import java.util.Set; import java.util.function.Supplier; import static org.apache.kafka.clients.CommonClientConfigs.CLIENT_ID_CONFIG; @@ -388,8 +388,8 @@ public void deleteConnectorState() { verify(kafkaBasedLog).send(eq("status-connector-" + CONNECTOR), eq(value), any(Callback.class)); verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class)); - assertEquals(Set.of(CONNECTOR), store.connectors()); - assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR))); + assertEquals(new HashSet<>(Collections.singletonList(CONNECTOR)), store.connectors()); + assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR))); store.read(consumerRecord(0, "status-connector-conn", null)); assertTrue(store.connectors().isEmpty()); assertTrue(store.getAll(CONNECTOR).isEmpty()); @@ -412,7 +412,7 @@ public void deleteTaskState() { verify(kafkaBasedLog).send(eq("status-task-conn-0"), eq(value), any(Callback.class)); - assertEquals(Set.of(taskStatus), new HashSet<>(store.getAll(CONNECTOR))); + assertEquals(new HashSet<>(Collections.singletonList(taskStatus)), new HashSet<>(store.getAll(CONNECTOR))); store.read(consumerRecord(0, "status-task-conn-0", null)); assertTrue(store.getAll(CONNECTOR).isEmpty()); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java index f0a87695f9c22..a06496d112dd2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryConfigBackingStoreTest.java @@ -28,7 +28,9 @@ import org.mockito.quality.Strictness; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -49,13 +51,13 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class MemoryConfigBackingStoreTest { - private static final List CONNECTOR_IDS = List.of("connector1", "connector2"); + private static final List CONNECTOR_IDS = Arrays.asList("connector1", "connector2"); // Actual values are irrelevant here and can be used as either connector or task configurations - private static final List> SAMPLE_CONFIGS = List.of( - Map.of("config-key-one", "config-value-one"), - Map.of("config-key-two", "config-value-two"), - Map.of("config-key-three", "config-value-three") + private static final List> SAMPLE_CONFIGS = Arrays.asList( + Collections.singletonMap("config-key-one", "config-value-one"), + Collections.singletonMap("config-key-two", "config-value-two"), + Collections.singletonMap("config-key-three", "config-value-three") ); @Mock @@ -140,10 +142,10 @@ public void testRemoveConnectorConfig() { public void testPutTaskConfigs() { // Can't write task configs for non-existent connector assertThrows(IllegalArgumentException.class, - () -> configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1)))); + () -> configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1)))); configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); - configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1))); + configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1))); ClusterConfigState configState = configStore.snapshot(); ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_IDS.get(0), 0); @@ -151,7 +153,7 @@ public void testPutTaskConfigs() { assertEquals(SAMPLE_CONFIGS.get(1), configState.taskConfig(taskId)); verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0))); - verify(configUpdateListener).onTaskConfigUpdate(eq(Set.of(taskId))); + verify(configUpdateListener).onTaskConfigUpdate(eq(Collections.singleton(taskId))); } @Test @@ -170,18 +172,18 @@ public void testRemoveTaskConfigs() { }).when(configUpdateListener).onTaskConfigUpdate(anySet()); configStore.putConnectorConfig(CONNECTOR_IDS.get(0), SAMPLE_CONFIGS.get(0), null); - configStore.putTaskConfigs(CONNECTOR_IDS.get(0), List.of(SAMPLE_CONFIGS.get(1))); + configStore.putTaskConfigs(CONNECTOR_IDS.get(0), Collections.singletonList(SAMPLE_CONFIGS.get(1))); configStore.removeTaskConfigs(CONNECTOR_IDS.get(0)); ClusterConfigState configState = configStore.snapshot(); assertEquals(0, configState.taskCount(CONNECTOR_IDS.get(0))); - assertEquals(List.of(), configState.tasks(CONNECTOR_IDS.get(0))); + assertEquals(Collections.emptyList(), configState.tasks(CONNECTOR_IDS.get(0))); verify(configUpdateListener).onConnectorConfigUpdate(eq(CONNECTOR_IDS.get(0))); verify(configUpdateListener, times(2)).onTaskConfigUpdate(anySet()); ConnectorTaskId taskId = new ConnectorTaskId(CONNECTOR_IDS.get(0), 0); - assertEquals(List.of(Set.of(taskId), Set.of(taskId)), onTaskConfigUpdateCaptures); + assertEquals(Arrays.asList(Collections.singleton(taskId), Collections.singleton(taskId)), onTaskConfigUpdateCaptures); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java index 7e6d072b9a720..33d76cbd6a6d5 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/MemoryStatusBackingStoreTest.java @@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test; -import java.util.Set; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertNull; @@ -44,7 +44,7 @@ public void putAndGetTaskStatus() { TaskStatus status = new TaskStatus(taskId, ConnectorStatus.State.RUNNING, "localhost:8083", 0); store.put(status); assertEquals(status, store.get(taskId)); - assertEquals(Set.of(status), store.getAll("connector")); + assertEquals(Collections.singleton(status), store.getAll("connector")); } @Test @@ -63,4 +63,5 @@ public void deleteTaskStatus() { store.put(new TaskStatus(taskId, ConnectorStatus.State.DESTROYED, "localhost:8083", 0)); assertNull(store.get(taskId)); } + } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java index 94b5bb0e78c81..23d17cd9970f8 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetStorageWriterTest.java @@ -54,8 +54,8 @@ public class OffsetStorageWriterTest { private static final String NAMESPACE = "namespace"; // Connect format - any types should be accepted here - private static final Map OFFSET_KEY = Map.of("key", "key"); - private static final Map OFFSET_VALUE = Map.of("key", 12); + private static final Map OFFSET_KEY = Collections.singletonMap("key", "key"); + private static final Map OFFSET_VALUE = Collections.singletonMap("key", 12); // Serialized private static final byte[] OFFSET_KEY_SERIALIZED = "key-serialized".getBytes(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java index 9cc0f34af8810..d4f0cf45203c0 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/storage/OffsetUtilsTest.java @@ -26,8 +26,8 @@ import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.Set; @@ -40,7 +40,7 @@ public class OffsetUtilsTest { private static final JsonConverter CONVERTER = new JsonConverter(); static { - CONVERTER.configure(Map.of(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); + CONVERTER.configure(Collections.singletonMap(JsonConverterConfig.SCHEMAS_ENABLE_CONFIG, "false"), true); } @Test @@ -60,18 +60,18 @@ public void testValidateFormatMapWithNonStringKeys() { @Test public void testValidateFormatMapWithNonPrimitiveKeys() { - Map offsetData = Map.of("key", new Object()); + Map offsetData = Collections.singletonMap("key", new Object()); DataException e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData)); assertTrue(e.getMessage().contains("Offsets may only contain primitive types as values")); - Map offsetData2 = Map.of("key", new ArrayList<>()); + Map offsetData2 = Collections.singletonMap("key", new ArrayList<>()); e = assertThrows(DataException.class, () -> OffsetUtils.validateFormat(offsetData2)); assertTrue(e.getMessage().contains("Offsets may only contain primitive types as values")); } @Test public void testValidateFormatWithValidFormat() { - Map offsetData = Map.of("key", 1); + Map offsetData = Collections.singletonMap("key", 1); // Expect no exception to be thrown OffsetUtils.validateFormat(offsetData); } @@ -99,17 +99,17 @@ public void testProcessPartitionKeyNotList() { @Test public void testProcessPartitionKeyListWithOneElement() { assertInvalidPartitionKey( - serializePartitionKey(List.of("")), + serializePartitionKey(Collections.singletonList("")), "Ignoring offset partition key with an unexpected number of elements"); } @Test public void testProcessPartitionKeyListWithElementsOfWrongType() { assertInvalidPartitionKey( - serializePartitionKey(List.of(1, new HashMap<>())), + serializePartitionKey(Arrays.asList(1, new HashMap<>())), "Ignoring offset partition key with an unexpected format for the first element in the partition key list"); assertInvalidPartitionKey( - serializePartitionKey(List.of("connector-name", new ArrayList<>())), + serializePartitionKey(Arrays.asList("connector-name", new ArrayList<>())), "Ignoring offset partition key with an unexpected format for the second element in the partition key list"); } @@ -128,7 +128,7 @@ public void assertInvalidPartitionKey(byte[] key, String message) { public void testProcessPartitionKeyValidList() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(OffsetUtils.class)) { Map>> connectorPartitions = new HashMap<>(); - OffsetUtils.processPartitionKey(serializePartitionKey(List.of("connector-name", new HashMap<>())), new byte[0], CONVERTER, connectorPartitions); + OffsetUtils.processPartitionKey(serializePartitionKey(Arrays.asList("connector-name", new HashMap<>())), new byte[0], CONVERTER, connectorPartitions); assertEquals(1, connectorPartitions.size()); assertEquals(0, logCaptureAppender.getMessages().size()); } @@ -139,7 +139,7 @@ public void testProcessPartitionKeyNullPartition() { try (LogCaptureAppender logCaptureAppender = LogCaptureAppender.createAndRegister(OffsetUtils.class)) { Map>> connectorPartitions = new HashMap<>(); OffsetUtils.processPartitionKey(serializePartitionKey(Arrays.asList("connector-name", null)), new byte[0], CONVERTER, connectorPartitions); - assertEquals(Map.of(), connectorPartitions); + assertEquals(Collections.emptyMap(), connectorPartitions); assertEquals(0, logCaptureAppender.getMessages().size()); } } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java index 95216af9be696..7b2e8d7cfa733 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/ConnectUtilsTest.java @@ -27,6 +27,7 @@ import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -124,7 +125,7 @@ public void testOverrideWarning() { "thanks to newly-introduced federal legislation", false) ); - assertEquals(Map.of("\u1984", "big brother"), props); + assertEquals(Collections.singletonMap("\u1984", "big brother"), props); props.clear(); props.put("\u1984", "BIG BROTHER"); @@ -140,7 +141,7 @@ public void testOverrideWarning() { "thanks to newly-introduced federal legislation", true) ); - assertEquals(Map.of("\u1984", "big brother"), props); + assertEquals(Collections.singletonMap("\u1984", "big brother"), props); } @Test diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java index defac44851b08..aabf894e1ea90 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/KafkaBasedLogTest.java @@ -49,7 +49,9 @@ import org.mockito.quality.Strictness; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Optional; @@ -98,7 +100,7 @@ public class KafkaBasedLogTest { CONSUMER_PROPS.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); } - private static final Set CONSUMER_ASSIGNMENT = Set.of(TP0, TP1); + private static final Set CONSUMER_ASSIGNMENT = new HashSet<>(Arrays.asList(TP0, TP1)); private static final Map FIRST_SET = new HashMap<>(); static { FIRST_SET.put("key", "value"); @@ -151,7 +153,7 @@ protected MockConsumer createConsumer() { } }; consumer = new MockConsumer<>(AutoOffsetResetStrategy.EARLIEST.name()); - consumer.updatePartitions(TOPIC, List.of(TPINFO0, TPINFO1)); + consumer.updatePartitions(TOPIC, Arrays.asList(TPINFO0, TPINFO1)); Map beginningOffsets = new HashMap<>(); beginningOffsets.put(TP0, 0L); beginningOffsets.put(TP1, 0L); @@ -406,7 +408,7 @@ public void testGetOffsetsConsumerErrorOnReadToEnd() throws Exception { @Test public void testOffsetReadFailureWhenWorkThreadFails() throws Exception { RuntimeException exception = new RuntimeException(); - Set tps = Set.of(TP0, TP1); + Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); @@ -480,7 +482,7 @@ public void testProducerError() { @Test public void testReadEndOffsetsUsingAdmin() { - Set tps = Set.of(TP0, TP1); + Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); @@ -496,7 +498,7 @@ public void testReadEndOffsetsUsingAdmin() { @Test public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() { - Set tps = Set.of(TP0, TP1); + Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); admin = mock(TopicAdmin.class); // Getting end offsets using the admin client should fail with unsupported version when(admin.retryEndOffsets(eq(tps), any(), anyLong())).thenThrow(new UnsupportedVersionException("too old")); @@ -514,7 +516,7 @@ public void testReadEndOffsetsUsingAdminThatFailsWithUnsupported() { @Test public void testReadEndOffsetsUsingAdminThatFailsWithRetriable() { - Set tps = Set.of(TP0, TP1); + Set tps = new HashSet<>(Arrays.asList(TP0, TP1)); Map endOffsets = new HashMap<>(); endOffsets.put(TP0, 0L); endOffsets.put(TP1, 0L); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java index 22ffd21d5de16..2fb788a1f495a 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SharedTopicAdminTest.java @@ -26,6 +26,7 @@ import org.mockito.quality.Strictness; import java.time.Duration; +import java.util.Collections; import java.util.Map; import java.util.function.Function; @@ -42,7 +43,7 @@ @MockitoSettings(strictness = Strictness.STRICT_STUBS) public class SharedTopicAdminTest { - private static final Map EMPTY_CONFIG = Map.of(); + private static final Map EMPTY_CONFIG = Collections.emptyMap(); @Mock private TopicAdmin mockTopicAdmin; @Mock private Function, TopicAdmin> factory; diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java index b22602872f18c..268b27e19d8b2 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/SinkUtilsTest.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -43,7 +44,7 @@ public void testConsumerGroupOffsetsToConnectorOffsets() { connectorOffsets = SinkUtils.consumerGroupOffsetsToConnectorOffsets(consumerGroupOffsets); assertEquals(1, connectorOffsets.offsets().size()); - assertEquals(Map.of(SinkUtils.KAFKA_OFFSET_KEY, 100L), connectorOffsets.offsets().get(0).offset()); + assertEquals(Collections.singletonMap(SinkUtils.KAFKA_OFFSET_KEY, 100L), connectorOffsets.offsets().get(0).offset()); Map expectedPartition = new HashMap<>(); expectedPartition.put(SinkUtils.KAFKA_TOPIC_KEY, "test-topic"); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java index 9bcf117e73fa2..dee4a24106e36 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TableTest.java @@ -44,4 +44,5 @@ public void basicOperations() { assertNull(table.get("foo", 6)); assertTrue(table.row("foo").isEmpty()); } + } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java index b40683865b454..1f25dd15f514c 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicAdminTest.java @@ -65,10 +65,12 @@ import java.util.Arrays; import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; import java.util.stream.Stream; import static org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic; @@ -160,8 +162,8 @@ public void shouldNotCreateTopicWhenItAlreadyExists() { NewTopic newTopic = TopicAdmin.defineTopic("myTopic").partitions(1).compacted().build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, "myTopic", List.of(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, "myTopic", Collections.singletonList(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); assertFalse(admin.createTopic(newTopic)); assertTrue(admin.createTopics(newTopic).isEmpty()); @@ -306,12 +308,12 @@ public void describeShouldReturnTopicDescriptionWhenTopicExists() { NewTopic newTopic = TopicAdmin.defineTopic(topicName).partitions(1).compacted().build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); Map desc = admin.describeTopics(newTopic.name()); assertFalse(desc.isEmpty()); - TopicDescription topicDesc = new TopicDescription(topicName, false, List.of(topicPartitionInfo)); + TopicDescription topicDesc = new TopicDescription(topicName, false, Collections.singletonList(topicPartitionInfo)); assertEquals(desc.get("myTopic"), topicDesc); } } @@ -380,14 +382,14 @@ public void describeTopicConfigShouldReturnMapWithNullValueWhenTopicDoesNotExist public void describeTopicConfigShouldReturnTopicConfigWhenTopicExists() { String topicName = "myTopic"; NewTopic newTopic = TopicAdmin.defineTopic(topicName) - .config(Map.of("foo", "bar")) + .config(Collections.singletonMap("foo", "bar")) .partitions(1) .compacted() .build(); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), null); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), null); TopicAdmin admin = new TopicAdmin(mockAdminClient); Map result = admin.describeTopicConfigs(newTopic.name()); assertFalse(result.isEmpty()); @@ -437,11 +439,11 @@ public void verifyingTopicCleanupPolicyShouldReturnFalseWhenTopicAuthorizationEr @Test public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy() { String topicName = "myTopic"; - Map topicConfigs = Map.of("cleanup.policy", "compact"); + Map topicConfigs = Collections.singletonMap("cleanup.policy", "compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); boolean result = admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose"); assertTrue(result); @@ -451,11 +453,11 @@ public void verifyingTopicCleanupPolicyShouldReturnTrueWhenTopicHasCorrectPolicy @Test public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() { String topicName = "myTopic"; - Map topicConfigs = Map.of("cleanup.policy", "delete"); + Map topicConfigs = Collections.singletonMap("cleanup.policy", "delete"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); @@ -465,11 +467,11 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeletePolicy() { @Test public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPolicy() { String topicName = "myTopic"; - Map topicConfigs = Map.of("cleanup.policy", "delete,compact"); + Map topicConfigs = Collections.singletonMap("cleanup.policy", "delete,compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); ConfigException e = assertThrows(ConfigException.class, () -> admin.verifyTopicCleanupPolicyOnlyCompact("myTopic", "worker.topic", "purpose")); assertTrue(e.getMessage().contains("to guarantee consistency and durability")); @@ -479,11 +481,11 @@ public void verifyingTopicCleanupPolicyShouldFailWhenTopicHasDeleteAndCompactPol @Test public void verifyingGettingTopicCleanupPolicies() { String topicName = "myTopic"; - Map topicConfigs = Map.of("cleanup.policy", "compact"); + Map topicConfigs = Collections.singletonMap("cleanup.policy", "compact"); Cluster cluster = createCluster(1); try (MockAdminClient mockAdminClient = new MockAdminClient(cluster.nodes(), cluster.nodeById(0))) { - TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), List.of()); - mockAdminClient.addTopic(false, topicName, List.of(topicPartitionInfo), topicConfigs); + TopicPartitionInfo topicPartitionInfo = new TopicPartitionInfo(0, cluster.nodeById(0), cluster.nodes(), Collections.emptyList()); + mockAdminClient.addTopic(false, topicName, Collections.singletonList(topicPartitionInfo), topicConfigs); TopicAdmin admin = new TopicAdmin(mockAdminClient); Set policies = admin.topicCleanupPolicy("myTopic"); assertEquals(1, policies.size()); @@ -500,7 +502,7 @@ public void verifyingGettingTopicCleanupPolicies() { public void retryEndOffsetsShouldRethrowUnknownVersionException() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -518,7 +520,7 @@ public void retryEndOffsetsShouldRethrowUnknownVersionException() { public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); @@ -547,7 +549,7 @@ public void retryEndOffsetsShouldWrapNonRetriableExceptionsWithConnectException( public void retryEndOffsetsShouldRetryWhenTopicNotFound() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = 1000L; Cluster cluster = createCluster(1, "myTopic", 1); @@ -559,7 +561,7 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { TopicAdmin admin = new TopicAdmin(env.adminClient()); Map endoffsets = admin.retryEndOffsets(tps, Duration.ofMillis(100), 1); - assertEquals(Map.of(tp1, offset), endoffsets); + assertEquals(Collections.singletonMap(tp1, offset), endoffsets); } } @@ -567,7 +569,7 @@ public void retryEndOffsetsShouldRetryWhenTopicNotFound() { public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -584,7 +586,7 @@ public void endOffsetsShouldFailWithNonRetriableWhenAuthorizationFailureOccurs() public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -600,7 +602,7 @@ public void endOffsetsShouldFailWithUnsupportedVersionWhenVersionUnsupportedErro public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv( @@ -618,7 +620,7 @@ public void endOffsetsShouldFailWithTimeoutExceptionWhenTimeoutErrorOccurs() { public void endOffsetsShouldFailWithNonRetriableWhenUnknownErrorOccurs() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Long offset = null; // response should use error Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -637,7 +639,7 @@ public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() { Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { TopicAdmin admin = new TopicAdmin(env.adminClient()); - Map offsets = admin.endOffsets(Set.of()); + Map offsets = admin.endOffsets(Collections.emptySet()); assertTrue(offsets.isEmpty()); } } @@ -646,7 +648,7 @@ public void endOffsetsShouldReturnEmptyMapWhenPartitionsSetIsNull() { public void endOffsetsShouldReturnOffsetsForOnePartition() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); long offset = 1000L; Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { @@ -665,7 +667,7 @@ public void endOffsetsShouldReturnOffsetsForMultiplePartitions() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); TopicPartition tp2 = new TopicPartition(topicName, 1); - Set tps = Set.of(tp1, tp2); + Set tps = new HashSet<>(Arrays.asList(tp1, tp2)); long offset1 = 1001; long offset2 = 1002; Cluster cluster = createCluster(1, topicName, 2); @@ -685,7 +687,7 @@ public void endOffsetsShouldReturnOffsetsForMultiplePartitions() { public void endOffsetsShouldFailWhenAnyTopicPartitionHasError() { String topicName = "myTopic"; TopicPartition tp1 = new TopicPartition(topicName, 0); - Set tps = Set.of(tp1); + Set tps = Collections.singleton(tp1); Cluster cluster = createCluster(1, topicName, 1); try (AdminClientUnitTestEnv env = new AdminClientUnitTestEnv(new MockTime(), cluster)) { env.kafkaClient().setNodeApiVersions(NodeApiVersions.create()); @@ -717,8 +719,8 @@ private Cluster createCluster(int numNodes, String topicName, int partitions) { "mockClusterId", nodes.values(), pInfos, - Set.of(), - Set.of(), + Collections.emptySet(), + Collections.emptySet(), leader); } @@ -736,9 +738,9 @@ private MetadataResponse prepareMetadataResponse(Cluster cluster, Errors topicEr .setPartitionIndex(pInfo.partition()) .setLeaderId(pInfo.leader().id()) .setLeaderEpoch(234) - .setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).toList()) - .setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).toList()) - .setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).toList()); + .setReplicaNodes(Arrays.stream(pInfo.replicas()).map(Node::id).collect(Collectors.toList())) + .setIsrNodes(Arrays.stream(pInfo.inSyncReplicas()).map(Node::id).collect(Collectors.toList())) + .setOfflineReplicas(Arrays.stream(pInfo.offlineReplicas()).map(Node::id).collect(Collectors.toList())); pms.add(pm); } MetadataResponseTopic tm = new MetadataResponseTopic() @@ -786,7 +788,7 @@ private ListOffsetsResponse listOffsetsResultWithClusterAuthorizationException(T } private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1) { - return listOffsetsResult(null, Map.of(tp1, offset1)); + return listOffsetsResult(null, Collections.singletonMap(tp1, offset1)); } private ListOffsetsResponse listOffsetsResult(TopicPartition tp1, Long offset1, TopicPartition tp2, Long offset2) { @@ -888,7 +890,7 @@ protected void assertTopic(MockAdminClient admin, String topicName, int expected protected TopicDescription topicDescription(MockAdminClient admin, String topicName) throws ExecutionException, InterruptedException { - DescribeTopicsResult result = admin.describeTopics(Set.of(topicName)); + DescribeTopicsResult result = admin.describeTopics(Collections.singleton(topicName)); Map> byName = result.topicNameValues(); return byName.get(topicName).get(); } @@ -957,8 +959,8 @@ private DescribeConfigsResponse describeConfigsResponse(ApiError error, NewTopic .map(e -> new DescribeConfigsResponseData.DescribeConfigsResourceResult() .setName(e.getKey()) .setValue(e.getValue())) - .toList())) - .toList(); + .collect(Collectors.toList()))) + .collect(Collectors.toList()); return new DescribeConfigsResponse(new DescribeConfigsResponseData().setThrottleTimeMs(1000).setResults(results)); } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java index ecf25761e96fd..5d764d1603107 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/TopicCreationTest.java @@ -19,8 +19,6 @@ import org.apache.kafka.clients.admin.NewTopic; import org.apache.kafka.connect.data.Schema; -import org.apache.kafka.connect.runtime.ConnectMetrics; -import org.apache.kafka.connect.runtime.MockConnectMetrics; import org.apache.kafka.connect.runtime.SourceConnectorConfig; import org.apache.kafka.connect.runtime.TransformationStage; import org.apache.kafka.connect.runtime.WorkerConfig; @@ -33,10 +31,12 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Set; import java.util.concurrent.TimeUnit; import static org.apache.kafka.common.config.TopicConfig.CLEANUP_POLICY_COMPACT; @@ -80,8 +80,6 @@ public class TopicCreationTest { private static final short DEFAULT_REPLICATION_FACTOR = -1; private static final int DEFAULT_PARTITIONS = -1; - private static final ConnectMetrics METRICS = new MockConnectMetrics(); - private static final ConnectorTaskId CONNECTOR_TASK_ID = new ConnectorTaskId("test", 0); Map workerProps; WorkerConfig workerConfig; @@ -134,7 +132,7 @@ public void testTopicCreationWhenTopicCreationIsEnabled() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(topicCreation.defaultTopicGroup(), groups.get(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -153,7 +151,7 @@ public void testTopicCreationWhenTopicCreationIsDisabled() { assertFalse(topicCreation.isTopicCreationEnabled()); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertNull(topicCreation.defaultTopicGroup()); - assertEquals(Map.of(), topicCreation.topicGroups()); + assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); assertNull(topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -168,7 +166,7 @@ public void testEmptyTopicCreation() { assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertNull(topicCreation.defaultTopicGroup()); assertEquals(0, topicCreation.topicGroups().size()); - assertEquals(Map.of(), topicCreation.topicGroups()); + assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); assertNull(topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -186,14 +184,14 @@ public void withDefaultTopicCreation() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(DEFAULT_PARTITIONS, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(1, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); + assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -204,7 +202,7 @@ public void withDefaultTopicCreation() { assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name()); assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); - assertEquals(Map.of(), topicCreation.topicGroups()); + assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -214,7 +212,7 @@ public void withDefaultTopicCreation() { assertEquals(FOO_TOPIC, topicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, topicSpec.replicationFactor()); assertEquals(DEFAULT_PARTITIONS, topicSpec.numPartitions()); - assertEquals(Map.of(), topicSpec.configs()); + assertEquals(Collections.emptyMap(), topicSpec.configs()); } @Test @@ -240,14 +238,14 @@ public void topicCreationWithDefaultGroupAndCustomProps() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(replicas, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(topicProps, sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(1, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); + assertEquals(Collections.singleton(DEFAULT_TOPIC_CREATION_GROUP), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -258,7 +256,7 @@ public void topicCreationWithDefaultGroupAndCustomProps() { assertEquals(DEFAULT_TOPIC_CREATION_GROUP, group.name()); assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); - assertEquals(Map.of(), topicCreation.topicGroups()); + assertEquals(Collections.emptyMap(), topicCreation.topicGroups()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -291,14 +289,14 @@ public void topicCreationWithOneGroup() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(2, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet()); + assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -317,7 +315,7 @@ public void topicCreationWithOneGroup() { assertTrue(topicCreation.isTopicCreationEnabled()); assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(1, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(Collections.singleton(FOO_GROUP), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); @@ -327,7 +325,7 @@ public void topicCreationWithOneGroup() { assertEquals(BAR_TOPIC, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Map.of(), defaultTopicSpec.configs()); + assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); @@ -356,14 +354,14 @@ public void topicCreationWithOneGroupAndCombinedRegex() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(2, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP), groups.keySet()); + assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP)), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -383,7 +381,7 @@ public void topicCreationWithOneGroupAndCombinedRegex() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(1, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(Collections.singleton(FOO_GROUP), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(fooGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -433,14 +431,14 @@ public void topicCreationWithTwoGroups() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(3, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet()); + assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -464,7 +462,7 @@ public void topicCreationWithTwoGroups() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -478,7 +476,7 @@ public void topicCreationWithTwoGroups() { assertEquals(otherTopic, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Map.of(), defaultTopicSpec.configs()); + assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); @@ -512,12 +510,12 @@ public void testTopicCreationWithSingleTransformation() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertEquals(groups.get(DEFAULT_TOPIC_CREATION_GROUP), topicCreation.defaultTopicGroup()); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); assertEquals(topicCreation.defaultTopicGroup(), topicCreation.findFirstGroup(FOO_TOPIC)); topicCreation.addTopic(FOO_TOPIC); assertFalse(topicCreation.isTopicCreationRequired(FOO_TOPIC)); - List> transformationStages = sourceConfig.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS); + List> transformationStages = sourceConfig.transformationStages(); assertEquals(1, transformationStages.size()); TransformationStage xform = transformationStages.get(0); SourceRecord transformed = xform.apply(new SourceRecord(null, null, "topic", 0, null, null, Schema.INT8_SCHEMA, 42)); @@ -565,14 +563,14 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertTrue(sourceConfig.usesTopicCreation()); assertEquals(DEFAULT_REPLICATION_FACTOR, (short) sourceConfig.topicCreationReplicationFactor(DEFAULT_TOPIC_CREATION_GROUP)); assertEquals(partitions, (int) sourceConfig.topicCreationPartitions(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(List.of(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); - assertEquals(Map.of(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.singletonList(".*"), sourceConfig.topicCreationInclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyList(), sourceConfig.topicCreationExclude(DEFAULT_TOPIC_CREATION_GROUP)); + assertEquals(Collections.emptyMap(), sourceConfig.topicCreationOtherConfigs(DEFAULT_TOPIC_CREATION_GROUP)); // verify topic creation group is instantiated correctly Map groups = TopicCreationGroup.configuredGroups(sourceConfig); assertEquals(3, groups.size()); - assertEquals(Set.of(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP), groups.keySet()); + assertEquals(new HashSet<>(Arrays.asList(DEFAULT_TOPIC_CREATION_GROUP, FOO_GROUP, BAR_GROUP)), groups.keySet()); // verify topic creation TopicCreation topicCreation = TopicCreation.newTopicCreation(workerConfig, groups); @@ -596,7 +594,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertTrue(topicCreation.isTopicCreationRequired(FOO_TOPIC)); assertTrue(topicCreation.isTopicCreationRequired(BAR_TOPIC)); assertEquals(2, topicCreation.topicGroups().size()); - assertEquals(Set.of(FOO_GROUP, BAR_GROUP), topicCreation.topicGroups().keySet()); + assertEquals(new HashSet<>(Arrays.asList(FOO_GROUP, BAR_GROUP)), topicCreation.topicGroups().keySet()); assertEquals(fooGroup, topicCreation.findFirstGroup(FOO_TOPIC)); assertEquals(barGroup, topicCreation.findFirstGroup(BAR_TOPIC)); topicCreation.addTopic(FOO_TOPIC); @@ -610,7 +608,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertEquals(otherTopic, defaultTopicSpec.name()); assertEquals(DEFAULT_REPLICATION_FACTOR, defaultTopicSpec.replicationFactor()); assertEquals(partitions, defaultTopicSpec.numPartitions()); - assertEquals(Map.of(), defaultTopicSpec.configs()); + assertEquals(Collections.emptyMap(), defaultTopicSpec.configs()); NewTopic fooTopicSpec = topicCreation.findFirstGroup(FOO_TOPIC).newTopic(FOO_TOPIC); assertEquals(FOO_TOPIC, fooTopicSpec.name()); @@ -624,7 +622,7 @@ public void topicCreationWithTwoGroupsAndTwoTransformations() { assertEquals(barPartitions, barTopicSpec.numPartitions()); assertEquals(barTopicProps, barTopicSpec.configs()); - List> transformationStages = sourceConfig.transformationStages(MOCK_PLUGINS, CONNECTOR_TASK_ID, METRICS); + List> transformationStages = sourceConfig.transformationStages(); assertEquals(2, transformationStages.size()); TransformationStage castXForm = transformationStages.get(0); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java index c901361cb64b4..8dc22edb86309 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/ConnectAssertions.java @@ -25,6 +25,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Arrays; import java.util.Collection; import java.util.HashSet; import java.util.Map; @@ -146,7 +147,7 @@ protected Optional checkBrokersUp(int numBrokers, BiFunction topicNameSet = Set.of(topicNames); + Set topicNameSet = new HashSet<>(Arrays.asList(topicNames)); AtomicReference> existingTopics = new AtomicReference<>(topicNameSet); waitForCondition( () -> checkTopicsExist(topicNameSet, (actual, expected) -> { @@ -163,7 +164,7 @@ public void assertTopicsDoNotExist(String... topicNames) throws InterruptedExcep * @param topicNames the names of the topics that are expected to exist */ public void assertTopicsExist(String... topicNames) throws InterruptedException { - Set topicNameSet = Set.of(topicNames); + Set topicNameSet = new HashSet<>(Arrays.asList(topicNames)); AtomicReference> missingTopics = new AtomicReference<>(topicNameSet); waitForCondition( () -> checkTopicsExist(topicNameSet, (actual, expected) -> { diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java index ddd7eab4e8b3e..b576cda56a75d 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnect.java @@ -47,6 +47,7 @@ import java.io.IOException; import java.nio.charset.StandardCharsets; import java.util.Collection; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Map; @@ -281,7 +282,7 @@ public String configureConnector(CreateConnectorRequest createConnectorRequest) throw new ConnectException("Failed to serialize connector creation request: " + createConnectorRequest); } - Response response = requestPost(url, requestBody, Map.of()); + Response response = requestPost(url, requestBody, Collections.emptyMap()); if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) { return responseToString(response); } else { @@ -448,7 +449,7 @@ public void resumeConnector(String connName) { */ public void restartConnector(String connName) { String url = endpointForResource(String.format("connectors/%s/restart", connName)); - Response response = requestPost(url, "", Map.of()); + Response response = requestPost(url, "", Collections.emptyMap()); if (response.getStatus() >= Response.Status.BAD_REQUEST.getStatusCode()) { throw new ConnectRestException(response.getStatus(), "Could not execute POST request. Error response: " + responseToString(response)); @@ -465,7 +466,7 @@ public void restartConnector(String connName) { */ public void restartTask(String connName, int taskNum) { String url = endpointForResource(String.format("connectors/%s/tasks/%d/restart", connName, taskNum)); - Response response = requestPost(url, "", Map.of()); + Response response = requestPost(url, "", Collections.emptyMap()); if (response.getStatus() >= Response.Status.BAD_REQUEST.getStatusCode()) { throw new ConnectRestException(response.getStatus(), "Could not execute POST request. Error response: " + responseToString(response)); @@ -491,10 +492,10 @@ public ConnectorStateInfo restartConnectorAndTasks(String connName, boolean only } else { restartEndpoint = endpointForResource(restartPath); } - Response response = requestPost(restartEndpoint, "", Map.of()); + Response response = requestPost(restartEndpoint, "", Collections.emptyMap()); try { if (response.getStatus() < Response.Status.BAD_REQUEST.getStatusCode()) { - //only the 202 status returns a body + //only the 202 stauts returns a body if (response.getStatus() == Response.Status.ACCEPTED.getStatusCode()) { return mapper.readerFor(ConnectorStateInfo.class) .readValue(responseToString(response)); @@ -576,7 +577,7 @@ public ActiveTopicsInfo connectorTopics(String connectorName) { .readerFor(new TypeReference>>>() { }) .readValue(responseToString(response)); return new ActiveTopicsInfo(connectorName, - activeTopics.get(connectorName).getOrDefault("topics", List.of())); + activeTopics.get(connectorName).getOrDefault("topics", Collections.emptyList())); } } catch (IOException e) { log.error("Could not read connector state from response: {}", @@ -687,7 +688,7 @@ public ConnectorOffsets connectorOffsets(String connectorName) { public String alterSourceConnectorOffset(String connectorName, Map partition, Map offset) { return alterConnectorOffsets( connectorName, - new ConnectorOffsets(List.of(new ConnectorOffset(partition, offset))) + new ConnectorOffsets(Collections.singletonList(new ConnectorOffset(partition, offset))) ); } @@ -704,7 +705,7 @@ public String alterSourceConnectorOffset(String connectorName, Map pa public String alterSinkConnectorOffset(String connectorName, TopicPartition topicPartition, Long offset) { return alterConnectorOffsets( connectorName, - SinkUtils.consumerGroupOffsetsToConnectorOffsets(Map.of(topicPartition, new OffsetAndMetadata(offset))) + SinkUtils.consumerGroupOffsetsToConnectorOffsets(Collections.singletonMap(topicPartition, new OffsetAndMetadata(offset))) ); } @@ -928,7 +929,7 @@ public EmbeddedKafkaCluster kafka() { * @throws ConnectException if execution of the GET request fails */ public Response requestGet(String url) { - return requestHttpMethod(url, null, Map.of(), "GET"); + return requestHttpMethod(url, null, Collections.emptyMap(), "GET"); } /** @@ -940,7 +941,7 @@ public Response requestGet(String url) { * @throws ConnectException if execution of the PUT request fails */ public Response requestPut(String url, String body) { - return requestHttpMethod(url, body, Map.of(), "PUT"); + return requestHttpMethod(url, body, Collections.emptyMap(), "PUT"); } /** @@ -965,7 +966,7 @@ public Response requestPost(String url, String body, Map headers * @throws ConnectException if execution of the PATCH request fails */ public Response requestPatch(String url, String body) { - return requestHttpMethod(url, body, Map.of(), "PATCH"); + return requestHttpMethod(url, body, Collections.emptyMap(), "PATCH"); } /** @@ -976,7 +977,7 @@ public Response requestPatch(String url, String body) { * @throws ConnectException if execution of the DELETE request fails */ public Response requestDelete(String url) { - return requestHttpMethod(url, null, Map.of(), "DELETE"); + return requestHttpMethod(url, null, Collections.emptyMap(), "DELETE"); } /** @@ -1009,7 +1010,7 @@ protected Response requestHttpMethod(String url, String body, Map workers() { return new LinkedHashSet<>(connectCluster); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java index 230d293b51b6d..5678b97bb1314 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedConnectStandalone.java @@ -18,7 +18,6 @@ import org.apache.kafka.connect.cli.ConnectStandalone; import org.apache.kafka.connect.runtime.Connect; -import org.apache.kafka.connect.runtime.ConnectMetrics; import org.apache.kafka.connect.runtime.standalone.StandaloneHerder; import org.apache.kafka.test.TestUtils; @@ -31,6 +30,7 @@ import java.io.UncheckedIOException; import java.nio.file.Files; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -62,7 +62,6 @@ public class EmbeddedConnectStandalone extends EmbeddedConnect { private final String offsetsFile; private volatile WorkerHandle connectWorker; - private Connect connect; private EmbeddedConnectStandalone( int numBrokers, @@ -93,7 +92,7 @@ public void startConnect() { workerProps.putIfAbsent(PLUGIN_DISCOVERY_CONFIG, "hybrid_fail"); ConnectStandalone cli = new ConnectStandalone(); - connect = cli.startConnect(workerProps); + Connect connect = cli.startConnect(workerProps); connectWorker = new WorkerHandle("standalone", connect); cli.processExtraArgs(connect, connectorConfigFiles()); } @@ -108,8 +107,8 @@ public String toString() { @Override protected Set workers() { return connectWorker != null - ? Set.of(connectWorker) - : Set.of(); + ? Collections.singleton(connectWorker) + : Collections.emptySet(); } public Response healthCheck() { @@ -138,10 +137,6 @@ private String[] connectorConfigFiles() { return result; } - public ConnectMetrics connectMetrics() { - return connect.herder().connectMetrics(); - } - public static class Builder extends EmbeddedConnectBuilder { private final List> connectorConfigs = new ArrayList<>(); diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java index 7913d60fc2837..5d075ab75e0de 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/EmbeddedKafkaCluster.java @@ -63,7 +63,9 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -110,7 +112,7 @@ public class EmbeddedKafkaCluster { private KafkaProducer producer; public EmbeddedKafkaCluster(final int numBrokers, final Properties brokerConfig) { - this(numBrokers, brokerConfig, Map.of()); + this(numBrokers, brokerConfig, Collections.emptyMap()); } public EmbeddedKafkaCluster(final int numBrokers, @@ -179,7 +181,7 @@ public void start() { */ public void verifyClusterReadiness() { String consumerGroupId = UUID.randomUUID().toString(); - Map consumerConfig = Map.of(GROUP_ID_CONFIG, consumerGroupId); + Map consumerConfig = Collections.singletonMap(GROUP_ID_CONFIG, consumerGroupId); String topic = "consumer-warmup-" + consumerGroupId; try { @@ -203,8 +205,8 @@ public void verifyClusterReadiness() { } try (Admin admin = createAdminClient()) { - admin.deleteConsumerGroups(Set.of(consumerGroupId)).all().get(30, TimeUnit.SECONDS); - admin.deleteTopics(Set.of(topic)).all().get(30, TimeUnit.SECONDS); + admin.deleteConsumerGroups(Collections.singleton(consumerGroupId)).all().get(30, TimeUnit.SECONDS); + admin.deleteTopics(Collections.singleton(topic)).all().get(30, TimeUnit.SECONDS); } catch (InterruptedException | ExecutionException | TimeoutException e) { throw new AssertionError("Failed to clean up cluster health check resource(s)", e); } @@ -280,7 +282,7 @@ public boolean sslEnabled() { * @return the map of optional {@link TopicDescription} keyed by the topic name */ public Map> describeTopics(String... topicNames) { - return describeTopics(Set.of(topicNames)); + return describeTopics(new HashSet<>(Arrays.asList(topicNames))); } /** @@ -353,7 +355,7 @@ public void createTopic(String topic) { * @param topic The name of the topic. */ public void createTopic(String topic, int partitions) { - createTopic(topic, partitions, 1, Map.of()); + createTopic(topic, partitions, 1, Collections.emptyMap()); } /** @@ -362,7 +364,7 @@ public void createTopic(String topic, int partitions) { * @param topic The name of the topic. */ public void createTopic(String topic, int partitions, int replication, Map topicConfig) { - createTopic(topic, partitions, replication, topicConfig, Map.of()); + createTopic(topic, partitions, replication, topicConfig, Collections.emptyMap()); } /** @@ -386,7 +388,7 @@ public void createTopic(String topic, int partitions, int replication, Map adminClientConfig) { } public Admin createAdminClient() { - return createAdminClient(Map.of()); + return createAdminClient(Collections.emptyMap()); } /** @@ -447,7 +449,7 @@ public Admin createAdminClient() { * @return a {@link ConsumerRecords} collection containing at least n records. */ public ConsumerRecords consume(int n, long maxDuration, String... topics) { - return consume(n, maxDuration, Map.of(), topics); + return consume(n, maxDuration, Collections.emptyMap(), topics); } /** @@ -523,10 +525,10 @@ public ConsumerRecords consumeAll( long remainingTimeMs; Set topicPartitions; Map endOffsets; - try (Admin admin = createAdminClient(adminProps != null ? adminProps : Map.of())) { + try (Admin admin = createAdminClient(adminProps != null ? adminProps : Collections.emptyMap())) { remainingTimeMs = endTimeMs - System.currentTimeMillis(); - topicPartitions = listPartitions(remainingTimeMs, admin, List.of(topics)); + topicPartitions = listPartitions(remainingTimeMs, admin, Arrays.asList(topics)); remainingTimeMs = endTimeMs - System.currentTimeMillis(); endOffsets = readEndOffsets(remainingTimeMs, admin, topicPartitions); @@ -538,7 +540,7 @@ public ConsumerRecords consumeAll( tp -> new ArrayList<>() )); Map nextOffsets = new HashMap<>(); - try (Consumer consumer = createConsumer(consumerProps != null ? consumerProps : Map.of())) { + try (Consumer consumer = createConsumer(consumerProps != null ? consumerProps : Collections.emptyMap())) { consumer.assign(topicPartitions); while (!endOffsets.isEmpty()) { @@ -554,7 +556,7 @@ public ConsumerRecords consumeAll( } else { remainingTimeMs = endTimeMs - System.currentTimeMillis(); if (remainingTimeMs <= 0) { - throw new AssertionError("failed to read to end of topic(s) " + List.of(topics) + " within " + maxDurationMs + "ms"); + throw new AssertionError("failed to read to end of topic(s) " + Arrays.asList(topics) + " within " + maxDurationMs + "ms"); } // We haven't reached the end offset yet; need to keep polling ConsumerRecords recordBatch = consumer.poll(Duration.ofMillis(remainingTimeMs)); @@ -572,7 +574,7 @@ public ConsumerRecords consumeAll( public long endOffset(TopicPartition topicPartition) throws TimeoutException, InterruptedException, ExecutionException { try (Admin admin = createAdminClient()) { - Map offsets = Map.of( + Map offsets = Collections.singletonMap( topicPartition, OffsetSpec.latest() ); return admin.listOffsets(offsets) @@ -661,9 +663,9 @@ public KafkaConsumer createConsumerAndSubscribeTo(Map createConsumerAndSubscribeTo(Map consumerProps, ConsumerRebalanceListener rebalanceListener, String... topics) { KafkaConsumer consumer = createConsumer(consumerProps); if (rebalanceListener != null) { - consumer.subscribe(List.of(topics), rebalanceListener); + consumer.subscribe(Arrays.asList(topics), rebalanceListener); } else { - consumer.subscribe(List.of(topics)); + consumer.subscribe(Arrays.asList(topics)); } return consumer; } diff --git a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java index a3f67cd06bbd8..ad9c2917bbd3f 100644 --- a/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java +++ b/connect/runtime/src/test/java/org/apache/kafka/connect/util/clusters/WorkerHandle.java @@ -114,9 +114,10 @@ public boolean equals(Object o) { if (this == o) { return true; } - if (!(o instanceof WorkerHandle that)) { + if (!(o instanceof WorkerHandle)) { return false; } + WorkerHandle that = (WorkerHandle) o; return Objects.equals(workerName, that.workerName) && Objects.equals(worker, that.worker); } diff --git a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector index 818d09e618717..56e054ddbeb9a 100644 --- a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector +++ b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.sink.SinkConnector @@ -17,6 +17,5 @@ org.apache.kafka.connect.integration.BlockingConnectorTest$BlockingSinkConnector org.apache.kafka.connect.integration.BlockingConnectorTest$TaskInitializeBlockingSinkConnector org.apache.kafka.connect.integration.ErrantRecordSinkConnector org.apache.kafka.connect.integration.MonitorableSinkConnector -org.apache.kafka.connect.integration.TestableSinkConnector org.apache.kafka.connect.runtime.SampleSinkConnector org.apache.kafka.connect.integration.ConnectWorkerIntegrationTest$EmptyTaskConfigsConnector \ No newline at end of file diff --git a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector index 8ff259f8878eb..73033ca23c02d 100644 --- a/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector +++ b/connect/runtime/src/test/resources/META-INF/services/org.apache.kafka.connect.source.SourceConnector @@ -21,6 +21,5 @@ org.apache.kafka.connect.integration.BlockingConnectorTest$BlockingSourceConnect org.apache.kafka.connect.integration.BlockingConnectorTest$TaskInitializeBlockingSourceConnector org.apache.kafka.connect.integration.ExactlyOnceSourceIntegrationTest$NaughtyConnector org.apache.kafka.connect.integration.MonitorableSourceConnector -org.apache.kafka.connect.integration.TestableSourceConnector org.apache.kafka.connect.runtime.SampleSourceConnector org.apache.kafka.connect.runtime.rest.resources.ConnectorPluginsResourceTest$ConnectorPluginsResourceTestConnector diff --git a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java index f68b4eb4e581a..9f3de801f16c8 100644 --- a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java +++ b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v1/test/plugins/ReadVersionFromResource.java @@ -49,9 +49,9 @@ public void configure(final Map configs, final boolean isKey) { private String version(InputStream stream) throws IOException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { return reader.lines() - .filter(s -> !s.isEmpty() && !s.startsWith("#")) - .findFirst() - .get(); + .filter(s -> !s.isEmpty() && !s.startsWith("#")) + .collect(Collectors.toList()) + .get(0); } } diff --git a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java index 863ed9fad97dc..caeb4340d6e60 100644 --- a/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java +++ b/connect/runtime/src/test/resources/test-plugins/read-version-from-resource-v2/test/plugins/ReadVersionFromResource.java @@ -49,9 +49,9 @@ public void configure(final Map configs, final boolean isKey) { private String version(InputStream stream) throws IOException { try (BufferedReader reader = new BufferedReader(new InputStreamReader(stream))) { return reader.lines() - .filter(s -> !s.isEmpty() && !s.startsWith("#")) - .findFirst() - .get(); + .filter(s -> !s.isEmpty() && !s.startsWith("#")) + .collect(Collectors.toList()) + .get(0); } } diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java index f598feede8c1d..267466a4b0b52 100644 --- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java +++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockConnector.java @@ -24,6 +24,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.concurrent.Executors; @@ -90,7 +91,7 @@ public Class taskClass() { @Override public List> taskConfigs(int maxTasks) { log.debug("Creating single task for MockConnector"); - return List.of(config); + return Collections.singletonList(config); } @Override diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java index 49dc5e8a7e694..f69c58b99ab4c 100644 --- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java +++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/MockSourceTask.java @@ -23,6 +23,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -66,7 +67,7 @@ public List poll() { throw new RuntimeException(); } } - return List.of(); + return Collections.emptyList(); } @Override diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java index d79c133f67383..c40e0932e5317 100644 --- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java +++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/SchemaSourceTask.java @@ -27,6 +27,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.List; import java.util.Map; @@ -103,7 +104,7 @@ public void start(Map props) { } throttler = new ThroughputThrottler(throughput, System.currentTimeMillis()); - partition = Map.of(ID_FIELD, id); + partition = Collections.singletonMap(ID_FIELD, id); Map previousOffset = this.context.offsetStorageReader().offset(partition); if (previousOffset != null) { seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1; @@ -123,7 +124,7 @@ public List poll() { throttler.throttle(); } - Map ccOffset = Map.of(SEQNO_FIELD, seqno); + Map ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno); int partitionVal = (int) (seqno % partitionCount); final Struct data; final SourceRecord srcRecord; @@ -157,10 +158,10 @@ public List poll() { System.out.println("{\"task\": " + id + ", \"seqno\": " + seqno + "}"); seqno++; count++; - return List.of(srcRecord); + return Collections.singletonList(srcRecord); } else { throttler.throttle(); - return List.of(); + return Collections.emptyList(); } } diff --git a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java index 1fe2bd318023c..49151b40d1ebb 100644 --- a/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java +++ b/connect/test-plugins/src/main/java/org/apache/kafka/connect/tools/VerifiableSourceTask.java @@ -31,6 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -93,7 +94,7 @@ public void start(Map props) { throw new ConnectException("Invalid VerifiableSourceTask configuration", e); } - partition = Map.of(ID_FIELD, id); + partition = Collections.singletonMap(ID_FIELD, id); Map previousOffset = this.context.offsetStorageReader().offset(partition); if (previousOffset != null) seqno = (Long) previousOffset.get(SEQNO_FIELD) + 1; @@ -128,11 +129,11 @@ public List poll() { } System.out.println(dataJson); - Map ccOffset = Map.of(SEQNO_FIELD, seqno); + Map ccOffset = Collections.singletonMap(SEQNO_FIELD, seqno); Schema valueSchema = completeRecordData ? COMPLETE_VALUE_SCHEMA : Schema.INT64_SCHEMA; Object value = completeRecordData ? completeValue(data) : seqno; SourceRecord srcRecord = new SourceRecord(partition, ccOffset, topic, Schema.INT32_SCHEMA, id, valueSchema, value); - List result = List.of(srcRecord); + List result = Collections.singletonList(srcRecord); seqno++; return result; } diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java index 7c13ef4d785de..ffc0c8b8b715e 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/Cast.java @@ -231,26 +231,38 @@ private Schema getOrBuildSchema(Schema valueSchema) { } private SchemaBuilder convertFieldType(Schema.Type type) { - return switch (type) { - case INT8 -> SchemaBuilder.int8(); - case INT16 -> SchemaBuilder.int16(); - case INT32 -> SchemaBuilder.int32(); - case INT64 -> SchemaBuilder.int64(); - case FLOAT32 -> SchemaBuilder.float32(); - case FLOAT64 -> SchemaBuilder.float64(); - case BOOLEAN -> SchemaBuilder.bool(); - case STRING -> SchemaBuilder.string(); - default -> throw new DataException("Unexpected type in Cast transformation: " + type); - }; + switch (type) { + case INT8: + return SchemaBuilder.int8(); + case INT16: + return SchemaBuilder.int16(); + case INT32: + return SchemaBuilder.int32(); + case INT64: + return SchemaBuilder.int64(); + case FLOAT32: + return SchemaBuilder.float32(); + case FLOAT64: + return SchemaBuilder.float64(); + case BOOLEAN: + return SchemaBuilder.bool(); + case STRING: + return SchemaBuilder.string(); + default: + throw new DataException("Unexpected type in Cast transformation: " + type); + } } private static Object encodeLogicalType(Schema schema, Object value) { - return switch (schema.name()) { - case Date.LOGICAL_NAME -> Date.fromLogical(schema, (java.util.Date) value); - case Time.LOGICAL_NAME -> Time.fromLogical(schema, (java.util.Date) value); - case Timestamp.LOGICAL_NAME -> Timestamp.fromLogical(schema, (java.util.Date) value); - default -> value; - }; + switch (schema.name()) { + case Date.LOGICAL_NAME: + return Date.fromLogical(schema, (java.util.Date) value); + case Time.LOGICAL_NAME: + return Time.fromLogical(schema, (java.util.Date) value); + case Timestamp.LOGICAL_NAME: + return Timestamp.fromLogical(schema, (java.util.Date) value); + } + return value; } private static Object castValueToType(Schema schema, Object value, Schema.Type targetType) { @@ -271,17 +283,26 @@ private static Object castValueToType(Schema schema, Object value, Schema.Type t value = encodeLogicalType(schema, value); } - return switch (targetType) { - case INT8 -> castToInt8(value); - case INT16 -> castToInt16(value); - case INT32 -> castToInt32(value); - case INT64 -> castToInt64(value); - case FLOAT32 -> castToFloat32(value); - case FLOAT64 -> castToFloat64(value); - case BOOLEAN -> castToBoolean(value); - case STRING -> castToString(value); - default -> throw new DataException(targetType + " is not supported in the Cast transformation."); - }; + switch (targetType) { + case INT8: + return castToInt8(value); + case INT16: + return castToInt16(value); + case INT32: + return castToInt32(value); + case INT64: + return castToInt64(value); + case FLOAT32: + return castToFloat32(value); + case FLOAT64: + return castToFloat64(value); + case BOOLEAN: + return castToBoolean(value); + case STRING: + return castToString(value); + default: + throw new DataException(targetType + " is not supported in the Cast transformation."); + } } catch (NumberFormatException e) { throw new DataException("Value (" + value.toString() + ") was out of range for requested data type", e); } @@ -365,11 +386,14 @@ else if (value instanceof String) } private static String castToString(Object value) { - if (value instanceof java.util.Date dateValue) { + if (value instanceof java.util.Date) { + java.util.Date dateValue = (java.util.Date) value; return Values.dateFormatFor(dateValue).format(dateValue); - } else if (value instanceof ByteBuffer byteBuffer) { + } else if (value instanceof ByteBuffer) { + ByteBuffer byteBuffer = (ByteBuffer) value; return Base64.getEncoder().encodeToString(Utils.readBytes(byteBuffer)); - } else if (value instanceof byte[] rawBytes) { + } else if (value instanceof byte[]) { + byte[] rawBytes = (byte[]) value; return Base64.getEncoder().encodeToString(rawBytes); } else { return value.toString(); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java index cd87c33a5095e..c1d20a48c1d29 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/DropHeaders.java @@ -23,6 +23,7 @@ import org.apache.kafka.connect.header.ConnectHeaders; import org.apache.kafka.connect.header.Header; import org.apache.kafka.connect.header.Headers; +import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.util.HashSet; @@ -40,8 +41,7 @@ public class DropHeaders> implements Transformation MOVE; - case COPY_OPERATION -> COPY; - default -> throw new IllegalArgumentException(); - }; + switch (name) { + case MOVE_OPERATION: + return MOVE; + case COPY_OPERATION: + return COPY; + default: + throw new IllegalArgumentException(); + } } public String toString() { diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java index 7d37d548eb486..c3a45d9170e59 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/MaskField.java @@ -25,6 +25,7 @@ import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.data.Values; import org.apache.kafka.connect.errors.DataException; +import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.math.BigDecimal; @@ -54,8 +55,7 @@ public abstract class MaskField> implements Transform private static final String REPLACE_NULL_WITH_DEFAULT_CONFIG = "replace.null.with.default"; public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, - ConfigDef.ValidList.anyNonDuplicateValues(false, false), + .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, new NonEmptyListValidator(), ConfigDef.Importance.HIGH, "Names of fields to mask.") .define(REPLACEMENT_CONFIG, ConfigDef.Type.STRING, null, new ConfigDef.NonEmptyString(), ConfigDef.Importance.LOW, "Custom value replacement, that will be applied to all" @@ -65,30 +65,32 @@ public abstract class MaskField> implements Transform private static final String PURPOSE = "mask fields"; - private static final Map, Function> REPLACEMENT_MAPPING_FUNC = Map.of( - Byte.class, v -> Values.convertToByte(null, v), - Short.class, v -> Values.convertToShort(null, v), - Integer.class, v -> Values.convertToInteger(null, v), - Long.class, v -> Values.convertToLong(null, v), - Float.class, v -> Values.convertToFloat(null, v), - Double.class, v -> Values.convertToDouble(null, v), - String.class, Function.identity(), - BigDecimal.class, BigDecimal::new, - BigInteger.class, BigInteger::new - ); - private static final Map, Object> PRIMITIVE_VALUE_MAPPING = Map.ofEntries( - Map.entry(Boolean.class, Boolean.FALSE), - Map.entry(Byte.class, (byte) 0), - Map.entry(Short.class, (short) 0), - Map.entry(Integer.class, 0), - Map.entry(Long.class, 0L), - Map.entry(Float.class, 0f), - Map.entry(Double.class, 0d), - Map.entry(BigInteger.class, BigInteger.ZERO), - Map.entry(BigDecimal.class, BigDecimal.ZERO), - Map.entry(Date.class, new Date(0)), - Map.entry(String.class, "") - ); + private static final Map, Function> REPLACEMENT_MAPPING_FUNC = new HashMap<>(); + private static final Map, Object> PRIMITIVE_VALUE_MAPPING = new HashMap<>(); + + static { + PRIMITIVE_VALUE_MAPPING.put(Boolean.class, Boolean.FALSE); + PRIMITIVE_VALUE_MAPPING.put(Byte.class, (byte) 0); + PRIMITIVE_VALUE_MAPPING.put(Short.class, (short) 0); + PRIMITIVE_VALUE_MAPPING.put(Integer.class, 0); + PRIMITIVE_VALUE_MAPPING.put(Long.class, 0L); + PRIMITIVE_VALUE_MAPPING.put(Float.class, 0f); + PRIMITIVE_VALUE_MAPPING.put(Double.class, 0d); + PRIMITIVE_VALUE_MAPPING.put(BigInteger.class, BigInteger.ZERO); + PRIMITIVE_VALUE_MAPPING.put(BigDecimal.class, BigDecimal.ZERO); + PRIMITIVE_VALUE_MAPPING.put(Date.class, new Date(0)); + PRIMITIVE_VALUE_MAPPING.put(String.class, ""); + + REPLACEMENT_MAPPING_FUNC.put(Byte.class, v -> Values.convertToByte(null, v)); + REPLACEMENT_MAPPING_FUNC.put(Short.class, v -> Values.convertToShort(null, v)); + REPLACEMENT_MAPPING_FUNC.put(Integer.class, v -> Values.convertToInteger(null, v)); + REPLACEMENT_MAPPING_FUNC.put(Long.class, v -> Values.convertToLong(null, v)); + REPLACEMENT_MAPPING_FUNC.put(Float.class, v -> Values.convertToFloat(null, v)); + REPLACEMENT_MAPPING_FUNC.put(Double.class, v -> Values.convertToDouble(null, v)); + REPLACEMENT_MAPPING_FUNC.put(String.class, Function.identity()); + REPLACEMENT_MAPPING_FUNC.put(BigDecimal.class, BigDecimal::new); + REPLACEMENT_MAPPING_FUNC.put(BigInteger.class, BigInteger::new); + } private Set maskedFields; private String replacement; diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java index 7e8f6700bf634..38d27e8a818f8 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ReplaceField.java @@ -31,6 +31,7 @@ import org.apache.kafka.connect.transforms.util.SchemaUtil; import org.apache.kafka.connect.transforms.util.SimpleConfig; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -55,19 +56,11 @@ interface ConfigName { } public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(ConfigName.EXCLUDE, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.MEDIUM, + .define(ConfigName.EXCLUDE, ConfigDef.Type.LIST, Collections.emptyList(), ConfigDef.Importance.MEDIUM, "Fields to exclude. This takes precedence over the fields to include.") - .define(ConfigName.INCLUDE, - ConfigDef.Type.LIST, - List.of(), - ConfigDef.ValidList.anyNonDuplicateValues(true, false), - ConfigDef.Importance.MEDIUM, + .define(ConfigName.INCLUDE, ConfigDef.Type.LIST, Collections.emptyList(), ConfigDef.Importance.MEDIUM, "Fields to include. If specified, only these fields will be used.") - .define(ConfigName.RENAMES, ConfigDef.Type.LIST, List.of(), + .define(ConfigName.RENAMES, ConfigDef.Type.LIST, Collections.emptyList(), ConfigDef.LambdaValidator.with( (name, value) -> { @SuppressWarnings("unchecked") diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java index 4a94dd1cddc5f..86ec11f5fd485 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/SetSchemaMetadata.java @@ -179,7 +179,8 @@ protected R newRecord(R record, Schema updatedSchema) { * a copy of the key or value object with updated references to the new schema. */ protected Object updateSchemaIn(Object keyOrValue, Schema updatedSchema) { - if (keyOrValue instanceof Struct origStruct) { + if (keyOrValue instanceof Struct) { + Struct origStruct = (Struct) keyOrValue; Struct newStruct = new Struct(updatedSchema); for (Field field : updatedSchema.fields()) { // assume both schemas have exact same fields with same names and schemas ... diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java index 940bb6045a9dd..957cc3e1fe316 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/TimestampConverter.java @@ -164,17 +164,20 @@ public String toType(Config config, Date orig) { TRANSLATORS.put(TYPE_UNIX, new TimestampTranslator() { @Override public Date toRaw(Config config, Object orig) { - if (!(orig instanceof Long unixTime)) + if (!(orig instanceof Long)) throw new DataException("Expected Unix timestamp to be a Long, but found " + orig.getClass()); - return switch (config.unixPrecision) { - case UNIX_PRECISION_SECONDS -> - Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.SECONDS.toMillis(unixTime)); - case UNIX_PRECISION_MICROS -> - Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.MICROSECONDS.toMillis(unixTime)); - case UNIX_PRECISION_NANOS -> - Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.NANOSECONDS.toMillis(unixTime)); - default -> Timestamp.toLogical(Timestamp.SCHEMA, unixTime); - }; + Long unixTime = (Long) orig; + switch (config.unixPrecision) { + case UNIX_PRECISION_SECONDS: + return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.SECONDS.toMillis(unixTime)); + case UNIX_PRECISION_MICROS: + return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.MICROSECONDS.toMillis(unixTime)); + case UNIX_PRECISION_NANOS: + return Timestamp.toLogical(Timestamp.SCHEMA, TimeUnit.NANOSECONDS.toMillis(unixTime)); + case UNIX_PRECISION_MILLIS: + default: + return Timestamp.toLogical(Timestamp.SCHEMA, unixTime); + } } @Override @@ -184,13 +187,18 @@ public Schema typeSchema(boolean isOptional) { @Override public Long toType(Config config, Date orig) { - long unixTimeMillis = Timestamp.fromLogical(Timestamp.SCHEMA, orig); - return switch (config.unixPrecision) { - case UNIX_PRECISION_SECONDS -> TimeUnit.MILLISECONDS.toSeconds(unixTimeMillis); - case UNIX_PRECISION_MICROS -> TimeUnit.MILLISECONDS.toMicros(unixTimeMillis); - case UNIX_PRECISION_NANOS -> TimeUnit.MILLISECONDS.toNanos(unixTimeMillis); - default -> unixTimeMillis; - }; + Long unixTimeMillis = Timestamp.fromLogical(Timestamp.SCHEMA, orig); + switch (config.unixPrecision) { + case UNIX_PRECISION_SECONDS: + return TimeUnit.MILLISECONDS.toSeconds(unixTimeMillis); + case UNIX_PRECISION_MICROS: + return TimeUnit.MILLISECONDS.toMicros(unixTimeMillis); + case UNIX_PRECISION_NANOS: + return TimeUnit.MILLISECONDS.toNanos(unixTimeMillis); + case UNIX_PRECISION_MILLIS: + default: + return unixTimeMillis; + } } }); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java index 19c299e6867e6..24cdec2249ab1 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/ValueToKey.java @@ -28,6 +28,7 @@ import org.apache.kafka.connect.data.SchemaBuilder; import org.apache.kafka.connect.data.Struct; import org.apache.kafka.connect.errors.DataException; +import org.apache.kafka.connect.transforms.util.NonEmptyListValidator; import org.apache.kafka.connect.transforms.util.SimpleConfig; import java.util.HashMap; @@ -45,7 +46,7 @@ public class ValueToKey> implements Transformation public static final String REPLACE_NULL_WITH_DEFAULT_CONFIG = "replace.null.with.default"; public static final ConfigDef CONFIG_DEF = new ConfigDef() - .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, ConfigDef.ValidList.anyNonDuplicateValues(false, false), ConfigDef.Importance.HIGH, + .define(FIELDS_CONFIG, ConfigDef.Type.LIST, ConfigDef.NO_DEFAULT_VALUE, new NonEmptyListValidator(), ConfigDef.Importance.HIGH, "Field names on the record value to extract as the record key.") .define(REPLACE_NULL_WITH_DEFAULT_CONFIG, ConfigDef.Type.BOOLEAN, true, ConfigDef.Importance.MEDIUM, "Whether to replace fields that have a default value and that are null to the default value. When set to true, the default value is used, otherwise null is used."); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java index 4b585d663b8dc..514ebb425ebe6 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersion.java @@ -44,13 +44,13 @@ public enum FieldSyntaxVersion { public static final String FIELD_SYNTAX_VERSION_CONFIG = "field.syntax.version"; public static final String FIELD_SYNTAX_VERSION_DOC = "Defines the version of the syntax to access fields. " - + "If set to V1, then the field paths are limited to access the elements at the root level of the struct or map. " - + "If set to V2, the syntax will support accessing nested elements. " + + "If set to `V1`, then the field paths are limited to access the elements at the root level of the struct or map. " + + "If set to `V2`, the syntax will support accessing nested elements. " + "To access nested elements, dotted notation is used. " + "If dots are already included in the field name, " + "then backtick pairs can be used to wrap field names containing dots. " - + "E.g. to access the subfield baz from a field named \"foo.bar\" in a struct/map " - + "the following format can be used to access its elements: \"foo.bar.baz\"."; + + "E.g. to access the subfield `baz` from a field named \"foo.bar\" in a struct/map " + + "the following format can be used to access its elements: \"`foo.bar`.baz\"."; public static final String FIELD_SYNTAX_VERSION_DEFAULT_VALUE = V1.name(); diff --git a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java index 6016707d36764..326a844025d63 100644 --- a/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java +++ b/connect/transforms/src/main/java/org/apache/kafka/connect/transforms/field/SingleFieldPath.java @@ -22,6 +22,7 @@ import org.apache.kafka.connect.data.Struct; import java.util.ArrayList; +import java.util.Collections; import java.util.List; import java.util.Map; import java.util.Objects; @@ -52,7 +53,7 @@ public SingleFieldPath(String pathText, FieldSyntaxVersion version) { this.version = version; switch (version) { case V1: // backward compatibility - this.steps = List.of(pathText); + this.steps = Collections.singletonList(pathText); break; case V2: this.steps = buildFieldPathV2(pathText); @@ -133,7 +134,7 @@ private static List buildFieldPathV2(String path) { // add last step if last char is a dot if (!path.isEmpty() && path.charAt(path.length() - 1) == DOT) steps.add(""); - return List.copyOf(steps); + return Collections.unmodifiableList(steps); } private static void failWhenIncompleteBacktickPair(String path, int backtickAt) { diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java index 1a470095a417d..e79e163b46394 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/CastTest.java @@ -39,6 +39,7 @@ import java.math.BigDecimal; import java.nio.ByteBuffer; import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; @@ -71,38 +72,38 @@ public void teardown() { @Test public void testConfigEmpty() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, ""))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, ""))); } @Test public void testConfigInvalidSchemaType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:faketype"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:faketype"))); } @Test public void testConfigInvalidTargetType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:array"))); - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "array"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:array"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "array"))); } @Test public void testUnsupportedTargetType() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:bytes"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:bytes"))); } @Test public void testConfigInvalidMap() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int8:extra"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8:extra"))); } @Test public void testConfigMixWholeAndFieldTransformation() { - assertThrows(ConfigException.class, () -> xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int8,int32"))); + assertThrows(ConfigException.class, () -> xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int8,int32"))); } @Test public void castNullValueRecordWithSchema() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, "key", Schema.STRING_SCHEMA, null); SourceRecord transformed = xformValue.apply(original); @@ -128,7 +129,7 @@ public void castFieldWithDefaultValueRecordWithSchema(boolean replaceNullWithDef @Test public void castNullValueRecordSchemaless() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, "key", null, null); SourceRecord transformed = xformValue.apply(original); @@ -137,7 +138,7 @@ public void castNullValueRecordSchemaless() { @Test public void castNullKeyRecordWithSchema() { - xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); + xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, Schema.STRING_SCHEMA, null, Schema.STRING_SCHEMA, "value"); SourceRecord transformed = xformKey.apply(original); @@ -146,7 +147,7 @@ public void castNullKeyRecordWithSchema() { @Test public void castNullKeyRecordSchemaless() { - xformKey.configure(Map.of(Cast.SPEC_CONFIG, "foo:int64")); + xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "foo:int64")); SourceRecord original = new SourceRecord(null, null, "topic", 0, null, null, Schema.STRING_SCHEMA, "value"); SourceRecord transformed = xformKey.apply(original); @@ -155,7 +156,7 @@ public void castNullKeyRecordSchemaless() { @Test public void castWholeRecordKeyWithSchema() { - xformKey.configure(Map.of(Cast.SPEC_CONFIG, "int8")); + xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42, Schema.STRING_SCHEMA, "bogus")); @@ -165,7 +166,7 @@ public void castWholeRecordKeyWithSchema() { @Test public void castWholeRecordValueWithSchemaInt8() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -175,7 +176,7 @@ public void castWholeRecordValueWithSchemaInt8() { @Test public void castWholeRecordValueWithSchemaInt16() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int16")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int16")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -185,7 +186,7 @@ public void castWholeRecordValueWithSchemaInt16() { @Test public void castWholeRecordValueWithSchemaInt32() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -195,7 +196,7 @@ public void castWholeRecordValueWithSchemaInt32() { @Test public void castWholeRecordValueWithSchemaInt64() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -205,7 +206,7 @@ public void castWholeRecordValueWithSchemaInt64() { @Test public void castWholeRecordValueWithSchemaFloat32() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -215,7 +216,7 @@ public void castWholeRecordValueWithSchemaFloat32() { @Test public void castWholeRecordValueWithSchemaFloat64() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -225,7 +226,7 @@ public void castWholeRecordValueWithSchemaFloat64() { @Test public void castWholeRecordValueWithSchemaBooleanTrue() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -235,7 +236,7 @@ public void castWholeRecordValueWithSchemaBooleanTrue() { @Test public void castWholeRecordValueWithSchemaBooleanFalse() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 0)); @@ -245,7 +246,7 @@ public void castWholeRecordValueWithSchemaBooleanFalse() { @Test public void castWholeRecordValueWithSchemaString() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42)); @@ -256,7 +257,7 @@ public void castWholeRecordValueWithSchemaString() { @Test public void castWholeBigDecimalRecordValueWithSchemaString() { BigDecimal bigDecimal = new BigDecimal(42); - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Decimal.schema(bigDecimal.scale()), bigDecimal)); @@ -267,7 +268,7 @@ public void castWholeBigDecimalRecordValueWithSchemaString() { @Test public void castWholeDateRecordValueWithSchemaString() { Date timestamp = new Date(MILLIS_PER_DAY + 1); // day + 1msec to get a timestamp formatting. - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, Timestamp.SCHEMA, timestamp)); @@ -278,7 +279,7 @@ public void castWholeDateRecordValueWithSchemaString() { @Test public void castWholeRecordDefaultValue() { // Validate default value in schema is correctly converted - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, SchemaBuilder.float32().defaultValue(-42.125f).build(), 42.125f)); @@ -289,7 +290,7 @@ public void castWholeRecordDefaultValue() { @Test public void castWholeRecordKeySchemaless() { - xformKey.configure(Map.of(Cast.SPEC_CONFIG, "int8")); + xformKey.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, null, 42, Schema.STRING_SCHEMA, "bogus")); @@ -299,7 +300,7 @@ public void castWholeRecordKeySchemaless() { @Test public void castWholeRecordValueSchemalessInt8() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -309,7 +310,7 @@ public void castWholeRecordValueSchemalessInt8() { @Test public void castWholeRecordValueSchemalessInt16() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int16")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int16")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -319,7 +320,7 @@ public void castWholeRecordValueSchemalessInt16() { @Test public void castWholeRecordValueSchemalessInt32() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -329,7 +330,7 @@ public void castWholeRecordValueSchemalessInt32() { @Test public void castWholeRecordValueSchemalessInt64() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -339,7 +340,7 @@ public void castWholeRecordValueSchemalessInt64() { @Test public void castWholeRecordValueSchemalessFloat32() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float32")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -349,7 +350,7 @@ public void castWholeRecordValueSchemalessFloat32() { @Test public void castWholeRecordValueSchemalessFloat64() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "float64")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "float64")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -359,7 +360,7 @@ public void castWholeRecordValueSchemalessFloat64() { @Test public void castWholeRecordValueSchemalessBooleanTrue() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -369,7 +370,7 @@ public void castWholeRecordValueSchemalessBooleanTrue() { @Test public void castWholeRecordValueSchemalessBooleanFalse() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "boolean")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "boolean")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 0)); @@ -379,7 +380,7 @@ public void castWholeRecordValueSchemalessBooleanFalse() { @Test public void castWholeRecordValueSchemalessString() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "string")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "string")); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42)); @@ -389,15 +390,15 @@ public void castWholeRecordValueSchemalessString() { @Test public void castWholeRecordValueSchemalessUnsupportedType() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8")); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, - null, List.of("foo")))); + null, Collections.singletonList("foo")))); } @Test public void castLogicalToPrimitive() { - List specParts = List.of( + List specParts = Arrays.asList( "date_to_int32:int32", // Cast to underlying representation "timestamp_to_int64:int64", // Cast to underlying representation "time_to_int64:int64", // Cast to wider datatype than underlying representation @@ -407,7 +408,7 @@ public void castLogicalToPrimitive() { ); Date day = new Date(MILLIS_PER_DAY); - xformValue.configure(Map.of(Cast.SPEC_CONFIG, + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, String.join(",", specParts))); SchemaBuilder builder = SchemaBuilder.struct(); @@ -454,7 +455,7 @@ public void castLogicalToString() { Date time = new Date(MILLIS_PER_HOUR); Date timestamp = new Date(); - xformValue.configure(Map.of(Cast.SPEC_CONFIG, + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "date:string,decimal:string,time:string,timestamp:string")); SchemaBuilder builder = SchemaBuilder.struct(); @@ -493,7 +494,7 @@ public void castFieldsWithSchema() { byte[] byteArray = new byte[] {(byte) 0xFE, (byte) 0xDC, (byte) 0xBA, (byte) 0x98, 0x76, 0x54, 0x32, 0x10}; ByteBuffer byteBuffer = ByteBuffer.wrap(Arrays.copyOf(byteArray, byteArray.length)); - xformValue.configure(Map.of(Cast.SPEC_CONFIG, + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32,bigdecimal:string,date:string,optional:int32,bytes:string,byteArray:string")); // Include an optional fields and fields with defaults to validate their values are passed through properly @@ -577,7 +578,7 @@ public void castFieldsWithSchema() { @SuppressWarnings("unchecked") @Test public void castFieldsSchemaless() { - xformValue.configure(Map.of(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32")); + xformValue.configure(Collections.singletonMap(Cast.SPEC_CONFIG, "int8:int16,int16:int32,int32:int64,int64:boolean,float32:float64,float64:boolean,boolean:int8,string:int32")); Map recordValue = new HashMap<>(); recordValue.put("int8", (byte) 8); recordValue.put("int16", (short) 16); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java index 2def8f2e4d226..d164512897b64 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/DropHeadersTest.java @@ -25,9 +25,10 @@ import org.junit.jupiter.api.Test; import java.util.HashMap; -import java.util.List; import java.util.Map; +import static java.util.Arrays.asList; +import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -37,7 +38,7 @@ public class DropHeadersTest { private Map config(String... headers) { Map result = new HashMap<>(); - result.put(DropHeaders.HEADERS_FIELD, List.of(headers)); + result.put(DropHeaders.HEADERS_FIELD, asList(headers)); return result; } @@ -105,8 +106,8 @@ private void assertNonHeaders(SourceRecord original, SourceRecord xformed) { } private SourceRecord sourceRecord(ConnectHeaders headers) { - Map sourcePartition = Map.of("foo", "bar"); - Map sourceOffset = Map.of("baz", "quxx"); + Map sourcePartition = singletonMap("foo", "bar"); + Map sourceOffset = singletonMap("baz", "quxx"); String topic = "topic"; Integer partition = 0; Schema keySchema = null; diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java index 414dec56095d3..ff11ffe4e852d 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ExtractFieldTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -56,9 +57,9 @@ public void teardown() { @Test public void schemaless() { - xformKey.configure(Map.of("field", "magic")); + xformKey.configure(Collections.singletonMap("field", "magic")); - final SinkRecord record = new SinkRecord("test", 0, null, Map.of("magic", 42), null, null, 0); + final SinkRecord record = new SinkRecord("test", 0, null, Collections.singletonMap("magic", 42), null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); assertNull(transformedRecord.keySchema()); @@ -72,7 +73,7 @@ public void schemalessAndNestedPath() { configs.put("field", "magic.foo"); xformKey.configure(configs); - final Map key = Map.of("magic", Map.of("foo", 42)); + final Map key = Collections.singletonMap("magic", Collections.singletonMap("foo", 42)); final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); @@ -82,7 +83,7 @@ public void schemalessAndNestedPath() { @Test public void nullSchemaless() { - xformKey.configure(Map.of("field", "magic")); + xformKey.configure(Collections.singletonMap("field", "magic")); final Map key = null; final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); @@ -94,7 +95,7 @@ public void nullSchemaless() { @Test public void withSchema() { - xformKey.configure(Map.of("field", "magic")); + xformKey.configure(Collections.singletonMap("field", "magic")); final Schema keySchema = SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).build(); final Struct key = new Struct(keySchema).put("magic", 42); @@ -124,7 +125,7 @@ public void withSchemaAndNestedPath() { @Test public void testNullWithSchema() { - xformKey.configure(Map.of("field", "magic")); + xformKey.configure(Collections.singletonMap("field", "magic")); final Schema keySchema = SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).optional().build(); final Struct key = null; @@ -137,9 +138,9 @@ public void testNullWithSchema() { @Test public void nonExistentFieldSchemalessShouldReturnNull() { - xformKey.configure(Map.of("field", "nonexistent")); + xformKey.configure(Collections.singletonMap("field", "nonexistent")); - final SinkRecord record = new SinkRecord("test", 0, null, Map.of("magic", 42), null, null, 0); + final SinkRecord record = new SinkRecord("test", 0, null, Collections.singletonMap("magic", 42), null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); assertNull(transformedRecord.keySchema()); @@ -153,7 +154,7 @@ public void nonExistentNestedFieldSchemalessShouldReturnNull() { configs.put("field", "magic.nonexistent"); xformKey.configure(configs); - final Map key = Map.of("magic", Map.of("foo", 42)); + final Map key = Collections.singletonMap("magic", Collections.singletonMap("foo", 42)); final SinkRecord record = new SinkRecord("test", 0, null, key, null, null, 0); final SinkRecord transformedRecord = xformKey.apply(record); @@ -163,7 +164,7 @@ public void nonExistentNestedFieldSchemalessShouldReturnNull() { @Test public void nonExistentFieldWithSchemaShouldFail() { - xformKey.configure(Map.of("field", "nonexistent")); + xformKey.configure(Collections.singletonMap("field", "nonexistent")); final Schema keySchema = SchemaBuilder.struct().field("magic", Schema.INT32_SCHEMA).build(); final Struct key = new Struct(keySchema).put("magic", 42); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java index 8873f4c03b0b8..f771d4f0ac3e4 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/FlattenTest.java @@ -27,9 +27,10 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.LinkedHashMap; -import java.util.List; import java.util.Map; import static org.junit.jupiter.api.Assertions.assertArrayEquals; @@ -51,21 +52,21 @@ public void teardown() { @Test public void topLevelStructRequired() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, Schema.INT32_SCHEMA, 42))); } @Test public void topLevelMapRequired() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "topic", 0, null, 42))); } @Test public void testNestedStruct() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); SchemaBuilder builder = SchemaBuilder.struct(); builder.field("int8", Schema.INT8_SCHEMA); @@ -124,7 +125,7 @@ public void testNestedStruct() { @Test public void testNestedMapWithDelimiter() { - xformValue.configure(Map.of("delimiter", "#")); + xformValue.configure(Collections.singletonMap("delimiter", "#")); Map supportedTypes = new HashMap<>(); supportedTypes.put("int8", (byte) 8); @@ -137,8 +138,8 @@ public void testNestedMapWithDelimiter() { supportedTypes.put("string", "stringy"); supportedTypes.put("bytes", "bytes".getBytes()); - Map oneLevelNestedMap = Map.of("B", supportedTypes); - Map twoLevelNestedMap = Map.of("A", oneLevelNestedMap); + Map oneLevelNestedMap = Collections.singletonMap("B", supportedTypes); + Map twoLevelNestedMap = Collections.singletonMap("A", oneLevelNestedMap); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, @@ -162,7 +163,7 @@ public void testNestedMapWithDelimiter() { @Test public void testOptionalFieldStruct() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); SchemaBuilder builder = SchemaBuilder.struct(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -189,7 +190,7 @@ public void testOptionalFieldStruct() { @Test public void testOptionalStruct() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -205,7 +206,7 @@ public void testOptionalStruct() { @Test public void testOptionalNestedStruct() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("opt_int32", Schema.OPTIONAL_INT32_SCHEMA); @@ -229,12 +230,12 @@ public void testOptionalNestedStruct() { @Test public void testOptionalFieldMap() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); Map supportedTypes = new HashMap<>(); supportedTypes.put("opt_int32", null); - Map oneLevelNestedMap = Map.of("B", supportedTypes); + Map oneLevelNestedMap = Collections.singletonMap("B", supportedTypes); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", 0, @@ -250,9 +251,9 @@ public void testOptionalFieldMap() { @Test public void testKey() { - xformKey.configure(Map.of()); + xformKey.configure(Collections.emptyMap()); - Map> key = Map.of("A", Map.of("B", 12)); + Map> key = Collections.singletonMap("A", Collections.singletonMap("B", 12)); SourceRecord src = new SourceRecord(null, null, "topic", null, key, null, null); SourceRecord transformed = xformKey.apply(src); @@ -265,14 +266,14 @@ public void testKey() { @Test public void testSchemalessArray() { - xformValue.configure(Map.of()); - Object value = Map.of("foo", List.of("bar", Map.of("baz", Map.of("lfg", "lfg")))); + xformValue.configure(Collections.emptyMap()); + Object value = Collections.singletonMap("foo", Arrays.asList("bar", Collections.singletonMap("baz", Collections.singletonMap("lfg", "lfg")))); assertEquals(value, xformValue.apply(new SourceRecord(null, null, "topic", null, null, null, value)).value()); } @Test public void testArrayWithSchema() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); Schema nestedStructSchema = SchemaBuilder.struct().field("lfg", Schema.STRING_SCHEMA).build(); Schema innerStructSchema = SchemaBuilder.struct().field("baz", nestedStructSchema).build(); Schema structSchema = SchemaBuilder.struct() @@ -283,7 +284,7 @@ public void testArrayWithSchema() { Struct innerValue = new Struct(innerStructSchema); innerValue.put("baz", nestedValue); Struct value = new Struct(structSchema); - value.put("foo", List.of(innerValue)); + value.put("foo", Collections.singletonList(innerValue)); SourceRecord transformed = xformValue.apply(new SourceRecord(null, null, "topic", null, null, structSchema, value)); assertEquals(value, transformed.value()); assertEquals(structSchema, transformed.valueSchema()); @@ -295,7 +296,7 @@ public void testOptionalAndDefaultValuesNested() { // children should also be optional. Similarly, if the parent Struct has a default value, the default value for // the flattened field - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); SchemaBuilder builder = SchemaBuilder.struct().optional(); builder.field("req_field", Schema.STRING_SCHEMA); @@ -324,7 +325,7 @@ public void testOptionalAndDefaultValuesNested() { @Test public void tombstoneEventWithoutSchemaShouldPassThrough() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); final SourceRecord record = new SourceRecord(null, null, "test", 0, null, null); @@ -336,7 +337,7 @@ public void tombstoneEventWithoutSchemaShouldPassThrough() { @Test public void tombstoneEventWithSchemaShouldPassThrough() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); final Schema simpleStructSchema = SchemaBuilder.struct().name("name").version(1).doc("doc").field("magic", Schema.OPTIONAL_INT64_SCHEMA).build(); final SourceRecord record = new SourceRecord(null, null, "test", 0, @@ -349,7 +350,7 @@ public void tombstoneEventWithSchemaShouldPassThrough() { @Test public void testMapWithNullFields() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); // Use a LinkedHashMap to ensure the SMT sees entries in a specific order Map value = new LinkedHashMap<>(); @@ -367,7 +368,7 @@ public void testMapWithNullFields() { @Test public void testStructWithNullFields() { - xformValue.configure(Map.of()); + xformValue.configure(Collections.emptyMap()); final Schema structSchema = SchemaBuilder.struct() .field("firstNull", Schema.OPTIONAL_STRING_SCHEMA) diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java index f68d7493a75a1..da9e35843252d 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HeaderFromTest.java @@ -36,6 +36,10 @@ import java.util.List; import java.util.Map; +import static java.util.Arrays.asList; +import static java.util.Collections.emptyList; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; import static org.apache.kafka.connect.data.Schema.STRING_SCHEMA; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -99,8 +103,8 @@ public SourceRecord withSchema(boolean keyTransform) { } private SourceRecord sourceRecord(boolean keyTransform, Schema keyOrValueSchema, Object keyOrValue) { - Map sourcePartition = Map.of("foo", "bar"); - Map sourceOffset = Map.of("baz", "quxx"); + Map sourcePartition = singletonMap("foo", "bar"); + Map sourceOffset = singletonMap("baz", "quxx"); String topic = "topic"; Integer partition = 0; Long timestamp = 0L; @@ -136,7 +140,7 @@ public static List data() { List result = new ArrayList<>(); - for (Boolean testKeyTransform : List.of(true, false)) { + for (Boolean testKeyTransform : asList(true, false)) { result.add( Arguments.of( "basic copy", @@ -145,7 +149,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") @@ -160,7 +164,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -175,7 +179,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("inserted1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") @@ -190,7 +194,7 @@ public static List data() { .withField("field1", STRING_SCHEMA, "field1-value") .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("inserted1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -207,7 +211,7 @@ public static List data() { .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.COPY, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.COPY, true, new RecordBuilder() .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") @@ -222,7 +226,7 @@ public static List data() { .withField("field1", schema, struct) .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1"), List.of("inserted1"), HeaderFrom.Operation.MOVE, true, + singletonList("field1"), singletonList("inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -238,7 +242,7 @@ public static List data() { .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), // two headers from the same field - List.of("field1", "field1"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, true, + asList("field1", "field1"), asList("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 got moved .withField("field2", STRING_SCHEMA, "field2-value") @@ -255,7 +259,7 @@ public static List data() { .withField("field2", STRING_SCHEMA, "field2-value") .addHeader("header1", STRING_SCHEMA, "existing-value"), // two headers from the same field - List.of("field1", "field2"), List.of("inserted1", "inserted1"), HeaderFrom.Operation.MOVE, true, + asList("field1", "field2"), asList("inserted1", "inserted1"), HeaderFrom.Operation.MOVE, true, new RecordBuilder() // field1 and field2 got moved .addHeader("header1", STRING_SCHEMA, "existing-value") @@ -270,7 +274,7 @@ public static List data() { .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1", "field2"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.COPY, false, + asList("field1", "field2"), asList("inserted1", "inserted2"), HeaderFrom.Operation.COPY, false, new RecordBuilder() .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) @@ -286,7 +290,7 @@ public static List data() { .withField("field1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") .withField("field2", SchemaBuilder.string().defaultValue("default").optional().build(), null) .addHeader("header1", STRING_SCHEMA, "existing-value"), - List.of("field1", "field2"), List.of("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, false, + asList("field1", "field2"), asList("inserted1", "inserted2"), HeaderFrom.Operation.MOVE, false, new RecordBuilder() .addHeader("header1", STRING_SCHEMA, "existing-value") .addHeader("inserted1", SchemaBuilder.string().defaultValue("default").optional().build(), "field1-value") @@ -349,7 +353,7 @@ public void withSchema(String description, @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigExtraHeaderConfig(boolean keyTransform) { - Map config = config(List.of("foo"), List.of("foo", "bar"), HeaderFrom.Operation.COPY, true); + Map config = config(singletonList("foo"), asList("foo", "bar"), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } @@ -357,7 +361,7 @@ public void invalidConfigExtraHeaderConfig(boolean keyTransform) { @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigExtraFieldConfig(boolean keyTransform) { - Map config = config(List.of("foo", "bar"), List.of("foo"), HeaderFrom.Operation.COPY, true); + Map config = config(asList("foo", "bar"), singletonList("foo"), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } @@ -365,7 +369,7 @@ public void invalidConfigExtraFieldConfig(boolean keyTransform) { @ParameterizedTest @ValueSource(booleans = {true, false}) public void invalidConfigEmptyHeadersAndFieldsConfig(boolean keyTransform) { - Map config = config(List.of(), List.of(), HeaderFrom.Operation.COPY, true); + Map config = config(emptyList(), emptyList(), HeaderFrom.Operation.COPY, true); HeaderFrom xform = keyTransform ? new HeaderFrom.Key<>() : new HeaderFrom.Value<>(); assertThrows(ConfigException.class, () -> xform.configure(config)); } diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java index 93b69d5413d83..b72dddcdd155c 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/HoistFieldTest.java @@ -24,6 +24,7 @@ import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.Map; @@ -40,18 +41,18 @@ public void teardown() { @Test public void schemaless() { - xform.configure(Map.of("field", "magic")); + xform.configure(Collections.singletonMap("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); assertNull(transformedRecord.keySchema()); - assertEquals(Map.of("magic", 42), transformedRecord.key()); + assertEquals(Collections.singletonMap("magic", 42), transformedRecord.key()); } @Test public void withSchema() { - xform.configure(Map.of("field", "magic")); + xform.configure(Collections.singletonMap("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, Schema.INT32_SCHEMA, 42, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); @@ -63,7 +64,7 @@ public void withSchema() { @Test public void testSchemalessMapIsMutable() { - xform.configure(Map.of("field", "magic")); + xform.configure(Collections.singletonMap("field", "magic")); final SinkRecord record = new SinkRecord("test", 0, null, 420, null, null, 0); final SinkRecord transformedRecord = xform.apply(record); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java index 705f60f5a2e5b..cb48fdd810f7d 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertFieldTest.java @@ -30,6 +30,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.Map; @@ -58,7 +59,7 @@ public void teardown() { @Test public void topLevelStructRequired() { - xformValue.configure(Map.of("topic.field", "topic_field")); + xformValue.configure(Collections.singletonMap("topic.field", "topic_field")); assertThrows(DataException.class, () -> xformValue.apply(new SourceRecord(null, null, "", 0, Schema.INT32_SCHEMA, 42))); } @@ -117,7 +118,7 @@ public void schemalessInsertConfiguredFields() { xformValue.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, null, null, Map.of("magic", 42L), 123L); + null, null, null, Collections.singletonMap("magic", 42L), 123L); final SourceRecord transformedRecord = xformValue.apply(record); @@ -182,7 +183,7 @@ public void insertKeyFieldsIntoTombstoneEvent() { xformKey.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, Map.of("magic", 42L), null, null); + null, Collections.singletonMap("magic", 42L), null, null); final SourceRecord transformedRecord = xformKey.apply(record); @@ -206,7 +207,7 @@ public void insertIntoNullKeyLeavesRecordUnchanged() { xformKey.configure(props); final SourceRecord record = new SourceRecord(null, null, "test", 0, - null, null, null, Map.of("magic", 42L)); + null, null, null, Collections.singletonMap("magic", 42L)); final SourceRecord transformedRecord = xformKey.apply(record); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java index 190931b829bf2..20c5b67a50a93 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/InsertHeaderTest.java @@ -28,6 +28,7 @@ import java.util.HashMap; import java.util.Map; +import static java.util.Collections.singletonMap; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -104,8 +105,8 @@ private void assertNonHeaders(SourceRecord original, SourceRecord xformed) { } private SourceRecord sourceRecord(ConnectHeaders headers) { - Map sourcePartition = Map.of("foo", "bar"); - Map sourceOffset = Map.of("baz", "quxx"); + Map sourcePartition = singletonMap("foo", "bar"); + Map sourceOffset = singletonMap("baz", "quxx"); String topic = "topic"; Integer partition = 0; Schema keySchema = null; diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java index 3bdc1cd3b4cfa..05989af572f4d 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/MaskFieldTest.java @@ -33,69 +33,73 @@ import java.math.BigDecimal; import java.math.BigInteger; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.Date; import java.util.HashMap; import java.util.List; import java.util.Map; +import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; public class MaskFieldTest { private static final Schema SCHEMA = SchemaBuilder.struct() - .field("magic", Schema.INT32_SCHEMA) - .field("bool", Schema.BOOLEAN_SCHEMA) - .field("byte", Schema.INT8_SCHEMA) - .field("short", Schema.INT16_SCHEMA) - .field("int", Schema.INT32_SCHEMA) - .field("long", Schema.INT64_SCHEMA) - .field("float", Schema.FLOAT32_SCHEMA) - .field("double", Schema.FLOAT64_SCHEMA) - .field("string", Schema.STRING_SCHEMA) - .field("date", org.apache.kafka.connect.data.Date.SCHEMA) - .field("time", Time.SCHEMA) - .field("timestamp", Timestamp.SCHEMA) - .field("decimal", Decimal.schema(0)) - .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA)) - .field("map", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA)) - .field("withDefault", SchemaBuilder.string().optional().defaultValue("default").build()) - .build(); + .field("magic", Schema.INT32_SCHEMA) + .field("bool", Schema.BOOLEAN_SCHEMA) + .field("byte", Schema.INT8_SCHEMA) + .field("short", Schema.INT16_SCHEMA) + .field("int", Schema.INT32_SCHEMA) + .field("long", Schema.INT64_SCHEMA) + .field("float", Schema.FLOAT32_SCHEMA) + .field("double", Schema.FLOAT64_SCHEMA) + .field("string", Schema.STRING_SCHEMA) + .field("date", org.apache.kafka.connect.data.Date.SCHEMA) + .field("time", Time.SCHEMA) + .field("timestamp", Timestamp.SCHEMA) + .field("decimal", Decimal.schema(0)) + .field("array", SchemaBuilder.array(Schema.INT32_SCHEMA)) + .field("map", SchemaBuilder.map(Schema.STRING_SCHEMA, Schema.STRING_SCHEMA)) + .field("withDefault", SchemaBuilder.string().optional().defaultValue("default").build()) + .build(); + private static final Map VALUES = new HashMap<>(); + private static final Struct VALUES_WITH_SCHEMA = new Struct(SCHEMA); - private static final Map VALUES = Map.ofEntries( - Map.entry("magic", 42), - Map.entry("bool", true), - Map.entry("byte", (byte) 42), - Map.entry("short", (short) 42), - Map.entry("int", 42), - Map.entry("long", 42L), - Map.entry("float", 42f), - Map.entry("double", 42d), - Map.entry("string", "55.121.20.20"), - Map.entry("date", new Date()), - Map.entry("bigint", new BigInteger("42")), - Map.entry("bigdec", new BigDecimal("42.0")), - Map.entry("list", List.of(42)), - Map.entry("map", Map.of("key", "value")) - ); + static { + VALUES.put("magic", 42); + VALUES.put("bool", true); + VALUES.put("byte", (byte) 42); + VALUES.put("short", (short) 42); + VALUES.put("int", 42); + VALUES.put("long", 42L); + VALUES.put("float", 42f); + VALUES.put("double", 42d); + VALUES.put("string", "55.121.20.20"); + VALUES.put("date", new Date()); + VALUES.put("bigint", new BigInteger("42")); + VALUES.put("bigdec", new BigDecimal("42.0")); + VALUES.put("list", singletonList(42)); + VALUES.put("map", Collections.singletonMap("key", "value")); - private static final Struct VALUES_WITH_SCHEMA = new Struct(SCHEMA) - .put("magic", 42) - .put("bool", true) - .put("byte", (byte) 42) - .put("short", (short) 42) - .put("int", 42) - .put("long", 42L) - .put("float", 42f) - .put("double", 42d) - .put("string", "hmm") - .put("date", new Date()) - .put("time", new Date()) - .put("timestamp", new Date()) - .put("decimal", new BigDecimal(42)) - .put("array", List.of(1, 2, 3)) - .put("map", Map.of("what", "what")) - .put("withDefault", null); + VALUES_WITH_SCHEMA.put("magic", 42); + VALUES_WITH_SCHEMA.put("bool", true); + VALUES_WITH_SCHEMA.put("byte", (byte) 42); + VALUES_WITH_SCHEMA.put("short", (short) 42); + VALUES_WITH_SCHEMA.put("int", 42); + VALUES_WITH_SCHEMA.put("long", 42L); + VALUES_WITH_SCHEMA.put("float", 42f); + VALUES_WITH_SCHEMA.put("double", 42d); + VALUES_WITH_SCHEMA.put("string", "hmm"); + VALUES_WITH_SCHEMA.put("date", new Date()); + VALUES_WITH_SCHEMA.put("time", new Date()); + VALUES_WITH_SCHEMA.put("timestamp", new Date()); + VALUES_WITH_SCHEMA.put("decimal", new BigDecimal(42)); + VALUES_WITH_SCHEMA.put("array", Arrays.asList(1, 2, 3)); + VALUES_WITH_SCHEMA.put("map", Collections.singletonMap("what", "what")); + VALUES_WITH_SCHEMA.put("withDefault", null); + } private static MaskField transform(List fields, String replacement) { final MaskField xform = new MaskField.Value<>(); @@ -113,20 +117,20 @@ private static SinkRecord record(Schema schema, Object value) { private static void checkReplacementWithSchema(String maskField, Object replacement) { SinkRecord record = record(SCHEMA, VALUES_WITH_SCHEMA); - final Struct updatedValue = (Struct) transform(List.of(maskField), String.valueOf(replacement)).apply(record).value(); + final Struct updatedValue = (Struct) transform(singletonList(maskField), String.valueOf(replacement)).apply(record).value(); assertEquals(replacement, updatedValue.get(maskField), "Invalid replacement for " + maskField + " value"); } private static void checkReplacementSchemaless(String maskField, Object replacement) { - checkReplacementSchemaless(List.of(maskField), replacement); + checkReplacementSchemaless(singletonList(maskField), replacement); } @SuppressWarnings("unchecked") private static void checkReplacementSchemaless(List maskFields, Object replacement) { SinkRecord record = record(null, VALUES); final Map updatedValue = (Map) transform(maskFields, String.valueOf(replacement)) - .apply(record) - .value(); + .apply(record) + .value(); for (String maskField : maskFields) { assertEquals(replacement, updatedValue.get(maskField), "Invalid replacement for " + maskField + " value"); } @@ -150,8 +154,8 @@ public void testSchemaless() { assertEquals(new Date(0), updatedValue.get("date")); assertEquals(BigInteger.ZERO, updatedValue.get("bigint")); assertEquals(BigDecimal.ZERO, updatedValue.get("bigdec")); - assertEquals(List.of(), updatedValue.get("list")); - assertEquals(Map.of(), updatedValue.get("map")); + assertEquals(Collections.emptyList(), updatedValue.get("list")); + assertEquals(Collections.emptyMap(), updatedValue.get("map")); } @Test @@ -178,8 +182,8 @@ public void testWithSchema() { assertEquals(new Date(0), updatedValue.get("time")); assertEquals(new Date(0), updatedValue.get("timestamp")); assertEquals(BigDecimal.ZERO, updatedValue.get("decimal")); - assertEquals(List.of(), updatedValue.get("array")); - assertEquals(Map.of(), updatedValue.get("map")); + assertEquals(Collections.emptyList(), updatedValue.get("array")); + assertEquals(Collections.emptyMap(), updatedValue.get("map")); assertEquals(null, updatedValue.getWithoutDefault("withDefault")); } @@ -202,10 +206,10 @@ public void testSchemalessUnsupportedReplacementType() { Class exClass = DataException.class; assertThrows(exClass, () -> checkReplacementSchemaless("date", new Date()), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless(List.of("int", "date"), new Date()), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless(Arrays.asList("int", "date"), new Date()), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("bool", false), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless("list", List.of("123")), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless("map", Map.of("123", "321")), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless("list", singletonList("123")), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless("map", Collections.singletonMap("123", "321")), exMessage); } @Test @@ -227,7 +231,7 @@ public void testWithSchemaUnsupportedReplacementType() { assertThrows(exClass, () -> checkReplacementWithSchema("time", new Date()), exMessage); assertThrows(exClass, () -> checkReplacementWithSchema("timestamp", new Date()), exMessage); - assertThrows(exClass, () -> checkReplacementWithSchema("array", List.of(123)), exMessage); + assertThrows(exClass, () -> checkReplacementWithSchema("array", singletonList(123)), exMessage); } @Test @@ -245,7 +249,7 @@ public void testReplacementTypeMismatch() { assertThrows(exClass, () -> checkReplacementSchemaless("bigdec", "foo"), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("int", new Date()), exMessage); assertThrows(exClass, () -> checkReplacementSchemaless("int", new Object()), exMessage); - assertThrows(exClass, () -> checkReplacementSchemaless(List.of("string", "int"), "foo"), exMessage); + assertThrows(exClass, () -> checkReplacementSchemaless(Arrays.asList("string", "int"), "foo"), exMessage); } @Test @@ -255,17 +259,17 @@ public void testEmptyStringReplacementValue() { @Test public void testNullListAndMapReplacementsAreMutable() { - final List maskFields = List.of("array", "map"); + final List maskFields = Arrays.asList("array", "map"); final Struct updatedValue = (Struct) transform(maskFields, null).apply(record(SCHEMA, VALUES_WITH_SCHEMA)).value(); @SuppressWarnings("unchecked") List actualList = (List) updatedValue.get("array"); - assertEquals(List.of(), actualList); + assertEquals(Collections.emptyList(), actualList); actualList.add(0); - assertEquals(List.of(0), actualList); + assertEquals(Collections.singletonList(0), actualList); @SuppressWarnings("unchecked") Map actualMap = (Map) updatedValue.get("map"); - assertEquals(Map.of(), actualMap); + assertEquals(Collections.emptyMap(), actualMap); actualMap.put("k", "v"); - assertEquals(Map.of("k", "v"), actualMap); + assertEquals(Collections.singletonMap("k", "v"), actualMap); } @Test diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java index 7f47dd0f8c0f0..5f0e51559bd14 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/SetSchemaMetadataTest.java @@ -30,6 +30,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -56,7 +57,7 @@ public void teardown() { @Test public void schemaNameUpdate() { - xform.configure(Map.of("schema.name", "foo")); + xform.configure(Collections.singletonMap("schema.name", "foo")); final SinkRecord record = new SinkRecord("", 0, null, null, SchemaBuilder.struct().build(), null, 0); final SinkRecord updatedRecord = xform.apply(record); assertEquals("foo", updatedRecord.valueSchema().name()); @@ -64,7 +65,7 @@ public void schemaNameUpdate() { @Test public void schemaVersionUpdate() { - xform.configure(Map.of("schema.version", 42)); + xform.configure(Collections.singletonMap("schema.version", 42)); final SinkRecord record = new SinkRecord("", 0, null, null, SchemaBuilder.struct().build(), null, 0); final SinkRecord updatedRecord = xform.apply(record); assertEquals(42, updatedRecord.valueSchema().version()); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java index d67d031482dd6..a807ad1fc2151 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampConverterTest.java @@ -34,6 +34,7 @@ import org.junit.jupiter.params.provider.MethodSource; import java.util.Calendar; +import java.util.Collections; import java.util.GregorianCalendar; import java.util.HashMap; import java.util.Map; @@ -107,13 +108,13 @@ public void teardown() { @Test public void testConfigNoTargetType() { - assertThrows(ConfigException.class, () -> xformValue.configure(Map.of())); + assertThrows(ConfigException.class, () -> xformValue.configure(Collections.emptyMap())); } @Test public void testConfigInvalidTargetType() { assertThrows(ConfigException.class, - () -> xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "invalid"))); + () -> xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "invalid"))); } @Test @@ -135,7 +136,7 @@ public void testConfigValidUnixPrecision() { @Test public void testConfigMissingFormat() { assertThrows(ConfigException.class, - () -> xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "string"))); + () -> xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "string"))); } @Test @@ -150,7 +151,7 @@ public void testConfigInvalidFormat() { @Test public void testSchemalessIdentity() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -159,7 +160,7 @@ public void testSchemalessIdentity() { @Test public void testSchemalessTimestampToDate() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -168,7 +169,7 @@ public void testSchemalessTimestampToDate() { @Test public void testSchemalessTimestampToTime() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -177,7 +178,7 @@ public void testSchemalessTimestampToTime() { @Test public void testSchemalessTimestampToUnix() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME.getTime())); assertNull(transformed.valueSchema()); @@ -201,7 +202,7 @@ public void testSchemalessTimestampToString() { @Test public void testSchemalessDateToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE.getTime())); assertNull(transformed.valueSchema()); @@ -211,7 +212,7 @@ public void testSchemalessDateToTimestamp() { @Test public void testSchemalessTimeToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(TIME.getTime())); assertNull(transformed.valueSchema()); @@ -221,7 +222,7 @@ public void testSchemalessTimeToTimestamp() { @Test public void testSchemalessUnixToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordSchemaless(DATE_PLUS_TIME_UNIX)); assertNull(transformed.valueSchema()); @@ -245,7 +246,7 @@ public void testSchemalessStringToTimestamp() { @Test public void testWithSchemaIdentity() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -254,7 +255,7 @@ public void testWithSchemaIdentity() { @Test public void testWithSchemaTimestampToDate() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Date")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Date.SCHEMA, transformed.valueSchema()); @@ -263,7 +264,7 @@ public void testWithSchemaTimestampToDate() { @Test public void testWithSchemaTimestampToTime() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Time")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Time.SCHEMA, transformed.valueSchema()); @@ -272,7 +273,7 @@ public void testWithSchemaTimestampToTime() { @Test public void testWithSchemaTimestampToUnix() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "unix")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Timestamp.SCHEMA, DATE_PLUS_TIME.getTime())); assertEquals(Schema.INT64_SCHEMA, transformed.valueSchema()); @@ -347,7 +348,7 @@ private void testSchemalessNullFieldConversion(String targetType) { @Test public void testWithSchemaDateToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Date.SCHEMA, DATE.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -357,7 +358,7 @@ public void testWithSchemaDateToTimestamp() { @Test public void testWithSchemaTimeToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Time.SCHEMA, TIME.getTime())); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -367,7 +368,7 @@ public void testWithSchemaTimeToTimestamp() { @Test public void testWithSchemaUnixToTimestamp() { - xformValue.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformValue.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformValue.apply(createRecordWithSchema(Schema.INT64_SCHEMA, DATE_PLUS_TIME_UNIX)); assertEquals(Timestamp.SCHEMA, transformed.valueSchema()); @@ -529,11 +530,11 @@ public void testSchemalessFieldConversion() { config.put(TimestampConverter.FIELD_CONFIG, "ts"); xformValue.configure(config); - Object value = Map.of("ts", DATE_PLUS_TIME.getTime()); + Object value = Collections.singletonMap("ts", DATE_PLUS_TIME.getTime()); SourceRecord transformed = xformValue.apply(createRecordSchemaless(value)); assertNull(transformed.valueSchema()); - assertEquals(Map.of("ts", DATE.getTime()), transformed.value()); + assertEquals(Collections.singletonMap("ts", DATE.getTime()), transformed.value()); } @Test @@ -589,7 +590,7 @@ public void testWithSchemaNullFieldWithDefaultConversion(boolean replaceNullWith .build(); assertEquals(expectedSchema, transformed.valueSchema()); - assertNull(((Struct) transformed.value()).get("ts")); + assertEquals(null, ((Struct) transformed.value()).get("ts")); assertEquals("test", ((Struct) transformed.value()).get("other")); } @@ -715,7 +716,7 @@ public void testSchemalessStringToUnix_Seconds() { @Test public void testKey() { - xformKey.configure(Map.of(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); + xformKey.configure(Collections.singletonMap(TimestampConverter.TARGET_TYPE_CONFIG, "Timestamp")); SourceRecord transformed = xformKey.apply(new SourceRecord(null, null, "topic", 0, null, DATE_PLUS_TIME.getTime(), null, null)); assertNull(transformed.keySchema()); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java index a98c4406ad9ed..43b3b1f384ff5 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/TimestampRouterTest.java @@ -23,7 +23,7 @@ import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import java.util.Map; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -32,7 +32,7 @@ public class TimestampRouterTest { @BeforeEach public void setup() { xform = new TimestampRouter<>(); - xform.configure(Map.of()); // defaults + xform.configure(Collections.emptyMap()); // defaults } @AfterEach diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java index 775bfbabac252..df528cf518a2a 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/ValueToKeyTest.java @@ -29,6 +29,7 @@ import org.junit.jupiter.params.provider.Arguments; import org.junit.jupiter.params.provider.MethodSource; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.stream.Stream; @@ -54,7 +55,7 @@ public void teardown() { @Test public void schemaless() { - xform.configure(Map.of("fields", "a,b")); + xform.configure(Collections.singletonMap("fields", "a,b")); final HashMap value = new HashMap<>(); value.put("a", 1); @@ -74,7 +75,7 @@ public void schemaless() { @Test public void withSchema() { - xform.configure(Map.of("fields", "a,b")); + xform.configure(Collections.singletonMap("fields", "a,b")); final Schema valueSchema = SchemaBuilder.struct() .field("a", Schema.INT32_SCHEMA) @@ -105,7 +106,7 @@ public void withSchema() { @Test public void nonExistingField() { - xform.configure(Map.of("fields", "not_exist")); + xform.configure(Collections.singletonMap("fields", "not_exist")); final Schema valueSchema = SchemaBuilder.struct() .field("a", Schema.INT32_SCHEMA) diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java index 4ccf130365488..1434778a853b3 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldPathNotationTest.java @@ -81,7 +81,7 @@ void shouldBuildV2WithoutWrappingBackticks() { @Test void shouldBuildV2WhenIncludesDotsAndBacktickPair() { // Given v2 and fields including dots - // When backticks are wrapping a field name (i.e. within edges or between dots) + // When backticks are wrapping a field name (i.e. withing edges or between dots) // Then build a path with steps separated by dots and not including backticks assertParseV2("`foo.bar.baz`", "foo.bar.baz"); assertParseV2("foo.`bar.baz`", "foo", "bar.baz"); @@ -92,7 +92,7 @@ void shouldBuildV2WhenIncludesDotsAndBacktickPair() { @Test void shouldBuildV2AndIgnoreBackticksThatAreNotWrapping() { // Given v2 and fields including dots and backticks - // When backticks are wrapping a field name (i.e. within edges or between dots) + // When backticks are wrapping a field name (i.e. withing edges or between dots) // Then build a path with steps separated by dots and including non-wrapping backticks assertParseV2("foo.``bar.baz`", "foo", "`bar.baz"); assertParseV2("foo.`bar.baz``", "foo", "bar.baz`"); @@ -105,7 +105,7 @@ void shouldBuildV2AndIgnoreBackticksThatAreNotWrapping() { @Test void shouldBuildV2AndEscapeBackticks() { // Given v2 and fields including dots and backticks - // When backticks are wrapping a field name (i.e. within edges or between dots) + // When backticks are wrapping a field name (i.e. withing edges or between dots) // and wrapping backticks that are part of the field name are escaped with backslashes // Then build a path with steps separated by dots and including escaped and non-wrapping backticks assertParseV2("foo.`bar\\`.baz`", "foo", "bar`.baz"); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java index a0c2e2c486171..d400141c95b71 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/field/FieldSyntaxVersionTest.java @@ -35,18 +35,18 @@ public class FieldSyntaxVersionTest { @Test void shouldAppendConfigToDef() { ConfigDef def = FieldSyntaxVersion.appendConfigTo(new ConfigDef()); - assertEquals(1, def.configKeys().size()); + assertEquals(def.configKeys().size(), 1); final ConfigDef.ConfigKey configKey = def.configKeys().get("field.syntax.version"); - assertEquals("field.syntax.version", configKey.name); - assertEquals("V1", configKey.defaultValue); + assertEquals(configKey.name, "field.syntax.version"); + assertEquals(configKey.defaultValue, "V1"); } @Test void shouldFailWhenAppendConfigToDefAgain() { ConfigDef def = FieldSyntaxVersion.appendConfigTo(new ConfigDef()); - assertEquals(1, def.configKeys().size()); + assertEquals(def.configKeys().size(), 1); ConfigException e = assertThrows(ConfigException.class, () -> FieldSyntaxVersion.appendConfigTo(def)); - assertEquals("Configuration field.syntax.version is defined twice.", e.getMessage()); + assertEquals(e.getMessage(), "Configuration field.syntax.version is defined twice."); } @ParameterizedTest diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java index 39654859edc28..e3e3920858d27 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/HasHeaderKeyTest.java @@ -26,11 +26,13 @@ import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.stream.Collectors; +import static java.util.Collections.singletonList; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -56,16 +58,16 @@ public void testNameMayNotBeEmptyInConfig() { @Test public void testConfig() { HasHeaderKey predicate = new HasHeaderKey<>(); - predicate.config().validate(Map.of("name", "foo")); + predicate.config().validate(Collections.singletonMap("name", "foo")); - List configs = predicate.config().validate(Map.of("name", "")); - assertEquals(List.of("Invalid value for configuration name: String must be non-empty"), configs.get(0).errorMessages()); + List configs = predicate.config().validate(Collections.singletonMap("name", "")); + assertEquals(singletonList("Invalid value for configuration name: String must be non-empty"), configs.get(0).errorMessages()); } @Test public void testTest() { HasHeaderKey predicate = new HasHeaderKey<>(); - predicate.configure(Map.of("name", "foo")); + predicate.configure(Collections.singletonMap("name", "foo")); assertTrue(predicate.test(recordWithHeaders("foo"))); assertTrue(predicate.test(recordWithHeaders("foo", "bar"))); @@ -86,7 +88,18 @@ private SourceRecord recordWithHeaders(String... headers) { Arrays.stream(headers).map(TestHeader::new).collect(Collectors.toList())); } - private record TestHeader(String key) implements Header { + private static class TestHeader implements Header { + + private final String key; + + public TestHeader(String key) { + this.key = key; + } + + @Override + public String key() { + return key; + } @Override public Schema schema() { diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java index 140d0d6c30f6f..3d9ac4dba9048 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/predicates/TopicNameMatchesTest.java @@ -23,6 +23,7 @@ import org.junit.jupiter.api.Test; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -60,9 +61,9 @@ public void testPatternIsValidRegexInConfig() { @Test public void testConfig() { TopicNameMatches predicate = new TopicNameMatches<>(); - predicate.config().validate(Map.of("pattern", "my-prefix-.*")); + predicate.config().validate(Collections.singletonMap("pattern", "my-prefix-.*")); - List configs = predicate.config().validate(Map.of("pattern", "*")); + List configs = predicate.config().validate(Collections.singletonMap("pattern", "*")); List errorMsgs = configs.get(0).errorMessages(); assertEquals(1, errorMsgs.size()); assertTrue(errorMsgs.get(0).contains("Invalid regex")); @@ -71,7 +72,7 @@ public void testConfig() { @Test public void testTest() { TopicNameMatches predicate = new TopicNameMatches<>(); - predicate.configure(Map.of("pattern", "my-prefix-.*")); + predicate.configure(Collections.singletonMap("pattern", "my-prefix-.*")); assertTrue(predicate.test(recordWithTopicName("my-prefix-"))); assertTrue(predicate.test(recordWithTopicName("my-prefix-foo"))); diff --git a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java index 3a9ef48f8dddb..5060346a2d91b 100644 --- a/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java +++ b/connect/transforms/src/test/java/org/apache/kafka/connect/transforms/util/NonEmptyListValidatorTest.java @@ -20,7 +20,7 @@ import org.junit.jupiter.api.Test; -import java.util.List; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -34,11 +34,11 @@ public void testNullList() { @Test public void testEmptyList() { assertThrows(ConfigException.class, - () -> new NonEmptyListValidator().ensureValid("foo", List.of())); + () -> new NonEmptyListValidator().ensureValid("foo", Collections.emptyList())); } @Test public void testValidList() { - new NonEmptyListValidator().ensureValid("foo", List.of("foo")); + new NonEmptyListValidator().ensureValid("foo", Collections.singletonList("foo")); } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java index bcd8fc795fb89..f9a417b0e8607 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImpl.java @@ -31,7 +31,18 @@ import java.util.concurrent.RejectedExecutionException; public class CoordinatorExecutorImpl, U> implements CoordinatorExecutor { - private record TaskResult(R result, Throwable exception) { } + private static class TaskResult { + final R result; + final Throwable exception; + + TaskResult( + R result, + Throwable exception + ) { + this.result = result; + this.exception = exception; + } + } private final Logger log; private final TopicPartition shard; diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoader.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoader.java index b268e22164c11..4f739082d67ec 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoader.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorLoader.java @@ -32,7 +32,51 @@ public interface CoordinatorLoader extends AutoCloseable { * Object that is returned as part of the future from load(). Holds the partition load time and the * end time. */ - record LoadSummary(long startTimeMs, long endTimeMs, long schedulerQueueTimeMs, long numRecords, long numBytes) { } + class LoadSummary { + private final long startTimeMs; + private final long endTimeMs; + private final long schedulerQueueTimeMs; + private final long numRecords; + private final long numBytes; + + public LoadSummary(long startTimeMs, long endTimeMs, long schedulerQueueTimeMs, long numRecords, long numBytes) { + this.startTimeMs = startTimeMs; + this.endTimeMs = endTimeMs; + this.schedulerQueueTimeMs = schedulerQueueTimeMs; + this.numRecords = numRecords; + this.numBytes = numBytes; + } + + public long startTimeMs() { + return startTimeMs; + } + + public long endTimeMs() { + return endTimeMs; + } + + public long schedulerQueueTimeMs() { + return schedulerQueueTimeMs; + } + + public long numRecords() { + return numRecords; + } + + public long numBytes() { + return numBytes; + } + + @Override + public String toString() { + return "LoadSummary(" + + "startTimeMs=" + startTimeMs + + ", endTimeMs=" + endTimeMs + + ", schedulerQueueTimeMs=" + schedulerQueueTimeMs + + ", numRecords=" + numRecords + + ", numBytes=" + numBytes + ")"; + } + } /** * Loads the coordinator by reading all the records from the TopicPartition diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java index 45d9d37348774..10089a7145928 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorMetricsShard.java @@ -21,7 +21,7 @@ /** * A CoordinatorMetricsShard is mapped to a single CoordinatorShard. The metrics shard records sensors that have been * defined in {@link CoordinatorMetrics}. Coordinator specific gauges and related methods are exposed in the - * implementation of CoordinatorMetricsShard (such as GroupCoordinatorMetricsShard and ShareCoordinatorMetricsShard). + * implementation of CoordinatorMetricsShard (i.e. {@link GroupCoordinatorMetricsShard}). * * For sensors, each shard individually records the observed values. */ diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java index 04d7da690f575..0c70397874a48 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorOperationExceptionHelper.java @@ -48,25 +48,36 @@ public static OUT handleOperationException( ) { ApiError apiError = ApiError.fromThrowable(exception); - return switch (apiError.error()) { - case UNKNOWN_SERVER_ERROR -> { + switch (apiError.error()) { + case UNKNOWN_SERVER_ERROR: log.error("Operation {} with {} hit an unexpected exception: {}.", - operationName, operationInput, exception.getMessage(), exception); - yield handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); - } - case NETWORK_EXCEPTION -> + operationName, operationInput, exception.getMessage(), exception); + return handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); + + case NETWORK_EXCEPTION: // When committing offsets transactionally, we now verify the transaction with the // transaction coordinator. Verification can fail with `NETWORK_EXCEPTION`, a // retriable error which older clients may not expect and retry correctly. We // translate the error to `COORDINATOR_LOAD_IN_PROGRESS` because it causes clients // to retry the request without an unnecessary coordinator lookup. - handler.apply(Errors.COORDINATOR_LOAD_IN_PROGRESS, null); - case UNKNOWN_TOPIC_OR_PARTITION, NOT_ENOUGH_REPLICAS, REQUEST_TIMED_OUT -> - handler.apply(Errors.COORDINATOR_NOT_AVAILABLE, null); - case NOT_LEADER_OR_FOLLOWER, KAFKA_STORAGE_ERROR -> handler.apply(Errors.NOT_COORDINATOR, null); - case MESSAGE_TOO_LARGE, RECORD_LIST_TOO_LARGE, INVALID_FETCH_SIZE -> - handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); - default -> handler.apply(apiError.error(), apiError.message()); - }; + return handler.apply(Errors.COORDINATOR_LOAD_IN_PROGRESS, null); + + case UNKNOWN_TOPIC_OR_PARTITION: + case NOT_ENOUGH_REPLICAS: + case REQUEST_TIMED_OUT: + return handler.apply(Errors.COORDINATOR_NOT_AVAILABLE, null); + + case NOT_LEADER_OR_FOLLOWER: + case KAFKA_STORAGE_ERROR: + return handler.apply(Errors.NOT_COORDINATOR, null); + + case MESSAGE_TOO_LARGE: + case RECORD_LIST_TOO_LARGE: + case INVALID_FETCH_SIZE: + return handler.apply(Errors.UNKNOWN_SERVER_ERROR, null); + + default: + return handler.apply(apiError.error(), apiError.message()); + } } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java index f4f22b0e36341..79d2483078809 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorPlayback.java @@ -62,12 +62,12 @@ void replayEndTransactionMarker( * * @param offset the offset of the last record in the batch plus one. */ - void updateLastWrittenOffset(long offset); + void updateLastWrittenOffset(Long offset); /** * Called when the high watermark advances. * * @param offset The offset of the new high watermark. */ - void updateLastCommittedOffset(long offset); + void updateLastCommittedOffset(Long offset); } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecord.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecord.java index b98a8ccc81620..fd13ce2ef3acb 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecord.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecord.java @@ -16,37 +16,22 @@ */ package org.apache.kafka.coordinator.common.runtime; -import org.apache.kafka.common.protocol.ApiMessage; import org.apache.kafka.server.common.ApiMessageAndVersion; import java.util.Objects; /** - * A Record which contains an {{@link ApiMessage}} as key and + * A Record which contains an {{@link ApiMessageAndVersion}} as key and * an {{@link ApiMessageAndVersion}} as value. The value could be null to * represent a tombstone. * * This class is immutable. */ public class CoordinatorRecord { - - public static CoordinatorRecord record( - ApiMessage key, - ApiMessageAndVersion value - ) { - return new CoordinatorRecord(key, value); - } - - public static CoordinatorRecord tombstone( - ApiMessage key - ) { - return new CoordinatorRecord(key, null); - } - /** * The key of the record. */ - private final ApiMessage key; + private final ApiMessageAndVersion key; /** * The value of the record or null if the record is @@ -60,30 +45,18 @@ public static CoordinatorRecord tombstone( * @param key A non-null key. * @param value A key or null. */ - private CoordinatorRecord( - ApiMessage key, + public CoordinatorRecord( + ApiMessageAndVersion key, ApiMessageAndVersion value ) { this.key = Objects.requireNonNull(key); - if (key.apiKey() < 0) { - throw new IllegalArgumentException("The key must have a type."); - } - this.value = value; - if (value != null) { - if (value.message().apiKey() < 0) { - throw new IllegalArgumentException("The value must have a type."); - } - if (value.message().apiKey() != key.apiKey()) { - throw new IllegalArgumentException("The key and the value must have the same type."); - } - } } /** * @return The key. */ - public ApiMessage key() { + public ApiMessageAndVersion key() { return this.key; } @@ -98,13 +71,18 @@ public ApiMessageAndVersion value() { public boolean equals(Object o) { if (this == o) return true; if (o == null || getClass() != o.getClass()) return false; - CoordinatorRecord that = (CoordinatorRecord) o; - return Objects.equals(key, that.key) && Objects.equals(value, that.value); + + CoordinatorRecord record = (CoordinatorRecord) o; + + if (!Objects.equals(key, record.key)) return false; + return Objects.equals(value, record.value); } @Override public int hashCode() { - return Objects.hash(key, value); + int result = key.hashCode(); + result = 31 * result + (value != null ? value.hashCode() : 0); + return result; } @Override diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java index a8b77eb299df0..28ed8962baa33 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordSerde.java @@ -43,8 +43,9 @@ public abstract class CoordinatorRecordSerde implements Serializer lingerTimeoutTask; @@ -509,7 +511,6 @@ private static class CoordinatorBatch { long nextOffset; CoordinatorBatch( - Logger log, long baseOffset, long appendTimeMs, int maxBatchSize, @@ -526,7 +527,7 @@ private static class CoordinatorBatch { this.buffer = buffer; this.builder = builder; this.lingerTimeoutTask = lingerTimeoutTask; - this.deferredEvents = new DeferredEventCollection(log); + this.deferredEvents = new DeferredEventCollection(); } } @@ -757,14 +758,8 @@ private void freeCurrentBatch() { // Cancel the linger timeout. currentBatch.lingerTimeoutTask.ifPresent(TimerTask::cancel); - // Release the buffer only if it is not larger than the maxBatchSize. - int maxBatchSize = partitionWriter.config(tp).maxMessageSize(); - - if (currentBatch.builder.buffer().capacity() <= maxBatchSize) { - bufferSupplier.release(currentBatch.builder.buffer()); - } else if (currentBatch.buffer.capacity() <= maxBatchSize) { - bufferSupplier.release(currentBatch.buffer); - } + // Release the buffer. + bufferSupplier.release(currentBatch.buffer); currentBatch = null; } @@ -864,7 +859,7 @@ private void maybeAllocateNewBatch( LogConfig logConfig = partitionWriter.config(tp); int maxBatchSize = logConfig.maxMessageSize(); long prevLastWrittenOffset = coordinator.lastWrittenOffset(); - ByteBuffer buffer = bufferSupplier.get(min(INITIAL_BUFFER_SIZE, maxBatchSize)); + ByteBuffer buffer = bufferSupplier.get(maxBatchSize); MemoryRecordsBuilder builder = new MemoryRecordsBuilder( buffer, @@ -899,7 +894,6 @@ public void run() { } currentBatch = new CoordinatorBatch( - log, prevLastWrittenOffset, currentTimeMs, maxBatchSize, @@ -946,7 +940,7 @@ private void append( currentBatch.deferredEvents.add(event); } else { if (coordinator.lastCommittedOffset() < coordinator.lastWrittenOffset()) { - deferredEventQueue.add(coordinator.lastWrittenOffset(), DeferredEventCollection.of(log, event)); + deferredEventQueue.add(coordinator.lastWrittenOffset(), event); } else { event.complete(null); } @@ -1133,7 +1127,7 @@ private void completeTransaction( runtimeMetrics.recordFlushTime(time.milliseconds() - flushStartMs); coordinator.updateLastWrittenOffset(offset); - deferredEventQueue.add(offset, DeferredEventCollection.of(log, event)); + deferredEventQueue.add(offset, event); } catch (Throwable t) { coordinator.revertLastWrittenOffset(prevLastWrittenOffset); event.complete(t); @@ -1167,21 +1161,9 @@ public void run() { * A collection of {@link DeferredEvent}. When completed, completes all the events in the collection * and logs any exceptions thrown. */ - static class DeferredEventCollection implements DeferredEvent { - /** - * The logger. - */ - private final Logger log; - - /** - * The list of events. - */ + class DeferredEventCollection implements DeferredEvent { private final List events = new ArrayList<>(); - public DeferredEventCollection(Logger log) { - this.log = log; - } - @Override public void complete(Throwable t) { for (DeferredEvent event : events) { @@ -1205,14 +1187,6 @@ public int size() { public String toString() { return "DeferredEventCollection(events=" + events + ")"; } - - public static DeferredEventCollection of(Logger log, DeferredEvent... deferredEvents) { - DeferredEventCollection collection = new DeferredEventCollection(log); - for (DeferredEvent deferredEvent : deferredEvents) { - collection.add(deferredEvent); - } - return collection; - } } /** @@ -1914,9 +1888,9 @@ public void onHighWatermarkUpdated( } /** - * 512KB. Used for initial buffer size for write operations. + * 16KB. Used for initial buffer size for write operations. */ - static final int INITIAL_BUFFER_SIZE = 512 * 1024; + static final int MIN_BUFFER_SIZE = 16384; /** * The log prefix. @@ -2014,7 +1988,7 @@ public void onHighWatermarkUpdated( /** * The latest known metadata image. */ - private volatile CoordinatorMetadataImage metadataImage = CoordinatorMetadataImage.EMPTY; + private volatile MetadataImage metadataImage = MetadataImage.EMPTY; /** * Constructor. @@ -2241,7 +2215,7 @@ public CompletableFuture scheduleTransactionalWriteOperation( short producerEpoch, Duration timeout, CoordinatorWriteOperation op, - int apiVersion + Short apiVersion ) { throwIfNotRunning(); log.debug("Scheduled execution of transactional write operation {}.", name); @@ -2479,18 +2453,18 @@ public void scheduleUnloadOperation( * @param delta The metadata delta. */ public void onNewMetadataImage( - CoordinatorMetadataImage newImage, - CoordinatorMetadataDelta delta + MetadataImage newImage, + MetadataDelta delta ) { throwIfNotRunning(); - log.debug("Scheduling applying of a new metadata image with version {}.", newImage.version()); + log.debug("Scheduling applying of a new metadata image with offset {}.", newImage.offset()); // Update global image. metadataImage = newImage; // Push an event for each coordinator. coordinators.keySet().forEach(tp -> { - scheduleInternalOperation("UpdateImage(tp=" + tp + ", version=" + newImage.version() + ")", tp, () -> { + scheduleInternalOperation("UpdateImage(tp=" + tp + ", offset=" + newImage.offset() + ")", tp, () -> { CoordinatorContext context = coordinators.get(tp); if (context != null) { context.lock.lock(); @@ -2498,18 +2472,18 @@ public void onNewMetadataImage( if (context.state == CoordinatorState.ACTIVE) { // The new image can be applied to the coordinator only if the coordinator // exists and is in the active state. - log.debug("Applying new metadata image with version {} to {}.", newImage.version(), tp); + log.debug("Applying new metadata image with offset {} to {}.", newImage.offset(), tp); context.coordinator.onNewMetadataImage(newImage, delta); } else { - log.debug("Ignored new metadata image with version {} for {} because the coordinator is not active.", - newImage.version(), tp); + log.debug("Ignored new metadata image with offset {} for {} because the coordinator is not active.", + newImage.offset(), tp); } } finally { context.lock.unlock(); } } else { - log.debug("Ignored new metadata image with version {} for {} because the coordinator does not exist.", - newImage.version(), tp); + log.debug("Ignored new metadata image with offset {} for {} because the coordinator does not exist.", + newImage.offset(), tp); } }); }); @@ -2560,7 +2534,7 @@ public void close() throws Exception { */ public List activeTopicPartitions() { if (coordinators == null || coordinators.isEmpty()) { - return List.of(); + return Collections.emptyList(); } return coordinators.entrySet().stream() diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java index af775c7c45118..a95f590c5b26b 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImpl.java @@ -149,7 +149,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { metrics.addMetric(numPartitionsActive, (Gauge) (config, now) -> numPartitionsActiveCounter.get()); metrics.addMetric(numPartitionsFailed, (Gauge) (config, now) -> numPartitionsFailedCounter.get()); - this.partitionLoadSensor = metrics.sensor(this.metricsGroup + "-PartitionLoadTime"); + this.partitionLoadSensor = metrics.sensor("GroupPartitionLoadTime"); this.partitionLoadSensor.add( metrics.metricName( "partition-load-time-max", @@ -163,7 +163,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The average time it took to load the partitions in the last 30 sec." ), new Avg()); - this.threadIdleSensor = metrics.sensor(this.metricsGroup + "-ThreadIdleRatio"); + this.threadIdleSensor = metrics.sensor("ThreadIdleRatio"); this.threadIdleSensor.add( metrics.metricName( "thread-idle-ratio-avg", @@ -178,7 +178,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event queue time in milliseconds" ) ); - this.eventQueueTimeSensor = metrics.sensor(this.metricsGroup + "-EventQueueTime"); + this.eventQueueTimeSensor = metrics.sensor("EventQueueTime"); this.eventQueueTimeSensor.add(eventQueueTimeHistogram); KafkaMetricHistogram eventProcessingTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -187,7 +187,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event processing time in milliseconds" ) ); - this.eventProcessingTimeSensor = metrics.sensor(this.metricsGroup + "-EventProcessingTime"); + this.eventProcessingTimeSensor = metrics.sensor("EventProcessingTime"); this.eventProcessingTimeSensor.add(eventProcessingTimeHistogram); KafkaMetricHistogram eventPurgatoryTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -196,7 +196,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " event purgatory time in milliseconds" ) ); - this.eventPurgatoryTimeSensor = metrics.sensor(this.metricsGroup + "-EventPurgatoryTime"); + this.eventPurgatoryTimeSensor = metrics.sensor("EventPurgatoryTime"); this.eventPurgatoryTimeSensor.add(eventPurgatoryTimeHistogram); KafkaMetricHistogram flushTimeHistogram = KafkaMetricHistogram.newLatencyHistogram( @@ -205,7 +205,7 @@ public CoordinatorRuntimeMetricsImpl(Metrics metrics, String metricsGroup) { "The " + suffix + " flush time in milliseconds" ) ); - this.flushTimeSensor = metrics.sensor(this.metricsGroup + "-FlushTime"); + this.flushTimeSensor = metrics.sensor("FlushTime"); this.flushTimeSensor.add(flushTimeHistogram); } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java index 7734b12751525..6b0f40ddf3323 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/CoordinatorShard.java @@ -17,6 +17,8 @@ package org.apache.kafka.coordinator.common.runtime; import org.apache.kafka.common.requests.TransactionResult; +import org.apache.kafka.image.MetadataDelta; +import org.apache.kafka.image.MetadataImage; /** * CoordinatorShard is basically a replicated state machine managed by the @@ -30,16 +32,16 @@ public interface CoordinatorShard { * * @param newImage The metadata image. */ - default void onLoaded(CoordinatorMetadataImage newImage) {} + default void onLoaded(MetadataImage newImage) {} /** - * A new metadata image is available. This is only called after {@link CoordinatorShard#onLoaded(CoordinatorMetadataImage)} + * A new metadata image is available. This is only called after {@link CoordinatorShard#onLoaded(MetadataImage)} * is called to signal that the coordinator has been fully loaded. * * @param newImage The new metadata image. * @param delta The delta image. */ - default void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) {} + default void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) {} /** * The coordinator has been unloaded. This is used to apply diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java index 985fb48834430..e6386e35f9d05 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/EventAccumulator.java @@ -32,8 +32,8 @@ import java.util.concurrent.locks.ReentrantLock; /** - * A concurrent event accumulator which groups events per key and ensures that only one - * event with a given key can be processed concurrently. + * A concurrent event accumulator which group events per key and ensure that only one + * event with a given key can't be processed concurrently. * * This class is threadsafe. * @@ -90,7 +90,7 @@ public interface Event { private int size; /** - * A boolean indicating whether the accumulator is closed. + * A boolean indicated whether the accumulator is closed. */ private boolean closed; @@ -174,7 +174,7 @@ public T poll() { /** * Returns the next {{@link Event}} available. This method blocks for the provided - * time and returns null if no event is available. + * time and returns null of no event is available. * * @param timeout The timeout. * @param unit The timeout unit. diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/HdrHistogram.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/HdrHistogram.java index 5cda3fd44c26a..ac618430e930c 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/HdrHistogram.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/HdrHistogram.java @@ -133,6 +133,15 @@ public double measurePercentile(long now, double percentile) { * A simple tuple of a timestamp and a value. Can be used updating a value and recording the * timestamp of the update in a single atomic operation. */ - private record Timestamped(long timestamp, T value) { } + private static final class Timestamped { + + private final long timestamp; + private final T value; + + private Timestamped(long timestamp, T value) { + this.timestamp = timestamp; + this.value = value; + } + } } diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java index cc76cfd64605b..cb8bec3f71c94 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/PartitionWriter.java @@ -105,7 +105,7 @@ CompletableFuture maybeStartTransactionVerification( String transactionalId, long producerId, short producerEpoch, - int apiVersion + short apiVersion ) throws KafkaException; /** diff --git a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java index 278373e6842f3..3aa622cc98b08 100644 --- a/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java +++ b/coordinator-common/src/main/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinator.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; import org.apache.kafka.timeline.SnapshotRegistry; @@ -61,7 +62,7 @@ public class SnapshottableCoordinator, U> implemen */ private long lastCommittedOffset; - public SnapshottableCoordinator( + SnapshottableCoordinator( LogContext logContext, SnapshotRegistry snapshotRegistry, S coordinator, @@ -137,7 +138,7 @@ public synchronized void replayEndTransactionMarker( * @param offset The new last written offset. */ @Override - public synchronized void updateLastWrittenOffset(long offset) { + public synchronized void updateLastWrittenOffset(Long offset) { if (offset <= lastWrittenOffset) { throw new IllegalStateException("New last written offset " + offset + " of " + tp + " must be greater than " + lastWrittenOffset + "."); @@ -156,7 +157,7 @@ public synchronized void updateLastWrittenOffset(long offset) { * @param offset The new last committed offset. */ @Override - public synchronized void updateLastCommittedOffset(long offset) { + public synchronized void updateLastCommittedOffset(Long offset) { if (offset < lastCommittedOffset) { throw new IllegalStateException("New committed offset " + offset + " of " + tp + " must be greater than or equal to " + lastCommittedOffset + "."); @@ -178,7 +179,7 @@ public synchronized void updateLastCommittedOffset(long offset) { * * @param newImage The metadata image. */ - synchronized void onLoaded(CoordinatorMetadataImage newImage) { + synchronized void onLoaded(MetadataImage newImage) { this.coordinator.onLoaded(newImage); } @@ -206,7 +207,7 @@ synchronized long lastWrittenOffset() { * @param newImage The new metadata image. * @param delta The delta image. */ - synchronized void onNewMetadataImage(CoordinatorMetadataImage newImage, CoordinatorMetadataDelta delta) { + synchronized void onNewMetadataImage(MetadataImage newImage, MetadataDelta delta) { this.coordinator.onNewMetadataImage(newImage, delta); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java index 4f5e917f1795b..d5ac1be7820d6 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorExecutorImplTest.java @@ -23,7 +23,7 @@ import org.junit.jupiter.api.Test; import java.time.Duration; -import java.util.List; +import java.util.Collections; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutorService; import java.util.concurrent.RejectedExecutionException; @@ -71,7 +71,7 @@ public void testTaskSuccessfulLifecycle() { CoordinatorRuntime.CoordinatorWriteOperation, Void, String> op = args.getArgument(3); assertEquals( - new CoordinatorResult<>(List.of("record"), null), + new CoordinatorResult<>(Collections.singletonList("record"), null), op.generateRecordsAndResult(coordinatorShard) ); return CompletableFuture.completedFuture(null); @@ -95,7 +95,7 @@ public void testTaskSuccessfulLifecycle() { operationCalled.set(true); assertEquals("Hello!", result); assertNull(exception); - return new CoordinatorResult<>(List.of("record"), null); + return new CoordinatorResult<>(Collections.singletonList("record"), null); }; executor.schedule( @@ -130,7 +130,7 @@ public void testTaskFailedLifecycle() { CoordinatorRuntime.CoordinatorWriteOperation, Void, String> op = args.getArgument(3); assertEquals( - new CoordinatorResult<>(List.of(), null), + new CoordinatorResult<>(Collections.emptyList(), null), op.generateRecordsAndResult(coordinatorShard) ); return CompletableFuture.completedFuture(null); @@ -154,7 +154,7 @@ public void testTaskFailedLifecycle() { assertNull(result); assertNotNull(exception); assertEquals("Oh no!", exception.getMessage()); - return new CoordinatorResult<>(List.of(), null); + return new CoordinatorResult<>(Collections.emptyList(), null); }; executor.schedule( @@ -301,7 +301,7 @@ public void testTaskSchedulingWriteOperationFailed() { AtomicBoolean operationCalled = new AtomicBoolean(false); CoordinatorExecutor.TaskOperation taskOperation = (result, exception) -> { operationCalled.set(true); - return new CoordinatorResult<>(List.of(), null); + return new CoordinatorResult<>(Collections.emptyList(), null); }; executor.schedule( diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordTest.java index 1b2754c07e56f..6b2827b3b018b 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRecordTest.java @@ -29,32 +29,32 @@ public class CoordinatorRecordTest { @Test public void testAttributes() { - ApiMessage key = mock(ApiMessage.class); + ApiMessageAndVersion key = new ApiMessageAndVersion(mock(ApiMessage.class), (short) 0); ApiMessageAndVersion value = new ApiMessageAndVersion(mock(ApiMessage.class), (short) 0); - CoordinatorRecord record = CoordinatorRecord.record(key, value); + CoordinatorRecord record = new CoordinatorRecord(key, value); assertEquals(key, record.key()); assertEquals(value, record.value()); } @Test public void testKeyCannotBeNull() { - assertThrows(NullPointerException.class, () -> CoordinatorRecord.record(null, null)); + assertThrows(NullPointerException.class, () -> new CoordinatorRecord(null, null)); } @Test public void testValueCanBeNull() { - ApiMessage key = mock(ApiMessage.class); - CoordinatorRecord record = CoordinatorRecord.record(key, null); + ApiMessageAndVersion key = new ApiMessageAndVersion(mock(ApiMessage.class), (short) 0); + CoordinatorRecord record = new CoordinatorRecord(key, null); assertEquals(key, record.key()); assertNull(record.value()); } @Test public void testEquals() { - ApiMessage key = mock(ApiMessage.class); + ApiMessageAndVersion key = new ApiMessageAndVersion(mock(ApiMessage.class), (short) 0); ApiMessageAndVersion value = new ApiMessageAndVersion(mock(ApiMessage.class), (short) 0); - CoordinatorRecord record1 = CoordinatorRecord.record(key, value); - CoordinatorRecord record2 = CoordinatorRecord.record(key, value); + CoordinatorRecord record1 = new CoordinatorRecord(key, value); + CoordinatorRecord record2 = new CoordinatorRecord(key, value); assertEquals(record1, record2); } } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorResultTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorResultTest.java index 263f14859ff06..8d050cb1e0780 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorResultTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorResultTest.java @@ -18,7 +18,7 @@ import org.junit.jupiter.api.Test; -import java.util.List; +import java.util.Collections; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; @@ -26,8 +26,8 @@ public class CoordinatorResultTest { @Test public void testAttributes() { - CoordinatorResult result = new CoordinatorResult<>(List.of(), "response"); - assertEquals(List.of(), result.records()); + CoordinatorResult result = new CoordinatorResult<>(Collections.emptyList(), "response"); + assertEquals(Collections.emptyList(), result.records()); assertEquals("response", result.response()); } @@ -38,8 +38,8 @@ public void testRecordsCannotBeNull() { @Test public void testEquals() { - CoordinatorResult result1 = new CoordinatorResult<>(List.of(), "response"); - CoordinatorResult result2 = new CoordinatorResult<>(List.of(), "response"); + CoordinatorResult result1 = new CoordinatorResult<>(Collections.emptyList(), "response"); + CoordinatorResult result2 = new CoordinatorResult<>(Collections.emptyList(), "response"); assertEquals(result1, result2); } } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java index 68f152f2bea08..ed6d269763450 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeMetricsImplTest.java @@ -27,8 +27,8 @@ import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.ValueSource; -import java.util.List; -import java.util.Set; +import java.util.Arrays; +import java.util.HashSet; import java.util.stream.IntStream; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeMetricsImpl.BATCH_FLUSH_TIME_METRIC_NAME; @@ -39,19 +39,17 @@ import static org.apache.kafka.coordinator.common.runtime.KafkaMetricHistogram.MAX_LATENCY_MS; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertTrue; public class CoordinatorRuntimeMetricsImplTest { private static final String METRICS_GROUP = "test-runtime-metrics"; - private static final String OTHER_METRICS_GROUP = "test-runtime-metrics-2"; - + @Test public void testMetricNames() { Metrics metrics = new Metrics(); - Set expectedMetrics = Set.of( + HashSet expectedMetrics = new HashSet<>(Arrays.asList( kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "loading"), kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "active"), kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", "failed"), @@ -79,7 +77,7 @@ public void testMetricNames() { kafkaMetricName(metrics, "batch-flush-time-ms-p95"), kafkaMetricName(metrics, "batch-flush-time-ms-p99"), kafkaMetricName(metrics, "batch-flush-time-ms-p999") - ); + )); try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP)) { runtimeMetrics.registerEventQueueSizeGauge(() -> 0); @@ -112,26 +110,6 @@ public void testUpdateNumPartitionsMetrics() { } } - @Test - public void testNumPartitionsMetricsGroupIsolation() { - Metrics metrics = new Metrics(); - - try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); - CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { - IntStream.range(0, 3) - .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.INITIAL, CoordinatorState.LOADING)); - IntStream.range(0, 2) - .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.LOADING, CoordinatorState.ACTIVE)); - IntStream.range(0, 1) - .forEach(__ -> runtimeMetrics.recordPartitionStateChange(CoordinatorState.ACTIVE, CoordinatorState.FAILED)); - - for (String state : List.of("loading", "active", "failed")) { - assertMetricGauge(metrics, kafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", state), 1); - assertMetricGauge(metrics, otherGroupKafkaMetricName(metrics, NUM_PARTITIONS_METRIC_NAME, "state", state), 0); - } - } - } - @Test public void testPartitionLoadSensorMetrics() { Time time = new MockTime(); @@ -153,29 +131,6 @@ public void testPartitionLoadSensorMetrics() { } } - @ParameterizedTest - @ValueSource(strings = { - "partition-load-time-avg", - "partition-load-time-max" - }) - public void testPartitionLoadSensorMetricsGroupIsolation(String name) { - Time time = new MockTime(); - Metrics metrics = new Metrics(time); - - try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); - CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { - long startTimeMs = time.milliseconds(); - runtimeMetrics.recordPartitionLoadSensor(startTimeMs, startTimeMs + 1000); - - org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, name); - org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, name); - KafkaMetric metric = metrics.metrics().get(metricName); - KafkaMetric otherMetric = metrics.metrics().get(otherGroupMetricName); - assertNotEquals(Double.NaN, metric.metricValue()); - assertEquals(Double.NaN, otherMetric.metricValue()); - } - } - @Test public void testThreadIdleSensor() { Time time = new MockTime(); @@ -189,22 +144,6 @@ public void testThreadIdleSensor() { assertEquals(6 / 30.0, metric.metricValue()); // 'total_ms / window_ms' } - @Test - public void testThreadIdleSensorMetricsGroupIsolation() { - Time time = new MockTime(); - Metrics metrics = new Metrics(time); - - try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); - CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { - runtimeMetrics.recordThreadIdleTime(1000.0); - - org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, "thread-idle-ratio-avg"); - org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, "thread-idle-ratio-avg"); - assertNotEquals(0.0, metrics.metrics().get(metricName).metricValue()); - assertEquals(0.0, metrics.metrics().get(otherGroupMetricName).metricValue()); - } - } - @Test public void testEventQueueSize() { Time time = new MockTime(); @@ -216,21 +155,6 @@ public void testEventQueueSize() { } } - @Test - public void testEventQueueSizeMetricsGroupIsolation() { - Time time = new MockTime(); - Metrics metrics = new Metrics(time); - - try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); - CoordinatorRuntimeMetricsImpl otherRuntimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { - runtimeMetrics.registerEventQueueSizeGauge(() -> 5); - otherRuntimeMetrics.registerEventQueueSizeGauge(() -> 0); - - assertMetricGauge(metrics, kafkaMetricName(metrics, "event-queue-size"), 5); - assertMetricGauge(metrics, otherGroupKafkaMetricName(metrics, "event-queue-size"), 0); - } - } - @ParameterizedTest @ValueSource(strings = { EVENT_QUEUE_TIME_METRIC_NAME, @@ -281,45 +205,6 @@ public void testHistogramMetrics(String metricNamePrefix) { assertEquals(999.0, metric.metricValue()); } - @ParameterizedTest - @ValueSource(strings = { - EVENT_QUEUE_TIME_METRIC_NAME, - EVENT_PROCESSING_TIME_METRIC_NAME, - EVENT_PURGATORY_TIME_METRIC_NAME, - BATCH_FLUSH_TIME_METRIC_NAME - }) - public void testHistogramMetricsGroupIsolation(String metricNamePrefix) { - Time time = new MockTime(); - Metrics metrics = new Metrics(time); - - try (CoordinatorRuntimeMetricsImpl runtimeMetrics = new CoordinatorRuntimeMetricsImpl(metrics, METRICS_GROUP); - CoordinatorRuntimeMetricsImpl runtimeMetrics2 = new CoordinatorRuntimeMetricsImpl(metrics, OTHER_METRICS_GROUP)) { - switch (metricNamePrefix) { - case EVENT_QUEUE_TIME_METRIC_NAME: - runtimeMetrics.recordEventQueueTime(1000); - break; - case EVENT_PROCESSING_TIME_METRIC_NAME: - runtimeMetrics.recordEventProcessingTime(1000); - break; - case EVENT_PURGATORY_TIME_METRIC_NAME: - runtimeMetrics.recordEventPurgatoryTime(1000); - break; - case BATCH_FLUSH_TIME_METRIC_NAME: - runtimeMetrics.recordFlushTime(1000); - } - - // Check metric group isolation - for (String suffix : List.of("-max", "-p50", "-p95", "-p99", "-p999")) { - org.apache.kafka.common.MetricName metricName = kafkaMetricName(metrics, metricNamePrefix + suffix); - org.apache.kafka.common.MetricName otherGroupMetricName = otherGroupKafkaMetricName(metrics, metricNamePrefix + suffix); - KafkaMetric metric = metrics.metrics().get(metricName); - KafkaMetric otherMetric = metrics.metrics().get(otherGroupMetricName); - assertNotEquals(0.0, metric.metricValue()); - assertEquals(0.0, otherMetric.metricValue()); - } - } - } - @Test public void testRecordEventPurgatoryTimeLimit() { Time time = new MockTime(); @@ -344,8 +229,4 @@ private static void assertMetricGauge(Metrics metrics, org.apache.kafka.common.M private static MetricName kafkaMetricName(Metrics metrics, String name, String... keyValue) { return metrics.metricName(name, METRICS_GROUP, "", keyValue); } - - private static MetricName otherGroupKafkaMetricName(Metrics metrics, String name, String... keyValue) { - return metrics.metricName(name, OTHER_METRICS_GROUP, "", keyValue); - } } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java index 4a040df6712c0..9e4e6f7bb9b44 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/CoordinatorRuntimeTest.java @@ -18,25 +18,36 @@ import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.NotCoordinatorException; import org.apache.kafka.common.errors.NotEnoughReplicasException; import org.apache.kafka.common.errors.RecordTooLargeException; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.record.AbstractRecords; +import org.apache.kafka.common.record.CompressionType; import org.apache.kafka.common.record.ControlRecordType; +import org.apache.kafka.common.record.EndTransactionMarker; import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.MemoryRecordsBuilder; import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.RecordVersion; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.requests.TransactionResult; import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.image.MetadataDelta; import org.apache.kafka.image.MetadataImage; +import org.apache.kafka.image.MetadataProvenance; import org.apache.kafka.server.util.FutureUtils; import org.apache.kafka.server.util.timer.MockTimer; import org.apache.kafka.storage.internals.log.LogConfig; import org.apache.kafka.storage.internals.log.VerificationGuard; import org.apache.kafka.timeline.SnapshotRegistry; +import org.apache.kafka.timeline.TimelineHashMap; +import org.apache.kafka.timeline.TimelineHashSet; import org.junit.jupiter.api.Test; import org.junit.jupiter.params.ParameterizedTest; @@ -45,16 +56,22 @@ import java.nio.BufferOverflowException; import java.nio.ByteBuffer; +import java.nio.charset.Charset; import java.time.Duration; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Deque; +import java.util.LinkedList; import java.util.List; -import java.util.Map; +import java.util.Objects; import java.util.OptionalInt; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; import java.util.concurrent.ExecutorService; +import java.util.concurrent.RejectedExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.concurrent.atomic.AtomicInteger; @@ -67,10 +84,7 @@ import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.CoordinatorState.INITIAL; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.CoordinatorState.LOADING; import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.HighWatermarkListener.NO_OFFSET; -import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.INITIAL_BUFFER_SIZE; -import static org.apache.kafka.coordinator.common.runtime.TestUtil.endTransactionMarker; -import static org.apache.kafka.coordinator.common.runtime.TestUtil.records; -import static org.apache.kafka.coordinator.common.runtime.TestUtil.transactionalRecords; +import static org.apache.kafka.coordinator.common.runtime.CoordinatorRuntime.MIN_BUFFER_SIZE; import static org.apache.kafka.test.TestUtils.assertFutureThrows; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; @@ -97,6 +111,554 @@ public class CoordinatorRuntimeTest { private static final short TXN_OFFSET_COMMIT_LATEST_VERSION = ApiKeys.TXN_OFFSET_COMMIT.latestVersion(); + private static class StringSerializer implements Serializer { + @Override + public byte[] serializeKey(String record) { + return null; + } + + @Override + public byte[] serializeValue(String record) { + return record.getBytes(Charset.defaultCharset()); + } + } + + private static class ThrowingSerializer implements Serializer { + private final Serializer serializer; + private boolean throwOnNextOperation; + + public ThrowingSerializer(Serializer serializer) { + this.serializer = serializer; + this.throwOnNextOperation = false; + } + + public void throwOnNextOperation() { + throwOnNextOperation = true; + } + + @Override + public byte[] serializeKey(T record) { + return serializer.serializeKey(record); + } + + @Override + public byte[] serializeValue(T record) { + if (throwOnNextOperation) { + throwOnNextOperation = false; + throw new BufferOverflowException(); + } + return serializer.serializeValue(record); + } + } + + /** + * A CoordinatorEventProcessor that directly executes the operations. This is + * useful in unit tests where execution in threads is not required. + */ + private static class DirectEventProcessor implements CoordinatorEventProcessor { + @Override + public void enqueueLast(CoordinatorEvent event) throws RejectedExecutionException { + try { + event.run(); + } catch (Throwable ex) { + event.complete(ex); + } + } + + @Override + public void enqueueFirst(CoordinatorEvent event) throws RejectedExecutionException { + try { + event.run(); + } catch (Throwable ex) { + event.complete(ex); + } + } + + @Override + public void close() {} + } + + /** + * A CoordinatorEventProcessor that queues event and execute the next one + * when poll() is called. + */ + private static class ManualEventProcessor implements CoordinatorEventProcessor { + private final Deque queue = new LinkedList<>(); + + @Override + public void enqueueLast(CoordinatorEvent event) throws RejectedExecutionException { + queue.addLast(event); + } + + @Override + public void enqueueFirst(CoordinatorEvent event) throws RejectedExecutionException { + queue.addFirst(event); + } + + public boolean poll() { + CoordinatorEvent event = queue.poll(); + if (event == null) return false; + + try { + event.run(); + } catch (Throwable ex) { + event.complete(ex); + } + + return true; + } + + public int size() { + return queue.size(); + } + + @Override + public void close() { + + } + } + + /** + * A CoordinatorLoader that always succeeds. + */ + private static class MockCoordinatorLoader implements CoordinatorLoader { + private final LoadSummary summary; + private final List lastWrittenOffsets; + private final List lastCommittedOffsets; + + public MockCoordinatorLoader( + LoadSummary summary, + List lastWrittenOffsets, + List lastCommittedOffsets + ) { + this.summary = summary; + this.lastWrittenOffsets = lastWrittenOffsets; + this.lastCommittedOffsets = lastCommittedOffsets; + } + + public MockCoordinatorLoader() { + this(null, Collections.emptyList(), Collections.emptyList()); + } + + @Override + public CompletableFuture load( + TopicPartition tp, + CoordinatorPlayback replayable + ) { + lastWrittenOffsets.forEach(replayable::updateLastWrittenOffset); + lastCommittedOffsets.forEach(replayable::updateLastCommittedOffset); + return CompletableFuture.completedFuture(summary); + } + + @Override + public void close() { } + } + + /** + * An in-memory partition writer that accepts a maximum number of writes. + */ + private static class MockPartitionWriter extends InMemoryPartitionWriter { + private final Time time; + private final int maxWrites; + private final boolean failEndMarker; + private final AtomicInteger writeCount = new AtomicInteger(0); + + public MockPartitionWriter() { + this(new MockTime(), Integer.MAX_VALUE, false); + } + + public MockPartitionWriter(int maxWrites) { + this(new MockTime(), maxWrites, false); + } + + public MockPartitionWriter(boolean failEndMarker) { + this(new MockTime(), Integer.MAX_VALUE, failEndMarker); + } + + public MockPartitionWriter(Time time, int maxWrites, boolean failEndMarker) { + super(false); + this.time = time; + this.maxWrites = maxWrites; + this.failEndMarker = failEndMarker; + } + + @Override + public void registerListener(TopicPartition tp, Listener listener) { + super.registerListener(tp, listener); + } + + @Override + public void deregisterListener(TopicPartition tp, Listener listener) { + super.deregisterListener(tp, listener); + } + + @Override + public long append( + TopicPartition tp, + VerificationGuard verificationGuard, + MemoryRecords batch + ) { + if (batch.sizeInBytes() > config(tp).maxMessageSize()) + throw new RecordTooLargeException("Batch is larger than the max message size"); + + // We don't want the coordinator to write empty batches. + if (batch.validBytes() <= 0) + throw new KafkaException("Coordinator tried to write an empty batch"); + + if (writeCount.incrementAndGet() > maxWrites) + throw new KafkaException("Maximum number of writes reached"); + + if (failEndMarker && batch.firstBatch().isControlBatch()) + throw new KafkaException("Couldn't write end marker."); + + time.sleep(10); + return super.append(tp, verificationGuard, batch); + } + } + + /** + * A simple Coordinator implementation that stores the records into a set. + */ + static class MockCoordinatorShard implements CoordinatorShard { + static class RecordAndMetadata { + public final long offset; + public final long producerId; + public final short producerEpoch; + public final String record; + + public RecordAndMetadata( + long offset, + String record + ) { + this( + offset, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + record + ); + } + + public RecordAndMetadata( + long offset, + long producerId, + short producerEpoch, + String record + ) { + this.offset = offset; + this.producerId = producerId; + this.producerEpoch = producerEpoch; + this.record = record; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + RecordAndMetadata that = (RecordAndMetadata) o; + + if (offset != that.offset) return false; + if (producerId != that.producerId) return false; + if (producerEpoch != that.producerEpoch) return false; + return Objects.equals(record, that.record); + } + + @Override + public int hashCode() { + int result = (int) (offset ^ (offset >>> 32)); + result = 31 * result + (int) (producerId ^ (producerId >>> 32)); + result = 31 * result + (int) producerEpoch; + result = 31 * result + (record != null ? record.hashCode() : 0); + return result; + } + + @Override + public String toString() { + return "RecordAndMetadata(" + + "offset=" + offset + + ", producerId=" + producerId + + ", producerEpoch=" + producerEpoch + + ", record='" + record.substring(0, Math.min(10, record.length())) + '\'' + + ')'; + } + } + + private final SnapshotRegistry snapshotRegistry; + private final TimelineHashSet records; + private final TimelineHashMap> pendingRecords; + private final CoordinatorTimer timer; + private final CoordinatorExecutor executor; + + MockCoordinatorShard( + SnapshotRegistry snapshotRegistry, + CoordinatorTimer timer + ) { + this(snapshotRegistry, timer, null); + } + + MockCoordinatorShard( + SnapshotRegistry snapshotRegistry, + CoordinatorTimer timer, + CoordinatorExecutor executor + ) { + this.snapshotRegistry = snapshotRegistry; + this.records = new TimelineHashSet<>(snapshotRegistry, 0); + this.pendingRecords = new TimelineHashMap<>(snapshotRegistry, 0); + this.timer = timer; + this.executor = executor; + } + + @Override + public void replay( + long offset, + long producerId, + short producerEpoch, + String record + ) throws RuntimeException { + RecordAndMetadata recordAndMetadata = new RecordAndMetadata( + offset, + producerId, + producerEpoch, + record + ); + + if (producerId == RecordBatch.NO_PRODUCER_ID) { + records.add(recordAndMetadata); + } else { + pendingRecords + .computeIfAbsent(producerId, __ -> new TimelineHashSet<>(snapshotRegistry, 0)) + .add(recordAndMetadata); + } + } + + @Override + public void replayEndTransactionMarker( + long producerId, + short producerEpoch, + TransactionResult result + ) throws RuntimeException { + if (result == TransactionResult.COMMIT) { + TimelineHashSet pending = pendingRecords.remove(producerId); + if (pending == null) return; + records.addAll(pending); + } else { + pendingRecords.remove(producerId); + } + } + + Set pendingRecords(long producerId) { + TimelineHashSet pending = pendingRecords.get(producerId); + if (pending == null) return Collections.emptySet(); + return pending.stream().map(record -> record.record).collect(Collectors.toUnmodifiableSet()); + } + + Set records() { + return records.stream().map(record -> record.record).collect(Collectors.toUnmodifiableSet()); + } + + List fullRecords() { + return records + .stream() + .sorted(Comparator.comparingLong(record -> record.offset)) + .collect(Collectors.toList()); + } + } + + /** + * A CoordinatorBuilder that creates a MockCoordinator. + */ + private static class MockCoordinatorShardBuilder implements CoordinatorShardBuilder { + private SnapshotRegistry snapshotRegistry; + private CoordinatorTimer timer; + private CoordinatorExecutor executor; + + @Override + public CoordinatorShardBuilder withSnapshotRegistry( + SnapshotRegistry snapshotRegistry + ) { + this.snapshotRegistry = snapshotRegistry; + return this; + } + + @Override + public CoordinatorShardBuilder withLogContext( + LogContext logContext + ) { + return this; + } + + @Override + public CoordinatorShardBuilder withTime( + Time time + ) { + return this; + } + + @Override + public CoordinatorShardBuilder withExecutor( + CoordinatorExecutor executor + ) { + this.executor = executor; + return this; + } + + @Override + public CoordinatorShardBuilder withTimer( + CoordinatorTimer timer + ) { + this.timer = timer; + return this; + } + + @Override + public CoordinatorShardBuilder withCoordinatorMetrics(CoordinatorMetrics coordinatorMetrics) { + return this; + } + + @Override + public CoordinatorShardBuilder withTopicPartition( + TopicPartition topicPartition + ) { + return this; + } + + @Override + public MockCoordinatorShard build() { + return new MockCoordinatorShard( + Objects.requireNonNull(this.snapshotRegistry), + Objects.requireNonNull(this.timer), + Objects.requireNonNull(this.executor) + ); + } + } + + /** + * A CoordinatorBuilderSupplier that returns a MockCoordinatorBuilder. + */ + private static class MockCoordinatorShardBuilderSupplier implements CoordinatorShardBuilderSupplier { + @Override + public CoordinatorShardBuilder get() { + return new MockCoordinatorShardBuilder(); + } + } + + private static MemoryRecords records( + long timestamp, + String... records + ) { + return records(timestamp, Arrays.stream(records).collect(Collectors.toList())); + } + + private static MemoryRecords records( + long timestamp, + List records + ) { + if (records.isEmpty()) + return MemoryRecords.EMPTY; + + List simpleRecords = records.stream().map(record -> + new SimpleRecord(timestamp, record.getBytes(Charset.defaultCharset())) + ).collect(Collectors.toList()); + + int sizeEstimate = AbstractRecords.estimateSizeInBytes( + RecordVersion.current().value, + CompressionType.NONE, + simpleRecords + ); + + ByteBuffer buffer = ByteBuffer.allocate(sizeEstimate); + + MemoryRecordsBuilder builder = MemoryRecords.builder( + buffer, + RecordVersion.current().value, + Compression.NONE, + TimestampType.CREATE_TIME, + 0L, + timestamp, + RecordBatch.NO_PRODUCER_ID, + RecordBatch.NO_PRODUCER_EPOCH, + 0, + false, + RecordBatch.NO_PARTITION_LEADER_EPOCH + ); + + simpleRecords.forEach(builder::append); + + return builder.build(); + } + + private static MemoryRecords transactionalRecords( + long producerId, + short producerEpoch, + long timestamp, + String... records + ) { + return transactionalRecords( + producerId, + producerEpoch, + timestamp, + Arrays.stream(records).collect(Collectors.toList()) + ); + } + + private static MemoryRecords transactionalRecords( + long producerId, + short producerEpoch, + long timestamp, + List records + ) { + if (records.isEmpty()) + return MemoryRecords.EMPTY; + + List simpleRecords = records.stream().map(record -> + new SimpleRecord(timestamp, record.getBytes(Charset.defaultCharset())) + ).collect(Collectors.toList()); + + int sizeEstimate = AbstractRecords.estimateSizeInBytes( + RecordVersion.current().value, + CompressionType.NONE, + simpleRecords + ); + + ByteBuffer buffer = ByteBuffer.allocate(sizeEstimate); + + MemoryRecordsBuilder builder = MemoryRecords.builder( + buffer, + RecordVersion.current().value, + Compression.NONE, + TimestampType.CREATE_TIME, + 0L, + timestamp, + producerId, + producerEpoch, + 0, + true, + RecordBatch.NO_PARTITION_LEADER_EPOCH + ); + + simpleRecords.forEach(builder::append); + + return builder.build(); + } + + private static MemoryRecords endTransactionMarker( + long producerId, + short producerEpoch, + long timestamp, + int coordinatorEpoch, + ControlRecordType result + ) { + return MemoryRecords.withEndTransactionMarker( + timestamp, + producerId, + producerEpoch, + new EndTransactionMarker( + result, + coordinatorEpoch + ) + ); + } + @Test public void testScheduleLoading() { MockTimer timer = new MockTimer(); @@ -154,7 +716,7 @@ public void testScheduleLoading() { assertEquals(ACTIVE, ctx.state); // Verify that onLoaded is called. - verify(coordinator, times(1)).onLoaded(CoordinatorMetadataImage.EMPTY); + verify(coordinator, times(1)).onLoaded(MetadataImage.EMPTY); // Verify that the listener is registered. verify(writer, times(1)).registerListener( @@ -650,37 +1212,42 @@ public void testScheduleUnloadingWithDeferredEventExceptions() throws ExecutionE runtime.scheduleLoadOperation(TP, 10); CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); + // Get the max batch size. + int maxBatchSize = writer.config(TP).maxMessageSize(); + + // Create records with three quarters of the max batch size each, so that it is not + // possible to have more than one record in a single batch. + List records = Stream.of('1', '2', '3').map(c -> { + char[] payload = new char[maxBatchSize * 3 / 4]; + Arrays.fill(payload, c); + return new String(payload); + }).collect(Collectors.toList()); + // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of("record1"), "response1") + state -> new CoordinatorResult<>(List.of(records.get(0)), "response1") ); - // Complete transaction #1, to force the flush of write #1. - CompletableFuture complete1 = runtime.scheduleTransactionCompletion( - "complete#1", - TP, - 100L, - (short) 50, - 10, - TransactionResult.COMMIT, - DEFAULT_WRITE_TIMEOUT + // Write #2. + CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), + state -> new CoordinatorResult<>(List.of(records.get(1)), "response2") ); - // Write #2 but without any records. - CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of(), "response2") + // Write #3, to force the flush of write #2. + CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20), + state -> new CoordinatorResult<>(List.of(records.get(1)), "response3") ); // Records have been written to the log. assertEquals(List.of( - records(timer.time().milliseconds(), "record1"), - endTransactionMarker(100L, (short) 50, timer.time().milliseconds(), 10, ControlRecordType.COMMIT) + records(timer.time().milliseconds(), records.get(0)), + records(timer.time().milliseconds(), records.get(1)) ), writer.entries(TP)); // Verify that no writes are committed yet. assertFalse(write1.isDone()); - assertFalse(complete1.isDone()); assertFalse(write2.isDone()); + assertFalse(write3.isDone()); // Schedule the unloading. runtime.scheduleUnloadOperation(TP, OptionalInt.of(ctx.epoch + 1)); @@ -689,11 +1256,11 @@ public void testScheduleUnloadingWithDeferredEventExceptions() throws ExecutionE // All write completions throw exceptions after completing their futures. // Despite the exceptions, the unload should still complete. assertTrue(write1.isDone()); - assertTrue(complete1.isDone()); assertTrue(write2.isDone()); - assertFutureThrows(NotCoordinatorException.class, write1); - assertFutureThrows(NotCoordinatorException.class, complete1); - assertFutureThrows(NotCoordinatorException.class, write2); + assertTrue(write3.isDone()); + assertFutureThrows(write1, NotCoordinatorException.class); + assertFutureThrows(write2, NotCoordinatorException.class); + assertFutureThrows(write3, NotCoordinatorException.class); // Verify that onUnloaded is called. verify(coordinator, times(1)).onUnloaded(); @@ -775,7 +1342,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // Write #3 but without any records. CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(List.of(), "response3")); + state -> new CoordinatorResult<>(Collections.emptyList(), "response3")); // Verify that the write is not committed yet. assertFalse(write3.isDone()); @@ -818,7 +1385,7 @@ public void testScheduleWriteOp() throws ExecutionException, InterruptedExceptio // Write #4 but without records. CompletableFuture write4 = runtime.scheduleWriteOperation("write#4", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(List.of(), "response4")); + state -> new CoordinatorResult<>(Collections.emptyList(), "response4")); // It is completed immediately because the state is fully committed. assertTrue(write4.isDone()); @@ -847,8 +1414,8 @@ public void testScheduleWriteOpWhenInactive() { // Scheduling a write fails with a NotCoordinatorException because the coordinator // does not exist. CompletableFuture write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(List.of(), "response1")); - assertFutureThrows(NotCoordinatorException.class, write); + state -> new CoordinatorResult<>(Collections.emptyList(), "response1")); + assertFutureThrows(write, NotCoordinatorException.class); } @Test @@ -877,7 +1444,7 @@ public void testScheduleWriteOpWhenOpFails() { CompletableFuture write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT, state -> { throw new KafkaException("error"); }); - assertFutureThrows(KafkaException.class, write); + assertFutureThrows(write, KafkaException.class); } @Test @@ -930,7 +1497,7 @@ public void replay( // Write. It should fail. CompletableFuture write = runtime.scheduleWriteOperation("write", TP, DEFAULT_WRITE_TIMEOUT, state -> new CoordinatorResult<>(List.of("record1", "record2"), "response1")); - assertFutureThrows(IllegalArgumentException.class, write); + assertFutureThrows(write, IllegalArgumentException.class); // Verify that the state has not changed. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); @@ -982,7 +1549,7 @@ public void testScheduleWriteOpWhenWriteFails() { // accept 1 write. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, state -> new CoordinatorResult<>(List.of("record3", "record4", "record5"), "response2")); - assertFutureThrows(KafkaException.class, write2); + assertFutureThrows(write2, KafkaException.class); // Verify that the state has not changed. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); @@ -1027,7 +1594,7 @@ public void testScheduleWriteOpWhenWriteTimesOut() throws InterruptedException { timer.advanceClock(4); - assertFutureThrows(org.apache.kafka.common.errors.TimeoutException.class, timedOutWrite); + assertFutureThrows(timedOutWrite, org.apache.kafka.common.errors.TimeoutException.class); } @Test @@ -1129,7 +1696,7 @@ public CoordinatorShardBuilder get() { verify(writer, times(1)).registerListener(eq(TP), any()); // Prepare the log config. - when(writer.config(TP)).thenReturn(new LogConfig(Map.of())); + when(writer.config(TP)).thenReturn(new LogConfig(Collections.emptyMap())); // Prepare the transaction verification. VerificationGuard guard = new VerificationGuard(); @@ -1244,7 +1811,7 @@ public CoordinatorShardBuilder get() { ); // Verify that the future is failed with the expected exception. - assertFutureThrows(NotEnoughReplicasException.class, future); + assertFutureThrows(future, NotEnoughReplicasException.class); // Verify that the writer is not called. verify(writer, times(0)).append( @@ -1343,7 +1910,7 @@ public void testScheduleTransactionCompletion(TransactionResult result) throws E expectedType = ControlRecordType.COMMIT; } else { // Or they are gone if aborted. - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); expectedType = ControlRecordType.ABORT; } @@ -1416,7 +1983,7 @@ public void testScheduleTransactionCompletionWhenWriteTimesOut() throws Interrup // Advance clock to timeout Complete #1. timer.advanceClock(4); - assertFutureThrows(org.apache.kafka.common.errors.TimeoutException.class, timedOutCompletion); + assertFutureThrows(timedOutCompletion, org.apache.kafka.common.errors.TimeoutException.class); // Verify that the state is still the same. We don't revert when the // operation timeouts because the record has been written to the log. @@ -1472,7 +2039,7 @@ public void testScheduleTransactionCompletionWhenWriteFails() { assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); // Complete transaction #1. It should fail. CompletableFuture complete1 = runtime.scheduleTransactionCompletion( @@ -1484,14 +2051,14 @@ public void testScheduleTransactionCompletionWhenWriteFails() { TransactionResult.COMMIT, DEFAULT_WRITE_TIMEOUT ); - assertFutureThrows(KafkaException.class, complete1); + assertFutureThrows(complete1, KafkaException.class); // Verify that the state has not changed. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); } @Test @@ -1558,7 +2125,7 @@ public void replayEndTransactionMarker( assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); @@ -1573,14 +2140,14 @@ public void replayEndTransactionMarker( TransactionResult.COMMIT, DEFAULT_WRITE_TIMEOUT ); - assertFutureThrows(IllegalArgumentException.class, complete1); + assertFutureThrows(complete1, IllegalArgumentException.class); // Verify that the state has not changed. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L, 2L), ctx.coordinator.snapshotRegistry().epochsList()); assertEquals(Set.of("record1", "record2"), ctx.coordinator.coordinator().pendingRecords(100L)); - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); assertEquals(List.of( transactionalRecords(100L, (short) 5, timer.time().milliseconds(), "record1", "record2") ), writer.entries(TP)); @@ -1668,7 +2235,7 @@ public void testScheduleReadOpWhenPartitionInactive() { // Schedule a read. It fails because the coordinator does not exist. CompletableFuture read = runtime.scheduleReadOperation("read", TP, (state, offset) -> "read-response"); - assertFutureThrows(NotCoordinatorException.class, read); + assertFutureThrows(read, NotCoordinatorException.class); } @Test @@ -1715,7 +2282,7 @@ public void testScheduleReadOpWhenOpsFails() { assertEquals(ctx.coordinator.lastCommittedOffset(), offset); throw new IllegalArgumentException("error"); }); - assertFutureThrows(IllegalArgumentException.class, read); + assertFutureThrows(read, IllegalArgumentException.class); } @Test @@ -1826,8 +2393,8 @@ public void testClose() throws Exception { runtime.close(); // All the pending operations are completed with NotCoordinatorException. - assertFutureThrows(NotCoordinatorException.class, write1); - assertFutureThrows(NotCoordinatorException.class, write2); + assertFutureThrows(write1, NotCoordinatorException.class); + assertFutureThrows(write2, NotCoordinatorException.class); // Verify that the loader was closed. verify(loader).close(); @@ -1896,11 +2463,11 @@ public void testOnNewMetadataImage() { // Coordinator 0 is loaded. It should get the current image // that is the empty one. future0.complete(null); - verify(coordinator0).onLoaded(CoordinatorMetadataImage.EMPTY); + verify(coordinator0).onLoaded(MetadataImage.EMPTY); // Publish a new image. - CoordinatorMetadataDelta delta = new KRaftCoordinatorMetadataDelta(new MetadataDelta(MetadataImage.EMPTY)); - CoordinatorMetadataImage newImage = CoordinatorMetadataImage.EMPTY; + MetadataDelta delta = new MetadataDelta(MetadataImage.EMPTY); + MetadataImage newImage = delta.apply(MetadataProvenance.EMPTY); runtime.onNewMetadataImage(newImage, delta); // Coordinator 0 should be notified about it. @@ -2113,7 +2680,7 @@ public void testCancelTimer() throws InterruptedException { assertTrue(processor.poll()); // Verify that no operation was executed. - assertEquals(Set.of(), ctx.coordinator.coordinator().records()); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().records()); assertEquals(0, ctx.timer.size()); } @@ -2443,8 +3010,8 @@ public void testPartitionLoadSensor() { startTimeMs + 500, 30, 3000), - List.of(), - List.of())) + Collections.emptyList(), + Collections.emptyList())) .withEventProcessor(new DirectEventProcessor()) .withPartitionWriter(writer) .withCoordinatorShardBuilderSupplier(supplier) @@ -2560,8 +3127,8 @@ public void testPartitionLoadGeneratesSnapshotAtHighWatermarkNoRecordsLoaded() { 1500, 30, 3000), - List.of(), - List.of())) + Collections.emptyList(), + Collections.emptyList())) .withEventProcessor(new DirectEventProcessor()) .withPartitionWriter(writer) .withCoordinatorShardBuilderSupplier(supplier) @@ -2694,56 +3261,53 @@ public void testHighWatermarkUpdateWithDeferredEventExceptions() throws Executio // Load the coordinator. runtime.scheduleLoadOperation(TP, 10); + // Get the max batch size. + int maxBatchSize = writer.config(TP).maxMessageSize(); + + // Create records with three quarters of the max batch size each, so that it is not + // possible to have more than one record in a single batch. + List records = Stream.of('1', '2', '3').map(c -> { + char[] payload = new char[maxBatchSize * 3 / 4]; + Arrays.fill(payload, c); + return new String(payload); + }).collect(Collectors.toList()); + // Write #1. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of("record1"), "response1") + state -> new CoordinatorResult<>(List.of(records.get(0)), "response1") ); - // Complete transaction #1, to force the flush of write #2. - CompletableFuture complete1 = runtime.scheduleTransactionCompletion( - "complete#1", - TP, - 100L, - (short) 50, - 10, - TransactionResult.COMMIT, - DEFAULT_WRITE_TIMEOUT - ); - - // Write #2 but without any records. + // Write #2. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of(), "response2") + state -> new CoordinatorResult<>(List.of(records.get(1)), "response2") ); - // Write #3, also without any records. Should complete together with write #2. + // Write #3, to force the flush of write #2. CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of(), "response3") + state -> new CoordinatorResult<>(List.of(records.get(1)), "response3") ); // Records have been written to the log. assertEquals(List.of( - records(timer.time().milliseconds(), "record1"), - endTransactionMarker(100L, (short) 50, timer.time().milliseconds(), 10, ControlRecordType.COMMIT) + records(timer.time().milliseconds(), records.get(0)), + records(timer.time().milliseconds(), records.get(1)) ), writer.entries(TP)); // Verify that no writes are committed yet. assertFalse(write1.isDone()); - assertFalse(complete1.isDone()); assertFalse(write2.isDone()); assertFalse(write3.isDone()); - // Commit the records and transaction marker. + // Commit the first and second record. writer.commit(TP, 2); - // All write completions throw exceptions after completing their futures. - // Despite the exceptions, all writes should still complete. + // Write #1 and write #2's completions throw exceptions after completing their futures. + // Despite the exception from write #1, write #2 should still be completed. assertTrue(write1.isDone()); - assertTrue(complete1.isDone()); assertTrue(write2.isDone()); - assertTrue(write3.isDone()); + assertFalse(write3.isDone()); assertEquals("response1", write1.get(5, TimeUnit.SECONDS)); assertEquals("response2", write2.get(5, TimeUnit.SECONDS)); - assertEquals("response3", write3.get(5, TimeUnit.SECONDS)); } @Test @@ -2816,8 +3380,9 @@ public void testWriteEventWriteTimeoutTaskIsCancelledWhenHighWatermarkIsUpdated( assertTrue(write1.isDone()); assertTrue(write2.isDone()); - // All timer tasks have been cancelled. Hence,they have been removed in MockTimer. - assertEquals(0, timer.size()); + // All timer tasks have been cancelled. TimerTask entries are not removed in MockTimer. + assertEquals(2, timer.size()); + timer.taskQueue().forEach(taskEntry -> assertTrue(taskEntry.cancelled())); } @Test @@ -2885,8 +3450,9 @@ public void testCoordinatorCompleteTransactionEventWriteTimeoutTaskIsCancelledWh assertEquals(1, runtime.contextOrThrow(TP).coordinator.lastCommittedOffset()); assertTrue(write1.isDone()); - // All timer tasks have been cancelled. Hence, they have been removed in MockTimer. - assertEquals(0, timer.size()); + // All timer tasks have been cancelled. TimerTask entries are not removed in MockTimer. + assertEquals(1, timer.size()); + timer.taskQueue().forEach(taskEntry -> assertTrue(taskEntry.cancelled())); } @Test @@ -2920,11 +3486,11 @@ public void testAppendRecordBatchSize() { assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); int maxBatchSize = writer.config(TP).maxMessageSize(); - assertTrue(maxBatchSize > INITIAL_BUFFER_SIZE); + assertTrue(maxBatchSize > MIN_BUFFER_SIZE); - // Generate enough records to create a batch that has INITIAL_BUFFER_SIZE < batchSize < maxBatchSize + // Generate enough records to create a batch that has 16KB < batchSize < maxBatchSize List records = new ArrayList<>(); - for (int i = 0; i < 50000; i++) { + for (int i = 0; i < 3000; i++) { records.add("record-" + i); } @@ -2938,210 +3504,7 @@ public void testAppendRecordBatchSize() { assertFalse(write1.isCompletedExceptionally()); int batchSize = writer.entries(TP).get(0).sizeInBytes(); - assertTrue(batchSize > INITIAL_BUFFER_SIZE && batchSize < maxBatchSize); - } - - @Test - public void testCoordinatorDoNotRetainBufferLargeThanMaxMessageSize() { - MockTimer timer = new MockTimer(); - InMemoryPartitionWriter mockWriter = new InMemoryPartitionWriter(false) { - @Override - public LogConfig config(TopicPartition tp) { - return new LogConfig(Map.of( - TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB - )); - } - }; - StringSerializer serializer = new StringSerializer(); - - CoordinatorRuntime runtime = - new CoordinatorRuntime.Builder() - .withTime(timer.time()) - .withTimer(timer) - .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) - .withLoader(new MockCoordinatorLoader()) - .withEventProcessor(new DirectEventProcessor()) - .withPartitionWriter(mockWriter) - .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) - .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) - .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) - .withSerializer(serializer) - .withExecutorService(mock(ExecutorService.class)) - .build(); - - // Schedule the loading. - runtime.scheduleLoadOperation(TP, 10); - - // Verify the initial state. - CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); - assertEquals(0L, ctx.coordinator.lastWrittenOffset()); - assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - - // Generate a record larger than the maxBatchSize. - List largeRecords = List.of("A".repeat(100 * 1024 * 1024)); - - // Write #1. - CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(largeRecords, "response1", null, true, false) - ); - - // Verify that the write has not completed exceptionally. - // This will catch any exceptions thrown including RecordTooLargeException. - assertFalse(write1.isCompletedExceptionally()); - - // Verify that the next buffer retrieved from the bufferSupplier is the initial small one, not the large buffer. - assertEquals(INITIAL_BUFFER_SIZE, ctx.bufferSupplier.get(1).capacity()); - } - - @Test - public void testCoordinatorRetainExpandedBufferLessOrEqualToMaxMessageSize() { - MockTimer timer = new MockTimer(); - InMemoryPartitionWriter mockWriter = new InMemoryPartitionWriter(false) { - @Override - public LogConfig config(TopicPartition tp) { - return new LogConfig(Map.of( - TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024 * 1024) // 1GB - )); - } - }; - StringSerializer serializer = new StringSerializer(); - - CoordinatorRuntime runtime = - new CoordinatorRuntime.Builder() - .withTime(timer.time()) - .withTimer(timer) - .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) - .withLoader(new MockCoordinatorLoader()) - .withEventProcessor(new DirectEventProcessor()) - .withPartitionWriter(mockWriter) - .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) - .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) - .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) - .withSerializer(serializer) - .withExecutorService(mock(ExecutorService.class)) - .build(); - - // Schedule the loading. - runtime.scheduleLoadOperation(TP, 10); - - // Verify the initial state. - CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); - assertEquals(0L, ctx.coordinator.lastWrittenOffset()); - assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - - // Generate enough records to create a batch that has INITIAL_BUFFER_SIZE < batchSize < maxBatchSize - List records = new ArrayList<>(); - for (int i = 0; i < 1000000; i++) { - records.add("record-" + i); - } - - // Write #1. - CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(records, "response1") - ); - - // Verify that the write has not completed exceptionally. - // This will catch any exceptions thrown including RecordTooLargeException. - assertFalse(write1.isCompletedExceptionally()); - - int batchSize = mockWriter.entries(TP).get(0).sizeInBytes(); - int maxBatchSize = mockWriter.config(TP).maxMessageSize(); - assertTrue(INITIAL_BUFFER_SIZE < batchSize && batchSize <= maxBatchSize); - - // Verify that the next buffer retrieved from the bufferSupplier is the expanded buffer. - assertTrue(ctx.bufferSupplier.get(1).capacity() > INITIAL_BUFFER_SIZE); - } - - @Test - public void testBufferShrinkWhenMaxMessageSizeReducedBelowInitialBufferSize() { - MockTimer timer = new MockTimer(); - var mockWriter = new InMemoryPartitionWriter(false) { - private LogConfig config = new LogConfig(Map.of( - TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(1024 * 1024) // 1MB - )); - - @Override - public LogConfig config(TopicPartition tp) { - return config; - } - - public void updateConfig(LogConfig newConfig) { - this.config = newConfig; - } - }; - StringSerializer serializer = new StringSerializer(); - - CoordinatorRuntime runtime = - new CoordinatorRuntime.Builder() - .withTime(timer.time()) - .withTimer(timer) - .withDefaultWriteTimeOut(DEFAULT_WRITE_TIMEOUT) - .withLoader(new MockCoordinatorLoader()) - .withEventProcessor(new DirectEventProcessor()) - .withPartitionWriter(mockWriter) - .withCoordinatorShardBuilderSupplier(new MockCoordinatorShardBuilderSupplier()) - .withCoordinatorRuntimeMetrics(mock(CoordinatorRuntimeMetrics.class)) - .withCoordinatorMetrics(mock(CoordinatorMetrics.class)) - .withSerializer(serializer) - .withExecutorService(mock(ExecutorService.class)) - .build(); - - // Schedule the loading. - runtime.scheduleLoadOperation(TP, 10); - - // Verify the initial state. - CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); - assertEquals(0L, ctx.coordinator.lastWrittenOffset()); - assertEquals(0L, ctx.coordinator.lastCommittedOffset()); - assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - - List records = new ArrayList<>(); - for (int i = 0; i < 1000; i++) { - records.add("record-" + i); - } - - // Write #1. - CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(records, "response1") - ); - - // Verify that the write has not completed exceptionally. - // This will catch any exceptions thrown including RecordTooLargeException. - assertFalse(write1.isCompletedExceptionally()); - - int batchSize = mockWriter.entries(TP).get(0).sizeInBytes(); - int maxBatchSize = mockWriter.config(TP).maxMessageSize(); - assertTrue(batchSize <= INITIAL_BUFFER_SIZE && INITIAL_BUFFER_SIZE <= maxBatchSize); - - ByteBuffer cachedBuffer = ctx.bufferSupplier.get(1); - assertEquals(INITIAL_BUFFER_SIZE, cachedBuffer.capacity()); - // ctx.bufferSupplier.get(1); will clear cachedBuffer in bufferSupplier. Use release to put it back to bufferSupplier - ctx.bufferSupplier.release(cachedBuffer); - - // Reduce max message size below initial buffer size. - mockWriter.updateConfig(new LogConfig( - Map.of(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, String.valueOf(INITIAL_BUFFER_SIZE - 66)))); - assertEquals(INITIAL_BUFFER_SIZE - 66, mockWriter.config(TP).maxMessageSize()); - - // Write #2. - CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(records, "response2") - ); - assertFalse(write2.isCompletedExceptionally()); - - // Verify that there is no cached buffer since the cached buffer size is greater than new maxMessageSize. - assertEquals(1, ctx.bufferSupplier.get(1).capacity()); - - // Write #3. - CompletableFuture write3 = runtime.scheduleWriteOperation("write#3", TP, DEFAULT_WRITE_TIMEOUT, - state -> new CoordinatorResult<>(records, "response3") - ); - assertFalse(write3.isCompletedExceptionally()); - - // Verify that the cached buffer size is equals to new maxMessageSize that less than INITIAL_BUFFER_SIZE. - assertEquals(mockWriter.config(TP).maxMessageSize(), ctx.bufferSupplier.get(1).capacity()); + assertTrue(batchSize > MIN_BUFFER_SIZE && batchSize < maxBatchSize); } @Test @@ -3206,7 +3569,7 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)), new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Write #2 with one record. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), @@ -3225,7 +3588,7 @@ public void testScheduleWriteOperationWithBatching() throws ExecutionException, new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Write #3 with one record. This one cannot go into the existing batch // so the existing batch should be flushed and a new one should be created. @@ -3327,7 +3690,7 @@ public void testScheduleWriteOperationWithBatchingWhenRecordsTooLarge() { state -> new CoordinatorResult<>(records, "response1") ); - assertFutureThrows(RecordTooLargeException.class, write); + assertFutureThrows(write, RecordTooLargeException.class); } @Test @@ -3395,7 +3758,7 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Write #4. This write cannot make it in the current batch. So the current batch // is flushed. It will fail. So we expect all writes to fail. @@ -3403,18 +3766,18 @@ public void testScheduleWriteOperationWithBatchingWhenWriteFails() { state -> new CoordinatorResult<>(records.subList(3, 4), "response4")); // Verify the futures. - assertFutureThrows(KafkaException.class, write1); - assertFutureThrows(KafkaException.class, write2); - assertFutureThrows(KafkaException.class, write3); + assertFutureThrows(write1, KafkaException.class); + assertFutureThrows(write2, KafkaException.class); + assertFutureThrows(write3, KafkaException.class); // Write #4 is also expected to fail. - assertFutureThrows(KafkaException.class, write4); + assertFutureThrows(write4, KafkaException.class); // Verify the state. The state should be reverted to the initial state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(List.of(), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); + assertEquals(Collections.emptyList(), writer.entries(TP)); } @Test @@ -3497,22 +3860,22 @@ public void replay( assertEquals(List.of( new MockCoordinatorShard.RecordAndMetadata(0, records.get(0)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Write #2. It should fail. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), state -> new CoordinatorResult<>(records.subList(1, 2), "response2")); // Verify the futures. - assertFutureThrows(IllegalArgumentException.class, write1); - assertFutureThrows(IllegalArgumentException.class, write2); + assertFutureThrows(write1, IllegalArgumentException.class); + assertFutureThrows(write2, IllegalArgumentException.class); // Verify the state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(List.of(), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); + assertEquals(Collections.emptyList(), writer.entries(TP)); } @Test @@ -3558,9 +3921,9 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Set.of(), ctx.coordinator.coordinator().pendingRecords(100L)); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1"), ctx.coordinator.coordinator().records()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Transactional write #2 with one record. This will flush the current batch. CompletableFuture write2 = runtime.scheduleTransactionalWriteOperation( @@ -3626,7 +3989,7 @@ public void testScheduleTransactionalWriteOperationWithBatching() throws Executi assertEquals(4L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L, 1L, 2L, 3L, 4L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(Set.of(), ctx.coordinator.coordinator().pendingRecords(100L)); + assertEquals(Collections.emptySet(), ctx.coordinator.coordinator().pendingRecords(100L)); assertEquals(Set.of("record#1", "record#2", "record#3"), ctx.coordinator.coordinator().records()); assertEquals(List.of( records(timer.time().milliseconds(), "record#1"), @@ -3723,11 +4086,11 @@ public long append( state -> new CoordinatorResult<>(records.subList(3, 4), "response4")); // Verify the futures. - assertFutureThrows(NotCoordinatorException.class, write1); - assertFutureThrows(NotCoordinatorException.class, write2); - assertFutureThrows(NotCoordinatorException.class, write3); + assertFutureThrows(write1, NotCoordinatorException.class); + assertFutureThrows(write2, NotCoordinatorException.class); + assertFutureThrows(write3, NotCoordinatorException.class); // Write #4 is also expected to fail. - assertFutureThrows(NotCoordinatorException.class, write4); + assertFutureThrows(write4, NotCoordinatorException.class); // Verify that the state machine was loaded twice. verify(loader, times(2)).load(eq(TP), any()); @@ -3805,7 +4168,7 @@ public void close() {} // Schedule a write operation that does not generate any records. CompletableFuture write = runtime.scheduleWriteOperation("write#1", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of(), "response1")); + state -> new CoordinatorResult<>(Collections.emptyList(), "response1")); // The write operation should not be done. assertFalse(write.isDone()); @@ -3871,7 +4234,7 @@ public void testScheduleNonAtomicWriteOperation() throws ExecutionException, Int state -> new CoordinatorResult<>(records, "write#1") ); - assertFutureThrows(RecordTooLargeException.class, write1); + assertFutureThrows(write1, RecordTooLargeException.class); // Let's try to write the same records non-atomically. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), @@ -3993,7 +4356,7 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Let's write the 4th record which is too large. This will flush the current // pending batch, allocate a new batch, and put the record into it. @@ -4007,7 +4370,7 @@ public void testScheduleNonAtomicWriteOperationWithRecordTooLarge() throws Inter timer.advanceClock(11); // The write should have failed... - assertFutureThrows(RecordTooLargeException.class, write2); + assertFutureThrows(write2, RecordTooLargeException.class); // ... but write#1 should be left intact. assertFalse(write1.isDone()); @@ -4091,7 +4454,7 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { new MockCoordinatorShard.RecordAndMetadata(1, records.get(1)), new MockCoordinatorShard.RecordAndMetadata(2, records.get(2)) ), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); // Write #4. This write cannot make it in the current batch. So the current batch // is flushed. It will fail. So we expect all writes to fail. @@ -4099,18 +4462,18 @@ public void testScheduleNonAtomicWriteOperationWhenWriteFails() { state -> new CoordinatorResult<>(records.subList(3, 4), "response4", null, true, false)); // Verify the futures. - assertFutureThrows(KafkaException.class, write1); - assertFutureThrows(KafkaException.class, write2); - assertFutureThrows(KafkaException.class, write3); + assertFutureThrows(write1, KafkaException.class); + assertFutureThrows(write2, KafkaException.class); + assertFutureThrows(write3, KafkaException.class); // Write #4 is also expected to fail. - assertFutureThrows(KafkaException.class, write4); + assertFutureThrows(write4, KafkaException.class); // Verify the state. The state should be reverted to the initial state. assertEquals(0L, ctx.coordinator.lastWrittenOffset()); assertEquals(0L, ctx.coordinator.lastCommittedOffset()); assertEquals(List.of(0L), ctx.coordinator.snapshotRegistry().epochsList()); - assertEquals(List.of(), ctx.coordinator.coordinator().fullRecords()); - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), ctx.coordinator.coordinator().fullRecords()); + assertEquals(Collections.emptyList(), writer.entries(TP)); } @Test @@ -4148,12 +4511,12 @@ public void testEmptyBatch() throws Exception { state -> new CoordinatorResult<>(List.of("1"), "response1")); // Write #1 should fail and leave an empty batch. - assertFutureThrows(BufferOverflowException.class, write1); + assertFutureThrows(write1, BufferOverflowException.class); assertNotNull(ctx.currentBatch); // Write #2, with no records. CompletableFuture write2 = runtime.scheduleWriteOperation("write#2", TP, Duration.ofMillis(20), - state -> new CoordinatorResult<>(List.of(), "response2")); + state -> new CoordinatorResult<>(Collections.emptyList(), "response2")); // Write #2 should not be attached to the empty batch. assertTrue(write2.isDone()); @@ -4238,7 +4601,7 @@ public void testRecordFlushTime() throws Exception { ); // Verify the state. Records are replayed but no batch written. - assertEquals(List.of(), writer.entries(TP)); + assertEquals(Collections.emptyList(), writer.entries(TP)); verify(runtimeMetrics, times(0)).recordFlushTime(10); // Write #3 with one record. This one cannot go into the existing batch @@ -4416,7 +4779,7 @@ public void testWriteEventCompletesOnlyOnce() throws Exception { // Records have been written to the log. long writeTimestamp = timer.time().milliseconds(); - assertEquals(List.of( + assertEquals(Collections.singletonList( records(writeTimestamp, "record1") ), writer.entries(TP)); @@ -4554,16 +4917,16 @@ public void testCoordinatorExecutor() { // Schedule a write which schedules an async tasks. CompletableFuture write1 = runtime.scheduleWriteOperation("write#1", TP, writeTimeout, state -> { - state.executor().schedule( + state.executor.schedule( "write#1#task", () -> "task result", (result, exception) -> { assertEquals("task result", result); assertNull(exception); - return new CoordinatorResult<>(List.of("record2"), null); + return new CoordinatorResult<>(Collections.singletonList("record2"), null); } ); - return new CoordinatorResult<>(List.of("record1"), "response1"); + return new CoordinatorResult<>(Collections.singletonList("record1"), "response1"); } ); @@ -4572,7 +4935,7 @@ public void testCoordinatorExecutor() { // We should have a new write event in the queue as a result of the // task being executed immediately. - assertEquals(1, processor.size()); + assertEquals(1, processor.queue.size()); // Verify the state. CoordinatorRuntime.CoordinatorContext ctx = runtime.contextOrThrow(TP); @@ -4586,7 +4949,7 @@ public void testCoordinatorExecutor() { processor.poll(); // The processor must be empty now. - assertEquals(0, processor.size()); + assertEquals(0, processor.queue.size()); // Verify the state. assertEquals(2L, ctx.coordinator.lastWrittenOffset()); diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java index 7809c46cb1001..a8551f0734bbd 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/InMemoryPartitionWriter.java @@ -89,7 +89,7 @@ public void deregisterListener( @Override public LogConfig config(TopicPartition tp) { - return new LogConfig(Map.of()); + return new LogConfig(Collections.emptyMap()); } @Override @@ -129,7 +129,7 @@ public CompletableFuture maybeStartTransactionVerification( String transactionalId, long producerId, short producerEpoch, - int apiVersion + short apiVersion ) throws KafkaException { return CompletableFuture.completedFuture(new VerificationGuard()); } diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KafkaMetricHistogramTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KafkaMetricHistogramTest.java index db3fb57444870..72a4bce3fae87 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KafkaMetricHistogramTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/KafkaMetricHistogramTest.java @@ -22,7 +22,7 @@ import org.junit.jupiter.api.Test; -import java.util.Map; +import java.util.Collections; import java.util.Set; import java.util.stream.Collectors; @@ -42,11 +42,11 @@ public void testStats() { ); Set expected = Set.of( - new MetricName("test-metric-max", "test-group", "test description", Map.of()), - new MetricName("test-metric-p999", "test-group", "test description", Map.of()), - new MetricName("test-metric-p99", "test-group", "test description", Map.of()), - new MetricName("test-metric-p95", "test-group", "test description", Map.of()), - new MetricName("test-metric-p50", "test-group", "test description", Map.of()) + new MetricName("test-metric-max", "test-group", "test description", Collections.emptyMap()), + new MetricName("test-metric-p999", "test-group", "test description", Collections.emptyMap()), + new MetricName("test-metric-p99", "test-group", "test description", Collections.emptyMap()), + new MetricName("test-metric-p95", "test-group", "test description", Collections.emptyMap()), + new MetricName("test-metric-p50", "test-group", "test description", Collections.emptyMap()) ); Set actual = histogram.stats().stream().map(CompoundStat.NamedMeasurable::name).collect(Collectors.toSet()); assertEquals(expected, actual); diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java index fc2ace6698ce9..40b946bbefd9d 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorExecutor.java @@ -49,7 +49,10 @@ CoordinatorResult execute() { } } - public record ExecutorResult(String key, CoordinatorResult result) { + public static class ExecutorResult { + public final String key; + public final CoordinatorResult result; + public ExecutorResult( String key, CoordinatorResult result @@ -58,6 +61,24 @@ public ExecutorResult( this.result = Objects.requireNonNull(result); } + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ExecutorResult that = (ExecutorResult) o; + + if (!Objects.equals(key, that.key)) return false; + return Objects.equals(result, that.result); + } + + @Override + public int hashCode() { + int result = key.hashCode(); + result = 31 * result + this.result.hashCode(); + return result; + } + @Override public String toString() { return "ExecutorResult(" + diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java index 78e14ac576b39..5c55f59d608f5 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/MockCoordinatorTimer.java @@ -23,6 +23,7 @@ import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.PriorityQueue; import java.util.concurrent.TimeUnit; @@ -35,13 +36,54 @@ public class MockCoordinatorTimer implements CoordinatorTimer { /** * Represents a scheduled timeout. */ - public record ScheduledTimeout(String key, long deadlineMs, TimeoutOperation operation) { + public static class ScheduledTimeout { + public final String key; + public final long deadlineMs; + public final TimeoutOperation operation; + + public ScheduledTimeout( + String key, + long deadlineMs, + TimeoutOperation operation + ) { + this.key = key; + this.deadlineMs = deadlineMs; + this.operation = operation; + } } /** * Represents an expired timeout. */ - public record ExpiredTimeout(String key, CoordinatorResult result) { + public static class ExpiredTimeout { + public final String key; + public final CoordinatorResult result; + + public ExpiredTimeout( + String key, + CoordinatorResult result + ) { + this.key = key; + this.result = result; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + + ExpiredTimeout that = (ExpiredTimeout) o; + + if (!Objects.equals(key, that.key)) return false; + return Objects.equals(result, that.result); + } + + @Override + public int hashCode() { + int result1 = key != null ? key.hashCode() : 0; + result1 = 31 * result1 + (result != null ? result.hashCode() : 0); + return result1; + } } private final Time time; diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinatorTest.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinatorTest.java index b1436a3eff5c0..40c23a2759a96 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinatorTest.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/SnapshottableCoordinatorTest.java @@ -19,6 +19,7 @@ import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.coordinator.common.runtime.CoordinatorRuntimeTest.MockCoordinatorShard; import org.apache.kafka.timeline.SnapshotRegistry; import org.junit.jupiter.api.Test; diff --git a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java index c3eda174671f6..3acd3599e2d35 100644 --- a/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java +++ b/coordinator-common/src/test/java/org/apache/kafka/coordinator/common/runtime/TestUtil.java @@ -16,149 +16,17 @@ */ package org.apache.kafka.coordinator.common.runtime; -import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.network.ClientInformation; import org.apache.kafka.common.network.ListenerName; import org.apache.kafka.common.protocol.ApiKeys; -import org.apache.kafka.common.record.AbstractRecords; -import org.apache.kafka.common.record.CompressionType; -import org.apache.kafka.common.record.ControlRecordType; -import org.apache.kafka.common.record.EndTransactionMarker; -import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.MemoryRecordsBuilder; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.RecordVersion; -import org.apache.kafka.common.record.SimpleRecord; -import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.requests.RequestContext; import org.apache.kafka.common.requests.RequestHeader; import org.apache.kafka.common.security.auth.KafkaPrincipal; import org.apache.kafka.common.security.auth.SecurityProtocol; import java.net.InetAddress; -import java.nio.ByteBuffer; -import java.nio.charset.Charset; -import java.util.Arrays; -import java.util.List; public class TestUtil { - public static MemoryRecords records( - long timestamp, - String... records - ) { - return records(timestamp, Arrays.stream(records).toList()); - } - - public static MemoryRecords records( - long timestamp, - List records - ) { - if (records.isEmpty()) - return MemoryRecords.EMPTY; - - List simpleRecords = records.stream().map(record -> - new SimpleRecord(timestamp, record.getBytes(Charset.defaultCharset())) - ).toList(); - - int sizeEstimate = AbstractRecords.estimateSizeInBytes( - RecordVersion.current().value, - CompressionType.NONE, - simpleRecords - ); - - ByteBuffer buffer = ByteBuffer.allocate(sizeEstimate); - - MemoryRecordsBuilder builder = MemoryRecords.builder( - buffer, - RecordVersion.current().value, - Compression.NONE, - TimestampType.CREATE_TIME, - 0L, - timestamp, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, - 0, - false, - RecordBatch.NO_PARTITION_LEADER_EPOCH - ); - - simpleRecords.forEach(builder::append); - - return builder.build(); - } - - public static MemoryRecords transactionalRecords( - long producerId, - short producerEpoch, - long timestamp, - String... records - ) { - return transactionalRecords( - producerId, - producerEpoch, - timestamp, - Arrays.stream(records).toList() - ); - } - - public static MemoryRecords transactionalRecords( - long producerId, - short producerEpoch, - long timestamp, - List records - ) { - if (records.isEmpty()) - return MemoryRecords.EMPTY; - - List simpleRecords = records.stream().map(record -> - new SimpleRecord(timestamp, record.getBytes(Charset.defaultCharset())) - ).toList(); - - int sizeEstimate = AbstractRecords.estimateSizeInBytes( - RecordVersion.current().value, - CompressionType.NONE, - simpleRecords - ); - - ByteBuffer buffer = ByteBuffer.allocate(sizeEstimate); - - MemoryRecordsBuilder builder = MemoryRecords.builder( - buffer, - RecordVersion.current().value, - Compression.NONE, - TimestampType.CREATE_TIME, - 0L, - timestamp, - producerId, - producerEpoch, - 0, - true, - RecordBatch.NO_PARTITION_LEADER_EPOCH - ); - - simpleRecords.forEach(builder::append); - - return builder.build(); - } - - public static MemoryRecords endTransactionMarker( - long producerId, - short producerEpoch, - long timestamp, - int coordinatorEpoch, - ControlRecordType result - ) { - return MemoryRecords.withEndTransactionMarker( - timestamp, - producerId, - producerEpoch, - new EndTransactionMarker( - result, - coordinatorEpoch - ) - ); - } - public static RequestContext requestContext( ApiKeys apiKey ) { diff --git a/core/src/main/java/kafka/docker/Log4jConfiguration.java b/core/src/main/java/kafka/docker/Log4jConfiguration.java index 2409df8170d3a..45b06760066f2 100644 --- a/core/src/main/java/kafka/docker/Log4jConfiguration.java +++ b/core/src/main/java/kafka/docker/Log4jConfiguration.java @@ -22,6 +22,7 @@ import com.fasterxml.jackson.annotation.JsonProperty; import com.fasterxml.jackson.annotation.JsonPropertyOrder; +import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; @@ -120,7 +121,7 @@ public void setProperties(String key, Object value) { @JsonIgnoreProperties(ignoreUnknown = true) class Loggers { private Root root; - private List logger = List.of(); + private List logger = Collections.emptyList(); @JsonProperty("Root") public Root getRoot() { diff --git a/core/src/main/java/kafka/log/remote/RemoteLogManager.java b/core/src/main/java/kafka/log/remote/RemoteLogManager.java new file mode 100644 index 0000000000000..b5f9e408c9442 --- /dev/null +++ b/core/src/main/java/kafka/log/remote/RemoteLogManager.java @@ -0,0 +1,2251 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import kafka.cluster.Partition; +import kafka.log.UnifiedLog; +import kafka.server.DelayedRemoteListOffsets; + +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.OffsetOutOfRangeException; +import org.apache.kafka.common.errors.RetriableException; +import org.apache.kafka.common.internals.SecurityManagerCompatibility; +import org.apache.kafka.common.message.FetchResponseData; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Quota; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.Record; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.RemoteLogInputStream; +import org.apache.kafka.common.requests.FetchRequest; +import org.apache.kafka.common.utils.BufferSupplier; +import org.apache.kafka.common.utils.ChildFirstClassLoader; +import org.apache.kafka.common.utils.CloseableIterator; +import org.apache.kafka.common.utils.LogContext; +import org.apache.kafka.common.utils.ThreadUtils; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.common.CheckpointFile; +import org.apache.kafka.server.common.OffsetAndEpoch; +import org.apache.kafka.server.common.StopPartition; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.log.remote.metadata.storage.ClassLoaderAwareRemoteLogMetadataManager; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManager; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManagerConfig; +import org.apache.kafka.server.log.remote.quota.RLMQuotaMetrics; +import org.apache.kafka.server.log.remote.storage.ClassLoaderAwareRemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.CustomMetadataSizeLimitExceededException; +import org.apache.kafka.server.log.remote.storage.LogSegmentData; +import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig; +import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState; +import org.apache.kafka.server.log.remote.storage.RemoteStorageException; +import org.apache.kafka.server.log.remote.storage.RemoteStorageManager; +import org.apache.kafka.server.metrics.KafkaMetricsGroup; +import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; +import org.apache.kafka.server.purgatory.TopicPartitionOperationKey; +import org.apache.kafka.server.quota.QuotaType; +import org.apache.kafka.server.storage.log.FetchIsolation; +import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.AbortedTxn; +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder; +import org.apache.kafka.storage.internals.log.EpochEntry; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.LogOffsetMetadata; +import org.apache.kafka.storage.internals.log.LogSegment; +import org.apache.kafka.storage.internals.log.OffsetIndex; +import org.apache.kafka.storage.internals.log.OffsetPosition; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; +import org.apache.kafka.storage.internals.log.RemoteIndexCache; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; +import org.apache.kafka.storage.internals.log.RemoteStorageThreadPool; +import org.apache.kafka.storage.internals.log.TransactionIndex; +import org.apache.kafka.storage.internals.log.TxnIndexSearchResult; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; + +import com.yammer.metrics.core.Timer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.BufferedWriter; +import java.io.ByteArrayOutputStream; +import java.io.Closeable; +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStreamWriter; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.Objects; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.OptionalLong; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.ScheduledThreadPoolExecutor; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.BiConsumer; +import java.util.function.Consumer; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import scala.Option; +import scala.jdk.javaapi.CollectionConverters; + +import static org.apache.kafka.server.config.ServerLogConfigs.LOG_DIR_CONFIG; +import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX; +import static org.apache.kafka.server.log.remote.quota.RLMQuotaManagerConfig.INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS; +import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC; +import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC; + +/** + * This class is responsible for + * - initializing `RemoteStorageManager` and `RemoteLogMetadataManager` instances + * - receives any leader and follower replica events and partition stop events and act on them + * - also provides APIs to fetch indexes, metadata about remote log segments + * - copying log segments to the remote storage + * - cleaning up segments that are expired based on retention size or retention time + */ +public class RemoteLogManager implements Closeable { + + private static final Logger LOGGER = LoggerFactory.getLogger(RemoteLogManager.class); + private static final String REMOTE_LOG_READER_THREAD_NAME_PATTERN = "remote-log-reader-%d"; + private final RemoteLogManagerConfig rlmConfig; + private final int brokerId; + private final String logDir; + private final Time time; + private final Function> fetchLog; + private final BiConsumer updateRemoteLogStartOffset; + private final BrokerTopicStats brokerTopicStats; + private final Metrics metrics; + + private final RemoteStorageManager remoteLogStorageManager; + + private final RemoteLogMetadataManager remoteLogMetadataManager; + + private final ReentrantLock copyQuotaManagerLock = new ReentrantLock(true); + private final Condition copyQuotaManagerLockCondition = copyQuotaManagerLock.newCondition(); + private final RLMQuotaManager rlmCopyQuotaManager; + private final RLMQuotaManager rlmFetchQuotaManager; + private final Sensor fetchThrottleTimeSensor; + private final Sensor copyThrottleTimeSensor; + + private final RemoteIndexCache indexCache; + private final RemoteStorageThreadPool remoteStorageReaderThreadPool; + private final RLMScheduledThreadPool rlmCopyThreadPool; + private final RLMScheduledThreadPool rlmExpirationThreadPool; + private final RLMScheduledThreadPool followerThreadPool; + + private final long delayInMs; + + private final ConcurrentHashMap leaderCopyRLMTasks = new ConcurrentHashMap<>(); + private final ConcurrentHashMap leaderExpirationRLMTasks = new ConcurrentHashMap<>(); + private final ConcurrentHashMap followerRLMTasks = new ConcurrentHashMap<>(); + private final Set segmentIdsBeingCopied = ConcurrentHashMap.newKeySet(); + + // topic ids that are received on leadership changes, this map is cleared on stop partitions + private final ConcurrentMap topicIdByPartitionMap = new ConcurrentHashMap<>(); + private final String clusterId; + private final KafkaMetricsGroup metricsGroup = new KafkaMetricsGroup(this.getClass()); + + // The endpoint for remote log metadata manager to connect to + private Optional endpoint = Optional.empty(); + private boolean closed = false; + + private volatile boolean remoteLogManagerConfigured = false; + private final Timer remoteReadTimer; + private volatile DelayedOperationPurgatory delayedRemoteListOffsetsPurgatory; + + /** + * Creates RemoteLogManager instance with the given arguments. + * + * @param rlmConfig Configuration required for remote logging subsystem(tiered storage) at the broker level. + * @param brokerId id of the current broker. + * @param logDir directory of Kafka log segments. + * @param time Time instance. + * @param clusterId The cluster id. + * @param fetchLog function to get UnifiedLog instance for a given topic. + * @param updateRemoteLogStartOffset function to update the log-start-offset for a given topic partition. + * @param brokerTopicStats BrokerTopicStats instance to update the respective metrics. + * @param metrics Metrics instance + */ + @SuppressWarnings({"this-escape"}) + public RemoteLogManager(RemoteLogManagerConfig rlmConfig, + int brokerId, + String logDir, + String clusterId, + Time time, + Function> fetchLog, + BiConsumer updateRemoteLogStartOffset, + BrokerTopicStats brokerTopicStats, + Metrics metrics) throws IOException { + this.rlmConfig = rlmConfig; + this.brokerId = brokerId; + this.logDir = logDir; + this.clusterId = clusterId; + this.time = time; + this.fetchLog = fetchLog; + this.updateRemoteLogStartOffset = updateRemoteLogStartOffset; + this.brokerTopicStats = brokerTopicStats; + this.metrics = metrics; + + remoteLogStorageManager = createRemoteStorageManager(); + remoteLogMetadataManager = createRemoteLogMetadataManager(); + rlmCopyQuotaManager = createRLMCopyQuotaManager(); + rlmFetchQuotaManager = createRLMFetchQuotaManager(); + + fetchThrottleTimeSensor = new RLMQuotaMetrics(metrics, "remote-fetch-throttle-time", RemoteLogManager.class.getSimpleName(), + "The %s time in millis remote fetches was throttled by a broker", INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS).sensor(); + copyThrottleTimeSensor = new RLMQuotaMetrics(metrics, "remote-copy-throttle-time", RemoteLogManager.class.getSimpleName(), + "The %s time in millis remote copies was throttled by a broker", INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS).sensor(); + + indexCache = new RemoteIndexCache(rlmConfig.remoteLogIndexFileCacheTotalSizeBytes(), remoteLogStorageManager, logDir); + delayInMs = rlmConfig.remoteLogManagerTaskIntervalMs(); + rlmCopyThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerCopierThreadPoolSize(), + "RLMCopyThreadPool", "kafka-rlm-copy-thread-pool-%d"); + rlmExpirationThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerExpirationThreadPoolSize(), + "RLMExpirationThreadPool", "kafka-rlm-expiration-thread-pool-%d"); + followerThreadPool = new RLMScheduledThreadPool(rlmConfig.remoteLogManagerThreadPoolSize(), + "RLMFollowerScheduledThreadPool", "kafka-rlm-follower-thread-pool-%d"); + + metricsGroup.newGauge(REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC, rlmCopyThreadPool::getIdlePercent); + remoteReadTimer = metricsGroup.newTimer(REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC, + TimeUnit.MILLISECONDS, TimeUnit.SECONDS); + + remoteStorageReaderThreadPool = new RemoteStorageThreadPool( + REMOTE_LOG_READER_THREAD_NAME_PATTERN, + rlmConfig.remoteLogReaderThreads(), + rlmConfig.remoteLogReaderMaxPendingTasks() + ); + } + + public void setDelayedOperationPurgatory(DelayedOperationPurgatory delayedRemoteListOffsetsPurgatory) { + this.delayedRemoteListOffsetsPurgatory = delayedRemoteListOffsetsPurgatory; + } + + public void resizeCacheSize(long remoteLogIndexFileCacheSize) { + indexCache.resizeCacheSize(remoteLogIndexFileCacheSize); + } + + public void updateCopyQuota(long quota) { + LOGGER.info("Updating remote copy quota to {} bytes per second", quota); + rlmCopyQuotaManager.updateQuota(new Quota(quota, true)); + } + + public void updateFetchQuota(long quota) { + LOGGER.info("Updating remote fetch quota to {} bytes per second", quota); + rlmFetchQuotaManager.updateQuota(new Quota(quota, true)); + } + + public void resizeCopierThreadPool(int newSize) { + int currentSize = rlmCopyThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote copy thread pool size from {} to {}", currentSize, newSize); + rlmCopyThreadPool.setCorePoolSize(newSize); + } + + public void resizeExpirationThreadPool(int newSize) { + int currentSize = rlmExpirationThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote expiration thread pool size from {} to {}", currentSize, newSize); + rlmExpirationThreadPool.setCorePoolSize(newSize); + } + + public void resizeReaderThreadPool(int newSize) { + int currentSize = remoteStorageReaderThreadPool.getCorePoolSize(); + LOGGER.info("Updating remote reader thread pool size from {} to {}", currentSize, newSize); + remoteStorageReaderThreadPool.setCorePoolSize(newSize); + } + + private void removeMetrics() { + metricsGroup.removeMetric(REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC); + metricsGroup.removeMetric(REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC); + remoteStorageReaderThreadPool.removeMetrics(); + } + + /** + * Returns the timeout for the RLM Tasks to wait for the quota to be available + */ + Duration quotaTimeout() { + return Duration.ofSeconds(1); + } + + RLMQuotaManager createRLMCopyQuotaManager() { + return new RLMQuotaManager(copyQuotaManagerConfig(rlmConfig), metrics, QuotaType.RLM_COPY, + "Tracking copy byte-rate for Remote Log Manager", time); + } + + RLMQuotaManager createRLMFetchQuotaManager() { + return new RLMQuotaManager(fetchQuotaManagerConfig(rlmConfig), metrics, QuotaType.RLM_FETCH, + "Tracking fetch byte-rate for Remote Log Manager", time); + } + + public long getFetchThrottleTimeMs() { + return rlmFetchQuotaManager.getThrottleTimeMs(); + } + + public Sensor fetchThrottleTimeSensor() { + return fetchThrottleTimeSensor; + } + + static RLMQuotaManagerConfig copyQuotaManagerConfig(RemoteLogManagerConfig rlmConfig) { + return new RLMQuotaManagerConfig(rlmConfig.remoteLogManagerCopyMaxBytesPerSecond(), + rlmConfig.remoteLogManagerCopyNumQuotaSamples(), + rlmConfig.remoteLogManagerCopyQuotaWindowSizeSeconds()); + } + + static RLMQuotaManagerConfig fetchQuotaManagerConfig(RemoteLogManagerConfig rlmConfig) { + return new RLMQuotaManagerConfig(rlmConfig.remoteLogManagerFetchMaxBytesPerSecond(), + rlmConfig.remoteLogManagerFetchNumQuotaSamples(), + rlmConfig.remoteLogManagerFetchQuotaWindowSizeSeconds()); + } + + @SuppressWarnings("unchecked") + private T createDelegate(ClassLoader classLoader, String className) { + try { + return (T) classLoader.loadClass(className) + .getDeclaredConstructor().newInstance(); + } catch (InstantiationException | IllegalAccessException | InvocationTargetException | NoSuchMethodException | + ClassNotFoundException e) { + throw new KafkaException(e); + } + } + + RemoteStorageManager createRemoteStorageManager() { + return SecurityManagerCompatibility.get().doPrivileged(() -> { + final String classPath = rlmConfig.remoteStorageManagerClassPath(); + if (classPath != null && !classPath.trim().isEmpty()) { + ChildFirstClassLoader classLoader = new ChildFirstClassLoader(classPath, this.getClass().getClassLoader()); + RemoteStorageManager delegate = createDelegate(classLoader, rlmConfig.remoteStorageManagerClassName()); + return (RemoteStorageManager) new ClassLoaderAwareRemoteStorageManager(delegate, classLoader); + } else { + return createDelegate(this.getClass().getClassLoader(), rlmConfig.remoteStorageManagerClassName()); + } + }); + } + + private void configureRSM() { + final Map rsmProps = new HashMap<>(rlmConfig.remoteStorageManagerProps()); + rsmProps.put(ServerConfigs.BROKER_ID_CONFIG, brokerId); + remoteLogStorageManager.configure(rsmProps); + } + + RemoteLogMetadataManager createRemoteLogMetadataManager() { + return SecurityManagerCompatibility.get().doPrivileged(() -> { + final String classPath = rlmConfig.remoteLogMetadataManagerClassPath(); + if (classPath != null && !classPath.trim().isEmpty()) { + ClassLoader classLoader = new ChildFirstClassLoader(classPath, this.getClass().getClassLoader()); + RemoteLogMetadataManager delegate = createDelegate(classLoader, rlmConfig.remoteLogMetadataManagerClassName()); + return (RemoteLogMetadataManager) new ClassLoaderAwareRemoteLogMetadataManager(delegate, classLoader); + } else { + return createDelegate(this.getClass().getClassLoader(), rlmConfig.remoteLogMetadataManagerClassName()); + } + }); + } + + public void onEndPointCreated(Endpoint endpoint) { + this.endpoint = Optional.of(endpoint); + } + + private void configureRLMM() { + final Map rlmmProps = new HashMap<>(); + endpoint.ifPresent(e -> { + rlmmProps.put(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "bootstrap.servers", e.host() + ":" + e.port()); + rlmmProps.put(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "security.protocol", e.securityProtocol().name); + }); + // update the remoteLogMetadataProps here to override endpoint config if any + rlmmProps.putAll(rlmConfig.remoteLogMetadataManagerProps()); + + rlmmProps.put(ServerConfigs.BROKER_ID_CONFIG, brokerId); + rlmmProps.put(LOG_DIR_CONFIG, logDir); + rlmmProps.put("cluster.id", clusterId); + + remoteLogMetadataManager.configure(rlmmProps); + } + + public void startup() { + // Initialize and configure RSM and RLMM. This will start RSM, RLMM resources which may need to start resources + // in connecting to the brokers or remote storages. + configureRSM(); + configureRLMM(); + remoteLogManagerConfigured = true; + } + + private boolean isRemoteLogManagerConfigured() { + return this.remoteLogManagerConfigured; + } + + public RemoteStorageManager storageManager() { + return remoteLogStorageManager; + } + + private Stream filterPartitions(Set partitions) { + // We are not specifically checking for internal topics etc here as `log.remoteLogEnabled()` already handles that. + return partitions.stream().filter(partition -> partition.log().exists(UnifiedLog::remoteLogEnabled)); + } + + private void cacheTopicPartitionIds(TopicIdPartition topicIdPartition) { + Uuid previousTopicId = topicIdByPartitionMap.put(topicIdPartition.topicPartition(), topicIdPartition.topicId()); + if (previousTopicId != null && !previousTopicId.equals(topicIdPartition.topicId())) { + LOGGER.info("Previous cached topic id {} for {} does not match updated topic id {}", + previousTopicId, topicIdPartition.topicPartition(), topicIdPartition.topicId()); + } + } + + /** + * Callback to receive any leadership changes for the topic partitions assigned to this broker. If there are no + * existing tasks for a given topic partition then it will assign new leader or follower task else it will convert the + * task to respective target state(leader or follower). + * + * @param partitionsBecomeLeader partitions that have become leaders on this broker. + * @param partitionsBecomeFollower partitions that have become followers on this broker. + * @param topicIds topic name to topic id mappings. + */ + public void onLeadershipChange(Set partitionsBecomeLeader, + Set partitionsBecomeFollower, + Map topicIds) { + LOGGER.debug("Received leadership changes for leaders: {} and followers: {}", partitionsBecomeLeader, partitionsBecomeFollower); + + if (rlmConfig.isRemoteStorageSystemEnabled() && !isRemoteLogManagerConfigured()) { + throw new KafkaException("RemoteLogManager is not configured when remote storage system is enabled"); + } + + Map leaderPartitions = filterPartitions(partitionsBecomeLeader) + .collect(Collectors.toMap(p -> new TopicIdPartition(topicIds.get(p.topic()), p.topicPartition()), + p -> p.log().exists(log -> log.config().remoteLogCopyDisable()))); + + Map followerPartitions = filterPartitions(partitionsBecomeFollower) + .collect(Collectors.toMap(p -> new TopicIdPartition(topicIds.get(p.topic()), p.topicPartition()), + p -> p.log().exists(log -> log.config().remoteLogCopyDisable()))); + + if (!leaderPartitions.isEmpty() || !followerPartitions.isEmpty()) { + LOGGER.debug("Effective topic partitions after filtering compact and internal topics, leaders: {} and followers: {}", + leaderPartitions, followerPartitions); + + leaderPartitions.forEach((tp, __) -> cacheTopicPartitionIds(tp)); + followerPartitions.forEach((tp, __) -> cacheTopicPartitionIds(tp)); + + remoteLogMetadataManager.onPartitionLeadershipChanges(leaderPartitions.keySet(), followerPartitions.keySet()); + followerPartitions.forEach((tp, __) -> doHandleFollowerPartition(tp)); + + // If this node was the previous leader for the partition, then the RLMTask might be running in the + // background thread and might emit metrics. So, removing the metrics after marking this node as follower. + followerPartitions.forEach((tp, __) -> removeRemoteTopicPartitionMetrics(tp)); + + leaderPartitions.forEach(this::doHandleLeaderPartition); + } + } + + public void stopLeaderCopyRLMTasks(Set partitions) { + for (Partition partition : partitions) { + TopicPartition tp = partition.topicPartition(); + if (topicIdByPartitionMap.containsKey(tp)) { + TopicIdPartition tpId = new TopicIdPartition(topicIdByPartitionMap.get(tp), tp); + leaderCopyRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { + LOGGER.info("Cancelling the copy RLM task for partition: {}", tpId); + task.cancel(); + LOGGER.info("Resetting remote copy lag metrics for partition: {}", tpId); + ((RLMCopyTask) task.rlmTask).resetLagStats(); + return null; + }); + } + } + } + + /** + * Stop the remote-log-manager task for the given partitions. And, calls the + * {@link RemoteLogMetadataManager#onStopPartitions(Set)} when {@link StopPartition#deleteLocalLog} is true. + * Deletes the partitions from the remote storage when {@link StopPartition#deleteRemoteLog} is true. + * + * @param stopPartitions topic partitions that needs to be stopped. + * @param errorHandler callback to handle any errors while stopping the partitions. + */ + public void stopPartitions(Set stopPartitions, + BiConsumer errorHandler) { + LOGGER.debug("Stop partitions: {}", stopPartitions); + for (StopPartition stopPartition: stopPartitions) { + TopicPartition tp = stopPartition.topicPartition; + try { + if (topicIdByPartitionMap.containsKey(tp)) { + TopicIdPartition tpId = new TopicIdPartition(topicIdByPartitionMap.get(tp), tp); + leaderCopyRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { + LOGGER.info("Cancelling the copy RLM task for partition: {}", tpId); + task.cancel(); + return null; + }); + leaderExpirationRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { + LOGGER.info("Cancelling the expiration RLM task for partition: {}", tpId); + task.cancel(); + return null; + }); + followerRLMTasks.computeIfPresent(tpId, (topicIdPartition, task) -> { + LOGGER.info("Cancelling the follower RLM task for partition: {}", tpId); + task.cancel(); + return null; + }); + + removeRemoteTopicPartitionMetrics(tpId); + + if (stopPartition.deleteRemoteLog) { + LOGGER.info("Deleting the remote log segments task for partition: {}", tpId); + deleteRemoteLogPartition(tpId); + } + } else { + LOGGER.warn("StopPartition call is not expected for partition: {}", tp); + } + } catch (Exception ex) { + errorHandler.accept(tp, ex); + LOGGER.error("Error while stopping the partition: {}", stopPartition, ex); + } + } + + // We want to remote topicId map and stopPartition on RLMM for deleteLocalLog or stopRLMM partitions because + // in both case, they all mean the topic will not be held in this broker anymore. + // NOTE: In ZK mode, this#stopPartitions method is called when Replica state changes to Offline and ReplicaDeletionStarted + Set pendingActionsPartitions = stopPartitions.stream() + .filter(sp -> (sp.stopRemoteLogMetadataManager || sp.deleteLocalLog) && topicIdByPartitionMap.containsKey(sp.topicPartition)) + .map(sp -> new TopicIdPartition(topicIdByPartitionMap.get(sp.topicPartition), sp.topicPartition)) + .collect(Collectors.toSet()); + + if (!pendingActionsPartitions.isEmpty()) { + pendingActionsPartitions.forEach(tpId -> topicIdByPartitionMap.remove(tpId.topicPartition())); + remoteLogMetadataManager.onStopPartitions(pendingActionsPartitions); + } + } + + private void deleteRemoteLogPartition(TopicIdPartition partition) throws RemoteStorageException, ExecutionException, InterruptedException { + List metadataList = new ArrayList<>(); + remoteLogMetadataManager.listRemoteLogSegments(partition).forEachRemaining(metadataList::add); + + List deleteSegmentStartedEvents = metadataList.stream() + .map(metadata -> + new RemoteLogSegmentMetadataUpdate(metadata.remoteLogSegmentId(), time.milliseconds(), + metadata.customMetadata(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, brokerId)) + .collect(Collectors.toList()); + publishEvents(deleteSegmentStartedEvents).get(); + + // KAFKA-15313: Delete remote log segments partition asynchronously when a partition is deleted. + Collection deletedSegmentIds = new ArrayList<>(); + for (RemoteLogSegmentMetadata metadata: metadataList) { + deletedSegmentIds.add(metadata.remoteLogSegmentId().id()); + remoteLogStorageManager.deleteLogSegmentData(metadata); + } + indexCache.removeAll(deletedSegmentIds); + + List deleteSegmentFinishedEvents = metadataList.stream() + .map(metadata -> + new RemoteLogSegmentMetadataUpdate(metadata.remoteLogSegmentId(), time.milliseconds(), + metadata.customMetadata(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, brokerId)) + .collect(Collectors.toList()); + publishEvents(deleteSegmentFinishedEvents).get(); + } + + private CompletableFuture publishEvents(List events) throws RemoteStorageException { + List> result = new ArrayList<>(); + for (RemoteLogSegmentMetadataUpdate event : events) { + result.add(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(event)); + } + return CompletableFuture.allOf(result.toArray(new CompletableFuture[0])); + } + + public Optional fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, + int epochForOffset, + long offset) throws RemoteStorageException { + Uuid topicId = topicIdByPartitionMap.get(topicPartition); + if (topicId == null) { + throw new KafkaException("No topic id registered for topic partition: " + topicPartition); + } + return remoteLogMetadataManager.remoteLogSegmentMetadata(new TopicIdPartition(topicId, topicPartition), epochForOffset, offset); + } + + /** + * Returns the next segment that may contain the aborted transaction entries. The search ensures that the returned + * segment offsets are greater than or equal to the given offset and in the same epoch. + * @param topicPartition topic partition to search + * @param epochForOffset the epoch + * @param offset the offset + * @return The next segment that contains the transaction index in the same epoch. + * @throws RemoteStorageException If an error occurs while fetching the remote log segment metadata. + */ + public Optional fetchNextSegmentWithTxnIndex(TopicPartition topicPartition, + int epochForOffset, + long offset) throws RemoteStorageException { + Uuid topicId = topicIdByPartitionMap.get(topicPartition); + if (topicId == null) { + throw new KafkaException("No topic id registered for topic partition: " + topicPartition); + } + TopicIdPartition tpId = new TopicIdPartition(topicId, topicPartition); + return remoteLogMetadataManager.nextSegmentWithTxnIndex(tpId, epochForOffset, offset); + } + + Optional lookupTimestamp(RemoteLogSegmentMetadata rlsMetadata, long timestamp, long startingOffset) + throws RemoteStorageException, IOException { + int startPos = indexCache.lookupTimestamp(rlsMetadata, timestamp, startingOffset); + + InputStream remoteSegInputStream = null; + try { + // Search forward for the position of the last offset that is greater than or equal to the startingOffset + remoteSegInputStream = remoteLogStorageManager.fetchLogSegment(rlsMetadata, startPos); + RemoteLogInputStream remoteLogInputStream = new RemoteLogInputStream(remoteSegInputStream); + + while (true) { + RecordBatch batch = remoteLogInputStream.nextBatch(); + if (batch == null) break; + if (batch.maxTimestamp() >= timestamp && batch.lastOffset() >= startingOffset) { + try (CloseableIterator recordStreamingIterator = batch.streamingIterator(BufferSupplier.NO_CACHING)) { + while (recordStreamingIterator.hasNext()) { + Record record = recordStreamingIterator.next(); + if (record.timestamp() >= timestamp && record.offset() >= startingOffset) + return Optional.of(new FileRecords.TimestampAndOffset(record.timestamp(), record.offset(), maybeLeaderEpoch(batch.partitionLeaderEpoch()))); + } + } + } + } + + return Optional.empty(); + } finally { + Utils.closeQuietly(remoteSegInputStream, "RemoteLogSegmentInputStream"); + } + } + + private Optional maybeLeaderEpoch(int leaderEpoch) { + return leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH ? Optional.empty() : Optional.of(leaderEpoch); + } + + public AsyncOffsetReadFutureHolder asyncOffsetRead( + TopicPartition topicPartition, + Long timestamp, + Long startingOffset, + LeaderEpochFileCache leaderEpochCache, + Supplier> searchLocalLog) { + CompletableFuture taskFuture = new CompletableFuture<>(); + Future jobFuture = remoteStorageReaderThreadPool.submit( + new RemoteLogOffsetReader(this, topicPartition, timestamp, startingOffset, leaderEpochCache, searchLocalLog, result -> { + TopicPartitionOperationKey key = new TopicPartitionOperationKey(topicPartition.topic(), topicPartition.partition()); + taskFuture.complete(result); + delayedRemoteListOffsetsPurgatory.checkAndComplete(key); + }) + ); + return new AsyncOffsetReadFutureHolder<>(jobFuture, taskFuture); + } + + /** + * Search the message offset in the remote storage for the given timestamp and starting-offset. + * Once the target segment where the search to be performed is found: + * 1. If the target segment lies in the local storage (common segments that lies in both remote and local storage), + * then the search will be performed in the local storage. + * 2. If the target segment is found only in the remote storage, then the search will be performed in the remote storage. + * + *

          + * This method returns an option of TimestampOffset. The returned value is determined using the following ordered list of rules: + *

          + * - If there are no messages in the remote storage, return Empty + * - If all the messages in the remote storage have smaller offsets, return Empty + * - If all the messages in the remote storage have smaller timestamps, return Empty + * - Otherwise, return an option of TimestampOffset. The offset is the offset of the first message whose timestamp + * is greater than or equals to the target timestamp and whose offset is greater than or equals to the startingOffset. + * + * @param tp topic partition in which the offset to be found. + * @param timestamp The timestamp to search for. + * @param startingOffset The starting offset to search. + * @param leaderEpochCache LeaderEpochFileCache of the topic partition. + * @return the timestamp and offset of the first message that meets the requirements. Empty will be returned if there + * is no such message. + */ + public Optional findOffsetByTimestamp(TopicPartition tp, + long timestamp, + long startingOffset, + LeaderEpochFileCache leaderEpochCache) throws RemoteStorageException, IOException { + Uuid topicId = topicIdByPartitionMap.get(tp); + if (topicId == null) { + throw new KafkaException("Topic id does not exist for topic partition: " + tp); + } + Optional unifiedLogOptional = fetchLog.apply(tp); + if (unifiedLogOptional.isEmpty()) { + throw new KafkaException("UnifiedLog does not exist for topic partition: " + tp); + } + UnifiedLog unifiedLog = unifiedLogOptional.get(); + + // Get the respective epoch in which the starting-offset exists. + OptionalInt maybeEpoch = leaderEpochCache.epochForOffset(startingOffset); + TopicIdPartition topicIdPartition = new TopicIdPartition(topicId, tp); + NavigableMap epochWithOffsets = buildFilteredLeaderEpochMap(leaderEpochCache.epochWithOffsets()); + while (maybeEpoch.isPresent()) { + int epoch = maybeEpoch.getAsInt(); + // KAFKA-15802: Add a new API for RLMM to choose how to implement the predicate. + // currently, all segments are returned and then iterated, and filtered + Iterator iterator = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); + while (iterator.hasNext()) { + RemoteLogSegmentMetadata rlsMetadata = iterator.next(); + if (rlsMetadata.maxTimestampMs() >= timestamp + && rlsMetadata.endOffset() >= startingOffset + && isRemoteSegmentWithinLeaderEpochs(rlsMetadata, unifiedLog.logEndOffset(), epochWithOffsets) + && rlsMetadata.state().equals(RemoteLogSegmentState.COPY_SEGMENT_FINISHED)) { + // cache to avoid race conditions + List segmentsCopy = new ArrayList<>(unifiedLog.logSegments()); + if (segmentsCopy.isEmpty() || rlsMetadata.startOffset() < segmentsCopy.get(0).baseOffset()) { + // search in remote-log + return lookupTimestamp(rlsMetadata, timestamp, startingOffset); + } else { + // search in local-log + for (LogSegment segment : segmentsCopy) { + if (segment.largestTimestamp() >= timestamp) { + return segment.findOffsetByTimestamp(timestamp, startingOffset); + } + } + } + } + } + // Move to the next epoch if not found with the current epoch. + maybeEpoch = leaderEpochCache.nextEpoch(epoch); + } + return Optional.empty(); + } + + private abstract static class CancellableRunnable implements Runnable { + private volatile boolean cancelled = false; + + public void cancel() { + cancelled = true; + } + + public boolean isCancelled() { + return cancelled; + } + } + + /** + * Returns the leader epoch entries within the range of the given start[exclusive] and end[inclusive] offset. + *

          + * Visible for testing. + * + * @param log The actual log from where to take the leader-epoch checkpoint + * @param startOffset The start offset of the epoch entries (inclusive). + * If start offset is 6, then it will retain an entry at offset 6. + * @param endOffset The end offset of the epoch entries (exclusive) + * If end offset is 100, then it will remove the entries greater than or equal to 100. + * @return the leader epoch entries + */ + List getLeaderEpochEntries(UnifiedLog log, long startOffset, long endOffset) { + return log.leaderEpochCache().epochEntriesInRange(startOffset, endOffset); + } + + // VisibleForTesting + RLMTask rlmCopyTask(TopicIdPartition topicIdPartition) { + RLMTaskWithFuture task = leaderCopyRLMTasks.get(topicIdPartition); + if (task != null) { + return task.rlmTask; + } + return null; + } + + abstract class RLMTask extends CancellableRunnable { + + protected final TopicIdPartition topicIdPartition; + private final Logger logger; + + public RLMTask(TopicIdPartition topicIdPartition) { + this.topicIdPartition = topicIdPartition; + this.logger = getLogContext().logger(RLMTask.class); + } + + protected LogContext getLogContext() { + return new LogContext("[RemoteLogManager=" + brokerId + " partition=" + topicIdPartition + "] "); + } + + public void run() { + if (isCancelled()) { + logger.debug("Skipping the current run for partition {} as it is cancelled", topicIdPartition); + return; + } + if (!remoteLogMetadataManager.isReady(topicIdPartition)) { + logger.debug("Skipping the current run for partition {} as the remote-log metadata is not ready", topicIdPartition); + return; + } + + try { + Optional unifiedLogOptional = fetchLog.apply(topicIdPartition.topicPartition()); + + if (unifiedLogOptional.isEmpty()) { + return; + } + + execute(unifiedLogOptional.get()); + } catch (InterruptedException ex) { + if (!isCancelled()) { + logger.warn("Current thread for partition {} is interrupted", topicIdPartition, ex); + } + } catch (RetriableException ex) { + logger.debug("Encountered a retryable error while executing current task for partition {}", topicIdPartition, ex); + } catch (Exception ex) { + if (!isCancelled()) { + logger.warn("Current task for partition {} received error but it will be scheduled", topicIdPartition, ex); + } + } + } + + protected abstract void execute(UnifiedLog log) throws InterruptedException, RemoteStorageException, ExecutionException; + + public String toString() { + return this.getClass() + "[" + topicIdPartition + "]"; + } + } + + class RLMCopyTask extends RLMTask { + private final int customMetadataSizeLimit; + private final Logger logger; + + // The copied and log-start offset is empty initially for a new RLMCopyTask, and needs to be fetched inside + // the task's run() method. + private volatile Optional copiedOffsetOption = Optional.empty(); + private volatile boolean isLogStartOffsetUpdated = false; + private volatile Optional logDirectory = Optional.empty(); + + public RLMCopyTask(TopicIdPartition topicIdPartition, int customMetadataSizeLimit) { + super(topicIdPartition); + this.customMetadataSizeLimit = customMetadataSizeLimit; + this.logger = getLogContext().logger(RLMCopyTask.class); + } + + @Override + protected void execute(UnifiedLog log) throws InterruptedException { + // In the first run after completing altering logDir within broker, we should make sure the state is reset. (KAFKA-16711) + if (!log.parentDir().equals(logDirectory.orElse(null))) { + copiedOffsetOption = Optional.empty(); + isLogStartOffsetUpdated = false; + logDirectory = Optional.of(log.parentDir()); + } + + copyLogSegmentsToRemote(log); + } + + private void maybeUpdateLogStartOffsetOnBecomingLeader(UnifiedLog log) throws RemoteStorageException { + if (!isLogStartOffsetUpdated) { + long logStartOffset = findLogStartOffset(topicIdPartition, log); + updateRemoteLogStartOffset.accept(topicIdPartition.topicPartition(), logStartOffset); + isLogStartOffsetUpdated = true; + logger.info("Found the logStartOffset: {} for partition: {} after becoming leader", + logStartOffset, topicIdPartition); + } + } + + private void maybeUpdateCopiedOffset(UnifiedLog log) throws RemoteStorageException { + if (copiedOffsetOption.isEmpty()) { + // This is found by traversing from the latest leader epoch from leader epoch history and find the highest offset + // of a segment with that epoch copied into remote storage. If it can not find an entry then it checks for the + // previous leader epoch till it finds an entry, If there are no entries till the earliest leader epoch in leader + // epoch cache then it starts copying the segments from the earliest epoch entry's offset. + copiedOffsetOption = Optional.of(findHighestRemoteOffset(topicIdPartition, log)); + logger.info("Found the highest copiedRemoteOffset: {} for partition: {} after becoming leader", copiedOffsetOption, topicIdPartition); + copiedOffsetOption.ifPresent(offsetAndEpoch -> log.updateHighestOffsetInRemoteStorage(offsetAndEpoch.offset())); + } + } + + /** + * Segments which match the following criteria are eligible for copying to remote storage: + * 1) Segment is not the active segment and + * 2) Segment end-offset is less than the last-stable-offset as remote storage should contain only + * committed/acked messages + * @param log The log from which the segments are to be copied + * @param fromOffset The offset from which the segments are to be copied + * @param lastStableOffset The last stable offset of the log + * @return candidate log segments to be copied to remote storage + */ + List candidateLogSegments(UnifiedLog log, Long fromOffset, Long lastStableOffset) { + List candidateLogSegments = new ArrayList<>(); + List segments = CollectionConverters.asJava(log.logSegments(fromOffset, Long.MAX_VALUE).toSeq()); + if (!segments.isEmpty()) { + for (int idx = 1; idx < segments.size(); idx++) { + LogSegment previousSeg = segments.get(idx - 1); + LogSegment currentSeg = segments.get(idx); + if (currentSeg.baseOffset() <= lastStableOffset) { + candidateLogSegments.add(new EnrichedLogSegment(previousSeg, currentSeg.baseOffset())); + } + } + // Discard the last active segment + } + return candidateLogSegments; + } + + public void copyLogSegmentsToRemote(UnifiedLog log) throws InterruptedException { + if (isCancelled()) + return; + + try { + maybeUpdateLogStartOffsetOnBecomingLeader(log); + maybeUpdateCopiedOffset(log); + long copiedOffset = copiedOffsetOption.get().offset(); + + // LSO indicates the offset below are ready to be consumed (high-watermark or committed) + long lso = log.lastStableOffset(); + if (lso < 0) { + logger.warn("lastStableOffset for partition {} is {}, which should not be negative.", topicIdPartition, lso); + } else if (lso > 0 && copiedOffset < lso) { + // log-start-offset can be ahead of the copied-offset, when: + // 1) log-start-offset gets incremented via delete-records API (or) + // 2) enabling the remote log for the first time + long fromOffset = Math.max(copiedOffset + 1, log.logStartOffset()); + List candidateLogSegments = candidateLogSegments(log, fromOffset, lso); + logger.debug("Candidate log segments, logStartOffset: {}, copiedOffset: {}, fromOffset: {}, lso: {} " + + "and candidateLogSegments: {}", log.logStartOffset(), copiedOffset, fromOffset, lso, candidateLogSegments); + if (candidateLogSegments.isEmpty()) { + logger.debug("No segments found to be copied for partition {} with copiedOffset: {} and active segment's base-offset: {}", + topicIdPartition, copiedOffset, log.activeSegment().baseOffset()); + } else { + for (EnrichedLogSegment candidateLogSegment : candidateLogSegments) { + if (isCancelled()) { + logger.info("Skipping copying log segments as the current task state is changed, cancelled: {}", + isCancelled()); + return; + } + + copyQuotaManagerLock.lock(); + try { + long throttleTimeMs = rlmCopyQuotaManager.getThrottleTimeMs(); + while (throttleTimeMs > 0) { + copyThrottleTimeSensor.record(throttleTimeMs, time.milliseconds()); + logger.debug("Quota exceeded for copying log segments, waiting for the quota to be available."); + // If the thread gets interrupted while waiting, the InterruptedException is thrown + // back to the caller. It's important to note that the task being executed is already + // cancelled before the executing thread is interrupted. The caller is responsible + // for handling the exception gracefully by checking if the task is already cancelled. + boolean ignored = copyQuotaManagerLockCondition.await(quotaTimeout().toMillis(), TimeUnit.MILLISECONDS); + throttleTimeMs = rlmCopyQuotaManager.getThrottleTimeMs(); + } + rlmCopyQuotaManager.record(candidateLogSegment.logSegment.log().sizeInBytes()); + // Signal waiting threads to check the quota again + copyQuotaManagerLockCondition.signalAll(); + } finally { + copyQuotaManagerLock.unlock(); + } + + RemoteLogSegmentId segmentId = RemoteLogSegmentId.generateNew(topicIdPartition); + segmentIdsBeingCopied.add(segmentId); + try { + copyLogSegment(log, candidateLogSegment.logSegment, segmentId, candidateLogSegment.nextSegmentOffset); + } finally { + segmentIdsBeingCopied.remove(segmentId); + } + } + } + } else { + logger.debug("Skipping copying segments, current read-offset:{}, and LSO:{}", copiedOffset, lso); + } + } catch (CustomMetadataSizeLimitExceededException e) { + // Only stop this task. Logging is done where the exception is thrown. + brokerTopicStats.topicStats(log.topicPartition().topic()).failedRemoteCopyRequestRate().mark(); + brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().mark(); + this.cancel(); + } catch (InterruptedException | RetriableException ex) { + throw ex; + } catch (Exception ex) { + if (!isCancelled()) { + brokerTopicStats.topicStats(log.topicPartition().topic()).failedRemoteCopyRequestRate().mark(); + brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().mark(); + logger.error("Error occurred while copying log segments of partition: {}", topicIdPartition, ex); + } + } + } + + private void copyLogSegment(UnifiedLog log, LogSegment segment, RemoteLogSegmentId segmentId, long nextSegmentBaseOffset) + throws InterruptedException, ExecutionException, RemoteStorageException, IOException, + CustomMetadataSizeLimitExceededException { + File logFile = segment.log().file(); + String logFileName = logFile.getName(); + + logger.info("Copying {} to remote storage.", logFileName); + + long endOffset = nextSegmentBaseOffset - 1; + File producerStateSnapshotFile = log.producerStateManager().fetchSnapshot(nextSegmentBaseOffset).orElse(null); + + List epochEntries = getLeaderEpochEntries(log, segment.baseOffset(), nextSegmentBaseOffset); + Map segmentLeaderEpochs = new HashMap<>(epochEntries.size()); + epochEntries.forEach(entry -> segmentLeaderEpochs.put(entry.epoch, entry.startOffset)); + + boolean isTxnIdxEmpty = segment.txnIndex().isEmpty(); + RemoteLogSegmentMetadata copySegmentStartedRlsm = new RemoteLogSegmentMetadata(segmentId, segment.baseOffset(), endOffset, + segment.largestTimestamp(), brokerId, time.milliseconds(), segment.log().sizeInBytes(), + segmentLeaderEpochs, isTxnIdxEmpty); + + remoteLogMetadataManager.addRemoteLogSegmentMetadata(copySegmentStartedRlsm).get(); + + ByteBuffer leaderEpochsIndex = epochEntriesAsByteBuffer(getLeaderEpochEntries(log, -1, nextSegmentBaseOffset)); + LogSegmentData segmentData = new LogSegmentData(logFile.toPath(), toPathIfExists(segment.offsetIndex().file()), + toPathIfExists(segment.timeIndex().file()), Optional.ofNullable(toPathIfExists(segment.txnIndex().file())), + producerStateSnapshotFile.toPath(), leaderEpochsIndex); + brokerTopicStats.topicStats(log.topicPartition().topic()).remoteCopyRequestRate().mark(); + brokerTopicStats.allTopicsStats().remoteCopyRequestRate().mark(); + Optional customMetadata; + + try { + customMetadata = remoteLogStorageManager.copyLogSegmentData(copySegmentStartedRlsm, segmentData); + } catch (RemoteStorageException e) { + logger.info("Copy failed, cleaning segment {}", copySegmentStartedRlsm.remoteLogSegmentId()); + try { + deleteRemoteLogSegment(copySegmentStartedRlsm, ignored -> !isCancelled()); + LOGGER.info("Cleanup completed for segment {}", copySegmentStartedRlsm.remoteLogSegmentId()); + } catch (RemoteStorageException e1) { + LOGGER.info("Cleanup failed, will retry later with segment {}: {}", copySegmentStartedRlsm.remoteLogSegmentId(), e1.getMessage()); + } + throw e; + } + + RemoteLogSegmentMetadataUpdate copySegmentFinishedRlsm = new RemoteLogSegmentMetadataUpdate(segmentId, time.milliseconds(), + customMetadata, RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId); + + if (customMetadata.isPresent()) { + long customMetadataSize = customMetadata.get().value().length; + if (customMetadataSize > this.customMetadataSizeLimit) { + CustomMetadataSizeLimitExceededException e = new CustomMetadataSizeLimitExceededException(); + logger.info("Custom metadata size {} exceeds configured limit {}." + + " Copying will be stopped and copied segment will be attempted to clean." + + " Original metadata: {}", + customMetadataSize, this.customMetadataSizeLimit, copySegmentStartedRlsm, e); + // For deletion, we provide back the custom metadata by creating a new metadata object from the update. + // However, the update itself will not be stored in this case. + RemoteLogSegmentMetadata newMetadata = copySegmentStartedRlsm.createWithUpdates(copySegmentFinishedRlsm); + try { + deleteRemoteLogSegment(newMetadata, ignored -> !isCancelled()); + LOGGER.info("Cleanup completed for segment {}", newMetadata.remoteLogSegmentId()); + } catch (RemoteStorageException e1) { + LOGGER.info("Cleanup failed, will retry later with segment {}: {}", newMetadata.remoteLogSegmentId(), e1.getMessage()); + } + throw e; + } + } + + remoteLogMetadataManager.updateRemoteLogSegmentMetadata(copySegmentFinishedRlsm).get(); + brokerTopicStats.topicStats(log.topicPartition().topic()) + .remoteCopyBytesRate().mark(copySegmentStartedRlsm.segmentSizeInBytes()); + brokerTopicStats.allTopicsStats().remoteCopyBytesRate().mark(copySegmentStartedRlsm.segmentSizeInBytes()); + + // `epochEntries` cannot be empty, there is a pre-condition validation in RemoteLogSegmentMetadata + // constructor + int lastEpochInSegment = epochEntries.get(epochEntries.size() - 1).epoch; + copiedOffsetOption = Optional.of(new OffsetAndEpoch(endOffset, lastEpochInSegment)); + // Update the highest offset in remote storage for this partition's log so that the local log segments + // are not deleted before they are copied to remote storage. + log.updateHighestOffsetInRemoteStorage(endOffset); + logger.info("Copied {} to remote storage with segment-id: {}", + logFileName, copySegmentFinishedRlsm.remoteLogSegmentId()); + + long bytesLag = log.onlyLocalLogSegmentsSize() - log.activeSegment().size(); + long segmentsLag = log.onlyLocalLogSegmentsCount() - 1; + recordLagStats(bytesLag, segmentsLag); + } + + // VisibleForTesting + void recordLagStats(long bytesLag, long segmentsLag) { + if (!isCancelled()) { + String topic = topicIdPartition.topic(); + int partition = topicIdPartition.partition(); + brokerTopicStats.recordRemoteCopyLagBytes(topic, partition, bytesLag); + brokerTopicStats.recordRemoteCopyLagSegments(topic, partition, segmentsLag); + } + } + + void resetLagStats() { + String topic = topicIdPartition.topic(); + int partition = topicIdPartition.partition(); + brokerTopicStats.recordRemoteCopyLagBytes(topic, partition, 0); + brokerTopicStats.recordRemoteCopyLagSegments(topic, partition, 0); + } + + private Path toPathIfExists(File file) { + return file.exists() ? file.toPath() : null; + } + } + + class RLMExpirationTask extends RLMTask { + private final Logger logger; + + public RLMExpirationTask(TopicIdPartition topicIdPartition) { + super(topicIdPartition); + this.logger = getLogContext().logger(RLMExpirationTask.class); + } + + @Override + protected void execute(UnifiedLog log) throws InterruptedException, RemoteStorageException, ExecutionException { + cleanupExpiredRemoteLogSegments(); + } + + public void handleLogStartOffsetUpdate(TopicPartition topicPartition, long remoteLogStartOffset) { + logger.debug("Updating {} with remoteLogStartOffset: {}", topicPartition, remoteLogStartOffset); + updateRemoteLogStartOffset.accept(topicPartition, remoteLogStartOffset); + } + + class RemoteLogRetentionHandler { + + private final Optional retentionSizeData; + private final Optional retentionTimeData; + + private long remainingBreachedSize; + + private OptionalLong logStartOffset = OptionalLong.empty(); + + public RemoteLogRetentionHandler(Optional retentionSizeData, Optional retentionTimeData) { + this.retentionSizeData = retentionSizeData; + this.retentionTimeData = retentionTimeData; + remainingBreachedSize = retentionSizeData.map(sizeData -> sizeData.remainingBreachedSize).orElse(0L); + } + + private boolean isSegmentBreachedByRetentionSize(RemoteLogSegmentMetadata metadata) { + boolean shouldDeleteSegment = false; + if (retentionSizeData.isEmpty()) { + return shouldDeleteSegment; + } + // Assumption that segments contain size >= 0 + if (remainingBreachedSize > 0) { + long remainingBytes = remainingBreachedSize - metadata.segmentSizeInBytes(); + if (remainingBytes >= 0) { + remainingBreachedSize = remainingBytes; + shouldDeleteSegment = true; + } + } + if (shouldDeleteSegment) { + if (logStartOffset.isEmpty() || logStartOffset.getAsLong() < metadata.endOffset() + 1) { + logStartOffset = OptionalLong.of(metadata.endOffset() + 1); + } + logger.info("About to delete remote log segment {} due to retention size {} breach. Log size after deletion will be {}.", + metadata.remoteLogSegmentId(), retentionSizeData.get().retentionSize, remainingBreachedSize + retentionSizeData.get().retentionSize); + } + return shouldDeleteSegment; + } + + public boolean isSegmentBreachedByRetentionTime(RemoteLogSegmentMetadata metadata) { + boolean shouldDeleteSegment = false; + if (retentionTimeData.isEmpty()) { + return shouldDeleteSegment; + } + shouldDeleteSegment = metadata.maxTimestampMs() <= retentionTimeData.get().cleanupUntilMs; + if (shouldDeleteSegment) { + remainingBreachedSize = Math.max(0, remainingBreachedSize - metadata.segmentSizeInBytes()); + // It is fine to have logStartOffset as `metadata.endOffset() + 1` as the segment offset intervals + // are ascending with in an epoch. + if (logStartOffset.isEmpty() || logStartOffset.getAsLong() < metadata.endOffset() + 1) { + logStartOffset = OptionalLong.of(metadata.endOffset() + 1); + } + logger.info("About to delete remote log segment {} due to retention time {}ms breach based on the largest record timestamp in the segment", + metadata.remoteLogSegmentId(), retentionTimeData.get().retentionMs); + } + return shouldDeleteSegment; + } + + private boolean isSegmentBreachByLogStartOffset(RemoteLogSegmentMetadata metadata, + long logStartOffset, + NavigableMap leaderEpochEntries) { + boolean shouldDeleteSegment = false; + if (!leaderEpochEntries.isEmpty()) { + // Note that `logStartOffset` and `leaderEpochEntries.firstEntry().getValue()` should be same + Integer firstEpoch = leaderEpochEntries.firstKey(); + shouldDeleteSegment = metadata.segmentLeaderEpochs().keySet().stream().allMatch(epoch -> epoch <= firstEpoch) + && metadata.endOffset() < logStartOffset; + } + if (shouldDeleteSegment) { + logger.info("About to delete remote log segment {} due to log-start-offset {} breach. " + + "Current earliest-epoch-entry: {}, segment-end-offset: {} and segment-epochs: {}", + metadata.remoteLogSegmentId(), logStartOffset, leaderEpochEntries.firstEntry(), + metadata.endOffset(), metadata.segmentLeaderEpochs()); + } + return shouldDeleteSegment; + } + + // It removes the segments beyond the current leader's earliest epoch. Those segments are considered as + // unreferenced because they are not part of the current leader epoch lineage. + private boolean deleteLogSegmentsDueToLeaderEpochCacheTruncation(EpochEntry earliestEpochEntry, + RemoteLogSegmentMetadata metadata) + throws RemoteStorageException, ExecutionException, InterruptedException { + boolean isSegmentDeleted = deleteRemoteLogSegment(metadata, + ignored -> metadata.segmentLeaderEpochs().keySet().stream().allMatch(epoch -> epoch < earliestEpochEntry.epoch)); + if (isSegmentDeleted) { + logger.info("Deleted remote log segment {} due to leader-epoch-cache truncation. " + + "Current earliest-epoch-entry: {}, segment-end-offset: {} and segment-epochs: {}", + metadata.remoteLogSegmentId(), earliestEpochEntry, metadata.endOffset(), metadata.segmentLeaderEpochs().keySet()); + } + // No need to update the log-start-offset as these epochs/offsets are earlier to that value. + return isSegmentDeleted; + } + } + + private void updateMetadataCountAndLogSizeWith(int metadataCount, long remoteLogSizeBytes) { + int partition = topicIdPartition.partition(); + String topic = topicIdPartition.topic(); + brokerTopicStats.recordRemoteLogMetadataCount(topic, partition, metadataCount); + brokerTopicStats.recordRemoteLogSizeBytes(topic, partition, remoteLogSizeBytes); + } + + private void updateRemoteDeleteLagWith(int segmentsLeftToDelete, long sizeOfDeletableSegmentsBytes) { + String topic = topicIdPartition.topic(); + int partition = topicIdPartition.partition(); + brokerTopicStats.recordRemoteDeleteLagSegments(topic, partition, segmentsLeftToDelete); + brokerTopicStats.recordRemoteDeleteLagBytes(topic, partition, sizeOfDeletableSegmentsBytes); + } + + /** Cleanup expired and dangling remote log segments. */ + void cleanupExpiredRemoteLogSegments() throws RemoteStorageException, ExecutionException, InterruptedException { + if (isCancelled()) { + logger.info("Returning from remote log segments cleanup as the task state is changed"); + return; + } + + final Optional logOptional = fetchLog.apply(topicIdPartition.topicPartition()); + if (logOptional.isEmpty()) { + logger.debug("No UnifiedLog instance available for partition: {}", topicIdPartition); + return; + } + + final UnifiedLog log = logOptional.get(); + + // Cleanup remote log segments and update the log start offset if applicable. + final Iterator segmentMetadataIter = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition); + if (!segmentMetadataIter.hasNext()) { + updateMetadataCountAndLogSizeWith(0, 0); + logger.debug("No remote log segments available on remote storage for partition: {}", topicIdPartition); + return; + } + + final Set epochsSet = new HashSet<>(); + int metadataCount = 0; + long remoteLogSizeBytes = 0; + // Good to have an API from RLMM to get all the remote leader epochs of all the segments of a partition + // instead of going through all the segments and building it here. + while (segmentMetadataIter.hasNext()) { + RemoteLogSegmentMetadata segmentMetadata = segmentMetadataIter.next(); + epochsSet.addAll(segmentMetadata.segmentLeaderEpochs().keySet()); + metadataCount++; + remoteLogSizeBytes += segmentMetadata.segmentSizeInBytes(); + } + + updateMetadataCountAndLogSizeWith(metadataCount, remoteLogSizeBytes); + + // All the leader epochs in sorted order that exists in remote storage + final List remoteLeaderEpochs = new ArrayList<>(epochsSet); + Collections.sort(remoteLeaderEpochs); + + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + // Build the leader epoch map by filtering the epochs that do not have any records. + NavigableMap epochWithOffsets = buildFilteredLeaderEpochMap(leaderEpochCache.epochWithOffsets()); + + long logStartOffset = log.logStartOffset(); + long logEndOffset = log.logEndOffset(); + Optional retentionSizeData = buildRetentionSizeData(log.config().retentionSize, + log.onlyLocalLogSegmentsSize(), logEndOffset, epochWithOffsets); + Optional retentionTimeData = buildRetentionTimeData(log.config().retentionMs); + + RemoteLogRetentionHandler remoteLogRetentionHandler = new RemoteLogRetentionHandler(retentionSizeData, retentionTimeData); + Iterator epochIterator = epochWithOffsets.navigableKeySet().iterator(); + boolean canProcess = true; + List segmentsToDelete = new ArrayList<>(); + long sizeOfDeletableSegmentsBytes = 0L; + while (canProcess && epochIterator.hasNext()) { + Integer epoch = epochIterator.next(); + Iterator segmentsIterator = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); + while (canProcess && segmentsIterator.hasNext()) { + if (isCancelled()) { + logger.info("Returning from remote log segments cleanup for the remaining segments as the task state is changed."); + return; + } + RemoteLogSegmentMetadata metadata = segmentsIterator.next(); + + if (segmentIdsBeingCopied.contains(metadata.remoteLogSegmentId())) { + logger.debug("Copy for the segment {} is currently in process. Skipping cleanup for it and the remaining segments", + metadata.remoteLogSegmentId()); + canProcess = false; + continue; + } + // This works as retry mechanism for dangling remote segments that failed the deletion in previous attempts. + // Rather than waiting for the retention to kick in, we cleanup early to avoid polluting the cache and possibly waste remote storage. + if (RemoteLogSegmentState.DELETE_SEGMENT_STARTED.equals(metadata.state())) { + segmentsToDelete.add(metadata); + continue; + } + if (RemoteLogSegmentState.DELETE_SEGMENT_FINISHED.equals(metadata.state())) { + continue; + } + if (segmentsToDelete.contains(metadata)) { + continue; + } + // When the log-start-offset is moved by the user, the leader-epoch-checkpoint file gets truncated + // as per the log-start-offset. Until the rlm-cleaner-thread runs in the next iteration, those + // remote log segments won't be removed. The `isRemoteSegmentWithinLeaderEpoch` validates whether + // the epochs present in the segment lies in the checkpoint file. It will always return false + // since the checkpoint file was already truncated. + boolean shouldDeleteSegment = remoteLogRetentionHandler.isSegmentBreachByLogStartOffset( + metadata, logStartOffset, epochWithOffsets); + boolean isValidSegment = false; + if (!shouldDeleteSegment) { + // check whether the segment contains the required epoch range with in the current leader epoch lineage. + isValidSegment = isRemoteSegmentWithinLeaderEpochs(metadata, logEndOffset, epochWithOffsets); + if (isValidSegment) { + shouldDeleteSegment = + remoteLogRetentionHandler.isSegmentBreachedByRetentionTime(metadata) || + remoteLogRetentionHandler.isSegmentBreachedByRetentionSize(metadata); + } + } + if (shouldDeleteSegment) { + segmentsToDelete.add(metadata); + sizeOfDeletableSegmentsBytes += metadata.segmentSizeInBytes(); + } + canProcess = shouldDeleteSegment || !isValidSegment; + } + } + + // Update log start offset with the computed value after retention cleanup is done + remoteLogRetentionHandler.logStartOffset.ifPresent(offset -> handleLogStartOffsetUpdate(topicIdPartition.topicPartition(), offset)); + + // At this point in time we have updated the log start offsets, but not initiated a deletion. + // Either a follower has picked up the changes to the log start offset, or they have not. + // If the follower HAS picked up the changes, and they become the leader this replica won't successfully complete + // the deletion. + // However, the new leader will correctly pick up all breaching segments as log start offset breaching ones + // and delete them accordingly. + // If the follower HAS NOT picked up the changes, and they become the leader then they will go through this process + // again and delete them with the original deletion reason i.e. size, time or log start offset breach. + int segmentsLeftToDelete = segmentsToDelete.size(); + updateRemoteDeleteLagWith(segmentsLeftToDelete, sizeOfDeletableSegmentsBytes); + List undeletedSegments = new ArrayList<>(); + for (RemoteLogSegmentMetadata segmentMetadata : segmentsToDelete) { + if (!deleteRemoteLogSegment(segmentMetadata, ignored -> !isCancelled())) { + undeletedSegments.add(segmentMetadata.remoteLogSegmentId().toString()); + } else { + sizeOfDeletableSegmentsBytes -= segmentMetadata.segmentSizeInBytes(); + segmentsLeftToDelete--; + updateRemoteDeleteLagWith(segmentsLeftToDelete, sizeOfDeletableSegmentsBytes); + } + } + if (!undeletedSegments.isEmpty()) { + logger.info("The following remote segments could not be deleted: {}", String.join(",", undeletedSegments)); + } + + // Remove the remote log segments whose segment-leader-epochs are less than the earliest-epoch known + // to the leader. This will remove the unreferenced segments in the remote storage. This is needed for + // unclean leader election scenarios as the remote storage can have epochs earlier to the current leader's + // earliest leader epoch. + Optional earliestEpochEntryOptional = leaderEpochCache.earliestEntry(); + if (earliestEpochEntryOptional.isPresent()) { + EpochEntry earliestEpochEntry = earliestEpochEntryOptional.get(); + Iterator epochsToClean = remoteLeaderEpochs.stream() + .filter(remoteEpoch -> remoteEpoch < earliestEpochEntry.epoch) + .iterator(); + + List listOfSegmentsToBeCleaned = new ArrayList<>(); + + while (epochsToClean.hasNext()) { + int epoch = epochsToClean.next(); + Iterator segmentsToBeCleaned = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); + while (segmentsToBeCleaned.hasNext()) { + if (!isCancelled()) { + RemoteLogSegmentMetadata nextSegmentMetadata = segmentsToBeCleaned.next(); + sizeOfDeletableSegmentsBytes += nextSegmentMetadata.segmentSizeInBytes(); + listOfSegmentsToBeCleaned.add(nextSegmentMetadata); + } + } + } + + segmentsLeftToDelete += listOfSegmentsToBeCleaned.size(); + updateRemoteDeleteLagWith(segmentsLeftToDelete, sizeOfDeletableSegmentsBytes); + for (RemoteLogSegmentMetadata segmentMetadata : listOfSegmentsToBeCleaned) { + if (!isCancelled()) { + // No need to update the log-start-offset even though the segment is deleted as these epochs/offsets are earlier to that value. + if (remoteLogRetentionHandler.deleteLogSegmentsDueToLeaderEpochCacheTruncation(earliestEpochEntry, segmentMetadata)) { + sizeOfDeletableSegmentsBytes -= segmentMetadata.segmentSizeInBytes(); + segmentsLeftToDelete--; + updateRemoteDeleteLagWith(segmentsLeftToDelete, sizeOfDeletableSegmentsBytes); + } + } + } + } + } + + private Optional buildRetentionTimeData(long retentionMs) { + long cleanupUntilMs = time.milliseconds() - retentionMs; + return retentionMs > -1 && cleanupUntilMs >= 0 + ? Optional.of(new RetentionTimeData(retentionMs, cleanupUntilMs)) + : Optional.empty(); + } + + private Optional buildRetentionSizeData(long retentionSize, + long onlyLocalLogSegmentsSize, + long logEndOffset, + NavigableMap epochEntries) throws RemoteStorageException { + if (retentionSize > -1) { + long startTimeMs = time.milliseconds(); + long remoteLogSizeBytes = 0L; + Set visitedSegmentIds = new HashSet<>(); + for (Integer epoch : epochEntries.navigableKeySet()) { + // remoteLogSize(topicIdPartition, epochEntry.epoch) may not be completely accurate as the remote + // log size may be computed for all the segments but not for segments with in the current + // partition's leader epoch lineage. Better to revisit this API. + // remoteLogSizeBytes += remoteLogMetadataManager.remoteLogSize(topicIdPartition, epochEntry.epoch); + Iterator segmentsIterator = remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, epoch); + while (segmentsIterator.hasNext()) { + RemoteLogSegmentMetadata segmentMetadata = segmentsIterator.next(); + // Count only the size of segments in "COPY_SEGMENT_FINISHED" state because + // "COPY_SEGMENT_STARTED" means copy didn't complete and we will count them later, + // "DELETE_SEGMENT_STARTED" means deletion failed in the previous attempt and we will retry later, + // "DELETE_SEGMENT_FINISHED" means deletion completed, so there is nothing to count. + if (segmentMetadata.state().equals(RemoteLogSegmentState.COPY_SEGMENT_FINISHED)) { + RemoteLogSegmentId segmentId = segmentMetadata.remoteLogSegmentId(); + if (!visitedSegmentIds.contains(segmentId) && isRemoteSegmentWithinLeaderEpochs(segmentMetadata, logEndOffset, epochEntries)) { + remoteLogSizeBytes += segmentMetadata.segmentSizeInBytes(); + visitedSegmentIds.add(segmentId); + } + } + } + } + + brokerTopicStats.recordRemoteLogSizeComputationTime(topicIdPartition.topic(), topicIdPartition.partition(), time.milliseconds() - startTimeMs); + + // This is the total size of segments in local log that have their base-offset > local-log-start-offset + // and size of the segments in remote storage which have their end-offset < local-log-start-offset. + long totalSize = onlyLocalLogSegmentsSize + remoteLogSizeBytes; + if (totalSize > retentionSize) { + long remainingBreachedSize = totalSize - retentionSize; + RetentionSizeData retentionSizeData = new RetentionSizeData(retentionSize, remainingBreachedSize); + return Optional.of(retentionSizeData); + } + } + + return Optional.empty(); + } + } + + class RLMFollowerTask extends RLMTask { + + public RLMFollowerTask(TopicIdPartition topicIdPartition) { + super(topicIdPartition); + } + + @Override + protected void execute(UnifiedLog log) throws InterruptedException, RemoteStorageException, ExecutionException { + OffsetAndEpoch offsetAndEpoch = findHighestRemoteOffset(topicIdPartition, log); + // Update the highest offset in remote storage for this partition's log so that the local log segments + // are not deleted before they are copied to remote storage. + log.updateHighestOffsetInRemoteStorage(offsetAndEpoch.offset()); + } + } + + private boolean deleteRemoteLogSegment( + RemoteLogSegmentMetadata segmentMetadata, + Predicate predicate + ) throws RemoteStorageException, ExecutionException, InterruptedException { + if (predicate.test(segmentMetadata)) { + LOGGER.debug("Deleting remote log segment {}", segmentMetadata.remoteLogSegmentId()); + String topic = segmentMetadata.topicIdPartition().topic(); + + // Publish delete segment started event. + remoteLogMetadataManager.updateRemoteLogSegmentMetadata( + new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), + segmentMetadata.customMetadata(), RemoteLogSegmentState.DELETE_SEGMENT_STARTED, brokerId)).get(); + + brokerTopicStats.topicStats(topic).remoteDeleteRequestRate().mark(); + brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().mark(); + + // Delete the segment in remote storage. + try { + remoteLogStorageManager.deleteLogSegmentData(segmentMetadata); + } catch (RemoteStorageException e) { + brokerTopicStats.topicStats(topic).failedRemoteDeleteRequestRate().mark(); + brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().mark(); + throw e; + } + + // Publish delete segment finished event. + remoteLogMetadataManager.updateRemoteLogSegmentMetadata( + new RemoteLogSegmentMetadataUpdate(segmentMetadata.remoteLogSegmentId(), time.milliseconds(), + segmentMetadata.customMetadata(), RemoteLogSegmentState.DELETE_SEGMENT_FINISHED, brokerId)).get(); + LOGGER.debug("Deleted remote log segment {}", segmentMetadata.remoteLogSegmentId()); + return true; + } + return false; + } + + /** + * Returns true if the remote segment's epoch/offsets are within the leader epoch lineage of the partition. + * The constraints here are as follows: + * - The segment's first epoch's offset should be more than or equal to the respective leader epoch's offset in the partition leader epoch lineage. + * - The segment's end offset should be less than or equal to the respective leader epoch's offset in the partition leader epoch lineage. + * - The segment's epoch lineage(epoch and offset) should be same as leader epoch lineage((epoch and offset)) except + * for the first and the last epochs in the segment. + * + * @param segmentMetadata The remote segment metadata to be validated. + * @param logEndOffset The log end offset of the partition. + * @param leaderEpochs The leader epoch lineage of the partition by filtering the epochs containing no data. + * @return true if the remote segment's epoch/offsets are within the leader epoch lineage of the partition. + */ + // Visible for testing + static boolean isRemoteSegmentWithinLeaderEpochs(RemoteLogSegmentMetadata segmentMetadata, + long logEndOffset, + NavigableMap leaderEpochs) { + long segmentEndOffset = segmentMetadata.endOffset(); + // Filter epochs that does not have any messages/records associated with them. + NavigableMap segmentLeaderEpochs = buildFilteredLeaderEpochMap(segmentMetadata.segmentLeaderEpochs()); + // Check for out of bound epochs between segment epochs and current leader epochs. + Integer segmentLastEpoch = segmentLeaderEpochs.lastKey(); + if (segmentLastEpoch < leaderEpochs.firstKey() || segmentLastEpoch > leaderEpochs.lastKey()) { + LOGGER.debug("Segment {} is not within the partition leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs); + return false; + } + // There can be overlapping remote log segments in the remote storage. (eg) + // leader-epoch-file-cache: {(5, 10), (7, 15), (9, 100)} + // segment1: offset-range = 5-50, Broker = 0, epochs = {(5, 10), (7, 15)} + // segment2: offset-range = 14-150, Broker = 1, epochs = {(5, 14), (7, 15), (9, 100)}, after leader-election. + // When the segment1 gets deleted, then the log-start-offset = 51 and leader-epoch-file-cache gets updated to: {(7, 51), (9, 100)}. + // While validating the segment2, we should ensure the overlapping remote log segments case. + Integer segmentFirstEpoch = segmentLeaderEpochs.ceilingKey(leaderEpochs.firstKey()); + if (segmentFirstEpoch == null) { + LOGGER.debug("Segment {} is not within the partition leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), segmentLeaderEpochs, leaderEpochs); + return false; + } + for (Map.Entry entry : segmentLeaderEpochs.entrySet()) { + int epoch = entry.getKey(); + long offset = entry.getValue(); + if (epoch < segmentFirstEpoch) { + continue; + } + // If segment's epoch does not exist in the leader epoch lineage then it is not a valid segment. + if (!leaderEpochs.containsKey(epoch)) { + LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); + return false; + } + // Two cases: + // case-1: When the segment-first-epoch equals to the first-epoch in the leader-epoch-lineage, then the + // offset value can lie anywhere between 0 to (next-epoch-start-offset - 1) is valid. + // case-2: When the segment-first-epoch is not equal to the first-epoch in the leader-epoch-lineage, then + // the offset value should be between (current-epoch-start-offset) to (next-epoch-start-offset - 1). + if (epoch == segmentFirstEpoch && leaderEpochs.lowerKey(epoch) != null && offset < leaderEpochs.get(epoch)) { + LOGGER.debug("Segment {} first-valid epoch {} offset is less than first leader epoch offset {}." + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), epoch, leaderEpochs.get(epoch), + segmentLeaderEpochs, leaderEpochs); + return false; + } + // Segment's end offset should be less than or equal to the respective leader epoch's offset. + if (epoch == segmentLastEpoch) { + Map.Entry nextEntry = leaderEpochs.higherEntry(epoch); + if (nextEntry != null && segmentEndOffset > nextEntry.getValue() - 1) { + LOGGER.debug("Segment {} end offset {} is more than leader epoch offset {}." + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), segmentEndOffset, nextEntry.getValue() - 1, + segmentLeaderEpochs, leaderEpochs); + return false; + } + } + // Next segment epoch entry and next leader epoch entry should be same to ensure that the segment's epoch + // is within the leader epoch lineage. + if (epoch != segmentLastEpoch && !leaderEpochs.higherEntry(epoch).equals(segmentLeaderEpochs.higherEntry(epoch))) { + LOGGER.debug("Segment {} epoch {} is not within the leader epoch lineage. " + + "Remote segment epochs: {} and partition leader epochs: {}", + segmentMetadata.remoteLogSegmentId(), epoch, segmentLeaderEpochs, leaderEpochs); + return false; + } + } + // segment end offset should be with in the log end offset. + if (segmentEndOffset >= logEndOffset) { + LOGGER.debug("Segment {} end offset {} is more than log end offset {}.", + segmentMetadata.remoteLogSegmentId(), segmentEndOffset, logEndOffset); + return false; + } + return true; + } + + /** + * Returns a map containing the epoch vs start-offset for the given leader epoch map by filtering the epochs that + * does not contain any messages/records associated with them. + * For ex: + *

          +     * {@code
          +     *  
          +     *  0 - 0
          +     *  1 - 10
          +     *  2 - 20
          +     *  3 - 30
          +     *  4 - 40
          +     *  5 - 60  // epoch 5 does not have records or messages associated with it
          +     *  6 - 60
          +     *  7 - 70
          +     * }
          +     * 
          + * When the above leaderEpochMap is passed to this method, it returns the following map: + *
          +     * {@code
          +     *  
          +     *  0 - 0
          +     *  1 - 10
          +     *  2 - 20
          +     *  3 - 30
          +     *  4 - 40
          +     *  6 - 60
          +     *  7 - 70
          +     * }
          +     * 
          + * @param leaderEpochs The leader epoch map to be refined. + */ + // Visible for testing + static NavigableMap buildFilteredLeaderEpochMap(NavigableMap leaderEpochs) { + List epochsWithNoMessages = new ArrayList<>(); + Map.Entry previousEpochAndOffset = null; + for (Map.Entry currentEpochAndOffset : leaderEpochs.entrySet()) { + if (previousEpochAndOffset != null && previousEpochAndOffset.getValue().equals(currentEpochAndOffset.getValue())) { + epochsWithNoMessages.add(previousEpochAndOffset.getKey()); + } + previousEpochAndOffset = currentEpochAndOffset; + } + if (epochsWithNoMessages.isEmpty()) { + return leaderEpochs; + } + TreeMap filteredLeaderEpochs = new TreeMap<>(leaderEpochs); + for (Integer epochWithNoMessage : epochsWithNoMessages) { + filteredLeaderEpochs.remove(epochWithNoMessage); + } + return filteredLeaderEpochs; + } + + public FetchDataInfo read(RemoteStorageFetchInfo remoteStorageFetchInfo) throws RemoteStorageException, IOException { + int fetchMaxBytes = remoteStorageFetchInfo.fetchMaxBytes; + TopicPartition tp = remoteStorageFetchInfo.topicPartition; + FetchRequest.PartitionData fetchInfo = remoteStorageFetchInfo.fetchInfo; + + boolean includeAbortedTxns = remoteStorageFetchInfo.fetchIsolation == FetchIsolation.TXN_COMMITTED; + + long offset = fetchInfo.fetchOffset; + int maxBytes = Math.min(fetchMaxBytes, fetchInfo.maxBytes); + + Optional logOptional = fetchLog.apply(tp); + OptionalInt epoch = OptionalInt.empty(); + + if (logOptional.isPresent()) { + LeaderEpochFileCache leaderEpochCache = logOptional.get().leaderEpochCache(); + epoch = leaderEpochCache.epochForOffset(offset); + } + + Optional rlsMetadataOptional = epoch.isPresent() + ? fetchRemoteLogSegmentMetadata(tp, epoch.getAsInt(), offset) + : Optional.empty(); + + if (rlsMetadataOptional.isEmpty()) { + String epochStr = (epoch.isPresent()) ? Integer.toString(epoch.getAsInt()) : "NOT AVAILABLE"; + throw new OffsetOutOfRangeException("Received request for offset " + offset + " for leader epoch " + + epochStr + " and partition " + tp + " which does not exist in remote tier."); + } + + RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlsMetadataOptional.get(); + EnrichedRecordBatch enrichedRecordBatch = new EnrichedRecordBatch(null, 0); + InputStream remoteSegInputStream = null; + try { + int startPos = 0; + // Iteration over multiple RemoteSegmentMetadata is required in case of log compaction. + // It may be possible the offset is log compacted in the current RemoteLogSegmentMetadata + // And we need to iterate over the next segment metadata to fetch messages higher than the given offset. + while (enrichedRecordBatch.batch == null && rlsMetadataOptional.isPresent()) { + remoteLogSegmentMetadata = rlsMetadataOptional.get(); + // Search forward for the position of the last offset that is greater than or equal to the target offset + startPos = lookupPositionForOffset(remoteLogSegmentMetadata, offset); + remoteSegInputStream = remoteLogStorageManager.fetchLogSegment(remoteLogSegmentMetadata, startPos); + RemoteLogInputStream remoteLogInputStream = getRemoteLogInputStream(remoteSegInputStream); + enrichedRecordBatch = findFirstBatch(remoteLogInputStream, offset); + if (enrichedRecordBatch.batch == null) { + Utils.closeQuietly(remoteSegInputStream, "RemoteLogSegmentInputStream"); + rlsMetadataOptional = findNextSegmentMetadata(rlsMetadataOptional.get(), logOptional.get().leaderEpochCache()); + } + } + RecordBatch firstBatch = enrichedRecordBatch.batch; + if (firstBatch == null) + return new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, + includeAbortedTxns ? Optional.of(Collections.emptyList()) : Optional.empty()); + + int firstBatchSize = firstBatch.sizeInBytes(); + // An empty record is sent instead of an incomplete batch when + // - there is no minimum-one-message constraint and + // - the first batch size is more than maximum bytes that can be sent and + // - for FetchRequest version 3 or above. + if (!remoteStorageFetchInfo.minOneMessage && + !remoteStorageFetchInfo.hardMaxBytesLimit && + firstBatchSize > maxBytes) { + return new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY); + } + + int updatedFetchSize = + remoteStorageFetchInfo.minOneMessage && firstBatchSize > maxBytes ? firstBatchSize : maxBytes; + + ByteBuffer buffer = ByteBuffer.allocate(updatedFetchSize); + int remainingBytes = updatedFetchSize; + + firstBatch.writeTo(buffer); + remainingBytes -= firstBatchSize; + + if (remainingBytes > 0) { + // read the input stream until min of (EOF stream or buffer's remaining capacity). + Utils.readFully(remoteSegInputStream, buffer); + } + buffer.flip(); + + startPos = startPos + enrichedRecordBatch.skippedBytes; + FetchDataInfo fetchDataInfo = new FetchDataInfo( + new LogOffsetMetadata(firstBatch.baseOffset(), remoteLogSegmentMetadata.startOffset(), startPos), + MemoryRecords.readableRecords(buffer)); + if (includeAbortedTxns) { + fetchDataInfo = addAbortedTransactions(firstBatch.baseOffset(), remoteLogSegmentMetadata, fetchDataInfo, logOptional.get()); + } + + return fetchDataInfo; + } finally { + if (enrichedRecordBatch.batch != null) { + Utils.closeQuietly(remoteSegInputStream, "RemoteLogSegmentInputStream"); + } + } + } + // for testing + RemoteLogInputStream getRemoteLogInputStream(InputStream in) { + return new RemoteLogInputStream(in); + } + + // Visible for testing + int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { + return indexCache.lookupOffset(remoteLogSegmentMetadata, offset); + } + + private FetchDataInfo addAbortedTransactions(long startOffset, + RemoteLogSegmentMetadata segmentMetadata, + FetchDataInfo fetchInfo, + UnifiedLog log) throws RemoteStorageException { + int fetchSize = fetchInfo.records.sizeInBytes(); + OffsetPosition startOffsetPosition = new OffsetPosition(fetchInfo.fetchOffsetMetadata.messageOffset, + fetchInfo.fetchOffsetMetadata.relativePositionInSegment); + + OffsetIndex offsetIndex = indexCache.getIndexEntry(segmentMetadata).offsetIndex(); + long upperBoundOffset = offsetIndex.fetchUpperBoundOffset(startOffsetPosition, fetchSize) + .map(position -> position.offset).orElse(segmentMetadata.endOffset() + 1); + + final Set abortedTransactions = new HashSet<>(); + + Consumer> accumulator = + abortedTxns -> abortedTransactions.addAll(abortedTxns.stream() + .map(AbortedTxn::asAbortedTransaction).collect(Collectors.toList())); + + long startTimeNs = time.nanoseconds(); + collectAbortedTransactions(startOffset, upperBoundOffset, segmentMetadata, accumulator, log); + LOGGER.debug("Time taken to collect: {} aborted transactions for {} in {} ns", abortedTransactions.size(), + segmentMetadata, time.nanoseconds() - startTimeNs); + + return new FetchDataInfo(fetchInfo.fetchOffsetMetadata, + fetchInfo.records, + fetchInfo.firstEntryIncomplete, + Optional.of(abortedTransactions.isEmpty() ? Collections.emptyList() : new ArrayList<>(abortedTransactions))); + } + + /** + * Collects the aborted transaction entries from the current and subsequent segments until the upper bound offset. + * Note that the accumulated aborted transaction entries might contain duplicates as it collects the entries across + * segments. We are relying on the client to discard the duplicates. + * @param startOffset The start offset of the fetch request. + * @param upperBoundOffset The upper bound offset of the fetch request. + * @param segmentMetadata The current segment metadata. + * @param accumulator The accumulator to collect the aborted transactions. + * @param log The unified log instance. + * @throws RemoteStorageException If an error occurs while fetching the remote log segment metadata. + */ + private void collectAbortedTransactions(long startOffset, + long upperBoundOffset, + RemoteLogSegmentMetadata segmentMetadata, + Consumer> accumulator, + UnifiedLog log) throws RemoteStorageException { + TopicPartition tp = segmentMetadata.topicIdPartition().topicPartition(); + boolean isSearchComplete = false; + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + Optional currentMetadataOpt = Optional.of(segmentMetadata); + while (!isSearchComplete && currentMetadataOpt.isPresent()) { + RemoteLogSegmentMetadata currentMetadata = currentMetadataOpt.get(); + Optional txnIndexOpt = getTransactionIndex(currentMetadata); + if (txnIndexOpt.isPresent()) { + TransactionIndex txnIndex = txnIndexOpt.get(); + TxnIndexSearchResult searchResult = txnIndex.collectAbortedTxns(startOffset, upperBoundOffset); + accumulator.accept(searchResult.abortedTransactions); + isSearchComplete = searchResult.isComplete; + } + if (!isSearchComplete) { + currentMetadataOpt = findNextSegmentWithTxnIndex(tp, currentMetadata.endOffset() + 1, leaderEpochCache); + } + } + // Search in local segments + if (!isSearchComplete) { + collectAbortedTransactionInLocalSegments(startOffset, upperBoundOffset, accumulator, log.logSegments().iterator()); + } + } + + private Optional getTransactionIndex(RemoteLogSegmentMetadata currentMetadata) { + return !currentMetadata.isTxnIdxEmpty() ? + // `ofNullable` is needed for backward compatibility for old events that were stored in the + // `__remote_log_metadata` topic. The old events will return the `txnIdxEmpty` as false, but the + // transaction index may not exist in the remote storage. + Optional.ofNullable(indexCache.getIndexEntry(currentMetadata).txnIndex()) : Optional.empty(); + } + + private void collectAbortedTransactionInLocalSegments(long startOffset, + long upperBoundOffset, + Consumer> accumulator, + Iterator localLogSegments) { + while (localLogSegments.hasNext()) { + TransactionIndex txnIndex = localLogSegments.next().txnIndex(); + if (txnIndex != null) { + TxnIndexSearchResult searchResult = txnIndex.collectAbortedTxns(startOffset, upperBoundOffset); + accumulator.accept(searchResult.abortedTransactions); + if (searchResult.isComplete) { + return; + } + } + } + } + + // visible for testing. + Optional findNextSegmentMetadata(RemoteLogSegmentMetadata segmentMetadata, + LeaderEpochFileCache leaderEpochFileCacheOption) throws RemoteStorageException { + long nextSegmentBaseOffset = segmentMetadata.endOffset() + 1; + OptionalInt epoch = leaderEpochFileCacheOption.epochForOffset(nextSegmentBaseOffset); + return epoch.isPresent() + ? fetchRemoteLogSegmentMetadata(segmentMetadata.topicIdPartition().topicPartition(), epoch.getAsInt(), nextSegmentBaseOffset) + : Optional.empty(); + } + + /** + * Returns the next segment metadata that contains the aborted transaction entries from the given offset. + * Note that the search starts from the given (offset-for-epoch, offset) pair, when there are no segments contains + * the transaction index in that epoch, then it proceeds to the next epoch (next-epoch, epoch-start-offset) + * and the search ends when the segment metadata is found or the leader epoch cache is exhausted. + * Note that the returned segment metadata may or may not contain the transaction index. + * Visible for testing + * @param tp The topic partition. + * @param offset The offset to start the search. + * @param leaderEpochCache The leader epoch file cache. + * @return The next segment metadata that contains the transaction index. The transaction index may or may not exist + * in that segment metadata which depends on the RLMM plugin implementation. The caller of this method should handle + * for both the cases. + * @throws RemoteStorageException If an error occurs while fetching the remote log segment metadata. + */ + Optional findNextSegmentWithTxnIndex(TopicPartition tp, + long offset, + LeaderEpochFileCache leaderEpochCache) throws RemoteStorageException { + OptionalInt initialEpochOpt = leaderEpochCache.epochForOffset(offset); + if (initialEpochOpt.isEmpty()) { + return Optional.empty(); + } + int initialEpoch = initialEpochOpt.getAsInt(); + for (EpochEntry epochEntry : leaderEpochCache.epochEntries()) { + if (epochEntry.epoch >= initialEpoch) { + long startOffset = Math.max(epochEntry.startOffset, offset); + Optional metadataOpt = fetchNextSegmentWithTxnIndex(tp, epochEntry.epoch, startOffset); + if (metadataOpt.isPresent()) { + return metadataOpt; + } + } + } + return Optional.empty(); + } + + // Visible for testing + EnrichedRecordBatch findFirstBatch(RemoteLogInputStream remoteLogInputStream, long offset) throws IOException { + int skippedBytes = 0; + RecordBatch nextBatch = null; + // Look for the batch which has the desired offset + // We will always have a batch in that segment as it is a non-compacted topic. + do { + if (nextBatch != null) { + skippedBytes += nextBatch.sizeInBytes(); + } + nextBatch = remoteLogInputStream.nextBatch(); + } while (nextBatch != null && nextBatch.lastOffset() < offset); + return new EnrichedRecordBatch(nextBatch, skippedBytes); + } + + OffsetAndEpoch findHighestRemoteOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { + OffsetAndEpoch offsetAndEpoch = null; + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + Optional maybeEpochEntry = leaderEpochCache.latestEntry(); + while (offsetAndEpoch == null && maybeEpochEntry.isPresent()) { + int epoch = maybeEpochEntry.get().epoch; + Optional highestRemoteOffsetOpt = + remoteLogMetadataManager.highestOffsetForEpoch(topicIdPartition, epoch); + if (highestRemoteOffsetOpt.isPresent()) { + Map.Entry entry = leaderEpochCache.endOffsetFor(epoch, log.logEndOffset()); + int requestedEpoch = entry.getKey(); + long endOffset = entry.getValue(); + long highestRemoteOffset = highestRemoteOffsetOpt.get(); + if (endOffset <= highestRemoteOffset) { + LOGGER.info("The end-offset for epoch {}: ({}, {}) is less than or equal to the " + + "highest-remote-offset: {} for partition: {}", epoch, requestedEpoch, endOffset, + highestRemoteOffset, topicIdPartition); + offsetAndEpoch = new OffsetAndEpoch(endOffset - 1, requestedEpoch); + } else { + offsetAndEpoch = new OffsetAndEpoch(highestRemoteOffset, epoch); + } + } + maybeEpochEntry = leaderEpochCache.previousEntry(epoch); + } + if (offsetAndEpoch == null) { + offsetAndEpoch = new OffsetAndEpoch(-1L, RecordBatch.NO_PARTITION_LEADER_EPOCH); + } + return offsetAndEpoch; + } + + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) throws RemoteStorageException { + Optional logStartOffset = Optional.empty(); + LeaderEpochFileCache leaderEpochCache = log.leaderEpochCache(); + OptionalInt earliestEpochOpt = leaderEpochCache.earliestEntry() + .map(epochEntry -> OptionalInt.of(epochEntry.epoch)) + .orElseGet(OptionalInt::empty); + while (logStartOffset.isEmpty() && earliestEpochOpt.isPresent()) { + Iterator iterator = + remoteLogMetadataManager.listRemoteLogSegments(topicIdPartition, earliestEpochOpt.getAsInt()); + if (iterator.hasNext()) { + logStartOffset = Optional.of(iterator.next().startOffset()); + } + earliestEpochOpt = leaderEpochCache.nextEpoch(earliestEpochOpt.getAsInt()); + } + return logStartOffset.orElseGet(log::localLogStartOffset); + } + + /** + * Submit a remote log read task. + * This method returns immediately. The read operation is executed in a thread pool. + * The callback will be called when the task is done. + * + * @throws java.util.concurrent.RejectedExecutionException if the task cannot be accepted for execution (task queue is full) + */ + public Future asyncRead(RemoteStorageFetchInfo fetchInfo, Consumer callback) { + return remoteStorageReaderThreadPool.submit( + new RemoteLogReader(fetchInfo, this, callback, brokerTopicStats, rlmFetchQuotaManager, remoteReadTimer)); + } + + void doHandleLeaderPartition(TopicIdPartition topicPartition, Boolean remoteLogCopyDisable) { + RLMTaskWithFuture followerRLMTaskWithFuture = followerRLMTasks.remove(topicPartition); + if (followerRLMTaskWithFuture != null) { + LOGGER.info("Cancelling the follower task: {}", followerRLMTaskWithFuture.rlmTask); + followerRLMTaskWithFuture.cancel(); + } + + // Only create copy task when remoteLogCopyDisable is disabled + if (!remoteLogCopyDisable) { + leaderCopyRLMTasks.computeIfAbsent(topicPartition, topicIdPartition -> { + RLMCopyTask task = new RLMCopyTask(topicIdPartition, this.rlmConfig.remoteLogMetadataCustomMetadataMaxBytes()); + // set this upfront when it is getting initialized instead of doing it after scheduling. + LOGGER.info("Created a new copy task: {} and getting scheduled", task); + ScheduledFuture future = rlmCopyThreadPool.scheduleWithFixedDelay(task, 0, delayInMs, TimeUnit.MILLISECONDS); + return new RLMTaskWithFuture(task, future); + }); + } + + leaderExpirationRLMTasks.computeIfAbsent(topicPartition, topicIdPartition -> { + RLMExpirationTask task = new RLMExpirationTask(topicIdPartition); + LOGGER.info("Created a new expiration task: {} and getting scheduled", task); + ScheduledFuture future = rlmExpirationThreadPool.scheduleWithFixedDelay(task, 0, delayInMs, TimeUnit.MILLISECONDS); + return new RLMTaskWithFuture(task, future); + }); + } + + void doHandleFollowerPartition(TopicIdPartition topicPartition) { + RLMTaskWithFuture copyRLMTaskWithFuture = leaderCopyRLMTasks.remove(topicPartition); + if (copyRLMTaskWithFuture != null) { + LOGGER.info("Cancelling the copy task: {}", copyRLMTaskWithFuture.rlmTask); + copyRLMTaskWithFuture.cancel(); + } + + RLMTaskWithFuture expirationRLMTaskWithFuture = leaderExpirationRLMTasks.remove(topicPartition); + if (expirationRLMTaskWithFuture != null) { + LOGGER.info("Cancelling the expiration task: {}", expirationRLMTaskWithFuture.rlmTask); + expirationRLMTaskWithFuture.cancel(); + } + + followerRLMTasks.computeIfAbsent(topicPartition, topicIdPartition -> { + RLMFollowerTask task = new RLMFollowerTask(topicIdPartition); + LOGGER.info("Created a new follower task: {} and getting scheduled", task); + ScheduledFuture future = followerThreadPool.scheduleWithFixedDelay(task, 0, delayInMs, TimeUnit.MILLISECONDS); + return new RLMTaskWithFuture(task, future); + }); + } + + static class RLMTaskWithFuture { + + private final RLMTask rlmTask; + private final Future future; + + RLMTaskWithFuture(RLMTask rlmTask, Future future) { + this.rlmTask = rlmTask; + this.future = future; + } + + public void cancel() { + rlmTask.cancel(); + try { + future.cancel(true); + } catch (Exception ex) { + LOGGER.error("Error occurred while canceling the task: {}", rlmTask, ex); + } + } + + } + + /** + * Closes and releases all the resources like RemoterStorageManager and RemoteLogMetadataManager. + */ + public void close() { + synchronized (this) { + if (!closed) { + leaderCopyRLMTasks.values().forEach(RLMTaskWithFuture::cancel); + leaderExpirationRLMTasks.values().forEach(RLMTaskWithFuture::cancel); + followerRLMTasks.values().forEach(RLMTaskWithFuture::cancel); + Utils.closeQuietly(remoteLogStorageManager, "RemoteLogStorageManager"); + Utils.closeQuietly(remoteLogMetadataManager, "RemoteLogMetadataManager"); + Utils.closeQuietly(indexCache, "RemoteIndexCache"); + + rlmCopyThreadPool.close(); + rlmExpirationThreadPool.close(); + followerThreadPool.close(); + try { + shutdownAndAwaitTermination(remoteStorageReaderThreadPool, "RemoteStorageReaderThreadPool", 10, TimeUnit.SECONDS); + } finally { + removeMetrics(); + } + + leaderCopyRLMTasks.clear(); + leaderExpirationRLMTasks.clear(); + followerRLMTasks.clear(); + closed = true; + } + } + } + + private static void shutdownAndAwaitTermination(ExecutorService executor, String poolName, long timeout, TimeUnit timeUnit) { + LOGGER.info("Shutting down {} executor", poolName); + ThreadUtils.shutdownExecutorServiceQuietly(executor, timeout, timeUnit); + LOGGER.info("{} executor shutdown completed", poolName); + } + + //Visible for testing + static ByteBuffer epochEntriesAsByteBuffer(List epochEntries) throws IOException { + ByteArrayOutputStream stream = new ByteArrayOutputStream(); + try (BufferedWriter writer = new BufferedWriter(new OutputStreamWriter(stream, StandardCharsets.UTF_8))) { + CheckpointFile.CheckpointWriteBuffer writeBuffer = + new CheckpointFile.CheckpointWriteBuffer<>(writer, 0, LeaderEpochCheckpointFile.FORMATTER); + writeBuffer.write(epochEntries); + writer.flush(); + } + + return ByteBuffer.wrap(stream.toByteArray()); + } + + private void removeRemoteTopicPartitionMetrics(TopicIdPartition topicIdPartition) { + String topic = topicIdPartition.topic(); + if (!brokerTopicStats.isTopicStatsExisted(topicIdPartition.topic())) { + // The topic metrics are already removed, removing this topic key from broker-level metrics + brokerTopicStats.removeBrokerLevelRemoteCopyLagBytes(topic); + brokerTopicStats.removeBrokerLevelRemoteCopyLagSegments(topic); + brokerTopicStats.removeBrokerLevelRemoteDeleteLagBytes(topic); + brokerTopicStats.removeBrokerLevelRemoteDeleteLagSegments(topic); + brokerTopicStats.removeBrokerLevelRemoteLogMetadataCount(topic); + brokerTopicStats.removeBrokerLevelRemoteLogSizeComputationTime(topic); + brokerTopicStats.removeBrokerLevelRemoteLogSizeBytes(topic); + } else { + int partition = topicIdPartition.partition(); + // remove the partition metric values and update the broker-level metrics + brokerTopicStats.removeRemoteCopyLagBytes(topic, partition); + brokerTopicStats.removeRemoteCopyLagSegments(topic, partition); + brokerTopicStats.removeRemoteDeleteLagBytes(topic, partition); + brokerTopicStats.removeRemoteDeleteLagSegments(topic, partition); + brokerTopicStats.removeRemoteLogMetadataCount(topic, partition); + brokerTopicStats.removeRemoteLogSizeComputationTime(topic, partition); + brokerTopicStats.removeRemoteLogSizeBytes(topic, partition); + } + } + + //Visible for testing + RLMTaskWithFuture leaderCopyTask(TopicIdPartition partition) { + return leaderCopyRLMTasks.get(partition); + } + RLMTaskWithFuture leaderExpirationTask(TopicIdPartition partition) { + return leaderExpirationRLMTasks.get(partition); + } + RLMTaskWithFuture followerTask(TopicIdPartition partition) { + return followerRLMTasks.get(partition); + } + + static class RLMScheduledThreadPool { + + private static final Logger LOGGER = LoggerFactory.getLogger(RLMScheduledThreadPool.class); + private final String threadPoolName; + private final String threadNamePattern; + private final ScheduledThreadPoolExecutor scheduledThreadPool; + + public RLMScheduledThreadPool(int poolSize, String threadPoolName, String threadNamePattern) { + this.threadPoolName = threadPoolName; + this.threadNamePattern = threadNamePattern; + scheduledThreadPool = createPool(poolSize); + } + + public void setCorePoolSize(int newSize) { + scheduledThreadPool.setCorePoolSize(newSize); + } + + public int getCorePoolSize() { + return scheduledThreadPool.getCorePoolSize(); + } + + private ScheduledThreadPoolExecutor createPool(int poolSize) { + ThreadFactory threadFactory = ThreadUtils.createThreadFactory(threadNamePattern, true, + (t, e) -> LOGGER.error("Uncaught exception in thread '{}':", t.getName(), e)); + ScheduledThreadPoolExecutor threadPool = new ScheduledThreadPoolExecutor(poolSize); + threadPool.setRemoveOnCancelPolicy(true); + threadPool.setExecuteExistingDelayedTasksAfterShutdownPolicy(false); + threadPool.setContinueExistingPeriodicTasksAfterShutdownPolicy(false); + threadPool.setThreadFactory(threadFactory); + return threadPool; + } + + public Double getIdlePercent() { + return 1 - (double) scheduledThreadPool.getActiveCount() / (double) scheduledThreadPool.getCorePoolSize(); + } + + public ScheduledFuture scheduleWithFixedDelay(Runnable runnable, long initialDelay, long delay, TimeUnit timeUnit) { + LOGGER.info("Scheduling runnable {} with initial delay: {}, fixed delay: {}", runnable, initialDelay, delay); + return scheduledThreadPool.scheduleWithFixedDelay(runnable, initialDelay, delay, timeUnit); + } + + public void close() { + shutdownAndAwaitTermination(scheduledThreadPool, threadPoolName, 10, TimeUnit.SECONDS); + } + } + + // Visible for testing + public static class RetentionSizeData { + private final long retentionSize; + private final long remainingBreachedSize; + + public RetentionSizeData(long retentionSize, long remainingBreachedSize) { + if (retentionSize < 0) + throw new IllegalArgumentException("retentionSize should be non negative, but it is " + retentionSize); + + if (remainingBreachedSize <= 0) { + throw new IllegalArgumentException("remainingBreachedSize should be more than zero, but it is " + remainingBreachedSize); + } + + this.retentionSize = retentionSize; + this.remainingBreachedSize = remainingBreachedSize; + } + } + + // Visible for testing + public static class RetentionTimeData { + + private final long retentionMs; + private final long cleanupUntilMs; + + public RetentionTimeData(long retentionMs, long cleanupUntilMs) { + if (retentionMs < 0) + throw new IllegalArgumentException("retentionMs should be non negative, but it is " + retentionMs); + + if (cleanupUntilMs < 0) + throw new IllegalArgumentException("cleanupUntilMs should be non negative, but it is " + cleanupUntilMs); + + this.retentionMs = retentionMs; + this.cleanupUntilMs = cleanupUntilMs; + } + } + + // Visible for testing + static class EnrichedLogSegment { + private final LogSegment logSegment; + private final long nextSegmentOffset; + + public EnrichedLogSegment(LogSegment logSegment, + long nextSegmentOffset) { + this.logSegment = logSegment; + this.nextSegmentOffset = nextSegmentOffset; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + EnrichedLogSegment that = (EnrichedLogSegment) o; + return nextSegmentOffset == that.nextSegmentOffset && Objects.equals(logSegment, that.logSegment); + } + + @Override + public int hashCode() { + return Objects.hash(logSegment, nextSegmentOffset); + } + + @Override + public String toString() { + return "EnrichedLogSegment{" + + "logSegment=" + logSegment + + ", nextSegmentOffset=" + nextSegmentOffset + + '}'; + } + } + + static class EnrichedRecordBatch { + private final RecordBatch batch; + private final int skippedBytes; + + public EnrichedRecordBatch(RecordBatch batch, int skippedBytes) { + this.batch = batch; + this.skippedBytes = skippedBytes; + } + } +} diff --git a/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java b/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java new file mode 100644 index 0000000000000..493139248e642 --- /dev/null +++ b/core/src/main/java/kafka/log/remote/RemoteLogOffsetReader.java @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Optional; +import java.util.concurrent.Callable; +import java.util.function.Consumer; +import java.util.function.Supplier; + +import scala.Option; +import scala.jdk.javaapi.OptionConverters; + +public class RemoteLogOffsetReader implements Callable { + private static final Logger LOGGER = LoggerFactory.getLogger(RemoteLogOffsetReader.class); + private final RemoteLogManager rlm; + private final TopicPartition tp; + private final long timestamp; + private final long startingOffset; + private final LeaderEpochFileCache leaderEpochCache; + private final Supplier> searchInLocalLog; + private final Consumer callback; + + public RemoteLogOffsetReader(RemoteLogManager rlm, + TopicPartition tp, + long timestamp, + long startingOffset, + LeaderEpochFileCache leaderEpochCache, + Supplier> searchInLocalLog, + Consumer callback) { + this.rlm = rlm; + this.tp = tp; + this.timestamp = timestamp; + this.startingOffset = startingOffset; + this.leaderEpochCache = leaderEpochCache; + this.searchInLocalLog = () -> OptionConverters.toJava(searchInLocalLog.get()); + this.callback = callback; + } + + @Override + public Void call() throws Exception { + OffsetResultHolder.FileRecordsOrError result; + try { + // If it is not found in remote storage, then search in the local storage starting with local log start offset. + Optional timestampAndOffsetOpt = + rlm.findOffsetByTimestamp(tp, timestamp, startingOffset, leaderEpochCache).or(searchInLocalLog); + result = new OffsetResultHolder.FileRecordsOrError(Optional.empty(), timestampAndOffsetOpt); + } catch (Exception e) { + // NOTE: All the exceptions from the secondary storage are catched instead of only the KafkaException. + LOGGER.error("Error occurred while reading the remote log offset for {}", tp, e); + result = new OffsetResultHolder.FileRecordsOrError(Optional.of(e), Optional.empty()); + } + callback.accept(result); + return null; + } +} diff --git a/core/src/main/java/kafka/log/remote/RemoteLogReader.java b/core/src/main/java/kafka/log/remote/RemoteLogReader.java new file mode 100644 index 0000000000000..369417fa84628 --- /dev/null +++ b/core/src/main/java/kafka/log/remote/RemoteLogReader.java @@ -0,0 +1,82 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import org.apache.kafka.common.errors.OffsetOutOfRangeException; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManager; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; + +import com.yammer.metrics.core.Timer; + +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.Optional; +import java.util.concurrent.Callable; +import java.util.function.Consumer; + +public class RemoteLogReader implements Callable { + private static final Logger LOGGER = LoggerFactory.getLogger(RemoteLogReader.class); + private final RemoteStorageFetchInfo fetchInfo; + private final RemoteLogManager rlm; + private final BrokerTopicStats brokerTopicStats; + private final Consumer callback; + private final RLMQuotaManager quotaManager; + private final Timer remoteReadTimer; + + public RemoteLogReader(RemoteStorageFetchInfo fetchInfo, + RemoteLogManager rlm, + Consumer callback, + BrokerTopicStats brokerTopicStats, + RLMQuotaManager quotaManager, + Timer remoteReadTimer) { + this.fetchInfo = fetchInfo; + this.rlm = rlm; + this.brokerTopicStats = brokerTopicStats; + this.callback = callback; + this.brokerTopicStats.topicStats(fetchInfo.topicPartition.topic()).remoteFetchRequestRate().mark(); + this.brokerTopicStats.allTopicsStats().remoteFetchRequestRate().mark(); + this.quotaManager = quotaManager; + this.remoteReadTimer = remoteReadTimer; + } + + @Override + public Void call() { + RemoteLogReadResult result; + try { + LOGGER.debug("Reading records from remote storage for topic partition {}", fetchInfo.topicPartition); + FetchDataInfo fetchDataInfo = remoteReadTimer.time(() -> rlm.read(fetchInfo)); + brokerTopicStats.topicStats(fetchInfo.topicPartition.topic()).remoteFetchBytesRate().mark(fetchDataInfo.records.sizeInBytes()); + brokerTopicStats.allTopicsStats().remoteFetchBytesRate().mark(fetchDataInfo.records.sizeInBytes()); + result = new RemoteLogReadResult(Optional.of(fetchDataInfo), Optional.empty()); + } catch (OffsetOutOfRangeException e) { + result = new RemoteLogReadResult(Optional.empty(), Optional.of(e)); + } catch (Exception e) { + brokerTopicStats.topicStats(fetchInfo.topicPartition.topic()).failedRemoteFetchRequestRate().mark(); + brokerTopicStats.allTopicsStats().failedRemoteFetchRequestRate().mark(); + LOGGER.error("Error occurred while reading the remote data for {}", fetchInfo.topicPartition, e); + result = new RemoteLogReadResult(Optional.empty(), Optional.of(e)); + } + LOGGER.debug("Finished reading records from remote storage for topic partition {}", fetchInfo.topicPartition); + quotaManager.record(result.fetchDataInfo.map(fetchDataInfo -> fetchDataInfo.records.sizeInBytes()).orElse(0)); + callback.accept(result); + return null; + } +} diff --git a/core/src/main/java/kafka/server/ClientRequestQuotaManager.java b/core/src/main/java/kafka/server/ClientRequestQuotaManager.java index 3f2398b8358f5..f93c50b27830f 100644 --- a/core/src/main/java/kafka/server/ClientRequestQuotaManager.java +++ b/core/src/main/java/kafka/server/ClientRequestQuotaManager.java @@ -19,7 +19,6 @@ import kafka.network.RequestChannel; import org.apache.kafka.common.MetricName; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.metrics.QuotaViolationException; import org.apache.kafka.common.metrics.Sensor; @@ -27,14 +26,15 @@ import org.apache.kafka.common.utils.Time; import org.apache.kafka.server.config.ClientQuotaManagerConfig; import org.apache.kafka.server.quota.ClientQuotaCallback; -import org.apache.kafka.server.quota.ClientQuotaManager; import org.apache.kafka.server.quota.QuotaType; import org.apache.kafka.server.quota.QuotaUtils; -import java.util.Map; import java.util.Optional; import java.util.concurrent.TimeUnit; +import scala.jdk.javaapi.CollectionConverters; +import scala.jdk.javaapi.OptionConverters; + @SuppressWarnings("this-escape") public class ClientRequestQuotaManager extends ClientQuotaManager { // Since exemptSensor is for all clients and has a constant name, we do not expire exemptSensor and only @@ -49,14 +49,9 @@ public class ClientRequestQuotaManager extends ClientQuotaManager { // Visible for testing private final Sensor exemptSensor; - public ClientRequestQuotaManager( - ClientQuotaManagerConfig config, - Metrics metrics, Time time, - String threadNamePrefix, - Optional> quotaCallbackPlugin - ) { - super(config, metrics, QuotaType.REQUEST, time, threadNamePrefix, quotaCallbackPlugin); - this.maxThrottleTimeMs = TimeUnit.SECONDS.toMillis(config.quotaWindowSizeSeconds()); + public ClientRequestQuotaManager(ClientQuotaManagerConfig config, Metrics metrics, Time time, String threadNamePrefix, Optional quotaCallback) { + super(config, metrics, QuotaType.REQUEST, time, threadNamePrefix, OptionConverters.toScala(quotaCallback)); + this.maxThrottleTimeMs = TimeUnit.SECONDS.toMillis(config.quotaWindowSizeSeconds); this.metrics = metrics; this.exemptMetricName = metrics.metricName("exempt-request-time", QuotaType.REQUEST.toString(), "Tracking exempt-request-time utilization percentage"); exemptSensor = getOrCreateSensor(EXEMPT_SENSOR_NAME, DEFAULT_INACTIVE_EXEMPT_SENSOR_EXPIRATION_TIME_SECONDS, sensor -> sensor.add(exemptMetricName, new Rate())); @@ -71,8 +66,8 @@ private void recordExempt(double value) { } /** - * Records that a user/clientId changed request processing time being throttled. If the quota has been violated, return - * throttle time in milliseconds. Subclasses may override throttle time calculation. + * Records that a user/clientId changed request processing time being throttled. If quota has been violated, return + * throttle time in milliseconds. Throttle time calculation may be overridden by sub-classes. * @param request client request * @return Number of milliseconds to throttle in case of quota violation. Zero otherwise */ @@ -102,8 +97,8 @@ public long throttleTime(QuotaViolationException e, long timeMs) { } @Override - public MetricName clientQuotaMetricName(Map quotaMetricTags) { - return metrics.metricName("request-time", QuotaType.REQUEST.toString(), "Tracking request-time per user/client-id", quotaMetricTags); + public MetricName clientQuotaMetricName(scala.collection.immutable.Map quotaMetricTags) { + return metrics.metricName("request-time", QuotaType.REQUEST.toString(), "Tracking request-time per user/client-id", CollectionConverters.asJava(quotaMetricTags)); } private double nanosToPercentage(long nanos) { diff --git a/core/src/main/java/kafka/server/NetworkUtils.java b/core/src/main/java/kafka/server/NetworkUtils.java index 5f084bfd98904..10236c0ce74a5 100644 --- a/core/src/main/java/kafka/server/NetworkUtils.java +++ b/core/src/main/java/kafka/server/NetworkUtils.java @@ -31,7 +31,7 @@ import org.apache.kafka.common.utils.LogContext; import org.apache.kafka.common.utils.Time; -import java.util.Map; +import java.util.Collections; public class NetworkUtils { @@ -62,7 +62,7 @@ public static NetworkClient buildNetworkClient(String prefix, metrics, time, metricGroupPrefix, - Map.of(), + Collections.emptyMap(), false, channelBuilder, logContext diff --git a/core/src/main/java/kafka/server/QuotaFactory.java b/core/src/main/java/kafka/server/QuotaFactory.java index b672be4265053..57e9139e28583 100644 --- a/core/src/main/java/kafka/server/QuotaFactory.java +++ b/core/src/main/java/kafka/server/QuotaFactory.java @@ -17,20 +17,17 @@ package kafka.server; import org.apache.kafka.common.TopicPartition; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.common.utils.Utils; import org.apache.kafka.server.config.ClientQuotaManagerConfig; import org.apache.kafka.server.config.QuotaConfig; import org.apache.kafka.server.config.ReplicationQuotaManagerConfig; import org.apache.kafka.server.quota.ClientQuotaCallback; -import org.apache.kafka.server.quota.ClientQuotaManager; -import org.apache.kafka.server.quota.ControllerMutationQuotaManager; import org.apache.kafka.server.quota.QuotaType; import java.util.Optional; +import scala.Option; public class QuotaFactory { @@ -51,60 +48,87 @@ public void record(long value) { } }; - public record QuotaManagers(ClientQuotaManager fetch, - ClientQuotaManager produce, - ClientRequestQuotaManager request, - ControllerMutationQuotaManager controllerMutation, - ReplicationQuotaManager leader, - ReplicationQuotaManager follower, - ReplicationQuotaManager alterLogDirs, - Optional> clientQuotaCallbackPlugin) { + public static class QuotaManagers { + private final ClientQuotaManager fetch; + private final ClientQuotaManager produce; + private final ClientRequestQuotaManager request; + private final ControllerMutationQuotaManager controllerMutation; + private final ReplicationQuotaManager leader; + private final ReplicationQuotaManager follower; + private final ReplicationQuotaManager alterLogDirs; + private final Optional clientQuotaCallback; + + public QuotaManagers(ClientQuotaManager fetch, ClientQuotaManager produce, ClientRequestQuotaManager request, + ControllerMutationQuotaManager controllerMutation, ReplicationQuotaManager leader, + ReplicationQuotaManager follower, ReplicationQuotaManager alterLogDirs, + Optional clientQuotaCallback) { + this.fetch = fetch; + this.produce = produce; + this.request = request; + this.controllerMutation = controllerMutation; + this.leader = leader; + this.follower = follower; + this.alterLogDirs = alterLogDirs; + this.clientQuotaCallback = clientQuotaCallback; + } + + public ClientQuotaManager fetch() { + return fetch; + } + + public ClientQuotaManager produce() { + return produce; + } + + public ClientRequestQuotaManager request() { + return request; + } + + public ControllerMutationQuotaManager controllerMutation() { + return controllerMutation; + } + + public ReplicationQuotaManager leader() { + return leader; + } + + public ReplicationQuotaManager follower() { + return follower; + } + + public ReplicationQuotaManager alterLogDirs() { + return alterLogDirs; + } + + public Optional clientQuotaCallback() { + return clientQuotaCallback; + } public void shutdown() { fetch.shutdown(); produce.shutdown(); request.shutdown(); controllerMutation.shutdown(); - clientQuotaCallbackPlugin.ifPresent(plugin -> Utils.closeQuietly(plugin, "client quota callback plugin")); + clientQuotaCallback.ifPresent(ClientQuotaCallback::close); } } - public static QuotaManagers instantiate( - KafkaConfig cfg, - Metrics metrics, - Time time, - String threadNamePrefix, - String role - ) { - Optional> clientQuotaCallbackPlugin = createClientQuotaCallback(cfg, metrics, role); + public static QuotaManagers instantiate(KafkaConfig cfg, Metrics metrics, Time time, String threadNamePrefix) { + ClientQuotaCallback clientQuotaCallback = cfg.getConfiguredInstance( + QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, ClientQuotaCallback.class); return new QuotaManagers( - new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.FETCH, time, threadNamePrefix, clientQuotaCallbackPlugin), - new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.PRODUCE, time, threadNamePrefix, clientQuotaCallbackPlugin), - new ClientRequestQuotaManager(clientConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin), - new ControllerMutationQuotaManager(clientControllerMutationConfig(cfg), metrics, time, threadNamePrefix, clientQuotaCallbackPlugin), + new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.FETCH, time, threadNamePrefix, Option.apply(clientQuotaCallback)), + new ClientQuotaManager(clientConfig(cfg), metrics, QuotaType.PRODUCE, time, threadNamePrefix, Option.apply(clientQuotaCallback)), + new ClientRequestQuotaManager(clientConfig(cfg), metrics, time, threadNamePrefix, Optional.ofNullable(clientQuotaCallback)), + new ControllerMutationQuotaManager(clientControllerMutationConfig(cfg), metrics, time, threadNamePrefix, Option.apply(clientQuotaCallback)), new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.LEADER_REPLICATION, time), new ReplicationQuotaManager(replicationConfig(cfg), metrics, QuotaType.FOLLOWER_REPLICATION, time), new ReplicationQuotaManager(alterLogDirsReplicationConfig(cfg), metrics, QuotaType.ALTER_LOG_DIRS_REPLICATION, time), - clientQuotaCallbackPlugin + Optional.ofNullable(clientQuotaCallback) ); } - private static Optional> createClientQuotaCallback( - KafkaConfig cfg, - Metrics metrics, - String role - ) { - ClientQuotaCallback clientQuotaCallback = cfg.getConfiguredInstance( - QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, ClientQuotaCallback.class); - return clientQuotaCallback == null ? Optional.empty() : Optional.of(Plugin.wrapInstance( - clientQuotaCallback, - metrics, - QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, - "role", role - )); - } - private static ClientQuotaManagerConfig clientConfig(KafkaConfig cfg) { return new ClientQuotaManagerConfig( cfg.quotaConfig().numQuotaSamples(), @@ -132,4 +156,4 @@ private static ReplicationQuotaManagerConfig alterLogDirsReplicationConfig(Kafka cfg.quotaConfig().alterLogDirsReplicationQuotaWindowSizeSeconds() ); } -} +} \ No newline at end of file diff --git a/core/src/main/java/kafka/server/TierStateMachine.java b/core/src/main/java/kafka/server/TierStateMachine.java index 9d8dcafd20382..d316e70da2e3e 100644 --- a/core/src/main/java/kafka/server/TierStateMachine.java +++ b/core/src/main/java/kafka/server/TierStateMachine.java @@ -18,6 +18,8 @@ package kafka.server; import kafka.cluster.Partition; +import kafka.log.UnifiedLog; +import kafka.log.remote.RemoteLogManager; import org.apache.kafka.common.KafkaException; import org.apache.kafka.common.TopicPartition; @@ -26,20 +28,14 @@ import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.utils.Utils; -import org.apache.kafka.server.LeaderEndPoint; -import org.apache.kafka.server.PartitionFetchState; -import org.apache.kafka.server.ReplicaState; import org.apache.kafka.server.common.CheckpointFile; import org.apache.kafka.server.common.OffsetAndEpoch; -import org.apache.kafka.server.log.remote.storage.RemoteLogManager; import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; import org.apache.kafka.server.log.remote.storage.RemoteStorageException; import org.apache.kafka.server.log.remote.storage.RemoteStorageManager; -import org.apache.kafka.server.log.remote.storage.RemoteStorageNotReadyException; import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; import org.apache.kafka.storage.internals.log.EpochEntry; import org.apache.kafka.storage.internals.log.LogFileUtils; -import org.apache.kafka.storage.internals.log.UnifiedLog; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -57,7 +53,9 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.Optional; + +import scala.Option; +import scala.jdk.javaapi.CollectionConverters; import static org.apache.kafka.storage.internals.log.LogStartOffsetIncrementReason.LeaderOffsetIncremented; @@ -97,7 +95,7 @@ PartitionFetchState start(TopicPartition topicPartition, PartitionFetchState currentFetchState, PartitionData fetchPartitionData) throws Exception { OffsetAndEpoch epochAndLeaderLocalStartOffset = leader.fetchEarliestLocalOffset(topicPartition, currentFetchState.currentLeaderEpoch()); - int epoch = epochAndLeaderLocalStartOffset.epoch(); + int epoch = epochAndLeaderLocalStartOffset.leaderEpoch(); long leaderLocalStartOffset = epochAndLeaderLocalStartOffset.offset(); long offsetToFetch; @@ -124,8 +122,8 @@ PartitionFetchState start(TopicPartition topicPartition, long initialLag = leaderEndOffset - offsetToFetch; - return new PartitionFetchState(currentFetchState.topicId(), offsetToFetch, Optional.of(initialLag), currentFetchState.currentLeaderEpoch(), - ReplicaState.FETCHING, unifiedLog.latestEpoch()); + return PartitionFetchState.apply(currentFetchState.topicId(), offsetToFetch, Option.apply(initialLag), currentFetchState.currentLeaderEpoch(), + Fetching$.MODULE$, unifiedLog.latestEpoch()); } @@ -137,12 +135,12 @@ private OffsetForLeaderEpochResponseData.EpochEndOffset fetchEarlierEpochEndOffs // Find the end-offset for the epoch earlier to the given epoch from the leader Map partitionsWithEpochs = new HashMap<>(); partitionsWithEpochs.put(partition, new OffsetForLeaderEpochRequestData.OffsetForLeaderPartition().setPartition(partition.partition()).setCurrentLeaderEpoch(currentLeaderEpoch).setLeaderEpoch(previousEpoch)); - var epochEndOffset = leader.fetchEpochEndOffsets(partitionsWithEpochs).get(partition); - - if (epochEndOffset == null) { + Option maybeEpochEndOffset = leader.fetchEpochEndOffsets(CollectionConverters.asScala(partitionsWithEpochs)).get(partition); + if (maybeEpochEndOffset.isEmpty()) { throw new KafkaException("No response received for partition: " + partition); } + OffsetForLeaderEpochResponseData.EpochEndOffset epochEndOffset = maybeEpochEndOffset.get(); if (epochEndOffset.errorCode() != Errors.NONE.code()) { throw Errors.forCode(epochEndOffset.errorCode()).exception(); } @@ -152,8 +150,8 @@ private OffsetForLeaderEpochResponseData.EpochEndOffset fetchEarlierEpochEndOffs private List readLeaderEpochCheckpoint(RemoteLogManager rlm, RemoteLogSegmentMetadata remoteLogSegmentMetadata) throws IOException, RemoteStorageException { - try (InputStream inputStream = rlm.storageManager().fetchIndex(remoteLogSegmentMetadata, RemoteStorageManager.IndexType.LEADER_EPOCH); - BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { + InputStream inputStream = rlm.storageManager().fetchIndex(remoteLogSegmentMetadata, RemoteStorageManager.IndexType.LEADER_EPOCH); + try (BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(inputStream, StandardCharsets.UTF_8))) { CheckpointFile.CheckpointReadBuffer readBuffer = new CheckpointFile.CheckpointReadBuffer<>("", bufferedReader, 0, LeaderEpochCheckpointFile.FORMATTER); return readBuffer.read(); } @@ -167,9 +165,8 @@ private void buildProducerSnapshotFile(UnifiedLog unifiedLog, File snapshotFile = LogFileUtils.producerSnapshotFile(unifiedLog.dir(), nextOffset); Path tmpSnapshotFile = Paths.get(snapshotFile.getAbsolutePath() + ".tmp"); // Copy it to snapshot file in atomic manner. - try (InputStream inputStream = rlm.storageManager().fetchIndex(remoteLogSegmentMetadata, RemoteStorageManager.IndexType.PRODUCER_SNAPSHOT)) { - Files.copy(inputStream, tmpSnapshotFile, StandardCopyOption.REPLACE_EXISTING); - } + Files.copy(rlm.storageManager().fetchIndex(remoteLogSegmentMetadata, RemoteStorageManager.IndexType.PRODUCER_SNAPSHOT), + tmpSnapshotFile, StandardCopyOption.REPLACE_EXISTING); Utils.atomicMoveWithFallback(tmpSnapshotFile, snapshotFile.toPath(), false); // Reload producer snapshots. @@ -189,7 +186,7 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, Long leaderLogStartOffset, UnifiedLog unifiedLog) throws IOException, RemoteStorageException { - if (!unifiedLog.remoteLogEnabled()) { + if (!unifiedLog.remoteStorageSystemEnable() || !unifiedLog.config().remoteStorageEnable()) { // If the tiered storage is not enabled throw an exception back so that it will retry until the tiered storage // is set as expected. throw new RemoteStorageException("Couldn't build the state from remote store for partition " + topicPartition + ", as remote log storage is not yet enabled"); @@ -231,10 +228,6 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, } } - if (!rlm.isPartitionReady(topicPartition)) { - throw new RemoteStorageNotReadyException("RemoteLogManager is not ready for partition: " + topicPartition); - } - RemoteLogSegmentMetadata remoteLogSegmentMetadata = rlm.fetchRemoteLogSegmentMetadata(topicPartition, targetEpoch, previousOffsetToLeaderLocalLogStartOffset) .orElseThrow(() -> buildRemoteStorageException(topicPartition, targetEpoch, currentLeaderEpoch, leaderLocalLogStartOffset, leaderLogStartOffset)); @@ -247,7 +240,7 @@ private Long buildRemoteLogAuxState(TopicPartition topicPartition, // Truncate the existing local log before restoring the leader epoch cache and producer snapshots. Partition partition = replicaMgr.getPartitionOrException(topicPartition); - partition.truncateFullyAndStartAt(nextOffset, useFutureLog, Optional.of(leaderLogStartOffset)); + partition.truncateFullyAndStartAt(nextOffset, useFutureLog, Option.apply(leaderLogStartOffset)); // Increment start offsets unifiedLog.maybeIncrementLogStartOffset(leaderLogStartOffset, LeaderOffsetIncremented); unifiedLog.maybeIncrementLocalLogStartOffset(nextOffset, LeaderOffsetIncremented); diff --git a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java index e03ab35e90eb4..b4764f8d284d8 100644 --- a/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java +++ b/core/src/main/java/kafka/server/builders/KafkaApisBuilder.java @@ -19,30 +19,28 @@ import kafka.coordinator.transaction.TransactionCoordinator; import kafka.network.RequestChannel; +import kafka.server.ApiVersionManager; import kafka.server.AutoTopicCreationManager; +import kafka.server.DelegationTokenManager; import kafka.server.FetchManager; import kafka.server.ForwardingManager; import kafka.server.KafkaApis; import kafka.server.KafkaConfig; +import kafka.server.MetadataCache; import kafka.server.QuotaFactory.QuotaManagers; import kafka.server.ReplicaManager; +import kafka.server.metadata.ConfigRepository; import kafka.server.share.SharePartitionManager; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.coordinator.group.GroupCoordinator; import org.apache.kafka.coordinator.share.ShareCoordinator; -import org.apache.kafka.metadata.ConfigRepository; -import org.apache.kafka.metadata.MetadataCache; -import org.apache.kafka.security.DelegationTokenManager; -import org.apache.kafka.server.ApiVersionManager; import org.apache.kafka.server.ClientMetricsManager; import org.apache.kafka.server.authorizer.Authorizer; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; -import java.util.Map; +import java.util.Collections; import java.util.Optional; import scala.jdk.javaapi.OptionConverters; @@ -59,7 +57,7 @@ public class KafkaApisBuilder { private ConfigRepository configRepository = null; private MetadataCache metadataCache = null; private Metrics metrics = null; - private Optional> authorizerPlugin = Optional.empty(); + private Optional authorizer = Optional.empty(); private QuotaManagers quotas = null; private FetchManager fetchManager = null; private SharePartitionManager sharePartitionManager = null; @@ -69,8 +67,7 @@ public class KafkaApisBuilder { private DelegationTokenManager tokenManager = null; private ApiVersionManager apiVersionManager = null; private ClientMetricsManager clientMetricsManager = null; - private ShareCoordinator shareCoordinator = null; - private GroupConfigManager groupConfigManager = null; + private Optional shareCoordinator = Optional.empty(); public KafkaApisBuilder setRequestChannel(RequestChannel requestChannel) { this.requestChannel = requestChannel; @@ -97,7 +94,7 @@ public KafkaApisBuilder setTxnCoordinator(TransactionCoordinator txnCoordinator) return this; } - public KafkaApisBuilder setShareCoordinator(ShareCoordinator shareCoordinator) { + public KafkaApisBuilder setShareCoordinator(Optional shareCoordinator) { this.shareCoordinator = shareCoordinator; return this; } @@ -132,8 +129,8 @@ public KafkaApisBuilder setMetrics(Metrics metrics) { return this; } - public KafkaApisBuilder setAuthorizerPlugin(Optional> authorizerPlugin) { - this.authorizerPlugin = authorizerPlugin; + public KafkaApisBuilder setAuthorizer(Optional authorizer) { + this.authorizer = authorizer; return this; } @@ -182,11 +179,6 @@ public KafkaApisBuilder setClientMetricsManager(ClientMetricsManager clientMetri return this; } - public KafkaApisBuilder setGroupConfigManager(GroupConfigManager groupConfigManager) { - this.groupConfigManager = groupConfigManager; - return this; - } - @SuppressWarnings({"CyclomaticComplexity"}) public KafkaApis build() { if (requestChannel == null) throw new RuntimeException("you must set requestChannel"); @@ -194,9 +186,9 @@ public KafkaApis build() { if (replicaManager == null) throw new RuntimeException("You must set replicaManager"); if (groupCoordinator == null) throw new RuntimeException("You must set groupCoordinator"); if (txnCoordinator == null) throw new RuntimeException("You must set txnCoordinator"); - if (shareCoordinator == null) throw new RuntimeException("You must set shareCoordinator"); - if (autoTopicCreationManager == null) throw new RuntimeException("You must set autoTopicCreationManager"); - if (config == null) config = new KafkaConfig(Map.of()); + if (autoTopicCreationManager == null) + throw new RuntimeException("You must set autoTopicCreationManager"); + if (config == null) config = new KafkaConfig(Collections.emptyMap()); if (configRepository == null) throw new RuntimeException("You must set configRepository"); if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); if (metrics == null) throw new RuntimeException("You must set metrics"); @@ -206,21 +198,20 @@ public KafkaApis build() { if (clientMetricsManager == null) throw new RuntimeException("You must set clientMetricsManager"); if (brokerTopicStats == null) brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled()); if (apiVersionManager == null) throw new RuntimeException("You must set apiVersionManager"); - if (groupConfigManager == null) throw new RuntimeException("You must set groupConfigManager"); return new KafkaApis(requestChannel, forwardingManager, replicaManager, groupCoordinator, txnCoordinator, - shareCoordinator, + OptionConverters.toScala(shareCoordinator), autoTopicCreationManager, brokerId, config, configRepository, metadataCache, metrics, - OptionConverters.toScala(authorizerPlugin), + OptionConverters.toScala(authorizer), quotas, fetchManager, sharePartitionManager, @@ -229,7 +220,6 @@ public KafkaApis build() { time, tokenManager, apiVersionManager, - clientMetricsManager, - groupConfigManager); + clientMetricsManager); } } diff --git a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java index 6de61915e8efc..c70ee8f31b0bf 100644 --- a/core/src/main/java/kafka/server/builders/LogManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/LogManagerBuilder.java @@ -18,19 +18,19 @@ package kafka.server.builders; import kafka.log.LogManager; +import kafka.server.metadata.ConfigRepository; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.metadata.ConfigRepository; import org.apache.kafka.server.config.ServerLogConfigs; import org.apache.kafka.server.util.Scheduler; import org.apache.kafka.storage.internals.log.CleanerConfig; -import org.apache.kafka.storage.internals.log.LogCleaner; import org.apache.kafka.storage.internals.log.LogConfig; import org.apache.kafka.storage.internals.log.LogDirFailureChannel; import org.apache.kafka.storage.internals.log.ProducerStateManagerConfig; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import java.io.File; +import java.util.Collections; import java.util.List; import scala.jdk.javaapi.CollectionConverters; @@ -39,7 +39,7 @@ public class LogManagerBuilder { private static final int PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS = 600000; private List logDirs = null; - private List initialOfflineDirs = List.of(); + private List initialOfflineDirs = Collections.emptyList(); private ConfigRepository configRepository = null; private LogConfig initialDefaultConfig = null; private CleanerConfig cleanerConfig = null; @@ -54,6 +54,7 @@ public class LogManagerBuilder { private BrokerTopicStats brokerTopicStats = null; private LogDirFailureChannel logDirFailureChannel = null; private Time time = Time.SYSTEM; + private boolean keepPartitionMetadataFile = true; private boolean remoteStorageSystemEnable = false; private long initialTaskDelayMs = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT; @@ -137,6 +138,11 @@ public LogManagerBuilder setTime(Time time) { return this; } + public LogManagerBuilder setKeepPartitionMetadataFile(boolean keepPartitionMetadataFile) { + this.keepPartitionMetadataFile = keepPartitionMetadataFile; + return this; + } + public LogManagerBuilder setRemoteStorageSystemEnable(boolean remoteStorageSystemEnable) { this.remoteStorageSystemEnable = remoteStorageSystemEnable; return this; @@ -172,9 +178,8 @@ public LogManager build() { brokerTopicStats, logDirFailureChannel, time, + keepPartitionMetadataFile, remoteStorageSystemEnable, - initialTaskDelayMs, - LogCleaner::new - ); + initialTaskDelayMs); } } diff --git a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java index 5426d55a64da3..a431dba15c0ee 100644 --- a/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java +++ b/core/src/main/java/kafka/server/builders/ReplicaManagerBuilder.java @@ -20,22 +20,25 @@ import kafka.log.LogManager; import kafka.server.AlterPartitionManager; import kafka.server.KafkaConfig; +import kafka.server.MetadataCache; import kafka.server.QuotaFactory.QuotaManagers; import kafka.server.ReplicaManager; import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.utils.Time; -import org.apache.kafka.metadata.MetadataCache; import org.apache.kafka.server.DelayedActionQueue; import org.apache.kafka.server.common.DirectoryEventHandler; import org.apache.kafka.server.util.Scheduler; import org.apache.kafka.storage.internals.log.LogDirFailureChannel; import org.apache.kafka.storage.log.metrics.BrokerTopicStats; -import java.util.Map; +import java.util.Collections; +import java.util.concurrent.atomic.AtomicBoolean; import scala.Option; + + public class ReplicaManagerBuilder { private KafkaConfig config = null; private Metrics metrics = null; @@ -99,7 +102,7 @@ public ReplicaManagerBuilder setBrokerTopicStats(BrokerTopicStats brokerTopicSta } public ReplicaManager build() { - if (config == null) config = new KafkaConfig(Map.of()); + if (config == null) config = new KafkaConfig(Collections.emptyMap()); if (logManager == null) throw new RuntimeException("You must set logManager"); if (metadataCache == null) throw new RuntimeException("You must set metadataCache"); if (logDirFailureChannel == null) throw new RuntimeException("You must set logDirFailureChannel"); @@ -120,6 +123,8 @@ public ReplicaManager build() { logDirFailureChannel, alterPartitionManager, brokerTopicStats, + new AtomicBoolean(false), + Option.empty(), Option.empty(), Option.empty(), Option.empty(), diff --git a/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java b/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java index d02fd9d3a7dae..bfadde3bfb081 100644 --- a/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java +++ b/core/src/main/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandler.java @@ -20,6 +20,7 @@ import kafka.network.RequestChannel; import kafka.server.AuthHelper; import kafka.server.KafkaConfig; +import kafka.server.metadata.KRaftMetadataCache; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.InvalidRequestException; @@ -30,23 +31,25 @@ import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.DescribeTopicPartitionsRequest; import org.apache.kafka.common.resource.Resource; -import org.apache.kafka.metadata.MetadataCache; +import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; import java.util.stream.Stream; +import scala.jdk.javaapi.CollectionConverters; + import static org.apache.kafka.common.acl.AclOperation.DESCRIBE; import static org.apache.kafka.common.resource.ResourceType.TOPIC; public class DescribeTopicPartitionsRequestHandler { - MetadataCache metadataCache; + KRaftMetadataCache metadataCache; AuthHelper authHelper; KafkaConfig config; public DescribeTopicPartitionsRequestHandler( - MetadataCache metadataCache, + KRaftMetadataCache metadataCache, AuthHelper authHelper, KafkaConfig config ) { @@ -62,7 +65,7 @@ public DescribeTopicPartitionsResponseData handleDescribeTopicPartitionsRequest( DescribeTopicPartitionsRequestData.Cursor cursor = request.cursor(); String cursorTopicName = cursor != null ? cursor.topicName() : ""; if (fetchAllTopics) { - metadataCache.getAllTopics().forEach(topicName -> { + CollectionConverters.asJavaCollection(metadataCache.getAllTopics()).forEach(topicName -> { if (topicName.compareTo(cursorTopicName) >= 0) { topics.add(topicName); } @@ -95,14 +98,14 @@ public DescribeTopicPartitionsResponseData handleDescribeTopicPartitionsRequest( if (!fetchAllTopics && !isAuthorized) { // We should not return topicId when on unauthorized error, so we return zero uuid. unauthorizedForDescribeTopicMetadata.add(describeTopicPartitionsResponseTopic( - Errors.TOPIC_AUTHORIZATION_FAILED, topicName, Uuid.ZERO_UUID, false, List.of()) + Errors.TOPIC_AUTHORIZATION_FAILED, topicName, Uuid.ZERO_UUID, false, Collections.emptyList()) ); } return isAuthorized; }); - DescribeTopicPartitionsResponseData response = metadataCache.describeTopicResponse( - authorizedTopicsStream.iterator(), + DescribeTopicPartitionsResponseData response = metadataCache.getTopicMetadataForDescribeTopicResponse( + CollectionConverters.asScala(authorizedTopicsStream.iterator()), abstractRequest.context().listenerName, (String topicName) -> topicName.equals(cursorTopicName) ? cursor.partitionIndex() : 0, Math.max(Math.min(config.maxRequestPartitionSizeLimit(), request.responsePartitionLimit()), 1), diff --git a/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java b/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java index 3cb226e06867c..14d1b72c10942 100644 --- a/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java +++ b/core/src/main/java/kafka/server/logger/RuntimeLoggerManager.java @@ -17,6 +17,8 @@ package kafka.server.logger; +import kafka.utils.Log4jController; + import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.common.config.LogLevelConfig; import org.apache.kafka.common.errors.ClusterAuthorizationException; @@ -25,7 +27,6 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterConfigsResource; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.server.logger.LoggingController; import org.slf4j.Logger; @@ -75,14 +76,14 @@ void alterLogLevelConfigs(Collection ops) { String logLevel = op.value(); switch (OpType.forId(op.configOperation())) { case SET: - if (LoggingController.logLevel(loggerName, logLevel)) { + if (Log4jController.logLevel(loggerName, logLevel)) { log.warn("Updated the log level of {} to {}", loggerName, logLevel); } else { log.error("Failed to update the log level of {} to {}", loggerName, logLevel); } break; case DELETE: - if (LoggingController.unsetLogLevel(loggerName)) { + if (Log4jController.unsetLogLevel(loggerName)) { log.warn("Unset the log level of {}", loggerName); } else { log.error("Failed to unset the log level of {}", loggerName); @@ -105,12 +106,12 @@ void validateResourceNameIsNodeId(String resourceName) { } if (requestId != nodeId) { throw new InvalidRequestException("Unexpected node id. Expected " + nodeId + - ", but received " + requestId); + ", but received " + nodeId); } } void validateLoggerNameExists(String loggerName) { - if (!LoggingController.loggerExists(loggerName)) { + if (!Log4jController.loggerExists(loggerName)) { throw new InvalidConfigurationException("Logger " + loggerName + " does not exist!"); } } @@ -130,9 +131,9 @@ void validateLogLevelConfigs(Collection ops) { break; case DELETE: validateLoggerNameExists(loggerName); - if (loggerName.equals(LoggingController.ROOT_LOGGER)) { + if (loggerName.equals(Log4jController.ROOT_LOGGER())) { throw new InvalidRequestException("Removing the log level of the " + - LoggingController.ROOT_LOGGER + " logger is not allowed"); + Log4jController.ROOT_LOGGER() + " logger is not allowed"); } break; case APPEND: diff --git a/core/src/main/java/kafka/server/share/DelayedShareFetch.java b/core/src/main/java/kafka/server/share/DelayedShareFetch.java index 969029a6ea582..9bab9818c0706 100644 --- a/core/src/main/java/kafka/server/share/DelayedShareFetch.java +++ b/core/src/main/java/kafka/server/share/DelayedShareFetch.java @@ -17,57 +17,30 @@ package kafka.server.share; import kafka.cluster.Partition; +import kafka.server.LogReadResult; import kafka.server.QuotaFactory; import kafka.server.ReplicaManager; import org.apache.kafka.common.TopicIdPartition; -import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.errors.KafkaStorageException; -import org.apache.kafka.common.errors.NotLeaderOrFollowerException; -import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.requests.FetchRequest; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.raft.errors.NotLeaderException; -import org.apache.kafka.server.LogReadResult; -import org.apache.kafka.server.metrics.KafkaMetricsGroup; import org.apache.kafka.server.purgatory.DelayedOperation; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; -import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey; -import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.fetch.ShareFetchPartitionData; -import org.apache.kafka.server.share.metrics.ShareGroupMetrics; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; -import org.apache.kafka.server.util.timer.TimerTask; -import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogOffsetSnapshot; -import org.apache.kafka.storage.internals.log.RemoteLogReadResult; -import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; - -import com.yammer.metrics.core.Meter; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.ArrayList; +import java.util.Collections; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; -import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.OptionalInt; -import java.util.OptionalLong; import java.util.Set; -import java.util.concurrent.CompletableFuture; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.Lock; import java.util.function.BiConsumer; import java.util.stream.Collectors; @@ -77,8 +50,6 @@ import scala.jdk.javaapi.CollectionConverters; import scala.runtime.BoxedUnit; -import static kafka.server.share.PendingRemoteFetches.RemoteFetch; - /** * A delayed share fetch operation has been introduced in case there is a share fetch request which cannot be completed instantaneously. */ @@ -86,278 +57,126 @@ public class DelayedShareFetch extends DelayedOperation { private static final Logger log = LoggerFactory.getLogger(DelayedShareFetch.class); - private static final String EXPIRES_PER_SEC = "ExpiresPerSec"; - private final ShareFetch shareFetch; private final ReplicaManager replicaManager; private final BiConsumer exceptionHandler; - private final PartitionMaxBytesStrategy partitionMaxBytesStrategy; - private final ShareGroupMetrics shareGroupMetrics; - private final Time time; // The topic partitions that need to be completed for the share fetch request are given by sharePartitions. // sharePartitions is a subset of shareFetchData. The order of insertion/deletion of entries in sharePartitions is important. private final LinkedHashMap sharePartitions; - /** - * Metric for the rate of expired delayed fetch requests. - */ - private final Meter expiredRequestMeter; - /** - * fetchId serves as a token while acquiring/releasing share partition's fetch lock. - */ - private final Uuid fetchId; - // Tracks the start time to acquire any share partition for a fetch request. - private long acquireStartTimeMs; - private LinkedHashMap partitionsAcquired; - private LinkedHashMap localPartitionsAlreadyFetched; - private Optional pendingRemoteFetchesOpt; - private Optional remoteStorageFetchException; - private final AtomicBoolean outsidePurgatoryCallbackLock; - private final long remoteFetchMaxWaitMs; + private LinkedHashMap partitionsAcquired; + private LinkedHashMap partitionsAlreadyFetched; - /** - * This function constructs an instance of delayed share fetch operation for completing share fetch - * requests instantaneously or with delay. - * - * @param shareFetch The share fetch parameters of the share fetch request. - * @param replicaManager The replica manager instance used to read from log/complete the request. - * @param exceptionHandler The handler to complete share fetch requests with exception. - * @param sharePartitions The share partitions referenced in the share fetch request. - * @param shareGroupMetrics The share group metrics to record the metrics. - * @param time The system time. - * @param remoteFetchMaxWaitMs The max wait time for a share fetch request having remote storage fetch. - */ - public DelayedShareFetch( + DelayedShareFetch( ShareFetch shareFetch, ReplicaManager replicaManager, BiConsumer exceptionHandler, - LinkedHashMap sharePartitions, - ShareGroupMetrics shareGroupMetrics, - Time time, - long remoteFetchMaxWaitMs - ) { - this(shareFetch, - replicaManager, - exceptionHandler, - sharePartitions, - PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM), - shareGroupMetrics, - time, - Optional.empty(), - Uuid.randomUuid(), - remoteFetchMaxWaitMs - ); - } - - /** - * This function constructs an instance of delayed share fetch operation for completing share fetch - * requests instantaneously or with delay. The direct usage of this constructor is only from tests. - * - * @param shareFetch The share fetch parameters of the share fetch request. - * @param replicaManager The replica manager instance used to read from log/complete the request. - * @param exceptionHandler The handler to complete share fetch requests with exception. - * @param sharePartitions The share partitions referenced in the share fetch request. - * @param partitionMaxBytesStrategy The strategy to identify the max bytes for topic partitions in the share fetch request. - * @param shareGroupMetrics The share group metrics to record the metrics. - * @param time The system time. - * @param pendingRemoteFetchesOpt Optional containing an in-flight remote fetch object or an empty optional. - * @param remoteFetchMaxWaitMs The max wait time for a share fetch request having remote storage fetch. - */ - DelayedShareFetch( - ShareFetch shareFetch, - ReplicaManager replicaManager, - BiConsumer exceptionHandler, - LinkedHashMap sharePartitions, - PartitionMaxBytesStrategy partitionMaxBytesStrategy, - ShareGroupMetrics shareGroupMetrics, - Time time, - Optional pendingRemoteFetchesOpt, - Uuid fetchId, - long remoteFetchMaxWaitMs - ) { - super(shareFetch.fetchParams().maxWaitMs); + LinkedHashMap sharePartitions) { + super(shareFetch.fetchParams().maxWaitMs, Optional.empty()); this.shareFetch = shareFetch; this.replicaManager = replicaManager; this.partitionsAcquired = new LinkedHashMap<>(); - this.localPartitionsAlreadyFetched = new LinkedHashMap<>(); + this.partitionsAlreadyFetched = new LinkedHashMap<>(); this.exceptionHandler = exceptionHandler; this.sharePartitions = sharePartitions; - this.partitionMaxBytesStrategy = partitionMaxBytesStrategy; - this.shareGroupMetrics = shareGroupMetrics; - this.time = time; - this.acquireStartTimeMs = time.hiResClockMs(); - this.pendingRemoteFetchesOpt = pendingRemoteFetchesOpt; - this.remoteStorageFetchException = Optional.empty(); - this.fetchId = fetchId; - this.outsidePurgatoryCallbackLock = new AtomicBoolean(false); - this.remoteFetchMaxWaitMs = remoteFetchMaxWaitMs; - // Register metrics for DelayedShareFetch. - KafkaMetricsGroup metricsGroup = new KafkaMetricsGroup("kafka.server", "DelayedShareFetchMetrics"); - this.expiredRequestMeter = metricsGroup.newMeter(EXPIRES_PER_SEC, "requests", TimeUnit.SECONDS); } @Override public void onExpiration() { - expiredRequestMeter.mark(); } /** * Complete the share fetch operation by fetching records for all partitions in the share fetch request irrespective * of whether they have any acquired records. This is called when the fetch operation is forced to complete either * because records can be acquired for some partitions or due to MaxWaitMs timeout. - *

          - * On operation timeout, onComplete is invoked, last try occurs to acquire partitions and read - * from log, if acquired. The fetch will only happen from local log and not remote storage, on - * operation expiration. */ @Override public void onComplete() { + // We are utilizing lock so that onComplete doesn't do a dirty read for instance variables - + // partitionsAcquired and partitionsAlreadyFetched, since these variables can get updated in a different tryComplete thread. + lock.lock(); log.trace("Completing the delayed share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), partitionsAcquired.keySet()); - if (remoteStorageFetchException.isPresent()) { - completeErroneousRemoteShareFetchRequest(); - } else if (pendingRemoteFetchesOpt.isPresent()) { - if (maybeRegisterCallbackPendingRemoteFetch()) { - log.trace("Registered remote storage fetch callback for group {}, member {}, " - + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), - partitionsAcquired.keySet()); + try { + LinkedHashMap topicPartitionData; + // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. + if (partitionsAcquired.isEmpty()) + topicPartitionData = acquirablePartitions(); + // tryComplete invoked forceComplete, so we can use the data from tryComplete. + else + topicPartitionData = partitionsAcquired; + + if (topicPartitionData.isEmpty()) { + // No locks for share partitions could be acquired, so we complete the request with an empty response. + shareFetch.maybeComplete(Collections.emptyMap()); return; } - completeRemoteStorageShareFetchRequest(); - } else { - completeLocalLogShareFetchRequest(); - } - } - - private void completeLocalLogShareFetchRequest() { - LinkedHashMap topicPartitionData; - // tryComplete did not invoke forceComplete, so we need to check if we have any partitions to fetch. - if (partitionsAcquired.isEmpty()) { - topicPartitionData = acquirablePartitions(sharePartitions); - // The TopicPartitionsAcquireTimeMs metric signifies the tension when acquiring the locks - // for the share partition, hence if no partitions are yet acquired by tryComplete, - // we record the metric here. Do not check if the request has successfully acquired any - // partitions now or not, as then the upper bound of request timeout shall be recorded - // for the metric. - updateAcquireElapsedTimeMetric(); - } else { - // tryComplete invoked forceComplete, so we can use the data from tryComplete. - topicPartitionData = partitionsAcquired; - } + log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", + topicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); - if (topicPartitionData.isEmpty()) { - // No locks for share partitions could be acquired, so we complete the request with an empty response. - shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), 0); - shareFetch.maybeComplete(Map.of()); - return; - } else { - // Update metric to record acquired to requested partitions. - double requestTopicToAcquired = (double) topicPartitionData.size() / shareFetch.topicIdPartitions().size(); - shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (requestTopicToAcquired * 100)); + completeShareFetchRequest(topicPartitionData); + } finally { + lock.unlock(); } - log.trace("Fetchable share partitions data: {} with groupId: {} fetch params: {}", - topicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); - - processAcquiredTopicPartitionsForLocalLogFetch(topicPartitionData); } - private void processAcquiredTopicPartitionsForLocalLogFetch(LinkedHashMap topicPartitionData) { + private void completeShareFetchRequest(LinkedHashMap topicPartitionData) { try { LinkedHashMap responseData; - if (localPartitionsAlreadyFetched.isEmpty()) - responseData = readFromLog( - topicPartitionData, - partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size())); + if (partitionsAlreadyFetched.isEmpty()) + responseData = readFromLog(topicPartitionData); else // There shouldn't be a case when we have a partitionsAlreadyFetched value here and this variable is getting // updated in a different tryComplete thread. - responseData = combineLogReadResponse(topicPartitionData, localPartitionsAlreadyFetched); - - resetFetchOffsetMetadataForRemoteFetchPartitions(topicPartitionData, responseData); - - List shareFetchPartitionDataList = new ArrayList<>(); - responseData.forEach((topicIdPartition, logReadResult) -> { - if (logReadResult.info().delayedRemoteStorageFetch.isEmpty()) { - shareFetchPartitionDataList.add(new ShareFetchPartitionData( - topicIdPartition, - topicPartitionData.get(topicIdPartition), - logReadResult.toFetchPartitionData(false) - )); - } - }); - - shareFetch.maybeComplete(ShareFetchUtils.processFetchResponse( - shareFetch, - shareFetchPartitionDataList, - sharePartitions, - replicaManager, - exceptionHandler - )); + responseData = combineLogReadResponse(topicPartitionData, partitionsAlreadyFetched); + + LinkedHashMap fetchPartitionsData = new LinkedHashMap<>(); + for (Map.Entry entry : responseData.entrySet()) + fetchPartitionsData.put(entry.getKey(), entry.getValue().toFetchPartitionData(false)); + + shareFetch.maybeComplete(ShareFetchUtils.processFetchResponse(shareFetch, fetchPartitionsData, + sharePartitions, replicaManager, exceptionHandler)); } catch (Exception e) { log.error("Error processing delayed share fetch request", e); handleFetchException(shareFetch, topicPartitionData.keySet(), e); } finally { - releasePartitionLocksAndAddToActionQueue(topicPartitionData.keySet()); + // Releasing the lock to move ahead with the next request in queue. + releasePartitionLocks(topicPartitionData.keySet()); + // If we have a fetch request completed for a topic-partition, we release the locks for that partition, + // then we should check if there is a pending share fetch request for the topic-partition and complete it. + // We add the action to delayed actions queue to avoid an infinite call stack, which could happen if + // we directly call delayedShareFetchPurgatory.checkAndComplete + replicaManager.addToActionQueue(() -> topicPartitionData.keySet().forEach(topicIdPartition -> + replicaManager.completeDelayedShareFetchRequest( + new DelayedShareFetchGroupKey(shareFetch.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())))); } } - /** - * This function updates the cached fetch offset metadata to null corresponding to the share partition's fetch offset. - * This is required in the case when a topic partition that has local log fetch during tryComplete, but changes to remote - * storage fetch in onComplete. In this situation, if the cached fetchOffsetMetadata got updated in tryComplete, then - * we will enter a state where each share fetch request for this topic partition from client will use the cached - * fetchOffsetMetadata in tryComplete and return an empty response to the client from onComplete. - * Hence, we require to set offsetMetadata to null for this fetch offset, which would cause tryComplete to update - * fetchOffsetMetadata and thereby we will identify this partition for remote storage fetch. - * @param topicPartitionData - Map containing the fetch offset for the topic partitions. - * @param replicaManagerReadResponse - Map containing the readFromLog response from replicaManager for the topic partitions. - */ - private void resetFetchOffsetMetadataForRemoteFetchPartitions( - LinkedHashMap topicPartitionData, - LinkedHashMap replicaManagerReadResponse - ) { - replicaManagerReadResponse.forEach((topicIdPartition, logReadResult) -> { - if (logReadResult.info().delayedRemoteStorageFetch.isPresent()) { - SharePartition sharePartition = sharePartitions.get(topicIdPartition); - sharePartition.updateFetchOffsetMetadata( - topicPartitionData.get(topicIdPartition), - null - ); - } - }); - } - /** * Try to complete the fetch operation if we can acquire records for any partition in the share fetch request. */ @Override public boolean tryComplete() { - // Check to see if the remote fetch is in flight. If there is an in flight remote fetch we want to resolve it first. - if (pendingRemoteFetchesOpt.isPresent()) { - return maybeCompletePendingRemoteFetch(); - } + LinkedHashMap topicPartitionData = acquirablePartitions(); - LinkedHashMap topicPartitionData = acquirablePartitions(sharePartitions); try { if (!topicPartitionData.isEmpty()) { - // Update the metric to record the time taken to acquire the locks for the share partitions. - updateAcquireElapsedTimeMetric(); // In case, fetch offset metadata doesn't exist for one or more topic partitions, we do a // replicaManager.readFromLog to populate the offset metadata and update the fetch offset metadata for // those topic partitions. LinkedHashMap replicaManagerReadResponse = maybeReadFromLog(topicPartitionData); - // Store the remote fetch info for the topic partitions for which we need to perform remote fetch. - LinkedHashMap remoteStorageFetchInfoMap = maybePrepareRemoteStorageFetchInfo(topicPartitionData, replicaManagerReadResponse); - - if (!remoteStorageFetchInfoMap.isEmpty()) { - return maybeProcessRemoteFetch(topicPartitionData, remoteStorageFetchInfoMap); - } maybeUpdateFetchOffsetMetadata(topicPartitionData, replicaManagerReadResponse); - if (anyPartitionHasLogReadError(replicaManagerReadResponse) || isMinBytesSatisfied(topicPartitionData, partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, topicPartitionData.keySet(), topicPartitionData.size()))) { + if (anyPartitionHasLogReadError(replicaManagerReadResponse) || isMinBytesSatisfied(topicPartitionData)) { partitionsAcquired = topicPartitionData; - localPartitionsAlreadyFetched = replicaManagerReadResponse; - return forceComplete(); + partitionsAlreadyFetched = replicaManagerReadResponse; + boolean completedByMe = forceComplete(); + // If invocation of forceComplete is not successful, then that means the request is already completed + // hence release the acquired locks. + if (!completedByMe) { + releasePartitionLocks(partitionsAcquired.keySet()); + } + return completedByMe; } else { log.debug("minBytes is not satisfied for the share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), @@ -365,30 +184,16 @@ public boolean tryComplete() { releasePartitionLocks(topicPartitionData.keySet()); } } else { - log.trace("Can't acquire any partitions in the share fetch request for group {}, member {}, " + + log.trace("Can't acquire records for any partition in the share fetch request for group {}, member {}, " + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet()); } - // At this point, there could be delayed requests sitting in the purgatory which are waiting on - // DelayedShareFetchPartitionKeys corresponding to partitions, whose leader has been changed to a different broker. - // In that case, such partitions would not be able to get acquired, and the tryComplete will keep on returning false. - // Eventually the operation will get timed out and completed, but it might not get removed from the purgatory. - // This has been eventually left it like this because the purging mechanism will trigger whenever the number of completed - // but still being watched operations is larger than the purge interval. This purge interval is defined by the config - // share.fetch.purgatory.purge.interval.requests and is 1000 by default, thereby ensuring that such stale operations do not - // grow indefinitely. return false; } catch (Exception e) { log.error("Error processing delayed share fetch request", e); - // In case we have a remote fetch exception, we have already released locks for partitions which have potential - // local log read. We do not release locks for partitions which have a remote storage read because we need to - // complete the share fetch request in onComplete and if we release the locks early here, some other DelayedShareFetch - // request might get the locks for those partitions without this one getting complete. - if (remoteStorageFetchException.isEmpty()) { - releasePartitionLocks(topicPartitionData.keySet()); - partitionsAcquired.clear(); - localPartitionsAlreadyFetched.clear(); - } + partitionsAcquired.clear(); + partitionsAlreadyFetched.clear(); + releasePartitionLocks(topicPartitionData.keySet()); return forceComplete(); } } @@ -397,59 +202,61 @@ public boolean tryComplete() { * Prepare fetch request structure for partitions in the share fetch request for which we can acquire records. */ // Visible for testing - LinkedHashMap acquirablePartitions( - LinkedHashMap sharePartitionsForAcquire - ) { + LinkedHashMap acquirablePartitions() { // Initialize the topic partitions for which the fetch should be attempted. - LinkedHashMap topicPartitionData = new LinkedHashMap<>(); + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); - sharePartitionsForAcquire.forEach((topicIdPartition, sharePartition) -> { + sharePartitions.forEach((topicIdPartition, sharePartition) -> { + int partitionMaxBytes = shareFetch.partitionMaxBytes().getOrDefault(topicIdPartition, 0); // Add the share partition to the list of partitions to be fetched only if we can // acquire the fetch lock on it. - if (sharePartition.maybeAcquireFetchLock(fetchId)) { + if (sharePartition.maybeAcquireFetchLock()) { try { - log.trace("Fetch lock for share partition {}-{} has been acquired by {}", shareFetch.groupId(), topicIdPartition, fetchId); // If the share partition is already at capacity, we should not attempt to fetch. if (sharePartition.canAcquireRecords()) { - topicPartitionData.put(topicIdPartition, sharePartition.nextFetchOffset()); + topicPartitionData.put( + topicIdPartition, + new FetchRequest.PartitionData( + topicIdPartition.topicId(), + sharePartition.nextFetchOffset(), + 0, + partitionMaxBytes, + Optional.empty() + ) + ); } else { - sharePartition.releaseFetchLock(fetchId); - log.trace("Record lock partition limit exceeded for SharePartition {}-{}, " + - "cannot acquire more records. Releasing the fetch lock by {}", shareFetch.groupId(), topicIdPartition, fetchId); + sharePartition.releaseFetchLock(); + log.trace("Record lock partition limit exceeded for SharePartition {}, " + + "cannot acquire more records", sharePartition); } } catch (Exception e) { - log.error("Error checking condition for SharePartition: {}-{}", shareFetch.groupId(), topicIdPartition, e); + log.error("Error checking condition for SharePartition: {}", sharePartition, e); // Release the lock, if error occurred. - sharePartition.releaseFetchLock(fetchId); - log.trace("Fetch lock for share partition {}-{} is being released by {}", shareFetch.groupId(), topicIdPartition, fetchId); + sharePartition.releaseFetchLock(); } } }); return topicPartitionData; } - private LinkedHashMap maybeReadFromLog(LinkedHashMap topicPartitionData) { - LinkedHashMap partitionsNotMatchingFetchOffsetMetadata = new LinkedHashMap<>(); - topicPartitionData.forEach((topicIdPartition, fetchOffset) -> { + private LinkedHashMap maybeReadFromLog(LinkedHashMap topicPartitionData) { + LinkedHashMap partitionsNotMatchingFetchOffsetMetadata = new LinkedHashMap<>(); + topicPartitionData.forEach((topicIdPartition, partitionData) -> { SharePartition sharePartition = sharePartitions.get(topicIdPartition); - if (sharePartition.fetchOffsetMetadata(fetchOffset).isEmpty()) { - partitionsNotMatchingFetchOffsetMetadata.put(topicIdPartition, fetchOffset); + if (sharePartition.fetchOffsetMetadata(partitionData.fetchOffset).isEmpty()) { + partitionsNotMatchingFetchOffsetMetadata.put(topicIdPartition, partitionData); } }); if (partitionsNotMatchingFetchOffsetMetadata.isEmpty()) { return new LinkedHashMap<>(); } // We fetch data from replica manager corresponding to the topic partitions that have missing fetch offset metadata. - // Although we are fetching partition max bytes for partitionsNotMatchingFetchOffsetMetadata, - // we will take acquired partitions size = topicPartitionData.size() because we do not want to let the - // leftover partitions to starve which will be fetched later. - return readFromLog( - partitionsNotMatchingFetchOffsetMetadata, - partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, partitionsNotMatchingFetchOffsetMetadata.keySet(), topicPartitionData.size())); + return readFromLog(partitionsNotMatchingFetchOffsetMetadata); } - private void maybeUpdateFetchOffsetMetadata(LinkedHashMap topicPartitionData, - LinkedHashMap replicaManagerReadResponseData) { + private void maybeUpdateFetchOffsetMetadata( + LinkedHashMap topicPartitionData, + LinkedHashMap replicaManagerReadResponseData) { for (Map.Entry entry : replicaManagerReadResponseData.entrySet()) { TopicIdPartition topicIdPartition = entry.getKey(); SharePartition sharePartition = sharePartitions.get(topicIdPartition); @@ -460,18 +267,17 @@ private void maybeUpdateFetchOffsetMetadata(LinkedHashMap topicPartitionData, - LinkedHashMap partitionMaxBytes) { + private boolean isMinBytesSatisfied(LinkedHashMap topicPartitionData) { long accumulatedSize = 0; - for (Map.Entry entry : topicPartitionData.entrySet()) { + for (Map.Entry entry : topicPartitionData.entrySet()) { TopicIdPartition topicIdPartition = entry.getKey(); - long fetchOffset = entry.getValue(); + FetchRequest.PartitionData partitionData = entry.getValue(); LogOffsetMetadata endOffsetMetadata; try { @@ -488,7 +294,7 @@ private boolean isMinBytesSatisfied(LinkedHashMap topicP SharePartition sharePartition = sharePartitions.get(topicIdPartition); - Optional optionalFetchOffsetMetadata = sharePartition.fetchOffsetMetadata(fetchOffset); + Optional optionalFetchOffsetMetadata = sharePartition.fetchOffsetMetadata(partitionData.fetchOffset); if (optionalFetchOffsetMetadata.isEmpty() || optionalFetchOffsetMetadata.get() == LogOffsetMetadata.UNKNOWN_OFFSET_METADATA) continue; LogOffsetMetadata fetchOffsetMetadata = optionalFetchOffsetMetadata.get(); @@ -506,7 +312,7 @@ private boolean isMinBytesSatisfied(LinkedHashMap topicP return true; } else if (fetchOffsetMetadata.onSameSegment(endOffsetMetadata)) { // we take the partition fetch size as upper bound when accumulating the bytes. - long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionMaxBytes.get(topicIdPartition)); + long bytesAvailable = Math.min(endOffsetMetadata.positionDiff(fetchOffsetMetadata), partitionData.maxBytes); accumulatedSize += bytesAvailable; } } @@ -521,33 +327,21 @@ private LogOffsetMetadata endOffsetMetadataForTopicPartition(TopicIdPartition to // extend it to support other FetchIsolation types. FetchIsolation isolationType = shareFetch.fetchParams().isolation; if (isolationType == FetchIsolation.LOG_END) - return offsetSnapshot.logEndOffset(); + return offsetSnapshot.logEndOffset; else if (isolationType == FetchIsolation.HIGH_WATERMARK) - return offsetSnapshot.highWatermark(); + return offsetSnapshot.highWatermark; else - return offsetSnapshot.lastStableOffset(); + return offsetSnapshot.lastStableOffset; } - private LinkedHashMap readFromLog(LinkedHashMap topicPartitionFetchOffsets, - LinkedHashMap partitionMaxBytes) { + private LinkedHashMap readFromLog(LinkedHashMap topicPartitionData) { // Filter if there already exists any erroneous topic partition. - Set partitionsToFetch = shareFetch.filterErroneousTopicPartitions(topicPartitionFetchOffsets.keySet()); + Set partitionsToFetch = shareFetch.filterErroneousTopicPartitions(topicPartitionData.keySet()); if (partitionsToFetch.isEmpty()) { return new LinkedHashMap<>(); } - LinkedHashMap topicPartitionData = new LinkedHashMap<>(); - - topicPartitionFetchOffsets.forEach((topicIdPartition, fetchOffset) -> topicPartitionData.put(topicIdPartition, - new FetchRequest.PartitionData( - topicIdPartition.topicId(), - fetchOffset, - 0, - partitionMaxBytes.get(topicIdPartition), - Optional.empty()) - )); - Seq> responseLogResult = replicaManager.readFromLog( shareFetch.fetchParams(), CollectionConverters.asScala( @@ -595,36 +389,19 @@ private void handleFetchException( shareFetch.maybeCompleteWithException(topicIdPartitions, throwable); } - /** - * The method updates the metric for the time taken to acquire the share partition locks. Also, - * it resets the acquireStartTimeMs to the current time, so that the metric records the time taken - * to acquire the locks for the re-try, if the partitions are re-acquired. The partitions can be - * re-acquired if the fetch request is not completed because of the minBytes or some other condition. - */ - private void updateAcquireElapsedTimeMetric() { - long currentTimeMs = time.hiResClockMs(); - shareGroupMetrics.recordTopicPartitionsAcquireTimeMs(shareFetch.groupId(), currentTimeMs - acquireStartTimeMs); - // Reset the acquireStartTimeMs to the current time. If the fetch request is not completed - // and the partitions are re-acquired then metric should record value from the last acquire time. - acquireStartTimeMs = currentTimeMs; - } - // Visible for testing. - LinkedHashMap combineLogReadResponse(LinkedHashMap topicPartitionData, - LinkedHashMap existingFetchedData) { - LinkedHashMap missingLogReadTopicPartitions = new LinkedHashMap<>(); - topicPartitionData.forEach((topicIdPartition, fetchOffset) -> { + LinkedHashMap combineLogReadResponse(LinkedHashMap topicPartitionData, + LinkedHashMap existingFetchedData) { + LinkedHashMap missingLogReadTopicPartitions = new LinkedHashMap<>(); + topicPartitionData.forEach((topicIdPartition, partitionData) -> { if (!existingFetchedData.containsKey(topicIdPartition)) { - missingLogReadTopicPartitions.put(topicIdPartition, fetchOffset); + missingLogReadTopicPartitions.put(topicIdPartition, partitionData); } }); if (missingLogReadTopicPartitions.isEmpty()) { return existingFetchedData; } - - LinkedHashMap missingTopicPartitionsLogReadResponse = readFromLog( - missingLogReadTopicPartitions, - partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes, missingLogReadTopicPartitions.keySet(), topicPartitionData.size())); + LinkedHashMap missingTopicPartitionsLogReadResponse = readFromLog(missingLogReadTopicPartitions); missingTopicPartitionsLogReadResponse.putAll(existingFetchedData); return missingTopicPartitionsLogReadResponse; } @@ -633,8 +410,7 @@ LinkedHashMap combineLogReadResponse(LinkedHash void releasePartitionLocks(Set topicIdPartitions) { topicIdPartitions.forEach(tp -> { SharePartition sharePartition = sharePartitions.get(tp); - sharePartition.releaseFetchLock(fetchId); - log.trace("Fetch lock for share partition {}-{} is being released by {}", shareFetch.groupId(), tp, fetchId); + sharePartition.releaseFetchLock(); }); } @@ -642,348 +418,4 @@ void releasePartitionLocks(Set topicIdPartitions) { Lock lock() { return lock; } - - // Visible for testing. - PendingRemoteFetches pendingRemoteFetches() { - return pendingRemoteFetchesOpt.orElse(null); - } - - // Visible for testing. - boolean outsidePurgatoryCallbackLock() { - return outsidePurgatoryCallbackLock.get(); - } - - // Only used for testing purpose. - void updatePartitionsAcquired(LinkedHashMap partitionsAcquired) { - this.partitionsAcquired = partitionsAcquired; - } - - // Visible for testing. - Meter expiredRequestMeter() { - return expiredRequestMeter; - } - - private LinkedHashMap maybePrepareRemoteStorageFetchInfo( - LinkedHashMap topicPartitionData, - LinkedHashMap replicaManagerReadResponse - ) { - LinkedHashMap remoteStorageFetchInfoMap = new LinkedHashMap<>(); - for (Map.Entry entry : replicaManagerReadResponse.entrySet()) { - TopicIdPartition topicIdPartition = entry.getKey(); - LogReadResult logReadResult = entry.getValue(); - if (logReadResult.info().delayedRemoteStorageFetch.isPresent()) { - remoteStorageFetchInfoMap.put(topicIdPartition, logReadResult); - partitionsAcquired.put(topicIdPartition, topicPartitionData.get(topicIdPartition)); - } - } - return remoteStorageFetchInfoMap; - } - - private boolean maybeProcessRemoteFetch( - LinkedHashMap topicPartitionData, - LinkedHashMap remoteStorageFetchInfoMap - ) { - Set nonRemoteFetchTopicPartitions = new LinkedHashSet<>(); - topicPartitionData.keySet().forEach(topicIdPartition -> { - // non-remote storage fetch topic partitions for which fetch would not be happening in this share fetch request. - if (!remoteStorageFetchInfoMap.containsKey(topicIdPartition)) { - nonRemoteFetchTopicPartitions.add(topicIdPartition); - } - }); - // Release fetch lock for the topic partitions that were acquired but were not a part of remote fetch and add - // them to the delayed actions queue. - releasePartitionLocksAndAddToActionQueue(nonRemoteFetchTopicPartitions); - processRemoteFetchOrException(remoteStorageFetchInfoMap); - // Check if remote fetch can be completed. - return maybeCompletePendingRemoteFetch(); - } - - private boolean maybeRegisterCallbackPendingRemoteFetch() { - log.trace("Registering callback pending remote fetch"); - PendingRemoteFetches pendingFetch = pendingRemoteFetchesOpt.get(); - if (!pendingFetch.isDone() && shareFetch.fetchParams().maxWaitMs < remoteFetchMaxWaitMs) { - TimerTask timerTask = new PendingRemoteFetchTimerTask(); - pendingFetch.invokeCallbackOnCompletion(((ignored, throwable) -> { - timerTask.cancel(); - log.trace("Invoked remote storage fetch callback for group {}, member {}, " - + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), - partitionsAcquired.keySet()); - if (throwable != null) { - log.error("Remote storage fetch failed for group {}, member {}, topic partitions {}", - shareFetch.groupId(), shareFetch.memberId(), sharePartitions.keySet(), throwable); - } - completeRemoteShareFetchRequestOutsidePurgatory(); - })); - replicaManager.addShareFetchTimerRequest(timerTask); - return true; - } - return false; - } - - /** - * Throws an exception if a task for remote storage fetch could not be scheduled successfully else updates pendingRemoteFetchesOpt. - * @param remoteStorageFetchInfoMap - The remote storage fetch information. - */ - private void processRemoteFetchOrException( - LinkedHashMap remoteStorageFetchInfoMap - ) { - LinkedHashMap fetchOffsetMetadataMap = new LinkedHashMap<>(); - remoteStorageFetchInfoMap.forEach((topicIdPartition, logReadResult) -> fetchOffsetMetadataMap.put( - topicIdPartition, - logReadResult.info().fetchOffsetMetadata - )); - - List remoteFetches = new ArrayList<>(); - for (Map.Entry entry : remoteStorageFetchInfoMap.entrySet()) { - TopicIdPartition remoteFetchTopicIdPartition = entry.getKey(); - RemoteStorageFetchInfo remoteStorageFetchInfo = entry.getValue().info().delayedRemoteStorageFetch.get(); - - Future remoteFetchTask; - CompletableFuture remoteFetchResult = new CompletableFuture<>(); - try { - remoteFetchTask = replicaManager.remoteLogManager().get().asyncRead( - remoteStorageFetchInfo, - result -> { - remoteFetchResult.complete(result); - replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(shareFetch.groupId(), remoteFetchTopicIdPartition.topicId(), remoteFetchTopicIdPartition.partition())); - } - ); - } catch (Exception e) { - // Cancel the already created remote fetch tasks in case an exception occurs. - remoteFetches.forEach(this::cancelRemoteFetchTask); - // Throw the error if any in scheduling the remote fetch task. - remoteStorageFetchException = Optional.of(e); - throw e; - } - remoteFetches.add(new RemoteFetch(remoteFetchTopicIdPartition, entry.getValue(), remoteFetchTask, remoteFetchResult, remoteStorageFetchInfo)); - } - pendingRemoteFetchesOpt = Optional.of(new PendingRemoteFetches(remoteFetches, fetchOffsetMetadataMap)); - } - - /** - * This function checks if the remote fetch can be completed or not. It should always be called once you confirm pendingRemoteFetchesOpt.isPresent(). - * The operation can be completed if: - * Case a: The partition is in an offline log directory on this broker - * Case b: This broker does not know the partition it tries to fetch - * Case c: This broker is no longer the leader of the partition it tries to fetch - * Case d: This broker is no longer the leader or follower of the partition it tries to fetch - * Case e: All remote storage read requests completed - * @return boolean representing whether the remote fetch is completed or not. - */ - private boolean maybeCompletePendingRemoteFetch() { - boolean canComplete = false; - - for (TopicIdPartition topicIdPartition : pendingRemoteFetchesOpt.get().fetchOffsetMetadataMap().keySet()) { - try { - Partition partition = replicaManager.getPartitionOrException(topicIdPartition.topicPartition()); - if (!partition.isLeader()) { - throw new NotLeaderException("Broker is no longer the leader of topicPartition: " + topicIdPartition); - } - } catch (KafkaStorageException e) { // Case a - log.debug("TopicPartition {} is in an offline log directory, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); - canComplete = true; - } catch (UnknownTopicOrPartitionException e) { // Case b - log.debug("Broker no longer knows of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); - canComplete = true; - } catch (NotLeaderException e) { // Case c - log.debug("Broker is no longer the leader of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); - canComplete = true; - } catch (NotLeaderOrFollowerException e) { // Case d - log.debug("Broker is no longer the leader or follower of topicPartition {}, satisfy {} immediately", topicIdPartition, shareFetch.fetchParams()); - canComplete = true; - } - if (canComplete) - break; - } - - if (canComplete || pendingRemoteFetchesOpt.get().isDone()) { // Case e - return forceComplete(); - } else - return false; - } - - /** - * This function completes a share fetch request for which we have identified erroneous remote storage fetch in tryComplete() - * It should only be called when we know that there is remote fetch in-flight/completed. - */ - private void completeErroneousRemoteShareFetchRequest() { - try { - handleFetchException(shareFetch, partitionsAcquired.keySet(), remoteStorageFetchException.get()); - } finally { - releasePartitionLocksAndAddToActionQueue(partitionsAcquired.keySet()); - } - - } - - private void releasePartitionLocksAndAddToActionQueue(Set topicIdPartitions) { - if (topicIdPartitions.isEmpty()) { - return; - } - // Releasing the lock to move ahead with the next request in queue. - releasePartitionLocks(topicIdPartitions); - replicaManager.addToActionQueue(() -> topicIdPartitions.forEach(topicIdPartition -> { - // If we have a fetch request completed for a share-partition, we release the locks for that partition, - // then we should check if there is a pending share fetch request for the share-partition and complete it. - // We add the action to delayed actions queue to avoid an infinite call stack, which could happen if - // we directly call delayedShareFetchPurgatory.checkAndComplete. - replicaManager.completeDelayedShareFetchRequest( - new DelayedShareFetchGroupKey(shareFetch.groupId(), topicIdPartition.topicId(), topicIdPartition.partition())); - // As DelayedShareFetch operation is watched over multiple keys, same operation might be - // completed and can contain references to data fetched. Hence, if the operation is not - // removed from other watched keys then there can be a memory leak. The removal of the - // operation is dependent on the purge task by DelayedOperationPurgatory. Hence, this can - // also be prevented by setting smaller value for configuration {@link ShareGroupConfig#SHARE_FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG}. - // However, it's best to trigger the check on all the keys that are being watched which - // should free the memory for the completed operation. - replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchPartitionKey(topicIdPartition)); - })); - } - - /** - * This function completes a share fetch request for which we have identified remoteFetch during tryComplete() - * Note - This function should only be called when we know that there is remote fetch. - */ - private void completeRemoteStorageShareFetchRequest() { - LinkedHashMap acquiredNonRemoteFetchTopicPartitionData = new LinkedHashMap<>(); - try { - List shareFetchPartitionData = new ArrayList<>(); - int readableBytes = 0; - for (RemoteFetch remoteFetch : pendingRemoteFetchesOpt.get().remoteFetches()) { - if (remoteFetch.remoteFetchResult().isDone()) { - RemoteLogReadResult remoteLogReadResult = remoteFetch.remoteFetchResult().get(); - if (remoteLogReadResult.error().isPresent()) { - Throwable error = remoteLogReadResult.error().get(); - // If there is any error for the remote fetch topic partition, we populate the error accordingly. - shareFetchPartitionData.add( - new ShareFetchPartitionData( - remoteFetch.topicIdPartition(), - partitionsAcquired.get(remoteFetch.topicIdPartition()), - ReplicaManager.createLogReadResult(error).toFetchPartitionData(false) - ) - ); - } else { - FetchDataInfo info = remoteLogReadResult.fetchDataInfo().get(); - TopicIdPartition topicIdPartition = remoteFetch.topicIdPartition(); - LogReadResult logReadResult = remoteFetch.logReadResult(); - shareFetchPartitionData.add( - new ShareFetchPartitionData( - topicIdPartition, - partitionsAcquired.get(remoteFetch.topicIdPartition()), - new FetchPartitionData( - logReadResult.error(), - logReadResult.highWatermark(), - logReadResult.leaderLogStartOffset(), - info.records, - Optional.empty(), - logReadResult.lastStableOffset().isPresent() ? OptionalLong.of(logReadResult.lastStableOffset().getAsLong()) : OptionalLong.empty(), - info.abortedTransactions, - logReadResult.preferredReadReplica().isPresent() ? OptionalInt.of(logReadResult.preferredReadReplica().getAsInt()) : OptionalInt.empty(), - false - ) - ) - ); - readableBytes += info.records.sizeInBytes(); - } - } else { - cancelRemoteFetchTask(remoteFetch); - } - } - - // If remote fetch bytes < shareFetch.fetchParams().maxBytes, then we will try for a local read. - if (readableBytes < shareFetch.fetchParams().maxBytes) { - // Get the local log read based topic partitions. - LinkedHashMap nonRemoteFetchSharePartitions = new LinkedHashMap<>(); - sharePartitions.forEach((topicIdPartition, sharePartition) -> { - if (!partitionsAcquired.containsKey(topicIdPartition)) { - nonRemoteFetchSharePartitions.put(topicIdPartition, sharePartition); - } - }); - acquiredNonRemoteFetchTopicPartitionData = acquirablePartitions(nonRemoteFetchSharePartitions); - if (!acquiredNonRemoteFetchTopicPartitionData.isEmpty()) { - log.trace("Fetchable local share partitions for a remote share fetch request data: {} with groupId: {} fetch params: {}", - acquiredNonRemoteFetchTopicPartitionData, shareFetch.groupId(), shareFetch.fetchParams()); - - LinkedHashMap responseData = readFromLog( - acquiredNonRemoteFetchTopicPartitionData, - partitionMaxBytesStrategy.maxBytes(shareFetch.fetchParams().maxBytes - readableBytes, acquiredNonRemoteFetchTopicPartitionData.keySet(), acquiredNonRemoteFetchTopicPartitionData.size())); - resetFetchOffsetMetadataForRemoteFetchPartitions(acquiredNonRemoteFetchTopicPartitionData, responseData); - for (Map.Entry entry : responseData.entrySet()) { - if (entry.getValue().info().delayedRemoteStorageFetch.isEmpty()) { - shareFetchPartitionData.add( - new ShareFetchPartitionData( - entry.getKey(), - acquiredNonRemoteFetchTopicPartitionData.get(entry.getKey()), - entry.getValue().toFetchPartitionData(false) - ) - ); - } - } - } - } - - // Update metric to record acquired to requested partitions. - double acquiredRatio = (double) (partitionsAcquired.size() + acquiredNonRemoteFetchTopicPartitionData.size()) / shareFetch.topicIdPartitions().size(); - if (acquiredRatio > 0) - shareGroupMetrics.recordTopicPartitionsFetchRatio(shareFetch.groupId(), (int) (acquiredRatio * 100)); - - Map remoteFetchResponse = ShareFetchUtils.processFetchResponse( - shareFetch, shareFetchPartitionData, sharePartitions, replicaManager, exceptionHandler); - shareFetch.maybeComplete(remoteFetchResponse); - log.trace("Remote share fetch request completed successfully, response: {}", remoteFetchResponse); - } catch (InterruptedException | ExecutionException e) { - log.error("Exception occurred in completing remote fetch {} for delayed share fetch request {}", pendingRemoteFetchesOpt.get(), e); - handleExceptionInCompletingRemoteStorageShareFetchRequest(acquiredNonRemoteFetchTopicPartitionData.keySet(), e); - } catch (Exception e) { - log.error("Unexpected error in processing delayed share fetch request", e); - handleExceptionInCompletingRemoteStorageShareFetchRequest(acquiredNonRemoteFetchTopicPartitionData.keySet(), e); - } finally { - Set topicIdPartitions = new LinkedHashSet<>(partitionsAcquired.keySet()); - topicIdPartitions.addAll(acquiredNonRemoteFetchTopicPartitionData.keySet()); - releasePartitionLocksAndAddToActionQueue(topicIdPartitions); - } - } - - private void handleExceptionInCompletingRemoteStorageShareFetchRequest( - Set acquiredNonRemoteFetchTopicPartitions, - Exception e - ) { - Set topicIdPartitions = new LinkedHashSet<>(partitionsAcquired.keySet()); - topicIdPartitions.addAll(acquiredNonRemoteFetchTopicPartitions); - handleFetchException(shareFetch, topicIdPartitions, e); - } - - /** - * Cancel the remote storage read task, if it has not been executed yet and avoid interrupting the task if it is - * already running as it may force closing opened/cached resources as transaction index. - * Note - This function should only be called when we know that there is remote fetch. - */ - private void cancelRemoteFetchTask(RemoteFetch remoteFetch) { - boolean cancelled = remoteFetch.remoteFetchTask().cancel(false); - if (!cancelled) { - log.debug("Remote fetch task for RemoteStorageFetchInfo: {} could not be cancelled and its isDone value is {}", - remoteFetch.remoteFetchInfo(), remoteFetch.remoteFetchTask().isDone()); - } - } - - private void completeRemoteShareFetchRequestOutsidePurgatory() { - if (outsidePurgatoryCallbackLock.compareAndSet(false, true)) { - completeRemoteStorageShareFetchRequest(); - } - } - - private class PendingRemoteFetchTimerTask extends TimerTask { - - public PendingRemoteFetchTimerTask() { - super(remoteFetchMaxWaitMs - shareFetch.fetchParams().maxWaitMs); - } - - @Override - public void run() { - log.trace("Expired remote storage fetch callback for group {}, member {}, " - + "topic partitions {}", shareFetch.groupId(), shareFetch.memberId(), - partitionsAcquired.keySet()); - expiredRequestMeter.mark(); - completeRemoteShareFetchRequestOutsidePurgatory(); - } - } } diff --git a/core/src/main/java/kafka/server/share/ShareFetchUtils.java b/core/src/main/java/kafka/server/share/ShareFetchUtils.java index ba9e5368bcfd8..88f604a46f3c3 100644 --- a/core/src/main/java/kafka/server/share/ShareFetchUtils.java +++ b/core/src/main/java/kafka/server/share/ShareFetchUtils.java @@ -25,27 +25,20 @@ import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.OffsetNotAvailableException; import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.FileRecords; -import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.Records; import org.apache.kafka.common.requests.ListOffsetsRequest; -import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.fetch.ShareFetchPartitionData; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.LinkedHashMap; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.function.BiConsumer; @@ -65,18 +58,17 @@ public class ShareFetchUtils { */ static Map processFetchResponse( ShareFetch shareFetch, - List shareFetchPartitionDataList, + Map responseData, LinkedHashMap sharePartitions, ReplicaManager replicaManager, - BiConsumer exceptionHandler - ) { + BiConsumer exceptionHandler) { Map response = new HashMap<>(); // Acquired records count for the share fetch request. int acquiredRecordsCount = 0; - for (ShareFetchPartitionData shareFetchPartitionData : shareFetchPartitionDataList) { - TopicIdPartition topicIdPartition = shareFetchPartitionData.topicIdPartition(); - FetchPartitionData fetchPartitionData = shareFetchPartitionData.fetchPartitionData(); + for (Map.Entry entry : responseData.entrySet()) { + TopicIdPartition topicIdPartition = entry.getKey(); + FetchPartitionData fetchPartitionData = entry.getValue(); SharePartition sharePartition = sharePartitions.get(topicIdPartition); ShareFetchResponseData.PartitionData partitionData = new ShareFetchResponseData.PartitionData() @@ -84,10 +76,10 @@ static Map processFetchR if (fetchPartitionData.error.code() != Errors.NONE.code()) { partitionData - .setRecords(MemoryRecords.EMPTY) + .setRecords(null) .setErrorCode(fetchPartitionData.error.code()) .setErrorMessage(fetchPartitionData.error.message()) - .setAcquiredRecords(List.of()); + .setAcquiredRecords(Collections.emptyList()); // In case we get OFFSET_OUT_OF_RANGE error, that's because the Log Start Offset is later than the fetch offset. // So, we would update the start and end offset of the share partition and still return an empty @@ -110,25 +102,24 @@ static Map processFetchR partitionData.setErrorMessage(Errors.NONE.message()); } } else { - ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire( - shareFetch.memberId(), - shareFetch.batchSize(), - shareFetch.maxFetchRecords() - acquiredRecordsCount, - shareFetchPartitionData.fetchOffset(), - fetchPartitionData, - shareFetch.fetchParams().isolation - ); + ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire(shareFetch.memberId(), shareFetch.maxFetchRecords() - acquiredRecordsCount, fetchPartitionData); log.trace("Acquired records: {} for topicIdPartition: {}", shareAcquiredRecords, topicIdPartition); // Maybe, in the future, check if no records are acquired, and we want to retry // replica manager fetch. Depends on the share partition manager implementation, // if we want parallel requests for the same share partition or not. if (shareAcquiredRecords.acquiredRecords().isEmpty()) { partitionData - .setRecords(MemoryRecords.EMPTY) - .setAcquiredRecords(List.of()); + .setRecords(null) + .setAcquiredRecords(Collections.emptyList()); } else { partitionData - .setRecords(maybeSliceFetchRecords(fetchPartitionData.records, shareAcquiredRecords)) + // We set the records to the fetchPartitionData records. We do not alter the records + // fetched from the replica manager as they follow zero copy buffer. The acquired records + // might be a subset of the records fetched from the replica manager, depending + // on the max fetch records or available records in the share partition. The client + // sends the max bytes in request which should limit the bytes sent to the client + // in the response. + .setRecords(fetchPartitionData.records) .setAcquiredRecords(shareAcquiredRecords.acquiredRecords()); acquiredRecordsCount += shareAcquiredRecords.count(); } @@ -196,81 +187,4 @@ static Partition partition(ReplicaManager replicaManager, TopicPartition tp) { } return partition; } - - /** - * Slice the fetch records based on the acquired records. The slicing is done based on the first - * and last offset of the acquired records from the list. The slicing doesn't consider individual - * acquired batches rather the boundaries of the acquired list. The method expects the acquired - * records list to be within the fetch records bounds. - * - * @param records The records to be sliced. - * @param shareAcquiredRecords The share acquired records containing the non-empty acquired records. - * @return The sliced records, if the acquired records are a subset of the fetched records. Otherwise, - * the original records are returned. - */ - static Records maybeSliceFetchRecords(Records records, ShareAcquiredRecords shareAcquiredRecords) { - // The acquired records should be non-empty, do not check as the method is called only when the - // acquired records are non-empty. - List acquiredRecords = shareAcquiredRecords.acquiredRecords(); - try { - final Iterator iterator = records.batchIterator(); - // Track the first overlapping batch with the first acquired offset. - RecordBatch firstOverlapBatch = iterator.next(); - // If there exists single fetch batch, then return the original records. - if (!iterator.hasNext()) { - return records; - } - // Find the first and last acquired offset to slice the records. - final long firstAcquiredOffset = acquiredRecords.get(0).firstOffset(); - final long lastAcquiredOffset = acquiredRecords.get(acquiredRecords.size() - 1).lastOffset(); - int startPosition = 0; - int size = 0; - // Start iterating from the second batch. - while (iterator.hasNext()) { - RecordBatch batch = iterator.next(); - // Iterate until finds the first overlap batch with the first acquired offset. All the - // batches before this first overlap batch should be sliced hence increment the start - // position. - if (batch.baseOffset() <= firstAcquiredOffset) { - startPosition += firstOverlapBatch.sizeInBytes(); - firstOverlapBatch = batch; - continue; - } - // Break if traversed all the batches till the last acquired offset. - if (batch.baseOffset() > lastAcquiredOffset) { - break; - } - size += batch.sizeInBytes(); - } - // Include the first overlap batch as it's the last batch traversed which overlapped the first - // acquired offset. - size += firstOverlapBatch.sizeInBytes(); - // Check if we do not need slicing i.e. neither start position nor size changed. - if (startPosition == 0 && size == records.sizeInBytes()) { - return records; - } - return records.slice(startPosition, size); - } catch (Exception e) { - log.error("Error while checking batches for acquired records: {}, skipping slicing.", acquiredRecords, e); - // If there is an exception while slicing, return the original records so that the fetch - // can continue with the original records. - return records; - } - } - - /** - * The method is used to get the record lock duration for the group. If the group config is present, - * then the record lock duration is returned. Otherwise, the default value is returned. - * - * @param groupConfigManager The group config manager. - * @param groupId The group id for which the record lock duration is to be fetched. - * @param defaultValue The default value to be returned if the group config is not present. - * @return The record lock duration for the group. - */ - public static int recordLockDurationMsOrDefault(GroupConfigManager groupConfigManager, String groupId, int defaultValue) { - if (groupConfigManager.groupConfig(groupId).isPresent()) { - return groupConfigManager.groupConfig(groupId).get().shareRecordLockDurationMs(); - } - return defaultValue; - } } diff --git a/core/src/main/java/kafka/server/share/SharePartition.java b/core/src/main/java/kafka/server/share/SharePartition.java index 2c33007673363..1746cbc9e3ed9 100644 --- a/core/src/main/java/kafka/server/share/SharePartition.java +++ b/core/src/main/java/kafka/server/share/SharePartition.java @@ -23,6 +23,7 @@ import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; +import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRequestException; @@ -30,27 +31,17 @@ import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.ControlRecordType; -import org.apache.kafka.common.record.Record; import org.apache.kafka.common.record.RecordBatch; import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfig; import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; -import org.apache.kafka.server.share.fetch.AcquisitionLockTimeoutHandler; -import org.apache.kafka.server.share.fetch.AcquisitionLockTimerTask; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; -import org.apache.kafka.server.share.fetch.DeliveryCountOps; -import org.apache.kafka.server.share.fetch.InFlightBatch; -import org.apache.kafka.server.share.fetch.InFlightState; -import org.apache.kafka.server.share.fetch.RecordState; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.metrics.SharePartitionMetrics; import org.apache.kafka.server.share.persister.GroupTopicPartitionData; import org.apache.kafka.server.share.persister.PartitionAllData; import org.apache.kafka.server.share.persister.PartitionErrorData; @@ -62,43 +53,37 @@ import org.apache.kafka.server.share.persister.ReadShareGroupStateParameters; import org.apache.kafka.server.share.persister.TopicData; import org.apache.kafka.server.share.persister.WriteShareGroupStateParameters; -import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.timer.Timer; +import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; -import java.util.Comparator; +import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.NavigableMap; import java.util.Objects; import java.util.Optional; -import java.util.PriorityQueue; -import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ConcurrentSkipListMap; -import java.util.concurrent.atomic.AtomicReference; +import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; import static kafka.server.share.ShareFetchUtils.offsetForEarliestTimestamp; import static kafka.server.share.ShareFetchUtils.offsetForLatestTimestamp; import static kafka.server.share.ShareFetchUtils.offsetForTimestamp; -import static kafka.server.share.ShareFetchUtils.recordLockDurationMsOrDefault; /** * The SharePartition is used to track the state of a partition that is shared between multiple * consumers. The class maintains the state of the records that have been fetched from the leader * and are in-flight. */ -@SuppressWarnings({"ClassDataAbstractionCoupling", "ClassFanOutComplexity"}) public class SharePartition { private static final Logger log = LoggerFactory.getLogger(SharePartition.class); @@ -137,6 +122,71 @@ enum SharePartitionState { FENCED } + /** + * The RecordState is used to track the state of a record that has been fetched from the leader. + * The state of the records determines if the records should be re-delivered, move the next fetch + * offset, or be state persisted to disk. + */ + // Visible for testing + enum RecordState { + AVAILABLE((byte) 0), + ACQUIRED((byte) 1), + ACKNOWLEDGED((byte) 2), + ARCHIVED((byte) 4); + + public final byte id; + + RecordState(byte id) { + this.id = id; + } + + /** + * Validates that the newState is one of the valid transition from the current + * {@code RecordState}. + * + * @param newState State into which requesting to transition; must be non-null + * + * @return {@code RecordState} newState if validation succeeds. Returning + * newState helps state assignment chaining. + * + * @throws IllegalStateException if the state transition validation fails. + */ + public RecordState validateTransition(RecordState newState) throws IllegalStateException { + Objects.requireNonNull(newState, "newState cannot be null"); + if (this == newState) { + throw new IllegalStateException("The state transition is invalid as the new state is" + + "the same as the current state"); + } + + if (this == ACKNOWLEDGED || this == ARCHIVED) { + throw new IllegalStateException("The state transition is invalid from the current state: " + this); + } + + if (this == AVAILABLE && newState != ACQUIRED) { + throw new IllegalStateException("The state can only be transitioned to ACQUIRED from AVAILABLE"); + } + + // Either the transition is from Available -> Acquired or from Acquired -> Available/ + // Acknowledged/Archived. + return newState; + } + + public static RecordState forId(byte id) { + switch (id) { + case 0: + return AVAILABLE; + case 1: + return ACQUIRED; + case 2: + return ACKNOWLEDGED; + case 4: + return ARCHIVED; + default: + throw new IllegalArgumentException("Unknown record state id: " + id); + } + } + } + /** * The group id of the share partition belongs to. */ @@ -167,19 +217,23 @@ enum SharePartitionState { */ private final ReadWriteLock lock; + /** + * The find next fetch offset is used to indicate if the next fetch offset should be recomputed. + */ + private final AtomicBoolean findNextFetchOffset; + /** * The lock to ensure that the same share partition does not enter a fetch queue - * while another one is being fetched within the queue. The caller's id that acquires the fetch - * lock is utilized for ensuring the above. + * while another one is being fetched within the queue. */ - private final AtomicReference fetchLock; + private final AtomicBoolean fetchLock; /** - * The max in-flight records is used to limit the number of records that can be in-flight at any - * given time. The max in-flight records is used to prevent the consumer from fetching too many + * The max in-flight messages is used to limit the number of records that can be in-flight at any + * given time. The max in-flight messages is used to prevent the consumer from fetching too many * records from the leader and running out of memory. */ - private final int maxInFlightRecords; + private final int maxInFlightMessages; /** * The max delivery count is used to limit the number of times a record can be delivered to the @@ -200,11 +254,6 @@ enum SharePartitionState { */ private final int defaultRecordLockDurationMs; - /** - * The find next fetch offset is used to indicate if the next fetch offset should be recomputed. - */ - private boolean findNextFetchOffset; - /** * Timer is used to implement acquisition lock on records that guarantees the movement of records from * acquired to available/archived state upon timeout @@ -226,27 +275,6 @@ enum SharePartitionState { */ private final SharePartitionListener listener; - /** - * The load start time is used to track the time taken to load the share partition. - */ - private final long loadStartTimeMs; - - /** - * The share partition metrics is used to track the broker-side metrics for the share partition. - */ - private final SharePartitionMetrics sharePartitionMetrics; - - /** - * The acquisition lock timeout handler is used to handle the acquisition lock timeout for the share partition. - */ - private final AcquisitionLockTimeoutHandler timeoutHandler; - - /** - * The replica manager is used to check to see if any delayed share fetch request can be completed because of data - * availability due to acquisition lock timeout. - */ - private final ReplicaManager replicaManager; - /** * The share partition start offset specifies the partition start offset from which the records * are cached in the cachedState of the sharePartition. @@ -259,23 +287,11 @@ enum SharePartitionState { */ private long endOffset; - /** - * The persister read result gap window tracks if there are any gaps in the in-flight batch during - * initial read of the share partition state from the persister. - */ - private GapWindow persisterReadResultGapWindow; - /** * We maintain the latest fetch offset and its metadata to estimate the minBytes requirement more efficiently. */ private final OffsetMetadata fetchOffsetMetadata; - /** - * The delayed share fetch key is used to track the delayed share fetch requests that are waiting - * for the respective share partition. - */ - private final DelayedShareFetchKey delayedShareFetchKey; - /** * The state epoch is used to track the version of the state of the share partition. */ @@ -287,25 +303,16 @@ enum SharePartitionState { private SharePartitionState partitionState; /** - * The fetch lock acquired time is used to track the time when the lock for share partition is acquired. - */ - private long fetchLockAcquiredTimeMs; - - /** - * The fetch lock released time is used to track the time when the lock for share partition is released. - */ - private long fetchLockReleasedTimeMs; - - /** - * The fetch lock idle duration is used to track the time for which the fetch lock is idle. + * The replica manager is used to check to see if any delayed share fetch request can be completed because of data + * availability due to acquisition lock timeout. */ - private long fetchLockIdleDurationMs; + private final ReplicaManager replicaManager; SharePartition( String groupId, TopicIdPartition topicIdPartition, int leaderEpoch, - int maxInFlightRecords, + int maxInFlightMessages, int maxDeliveryCount, int defaultRecordLockDurationMs, Timer timer, @@ -315,18 +322,15 @@ enum SharePartitionState { GroupConfigManager groupConfigManager, SharePartitionListener listener ) { - this(groupId, topicIdPartition, leaderEpoch, maxInFlightRecords, maxDeliveryCount, defaultRecordLockDurationMs, - timer, time, persister, replicaManager, groupConfigManager, SharePartitionState.EMPTY, listener, - new SharePartitionMetrics(groupId, topicIdPartition.topic(), topicIdPartition.partition())); + this(groupId, topicIdPartition, leaderEpoch, maxInFlightMessages, maxDeliveryCount, defaultRecordLockDurationMs, + timer, time, persister, replicaManager, groupConfigManager, SharePartitionState.EMPTY, listener); } - // Visible for testing - @SuppressWarnings("ParameterNumber") SharePartition( String groupId, TopicIdPartition topicIdPartition, int leaderEpoch, - int maxInFlightRecords, + int maxInFlightMessages, int maxDeliveryCount, int defaultRecordLockDurationMs, Timer timer, @@ -335,32 +339,26 @@ enum SharePartitionState { ReplicaManager replicaManager, GroupConfigManager groupConfigManager, SharePartitionState sharePartitionState, - SharePartitionListener listener, - SharePartitionMetrics sharePartitionMetrics + SharePartitionListener listener ) { this.groupId = groupId; this.topicIdPartition = topicIdPartition; this.leaderEpoch = leaderEpoch; - this.maxInFlightRecords = maxInFlightRecords; + this.maxInFlightMessages = maxInFlightMessages; this.maxDeliveryCount = maxDeliveryCount; this.cachedState = new ConcurrentSkipListMap<>(); this.lock = new ReentrantReadWriteLock(); - this.findNextFetchOffset = false; - this.fetchLock = new AtomicReference<>(null); + this.findNextFetchOffset = new AtomicBoolean(false); + this.fetchLock = new AtomicBoolean(false); this.defaultRecordLockDurationMs = defaultRecordLockDurationMs; this.timer = timer; this.time = time; - this.loadStartTimeMs = time.hiResClockMs(); this.persister = persister; this.partitionState = sharePartitionState; this.replicaManager = replicaManager; this.groupConfigManager = groupConfigManager; this.fetchOffsetMetadata = new OffsetMetadata(); - this.delayedShareFetchKey = new DelayedShareFetchGroupKey(groupId, topicIdPartition); this.listener = listener; - this.sharePartitionMetrics = sharePartitionMetrics; - this.timeoutHandler = releaseAcquisitionLockOnTimeout(); - this.registerGaugeMetrics(); } /** @@ -397,8 +395,8 @@ public CompletableFuture maybeInitialize() { persister.readState(new ReadShareGroupStateParameters.Builder() .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() .setGroupId(this.groupId) - .setTopicsData(List.of(new TopicData<>(topicIdPartition.topicId(), - List.of(PartitionFactory.newPartitionIdLeaderEpochData(topicIdPartition.partition(), leaderEpoch))))) + .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(), + Collections.singletonList(PartitionFactory.newPartitionIdLeaderEpochData(topicIdPartition.partition(), leaderEpoch))))) .build()) .build() ).whenComplete((result, exception) -> { @@ -436,8 +434,8 @@ public CompletableFuture maybeInitialize() { if (partitionData.errorCode() != Errors.NONE.code()) { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); - maybeLogError(String.format("Failed to initialize the share partition: %s-%s. Exception occurred: %s.", - groupId, topicIdPartition, partitionData), Errors.forCode(partitionData.errorCode()), ex); + log.error("Failed to initialize the share partition: {}-{}. Exception occurred: {}.", + groupId, topicIdPartition, partitionData); throwable = ex; return; } @@ -446,11 +444,6 @@ public CompletableFuture maybeInitialize() { stateEpoch = partitionData.stateEpoch(); List stateBatches = partitionData.stateBatches(); - long gapStartOffset = -1; - // The previousBatchLastOffset is used to track the last offset of the previous batch. - // For the first batch that should ideally start from startOffset if there are no gaps, - // we assume the previousBatchLastOffset to be startOffset - 1. - long previousBatchLastOffset = startOffset - 1; for (PersisterStateBatch stateBatch : stateBatches) { if (stateBatch.firstOffset() < startOffset) { log.error("Invalid state batch found for the share partition: {}-{}. The base offset: {}" @@ -459,26 +452,16 @@ public CompletableFuture maybeInitialize() { throwable = new IllegalStateException(String.format("Failed to initialize the share partition %s-%s", groupId, topicIdPartition)); return; } - if (gapStartOffset == -1 && stateBatch.firstOffset() > previousBatchLastOffset + 1) { - gapStartOffset = previousBatchLastOffset + 1; - } - previousBatchLastOffset = stateBatch.lastOffset(); - InFlightBatch inFlightBatch = new InFlightBatch(timer, time, EMPTY_MEMBER_ID, stateBatch.firstOffset(), - stateBatch.lastOffset(), RecordState.forId(stateBatch.deliveryState()), stateBatch.deliveryCount(), - null, timeoutHandler, sharePartitionMetrics); + InFlightBatch inFlightBatch = new InFlightBatch(EMPTY_MEMBER_ID, stateBatch.firstOffset(), + stateBatch.lastOffset(), RecordState.forId(stateBatch.deliveryState()), stateBatch.deliveryCount(), null); cachedState.put(stateBatch.firstOffset(), inFlightBatch); - sharePartitionMetrics.recordInFlightBatchMessageCount(stateBatch.lastOffset() - stateBatch.firstOffset() + 1); } // Update the endOffset of the partition. if (!cachedState.isEmpty()) { // If the cachedState is not empty, findNextFetchOffset flag is set to true so that any AVAILABLE records // in the cached state are not missed - updateFindNextFetchOffset(true); + findNextFetchOffset.set(true); endOffset = cachedState.lastEntry().getValue().lastOffset(); - // gapWindow is not required, if there are no gaps in the read state response - if (gapStartOffset != -1) { - persisterReadResultGapWindow = new GapWindow(endOffset, gapStartOffset); - } // In case the persister read state RPC result contains no AVAILABLE records, we can update cached state // and start/end offsets. maybeUpdateCachedStateAndOffsets(); @@ -496,9 +479,6 @@ public CompletableFuture maybeInitialize() { } // Release the lock. lock.writeLock().unlock(); - // Avoid triggering the listener for waiting share fetch requests in purgatory as the - // share partition manager keeps track of same and will trigger the listener for the - // respective share partition. // Complete the future. if (isFailed) { future.completeExceptionally(throwable); @@ -536,16 +516,14 @@ public long nextFetchOffset() { lock.writeLock().lock(); try { // When none of the records in the cachedState are in the AVAILABLE state, findNextFetchOffset will be false - if (!findNextFetchOffset) { + if (!findNextFetchOffset.get()) { if (cachedState.isEmpty() || startOffset > cachedState.lastEntry().getValue().lastOffset()) { // 1. When cachedState is empty, endOffset is set to the next offset of the last // offset removed from batch, which is the next offset to be fetched. // 2. When startOffset has moved beyond the in-flight records, startOffset and // endOffset point to the LSO, which is the next offset to be fetched. - log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, endOffset); return endOffset; } else { - log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, endOffset + 1); return endOffset + 1; } } @@ -555,46 +533,24 @@ public long nextFetchOffset() { // If cachedState is empty, there is no need of re-computing next fetch offset in future fetch requests. // Same case when startOffset has moved beyond the in-flight records, startOffset and endOffset point to the LSO // and the cached state is fresh. - updateFindNextFetchOffset(false); - log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, endOffset); + findNextFetchOffset.set(false); return endOffset; } long nextFetchOffset = -1; - long gapStartOffset = isPersisterReadGapWindowActive() ? persisterReadResultGapWindow.gapStartOffset() : -1; - for (Map.Entry entry : cachedState.entrySet()) { - // Check if there exists any gap in the in-flight batch which needs to be fetched. If - // gapWindow's endOffset is equal to the share partition's endOffset, then - // only the initial gaps should be considered. Once share partition's endOffset is past - // initial read end offset then all gaps are anyway fetched. - if (isPersisterReadGapWindowActive()) { - if (entry.getKey() > gapStartOffset) { - nextFetchOffset = gapStartOffset; - break; - } - // If the gapStartOffset is already past the last offset of the in-flight batch, - // then do not consider this batch for finding the next fetch offset. For example, - // consider during initialization, the gapWindow is set to 5 and the - // first cached batch is 15-18. First read will happen at offset 5 and say the data - // fetched is [5-6], now next fetch offset should be 7. This works fine but say - // subsequent read returns batch 8-11, and the gapStartOffset will be 12. Without - // the max check, the next fetch offset returned will be 7 which is incorrect. - // The natural gaps for which no data is available shall be considered hence - // take the max of the gapStartOffset and the last offset of the in-flight batch. - gapStartOffset = Math.max(entry.getValue().lastOffset() + 1, gapStartOffset); - } + for (Map.Entry entry : cachedState.entrySet()) { // Check if the state is maintained per offset or batch. If the offsetState // is not maintained then the batch state is used to determine the offsets state. if (entry.getValue().offsetState() == null) { - if (entry.getValue().batchState() == RecordState.AVAILABLE && !entry.getValue().batchHasOngoingStateTransition()) { + if (entry.getValue().batchState() == RecordState.AVAILABLE) { nextFetchOffset = entry.getValue().firstOffset(); break; } } else { // The offset state is maintained hence find the next available offset. for (Map.Entry offsetState : entry.getValue().offsetState().entrySet()) { - if (offsetState.getValue().state() == RecordState.AVAILABLE && !offsetState.getValue().hasOngoingStateTransition()) { + if (offsetState.getValue().state == RecordState.AVAILABLE) { nextFetchOffset = offsetState.getKey(); break; } @@ -609,10 +565,9 @@ public long nextFetchOffset() { // If nextFetchOffset is -1, then no AVAILABLE records are found in the cachedState, so there is no need of // re-computing next fetch offset in future fetch requests if (nextFetchOffset == -1) { - updateFindNextFetchOffset(false); + findNextFetchOffset.set(false); nextFetchOffset = endOffset + 1; } - log.trace("The next fetch offset for the share partition {}-{} is {}", groupId, topicIdPartition, nextFetchOffset); return nextFetchOffset; } finally { lock.writeLock().unlock(); @@ -623,56 +578,19 @@ public long nextFetchOffset() { * Acquire the fetched records for the share partition. The acquired records are added to the * in-flight records and the next fetch offset is updated to the next offset that should be * fetched from the leader. - *

          - * The method always acquire the full batch records. The cache state can consist of multiple - * full batches as a single batch. This behavior is driven by client configurations (batch size - * and max fetch records) and allows for efficient client acknowledgements. However, partial batches - * can exist in the cache only after a leader change and partial acknowledgements have been persisted - * prior leader change. In such case, when a share partition loses track of a batch's start and - * end offsets (e.g., after a leader change and partial acknowledgements), the cache stores the - * batch based on the offset range provided by the persister. This method handles these special - * batches by maintaining this range up to the last offset returned by the persister. No special - * handling is required afterward; the cache will eventually return to managing full batches. - *

          - * For compacted topics, batches may be non-contiguous, and records within cached batches may contain gaps. - * Because this method operates at the batch level, it acquires entire batches and relies on the - * client to report any gaps in the data. Whether non-contiguous batches are acquired depends on - * the first and last offsets of the fetched batches. Batches outside of this boundary will never - * be acquired. For instance, if fetched batches cover offsets [0-9 and 20-29], and the configured - * batch size and maximum fetch records are large enough (greater than 30 in this example), the - * intervening batch [10-19] will be acquired. Since full fetched batch is acquired, the client is - * responsible for reporting any data gaps. However, if the [0-9] and [20-29] ranges are fetched - * in separate calls to this method, the [10-19] batch will not be acquired and cannot exist in - * the cache. - *

          - * However, for compacted topics, previously acquired batches (e.g., due to acquisition lock timeout - * or explicit client release) might become available for acquisition again. But subsequent fetches - * may reveal that these batches, or parts of them, have been removed by compaction. Because this - * method works with whole batches, the disappearance of individual offsets within a batch requires - * no special handling; the batch will be re-acquired, and the client will report the gaps. But if - * an entire batch has been compacted away, this method must archive it in the cache to allow the - * Share Partition Start Offset (SPSO) to progress. This is accomplished by comparing the fetchOffset - * (the offset from which the log was read) with the first base offset of the fetch response. Any - * batches from fetchOffset to first base offset of the fetch response are archived. * * @param memberId The member id of the client that is fetching the record. - * @param batchSize The number of records per acquired records batch. * @param maxFetchRecords The maximum number of records that should be acquired, this is a soft * limit and the method might acquire more records than the maxFetchRecords, * if the records are already part of the same fetch batch. - * @param fetchOffset The fetch offset for which the records are fetched. * @param fetchPartitionData The fetched records for the share partition. - * @param isolationLevel The isolation level for the share fetch request. * @return The acquired records for the share partition. */ - @SuppressWarnings({"cyclomaticcomplexity", "methodlength"}) // Consider refactoring to avoid suppression + @SuppressWarnings("cyclomaticcomplexity") // Consider refactoring to avoid suppression public ShareAcquiredRecords acquire( String memberId, - int batchSize, int maxFetchRecords, - long fetchOffset, - FetchPartitionData fetchPartitionData, - FetchIsolation isolationLevel + FetchPartitionData fetchPartitionData ) { log.trace("Received acquire request for share partition: {}-{} memberId: {}", groupId, topicIdPartition, memberId); if (stateNotActive() || maxFetchRecords <= 0) { @@ -686,56 +604,21 @@ public ShareAcquiredRecords acquire( return ShareAcquiredRecords.empty(); } - LastOffsetAndMaxRecords lastOffsetAndMaxRecords = lastOffsetAndMaxRecordsToAcquire(fetchOffset, - maxFetchRecords, lastBatch.lastOffset()); - if (lastOffsetAndMaxRecords.maxRecords() <= 0) { - return ShareAcquiredRecords.empty(); - } - // The lastOffsetAndMaxRecords contains the last offset to acquire and the maximum number of records - // to acquire. - int maxRecordsToAcquire = lastOffsetAndMaxRecords.maxRecords(); - long lastOffsetToAcquire = lastOffsetAndMaxRecords.lastOffset(); - // We require the first batch of records to get the base offset. Stop parsing further // batches. RecordBatch firstBatch = fetchPartitionData.records.batches().iterator().next(); lock.writeLock().lock(); try { long baseOffset = firstBatch.baseOffset(); - - // There might be cached batches which are stale due to topic compaction hence archive them. - maybeArchiveStaleBatches(fetchOffset, baseOffset); - // Find the floor batch record for the request batch. The request batch could be // for a subset of the in-flight batch i.e. cached batch of offset 10-14 and request batch - // of 12-13. Hence, floor entry is fetched to find the sub-map. Secondly, when the share - // partition is initialized with persisted state, the start offset might be moved to a later - // offset. In such case, the first batch base offset might be less than the start offset. - Map.Entry floorEntry = cachedState.floorEntry(baseOffset); - if (floorEntry == null) { - // The initialize method check that there couldn't be any batches prior to the start offset. - // And once share partition starts fetching records, it will always fetch records, at least, - // from the start offset, but there could be cases where the batch base offset is prior - // to the start offset. This can happen when the share partition is initialized with - // partial persisted state and moved start offset i.e. start offset is not the batch's - // first offset. In such case, we need to adjust the base offset to the start offset. - // It's safe to adjust the base offset to the start offset when there isn't any floor - // i.e. no cached batches available prior to the request batch base offset. Hence, - // check for the floor entry and adjust the base offset accordingly. - if (baseOffset < startOffset) { - log.info("Adjusting base offset for the fetch as it's prior to start offset: {}-{}" - + "from {} to {}", groupId, topicIdPartition, baseOffset, startOffset); - baseOffset = startOffset; - } - } else if (floorEntry.getValue().lastOffset() >= baseOffset) { - // We might find a batch with floor entry but not necessarily that batch has an overlap, - // if the request batch base offset is ahead of last offset from floor entry i.e. cached - // batch of 10-14 and request batch of 15-18, though floor entry is found but no overlap. - // Such scenario will be handled in the next step when considering the subMap. However, - // if the floor entry is found and the request batch base offset is within the floor entry - // then adjust the base offset to the floor entry so that acquire method can still work on - // previously cached batch boundaries. - baseOffset = floorEntry.getKey(); + // of 12-13. Hence, floor entry is fetched to find the sub-map. + Map.Entry floorOffset = cachedState.floorEntry(baseOffset); + // We might find a batch with floor entry but not necessarily that batch has an overlap, + // if the request batch base offset is ahead of last offset from floor entry i.e. cached + // batch of 10-14 and request batch of 15-18, though floor entry is found but no overlap. + if (floorOffset != null && floorOffset.getValue().lastOffset() >= baseOffset) { + baseOffset = floorOffset.getKey(); } // Validate if the fetch records are already part of existing batches and if available. NavigableMap subMap = cachedState.subMap(baseOffset, true, lastBatch.lastOffset(), true); @@ -744,11 +627,9 @@ public ShareAcquiredRecords acquire( if (subMap.isEmpty()) { log.trace("No cached data exists for the share partition for requested fetch batch: {}-{}", groupId, topicIdPartition); - // Do not send the lastOffsetToAcquire as when the subMap is empty, it means that - // there isn't any overlap itself. - ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), - firstBatch.baseOffset(), lastBatch.lastOffset(), batchSize, maxRecordsToAcquire); - return maybeFilterAbortedTransactionalAcquiredRecords(fetchPartitionData, isolationLevel, shareAcquiredRecords); + AcquiredRecords acquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), + firstBatch.baseOffset(), lastBatch.lastOffset(), maxFetchRecords); + return ShareAcquiredRecords.fromAcquiredRecords(acquiredRecords); } log.trace("Overlap exists with in-flight records. Acquire the records if available for" @@ -756,40 +637,16 @@ public ShareAcquiredRecords acquire( List result = new ArrayList<>(); // The acquired count is used to track the number of records acquired for the request. int acquiredCount = 0; - // This tracks whether there is a gap between the subMap entries. If a gap is found, we will acquire - // the corresponding offsets in a separate batch. - long maybeGapStartOffset = baseOffset; // The fetched records are already part of the in-flight records. The records might // be available for re-delivery hence try acquiring same. The request batches could // be an exact match, subset or span over multiple already fetched batches. for (Map.Entry entry : subMap.entrySet()) { // If the acquired count is equal to the max fetch records then break the loop. - if (acquiredCount >= maxRecordsToAcquire) { + if (acquiredCount >= maxFetchRecords) { break; } InFlightBatch inFlightBatch = entry.getValue(); - // If the gapWindow window is active, we need to treat the gaps in between the window as - // acquirable. Once the window is inactive (when we have acquired all the gaps inside the window), - // the remaining gaps are natural (data does not exist at those offsets) and we need not acquire them. - if (isPersisterReadGapWindowActive()) { - // If nextBatchStartOffset is less than the key of the entry, this means the fetch happened for a gap in the cachedState. - // Thus, a new batch needs to be acquired for the gap. - if (maybeGapStartOffset < entry.getKey()) { - ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), - maybeGapStartOffset, entry.getKey() - 1, batchSize, maxRecordsToAcquire); - result.addAll(shareAcquiredRecords.acquiredRecords()); - acquiredCount += shareAcquiredRecords.count(); - } - // Set nextBatchStartOffset as the last offset of the current in-flight batch + 1. - // Hence, after the loop iteration the next gap can be considered. - maybeGapStartOffset = inFlightBatch.lastOffset() + 1; - // If the acquired count is equal to the max fetch records then break the loop. - if (acquiredCount >= maxRecordsToAcquire) { - break; - } - } - // Compute if the batch is a full match. boolean fullMatch = checkForFullMatch(inFlightBatch, firstBatch.baseOffset(), lastBatch.lastOffset()); @@ -817,7 +674,7 @@ public ShareAcquiredRecords acquire( // Do not send max fetch records to acquireSubsetBatchRecords as we want to acquire // all the records from the batch as the batch will anyway be part of the file-records // response batch. - int acquiredSubsetCount = acquireSubsetBatchRecords(memberId, firstBatch.baseOffset(), lastOffsetToAcquire, inFlightBatch, result); + int acquiredSubsetCount = acquireSubsetBatchRecords(memberId, firstBatch.baseOffset(), lastBatch.lastOffset(), inFlightBatch, result); acquiredCount += acquiredSubsetCount; continue; } @@ -829,8 +686,8 @@ public ShareAcquiredRecords acquire( continue; } - InFlightState updateResult = inFlightBatch.tryUpdateBatchState(RecordState.ACQUIRED, DeliveryCountOps.INCREASE, maxDeliveryCount, memberId); - if (updateResult == null || updateResult.state() != RecordState.ACQUIRED) { + InFlightState updateResult = inFlightBatch.tryUpdateBatchState(RecordState.ACQUIRED, true, maxDeliveryCount, memberId); + if (updateResult == null) { log.info("Unable to acquire records for the batch: {} in share partition: {}-{}", inFlightBatch, groupId, topicIdPartition); continue; @@ -849,17 +706,13 @@ public ShareAcquiredRecords acquire( // Some of the request offsets are not found in the fetched batches. Acquire the // missing records as well. - if (acquiredCount < maxRecordsToAcquire && subMap.lastEntry().getValue().lastOffset() < lastOffsetToAcquire) { + if (acquiredCount < maxFetchRecords && subMap.lastEntry().getValue().lastOffset() < lastBatch.lastOffset()) { log.trace("There exists another batch which needs to be acquired as well"); - ShareAcquiredRecords shareAcquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), + AcquiredRecords acquiredRecords = acquireNewBatchRecords(memberId, fetchPartitionData.records.batches(), subMap.lastEntry().getValue().lastOffset() + 1, - lastOffsetToAcquire, batchSize, maxRecordsToAcquire - acquiredCount); - result.addAll(shareAcquiredRecords.acquiredRecords()); - acquiredCount += shareAcquiredRecords.count(); - } - if (!result.isEmpty()) { - maybeUpdatePersisterGapWindowStartOffset(result.get(result.size() - 1).lastOffset() + 1); - return maybeFilterAbortedTransactionalAcquiredRecords(fetchPartitionData, isolationLevel, new ShareAcquiredRecords(result, acquiredCount)); + lastBatch.lastOffset(), maxFetchRecords - acquiredCount); + result.add(acquiredRecords); + acquiredCount += (int) (acquiredRecords.lastOffset() - acquiredRecords.firstOffset() + 1); } return new ShareAcquiredRecords(result, acquiredCount); } finally { @@ -884,7 +737,8 @@ public CompletableFuture acknowledge( CompletableFuture future = new CompletableFuture<>(); Throwable throwable = null; - List persisterBatches = new ArrayList<>(); + List updatedStates = new ArrayList<>(); + List stateBatches = new ArrayList<>(); lock.writeLock().lock(); try { // Avoided using enhanced for loop as need to check if the last batch have offsets @@ -924,7 +778,8 @@ public CompletableFuture acknowledge( batch, recordStateMap, subMap, - persisterBatches + updatedStates, + stateBatches ); if (ackThrowable.isPresent()) { @@ -932,12 +787,14 @@ public CompletableFuture acknowledge( break; } } + + // If the acknowledgement is successful then persist state, complete the state transition + // and update the cached state for start offset. Else rollback the state transition. + rollbackOrProcessStateUpdates(future, throwable, updatedStates, stateBatches); } finally { lock.writeLock().unlock(); } - // If the acknowledgement is successful then persist state, complete the state transition - // and update the cached state for start offset. Else rollback the state transition. - rollbackOrProcessStateUpdates(future, throwable, persisterBatches); + return future; } @@ -953,7 +810,8 @@ public CompletableFuture releaseAcquiredRecords(String memberId) { CompletableFuture future = new CompletableFuture<>(); Throwable throwable = null; - List persisterBatches = new ArrayList<>(); + List updatedStates = new ArrayList<>(); + List stateBatches = new ArrayList<>(); lock.writeLock().lock(); try { @@ -972,40 +830,38 @@ && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.las } if (inFlightBatch.offsetState() != null) { - Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForPerOffsetBatch(memberId, inFlightBatch, recordState, persisterBatches); + Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForPerOffsetBatch(memberId, inFlightBatch, recordState, updatedStates, stateBatches); if (releaseAcquiredRecordsThrowable.isPresent()) { throwable = releaseAcquiredRecordsThrowable.get(); break; } continue; } - Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForCompleteBatch(memberId, inFlightBatch, recordState, persisterBatches); + Optional releaseAcquiredRecordsThrowable = releaseAcquiredRecordsForCompleteBatch(memberId, inFlightBatch, recordState, updatedStates, stateBatches); if (releaseAcquiredRecordsThrowable.isPresent()) { throwable = releaseAcquiredRecordsThrowable.get(); break; } } + + // If the release acquired records is successful then persist state, complete the state transition + // and update the cached state for start offset. Else rollback the state transition. + rollbackOrProcessStateUpdates(future, throwable, updatedStates, stateBatches); } finally { lock.writeLock().unlock(); } - // If the release acquired records is successful then persist state, complete the state transition - // and update the cached state for start offset. Else rollback the state transition. - rollbackOrProcessStateUpdates(future, throwable, persisterBatches); return future; } - long loadStartTimeMs() { - return loadStartTimeMs; - } - private Optional releaseAcquiredRecordsForPerOffsetBatch(String memberId, InFlightBatch inFlightBatch, RecordState recordState, - List persisterBatches) { + List updatedStates, + List stateBatches) { log.trace("Offset tracked batch record found, batch: {} for the share partition: {}-{}", inFlightBatch, groupId, topicIdPartition); - for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { // Check if member id is the owner of the offset. if (!offsetState.getValue().memberId().equals(memberId) && !offsetState.getValue().memberId().equals(EMPTY_MEMBER_ID)) { @@ -1013,11 +869,10 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe + " partition: {}-{}. Skipping offset.", memberId, offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); return Optional.empty(); } - if (offsetState.getValue().state() == RecordState.ACQUIRED) { - // These records were fetched but they were not actually delivered to the client. + if (offsetState.getValue().state == RecordState.ACQUIRED) { InFlightState updateResult = offsetState.getValue().startStateTransition( offsetState.getKey() < startOffset ? RecordState.ARCHIVED : recordState, - DeliveryCountOps.DECREASE, + false, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1028,10 +883,16 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe return Optional.of(new InvalidRecordStateException("Unable to release acquired records for the offset")); } - // Successfully updated the state of the offset and created a persister state batch for write to persister. - persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(offsetState.getKey(), - offsetState.getKey(), updateResult.state().id(), (short) updateResult.deliveryCount()))); - // Do not update the next fetch offset as the offset has not completed the transition yet. + // Successfully updated the state of the offset. + updatedStates.add(updateResult); + stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), + updateResult.state.id, (short) updateResult.deliveryCount)); + + // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. + // This should not change the next fetch offset because the record is not available for acquisition + if (updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); + } } } return Optional.empty(); @@ -1040,7 +901,8 @@ private Optional releaseAcquiredRecordsForPerOffsetBatch(String membe private Optional releaseAcquiredRecordsForCompleteBatch(String memberId, InFlightBatch inFlightBatch, RecordState recordState, - List persisterBatches) { + List updatedStates, + List stateBatches) { // Check if member id is the owner of the batch. if (!inFlightBatch.batchMemberId().equals(memberId) && !inFlightBatch.batchMemberId().equals(EMPTY_MEMBER_ID)) { @@ -1056,7 +918,7 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member if (inFlightBatch.batchState() == RecordState.ACQUIRED) { InFlightState updateResult = inFlightBatch.startBatchStateTransition( inFlightBatch.lastOffset() < startOffset ? RecordState.ARCHIVED : recordState, - DeliveryCountOps.DECREASE, + false, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1066,10 +928,16 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member return Optional.of(new InvalidRecordStateException("Unable to release acquired records for the batch")); } - // Successfully updated the state of the batch and created a persister state batch for write to persister. - persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(inFlightBatch.firstOffset(), - inFlightBatch.lastOffset(), updateResult.state().id(), (short) updateResult.deliveryCount()))); - // Do not update the next fetch offset as the batch has not completed the transition yet. + // Successfully updated the state of the batch. + updatedStates.add(updateResult); + stateBatches.add(new PersisterStateBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset(), + updateResult.state.id, (short) updateResult.deliveryCount)); + + // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. + // This should not change the next fetch offset because the record is not available for acquisition + if (updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); + } } return Optional.empty(); } @@ -1077,24 +945,10 @@ private Optional releaseAcquiredRecordsForCompleteBatch(String member /** * Updates the cached state, start and end offsets of the share partition as per the new log * start offset. The method is called when the log start offset is moved for the share partition. - *

          - * This method only archives the available records in the cached state that are before the new log - * start offset. It does not persist the archived state batches to the persister, rather it - * updates the cached state and offsets to reflect the new log start offset. The state in persister - * will be updated lazily during the acknowledge/release records API calls or acquisition lock timeout. - *

          - * The AVAILABLE state records can either have ongoing state transition or not. Hence, the archive - * records method will update the state of the records to ARCHIVED and set the terminal state flag - * hence if the transition is rolled back then the state will not be AVAILABLE again. However, - * the ACQUIRED state records will not be archived as they are still in-flight and acknowledge - * method also do not allow the state update for any offsets post the log start offset, hence those - * records will only be archived once acquisition lock timeout occurs. * * @param logStartOffset The new log start offset. */ void updateCacheAndOffsets(long logStartOffset) { - log.debug("Updating cached states for share partition: {}-{} with new log start offset: {}", - groupId, topicIdPartition, logStartOffset); lock.writeLock().lock(); try { if (logStartOffset <= startOffset) { @@ -1117,7 +971,7 @@ void updateCacheAndOffsets(long logStartOffset) { // If we have transitioned the state of any batch/offset from AVAILABLE to ARCHIVED, // then there is a chance that the next fetch offset can change. if (anyRecordArchived) { - updateFindNextFetchOffset(true); + findNextFetchOffset.set(true); } // The new startOffset will be the log start offset. @@ -1137,110 +991,37 @@ void updateCacheAndOffsets(long logStartOffset) { } } - /** - * The method archives the available records in the cached state that are between the fetch offset - * and the base offset of the first fetched batch. This method is required to handle the compacted - * topics where the already fetched batch which is marked re-available, might not result in subsequent - * fetch response from log. Hence, the batches need to be archived to allow the SPSO and next fetch - * offset to progress. - * - * @param fetchOffset The fetch offset. - * @param baseOffset The base offset of the first fetched batch. - */ - private void maybeArchiveStaleBatches(long fetchOffset, long baseOffset) { - lock.writeLock().lock(); - try { - // If the fetch happens from within a batch then fetchOffset can be ahead of base offset else - // should be same as baseOffset of the first fetched batch. Otherwise, we might need to archive - // some stale batches. - if (cachedState.isEmpty() || fetchOffset >= baseOffset) { - // No stale batches to archive. - return; - } - - // The fetch offset can exist in the middle of the batch. Hence, find the floor offset - // for the fetch offset and then find the sub-map from the floor offset to the base offset. - long floorOffset = fetchOffset; - Map.Entry floorEntry = cachedState.floorEntry(fetchOffset); - if (floorEntry != null && floorEntry.getValue().lastOffset() >= fetchOffset) { - floorOffset = floorEntry.getKey(); - } - - NavigableMap subMap = cachedState.subMap(floorOffset, true, baseOffset, false); - if (subMap.isEmpty()) { - // No stale batches to archive. - return; - } - - // Though such batches can be removed from the cache, but it is better to archive them so - // that they are never acquired again. - boolean anyRecordArchived = archiveRecords(fetchOffset, baseOffset, subMap, RecordState.AVAILABLE); - - // If we have transitioned the state of any batch/offset from AVAILABLE to ARCHIVED, - // then there is a chance that the next fetch offset can change. - if (anyRecordArchived) { - updateFindNextFetchOffset(true); - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * The method archives the available records in the cached state that are before the log start offset. - * - * @param logStartOffset The log start offset. - * @return A boolean which indicates whether any record is archived or not. - */ private boolean archiveAvailableRecordsOnLsoMovement(long logStartOffset) { - lock.writeLock().lock(); - try { - return archiveRecords(startOffset, logStartOffset, cachedState, RecordState.AVAILABLE); - } finally { - lock.writeLock().unlock(); - } - } - - /** - * The method archive the records in a given state in the map that are before the end offset. - * - * @param startOffset The offset from which the records should be archived. - * @param endOffset The offset before which the records should be archived. - * @param map The map containing the in-flight records. - * @param initialState The initial state of the records to be archived. - * @return A boolean which indicates whether any record is archived or not. - */ - private boolean archiveRecords(long startOffset, long endOffset, NavigableMap map, RecordState initialState) { lock.writeLock().lock(); try { boolean isAnyOffsetArchived = false, isAnyBatchArchived = false; - for (Map.Entry entry : map.entrySet()) { + for (Map.Entry entry : cachedState.entrySet()) { long batchStartOffset = entry.getKey(); - // We do not need to transition state of batches/offsets that are later than the offset. - if (batchStartOffset >= endOffset) { + // We do not need to transition state of batches/offsets that are later than the new log start offset. + if (batchStartOffset >= logStartOffset) { break; } InFlightBatch inFlightBatch = entry.getValue(); - boolean fullMatch = checkForFullMatch(inFlightBatch, startOffset, endOffset - 1); + boolean fullMatch = checkForFullMatch(inFlightBatch, startOffset, logStartOffset - 1); // Maintain state per offset if the inflight batch is not a full match or the offset state is managed. if (!fullMatch || inFlightBatch.offsetState() != null) { - log.debug("Subset or offset tracked batch record found while trying to update offsets " - + "and cached state map, batch: {}, offsets to update - first: {}, last: {} " - + "for the share partition: {}-{}", inFlightBatch, startOffset, endOffset - 1, - groupId, topicIdPartition); + log.debug("Subset or offset tracked batch record found while trying to update offsets and cached" + + " state map due to LSO movement, batch: {}, offsets to update - " + + "first: {}, last: {} for the share partition: {}-{}", inFlightBatch, startOffset, + logStartOffset - 1, groupId, topicIdPartition); if (inFlightBatch.offsetState() == null) { - if (inFlightBatch.batchState() != initialState) { + if (inFlightBatch.batchState() != RecordState.AVAILABLE) { continue; } inFlightBatch.maybeInitializeOffsetStateUpdate(); } - isAnyOffsetArchived = archivePerOffsetBatchRecords(inFlightBatch, startOffset, endOffset - 1, initialState) || isAnyOffsetArchived; + isAnyOffsetArchived = isAnyOffsetArchived || archivePerOffsetBatchRecords(inFlightBatch, startOffset, logStartOffset - 1); continue; } // The in-flight batch is a full match hence change the state of the complete batch. - isAnyBatchArchived = archiveCompleteBatch(inFlightBatch, initialState) || isAnyBatchArchived; + isAnyBatchArchived = isAnyBatchArchived || archiveCompleteBatch(inFlightBatch); } return isAnyOffsetArchived || isAnyBatchArchived; } finally { @@ -1250,9 +1031,7 @@ private boolean archiveRecords(long startOffset, long endOffset, NavigableMap throw new LeaderNotAvailableException( String.format("Share partition is already initializing %s-%s", groupId, topicIdPartition)); - case FENCED -> throw new LeaderNotAvailableException( + case FENCED -> throw new FencedStateEpochException( String.format("Share partition is fenced %s-%s", groupId, topicIdPartition)); case EMPTY -> // The share partition is not yet initialized. @@ -1461,83 +1176,11 @@ private boolean initializedOrThrowException() { }; } - // Method to reduce the window that tracks gaps in the cachedState - private void maybeUpdatePersisterGapWindowStartOffset(long offset) { - lock.writeLock().lock(); - try { - if (persisterReadResultGapWindow != null) { - // When last cached batch for persister's read gap window is acquired, then endOffset is - // same as the gapWindow's endOffset, but the gap offset to update in the method call - // is endOffset + 1. Hence, do not update the gap start offset if the request offset - // is ahead of the endOffset. - if (persisterReadResultGapWindow.endOffset() == endOffset && offset <= persisterReadResultGapWindow.endOffset()) { - persisterReadResultGapWindow.gapStartOffset(offset); - } else { - // The persister's read gap window is not valid anymore as the end offset has moved - // beyond the read gap window's endOffset. Hence, set the gap window to null. - persisterReadResultGapWindow = null; - } - } - } finally { - lock.writeLock().unlock(); - } - } - - /** - * The method calculates the last offset and maximum records to acquire. The adjustment is needed - * to ensure that the records acquired do not exceed the maximum in-flight records limit. - * - * @param fetchOffset The offset from which the records are fetched. - * @param maxFetchRecords The maximum number of records to acquire. - * @param lastOffset The last offset to acquire records to, which is the last offset of the fetched batch. - * @return LastOffsetAndMaxRecords object, containing the last offset to acquire and the maximum records to acquire. - */ - private LastOffsetAndMaxRecords lastOffsetAndMaxRecordsToAcquire(long fetchOffset, int maxFetchRecords, long lastOffset) { - // There can always be records fetched exceeding the max in-flight records limit. Hence, - // we need to check if the share partition has reached the max in-flight records limit - // and only acquire limited records. - int maxRecordsToAcquire; - long lastOffsetToAcquire = lastOffset; - lock.readLock().lock(); - try { - int inFlightRecordsCount = numInFlightRecords(); - // Take minimum of maxFetchRecords and remaining capacity to fill max in-flight records limit. - maxRecordsToAcquire = Math.min(maxFetchRecords, maxInFlightRecords - inFlightRecordsCount); - // If the maxRecordsToAcquire is less than or equal to 0, then ideally (check exists to not - // fetch records for share partitions which are at capacity) the fetch must be happening - // in-between the in-flight batches i.e. some in-flight records have been released (marked - // re-available). In such case, last offset to acquire should be adjusted to the endOffset - // of the share partition, if not adjusted then the records can be acquired post the endOffset. - // For example, if 30 records are already acquired i.e. [0-29] and single offset 20 is released - // then the next fetch request will be at 20. Difference from endOffset will be 10, which - // means that some offset past the endOffset can be acquired (21-29 are already acquired). - // Hence, the lastOffsetToAcquire should be adjusted to the endOffset. - if (maxRecordsToAcquire <= 0) { - if (fetchOffset <= endOffset()) { - // Adjust the max records to acquire to the capacity available to fill the max - // in-flight records limit. This can happen when the fetch is happening in-between - // the in-flight batches and the share partition has reached the max in-flight records limit. - maxRecordsToAcquire = Math.min(maxFetchRecords, (int) (endOffset() - fetchOffset + 1)); - // Adjust the last offset to acquire to the endOffset of the share partition. - lastOffsetToAcquire = endOffset(); - } else { - // The share partition is already at max in-flight records, hence cannot acquire more records. - log.debug("Share partition {}-{} has reached max in-flight records limit: {}. Cannot acquire more records, inflight records count: {}", - groupId, topicIdPartition, maxInFlightRecords, inFlightRecordsCount); - } - } - } finally { - lock.readLock().unlock(); - } - return new LastOffsetAndMaxRecords(lastOffsetToAcquire, maxRecordsToAcquire); - } - - private ShareAcquiredRecords acquireNewBatchRecords( + private AcquiredRecords acquireNewBatchRecords( String memberId, Iterable batches, long firstOffset, long lastOffset, - int batchSize, int maxFetchRecords ) { lock.writeLock().lock(); @@ -1551,112 +1194,35 @@ private ShareAcquiredRecords acquireNewBatchRecords( firstAcquiredOffset = endOffset; } - // Check how many records can be acquired from the batch. + // Check how many messages can be acquired from the batch. long lastAcquiredOffset = lastOffset; if (maxFetchRecords < lastAcquiredOffset - firstAcquiredOffset + 1) { - // The max records to acquire is less than the complete available batches hence + // The max messages to acquire is less than the complete available batches hence // limit the acquired records. The last offset shall be the batches last offset - // which falls under the max records limit. As the max fetch records is the soft - // limit, the last offset can be higher than the max records. + // which falls under the max messages limit. As the max fetch records is the soft + // limit, the last offset can be higher than the max messages. lastAcquiredOffset = lastOffsetFromBatchWithRequestOffset(batches, firstAcquiredOffset + maxFetchRecords - 1); - // If the initial read gap offset window is active then it's not guaranteed that the - // batches align on batch boundaries. Hence, reset to last offset itself if the batch's - // last offset is greater than the last offset for acquisition, else there could be - // a situation where the batch overlaps with the initial read gap offset window batch. - // For example, if the initial read gap offset window is 10-30 i.e. gapWindow's - // startOffset is 10 and endOffset is 30, and the first persister's read batch is 15-30. - // Say first fetched batch from log is 10-30 and maxFetchRecords is 1, then the lastOffset - // in this method call would be 14. As the maxFetchRecords is lesser than the batch, - // hence last batch offset for request offset is fetched. In this example it will - // be 30, hence check if the initial read gap offset window is active and the last acquired - // offset should be adjusted to 14 instead of 30. - if (isPersisterReadGapWindowActive() && lastAcquiredOffset > lastOffset) { - lastAcquiredOffset = lastOffset; - } } - // Create batches of acquired records. - List acquiredRecords = createBatches(memberId, batches, firstAcquiredOffset, lastAcquiredOffset, batchSize); + // Schedule acquisition lock timeout for the batch. + AcquisitionLockTimerTask timerTask = scheduleAcquisitionLockTimeout(memberId, firstAcquiredOffset, lastAcquiredOffset); + // Add the new batch to the in-flight records along with the acquisition lock timeout task for the batch. + cachedState.put(firstAcquiredOffset, new InFlightBatch( + memberId, + firstAcquiredOffset, + lastAcquiredOffset, + RecordState.ACQUIRED, + 1, + timerTask)); // if the cachedState was empty before acquiring the new batches then startOffset needs to be updated if (cachedState.firstKey() == firstAcquiredOffset) { startOffset = firstAcquiredOffset; } - - // If the new batch acquired is part of a gap in the cachedState, then endOffset should not be updated. - // Ex. if startOffset is 10, endOffset is 30, there is a gap from 10 to 20, and an inFlight batch from 21 to 30. - // In this case, the nextFetchOffset results in 10 and the records are fetched. A new batch is acquired from - // 10 to 20, but the endOffset remains at 30. - if (lastAcquiredOffset > endOffset) { - endOffset = lastAcquiredOffset; - } - maybeUpdatePersisterGapWindowStartOffset(lastAcquiredOffset + 1); - return new ShareAcquiredRecords(acquiredRecords, (int) (lastAcquiredOffset - firstAcquiredOffset + 1)); - } finally { - lock.writeLock().unlock(); - } - } - - private List createBatches( - String memberId, - Iterable batches, - long firstAcquiredOffset, - long lastAcquiredOffset, - int batchSize - ) { - lock.writeLock().lock(); - try { - List result = new ArrayList<>(); - long currentFirstOffset = firstAcquiredOffset; - // No split of batches is required if the batch size is greater than records which - // can be acquired, else split the batch into multiple batches. - if (lastAcquiredOffset - firstAcquiredOffset + 1 > batchSize) { - // The batch is split into multiple batches considering batch size. - // Note: Try reading only the baseOffset of the batch and avoid reading the lastOffset - // as lastOffset call of RecordBatch is expensive (loads headers). - for (RecordBatch batch : batches) { - long batchBaseOffset = batch.baseOffset(); - // Check if the batch is already past the last acquired offset then break. - if (batchBaseOffset > lastAcquiredOffset) { - // Break the loop and the last batch will be processed outside the loop. - break; - } - - // Create new batch once the batch size is reached. - if (batchBaseOffset - currentFirstOffset >= batchSize) { - result.add(new AcquiredRecords() - .setFirstOffset(currentFirstOffset) - .setLastOffset(batchBaseOffset - 1) - .setDeliveryCount((short) 1)); - currentFirstOffset = batchBaseOffset; - } - } - } - // Add the last batch or the only batch if the batch size is greater than the records which - // can be acquired. - result.add(new AcquiredRecords() - .setFirstOffset(currentFirstOffset) + endOffset = lastAcquiredOffset; + return new AcquiredRecords() + .setFirstOffset(firstAcquiredOffset) .setLastOffset(lastAcquiredOffset) - .setDeliveryCount((short) 1)); - - result.forEach(acquiredRecords -> { - // Schedule acquisition lock timeout for the batch. - AcquisitionLockTimerTask timerTask = scheduleAcquisitionLockTimeout(memberId, acquiredRecords.firstOffset(), acquiredRecords.lastOffset()); - // Add the new batch to the in-flight records along with the acquisition lock timeout task for the batch. - cachedState.put(acquiredRecords.firstOffset(), new InFlightBatch( - timer, - time, - memberId, - acquiredRecords.firstOffset(), - acquiredRecords.lastOffset(), - RecordState.ACQUIRED, - 1, - timerTask, - timeoutHandler, - sharePartitionMetrics)); - // Update the in-flight batch message count metrics for the share partition. - sharePartitionMetrics.recordInFlightBatchMessageCount(acquiredRecords.lastOffset() - acquiredRecords.firstOffset() + 1); - }); - return result; + .setDeliveryCount((short) 1); } finally { lock.writeLock().unlock(); } @@ -1672,7 +1238,7 @@ private int acquireSubsetBatchRecords( lock.writeLock().lock(); int acquiredCount = 0; try { - for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { // For the first batch which might have offsets prior to the request base // offset i.e. cached batch of 10-14 offsets and request batch of 12-13. if (offsetState.getKey() < requestFirstOffset) { @@ -1684,15 +1250,15 @@ private int acquireSubsetBatchRecords( break; } - if (offsetState.getValue().state() != RecordState.AVAILABLE || offsetState.getValue().hasOngoingStateTransition()) { + if (offsetState.getValue().state != RecordState.AVAILABLE || offsetState.getValue().hasOngoingStateTransition()) { log.trace("The offset {} is not available in share partition: {}-{}, skipping: {}", offsetState.getKey(), groupId, topicIdPartition, inFlightBatch); continue; } - InFlightState updateResult = offsetState.getValue().tryUpdateState(RecordState.ACQUIRED, DeliveryCountOps.INCREASE, - maxDeliveryCount, memberId); - if (updateResult == null || updateResult.state() != RecordState.ACQUIRED) { + InFlightState updateResult = offsetState.getValue().tryUpdateState(RecordState.ACQUIRED, true, maxDeliveryCount, + memberId); + if (updateResult == null) { log.trace("Unable to acquire records for the offset: {} in batch: {}" + " for the share partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); @@ -1707,7 +1273,7 @@ private int acquireSubsetBatchRecords( result.add(new AcquiredRecords() .setFirstOffset(offsetState.getKey()) .setLastOffset(offsetState.getKey()) - .setDeliveryCount((short) offsetState.getValue().deliveryCount())); + .setDeliveryCount((short) offsetState.getValue().deliveryCount)); acquiredCount++; } } finally { @@ -1757,12 +1323,17 @@ private Map fetchRecordStateMapForAcknowledgementBatch( } private static RecordState fetchRecordState(byte acknowledgeType) { - return switch (acknowledgeType) { - case 1 /* ACCEPT */ -> RecordState.ACKNOWLEDGED; - case 2 /* RELEASE */ -> RecordState.AVAILABLE; - case 3, 0 /* REJECT / GAP */ -> RecordState.ARCHIVED; - default -> throw new IllegalArgumentException("Invalid acknowledge type: " + acknowledgeType); - }; + switch (acknowledgeType) { + case 1 /* ACCEPT */: + return RecordState.ACKNOWLEDGED; + case 2 /* RELEASE */: + return RecordState.AVAILABLE; + case 3 /* REJECT */: + case 0 /* GAP */: + return RecordState.ARCHIVED; + default: + throw new IllegalArgumentException("Invalid acknowledge type: " + acknowledgeType); + } } private NavigableMap fetchSubMapForAcknowledgementBatch( @@ -1795,7 +1366,7 @@ private NavigableMap fetchSubMapForAcknowledgementBatch( NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, batch.lastOffset(), true); // Validate if the request batch has the first offset greater than the last offset of the last // fetched cached batch, then there will be no offsets in the request that can be acknowledged. - if (subMap.lastEntry().getValue().lastOffset() < batch.firstOffset()) { + if (subMap.lastEntry().getValue().lastOffset < batch.firstOffset()) { log.debug("Request batch: {} has offsets which are not found for share partition: {}-{}", batch, groupId, topicIdPartition); throw new InvalidRequestException("Batch record not found. The first offset in request is past acquired records."); } @@ -1803,7 +1374,7 @@ private NavigableMap fetchSubMapForAcknowledgementBatch( // Validate if the request batch has the last offset greater than the last offset of // the last fetched cached batch, then there will be offsets in the request than cannot // be found in the fetched batches. - if (batch.lastOffset() > subMap.lastEntry().getValue().lastOffset()) { + if (batch.lastOffset() > subMap.lastEntry().getValue().lastOffset) { log.debug("Request batch: {} has offsets which are not found for share partition: {}-{}", batch, groupId, topicIdPartition); throw new InvalidRequestException("Batch record not found. The last offset in request is past acquired records."); } @@ -1819,7 +1390,8 @@ private Optional acknowledgeBatchRecords( ShareAcknowledgementBatch batch, Map recordStateMap, NavigableMap subMap, - List persisterBatches + final List updatedStates, + List stateBatches ) { Optional throwable; lock.writeLock().lock(); @@ -1843,12 +1415,6 @@ private Optional acknowledgeBatchRecords( if (throwable.isPresent()) { return throwable; } - - if (inFlightBatch.batchHasOngoingStateTransition()) { - log.debug("The batch has on-going transition, batch: {} for the share " - + "partition: {}-{}", inFlightBatch, groupId, topicIdPartition); - return Optional.of(new InvalidRecordStateException("The record state is invalid. The acknowledgement of delivery could not be completed.")); - } } // Determine if the in-flight batch is a full match from the request batch. @@ -1881,11 +1447,11 @@ private Optional acknowledgeBatchRecords( } throwable = acknowledgePerOffsetBatchRecords(memberId, batch, inFlightBatch, - recordStateMap, persisterBatches); + recordStateMap, updatedStates, stateBatches); } else { // The in-flight batch is a full match hence change the state of the complete batch. throwable = acknowledgeCompleteBatch(batch, inFlightBatch, - recordStateMap.get(batch.firstOffset()), persisterBatches); + recordStateMap.get(batch.firstOffset()), updatedStates, stateBatches); } if (throwable.isPresent()) { @@ -1922,14 +1488,15 @@ private Optional acknowledgePerOffsetBatchRecords( ShareAcknowledgementBatch batch, InFlightBatch inFlightBatch, Map recordStateMap, - List persisterBatches + List updatedStates, + List stateBatches ) { lock.writeLock().lock(); try { // Fetch the first record state from the map to be used as default record state in case the // offset record state is not provided by client. RecordState recordStateDefault = recordStateMap.get(batch.firstOffset()); - for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { + for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { // 1. For the first batch which might have offsets prior to the request base // offset i.e. cached batch of 10-14 offsets and request batch of 12-13. @@ -1943,24 +1510,16 @@ private Optional acknowledgePerOffsetBatchRecords( break; } - if (offsetState.getValue().state() != RecordState.ACQUIRED) { + if (offsetState.getValue().state != RecordState.ACQUIRED) { log.debug("The offset is not acquired, offset: {} batch: {} for the share" + " partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); return Optional.of(new InvalidRecordStateException( - "The offset cannot be acknowledged. The offset is not acquired.")); - } - - if (offsetState.getValue().hasOngoingStateTransition()) { - log.debug("The offset has on-going transition, offset: {} batch: {} for the share" - + " partition: {}-{}", offsetState.getKey(), inFlightBatch, groupId, - topicIdPartition); - return Optional.of(new InvalidRecordStateException( - "The record state is invalid. The acknowledgement of delivery could not be completed.")); + "The batch cannot be acknowledged. The offset is not acquired.")); } // Check if member id is the owner of the offset. - if (!offsetState.getValue().memberId().equals(memberId)) { + if (!offsetState.getValue().memberId.equals(memberId)) { log.debug("Member {} is not the owner of offset: {} in batch: {} for the share" + " partition: {}-{}", memberId, offsetState.getKey(), inFlightBatch, groupId, topicIdPartition); @@ -1975,7 +1534,7 @@ private Optional acknowledgePerOffsetBatchRecords( recordStateDefault; InFlightState updateResult = offsetState.getValue().startStateTransition( recordState, - DeliveryCountOps.NO_OP, + false, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -1986,10 +1545,16 @@ private Optional acknowledgePerOffsetBatchRecords( return Optional.of(new InvalidRecordStateException( "Unable to acknowledge records for the batch")); } - // Successfully updated the state of the offset and created a persister state batch for write to persister. - persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(offsetState.getKey(), - offsetState.getKey(), updateResult.state().id(), (short) updateResult.deliveryCount()))); - // Do not update the nextFetchOffset as the offset has not completed the transition yet. + // Successfully updated the state of the offset. + updatedStates.add(updateResult); + stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), + updateResult.state.id, (short) updateResult.deliveryCount)); + // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. + // This should not change the next fetch offset because the record is not available for acquisition + if (recordState == RecordState.AVAILABLE + && updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); + } } } finally { lock.writeLock().unlock(); @@ -2001,7 +1566,8 @@ private Optional acknowledgeCompleteBatch( ShareAcknowledgementBatch batch, InFlightBatch inFlightBatch, RecordState recordState, - List persisterBatches + List updatedStates, + List stateBatches ) { lock.writeLock().lock(); try { @@ -2021,7 +1587,7 @@ private Optional acknowledgeCompleteBatch( // is only important when the batch is acquired. InFlightState updateResult = inFlightBatch.startBatchStateTransition( recordState, - DeliveryCountOps.NO_OP, + false, this.maxDeliveryCount, EMPTY_MEMBER_ID ); @@ -2033,10 +1599,18 @@ private Optional acknowledgeCompleteBatch( new InvalidRecordStateException("Unable to acknowledge records for the batch")); } - // Successfully updated the state of the batch and created a persister state batch for write to persister. - persisterBatches.add(new PersisterBatch(updateResult, new PersisterStateBatch(inFlightBatch.firstOffset(), - inFlightBatch.lastOffset(), updateResult.state().id(), (short) updateResult.deliveryCount()))); - // Do not update the next fetch offset as the batch has not completed the transition yet. + // Successfully updated the state of the batch. + updatedStates.add(updateResult); + stateBatches.add( + new PersisterStateBatch(inFlightBatch.firstOffset, inFlightBatch.lastOffset, + updateResult.state.id, (short) updateResult.deliveryCount)); + + // If the maxDeliveryCount limit has been exceeded, the record will be transitioned to ARCHIVED state. + // This should not change the nextFetchOffset because the record is not available for acquisition + if (recordState == RecordState.AVAILABLE + && updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); + } } finally { lock.writeLock().unlock(); } @@ -2077,7 +1651,8 @@ SharePartitionState partitionState() { void rollbackOrProcessStateUpdates( CompletableFuture future, Throwable throwable, - List persisterBatches + List updatedStates, + List stateBatches ) { lock.writeLock().lock(); try { @@ -2085,17 +1660,12 @@ void rollbackOrProcessStateUpdates( // Log in DEBUG to avoid flooding of logs for a faulty client. log.debug("Request failed for updating state, rollback any changed state" + " for the share partition: {}-{}", groupId, topicIdPartition); - persisterBatches.forEach(persisterBatch -> { - persisterBatch.updatedState.completeStateTransition(false); - if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { - updateFindNextFetchOffset(true); - } - }); + updatedStates.forEach(state -> state.completeStateTransition(false)); future.completeExceptionally(throwable); return; } - if (persisterBatches.isEmpty()) { + if (stateBatches.isEmpty() && updatedStates.isEmpty()) { future.complete(null); return; } @@ -2103,63 +1673,46 @@ void rollbackOrProcessStateUpdates( lock.writeLock().unlock(); } - writeShareGroupState(persisterBatches.stream().map(PersisterBatch::stateBatch).toList()) - .whenComplete((result, exception) -> { - // There can be a pending delayed share fetch requests for the share partition which are waiting - // on the startOffset to move ahead, hence track if the state is updated in the cache. If - // yes, then notify the delayed share fetch purgatory to complete the pending requests. - boolean cacheStateUpdated = false; - lock.writeLock().lock(); - try { - if (exception != null) { - log.debug("Failed to write state to persister for the share partition: {}-{}", - groupId, topicIdPartition, exception); - // In case of failure when transition state is rolled back then it should be rolled - // back to ACQUIRED state, unless acquisition lock for the state has expired. - persisterBatches.forEach(persisterBatch -> { - persisterBatch.updatedState.completeStateTransition(false); - if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { - updateFindNextFetchOffset(true); - } - }); - future.completeExceptionally(exception); - return; - } - - log.trace("State change request successful for share partition: {}-{}", - groupId, topicIdPartition); - persisterBatches.forEach(persisterBatch -> { - persisterBatch.updatedState.completeStateTransition(true); - if (persisterBatch.updatedState.state() == RecordState.AVAILABLE) { - updateFindNextFetchOffset(true); - } - }); - // Update the cached state and start and end offsets after acknowledging/releasing the acquired records. - cacheStateUpdated = maybeUpdateCachedStateAndOffsets(); - future.complete(null); - } finally { - lock.writeLock().unlock(); - // Maybe complete the delayed share fetch request if the state has been changed in cache - // which might have moved start offset ahead. Hence, the pending delayed share fetch - // request can be completed. The call should be made outside the lock to avoid deadlock. - maybeCompleteDelayedShareFetchRequest(cacheStateUpdated); + writeShareGroupState(stateBatches).whenComplete((result, exception) -> { + lock.writeLock().lock(); + try { + if (exception != null) { + log.error("Failed to write state to persister for the share partition: {}-{}", + groupId, topicIdPartition, exception); + updatedStates.forEach(state -> state.completeStateTransition(false)); + future.completeExceptionally(exception); + return; } - }); + + log.trace("State change request successful for share partition: {}-{}", + groupId, topicIdPartition); + updatedStates.forEach(state -> { + state.completeStateTransition(true); + // Cancel the acquisition lock timeout task for the state since it is acknowledged/released successfully. + state.cancelAndClearAcquisitionLockTimeoutTask(); + }); + // Update the cached state and start and end offsets after acknowledging/releasing the acquired records. + maybeUpdateCachedStateAndOffsets(); + future.complete(null); + } finally { + lock.writeLock().unlock(); + } + }); } - private boolean maybeUpdateCachedStateAndOffsets() { + private void maybeUpdateCachedStateAndOffsets() { lock.writeLock().lock(); try { if (!canMoveStartOffset()) { - return false; + return; } // This will help to find the next position for the startOffset. // The new position of startOffset will be lastOffsetAcknowledged + 1 long lastOffsetAcknowledged = findLastOffsetAcknowledged(); - // If lastOffsetAcknowledged is -1, this means we cannot move startOffset ahead + // If lastOffsetAcknowledged is -1, this means we cannot move out startOffset ahead if (lastOffsetAcknowledged == -1) { - return false; + return; } // This is true if all records in the cachedState have been acknowledged (either Accept or Reject). @@ -2170,7 +1723,7 @@ private boolean maybeUpdateCachedStateAndOffsets() { endOffset = lastCachedOffset + 1; cachedState.clear(); // Nothing further to do. - return true; + return; } /* @@ -2180,30 +1733,17 @@ private boolean maybeUpdateCachedStateAndOffsets() { a) Only full batches can be removed from the cachedState, For example if there is batch (0-99) and 0-49 records are acknowledged (ACCEPT or REJECT), the first 50 records will not be removed from the cachedState. Instead, the startOffset will be moved to 50, but the batch will only - be removed once all the records (0-99) are acknowledged (ACCEPT or REJECT). + be removed once all the messages (0-99) are acknowledged (ACCEPT or REJECT). */ // Since only a subMap will be removed, we need to find the first and last keys of that subMap long firstKeyToRemove = cachedState.firstKey(); long lastKeyToRemove; NavigableMap.Entry entry = cachedState.floorEntry(lastOffsetAcknowledged); - // If the lastOffsetAcknowledged is equal to the last offset of entry, then the entire batch can potentially be removed. if (lastOffsetAcknowledged == entry.getValue().lastOffset()) { startOffset = cachedState.higherKey(lastOffsetAcknowledged); - if (isPersisterReadGapWindowActive()) { - // This case will arise if we have a situation where there is an acquirable gap after the lastOffsetAcknowledged. - // Ex, the cachedState has following state batches -> {(0, 10), (11, 20), (31,40)} and all these batches are acked. - // There is a gap from 21 to 30. Let the gapWindow's gapStartOffset be 21. In this case, - // lastOffsetAcknowledged will be 20, but we cannot simply move the start offset to the first offset - // of next cachedState batch (next cachedState batch is 31 to 40). There is an acquirable gap in between (21 to 30) - // and The startOffset should be at 21. Hence, we set startOffset to the minimum of gapWindow.gapStartOffset - // and higher key of lastOffsetAcknowledged - startOffset = Math.min(persisterReadResultGapWindow.gapStartOffset(), startOffset); - } lastKeyToRemove = entry.getKey(); } else { - // The code will reach this point only if lastOffsetAcknowledged is in the middle of some stateBatch. In this case - // we can simply move the startOffset to the next offset of lastOffsetAcknowledged and should consider any read gap offsets. startOffset = lastOffsetAcknowledged + 1; if (entry.getKey().equals(cachedState.firstKey())) { // If the first batch in cachedState has some records yet to be acknowledged, @@ -2215,16 +1755,17 @@ be removed once all the records (0-99) are acknowledged (ACCEPT or REJECT). } if (lastKeyToRemove != -1) { - cachedState.subMap(firstKeyToRemove, true, lastKeyToRemove, true).clear(); + NavigableMap subMap = cachedState.subMap(firstKeyToRemove, true, lastKeyToRemove, true); + for (Long key : subMap.keySet()) { + cachedState.remove(key); + } } - return true; } finally { lock.writeLock().unlock(); } } - // Visible for testing. - boolean canMoveStartOffset() { + private boolean canMoveStartOffset() { // The Share Partition Start Offset may be moved after acknowledge request is complete. // The following conditions need to be met to move the startOffset: // 1. When the cachedState is not empty. @@ -2236,37 +1777,16 @@ boolean canMoveStartOffset() { NavigableMap.Entry entry = cachedState.floorEntry(startOffset); if (entry == null) { - // The start offset is not found in the cached state when there is a gap starting at the start offset. - // For example, if the start offset is 10 and the cached state has batches -> { (21, 30), (31, 40) }. - // This case arises only when the share partition is initialized and the read state response results in - // state batches containing gaps. This situation is possible in the case where in the previous instance - // of this share partition, the gap offsets were fetched but not acknowledged, and the next batch of offsets - // were fetched as well as acknowledged. In the above example, possibly in the previous instance of the share - // partition, the batch 10-20 was fetched but not acknowledged and the batch 21-30 was fetched and acknowledged. - // Thus, the persister has no clue about what happened with the batch 10-20. During the re-initialization of - // the share partition, the start offset is set to 10 and the cached state has the batch 21-30, resulting in a gap. - log.debug("The start offset: {} is not found in the cached state for share partition: {}-{} " + - "as there is an acquirable gap at the beginning. Cannot move the start offset.", startOffset, groupId, topicIdPartition); + log.error("The start offset: {} is not found in the cached state for share partition: {}-{}." + + " Cannot move the start offset.", startOffset, groupId, topicIdPartition); return false; } - boolean isBatchState = entry.getValue().offsetState() == null; - boolean isOngoingTransition = isBatchState ? - entry.getValue().batchHasOngoingStateTransition() : - entry.getValue().offsetState().get(startOffset).hasOngoingStateTransition(); - if (isOngoingTransition) { - return false; - } - - RecordState startOffsetState = isBatchState ? + RecordState startOffsetState = entry.getValue().offsetState == null ? entry.getValue().batchState() : entry.getValue().offsetState().get(startOffset).state(); return isRecordStateAcknowledged(startOffsetState); } - private boolean isPersisterReadGapWindowActive() { - return persisterReadResultGapWindow != null && persisterReadResultGapWindow.endOffset() == endOffset; - } - /** * The record state is considered acknowledged if it is either acknowledged or archived. * These are terminal states for the record. @@ -2279,26 +1799,20 @@ private boolean isRecordStateAcknowledged(RecordState recordState) { return recordState == RecordState.ACKNOWLEDGED || recordState == RecordState.ARCHIVED; } - // Visible for testing - long findLastOffsetAcknowledged() { - long lastOffsetAcknowledged = -1; + private long findLastOffsetAcknowledged() { lock.readLock().lock(); + long lastOffsetAcknowledged = -1; try { for (NavigableMap.Entry entry : cachedState.entrySet()) { InFlightBatch inFlightBatch = entry.getValue(); - - if (isPersisterReadGapWindowActive() && inFlightBatch.lastOffset() >= persisterReadResultGapWindow.gapStartOffset()) { - return lastOffsetAcknowledged; - } - if (inFlightBatch.offsetState() == null) { - if (inFlightBatch.batchHasOngoingStateTransition() || !isRecordStateAcknowledged(inFlightBatch.batchState())) { + if (!isRecordStateAcknowledged(inFlightBatch.batchState())) { return lastOffsetAcknowledged; } lastOffsetAcknowledged = inFlightBatch.lastOffset(); } else { - for (Map.Entry offsetState : inFlightBatch.offsetState().entrySet()) { - if (offsetState.getValue().hasOngoingStateTransition() || !isRecordStateAcknowledged(offsetState.getValue().state())) { + for (Map.Entry offsetState : inFlightBatch.offsetState.entrySet()) { + if (!isRecordStateAcknowledged(offsetState.getValue().state())) { return lastOffsetAcknowledged; } lastOffsetAcknowledged = offsetState.getKey(); @@ -2344,9 +1858,9 @@ CompletableFuture writeShareGroupState(List stateBatc persister.writeState(new WriteShareGroupStateParameters.Builder() .setGroupTopicPartitionData(new GroupTopicPartitionData.Builder() .setGroupId(this.groupId) - .setTopicsData(List.of(new TopicData<>(topicIdPartition.topicId(), - List.of(PartitionFactory.newPartitionStateBatchData( - topicIdPartition.partition(), stateEpoch, startOffset(), leaderEpoch, stateBatches)))) + .setTopicsData(Collections.singletonList(new TopicData<>(topicIdPartition.topicId(), + Collections.singletonList(PartitionFactory.newPartitionStateBatchData( + topicIdPartition.partition(), stateEpoch, startOffset, leaderEpoch, stateBatches)))) ).build()).build()) .whenComplete((result, exception) -> { if (exception != null) { @@ -2377,8 +1891,8 @@ CompletableFuture writeShareGroupState(List stateBatc PartitionErrorData partitionData = state.partitions().get(0); if (partitionData.errorCode() != Errors.NONE.code()) { KafkaException ex = fetchPersisterError(partitionData.errorCode(), partitionData.errorMessage()); - maybeLogError(String.format("Failed to write the share group state for share partition: %s-%s due to exception", - groupId, topicIdPartition), Errors.forCode(partitionData.errorCode()), ex); + log.error("Failed to write the share group state for share partition: {}-{} due to exception", + groupId, topicIdPartition, ex); future.completeExceptionally(ex); return; } @@ -2389,18 +1903,22 @@ CompletableFuture writeShareGroupState(List stateBatc private KafkaException fetchPersisterError(short errorCode, String errorMessage) { Errors error = Errors.forCode(errorCode); - return switch (error) { - case NOT_COORDINATOR, COORDINATOR_NOT_AVAILABLE, COORDINATOR_LOAD_IN_PROGRESS -> - new CoordinatorNotAvailableException(errorMessage); - case GROUP_ID_NOT_FOUND -> - new GroupIdNotFoundException(errorMessage); - case UNKNOWN_TOPIC_OR_PARTITION -> - new UnknownTopicOrPartitionException(errorMessage); - case FENCED_LEADER_EPOCH, FENCED_STATE_EPOCH -> - new NotLeaderOrFollowerException(errorMessage); - default -> - new UnknownServerException(errorMessage); - }; + switch (error) { + case NOT_COORDINATOR: + case COORDINATOR_NOT_AVAILABLE: + case COORDINATOR_LOAD_IN_PROGRESS: + return new CoordinatorNotAvailableException(errorMessage); + case GROUP_ID_NOT_FOUND: + return new GroupIdNotFoundException(errorMessage); + case UNKNOWN_TOPIC_OR_PARTITION: + return new UnknownTopicOrPartitionException(errorMessage); + case FENCED_STATE_EPOCH: + return new FencedStateEpochException(errorMessage); + case FENCED_LEADER_EPOCH: + return new NotLeaderOrFollowerException(errorMessage); + default: + return new UnknownServerException(errorMessage); + } } // Visible for testing @@ -2408,7 +1926,12 @@ AcquisitionLockTimerTask scheduleAcquisitionLockTimeout(String memberId, long fi // The recordLockDuration value would depend on whether the dynamic config SHARE_RECORD_LOCK_DURATION_MS in // GroupConfig.java is set or not. If dynamic config is set, then that is used, otherwise the value of // SHARE_GROUP_RECORD_LOCK_DURATION_MS_CONFIG defined in ShareGroupConfig is used - int recordLockDurationMs = recordLockDurationMsOrDefault(groupConfigManager, groupId, defaultRecordLockDurationMs); + int recordLockDurationMs; + if (groupConfigManager.groupConfig(groupId).isPresent()) { + recordLockDurationMs = groupConfigManager.groupConfig(groupId).get().shareRecordLockDurationMs(); + } else { + recordLockDurationMs = defaultRecordLockDurationMs; + } return scheduleAcquisitionLockTimeout(memberId, firstOffset, lastOffset, recordLockDurationMs); } @@ -2437,56 +1960,44 @@ private AcquisitionLockTimerTask acquisitionLockTimerTask( long lastOffset, long delayMs ) { - return new AcquisitionLockTimerTask(time, delayMs, memberId, firstOffset, lastOffset, releaseAcquisitionLockOnTimeout(), sharePartitionMetrics); + return new AcquisitionLockTimerTask(delayMs, memberId, firstOffset, lastOffset); } - private AcquisitionLockTimeoutHandler releaseAcquisitionLockOnTimeout() { - return (memberId, firstOffset, lastOffset, timerTask) -> { - List stateBatches; - lock.writeLock().lock(); - try { - // Check if timer task is already cancelled. This can happen when concurrent requests - // happen to acknowledge in-flight state and timeout handler is waiting for the lock - // but already cancelled. - if (timerTask.isCancelled()) { - log.debug("Timer task is already cancelled, not executing further."); - return; - } - - Map.Entry floorOffset = cachedState.floorEntry(firstOffset); - if (floorOffset == null) { - log.error("Base offset {} not found for share partition: {}-{}", firstOffset, groupId, topicIdPartition); - return; - } - stateBatches = new ArrayList<>(); - NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, lastOffset, true); - for (Map.Entry entry : subMap.entrySet()) { - InFlightBatch inFlightBatch = entry.getValue(); + private void releaseAcquisitionLockOnTimeout(String memberId, long firstOffset, long lastOffset) { + List stateBatches; + lock.writeLock().lock(); + try { + Map.Entry floorOffset = cachedState.floorEntry(firstOffset); + if (floorOffset == null) { + log.error("Base offset {} not found for share partition: {}-{}", firstOffset, groupId, topicIdPartition); + return; + } + stateBatches = new ArrayList<>(); + NavigableMap subMap = cachedState.subMap(floorOffset.getKey(), true, lastOffset, true); + for (Map.Entry entry : subMap.entrySet()) { + InFlightBatch inFlightBatch = entry.getValue(); - if (inFlightBatch.offsetState() == null + if (inFlightBatch.offsetState() == null && inFlightBatch.batchState() == RecordState.ACQUIRED && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset())) { - // For the case when batch.firstOffset < start offset <= batch.lastOffset, we will be having some - // acquired records that need to move to archived state despite their delivery count. - inFlightBatch.maybeInitializeOffsetStateUpdate(); - } + // For the case when batch.firstOffset < start offset <= batch.lastOffset, we will be having some + // acquired records that need to move to archived state despite their delivery count. + inFlightBatch.maybeInitializeOffsetStateUpdate(); + } - // Case when the state of complete batch is valid - if (inFlightBatch.offsetState() == null) { - releaseAcquisitionLockOnTimeoutForCompleteBatch(inFlightBatch, stateBatches, memberId); - } else { // Case when batch has a valid offset state map. - releaseAcquisitionLockOnTimeoutForPerOffsetBatch(inFlightBatch, stateBatches, memberId, firstOffset, lastOffset); - } + // Case when the state of complete batch is valid + if (inFlightBatch.offsetState() == null) { + releaseAcquisitionLockOnTimeoutForCompleteBatch(inFlightBatch, stateBatches, memberId); + } else { // Case when batch has a valid offset state map. + releaseAcquisitionLockOnTimeoutForPerOffsetBatch(inFlightBatch, stateBatches, memberId, firstOffset, lastOffset); } - } finally { - lock.writeLock().unlock(); } if (!stateBatches.isEmpty()) { writeShareGroupState(stateBatches).whenComplete((result, exception) -> { if (exception != null) { - log.debug("Failed to write the share group state on acquisition lock timeout for share partition: {}-{} memberId: {}", + log.error("Failed to write the share group state on acquisition lock timeout for share partition: {}-{} memberId: {}", groupId, topicIdPartition, memberId, exception); } // Even if write share group state RPC call fails, we will still go ahead with the state transition. @@ -2494,12 +2005,17 @@ && checkForStartOffsetWithinBatch(inFlightBatch.firstOffset(), inFlightBatch.las maybeUpdateCachedStateAndOffsets(); }); } + } finally { + lock.writeLock().unlock(); + } + // Skip null check for stateBatches, it should always be initialized if reached here. + if (!stateBatches.isEmpty()) { // If we have an acquisition lock timeout for a share-partition, then we should check if // there is a pending share fetch request for the share-partition and complete it. - // Skip null check for stateBatches, it should always be initialized if reached here. - maybeCompleteDelayedShareFetchRequest(!stateBatches.isEmpty()); - }; + DelayedShareFetchKey delayedShareFetchKey = new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()); + replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); + } } private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFlightBatch, @@ -2508,7 +2024,7 @@ private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFli if (inFlightBatch.batchState() == RecordState.ACQUIRED) { InFlightState updateResult = inFlightBatch.tryUpdateBatchState( inFlightBatch.lastOffset() < startOffset ? RecordState.ARCHIVED : RecordState.AVAILABLE, - DeliveryCountOps.NO_OP, + false, maxDeliveryCount, EMPTY_MEMBER_ID); if (updateResult == null) { @@ -2517,12 +2033,12 @@ private void releaseAcquisitionLockOnTimeoutForCompleteBatch(InFlightBatch inFli return; } stateBatches.add(new PersisterStateBatch(inFlightBatch.firstOffset(), inFlightBatch.lastOffset(), - updateResult.state().id(), (short) updateResult.deliveryCount())); + updateResult.state.id, (short) updateResult.deliveryCount)); // Cancel the acquisition lock timeout task for the batch since it is completed now. updateResult.cancelAndClearAcquisitionLockTimeoutTask(); - if (updateResult.state() != RecordState.ARCHIVED) { - updateFindNextFetchOffset(true); + if (updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); } return; } @@ -2546,7 +2062,7 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl // No further offsets to process. break; } - if (offsetState.getValue().state() != RecordState.ACQUIRED) { + if (offsetState.getValue().state != RecordState.ACQUIRED) { log.debug("The offset is not in acquired state while release of acquisition lock on timeout, skipping, offset: {} batch: {}" + " for the share partition: {}-{} memberId: {}", offsetState.getKey(), inFlightBatch, groupId, topicIdPartition, memberId); @@ -2554,7 +2070,7 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl } InFlightState updateResult = offsetState.getValue().tryUpdateState( offsetState.getKey() < startOffset ? RecordState.ARCHIVED : RecordState.AVAILABLE, - DeliveryCountOps.NO_OP, + false, maxDeliveryCount, EMPTY_MEMBER_ID); if (updateResult == null) { @@ -2564,23 +2080,17 @@ private void releaseAcquisitionLockOnTimeoutForPerOffsetBatch(InFlightBatch inFl continue; } stateBatches.add(new PersisterStateBatch(offsetState.getKey(), offsetState.getKey(), - updateResult.state().id(), (short) updateResult.deliveryCount())); + updateResult.state.id, (short) updateResult.deliveryCount)); // Cancel the acquisition lock timeout task for the offset since it is completed now. updateResult.cancelAndClearAcquisitionLockTimeoutTask(); - if (updateResult.state() != RecordState.ARCHIVED) { - updateFindNextFetchOffset(true); + if (updateResult.state != RecordState.ARCHIVED) { + findNextFetchOffset.set(true); } } } - private void maybeCompleteDelayedShareFetchRequest(boolean shouldComplete) { - if (shouldComplete) { - replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); - } - } - - private long startOffsetDuringInitialization(long partitionDataStartOffset) { + private long startOffsetDuringInitialization(long partitionDataStartOffset) throws Exception { // Set the state epoch and end offset from the persisted state. if (partitionDataStartOffset != PartitionFactory.UNINITIALIZED_START_OFFSET) { return partitionDataStartOffset; @@ -2602,200 +2112,6 @@ private long startOffsetDuringInitialization(long partitionDataStartOffset) { } } - private ShareAcquiredRecords maybeFilterAbortedTransactionalAcquiredRecords( - FetchPartitionData fetchPartitionData, - FetchIsolation isolationLevel, - ShareAcquiredRecords shareAcquiredRecords - ) { - if (isolationLevel != FetchIsolation.TXN_COMMITTED || fetchPartitionData.abortedTransactions.isEmpty() || fetchPartitionData.abortedTransactions.get().isEmpty()) - return shareAcquiredRecords; - - // When FetchIsolation.TXN_COMMITTED is used as isolation level by the share group, we need to filter any - // transactions that were aborted/did not commit due to timeout. - List result = filterAbortedTransactionalAcquiredRecords(fetchPartitionData.records.batches(), - shareAcquiredRecords.acquiredRecords(), fetchPartitionData.abortedTransactions.get()); - int acquiredCount = 0; - for (AcquiredRecords records : result) { - acquiredCount += (int) (records.lastOffset() - records.firstOffset() + 1); - } - return new ShareAcquiredRecords(result, acquiredCount); - } - - private List filterAbortedTransactionalAcquiredRecords( - Iterable batches, - List acquiredRecords, - List abortedTransactions - ) { - // The record batches that need to be archived in cachedState because they were a part of aborted transactions. - List recordsToArchive = fetchAbortedTransactionRecordBatches(batches, abortedTransactions); - for (RecordBatch recordBatch : recordsToArchive) { - // Archive the offsets/batches in the cached state. - NavigableMap subMap = fetchSubMap(recordBatch); - archiveRecords(recordBatch.baseOffset(), recordBatch.lastOffset() + 1, subMap, RecordState.ACQUIRED); - } - return filterRecordBatchesFromAcquiredRecords(acquiredRecords, recordsToArchive); - } - - private void maybeLogError(String message, Errors receivedError, Throwable wrappedException) { - if (receivedError == Errors.NETWORK_EXCEPTION) { - log.debug(message, wrappedException); - } else { - log.error(message, wrappedException); - } - } - - /** - * This function filters out the offsets present in the acquired records list that are also a part of batches that need to be archived. - * It follows an iterative refinement of acquired records to eliminate batches to be archived. - * @param acquiredRecordsList The list containing acquired records. This list is sorted by the firstOffset of the acquired batch. - * @param batchesToArchive The list containing record batches to archive. This list is sorted by the baseOffset of the record batch. - * @return The list containing filtered acquired records offsets. - */ - List filterRecordBatchesFromAcquiredRecords( - List acquiredRecordsList, - List batchesToArchive - ) { - Iterator batchesToArchiveIterator = batchesToArchive.iterator(); - if (!batchesToArchiveIterator.hasNext()) - return acquiredRecordsList; - List result = new ArrayList<>(); - Iterator acquiredRecordsListIter = acquiredRecordsList.iterator(); - RecordBatch batchToArchive = batchesToArchiveIterator.next(); - AcquiredRecords unresolvedAcquiredRecords = null; - - while (unresolvedAcquiredRecords != null || acquiredRecordsListIter.hasNext()) { - if (unresolvedAcquiredRecords == null) - unresolvedAcquiredRecords = acquiredRecordsListIter.next(); - - long unresolvedFirstOffset = unresolvedAcquiredRecords.firstOffset(); - long unresolvedLastOffset = unresolvedAcquiredRecords.lastOffset(); - short unresolvedDeliveryCount = unresolvedAcquiredRecords.deliveryCount(); - - if (batchToArchive == null) { - result.add(unresolvedAcquiredRecords); - unresolvedAcquiredRecords = null; - continue; - } - - // Non-overlap check - unresolvedFirstOffset offsets lie before the batchToArchive offsets. No need to filter out the offsets in such a scenario. - if (unresolvedLastOffset < batchToArchive.baseOffset()) { - // Offsets in unresolvedAcquiredRecords do not overlap with batchToArchive, hence it should not get filtered out. - result.add(unresolvedAcquiredRecords); - unresolvedAcquiredRecords = null; - } - - // Overlap check - unresolvedFirstOffset offsets overlap with the batchToArchive offsets. We need to filter out the overlapping - // offsets in such a scenario. - if (unresolvedFirstOffset <= batchToArchive.lastOffset() && - unresolvedLastOffset >= batchToArchive.baseOffset()) { - unresolvedAcquiredRecords = null; - // Split the unresolvedFirstOffset into parts - before and after the overlapping record batchToArchive. - if (unresolvedFirstOffset < batchToArchive.baseOffset()) { - // The offsets in unresolvedAcquiredRecords that are present before batchToArchive's baseOffset should not get filtered out. - result.add(new AcquiredRecords() - .setFirstOffset(unresolvedFirstOffset) - .setLastOffset(batchToArchive.baseOffset() - 1) - .setDeliveryCount(unresolvedDeliveryCount)); - } - if (unresolvedLastOffset > batchToArchive.lastOffset()) { - // The offsets in unresolvedAcquiredRecords that are present after batchToArchive's lastOffset should not get filtered out - // and should be taken forward for further processing since they could potentially contain offsets that need to be archived. - unresolvedAcquiredRecords = new AcquiredRecords() - .setFirstOffset(batchToArchive.lastOffset() + 1) - .setLastOffset(unresolvedLastOffset) - .setDeliveryCount(unresolvedDeliveryCount); - } - } - - // There is at least one offset in unresolvedFirstOffset which lies after the batchToArchive. Hence, we move forward - // the batchToArchive to the next element in batchesToArchiveIterator. - if (unresolvedLastOffset > batchToArchive.lastOffset()) { - if (batchesToArchiveIterator.hasNext()) - batchToArchive = batchesToArchiveIterator.next(); - else - batchToArchive = null; - } - } - return result; - } - - /** - * This function fetches the sub map from cachedState where all the offset details present in the recordBatch can be referred to - * OR it gives an exception if those offsets are not present in cachedState. - * @param recordBatch The record batch for which we want to find the sub map. - * @return the sub map containing all the offset details. - */ - private NavigableMap fetchSubMap(RecordBatch recordBatch) { - lock.readLock().lock(); - try { - Map.Entry floorEntry = cachedState.floorEntry(recordBatch.baseOffset()); - if (floorEntry == null) { - log.debug("Fetched batch record {} not found for share partition: {}-{}", recordBatch, groupId, - topicIdPartition); - throw new IllegalStateException( - "Batch record not found. The request batch offsets are not found in the cache."); - } - return cachedState.subMap(floorEntry.getKey(), true, recordBatch.lastOffset(), true); - } finally { - lock.readLock().unlock(); - } - } - - // Visible for testing. - List fetchAbortedTransactionRecordBatches( - Iterable batches, - List abortedTransactions - ) { - PriorityQueue orderedAbortedTransactions = orderedAbortedTransactions(abortedTransactions); - Set abortedProducerIds = new HashSet<>(); - List recordsToArchive = new ArrayList<>(); - - for (RecordBatch currentBatch : batches) { - if (currentBatch.hasProducerId()) { - // remove from the aborted transactions queue, all aborted transactions which have begun before the - // current batch's last offset and add the associated producerIds to the aborted producer set. - while (!orderedAbortedTransactions.isEmpty() && orderedAbortedTransactions.peek().firstOffset() <= currentBatch.lastOffset()) { - FetchResponseData.AbortedTransaction abortedTransaction = orderedAbortedTransactions.poll(); - abortedProducerIds.add(abortedTransaction.producerId()); - } - long producerId = currentBatch.producerId(); - if (containsAbortMarker(currentBatch)) { - abortedProducerIds.remove(producerId); - } else if (isBatchAborted(currentBatch, abortedProducerIds)) { - log.debug("Skipping aborted record batch for share partition: {}-{} with producerId {} and " + - "offsets {} to {}", groupId, topicIdPartition, producerId, currentBatch.baseOffset(), currentBatch.lastOffset()); - recordsToArchive.add(currentBatch); - } - } - } - return recordsToArchive; - } - - private PriorityQueue orderedAbortedTransactions(List abortedTransactions) { - PriorityQueue orderedAbortedTransactions = new PriorityQueue<>( - abortedTransactions.size(), Comparator.comparingLong(FetchResponseData.AbortedTransaction::firstOffset) - ); - orderedAbortedTransactions.addAll(abortedTransactions); - return orderedAbortedTransactions; - } - - private boolean isBatchAborted(RecordBatch batch, Set abortedProducerIds) { - return batch.isTransactional() && abortedProducerIds.contains(batch.producerId()); - } - - // Visible for testing. - boolean containsAbortMarker(RecordBatch batch) { - if (!batch.isControlBatch()) - return false; - - Iterator batchIterator = batch.iterator(); - if (!batchIterator.hasNext()) - return false; - - Record firstRecord = batchIterator.next(); - return ControlRecordType.ABORT == ControlRecordType.parse(firstRecord.key()); - } - // Visible for testing. Should only be used for testing purposes. NavigableMap cachedState() { return new ConcurrentSkipListMap<>(cachedState); @@ -2803,22 +2119,12 @@ NavigableMap cachedState() { // Visible for testing. boolean findNextFetchOffset() { - lock.readLock().lock(); - try { - return findNextFetchOffset; - } finally { - lock.readLock().unlock(); - } + return findNextFetchOffset.get(); } - // Visible for testing. - void updateFindNextFetchOffset(boolean value) { - lock.writeLock().lock(); - try { - findNextFetchOffset = value; - } finally { - lock.writeLock().unlock(); - } + // Visible for testing. Should only be used for testing purposes. + void findNextFetchOffset(boolean findNextOffset) { + findNextFetchOffset.getAndSet(findNextOffset); } // Visible for testing @@ -2852,41 +2158,312 @@ Timer timer() { } // Visible for testing - GapWindow persisterReadResultGapWindow() { - return persisterReadResultGapWindow; + final class AcquisitionLockTimerTask extends TimerTask { + private final long expirationMs; + private final String memberId; + private final long firstOffset; + private final long lastOffset; + + AcquisitionLockTimerTask(long delayMs, String memberId, long firstOffset, long lastOffset) { + super(delayMs); + this.expirationMs = time.hiResClockMs() + delayMs; + this.memberId = memberId; + this.firstOffset = firstOffset; + this.lastOffset = lastOffset; + } + + long expirationMs() { + return expirationMs; + } + + /** + * The task is executed when the acquisition lock timeout is reached. The task releases the acquired records. + */ + @Override + public void run() { + releaseAcquisitionLockOnTimeout(memberId, firstOffset, lastOffset); + } } - // Visible for testing. - Uuid fetchLock() { - return fetchLock.get(); + /** + * The InFlightBatch maintains the in-memory state of the fetched records i.e. in-flight records. + */ + final class InFlightBatch { + // The offset of the first record in the batch that is fetched from the log. + private final long firstOffset; + // The last offset of the batch that is fetched from the log. + private final long lastOffset; + + // The batch state of the fetched records. If the offset state map is empty then batchState + // determines the state of the complete batch else individual offset determines the state of + // the respective records. + private InFlightState batchState; + + // The offset state map is used to track the state of the records per offset. However, the + // offset state map is only required when the state of the offsets within same batch are + // different. The states can be different when explicit offset acknowledgment is done which + // is different from the batch state. + private NavigableMap offsetState; + + InFlightBatch(String memberId, long firstOffset, long lastOffset, RecordState state, + int deliveryCount, AcquisitionLockTimerTask acquisitionLockTimeoutTask + ) { + this.firstOffset = firstOffset; + this.lastOffset = lastOffset; + this.batchState = new InFlightState(state, deliveryCount, memberId, acquisitionLockTimeoutTask); + } + + // Visible for testing. + long firstOffset() { + return firstOffset; + } + + // Visible for testing. + long lastOffset() { + return lastOffset; + } + + // Visible for testing. + RecordState batchState() { + return inFlightState().state; + } + + // Visible for testing. + String batchMemberId() { + if (batchState == null) { + throw new IllegalStateException("The batch member id is not available as the offset state is maintained"); + } + return batchState.memberId; + } + + // Visible for testing. + int batchDeliveryCount() { + if (batchState == null) { + throw new IllegalStateException("The batch delivery count is not available as the offset state is maintained"); + } + return batchState.deliveryCount; + } + + // Visible for testing. + AcquisitionLockTimerTask batchAcquisitionLockTimeoutTask() { + return inFlightState().acquisitionLockTimeoutTask; + } + + // Visible for testing. + NavigableMap offsetState() { + return offsetState; + } + + private InFlightState inFlightState() { + if (batchState == null) { + throw new IllegalStateException("The batch state is not available as the offset state is maintained"); + } + return batchState; + } + + private boolean batchHasOngoingStateTransition() { + return inFlightState().hasOngoingStateTransition(); + } + + private void archiveBatch(String newMemberId) { + inFlightState().archive(newMemberId); + } + + private InFlightState tryUpdateBatchState(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { + if (batchState == null) { + throw new IllegalStateException("The batch state update is not available as the offset state is maintained"); + } + return batchState.tryUpdateState(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); + } + + private InFlightState startBatchStateTransition(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, + String newMemberId) { + if (batchState == null) { + throw new IllegalStateException("The batch state update is not available as the offset state is maintained"); + } + return batchState.startStateTransition(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); + } + + private void maybeInitializeOffsetStateUpdate() { + if (offsetState == null) { + offsetState = new ConcurrentSkipListMap<>(); + // The offset state map is not initialized hence initialize the state of the offsets + // from the first offset to the last offset. Mark the batch inflightState to null as + // the state of the records is maintained in the offset state map now. + for (long offset = this.firstOffset; offset <= this.lastOffset; offset++) { + if (batchState.acquisitionLockTimeoutTask != null) { + // The acquisition lock timeout task is already scheduled for the batch, hence we need to schedule + // the acquisition lock timeout task for the offset as well. + long delayMs = batchState.acquisitionLockTimeoutTask.expirationMs() - time.hiResClockMs(); + AcquisitionLockTimerTask timerTask = acquisitionLockTimerTask(batchState.memberId, offset, offset, delayMs); + offsetState.put(offset, new InFlightState(batchState.state, batchState.deliveryCount, batchState.memberId, timerTask)); + timer.add(timerTask); + } else { + offsetState.put(offset, new InFlightState(batchState.state, batchState.deliveryCount, batchState.memberId)); + } + } + // Cancel the acquisition lock timeout task for the batch as the offset state is maintained. + if (batchState.acquisitionLockTimeoutTask != null) { + batchState.cancelAndClearAcquisitionLockTimeoutTask(); + } + batchState = null; + } + } + + private void updateAcquisitionLockTimeout(AcquisitionLockTimerTask acquisitionLockTimeoutTask) { + inFlightState().acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; + } + + @Override + public String toString() { + return "InFlightBatch(" + + "firstOffset=" + firstOffset + + ", lastOffset=" + lastOffset + + ", inFlightState=" + batchState + + ", offsetState=" + ((offsetState == null) ? "null" : offsetState) + + ")"; + } } /** - * The GapWindow class is used to record the gap start and end offset of the probable gaps - * of available records which are neither known to Persister nor to SharePartition. Share Partition - * will use this information to determine the next fetch offset and should try to fetch the records - * in the gap. + * The InFlightState is used to track the state and delivery count of a record that has been + * fetched from the leader. The state of the record is used to determine if the record should + * be re-deliver or if it can be acknowledged or archived. */ - // Visible for Testing - static class GapWindow { - private final long endOffset; - private long gapStartOffset; + static final class InFlightState { + + // The state of the fetch batch records. + private RecordState state; + // The number of times the records has been delivered to the client. + private int deliveryCount; + // The member id of the client that is fetching/acknowledging the record. + private String memberId; + // The state of the records before the transition. In case we need to revert an in-flight state, we revert the above + // attributes of InFlightState to this state, namely - state, deliveryCount and memberId. + private InFlightState rollbackState; + // The timer task for the acquisition lock timeout. + private AcquisitionLockTimerTask acquisitionLockTimeoutTask; - GapWindow(long endOffset, long gapStartOffset) { - this.endOffset = endOffset; - this.gapStartOffset = gapStartOffset; + + InFlightState(RecordState state, int deliveryCount, String memberId) { + this(state, deliveryCount, memberId, null); + } + + InFlightState(RecordState state, int deliveryCount, String memberId, AcquisitionLockTimerTask acquisitionLockTimeoutTask) { + this.state = state; + this.deliveryCount = deliveryCount; + this.memberId = memberId; + this.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; + } + + // Visible for testing. + RecordState state() { + return state; + } + + String memberId() { + return memberId; } - long endOffset() { - return endOffset; + // Visible for testing. + TimerTask acquisitionLockTimeoutTask() { + return acquisitionLockTimeoutTask; } - long gapStartOffset() { - return gapStartOffset; + void updateAcquisitionLockTimeoutTask(AcquisitionLockTimerTask acquisitionLockTimeoutTask) throws IllegalArgumentException { + if (this.acquisitionLockTimeoutTask != null) { + throw new IllegalArgumentException("Existing acquisition lock timeout exists, cannot override."); + } + this.acquisitionLockTimeoutTask = acquisitionLockTimeoutTask; } - void gapStartOffset(long gapStartOffset) { - this.gapStartOffset = gapStartOffset; + void cancelAndClearAcquisitionLockTimeoutTask() { + acquisitionLockTimeoutTask.cancel(); + acquisitionLockTimeoutTask = null; + } + + private boolean hasOngoingStateTransition() { + if (rollbackState == null) { + // This case could occur when the batch/offset hasn't transitioned even once or the state transitions have + // been committed. + return false; + } + return rollbackState.state != null; + } + + /** + * Try to update the state of the records. The state of the records can only be updated if the + * new state is allowed to be transitioned from old state. The delivery count is not incremented + * if the state update is unsuccessful. + * + * @param newState The new state of the records. + * @param incrementDeliveryCount Whether to increment the delivery count. + * + * @return {@code InFlightState} if update succeeds, null otherwise. Returning state + * helps update chaining. + */ + private InFlightState tryUpdateState(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { + try { + if (newState == RecordState.AVAILABLE && deliveryCount >= maxDeliveryCount) { + newState = RecordState.ARCHIVED; + } + state = state.validateTransition(newState); + if (incrementDeliveryCount && newState != RecordState.ARCHIVED) { + deliveryCount++; + } + memberId = newMemberId; + return this; + } catch (IllegalStateException e) { + log.error("Failed to update state of the records", e); + return null; + } + } + + private void archive(String newMemberId) { + state = RecordState.ARCHIVED; + memberId = newMemberId; + } + + private InFlightState startStateTransition(RecordState newState, boolean incrementDeliveryCount, int maxDeliveryCount, String newMemberId) { + rollbackState = new InFlightState(state, deliveryCount, memberId, acquisitionLockTimeoutTask); + return tryUpdateState(newState, incrementDeliveryCount, maxDeliveryCount, newMemberId); + } + + private void completeStateTransition(boolean commit) { + if (commit) { + rollbackState = null; + return; + } + state = rollbackState.state; + deliveryCount = rollbackState.deliveryCount; + memberId = rollbackState.memberId; + rollbackState = null; + } + + @Override + public int hashCode() { + return Objects.hash(state, deliveryCount, memberId); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + InFlightState that = (InFlightState) o; + return state == that.state && deliveryCount == that.deliveryCount && memberId.equals(that.memberId); + } + + @Override + public String toString() { + return "InFlightState(" + + "state=" + state.toString() + + ", deliveryCount=" + deliveryCount + + ", memberId=" + memberId + + ")"; } } @@ -2915,22 +2492,4 @@ void updateOffsetMetadata(long offset, LogOffsetMetadata offsetMetadata) { this.offsetMetadata = offsetMetadata; } } - - /** - * PersisterBatch class is used to record the state updates for a batch or an offset. - * It contains the updated in-flight state and the persister state batch to be sent to persister. - */ - private record PersisterBatch( - InFlightState updatedState, - PersisterStateBatch stateBatch - ) { } - - /** - * LastOffsetAndMaxRecords class is used to track the last offset to acquire and the maximum number - * of records that can be acquired in a fetch request. - */ - private record LastOffsetAndMaxRecords( - long lastOffset, - int maxRecords - ) { } } diff --git a/core/src/main/java/kafka/server/share/SharePartitionManager.java b/core/src/main/java/kafka/server/share/SharePartitionManager.java index 3c9c727c53a64..93978745f4a6c 100644 --- a/core/src/main/java/kafka/server/share/SharePartitionManager.java +++ b/core/src/main/java/kafka/server/share/SharePartitionManager.java @@ -19,6 +19,7 @@ import kafka.cluster.PartitionListener; import kafka.server.ReplicaManager; +import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; @@ -29,14 +30,18 @@ import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.metrics.Sensor; +import org.apache.kafka.common.metrics.stats.Avg; +import org.apache.kafka.common.metrics.stats.Max; +import org.apache.kafka.common.metrics.stats.Meter; import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfigManager; -import org.apache.kafka.server.common.ShareVersion; import org.apache.kafka.server.share.CachedSharePartition; -import org.apache.kafka.server.share.ShareGroupListener; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; import org.apache.kafka.server.share.context.FinalContext; @@ -45,10 +50,7 @@ import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey; -import org.apache.kafka.server.share.fetch.PartitionRotateStrategy; -import org.apache.kafka.server.share.fetch.PartitionRotateStrategy.PartitionRotateMetadata; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.metrics.ShareGroupMetrics; import org.apache.kafka.server.share.persister.Persister; import org.apache.kafka.server.share.session.ShareSession; import org.apache.kafka.server.share.session.ShareSessionCache; @@ -58,24 +60,21 @@ import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.server.util.timer.TimerTask; -import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.ArrayList; import java.util.Collection; +import java.util.Collections; import java.util.HashMap; -import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import java.util.Optional; -import java.util.Set; +import java.util.Objects; import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ConcurrentHashMap; import java.util.function.BiConsumer; -import java.util.function.Consumer; /** * The SharePartitionManager is responsible for managing the SharePartitions and ShareSessions. @@ -86,9 +85,9 @@ public class SharePartitionManager implements AutoCloseable { private static final Logger log = LoggerFactory.getLogger(SharePartitionManager.class); /** - * The partition cache is used to store the SharePartition objects for each share group topic-partition. + * The partition cache map is used to store the SharePartition objects for each share group topic-partition. */ - private final SharePartitionCache partitionCache; + private final Map partitionCacheMap; /** * The replica manager is used to fetch messages from the log. @@ -122,18 +121,14 @@ public class SharePartitionManager implements AutoCloseable { private final Timer timer; /** - * The max in flight records is the maximum number of records that can be in flight at any one time per share-partition. + * The max in flight messages is the maximum number of messages that can be in flight at any one time per share-partition. */ - private final int maxInFlightRecords; + private final int maxInFlightMessages; /** * The max delivery count is the maximum number of times a message can be delivered before it is considered to be archived. */ private final int maxDeliveryCount; - /** - * The max wait time for a share fetch request having remote storage fetch. - */ - private final long remoteFetchMaxWaitMs; /** * The persister is used to persist the share partition state. @@ -146,9 +141,9 @@ public class SharePartitionManager implements AutoCloseable { private final ShareGroupMetrics shareGroupMetrics; /** - * The broker topic stats is used to record the broker topic metrics for share group. + * The max fetch records is the maximum number of records that can be fetched by a share fetch request. */ - private final BrokerTopicStats brokerTopicStats; + private final int maxFetchRecords; public SharePartitionManager( ReplicaManager replicaManager, @@ -156,24 +151,23 @@ public SharePartitionManager( ShareSessionCache cache, int defaultRecordLockDurationMs, int maxDeliveryCount, - int maxInFlightRecords, - long remoteFetchMaxWaitMs, + int maxInFlightMessages, + int maxFetchRecords, Persister persister, GroupConfigManager groupConfigManager, - BrokerTopicStats brokerTopicStats + Metrics metrics ) { this(replicaManager, time, cache, - new SharePartitionCache(), + new ConcurrentHashMap<>(), defaultRecordLockDurationMs, maxDeliveryCount, - maxInFlightRecords, - remoteFetchMaxWaitMs, + maxInFlightMessages, + maxFetchRecords, persister, groupConfigManager, - new ShareGroupMetrics(time), - brokerTopicStats + metrics ); } @@ -181,30 +175,28 @@ private SharePartitionManager( ReplicaManager replicaManager, Time time, ShareSessionCache cache, - SharePartitionCache partitionCache, + Map partitionCacheMap, int defaultRecordLockDurationMs, int maxDeliveryCount, - int maxInFlightRecords, - long remoteFetchMaxWaitMs, + int maxInFlightMessages, + int maxFetchRecords, Persister persister, GroupConfigManager groupConfigManager, - ShareGroupMetrics shareGroupMetrics, - BrokerTopicStats brokerTopicStats + Metrics metrics ) { this(replicaManager, time, cache, - partitionCache, + partitionCacheMap, defaultRecordLockDurationMs, new SystemTimerReaper("share-group-lock-timeout-reaper", new SystemTimer("share-group-lock-timeout")), maxDeliveryCount, - maxInFlightRecords, - remoteFetchMaxWaitMs, + maxInFlightMessages, + maxFetchRecords, persister, groupConfigManager, - shareGroupMetrics, - brokerTopicStats + metrics ); } @@ -213,31 +205,28 @@ private SharePartitionManager( ReplicaManager replicaManager, Time time, ShareSessionCache cache, - SharePartitionCache partitionCache, + Map partitionCacheMap, int defaultRecordLockDurationMs, Timer timer, int maxDeliveryCount, - int maxInFlightRecords, - long remoteFetchMaxWaitMs, + int maxInFlightMessages, + int maxFetchRecords, Persister persister, GroupConfigManager groupConfigManager, - ShareGroupMetrics shareGroupMetrics, - BrokerTopicStats brokerTopicStats + Metrics metrics ) { this.replicaManager = replicaManager; this.time = time; this.cache = cache; - this.partitionCache = partitionCache; + this.partitionCacheMap = partitionCacheMap; this.defaultRecordLockDurationMs = defaultRecordLockDurationMs; this.timer = timer; this.maxDeliveryCount = maxDeliveryCount; - this.maxInFlightRecords = maxInFlightRecords; - this.remoteFetchMaxWaitMs = remoteFetchMaxWaitMs; + this.maxInFlightMessages = maxInFlightMessages; this.persister = persister; this.groupConfigManager = groupConfigManager; - this.shareGroupMetrics = shareGroupMetrics; - this.brokerTopicStats = brokerTopicStats; - this.cache.registerShareGroupListener(new ShareGroupListenerImpl()); + this.shareGroupMetrics = new ShareGroupMetrics(Objects.requireNonNull(metrics), time); + this.maxFetchRecords = maxFetchRecords; } /** @@ -247,10 +236,7 @@ private SharePartitionManager( * @param groupId The group id, this is used to identify the share group. * @param memberId The member id, generated by the group-coordinator, this is used to identify the client. * @param fetchParams The fetch parameters from the share fetch request. - * @param sessionEpoch The session epoch for the member. - * @param maxFetchRecords The maximum number of records to fetch. - * @param batchSize The number of records per acquired records batch. - * @param topicIdPartitions The topic partitions to fetch for. + * @param partitionMaxBytes The maximum number of bytes to fetch for each partition. * * @return A future that will be completed with the fetched messages. */ @@ -258,20 +244,13 @@ public CompletableFuture> fetchMessages( String groupId, String memberId, FetchParams fetchParams, - int sessionEpoch, - int maxFetchRecords, - int batchSize, - List topicIdPartitions + Map partitionMaxBytes ) { log.trace("Fetch request for topicIdPartitions: {} with groupId: {} fetch params: {}", - topicIdPartitions, groupId, fetchParams); - - List rotatedTopicIdPartitions = PartitionRotateStrategy - .type(PartitionRotateStrategy.StrategyType.ROUND_ROBIN) - .rotate(topicIdPartitions, new PartitionRotateMetadata(sessionEpoch)); + partitionMaxBytes.keySet(), groupId, fetchParams); CompletableFuture> future = new CompletableFuture<>(); - processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, rotatedTopicIdPartitions, batchSize, maxFetchRecords, brokerTopicStats)); + processShareFetch(new ShareFetch(fetchParams, groupId, memberId, future, partitionMaxBytes, maxFetchRecords)); return future; } @@ -293,13 +272,11 @@ public CompletableFuture> futures = new HashMap<>(); - // Track the topics for which we have received an acknowledgement for metrics. - Set topics = new HashSet<>(); acknowledgeTopics.forEach((topicIdPartition, acknowledgePartitionBatches) -> { - topics.add(topicIdPartition.topic()); SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); - SharePartition sharePartition = partitionCache.get(sharePartitionKey); + SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); if (sharePartition != null) { CompletableFuture future = new CompletableFuture<>(); sharePartition.acknowledge(memberId, acknowledgePartitionBatches).whenComplete((result, throwable) -> { @@ -308,15 +285,7 @@ public CompletableFuture { - // Client can either send a single entry in acknowledgeTypes which represents - // the state of the complete batch or can send individual offsets state. - if (batch.acknowledgeTypes().size() == 1) { - shareGroupMetrics.recordAcknowledgement(batch.acknowledgeTypes().get(0), batch.lastOffset() - batch.firstOffset() + 1); - } else { - batch.acknowledgeTypes().forEach(shareGroupMetrics::recordAcknowledgement); - } - }); + acknowledgePartitionBatches.forEach(batch -> batch.acknowledgeTypes().forEach(this.shareGroupMetrics::recordAcknowledgement)); future.complete(null); }); @@ -331,13 +300,7 @@ public CompletableFuture { - brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().mark(); - brokerTopicStats.topicStats(topic).totalShareAcknowledgementRequestRate().mark(); - }); - - return mapAcknowledgementFutures(futures, Optional.of(failedShareAcknowledgeMetricsHandler())); + return mapAcknowledgementFutures(futures); } /** @@ -364,18 +327,18 @@ public CompletableFuture> futuresMap = new HashMap<>(); topicIdPartitions.forEach(topicIdPartition -> { SharePartitionKey sharePartitionKey = sharePartitionKey(groupId, topicIdPartition); - SharePartition sharePartition = partitionCache.get(sharePartitionKey); + SharePartition sharePartition = partitionCacheMap.get(sharePartitionKey); if (sharePartition == null) { log.error("No share partition found for groupId {} topicPartition {} while releasing acquired topic partitions", groupId, topicIdPartition); futuresMap.put(topicIdPartition, CompletableFuture.completedFuture(Errors.UNKNOWN_TOPIC_OR_PARTITION.exception())); @@ -398,34 +361,14 @@ public CompletableFuture createIdleShareFetchTimerTask(long maxWaitMs) { - CompletableFuture future = new CompletableFuture<>(); - TimerTask idleShareFetchTimerTask = new IdleShareFetchTimerTask(maxWaitMs, future); - replicaManager.addShareFetchTimerRequest(idleShareFetchTimerTask); - return future; - } - - private CompletableFuture> mapAcknowledgementFutures( - Map> futuresMap, - Optional>> failedMetricsHandler - ) { + private CompletableFuture> mapAcknowledgementFutures(Map> futuresMap) { CompletableFuture allFutures = CompletableFuture.allOf( - futuresMap.values().toArray(new CompletableFuture[0])); + futuresMap.values().toArray(new CompletableFuture[0])); return allFutures.thenApply(v -> { Map result = new HashMap<>(); - // Keep the set as same topic might appear multiple times. Multiple partitions can fail for same topic. - Set failedTopics = new HashSet<>(); futuresMap.forEach((topicIdPartition, future) -> { ShareAcknowledgeResponseData.PartitionData partitionData = new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(topicIdPartition.partition()); @@ -433,11 +376,9 @@ private CompletableFuture handler.accept(failedTopics)); return result; }); } @@ -445,27 +386,29 @@ private CompletableFuture shareFetchData, - List toForget, - ShareRequestMetadata reqMetadata, - Boolean isAcknowledgeDataPresent, - String clientConnectionId - ) { + public ShareFetchContext newContext(String groupId, Map shareFetchData, + List toForget, ShareRequestMetadata reqMetadata, Boolean isAcknowledgeDataPresent) { ShareFetchContext context; + // TopicPartition with maxBytes as 0 should not be added in the cachedPartitions + Map shareFetchDataWithMaxBytes = new HashMap<>(); + shareFetchData.forEach((tp, sharePartitionData) -> { + if (sharePartitionData.maxBytes > 0) shareFetchDataWithMaxBytes.put(tp, sharePartitionData); + }); // If the request's epoch is FINAL_EPOCH or INITIAL_EPOCH, we should remove the existing sessions. Also, start a // new session in case it is INITIAL_EPOCH. Hence, we need to treat them as special cases. if (reqMetadata.isFull()) { ShareSessionKey key = shareSessionKey(groupId, reqMetadata.memberId()); if (reqMetadata.epoch() == ShareRequestMetadata.FINAL_EPOCH) { + // If the epoch is FINAL_EPOCH, don't try to create a new session. + if (!shareFetchDataWithMaxBytes.isEmpty()) { + throw Errors.INVALID_REQUEST.exception(); + } if (cache.get(key) == null) { log.error("Share session error for {}: no such share session found", key); throw Errors.SHARE_SESSION_NOT_FOUND.exception(); @@ -480,20 +423,20 @@ public ShareFetchContext newContext( log.debug("Removed share session with key {}", key); } ImplicitLinkedHashCollection cachedSharePartitions = new - ImplicitLinkedHashCollection<>(shareFetchData.size()); - shareFetchData.forEach(topicIdPartition -> - cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, false))); + ImplicitLinkedHashCollection<>(shareFetchDataWithMaxBytes.size()); + shareFetchDataWithMaxBytes.forEach((topicIdPartition, reqData) -> + cachedSharePartitions.mustAdd(new CachedSharePartition(topicIdPartition, reqData, false))); ShareSessionKey responseShareSessionKey = cache.maybeCreateSession(groupId, reqMetadata.memberId(), - cachedSharePartitions, clientConnectionId); + time.milliseconds(), cachedSharePartitions); if (responseShareSessionKey == null) { log.error("Could not create a share session for group {} member {}", groupId, reqMetadata.memberId()); - throw Errors.SHARE_SESSION_LIMIT_REACHED.exception(); + throw Errors.SHARE_SESSION_NOT_FOUND.exception(); } - context = new ShareSessionContext(reqMetadata, shareFetchData); + context = new ShareSessionContext(reqMetadata, shareFetchDataWithMaxBytes); log.debug("Created a new ShareSessionContext with key {} isSubsequent {} returning {}. A new share " + "session will be started.", responseShareSessionKey, false, - partitionsToLogString(shareFetchData)); + partitionsToLogString(shareFetchDataWithMaxBytes.keySet())); } } else { // We update the already existing share session. @@ -510,8 +453,8 @@ public ShareFetchContext newContext( throw Errors.INVALID_SHARE_SESSION_EPOCH.exception(); } Map> modifiedTopicIdPartitions = shareSession.update( - shareFetchData, toForget); - cache.updateNumPartitions(shareSession); + shareFetchDataWithMaxBytes, toForget); + cache.touch(shareSession, time.milliseconds()); shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch); log.debug("Created a new ShareSessionContext for session key {}, epoch {}: " + "added {}, updated {}, removed {}", shareSession.key(), shareSession.epoch, @@ -552,30 +495,12 @@ public void acknowledgeSessionUpdate(String groupId, ShareRequestMetadata reqMet shareSession.epoch, reqMetadata.epoch()); throw Errors.INVALID_SHARE_SESSION_EPOCH.exception(); } - cache.updateNumPartitions(shareSession); + cache.touch(shareSession, time.milliseconds()); shareSession.epoch = ShareRequestMetadata.nextEpoch(shareSession.epoch); } } } - /** - * The handler for share version feature metadata changes. - * @param shareVersion the new share version feature - * @param isEnabledFromConfig whether the share version feature is enabled from config - */ - public void onShareVersionToggle(ShareVersion shareVersion, boolean isEnabledFromConfig) { - // Clear the cache and remove all share partitions from the cache if the share version does - // not support share groups. - if (!shareVersion.supportsShareGroups() && !isEnabledFromConfig) { - cache.removeAllSessions(); - Set sharePartitionKeys = partitionCache.cachedSharePartitionKeys(); - // Remove all share partitions from partition cache. - sharePartitionKeys.forEach(sharePartitionKey -> - removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager) - ); - } - } - /** * The cachedTopicIdPartitionsInShareSession method is used to get the cached topic-partitions in the share session. * @@ -589,7 +514,7 @@ List cachedTopicIdPartitionsInShareSession(String groupId, Uui ShareSessionKey key = shareSessionKey(groupId, memberId); ShareSession shareSession = cache.get(key); if (shareSession == null) { - return List.of(); + return Collections.emptyList(); } List cachedTopicIdPartitions = new ArrayList<>(); shareSession.partitionMap().forEach(cachedSharePartition -> cachedTopicIdPartitions.add( @@ -607,7 +532,6 @@ private void addDelayedShareFetch(DelayedShareFetch delayedShareFetch, List partiti // Visible for testing. void processShareFetch(ShareFetch shareFetch) { - if (shareFetch.topicIdPartitions().isEmpty()) { + if (shareFetch.partitionMaxBytes().isEmpty()) { // If there are no partitions to fetch then complete the future with an empty map. - shareFetch.maybeComplete(Map.of()); + shareFetch.maybeComplete(Collections.emptyMap()); return; } List delayedShareFetchWatchKeys = new ArrayList<>(); LinkedHashMap sharePartitions = new LinkedHashMap<>(); - // Track the topics for which we have received a share fetch request for metrics. - Set topics = new HashSet<>(); - for (TopicIdPartition topicIdPartition : shareFetch.topicIdPartitions()) { - topics.add(topicIdPartition.topic()); + for (TopicIdPartition topicIdPartition : shareFetch.partitionMaxBytes().keySet()) { SharePartitionKey sharePartitionKey = sharePartitionKey( shareFetch.groupId(), topicIdPartition @@ -669,23 +590,15 @@ void processShareFetch(ShareFetch shareFetch) { // immediately then the requests might be waiting in purgatory until the share partition // is initialized. Hence, trigger the completion of all pending delayed share fetch requests // for the share partition. - if (!initialized) { - shareGroupMetrics.partitionLoadTime(sharePartition.loadStartTimeMs()); + if (!initialized) replicaManager.completeDelayedShareFetchRequest(delayedShareFetchKey); - } }); sharePartitions.put(topicIdPartition, sharePartition); } - // Update the metrics for the topics for which we have received a share fetch request. - topics.forEach(topic -> { - brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().mark(); - brokerTopicStats.topicStats(topic).totalShareFetchRequestRate().mark(); - }); - // If all the partitions in the request errored out, then complete the fetch request with an exception. if (shareFetch.errorInAllPartitions()) { - shareFetch.maybeComplete(Map.of()); + shareFetch.maybeComplete(Collections.emptyMap()); // Do not proceed with share fetch processing as all the partitions errored out. return; } @@ -693,24 +606,25 @@ void processShareFetch(ShareFetch shareFetch) { // Add the share fetch to the delayed share fetch purgatory to process the fetch request. // The request will be added irrespective of whether the share partition is initialized or not. // Once the share partition is initialized, the delayed share fetch will be completed. - addDelayedShareFetch(new DelayedShareFetch(shareFetch, replicaManager, fencedSharePartitionHandler(), sharePartitions, shareGroupMetrics, time, remoteFetchMaxWaitMs), delayedShareFetchWatchKeys); + addDelayedShareFetch(new DelayedShareFetch(shareFetch, replicaManager, fencedSharePartitionHandler(), sharePartitions), delayedShareFetchWatchKeys); } private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitionKey) { - return partitionCache.computeIfAbsent(sharePartitionKey, + return partitionCacheMap.computeIfAbsent(sharePartitionKey, k -> { + long start = time.hiResClockMs(); int leaderEpoch = ShareFetchUtils.leaderEpoch(replicaManager, sharePartitionKey.topicIdPartition().topicPartition()); // Attach listener to Partition which shall invoke partition change handlers. // However, as there could be multiple share partitions (per group name) for a single topic-partition, // hence create separate listeners per share partition which holds the share partition key // to identify the respective share partition. - SharePartitionListener listener = new SharePartitionListener(sharePartitionKey, replicaManager, partitionCache); + SharePartitionListener listener = new SharePartitionListener(sharePartitionKey, replicaManager, partitionCacheMap); replicaManager.maybeAddListener(sharePartitionKey.topicIdPartition().topicPartition(), listener); - return new SharePartition( + SharePartition partition = new SharePartition( sharePartitionKey.groupId(), sharePartitionKey.topicIdPartition(), leaderEpoch, - maxInFlightRecords, + maxInFlightMessages, maxDeliveryCount, defaultRecordLockDurationMs, timer, @@ -720,6 +634,8 @@ private SharePartition getOrCreateSharePartition(SharePartitionKey sharePartitio groupConfigManager, listener ); + this.shareGroupMetrics.partitionLoadTime(start); + return partition; }); } @@ -736,7 +652,7 @@ private void handleInitializationException( } // Remove the partition from the cache as it's failed to initialize. - removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); // The partition initialization failed, so add the partition to the erroneous partitions. log.debug("Error initializing share partition with key {}", sharePartitionKey, throwable); shareFetch.addErroneous(sharePartitionKey.topicIdPartition(), throwable); @@ -756,7 +672,7 @@ private BiConsumer fencedSharePartitionHandler() { // The share partition is fenced hence remove the partition from map and let the client retry. // But surface the error to the client so client might take some action i.e. re-fetch // the metadata and retry the fetch on new leader. - removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); } }; } @@ -767,32 +683,16 @@ private SharePartitionKey sharePartitionKey(String groupId, TopicIdPartition top private static void removeSharePartitionFromCache( SharePartitionKey sharePartitionKey, - SharePartitionCache partitionCache, + Map map, ReplicaManager replicaManager ) { - SharePartition sharePartition = partitionCache.remove(sharePartitionKey); + SharePartition sharePartition = map.remove(sharePartitionKey); if (sharePartition != null) { sharePartition.markFenced(); replicaManager.removeListener(sharePartitionKey.topicIdPartition().topicPartition(), sharePartition.listener()); - replicaManager.completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(sharePartitionKey.groupId(), sharePartitionKey.topicIdPartition())); } } - /** - * The handler to update the failed share acknowledge request metrics. - * - * @return A Consumer that updates the failed share acknowledge request metrics. - */ - private Consumer> failedShareAcknowledgeMetricsHandler() { - return failedTopics -> { - // Update failed share acknowledge request metric. - failedTopics.forEach(topic -> { - brokerTopicStats.allTopicsStats().failedShareAcknowledgementRequestRate().mark(); - brokerTopicStats.topicStats(topic).failedShareAcknowledgementRequestRate().mark(); - }); - }; - } - /** * The SharePartitionListener is used to listen for partition events. The share partition is associated with * the topic-partition, we need to handle the partition events for the share partition. @@ -805,16 +705,16 @@ static class SharePartitionListener implements PartitionListener { private final SharePartitionKey sharePartitionKey; private final ReplicaManager replicaManager; - private final SharePartitionCache partitionCache; + private final Map partitionCacheMap; SharePartitionListener( SharePartitionKey sharePartitionKey, ReplicaManager replicaManager, - SharePartitionCache partitionCache + Map partitionCacheMap ) { this.sharePartitionKey = sharePartitionKey; this.replicaManager = replicaManager; - this.partitionCache = partitionCache; + this.partitionCacheMap = partitionCacheMap; } @Override @@ -844,65 +744,101 @@ private void onUpdate(TopicPartition topicPartition) { topicPartition, sharePartitionKey); return; } - removeSharePartitionFromCache(sharePartitionKey, partitionCache, replicaManager); + removeSharePartitionFromCache(sharePartitionKey, partitionCacheMap, replicaManager); } } - /** - * The ShareGroupListenerImpl is used to listen for group events. The share group is associated - * with the group id, need to handle the group events for the share group. - */ - private class ShareGroupListenerImpl implements ShareGroupListener { + static class ShareGroupMetrics { + /** + * share-acknowledgement (share-acknowledgement-rate and share-acknowledgement-count) - The total number of offsets acknowledged for share groups (requests to be ack). + * record-acknowledgement (record-acknowledgement-rate and record-acknowledgement-count) - The number of records acknowledged per acknowledgement type. + * partition-load-time (partition-load-time-avg and partition-load-time-max) - The time taken to load the share partitions. + */ - @Override - public void onMemberLeave(String groupId, Uuid memberId) { - releaseSession(groupId, memberId.toString()); + public static final String METRICS_GROUP_NAME = "share-group-metrics"; + + public static final String SHARE_ACK_SENSOR = "share-acknowledgement-sensor"; + public static final String SHARE_ACK_RATE = "share-acknowledgement-rate"; + public static final String SHARE_ACK_COUNT = "share-acknowledgement-count"; + + public static final String RECORD_ACK_SENSOR_PREFIX = "record-acknowledgement"; + public static final String RECORD_ACK_RATE = "record-acknowledgement-rate"; + public static final String RECORD_ACK_COUNT = "record-acknowledgement-count"; + public static final String ACK_TYPE = "ack-type"; + + public static final String PARTITION_LOAD_TIME_SENSOR = "partition-load-time-sensor"; + public static final String PARTITION_LOAD_TIME_AVG = "partition-load-time-avg"; + public static final String PARTITION_LOAD_TIME_MAX = "partition-load-time-max"; + + public static final Map RECORD_ACKS_MAP = new HashMap<>(); + + private final Time time; + private final Sensor shareAcknowledgementSensor; + private final Map recordAcksSensorMap = new HashMap<>(); + private final Sensor partitionLoadTimeSensor; + + static { + RECORD_ACKS_MAP.put((byte) 1, AcknowledgeType.ACCEPT.toString()); + RECORD_ACKS_MAP.put((byte) 2, AcknowledgeType.RELEASE.toString()); + RECORD_ACKS_MAP.put((byte) 3, AcknowledgeType.REJECT.toString()); } - @Override - public void onGroupEmpty(String groupId) { - // Remove all share partitions from the cache. Instead of defining an API in SharePartitionCache - // for removing all share partitions for a group, share partitions are removed after fetching - // associated topic-partitions from the cache. This is done to mark the share partitions fenced - // and remove the listeners from the replica manager. - Set topicIdPartitions = partitionCache.topicIdPartitionsForGroup(groupId); - if (topicIdPartitions != null) { - // Remove all share partitions from partition cache. - topicIdPartitions.forEach(topicIdPartition -> - removeSharePartitionFromCache(new SharePartitionKey(groupId, topicIdPartition), partitionCache, replicaManager) - ); + public ShareGroupMetrics(Metrics metrics, Time time) { + this.time = time; + + shareAcknowledgementSensor = metrics.sensor(SHARE_ACK_SENSOR); + shareAcknowledgementSensor.add(new Meter( + metrics.metricName( + SHARE_ACK_RATE, + METRICS_GROUP_NAME, + "Rate of acknowledge requests."), + metrics.metricName( + SHARE_ACK_COUNT, + METRICS_GROUP_NAME, + "The number of acknowledge requests."))); + + for (Map.Entry entry : RECORD_ACKS_MAP.entrySet()) { + recordAcksSensorMap.put(entry.getKey(), metrics.sensor(String.format("%s-%s-sensor", RECORD_ACK_SENSOR_PREFIX, entry.getValue()))); + recordAcksSensorMap.get(entry.getKey()) + .add(new Meter( + metrics.metricName( + RECORD_ACK_RATE, + METRICS_GROUP_NAME, + "Rate of records acknowledged per acknowledgement type.", + ACK_TYPE, entry.getValue()), + metrics.metricName( + RECORD_ACK_COUNT, + METRICS_GROUP_NAME, + "The number of records acknowledged per acknowledgement type.", + ACK_TYPE, entry.getValue()))); } - } - } - /** - * The IdleShareFetchTimerTask creates a timer task for a share fetch request which tries to initialize a new share - * session when the share session cache is full. Such a request is delayed for maxWaitMs by passing the corresponding - * IdleShareFetchTimerTask to {@link ReplicaManager#delayedShareFetchTimer}. - */ - private static class IdleShareFetchTimerTask extends TimerTask { + partitionLoadTimeSensor = metrics.sensor(PARTITION_LOAD_TIME_SENSOR); + partitionLoadTimeSensor.add(metrics.metricName( + PARTITION_LOAD_TIME_AVG, + METRICS_GROUP_NAME, + "The average time in milliseconds to load the share partitions."), + new Avg()); + partitionLoadTimeSensor.add(metrics.metricName( + PARTITION_LOAD_TIME_MAX, + METRICS_GROUP_NAME, + "The maximum time in milliseconds to load the share partitions."), + new Max()); + } - /** - * This future is used to complete the share fetch request when the timer task is completed. - */ - private final CompletableFuture future; + void shareAcknowledgement() { + shareAcknowledgementSensor.record(); + } - public IdleShareFetchTimerTask( - long delayMs, - CompletableFuture future - ) { - super(delayMs); - this.future = future; + void recordAcknowledgement(byte ackType) { + // unknown ack types (such as gaps for control records) are intentionally ignored + if (recordAcksSensorMap.containsKey(ackType)) { + recordAcksSensorMap.get(ackType).record(); + } } - /** - * The run method which is executed when the timer task expires. This completes the future indicating that the - * delay for the corresponding share fetch request is over. - */ - @Override - public void run() { - future.complete(null); + void partitionLoadTime(long start) { + partitionLoadTimeSensor.record(time.hiResClockMs() - start); } } - } diff --git a/core/src/main/scala/kafka/MetadataLogConfig.scala b/core/src/main/scala/kafka/MetadataLogConfig.scala new file mode 100644 index 0000000000000..20b5b23539eeb --- /dev/null +++ b/core/src/main/scala/kafka/MetadataLogConfig.scala @@ -0,0 +1,48 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.raft + +import org.apache.kafka.server.config.ServerLogConfigs +import kafka.server.KafkaConfig + +final case class MetadataLogConfig( + logSegmentBytes: Int, + logSegmentMinBytes: Int, + logSegmentMillis: Long, + retentionMaxBytes: Long, + retentionMillis: Long, + maxBatchSizeInBytes: Int, + maxFetchSizeInBytes: Int, + fileDeleteDelayMs: Long, + nodeId: Int +) + +object MetadataLogConfig { + def apply(config: KafkaConfig, maxBatchSizeInBytes: Int, maxFetchSizeInBytes: Int): MetadataLogConfig = { + new MetadataLogConfig( + config.metadataLogSegmentBytes, + config.metadataLogSegmentMinBytes, + config.metadataLogSegmentMillis, + config.metadataRetentionBytes, + config.metadataRetentionMillis, + maxBatchSizeInBytes, + maxFetchSizeInBytes, + ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, + config.metadataNodeIDConfig + ) + } +} diff --git a/core/src/main/scala/kafka/admin/ConfigCommand.scala b/core/src/main/scala/kafka/admin/ConfigCommand.scala index f004b9956c86e..edf0ff3bb6fa7 100644 --- a/core/src/main/scala/kafka/admin/ConfigCommand.scala +++ b/core/src/main/scala/kafka/admin/ConfigCommand.scala @@ -6,7 +6,7 @@ * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -17,11 +17,14 @@ package kafka.admin +import java.nio.charset.StandardCharsets +import java.util.concurrent.{ExecutionException, TimeUnit} +import java.util.{Collections, Properties} import joptsimple._ import kafka.server.DynamicConfig import kafka.utils.Implicits._ import kafka.utils.Logging -import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListConfigResourcesOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} +import org.apache.kafka.clients.admin.{Admin, AlterClientQuotasOptions, AlterConfigOp, AlterConfigsOptions, ConfigEntry, DescribeClusterOptions, DescribeConfigsOptions, ListTopicsOptions, ScramCredentialInfo, UserScramCredentialDeletion, UserScramCredentialUpsertion, ScramMechanism => PublicScramMechanism} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.{InvalidConfigurationException, UnsupportedVersionException} import org.apache.kafka.common.internals.Topic @@ -29,18 +32,13 @@ import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.utils.{Exit, Utils} -import org.apache.kafka.coordinator.group.GroupConfig import org.apache.kafka.server.config.{ConfigType, QuotaConfig} -import org.apache.kafka.server.metrics.ClientMetricsConfigs import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils} import org.apache.kafka.storage.internals.log.LogConfig import java.net.{InetAddress, UnknownHostException} -import java.nio.charset.StandardCharsets -import java.util.concurrent.{ExecutionException, TimeUnit} -import java.util.{Collections, Properties} -import scala.collection._ import scala.jdk.CollectionConverters._ +import scala.collection._ /** * This script can be used to change configs for topics/clients/users/brokers/ips/client-metrics/groups dynamically @@ -66,15 +64,8 @@ object ConfigCommand extends Logging { private val BrokerDefaultEntityName = "" val BrokerLoggerConfigType = "broker-loggers" - private val BrokerSupportedConfigTypes = ConfigType.values.map(_.value) :+ BrokerLoggerConfigType + private val BrokerSupportedConfigTypes = ConfigType.ALL.asScala :+ BrokerLoggerConfigType private val DefaultScramIterations = 4096 - private val TopicType = ConfigType.TOPIC.value - private val ClientMetricsType = ConfigType.CLIENT_METRICS.value - private val BrokerType = ConfigType.BROKER.value - private val GroupType = ConfigType.GROUP.value - private val UserType = ConfigType.USER.value - private val ClientType = ConfigType.CLIENT.value - private val IpType = ConfigType.IP.value def main(args: Array[String]): Unit = { try { @@ -86,13 +77,13 @@ object ConfigCommand extends Logging { opts.checkArgs() processCommand(opts) } catch { - case e: UnsupportedVersionException => - logger.debug(s"Unsupported API encountered in server when executing config command with args '${args.mkString(" ")}'") + case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) => + logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e) System.err.println(e.getMessage) Exit.exit(1) - case e @ (_: IllegalArgumentException | _: InvalidConfigurationException | _: OptionException) => - logger.debug(s"Failed config command with args '${args.mkString(" ")}'", e) + case e: UnsupportedVersionException => + logger.debug(s"Unsupported API encountered in server when executing config command with args '${args.mkString(" ")}'") System.err.println(e.getMessage) Exit.exit(1) @@ -180,13 +171,12 @@ object ConfigCommand extends Logging { val configsToBeDeleted = parseConfigsToBeDeleted(opts) entityTypeHead match { - case TopicType | ClientMetricsType | BrokerType | GroupType => + case ConfigType.TOPIC | ConfigType.CLIENT_METRICS | ConfigType.BROKER | ConfigType.GROUP => val configResourceType = entityTypeHead match { - case TopicType => ConfigResource.Type.TOPIC - case ClientMetricsType => ConfigResource.Type.CLIENT_METRICS - case BrokerType => ConfigResource.Type.BROKER - case GroupType => ConfigResource.Type.GROUP - case _ => throw new IllegalArgumentException(s"$entityNameHead is not a valid entity-type.") + case ConfigType.TOPIC => ConfigResource.Type.TOPIC + case ConfigType.CLIENT_METRICS => ConfigResource.Type.CLIENT_METRICS + case ConfigType.BROKER => ConfigResource.Type.BROKER + case ConfigType.GROUP => ConfigResource.Type.GROUP } try { alterResourceConfig(adminClient, entityTypeHead, entityNameHead, configsToBeDeleted, configsToBeAdded, configResourceType) @@ -215,29 +205,29 @@ object ConfigCommand extends Logging { val alterEntries = (deleteEntries ++ addEntries).asJavaCollection adminClient.incrementalAlterConfigs(Map(configResource -> alterEntries).asJava, alterOptions).all().get(60, TimeUnit.SECONDS) - case UserType | ClientType => + case ConfigType.USER | ConfigType.CLIENT => val hasQuotaConfigsToAdd = configsToBeAdded.keys.exists(QuotaConfig.isClientOrUserQuotaConfig) val scramConfigsToAddMap = configsToBeAdded.filter(entry => ScramMechanism.isScram(entry._1)) val unknownConfigsToAdd = configsToBeAdded.keys.filterNot(key => ScramMechanism.isScram(key) || QuotaConfig.isClientOrUserQuotaConfig(key)) val hasQuotaConfigsToDelete = configsToBeDeleted.exists(QuotaConfig.isClientOrUserQuotaConfig) val scramConfigsToDelete = configsToBeDeleted.filter(ScramMechanism.isScram) val unknownConfigsToDelete = configsToBeDeleted.filterNot(key => ScramMechanism.isScram(key) || QuotaConfig.isClientOrUserQuotaConfig(key)) - if (entityTypeHead == ClientType || entityTypes.size == 2) { // size==2 for case where users is specified first on the command line, before clients + if (entityTypeHead == ConfigType.CLIENT || entityTypes.size == 2) { // size==2 for case where users is specified first on the command line, before clients // either just a client or both a user and a client if (unknownConfigsToAdd.nonEmpty || scramConfigsToAddMap.nonEmpty) - throw new IllegalArgumentException(s"Only quota configs can be added for '$ClientType' using --bootstrap-server. Unexpected config names: ${unknownConfigsToAdd ++ scramConfigsToAddMap.keys}") + throw new IllegalArgumentException(s"Only quota configs can be added for '${ConfigType.CLIENT}' using --bootstrap-server. Unexpected config names: ${unknownConfigsToAdd ++ scramConfigsToAddMap.keys}") if (unknownConfigsToDelete.nonEmpty || scramConfigsToDelete.nonEmpty) - throw new IllegalArgumentException(s"Only quota configs can be deleted for '$ClientType' using --bootstrap-server. Unexpected config names: ${unknownConfigsToDelete ++ scramConfigsToDelete}") + throw new IllegalArgumentException(s"Only quota configs can be deleted for '${ConfigType.CLIENT}' using --bootstrap-server. Unexpected config names: ${unknownConfigsToDelete ++ scramConfigsToDelete}") } else { // ConfigType.User if (unknownConfigsToAdd.nonEmpty) - throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be added for '$UserType' using --bootstrap-server. Unexpected config names: $unknownConfigsToAdd") + throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be added for '${ConfigType.USER}' using --bootstrap-server. Unexpected config names: $unknownConfigsToAdd") if (unknownConfigsToDelete.nonEmpty) - throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be deleted for '$UserType' using --bootstrap-server. Unexpected config names: $unknownConfigsToDelete") + throw new IllegalArgumentException(s"Only quota and SCRAM credential configs can be deleted for '${ConfigType.USER}' using --bootstrap-server. Unexpected config names: $unknownConfigsToDelete") if (scramConfigsToAddMap.nonEmpty || scramConfigsToDelete.nonEmpty) { if (entityNames.exists(_.isEmpty)) // either --entity-type users --entity-default or --user-defaults throw new IllegalArgumentException("The use of --entity-default or --user-defaults is not allowed with User SCRAM Credentials using --bootstrap-server.") if (hasQuotaConfigsToAdd || hasQuotaConfigsToDelete) - throw new IllegalArgumentException(s"Cannot alter both quota and SCRAM credential configs simultaneously for '$UserType' using --bootstrap-server.") + throw new IllegalArgumentException(s"Cannot alter both quota and SCRAM credential configs simultaneously for '${ConfigType.USER}' using --bootstrap-server.") } } @@ -251,10 +241,10 @@ object ConfigCommand extends Logging { alterUserScramCredentialConfigs(adminClient, entityNames.head, scramConfigsToAddMap, scramConfigsToDelete) } - case IpType => - val unknownConfigs = (configsToBeAdded.keys ++ configsToBeDeleted).filterNot(key => QuotaConfig.ipConfigs.names.contains(key)) + case ConfigType.IP => + val unknownConfigs = (configsToBeAdded.keys ++ configsToBeDeleted).filterNot(key => DynamicConfig.Ip.names.contains(key)) if (unknownConfigs.nonEmpty) - throw new IllegalArgumentException(s"Only connection quota configs can be added for '$IpType' using --bootstrap-server. Unexpected config names: ${unknownConfigs.mkString(",")}") + throw new IllegalArgumentException(s"Only connection quota configs can be added for '${ConfigType.IP}' using --bootstrap-server. Unexpected config names: ${unknownConfigs.mkString(",")}") alterQuotaConfigs(adminClient, entityTypes, entityNames, configsToBeAddedMap, configsToBeDeleted) case _ => @@ -300,9 +290,9 @@ object ConfigCommand extends Logging { throw new InvalidConfigurationException(s"Invalid config(s): ${invalidConfigs.mkString(",")}") val alterEntityTypes = entityTypes.map { - case UserType => ClientQuotaEntity.USER - case ClientType => ClientQuotaEntity.CLIENT_ID - case IpType => ClientQuotaEntity.IP + case ConfigType.USER => ClientQuotaEntity.USER + case ConfigType.CLIENT => ClientQuotaEntity.CLIENT_ID + case ConfigType.IP => ClientQuotaEntity.IP case entType => throw new IllegalArgumentException(s"Unexpected entity type: $entType") } val alterEntityNames = entityNames.map(en => if (en.nonEmpty) en else null) @@ -331,65 +321,28 @@ object ConfigCommand extends Logging { val describeAll = opts.options.has(opts.allOpt) entityTypes.head match { - case TopicType | BrokerType | BrokerLoggerConfigType | ClientMetricsType | GroupType => + case ConfigType.TOPIC | ConfigType.BROKER | BrokerLoggerConfigType | ConfigType.CLIENT_METRICS | ConfigType.GROUP => describeResourceConfig(adminClient, entityTypes.head, entityNames.headOption, describeAll) - case UserType | ClientType => + case ConfigType.USER | ConfigType.CLIENT => describeClientQuotaAndUserScramCredentialConfigs(adminClient, entityTypes, entityNames) - case IpType => + case ConfigType.IP => describeQuotaConfigs(adminClient, entityTypes, entityNames) case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") } } private def describeResourceConfig(adminClient: Admin, entityType: String, entityName: Option[String], describeAll: Boolean): Unit = { - if (!describeAll) { - entityName.foreach { name => - entityType match { - case TopicType => - Topic.validate(name) - if (!adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names.get.contains(name)) { - System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") - return - } - case BrokerType | BrokerLoggerConfigType => - if (adminClient.describeCluster.nodes.get.stream.anyMatch(_.idString == name)) { - // valid broker id - } else if (name == BrokerDefaultEntityName) { - // default broker configs - } else { - System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") - return - } - case ClientMetricsType => - if (adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions).all.get - .stream.noneMatch(_.name == name)) { - System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") - return - } - case GroupType => - if (adminClient.listGroups().all.get.stream.noneMatch(_.groupId() == name) && - adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions).all.get - .stream.noneMatch(_.name == name)) { - System.out.println(s"The ${entityType.dropRight(1)} '$name' doesn't exist and doesn't have dynamic config.") - return - } - case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") - } - } - } - val entities = entityName .map(name => List(name)) .getOrElse(entityType match { - case TopicType => + case ConfigType.TOPIC => adminClient.listTopics(new ListTopicsOptions().listInternal(true)).names().get().asScala.toSeq - case BrokerType | BrokerLoggerConfigType => + case ConfigType.BROKER | BrokerLoggerConfigType => adminClient.describeCluster(new DescribeClusterOptions()).nodes().get().asScala.map(_.idString).toSeq :+ BrokerDefaultEntityName - case ClientMetricsType => - adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions).all().get().asScala.map(_.name).toSeq - case GroupType => - adminClient.listGroups().all.get.asScala.map(_.groupId).toSet ++ - adminClient.listConfigResources(java.util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions).all().get().asScala.map(_.name).toSet + case ConfigType.CLIENT_METRICS => + adminClient.listClientMetricsResources().all().get().asScala.map(_.name).toSeq + case ConfigType.GROUP => + adminClient.listConsumerGroups().all.get.asScala.map(_.groupId).toSeq case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") }) @@ -432,11 +385,11 @@ object ConfigCommand extends Logging { } val (configResourceType, dynamicConfigSource) = entityType match { - case TopicType => + case ConfigType.TOPIC => if (entityName.nonEmpty) Topic.validate(entityName) (ConfigResource.Type.TOPIC, Some(ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG)) - case BrokerType => entityName match { + case ConfigType.BROKER => entityName match { case BrokerDefaultEntityName => (ConfigResource.Type.BROKER, Some(ConfigEntry.ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG)) case _ => @@ -447,9 +400,9 @@ object ConfigCommand extends Logging { if (entityName.nonEmpty) validateBrokerId() (ConfigResource.Type.BROKER_LOGGER, None) - case ClientMetricsType => + case ConfigType.CLIENT_METRICS => (ConfigResource.Type.CLIENT_METRICS, Some(ConfigEntry.ConfigSource.DYNAMIC_CLIENT_METRICS_CONFIG)) - case GroupType => + case ConfigType.GROUP => (ConfigResource.Type.GROUP, Some(ConfigEntry.ConfigSource.DYNAMIC_GROUP_CONFIG)) case entityType => throw new IllegalArgumentException(s"Invalid entity type: $entityType") } @@ -498,7 +451,7 @@ object ConfigCommand extends Logging { describeQuotaConfigs(adminClient, entityTypes, entityNames) // we describe user SCRAM credentials only when we are not describing client information // and we are not given either --entity-default or --user-defaults - if (!entityTypes.contains(ClientType) && !entityNames.contains("")) { + if (!entityTypes.contains(ConfigType.CLIENT) && !entityNames.contains("")) { val result = adminClient.describeUserScramCredentials(entityNames.asJava) result.users.get(30, TimeUnit.SECONDS).asScala.foreach(user => { try { @@ -521,9 +474,9 @@ object ConfigCommand extends Logging { private def getAllClientQuotasConfigs(adminClient: Admin, entityTypes: List[String], entityNames: List[String]) = { val components = entityTypes.map(Some(_)).zipAll(entityNames.map(Some(_)), None, None).map { case (entityTypeOpt, entityNameOpt) => val entityType = entityTypeOpt match { - case Some(UserType) => ClientQuotaEntity.USER - case Some(ClientType) => ClientQuotaEntity.CLIENT_ID - case Some(IpType) => ClientQuotaEntity.IP + case Some(ConfigType.USER) => ClientQuotaEntity.USER + case Some(ConfigType.CLIENT) => ClientQuotaEntity.CLIENT_ID + case Some(ConfigType.IP) => ClientQuotaEntity.IP case Some(_) => throw new IllegalArgumentException(s"Unexpected entity type ${entityTypeOpt.get}") case None => throw new IllegalArgumentException("More entity names specified than entity types") } @@ -566,14 +519,14 @@ object ConfigCommand extends Logging { private val nl: String = System.lineSeparator() val addConfig: OptionSpec[String] = parser.accepts("add-config", "Key Value pairs of configs to add. Square brackets can be used to group values which contain commas: 'k1=v1,k2=[v1,v2,v2],k3=v3'. The following is a list of valid configurations: " + - "For entity-type '" + TopicType + "': " + LogConfig.nonInternalConfigNames.asScala.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + BrokerType + "': " + DynamicConfig.Broker.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + UserType + "': " + QuotaConfig.scramMechanismsPlusUserAndClientQuotaConfigs().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + ClientType + "': " + QuotaConfig.userAndClientQuotaConfigs().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + IpType + "': " + QuotaConfig.ipConfigs.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + ClientMetricsType + "': " + ClientMetricsConfigs.configDef().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - "For entity-type '" + GroupType + "': " + GroupConfig.configDef().names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + - s"Entity types '$UserType' and '$ClientType' may be specified together to update config for clients of a specific user.") + "For entity-type '" + ConfigType.TOPIC + "': " + LogConfig.configNames.asScala.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.BROKER + "': " + DynamicConfig.Broker.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.USER + "': " + DynamicConfig.User.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.CLIENT + "': " + DynamicConfig.Client.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.IP + "': " + DynamicConfig.Ip.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.CLIENT_METRICS + "': " + DynamicConfig.ClientMetrics.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + "For entity-type '" + ConfigType.GROUP + "': " + DynamicConfig.Group.names.asScala.toSeq.sorted.map("\t" + _).mkString(nl, nl, nl) + + s"Entity types '${ConfigType.USER}' and '${ConfigType.CLIENT}' may be specified together to update config for clients of a specific user.") .withRequiredArg .ofType(classOf[String]) val addConfigFile: OptionSpec[String] = parser.accepts("add-config-file", "Path to a properties file with configs to add. See add-config for a list of valid configurations.") @@ -583,6 +536,7 @@ object ConfigCommand extends Logging { .withRequiredArg .ofType(classOf[String]) .withValuesSeparatedBy(',') + val forceOpt: OptionSpecBuilder = parser.accepts("force", "Suppress console prompts") val topic: OptionSpec[String] = parser.accepts("topic", "The topic's name.") .withRequiredArg .ofType(classOf[String]) @@ -613,19 +567,19 @@ object ConfigCommand extends Logging { .ofType(classOf[String]) options = parser.parse(args : _*) - private val entityFlags = List((topic, TopicType), - (client, ClientType), - (user, UserType), - (broker, BrokerType), + private val entityFlags = List((topic, ConfigType.TOPIC), + (client, ConfigType.CLIENT), + (user, ConfigType.USER), + (broker, ConfigType.BROKER), (brokerLogger, BrokerLoggerConfigType), - (ip, IpType), - (clientMetrics, ClientMetricsType), - (group, GroupType)) + (ip, ConfigType.IP), + (clientMetrics, ConfigType.CLIENT_METRICS), + (group, ConfigType.GROUP)) - private val entityDefaultsFlags = List((clientDefaults, ClientType), - (userDefaults, UserType), - (brokerDefaults, BrokerType), - (ipDefaults, IpType)) + private val entityDefaultsFlags = List((clientDefaults, ConfigType.CLIENT), + (userDefaults, ConfigType.USER), + (brokerDefaults, ConfigType.BROKER), + (ipDefaults, ConfigType.IP)) private[admin] def entityTypes: List[String] = { options.valuesOf(entityType).asScala.toList ++ @@ -671,8 +625,8 @@ object ConfigCommand extends Logging { ) if (entityTypeVals.isEmpty) throw new IllegalArgumentException("At least one entity type must be specified") - else if (entityTypeVals.size > 1 && !entityTypeVals.toSet.equals(Set(UserType, ClientType))) - throw new IllegalArgumentException(s"Only '$UserType' and '$ClientType' entity types may be specified together") + else if (entityTypeVals.size > 1 && !entityTypeVals.toSet.equals(Set(ConfigType.USER, ConfigType.CLIENT))) + throw new IllegalArgumentException(s"Only '${ConfigType.USER}' and '${ConfigType.CLIENT}' entity types may be specified together") if ((options.has(entityName) || options.has(entityType) || options.has(entityDefault)) && (entityFlags ++ entityDefaultsFlags).exists(entity => options.has(entity._1))) @@ -685,7 +639,7 @@ object ConfigCommand extends Logging { (if (options.has(bootstrapControllerOpt)) 1 else 0) if (numConnectOptions > 1) throw new IllegalArgumentException("Only one of --bootstrap-server or --bootstrap-controller can be specified") - if (hasEntityName && (entityTypeVals.contains(BrokerType) || entityTypeVals.contains(BrokerLoggerConfigType))) { + if (hasEntityName && (entityTypeVals.contains(ConfigType.BROKER) || entityTypeVals.contains(BrokerLoggerConfigType))) { Seq(entityName, broker, brokerLogger).filter(options.has(_)).map(options.valueOf(_)).foreach { brokerId => try brokerId.toInt catch { case _: NumberFormatException => @@ -694,7 +648,7 @@ object ConfigCommand extends Logging { } } - if (hasEntityName && entityTypeVals.contains(IpType)) { + if (hasEntityName && entityTypeVals.contains(ConfigType.IP)) { Seq(entityName, ip).filter(options.has(_)).map(options.valueOf(_)).foreach { ipEntity => if (!isValidIpEntity(ipEntity)) throw new IllegalArgumentException(s"The entity name for ${entityTypeVals.head} must be a valid IP or resolvable host, but it is: $ipEntity") @@ -702,10 +656,10 @@ object ConfigCommand extends Logging { } if (options.has(describeOpt)) { - if (!(entityTypeVals.contains(UserType) || - entityTypeVals.contains(ClientType) || - entityTypeVals.contains(BrokerType) || - entityTypeVals.contains(IpType)) && options.has(entityDefault)) { + if (!(entityTypeVals.contains(ConfigType.USER) || + entityTypeVals.contains(ConfigType.CLIENT) || + entityTypeVals.contains(ConfigType.BROKER) || + entityTypeVals.contains(ConfigType.IP)) && options.has(entityDefault)) { throw new IllegalArgumentException(s"--entity-default must not be specified with --describe of ${entityTypeVals.mkString(",")}") } @@ -714,10 +668,10 @@ object ConfigCommand extends Logging { } if (options.has(alterOpt)) { - if (entityTypeVals.contains(UserType) || - entityTypeVals.contains(ClientType) || - entityTypeVals.contains(BrokerType) || - entityTypeVals.contains(IpType)) { + if (entityTypeVals.contains(ConfigType.USER) || + entityTypeVals.contains(ConfigType.CLIENT) || + entityTypeVals.contains(ConfigType.BROKER) || + entityTypeVals.contains(ConfigType.IP)) { if (!hasEntityName && !hasEntityDefault) throw new IllegalArgumentException("An entity-name or default entity must be specified with --alter of users, clients, brokers or ips") } else if (!hasEntityName) diff --git a/core/src/main/scala/kafka/cluster/Broker.scala b/core/src/main/scala/kafka/cluster/Broker.scala new file mode 100644 index 0000000000000..794b641a847ce --- /dev/null +++ b/core/src/main/scala/kafka/cluster/Broker.scala @@ -0,0 +1,87 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.cluster + +import kafka.common.BrokerEndPointNotAvailableException +import org.apache.kafka.common.feature.{Features, SupportedVersionRange} +import org.apache.kafka.common.feature.Features._ +import org.apache.kafka.common.Node +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.server.network.BrokerEndPoint + +import scala.collection.Seq + +object Broker { + + def apply(id: Int, endPoints: Seq[EndPoint], rack: Option[String]): Broker = { + new Broker(id, endPoints, rack, emptySupportedFeatures) + } + + def apply(id: Int, endPoint: EndPoint, rack: Option[String]): Broker = { + new Broker(id, Seq(endPoint), rack, emptySupportedFeatures) + } +} + +/** + * A Kafka broker. + * + * @param id a broker id + * @param endPoints a collection of EndPoint. Each end-point is (host, port, listener name, security protocol). + * @param rack an optional rack + * @param features supported features + */ +case class Broker(id: Int, endPoints: Seq[EndPoint], rack: Option[String], features: Features[SupportedVersionRange]) { + + private val endPointsMap = endPoints.map { endPoint => + endPoint.listenerName -> endPoint + }.toMap + + if (endPointsMap.size != endPoints.size) + throw new IllegalArgumentException(s"There is more than one end point with the same listener name: ${endPoints.mkString(",")}") + + override def toString: String = + s"$id : ${endPointsMap.values.mkString("(",",",")")} : ${rack.orNull} : $features" + + def this(id: Int, host: String, port: Int, listenerName: ListenerName, protocol: SecurityProtocol) = { + this(id, Seq(EndPoint(host, port, listenerName, protocol)), None, emptySupportedFeatures) + } + + def this(bep: BrokerEndPoint, listenerName: ListenerName, protocol: SecurityProtocol) = { + this(bep.id, bep.host, bep.port, listenerName, protocol) + } + + def node(listenerName: ListenerName): Node = + getNode(listenerName).getOrElse { + throw new BrokerEndPointNotAvailableException(s"End point with listener name ${listenerName.value} not found " + + s"for broker $id") + } + + def getNode(listenerName: ListenerName): Option[Node] = + endPointsMap.get(listenerName).map(endpoint => new Node(id, endpoint.host, endpoint.port, rack.orNull)) + + def brokerEndPoint(listenerName: ListenerName): BrokerEndPoint = { + val endpoint = endPoint(listenerName) + new BrokerEndPoint(id, endpoint.host, endpoint.port) + } + + def endPoint(listenerName: ListenerName): EndPoint = { + endPointsMap.getOrElse(listenerName, + throw new BrokerEndPointNotAvailableException(s"End point with listener name ${listenerName.value} not found for broker $id")) + } +} diff --git a/core/src/main/scala/kafka/cluster/EndPoint.scala b/core/src/main/scala/kafka/cluster/EndPoint.scala new file mode 100644 index 0000000000000..d43319830c358 --- /dev/null +++ b/core/src/main/scala/kafka/cluster/EndPoint.scala @@ -0,0 +1,59 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.cluster + +import org.apache.kafka.common.{KafkaException, Endpoint => JEndpoint} +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.utils.Utils + +import java.util.Locale + +object EndPoint { + def parseListenerName(connectionString: String): String = { + val firstColon = connectionString.indexOf(':') + if (firstColon < 0) { + throw new KafkaException(s"Unable to parse a listener name from $connectionString") + } + connectionString.substring(0, firstColon).toUpperCase(Locale.ROOT) + } + + def fromJava(endpoint: JEndpoint): EndPoint = + new EndPoint(endpoint.host(), + endpoint.port(), + new ListenerName(endpoint.listenerName().get()), + endpoint.securityProtocol()) +} + +/** + * Part of the broker definition - matching host/port pair to a protocol + */ +case class EndPoint(host: String, port: Int, listenerName: ListenerName, securityProtocol: SecurityProtocol) { + def connectionString: String = { + val hostport = + if (host == null) + ":"+port + else + Utils.formatAddress(host, port) + listenerName.value + "://" + hostport + } + + def toJava: JEndpoint = { + new JEndpoint(listenerName.value, securityProtocol, host, port) + } +} diff --git a/core/src/main/scala/kafka/cluster/Partition.scala b/core/src/main/scala/kafka/cluster/Partition.scala index 3b45a08b0673d..7ec0904cb005b 100755 --- a/core/src/main/scala/kafka/cluster/Partition.scala +++ b/core/src/main/scala/kafka/cluster/Partition.scala @@ -16,13 +16,14 @@ */ package kafka.cluster -import java.lang.{Long => JLong} import java.util.concurrent.locks.ReentrantReadWriteLock import java.util.Optional -import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, CopyOnWriteArrayList} +import java.util.concurrent.{CompletableFuture, CopyOnWriteArrayList} import kafka.controller.StateChangeLogger import kafka.log._ +import kafka.log.remote.RemoteLogManager import kafka.server._ +import kafka.server.metadata.KRaftMetadataCache import kafka.server.share.DelayedShareFetch import kafka.utils.CoreUtils.{inReadLock, inWriteLock} import kafka.utils._ @@ -37,14 +38,11 @@ import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch} import org.apache.kafka.common.requests._ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.utils.Time -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, PartitionRegistration} +import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} import org.apache.kafka.server.common.RequestLocal -import org.apache.kafka.server.log.remote.TopicPartitionLog -import org.apache.kafka.server.log.remote.storage.RemoteLogManager -import org.apache.kafka.storage.internals.log.{AppendOrigin, AsyncOffsetReader, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetsListener, LogOffsetSnapshot, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogReadInfo, LogStartOffsetIncrementReason, OffsetResultHolder, VerificationGuard} import org.apache.kafka.server.metrics.KafkaMetricsGroup -import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, TopicPartitionOperationKey} -import org.apache.kafka.server.replica.Replica +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, UnexpectedAppendOffsetException} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints @@ -52,7 +50,6 @@ import org.slf4j.event.Level import scala.collection.Seq import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional} import scala.jdk.javaapi.OptionConverters /** @@ -316,14 +313,14 @@ class Partition(val topicPartition: TopicPartition, logManager: LogManager, alterIsrManager: AlterPartitionManager, @volatile private var _topicId: Option[Uuid] = None // TODO: merge topicPartition and _topicId into TopicIdPartition once TopicId persist in most of the code by KAFKA-16212 - ) extends Logging with TopicPartitionLog { + ) extends Logging { import Partition.metricsGroup def topic: String = topicPartition.topic def partitionId: Int = topicPartition.partition private val stateChangeLogger = new StateChangeLogger(localBrokerId, inControllerContext = false, None) - private val remoteReplicasMap = new ConcurrentHashMap[Int, Replica] + private val remoteReplicasMap = new Pool[Int, Replica] // The read lock is only required when multiple reads are executed and needs to be in a consistent manner private val leaderIsrUpdateLock = new ReentrantReadWriteLock @@ -370,8 +367,6 @@ class Partition(val topicPartition: TopicPartition, metricsGroup.newGauge("ReplicasCount", () => if (isLeader) assignmentState.replicationFactor else 0, tags) metricsGroup.newGauge("LastStableOffsetLag", () => log.map(_.lastStableOffsetLag).getOrElse(0), tags) - def unifiedLog(): Optional[UnifiedLog] = log.toJava - def hasLateTransaction(currentTimeMs: Long): Boolean = leaderLogIfLocal.exists(_.hasLateTransaction(currentTimeMs)) def isUnderReplicated: Boolean = isLeader && (assignmentState.replicationFactor - partitionState.isr.size) > 0 @@ -499,7 +494,7 @@ class Partition(val topicPartition: TopicPartition, logManager.initializingLog(topicPartition) var maybeLog: Option[UnifiedLog] = None try { - val log = logManager.getOrCreateLog(topicPartition, isNew, isFutureReplica, topicId.toJava, targetLogDirectoryId) + val log = logManager.getOrCreateLog(topicPartition, isNew, isFutureReplica, topicId, targetLogDirectoryId) if (!isFutureReplica) log.setLogOffsetsListener(logOffsetsListener) maybeLog = Some(log) updateHighWatermark(log) @@ -598,14 +593,14 @@ class Partition(val topicPartition: TopicPartition, */ def topicId: Option[Uuid] = { if (_topicId.isEmpty || _topicId.contains(Uuid.ZERO_UUID)) { - _topicId = this.log.orElse(logManager.getLog(topicPartition)).flatMap(_.topicId.toScala) + _topicId = this.log.orElse(logManager.getLog(topicPartition)).flatMap(_.topicId) } _topicId } // remoteReplicas will be called in the hot path, and must be inexpensive def remoteReplicas: Iterable[Replica] = - remoteReplicasMap.values.asScala + remoteReplicasMap.values def futureReplicaDirChanged(newDestinationDir: String): Boolean = { inReadLock(leaderIsrUpdateLock) { @@ -730,8 +725,7 @@ class Partition(val topicPartition: TopicPartition, * from the time when this broker was the leader last time) and setting the new leader and ISR. * If the leader replica id does not change, return false to indicate the replica manager. */ - def makeLeader(partitionRegistration: PartitionRegistration, - isNew: Boolean, + def makeLeader(partitionState: LeaderAndIsrRequest.PartitionState, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetDirectoryId: Option[Uuid] = None): Boolean = { @@ -739,23 +733,23 @@ class Partition(val topicPartition: TopicPartition, // Partition state changes are expected to have a partition epoch larger or equal // to the current partition epoch. The latter is allowed because the partition epoch // is also updated by the AlterPartition response so the new epoch might be known - // before a partitionRegistration is received or before an update is received via + // before a LeaderAndIsr request is received or before an update is received via // the metadata log. - if (partitionRegistration.partitionEpoch < partitionEpoch) { - stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId, " + - s"partition registration $partitionRegistration and isNew=$isNew since the leader is already at a newer partition epoch $partitionEpoch.") + if (partitionState.partitionEpoch < partitionEpoch) { + stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId " + + s"and partition state $partitionState since the leader is already at a newer partition epoch $partitionEpoch.") return false } val currentTimeMs = time.milliseconds val isNewLeader = !isLeader - val isNewLeaderEpoch = partitionRegistration.leaderEpoch > leaderEpoch - val replicas = partitionRegistration.replicas - val isr = partitionRegistration.isr.toSet - val addingReplicas = partitionRegistration.addingReplicas - val removingReplicas = partitionRegistration.removingReplicas + val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch + val replicas = partitionState.replicas.asScala.map(_.toInt) + val isr = partitionState.isr.asScala.map(_.toInt).toSet + val addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt) + val removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt) - if (partitionRegistration.leaderRecoveryState == LeaderRecoveryState.RECOVERING) { + if (partitionState.leaderRecoveryState == LeaderRecoveryState.RECOVERING.value) { stateChangeLogger.info(s"The topic partition $topicPartition was marked as RECOVERING. " + "Marking the topic partition as RECOVERED.") } @@ -771,7 +765,7 @@ class Partition(val topicPartition: TopicPartition, LeaderRecoveryState.RECOVERED ) - createLogInAssignedDirectoryId(isNew, highWatermarkCheckpoints, topicId, targetDirectoryId) + createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetDirectoryId) val leaderLog = localLogOrException @@ -780,8 +774,8 @@ class Partition(val topicPartition: TopicPartition, if (isNewLeaderEpoch) { val leaderEpochStartOffset = leaderLog.logEndOffset stateChangeLogger.info(s"Leader $topicPartition with topic id $topicId starts at " + - s"leader epoch ${partitionRegistration.leaderEpoch} from offset $leaderEpochStartOffset " + - s"with partition epoch ${partitionRegistration.partitionEpoch}, high watermark ${leaderLog.highWatermark}, " + + s"leader epoch ${partitionState.leaderEpoch} from offset $leaderEpochStartOffset " + + s"with partition epoch ${partitionState.partitionEpoch}, high watermark ${leaderLog.highWatermark}, " + s"ISR ${isr.mkString("[", ",", "]")}, adding replicas ${addingReplicas.mkString("[", ",", "]")} and " + s"removing replicas ${removingReplicas.mkString("[", ",", "]")} ${if (isUnderMinIsr) "(under-min-isr)" else ""}. " + s"Previous leader $leaderReplicaIdOpt and previous leader epoch was $leaderEpoch.") @@ -791,32 +785,32 @@ class Partition(val topicPartition: TopicPartition, // to ensure that these followers can truncate to the right offset, we must cache the new // leader epoch and the start offset since it should be larger than any epoch that a follower // would try to query. - leaderLog.assignEpochStartOffset(partitionRegistration.leaderEpoch, leaderEpochStartOffset) + leaderLog.assignEpochStartOffset(partitionState.leaderEpoch, leaderEpochStartOffset) // Initialize lastCaughtUpTime of replicas as well as their lastFetchTimeMs and // lastFetchLeaderLogEndOffset. remoteReplicas.foreach { replica => replica.resetReplicaState( - currentTimeMs, - leaderEpochStartOffset, - isNewLeader, - isr.contains(replica.brokerId) + currentTimeMs = currentTimeMs, + leaderEndOffset = leaderEpochStartOffset, + isNewLeader = isNewLeader, + isFollowerInSync = partitionState.isr.contains(replica.brokerId) ) } // We update the leader epoch and the leader epoch start offset iff the // leader epoch changed. - leaderEpoch = partitionRegistration.leaderEpoch + leaderEpoch = partitionState.leaderEpoch leaderEpochStartOffsetOpt = Some(leaderEpochStartOffset) } else { - stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId, " + - s"partition registration $partitionRegistration and isNew=$isNew since it is already the leader with leader epoch $leaderEpoch. " + + stateChangeLogger.info(s"Skipped the become-leader state change for $topicPartition with topic id $topicId " + + s"and partition state $partitionState since it is already the leader with leader epoch $leaderEpoch. " + s"Current high watermark ${leaderLog.highWatermark}, ISR ${isr.mkString("[", ",", "]")}, " + s"adding replicas ${addingReplicas.mkString("[", ",", "]")} and " + s"removing replicas ${removingReplicas.mkString("[", ",", "]")}.") } - partitionEpoch = partitionRegistration.partitionEpoch + partitionEpoch = partitionState.partitionEpoch leaderReplicaIdOpt = Some(localBrokerId) // We may need to increment high watermark since ISR could be down to 1. @@ -837,47 +831,46 @@ class Partition(val topicPartition: TopicPartition, * replica manager that state is already correct and the become-follower steps can * be skipped. */ - def makeFollower(partitionRegistration: PartitionRegistration, - isNew: Boolean, + def makeFollower(partitionState: LeaderAndIsrRequest.PartitionState, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid] = None): Boolean = { inWriteLock(leaderIsrUpdateLock) { - if (partitionRegistration.partitionEpoch < partitionEpoch) { - stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId, " + - s"partition registration $partitionRegistration and isNew=$isNew since the follower is already at a newer partition epoch $partitionEpoch.") + if (partitionState.partitionEpoch < partitionEpoch) { + stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId " + + s"and partition state $partitionState since the follower is already at a newer partition epoch $partitionEpoch.") return false } - val isNewLeaderEpoch = partitionRegistration.leaderEpoch > leaderEpoch + val isNewLeaderEpoch = partitionState.leaderEpoch > leaderEpoch // The leader should be updated before updateAssignmentAndIsr where we clear the ISR. Or it is possible to meet // the under min isr condition during the makeFollower process and emits the wrong metric. - leaderReplicaIdOpt = Option(partitionRegistration.leader) - leaderEpoch = partitionRegistration.leaderEpoch + leaderReplicaIdOpt = Option(partitionState.leader) + leaderEpoch = partitionState.leaderEpoch leaderEpochStartOffsetOpt = None - partitionEpoch = partitionRegistration.partitionEpoch + partitionEpoch = partitionState.partitionEpoch updateAssignmentAndIsr( - replicas = partitionRegistration.replicas, + replicas = partitionState.replicas.asScala.iterator.map(_.toInt).toSeq, isLeader = false, isr = Set.empty, - addingReplicas = partitionRegistration.addingReplicas, - removingReplicas = partitionRegistration.removingReplicas, - partitionRegistration.leaderRecoveryState + addingReplicas = partitionState.addingReplicas.asScala.map(_.toInt), + removingReplicas = partitionState.removingReplicas.asScala.map(_.toInt), + LeaderRecoveryState.of(partitionState.leaderRecoveryState) ) - createLogInAssignedDirectoryId(isNew, highWatermarkCheckpoints, topicId, targetLogDirectoryId) + createLogInAssignedDirectoryId(partitionState, highWatermarkCheckpoints, topicId, targetLogDirectoryId) val followerLog = localLogOrException if (isNewLeaderEpoch) { val leaderEpochEndOffset = followerLog.logEndOffset - stateChangeLogger.info(s"Follower $topicPartition starts at leader epoch ${partitionRegistration.leaderEpoch} from " + - s"offset $leaderEpochEndOffset with partition epoch ${partitionRegistration.partitionEpoch} and " + - s"high watermark ${followerLog.highWatermark}. Current leader is ${partitionRegistration.leader}. " + + stateChangeLogger.info(s"Follower $topicPartition starts at leader epoch ${partitionState.leaderEpoch} from " + + s"offset $leaderEpochEndOffset with partition epoch ${partitionState.partitionEpoch} and " + + s"high watermark ${followerLog.highWatermark}. Current leader is ${partitionState.leader}. " + s"Previous leader $leaderReplicaIdOpt and previous leader epoch was $leaderEpoch.") } else { - stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId, " + - s"partition registration $partitionRegistration and isNew=$isNew since it is already a follower with leader epoch $leaderEpoch.") + stateChangeLogger.info(s"Skipped the become-follower state change for $topicPartition with topic id $topicId " + + s"and partition state $partitionState since it is already a follower with leader epoch $leaderEpoch.") } // We must restart the fetchers when the leader epoch changed regardless of @@ -886,11 +879,11 @@ class Partition(val topicPartition: TopicPartition, } } - private def createLogInAssignedDirectoryId(isNew: Boolean, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid]): Unit = { + private def createLogInAssignedDirectoryId(partitionState: LeaderAndIsrRequest.PartitionState, highWatermarkCheckpoints: OffsetCheckpoints, topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid]): Unit = { targetLogDirectoryId match { case Some(directoryId) => if (logManager.onlineLogDirId(directoryId) || !logManager.hasOfflineLogDirs() || directoryId == DirectoryId.UNASSIGNED) { - createLogIfNotExists(isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId, targetLogDirectoryId) + createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId, targetLogDirectoryId) } else { warn(s"Skipping creation of log because there are potentially offline log " + s"directories and log may already exist there. directoryId=$directoryId, " + @@ -898,7 +891,7 @@ class Partition(val topicPartition: TopicPartition, } case None => - createLogIfNotExists(isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId) + createLogIfNotExists(partitionState.isNew, isFutureReplica = false, highWatermarkCheckpoints, topicId) } } @@ -986,11 +979,12 @@ class Partition(val topicPartition: TopicPartition, ): Unit = { if (isLeader) { val followers = replicas.filter(_ != localBrokerId) + val removedReplicas = remoteReplicasMap.keys.filterNot(followers.contains(_)) // Due to code paths accessing remoteReplicasMap without a lock, // first add the new replicas and then remove the old ones. - followers.foreach(id => remoteReplicasMap.computeIfAbsent(id, _ => new Replica(id, topicPartition, metadataCache))) - remoteReplicasMap.keySet.removeIf(replica => !followers.contains(replica)) + followers.foreach(id => remoteReplicasMap.getAndMaybePut(id, new Replica(id, topicPartition, metadataCache))) + remoteReplicasMap.removeAll(removedReplicas) } else { remoteReplicasMap.clear() } @@ -1056,28 +1050,33 @@ class Partition(val topicPartition: TopicPartition, } private def isReplicaIsrEligible(followerReplicaId: Int): Boolean = { - // A replica which meets all of the following requirements is allowed to join the ISR. - // 1. It is not fenced. - // 2. It is not in controlled shutdown. - // 3. Its metadata cached broker epoch matches its Fetch request broker epoch. Or the Fetch - // request broker epoch is -1 which bypasses the epoch verification. - val mayBeReplica = getReplica(followerReplicaId) - // The topic is already deleted and we don't have any replica information. In this case, we can return false - // so as to avoid NPE - if (mayBeReplica.isEmpty) { - warn(s"The replica state of replica ID:[$followerReplicaId] doesn't exist in the leader node. It might because the topic is already deleted.") - return false + metadataCache match { + // In KRaft mode, only a replica which meets all of the following requirements is allowed to join the ISR. + // 1. It is not fenced. + // 2. It is not in controlled shutdown. + // 3. Its metadata cached broker epoch matches its Fetch request broker epoch. Or the Fetch + // request broker epoch is -1 which bypasses the epoch verification. + case kRaftMetadataCache: KRaftMetadataCache => + val mayBeReplica = getReplica(followerReplicaId) + // The topic is already deleted and we don't have any replica information. In this case, we can return false + // so as to avoid NPE + if (mayBeReplica.isEmpty) { + warn(s"The replica state of replica ID:[$followerReplicaId] doesn't exist in the leader node. It might because the topic is already deleted.") + return false + } + val storedBrokerEpoch = mayBeReplica.get.stateSnapshot.brokerEpoch + val cachedBrokerEpoch = kRaftMetadataCache.getAliveBrokerEpoch(followerReplicaId) + !kRaftMetadataCache.isBrokerFenced(followerReplicaId) && + !kRaftMetadataCache.isBrokerShuttingDown(followerReplicaId) && + isBrokerEpochIsrEligible(storedBrokerEpoch, cachedBrokerEpoch) + + case _ => true } - val storedBrokerEpoch = mayBeReplica.get.stateSnapshot.brokerEpoch - val cachedBrokerEpoch = metadataCache.getAliveBrokerEpoch(followerReplicaId) - !metadataCache.isBrokerFenced(followerReplicaId) && - !metadataCache.isBrokerShuttingDown(followerReplicaId) && - isBrokerEpochIsrEligible(storedBrokerEpoch, cachedBrokerEpoch) } - private def isBrokerEpochIsrEligible(storedBrokerEpoch: Optional[java.lang.Long], cachedBrokerEpoch: Optional[java.lang.Long]): Boolean = { - storedBrokerEpoch.isPresent && cachedBrokerEpoch.isPresent && - (storedBrokerEpoch.get == -1 || storedBrokerEpoch.get == cachedBrokerEpoch.get) + private def isBrokerEpochIsrEligible(storedBrokerEpoch: Option[Long], cachedBrokerEpoch: Option[Long]): Boolean = { + storedBrokerEpoch.isDefined && cachedBrokerEpoch.isDefined && + (storedBrokerEpoch.get == -1 || storedBrokerEpoch == cachedBrokerEpoch) } /* @@ -1160,7 +1159,7 @@ class Partition(val topicPartition: TopicPartition, // avoid unnecessary collection generation val leaderLogEndOffset = leaderLog.logEndOffsetMetadata var newHighWatermark = leaderLogEndOffset - remoteReplicasMap.forEach { (_, replica) => + remoteReplicasMap.values.foreach { replica => val replicaState = replica.stateSnapshot def shouldWaitForReplicaToJoinIsr: Boolean = { @@ -1176,7 +1175,7 @@ class Partition(val topicPartition: TopicPartition, } } - leaderLog.maybeIncrementHighWatermark(newHighWatermark).toScala match { + leaderLog.maybeIncrementHighWatermark(newHighWatermark) match { case Some(oldHighWatermark) => debug(s"High watermark updated from $oldHighWatermark to $newHighWatermark") true @@ -1308,35 +1307,27 @@ class Partition(val topicPartition: TopicPartition, } } - private def doAppendRecordsToFollowerOrFutureReplica( - records: MemoryRecords, - isFuture: Boolean, - partitionLeaderEpoch: Int - ): Option[LogAppendInfo] = { + private def doAppendRecordsToFollowerOrFutureReplica(records: MemoryRecords, isFuture: Boolean): Option[LogAppendInfo] = { if (isFuture) { // The read lock is needed to handle race condition if request handler thread tries to // remove future replica after receiving AlterReplicaLogDirsRequest. inReadLock(leaderIsrUpdateLock) { // Note the replica may be undefined if it is removed by a non-ReplicaAlterLogDirsThread before // this method is called - futureLog.map { _.appendAsFollower(records, partitionLeaderEpoch) } + futureLog.map { _.appendAsFollower(records) } } } else { // The lock is needed to prevent the follower replica from being updated while ReplicaAlterDirThread // is executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica. futureLogLock.synchronized { - Some(localLogOrException.appendAsFollower(records, partitionLeaderEpoch)) + Some(localLogOrException.appendAsFollower(records)) } } } - def appendRecordsToFollowerOrFutureReplica( - records: MemoryRecords, - isFuture: Boolean, - partitionLeaderEpoch: Int - ): Option[LogAppendInfo] = { + def appendRecordsToFollowerOrFutureReplica(records: MemoryRecords, isFuture: Boolean): Option[LogAppendInfo] = { try { - doAppendRecordsToFollowerOrFutureReplica(records, isFuture, partitionLeaderEpoch) + doAppendRecordsToFollowerOrFutureReplica(records, isFuture) } catch { case e: UnexpectedAppendOffsetException => val log = if (isFuture) futureLocalLogOrException else localLogOrException @@ -1354,7 +1345,7 @@ class Partition(val topicPartition: TopicPartition, info(s"Unexpected offset in append to $topicPartition. First offset ${e.firstOffset} is less than log start offset ${log.logStartOffset}." + s" Since this is the first record to be appended to the $replicaName's log, will start the log from offset ${e.firstOffset}.") truncateFullyAndStartAt(e.firstOffset, isFuture) - doAppendRecordsToFollowerOrFutureReplica(records, isFuture, partitionLeaderEpoch) + doAppendRecordsToFollowerOrFutureReplica(records, isFuture) } else throw e } @@ -1370,12 +1361,12 @@ class Partition(val topicPartition: TopicPartition, // Avoid writing to leader if there are not enough insync replicas to make it safe if (inSyncSize < minIsr && requiredAcks == -1) { - throw new NotEnoughReplicasException(s"The size of the current ISR : $inSyncSize " + - s"is insufficient to satisfy the min.isr requirement of $minIsr for partition $topicPartition, " + - s"live replica(s) broker.id are : $inSyncReplicaIds") + throw new NotEnoughReplicasException(s"The size of the current ISR ${partitionState.isr} " + + s"is insufficient to satisfy the min.isr requirement of $minIsr for partition $topicPartition") } - val info = leaderLog.appendAsLeader(records, this.leaderEpoch, origin, requestLocal, verificationGuard) + val info = leaderLog.appendAsLeader(records, leaderEpoch = this.leaderEpoch, origin, + requestLocal, verificationGuard) // we may need to increment high watermark since ISR could be down to 1 (info, maybeIncrementLeaderHW(leaderLog)) @@ -1599,7 +1590,7 @@ class Partition(val topicPartition: TopicPartition, def getOffsetByTimestamp: OffsetResultHolder = { logManager.getLog(topicPartition) - .map(log => log.fetchOffsetByTimestamp(timestamp, remoteLogManager.asInstanceOf[Option[AsyncOffsetReader]].toJava)) + .map(log => log.fetchOffsetByTimestamp(timestamp, remoteLogManager)) .getOrElse(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]())) } @@ -1627,7 +1618,7 @@ class Partition(val topicPartition: TopicPartition, case Some(producers) => producerState .setErrorCode(Errors.NONE.code) - .setActiveProducers(producers) + .setActiveProducers(producers.asJava) case None => producerState .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code) @@ -1701,7 +1692,7 @@ class Partition(val topicPartition: TopicPartition, */ def truncateFullyAndStartAt(newOffset: Long, isFuture: Boolean, - logStartOffsetOpt: Optional[JLong] = Optional.empty): Unit = { + logStartOffsetOpt: Option[Long] = None): Unit = { // The read lock is needed to prevent the follower replica from being truncated while ReplicaAlterDirThread // is executing maybeReplaceCurrentWithFutureReplica() to replace follower replica with the future replica. inReadLock(leaderIsrUpdateLock) { @@ -1729,11 +1720,11 @@ class Partition(val topicPartition: TopicPartition, val localLogOrError = getLocalLog(currentLeaderEpoch, fetchOnlyFromLeader) localLogOrError match { case Left(localLog) => - localLog.endOffsetForEpoch(leaderEpoch).toScala match { + localLog.endOffsetForEpoch(leaderEpoch) match { case Some(epochAndOffset) => new EpochEndOffset() .setPartition(partitionId) .setErrorCode(Errors.NONE.code) - .setLeaderEpoch(epochAndOffset.epoch()) + .setLeaderEpoch(epochAndOffset.leaderEpoch) .setEndOffset(epochAndOffset.offset) case None => new EpochEndOffset() .setPartition(partitionId) @@ -1801,11 +1792,13 @@ class Partition(val topicPartition: TopicPartition, private def addBrokerEpochToIsr(isr: List[Int]): List[BrokerState] = { isr.map { brokerId => val brokerState = new BrokerState().setBrokerId(brokerId) - if (brokerId == localBrokerId) { + if (!metadataCache.isInstanceOf[KRaftMetadataCache]) { + brokerState.setBrokerEpoch(-1) + } else if (brokerId == localBrokerId) { brokerState.setBrokerEpoch(localBrokerEpochSupplier()) } else { val replica = remoteReplicasMap.get(brokerId) - val brokerEpoch = if (replica == null) Optional.empty else replica.stateSnapshot.brokerEpoch + val brokerEpoch = if (replica == null) Option.empty else replica.stateSnapshot.brokerEpoch if (brokerEpoch.isEmpty) { // There are two cases where the broker epoch can be missing: // 1. During ISR expansion, we already held lock for the partition and did the broker epoch check, so the new diff --git a/core/src/main/scala/kafka/cluster/Replica.scala b/core/src/main/scala/kafka/cluster/Replica.scala new file mode 100644 index 0000000000000..9a88544562d17 --- /dev/null +++ b/core/src/main/scala/kafka/cluster/Replica.scala @@ -0,0 +1,213 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.cluster + +import kafka.log.UnifiedLog +import kafka.server.MetadataCache +import kafka.server.metadata.KRaftMetadataCache +import kafka.utils.Logging +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.storage.internals.log.LogOffsetMetadata + +import java.util.concurrent.atomic.AtomicReference + +case class ReplicaState( + // The log start offset value, kept in all replicas; for local replica it is the + // log's start offset, for remote replicas its value is only updated by follower fetch. + logStartOffset: Long, + + // The log end offset value, kept in all replicas; for local replica it is the + // log's end offset, for remote replicas its value is only updated by follower fetch. + logEndOffsetMetadata: LogOffsetMetadata, + + // The log end offset value at the time the leader received the last FetchRequest from this follower. + // This is used to determine the lastCaughtUpTimeMs of the follower. It is reset by the leader + // when a LeaderAndIsr request is received and might be reset when the leader appends a record + // to its log. + lastFetchLeaderLogEndOffset: Long, + + // The time when the leader received the last FetchRequest from this follower. + // This is used to determine the lastCaughtUpTimeMs of the follower. + lastFetchTimeMs: Long, + + // lastCaughtUpTimeMs is the largest time t such that the offset of most recent FetchRequest from this follower >= + // the LEO of leader at time t. This is used to determine the lag of this follower and ISR of this partition. + lastCaughtUpTimeMs: Long, + + // The brokerEpoch is the epoch from the Fetch request. + brokerEpoch: Option[Long] +) { + /** + * Returns the current log end offset of the replica. + */ + def logEndOffset: Long = logEndOffsetMetadata.messageOffset + + /** + * Returns true when the replica is considered as "caught-up". A replica is + * considered "caught-up" when its log end offset is equals to the log end + * offset of the leader OR when its last caught up time minus the current + * time is smaller than the max replica lag. + */ + def isCaughtUp( + leaderEndOffset: Long, + currentTimeMs: Long, + replicaMaxLagMs: Long + ): Boolean = { + leaderEndOffset == logEndOffset || currentTimeMs - lastCaughtUpTimeMs <= replicaMaxLagMs + } +} + +object ReplicaState { + val Empty: ReplicaState = ReplicaState( + logEndOffsetMetadata = LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, + logStartOffset = UnifiedLog.UnknownOffset, + lastFetchLeaderLogEndOffset = 0L, + lastFetchTimeMs = 0L, + lastCaughtUpTimeMs = 0L, + brokerEpoch = None : Option[Long], + ) +} + +class Replica(val brokerId: Int, val topicPartition: TopicPartition, val metadataCache: MetadataCache) extends Logging { + private val replicaState = new AtomicReference[ReplicaState](ReplicaState.Empty) + + def stateSnapshot: ReplicaState = replicaState.get + + /** + * Update the replica's fetch state only if the broker epoch is -1 or it is larger or equal to the current broker + * epoch. Otherwise, NOT_LEADER_OR_FOLLOWER exception will be thrown. This can fence fetch state update from a + * stale request. + * + * If the FetchRequest reads up to the log end offset of the leader when the current fetch request is received, + * set `lastCaughtUpTimeMs` to the time when the current fetch request was received. + * + * Else if the FetchRequest reads up to the log end offset of the leader when the previous fetch request was received, + * set `lastCaughtUpTimeMs` to the time when the previous fetch request was received. + * + * This is needed to enforce the semantics of ISR, i.e. a replica is in ISR if and only if it lags behind leader's LEO + * by at most `replicaLagTimeMaxMs`. These semantics allow a follower to be added to the ISR even if the offset of its + * fetch request is always smaller than the leader's LEO, which can happen if small produce requests are received at + * high frequency. + */ + def updateFetchStateOrThrow( + followerFetchOffsetMetadata: LogOffsetMetadata, + followerStartOffset: Long, + followerFetchTimeMs: Long, + leaderEndOffset: Long, + brokerEpoch: Long + ): Unit = { + replicaState.updateAndGet { currentReplicaState => + metadataCache match { + case kRaftMetadataCache: KRaftMetadataCache => + val cachedBrokerEpoch = kRaftMetadataCache.getAliveBrokerEpoch(brokerId) + // Fence the update if it provides a stale broker epoch. + if (brokerEpoch != -1 && cachedBrokerEpoch.exists(_ > brokerEpoch)) { + throw new NotLeaderOrFollowerException(s"Received stale fetch state update. broker epoch=$brokerEpoch " + + s"vs expected=${cachedBrokerEpoch.get}") + } + case _ => + } + + val lastCaughtUpTime = if (followerFetchOffsetMetadata.messageOffset >= leaderEndOffset) { + math.max(currentReplicaState.lastCaughtUpTimeMs, followerFetchTimeMs) + } else if (followerFetchOffsetMetadata.messageOffset >= currentReplicaState.lastFetchLeaderLogEndOffset) { + math.max(currentReplicaState.lastCaughtUpTimeMs, currentReplicaState.lastFetchTimeMs) + } else { + currentReplicaState.lastCaughtUpTimeMs + } + + ReplicaState( + logStartOffset = followerStartOffset, + logEndOffsetMetadata = followerFetchOffsetMetadata, + lastFetchLeaderLogEndOffset = math.max(leaderEndOffset, currentReplicaState.lastFetchLeaderLogEndOffset), + lastFetchTimeMs = followerFetchTimeMs, + lastCaughtUpTimeMs = lastCaughtUpTime, + brokerEpoch = Option(brokerEpoch) + ) + } + } + + /** + * When the leader is elected or re-elected, the state of the follower is reinitialized + * accordingly. + */ + def resetReplicaState( + currentTimeMs: Long, + leaderEndOffset: Long, + isNewLeader: Boolean, + isFollowerInSync: Boolean + ): Unit = { + replicaState.updateAndGet { currentReplicaState => + // When the leader is elected or re-elected, the follower's last caught up time + // is set to the current time if the follower is in the ISR, else to 0. The latter + // is done to ensure that the high watermark is not hold back unnecessarily for + // a follower which is not in the ISR anymore. + val lastCaughtUpTimeMs = if (isFollowerInSync) currentTimeMs else 0L + + if (isNewLeader) { + ReplicaState( + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffsetMetadata = LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, + lastFetchLeaderLogEndOffset = UnifiedLog.UnknownOffset, + lastFetchTimeMs = 0L, + lastCaughtUpTimeMs = lastCaughtUpTimeMs, + brokerEpoch = Option.empty + ) + } else { + ReplicaState( + logStartOffset = currentReplicaState.logStartOffset, + logEndOffsetMetadata = currentReplicaState.logEndOffsetMetadata, + lastFetchLeaderLogEndOffset = leaderEndOffset, + // When the leader is re-elected, the follower's last fetch time is + // set to the current time if the follower is in the ISR, else to 0. + // The latter is done to ensure that the follower is not brought back + // into the ISR before a fetch is received. + lastFetchTimeMs = if (isFollowerInSync) currentTimeMs else 0L, + lastCaughtUpTimeMs = lastCaughtUpTimeMs, + brokerEpoch = currentReplicaState.brokerEpoch + ) + } + } + trace(s"Reset state of replica to $this") + } + + override def toString: String = { + val replicaState = this.replicaState.get + val replicaString = new StringBuilder + replicaString.append(s"Replica(replicaId=$brokerId") + replicaString.append(s", topic=${topicPartition.topic}") + replicaString.append(s", partition=${topicPartition.partition}") + replicaString.append(s", lastCaughtUpTimeMs=${replicaState.lastCaughtUpTimeMs}") + replicaString.append(s", logStartOffset=${replicaState.logStartOffset}") + replicaString.append(s", logEndOffset=${replicaState.logEndOffsetMetadata.messageOffset}") + replicaString.append(s", logEndOffsetMetadata=${replicaState.logEndOffsetMetadata}") + replicaString.append(s", lastFetchLeaderLogEndOffset=${replicaState.lastFetchLeaderLogEndOffset}") + replicaString.append(s", brokerEpoch=${replicaState.brokerEpoch.getOrElse(-2L)}") + replicaString.append(s", lastFetchTimeMs=${replicaState.lastFetchTimeMs}") + replicaString.append(")") + replicaString.toString + } + + override def equals(that: Any): Boolean = that match { + case other: Replica => brokerId == other.brokerId && topicPartition == other.topicPartition + case _ => false + } + + override def hashCode: Int = 31 + topicPartition.hashCode + 17 * brokerId +} diff --git a/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala b/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala new file mode 100644 index 0000000000000..455d8c64b5a44 --- /dev/null +++ b/core/src/main/scala/kafka/common/BrokerEndPointNotAvailableException.scala @@ -0,0 +1,22 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.common + +class BrokerEndPointNotAvailableException(message: String) extends RuntimeException(message) { + def this() = this(null) +} diff --git a/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala b/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala new file mode 100644 index 0000000000000..dfded33f009e4 --- /dev/null +++ b/core/src/main/scala/kafka/common/LogCleaningAbortedException.scala @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.common + +/** + * Thrown when a log cleaning task is requested to be aborted. + */ +class LogCleaningAbortedException extends RuntimeException() { +} diff --git a/core/src/main/scala/kafka/common/ThreadShutdownException.scala b/core/src/main/scala/kafka/common/ThreadShutdownException.scala new file mode 100644 index 0000000000000..8cd6601ce5aa9 --- /dev/null +++ b/core/src/main/scala/kafka/common/ThreadShutdownException.scala @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.common + +/** + * An exception that indicates a thread is being shut down normally. + */ +class ThreadShutdownException extends RuntimeException { +} diff --git a/core/src/main/scala/kafka/controller/ControllerContext.scala b/core/src/main/scala/kafka/controller/ControllerContext.scala new file mode 100644 index 0000000000000..cd56510e9a818 --- /dev/null +++ b/core/src/main/scala/kafka/controller/ControllerContext.scala @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.controller + +import scala.collection.Seq + +object ReplicaAssignment { + def apply(replicas: Seq[Int]): ReplicaAssignment = { + apply(replicas, Seq.empty, Seq.empty) + } + + val empty: ReplicaAssignment = apply(Seq.empty) +} + + +/** + * @param replicas the sequence of brokers assigned to the partition. It includes the set of brokers + * that were added (`addingReplicas`) and removed (`removingReplicas`). + * @param addingReplicas the replicas that are being added if there is a pending reassignment + * @param removingReplicas the replicas that are being removed if there is a pending reassignment + */ +case class ReplicaAssignment private (replicas: Seq[Int], + addingReplicas: Seq[Int], + removingReplicas: Seq[Int]) { + + lazy val targetReplicas: Seq[Int] = replicas.diff(removingReplicas) + + def isBeingReassigned: Boolean = { + addingReplicas.nonEmpty || removingReplicas.nonEmpty + } + + override def toString: String = s"ReplicaAssignment(" + + s"replicas=${replicas.mkString(",")}, " + + s"addingReplicas=${addingReplicas.mkString(",")}, " + + s"removingReplicas=${removingReplicas.mkString(",")})" +} + diff --git a/core/src/main/scala/kafka/controller/StateChangeLogger.scala b/core/src/main/scala/kafka/controller/StateChangeLogger.scala index 9f188fe33b74a..1292e3aa5e540 100644 --- a/core/src/main/scala/kafka/controller/StateChangeLogger.scala +++ b/core/src/main/scala/kafka/controller/StateChangeLogger.scala @@ -42,4 +42,9 @@ class StateChangeLogger(brokerId: Int, inControllerContext: Boolean, controllerE logIdent = s"[$prefix id=$brokerId$epochEntry] " } + def withControllerEpoch(controllerEpoch: Int): StateChangeLogger = + new StateChangeLogger(brokerId, inControllerContext, Some(controllerEpoch)) + + def messageWithPrefix(message: String): String = msgWithLogIdent(message) + } diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala new file mode 100644 index 0000000000000..70536abecc0a1 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/CoordinatorLoaderImpl.scala @@ -0,0 +1,253 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import kafka.server.ReplicaManager +import kafka.utils.Logging +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.common.record.{ControlRecordType, FileRecords, MemoryRecords} +import org.apache.kafka.common.requests.TransactionResult +import org.apache.kafka.common.utils.Time +import org.apache.kafka.coordinator.common.runtime.CoordinatorLoader.LoadSummary +import org.apache.kafka.coordinator.common.runtime.Deserializer.UnknownRecordTypeException +import org.apache.kafka.coordinator.common.runtime.{CoordinatorLoader, CoordinatorPlayback, Deserializer} +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.util.KafkaScheduler + +import java.nio.ByteBuffer +import java.util.concurrent.CompletableFuture +import java.util.concurrent.atomic.AtomicBoolean +import scala.jdk.CollectionConverters._ + +/** + * Coordinator loader which reads records from a partition and replays them + * to a group coordinator. + * + * @param replicaManager The replica manager. + * @param deserializer The deserializer to use. + * @param loadBufferSize The load buffer size. + * @tparam T The record type. + */ +class CoordinatorLoaderImpl[T]( + time: Time, + replicaManager: ReplicaManager, + deserializer: Deserializer[T], + loadBufferSize: Int +) extends CoordinatorLoader[T] with Logging { + private val isRunning = new AtomicBoolean(true) + private val scheduler = new KafkaScheduler(1) + scheduler.startup() + + /** + * Loads the coordinator by reading all the records from the TopicPartition + * and applying them to the Replayable object. + * + * @param tp The TopicPartition to read from. + * @param coordinator The object to apply records to. + */ + override def load( + tp: TopicPartition, + coordinator: CoordinatorPlayback[T] +): CompletableFuture[LoadSummary] = { + val future = new CompletableFuture[LoadSummary]() + val startTimeMs = time.milliseconds() + val result = scheduler.scheduleOnce(s"Load coordinator from $tp", + () => doLoad(tp, coordinator, future, startTimeMs)) + if (result.isCancelled) { + future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")) + } + future + } + + private def doLoad( + tp: TopicPartition, + coordinator: CoordinatorPlayback[T], + future: CompletableFuture[LoadSummary], + startTimeMs: Long + ): Unit = { + val schedulerQueueTimeMs = time.milliseconds() - startTimeMs + try { + replicaManager.getLog(tp) match { + case None => + future.completeExceptionally(new NotLeaderOrFollowerException( + s"Could not load records from $tp because the log does not exist.")) + + case Some(log) => + def logEndOffset: Long = replicaManager.getLogEndOffset(tp).getOrElse(-1L) + + // Buffer may not be needed if records are read from memory. + var buffer = ByteBuffer.allocate(0) + // Loop breaks if leader changes at any time during the load, since logEndOffset is -1. + var currentOffset = log.logStartOffset + // Loop breaks if no records have been read, since the end of the log has been reached. + // This is to ensure that the loop breaks even if the current offset remains smaller than + // the log end offset but the log is empty. This could happen with compacted topics. + var readAtLeastOneRecord = true + + var previousHighWatermark = -1L + var numRecords = 0L + var numBytes = 0L + while (currentOffset < logEndOffset && readAtLeastOneRecord && isRunning.get) { + val fetchDataInfo = log.read( + startOffset = currentOffset, + maxLength = loadBufferSize, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + ) + + readAtLeastOneRecord = fetchDataInfo.records.sizeInBytes > 0 + + val memoryRecords = (fetchDataInfo.records: @unchecked) match { + case records: MemoryRecords => + records + + case fileRecords: FileRecords => + val sizeInBytes = fileRecords.sizeInBytes + val bytesNeeded = Math.max(loadBufferSize, sizeInBytes) + + // "minOneMessage = true in the above log.read() means that the buffer may need to + // be grown to ensure progress can be made. + if (buffer.capacity < bytesNeeded) { + if (loadBufferSize < bytesNeeded) + warn(s"Loaded metadata from $tp with buffer larger ($bytesNeeded bytes) than " + + s"configured buffer size ($loadBufferSize bytes).") + + buffer = ByteBuffer.allocate(bytesNeeded) + } else { + buffer.clear() + } + + fileRecords.readInto(buffer, 0) + MemoryRecords.readableRecords(buffer) + } + + memoryRecords.batches.forEach { batch => + if (batch.isControlBatch) { + batch.asScala.foreach { record => + val controlRecord = ControlRecordType.parse(record.key) + if (controlRecord == ControlRecordType.COMMIT) { + if (isTraceEnabled) { + trace(s"Replaying end transaction marker from $tp at offset ${record.offset} to commit transaction " + + s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") + } + coordinator.replayEndTransactionMarker( + batch.producerId, + batch.producerEpoch, + TransactionResult.COMMIT + ) + } else if (controlRecord == ControlRecordType.ABORT) { + if (isTraceEnabled) { + trace(s"Replaying end transaction marker from $tp at offset ${record.offset} to abort transaction " + + s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") + } + coordinator.replayEndTransactionMarker( + batch.producerId, + batch.producerEpoch, + TransactionResult.ABORT + ) + } + } + } else { + batch.asScala.foreach { record => + numRecords = numRecords + 1 + + val coordinatorRecordOpt = { + try { + Some(deserializer.deserialize(record.key, record.value)) + } catch { + case ex: UnknownRecordTypeException => + warn(s"Unknown record type ${ex.unknownType} while loading offsets and group metadata " + + s"from $tp. Ignoring it. It could be a left over from an aborted upgrade.") + None + case ex: RuntimeException => + val msg = s"Deserializing record $record from $tp failed due to: ${ex.getMessage}" + error(s"$msg.") + throw new RuntimeException(msg, ex) + } + } + + coordinatorRecordOpt.foreach { coordinatorRecord => + try { + if (isTraceEnabled) { + trace(s"Replaying record $coordinatorRecord from $tp at offset ${record.offset()} " + + s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch}.") + } + coordinator.replay( + record.offset(), + batch.producerId, + batch.producerEpoch, + coordinatorRecord + ) + } catch { + case ex: RuntimeException => + val msg = s"Replaying record $coordinatorRecord from $tp at offset ${record.offset()} " + + s"with producer id ${batch.producerId} and producer epoch ${batch.producerEpoch} " + + s"failed due to: ${ex.getMessage}" + error(s"$msg.") + throw new RuntimeException(msg, ex) + } + } + } + } + + // Note that the high watermark can be greater than the current offset but as we load more records + // the current offset will eventually surpass the high watermark. Also note that the high watermark + // will continue to advance while loading. + currentOffset = batch.nextOffset + val currentHighWatermark = log.highWatermark + if (currentOffset >= currentHighWatermark) { + coordinator.updateLastWrittenOffset(currentOffset) + + if (currentHighWatermark > previousHighWatermark) { + coordinator.updateLastCommittedOffset(currentHighWatermark) + previousHighWatermark = currentHighWatermark + } + } + } + numBytes = numBytes + memoryRecords.sizeInBytes() + } + + val endTimeMs = time.milliseconds() + + if (logEndOffset == -1L) { + future.completeExceptionally(new NotLeaderOrFollowerException( + s"Stopped loading records from $tp because the partition is not online or is no longer the leader." + )) + } else if (isRunning.get) { + future.complete(new LoadSummary(startTimeMs, endTimeMs, schedulerQueueTimeMs, numRecords, numBytes)) + } else { + future.completeExceptionally(new RuntimeException("Coordinator loader is closed.")) + } + } + } catch { + case ex: Throwable => + future.completeExceptionally(ex) + } + } + + /** + * Closes the loader. + */ + override def close(): Unit = { + if (!isRunning.compareAndSet(true, false)) { + warn("Coordinator loader is already shutting down.") + return + } + scheduler.shutdown() + } +} diff --git a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala index dbbdbb09868e8..08b3c9aa49897 100644 --- a/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala +++ b/core/src/main/scala/kafka/coordinator/group/CoordinatorPartitionWriter.scala @@ -17,14 +17,14 @@ package kafka.coordinator.group import kafka.cluster.PartitionListener -import kafka.server.ReplicaManager -import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import kafka.server.{AddPartitionsToTxnManager, ReplicaManager} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} +import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.coordinator.common.runtime.PartitionWriter import org.apache.kafka.server.ActionQueue import org.apache.kafka.server.common.RequestLocal -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import java.util.concurrent.CompletableFuture @@ -108,7 +108,7 @@ class CoordinatorPartitionWriter( transactionalId: String, producerId: Long, producerEpoch: Short, - apiVersion: Int + apiVersion: Short ): CompletableFuture[VerificationGuard] = { val transactionSupportedOperation = AddPartitionsToTxnManager.txnOffsetCommitRequestVersionToTransactionSupportedOperation(apiVersion) val future = new CompletableFuture[VerificationGuard]() @@ -139,21 +139,23 @@ class CoordinatorPartitionWriter( verificationGuard: VerificationGuard, records: MemoryRecords ): Long = { - // We write synchronously to the leader replica without waiting on replication. - val topicIdPartition: TopicIdPartition = replicaManager.topicIdPartition(tp) - val appendResults = replicaManager.appendRecordsToLeader( + var appendResults: Map[TopicPartition, PartitionResponse] = Map.empty + replicaManager.appendRecords( + timeout = 0L, requiredAcks = 1, internalTopicsAllowed = true, origin = AppendOrigin.COORDINATOR, - entriesPerPartition = Map(topicIdPartition -> records), + entriesPerPartition = Map(tp -> records), + responseCallback = results => appendResults = results, requestLocal = RequestLocal.noCaching, verificationGuards = Map(tp -> verificationGuard), + delayedProduceLock = None, // We can directly complete the purgatories here because we don't hold // any conflicting locks. actionQueue = directActionQueue ) - val partitionResult = appendResults.getOrElse(topicIdPartition, + val partitionResult = appendResults.getOrElse(tp, throw new IllegalStateException(s"Append status $appendResults should have partition $tp.")) if (partitionResult.error != Errors.NONE) { @@ -161,7 +163,7 @@ class CoordinatorPartitionWriter( } // Required offset. - partitionResult.info.lastOffset + 1 + partitionResult.lastOffset + 1 } override def deleteRecords(tp: TopicPartition, deleteBeforeOffset: Long): CompletableFuture[Void] = { diff --git a/core/src/main/scala/kafka/coordinator/group/DelayedHeartbeat.scala b/core/src/main/scala/kafka/coordinator/group/DelayedHeartbeat.scala new file mode 100644 index 0000000000000..1dd2793c553d8 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/DelayedHeartbeat.scala @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import org.apache.kafka.server.purgatory.DelayedOperation + +/** + * Delayed heartbeat operations that are added to the purgatory for session timeout checking. + * Heartbeats are paused during rebalance. + */ +private[group] class DelayedHeartbeat(coordinator: GroupCoordinator, + group: GroupMetadata, + memberId: String, + isPending: Boolean, + timeoutMs: Long) + extends DelayedOperation(timeoutMs, group.lock) { + + override def tryComplete(): Boolean = coordinator.tryCompleteHeartbeat(group, memberId, isPending, forceComplete _) + override def onExpiration(): Unit = coordinator.onExpireHeartbeat(group, memberId, isPending) + override def onComplete(): Unit = {} +} diff --git a/core/src/main/scala/kafka/coordinator/group/DelayedJoin.scala b/core/src/main/scala/kafka/coordinator/group/DelayedJoin.scala new file mode 100644 index 0000000000000..51eeb324b1c26 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/DelayedJoin.scala @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, GroupJoinKey} +import java.util +import scala.math.{max, min} + +/** + * Delayed rebalance operations that are added to the purgatory when group is preparing for rebalance + * + * Whenever a join-group request is received, check if all known group members have requested + * to re-join the group; if yes, complete this operation to proceed rebalance. + * + * When the operation has expired, any known members that have not requested to re-join + * the group are marked as failed, and complete this operation to proceed rebalance with + * the rest of the group. + */ +private[group] class DelayedJoin( + coordinator: GroupCoordinator, + group: GroupMetadata, + rebalanceTimeout: Long +) extends DelayedRebalance( + rebalanceTimeout, + group.lock +) { + override def tryComplete(): Boolean = coordinator.tryCompleteJoin(group, forceComplete _) + + override def onExpiration(): Unit = { + // try to complete delayed actions introduced by coordinator.onCompleteJoin + tryToCompleteDelayedAction() + } + override def onComplete(): Unit = coordinator.onCompleteJoin(group) + + // TODO: remove this ugly chain after we move the action queue to handler thread + private def tryToCompleteDelayedAction(): Unit = coordinator.groupManager.replicaManager.tryCompleteActions() +} + +/** + * Delayed rebalance operation that is added to the purgatory when a group is transitioning from + * Empty to PreparingRebalance + * + * When onComplete is triggered we check if any new members have been added and if there is still time remaining + * before the rebalance timeout. If both are true we then schedule a further delay. Otherwise we complete the + * rebalance. + */ +private[group] class InitialDelayedJoin( + coordinator: GroupCoordinator, + purgatory: DelayedOperationPurgatory[DelayedRebalance], + group: GroupMetadata, + configuredRebalanceDelay: Int, + delayMs: Int, + remainingMs: Int +) extends DelayedJoin( + coordinator, + group, + delayMs +) { + override def tryComplete(): Boolean = false + + override def onComplete(): Unit = { + group.inLock { + if (group.newMemberAdded && remainingMs != 0) { + group.newMemberAdded = false + val delay = min(configuredRebalanceDelay, remainingMs) + val remaining = max(remainingMs - delayMs, 0) + purgatory.tryCompleteElseWatch(new InitialDelayedJoin(coordinator, + purgatory, + group, + configuredRebalanceDelay, + delay, + remaining + ), util.List.of(new GroupJoinKey(group.groupId))) + } else + super.onComplete() + } + } + +} diff --git a/core/src/main/scala/kafka/coordinator/group/DelayedRebalance.scala b/core/src/main/scala/kafka/coordinator/group/DelayedRebalance.scala new file mode 100644 index 0000000000000..2c327b922ebc9 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/DelayedRebalance.scala @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import org.apache.kafka.server.purgatory.DelayedOperation +import java.util.concurrent.locks.Lock + +/** + * Delayed rebalance operation that is shared by DelayedJoin and DelayedSync + * operations. This allows us to use a common purgatory for both cases. + */ +private[group] abstract class DelayedRebalance( + rebalanceTimeoutMs: Long, + groupLock: Lock +) extends DelayedOperation( + rebalanceTimeoutMs, + groupLock +) diff --git a/core/src/main/scala/kafka/coordinator/group/DelayedSync.scala b/core/src/main/scala/kafka/coordinator/group/DelayedSync.scala new file mode 100644 index 0000000000000..a39adefe3aadc --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/DelayedSync.scala @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +/** + * Delayed rebalance operation that is added to the purgatory when the group is completing the + * rebalance. + * + * Whenever a SyncGroup is received, checks that we received all the SyncGroup request from + * each member of the group; if yes, complete this operation. + * + * When the operation has expired, any known members that have not sent a SyncGroup requests + * are removed from the group. If any members is removed, the group is rebalanced. + */ +private[group] class DelayedSync( + coordinator: GroupCoordinator, + group: GroupMetadata, + generationId: Int, + rebalanceTimeoutMs: Long +) extends DelayedRebalance( + rebalanceTimeoutMs, + group.lock +) { + override def tryComplete(): Boolean = { + coordinator.tryCompletePendingSync(group, generationId, forceComplete _) + } + + override def onExpiration(): Unit = { + coordinator.onExpirePendingSync(group, generationId) + } + + override def onComplete(): Unit = { } +} diff --git a/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala b/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala new file mode 100644 index 0000000000000..bf2f38743140d --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/GroupCoordinator.scala @@ -0,0 +1,1872 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import java.util +import java.util.{OptionalInt, Properties} +import java.util.concurrent.atomic.AtomicBoolean +import kafka.server._ +import kafka.utils.Logging +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember +import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.metrics.stats.Meter +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.requests._ +import org.apache.kafka.common.utils.Time +import org.apache.kafka.coordinator.group.{Group, OffsetAndMetadata, OffsetConfig} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, GroupJoinKey, GroupSyncKey, MemberKey} +import org.apache.kafka.server.record.BrokerCompressionType +import org.apache.kafka.storage.internals.log.VerificationGuard + +import java.util.concurrent.CompletableFuture +import scala.collection.{Map, Seq, Set, immutable, mutable} +import scala.math.max + +/** + * GroupCoordinator handles general group membership and offset management. + * + * Each Kafka server instantiates a coordinator which is responsible for a set of + * groups. Groups are assigned to coordinators based on their group names. + *

          + * Delayed operation locking notes: + * Delayed operations in GroupCoordinator use `group` as the delayed operation + * lock. ReplicaManager.appendRecords may be invoked while holding the group lock + * used by its callback. The delayed callback may acquire the group lock + * since the delayed operation is completed only if the group lock can be acquired. + */ +private[group] class GroupCoordinator( + val brokerId: Int, + val groupConfig: GroupConfig, + val offsetConfig: OffsetConfig, + val groupManager: GroupMetadataManager, + val heartbeatPurgatory: DelayedOperationPurgatory[DelayedHeartbeat], + val rebalancePurgatory: DelayedOperationPurgatory[DelayedRebalance], + time: Time, + metrics: Metrics +) extends Logging { + import GroupCoordinator._ + + type JoinCallback = JoinGroupResult => Unit + type SyncCallback = SyncGroupResult => Unit + + /* setup metrics */ + val offsetDeletionSensor = metrics.sensor("OffsetDeletions") + + offsetDeletionSensor.add(new Meter( + metrics.metricName("offset-deletion-rate", + "group-coordinator-metrics", + "The rate of administrative deleted offsets"), + metrics.metricName("offset-deletion-count", + "group-coordinator-metrics", + "The total number of administrative deleted offsets"))) + + val groupCompletedRebalanceSensor = metrics.sensor("CompletedRebalances") + + groupCompletedRebalanceSensor.add(new Meter( + metrics.metricName("group-completed-rebalance-rate", + "group-coordinator-metrics", + "The rate of completed rebalance"), + metrics.metricName("group-completed-rebalance-count", + "group-coordinator-metrics", + "The total number of completed rebalance"))) + + this.logIdent = "[GroupCoordinator " + brokerId + "]: " + + private val isActive = new AtomicBoolean(false) + + def offsetsTopicConfigs: Properties = { + val props = new Properties + props.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) + props.put(TopicConfig.SEGMENT_BYTES_CONFIG, offsetConfig.offsetsTopicSegmentBytes.toString) + props.put(TopicConfig.COMPRESSION_TYPE_CONFIG, BrokerCompressionType.PRODUCER.name) + + props + } + + /** + * NOTE: If a group lock and metadataLock are simultaneously needed, + * be sure to acquire the group lock before metadataLock to prevent deadlock + */ + + /** + * Startup logic executed at the same time when the server starts up. + */ + def startup(retrieveGroupMetadataTopicPartitionCount: () => Int, enableMetadataExpiration: Boolean = true): Unit = { + info("Starting up.") + groupManager.startup(retrieveGroupMetadataTopicPartitionCount, enableMetadataExpiration) + isActive.set(true) + info("Startup complete.") + } + + /** + * Shutdown logic executed at the same time when server shuts down. + * Ordering of actions should be reversed from the startup process. + */ + def shutdown(): Unit = { + info("Shutting down.") + isActive.set(false) + groupManager.shutdown() + heartbeatPurgatory.shutdown() + rebalancePurgatory.shutdown() + info("Shutdown complete.") + } + + /** + * Verify if the group has space to accept the joining member. The various + * criteria are explained below. + */ + private def acceptJoiningMember(group: GroupMetadata, member: String): Boolean = { + group.currentState match { + // Always accept the request when the group is empty or dead + case Empty | Dead => + true + + // An existing member is accepted if it is already awaiting. New members are accepted + // up to the max group size. Note that the number of awaiting members is used here + // for two reasons: + // 1) the group size is not reliable as it could already be above the max group size + // if the max group size was reduced. + // 2) using the number of awaiting members allows to kick out the last rejoining + // members of the group. + case PreparingRebalance => + (group.has(member) && group.get(member).isAwaitingJoin) || + group.numAwaiting < groupConfig.groupMaxSize + + // An existing member is accepted. New members are accepted up to the max group size. + // Note that the group size is used here. When the group transitions to CompletingRebalance, + // members which haven't rejoined are removed. + case CompletingRebalance | Stable => + group.has(member) || group.size < groupConfig.groupMaxSize + } + } + + def handleJoinGroup(groupId: String, + memberId: String, + groupInstanceId: Option[String], + requireKnownMemberId: Boolean, + supportSkippingAssignment: Boolean, + clientId: String, + clientHost: String, + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + protocolType: String, + protocols: List[(String, Array[Byte])], + responseCallback: JoinCallback, + reason: Option[String] = None, + requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { + validateGroupStatus(groupId, ApiKeys.JOIN_GROUP).foreach { error => + responseCallback(JoinGroupResult(memberId, error)) + return + } + + if (sessionTimeoutMs < groupConfig.groupMinSessionTimeoutMs || + sessionTimeoutMs > groupConfig.groupMaxSessionTimeoutMs) { + responseCallback(JoinGroupResult(memberId, Errors.INVALID_SESSION_TIMEOUT)) + } else { + val isUnknownMember = memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID + // group is created if it does not exist and the member id is UNKNOWN. if member + // is specified but group does not exist, request is rejected with UNKNOWN_MEMBER_ID + groupManager.getOrMaybeCreateGroup(groupId, isUnknownMember) match { + case None => + responseCallback(JoinGroupResult(memberId, Errors.UNKNOWN_MEMBER_ID)) + case Some(group) => + group.inLock { + val joinReason = reason.getOrElse("not provided") + if (!acceptJoiningMember(group, memberId)) { + group.remove(memberId) + responseCallback(JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.GROUP_MAX_SIZE_REACHED)) + } else if (isUnknownMember) { + doNewMemberJoinGroup( + group, + groupInstanceId, + requireKnownMemberId, + supportSkippingAssignment, + clientId, + clientHost, + rebalanceTimeoutMs, + sessionTimeoutMs, + protocolType, + protocols, + responseCallback, + requestLocal, + joinReason + ) + } else { + doCurrentMemberJoinGroup( + group, + memberId, + groupInstanceId, + clientId, + clientHost, + rebalanceTimeoutMs, + sessionTimeoutMs, + protocolType, + protocols, + responseCallback, + joinReason + ) + } + + // attempt to complete JoinGroup + if (group.is(PreparingRebalance)) { + rebalancePurgatory.checkAndComplete(new GroupJoinKey(group.groupId)) + } + } + } + } + } + + private def doNewMemberJoinGroup( + group: GroupMetadata, + groupInstanceId: Option[String], + requireKnownMemberId: Boolean, + supportSkippingAssignment: Boolean, + clientId: String, + clientHost: String, + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + protocolType: String, + protocols: List[(String, Array[Byte])], + responseCallback: JoinCallback, + requestLocal: RequestLocal, + reason: String + ): Unit = { + group.inLock { + if (group.is(Dead)) { + // if the group is marked as dead, it means some other thread has just removed the group + // from the coordinator metadata; it is likely that the group has migrated to some other + // coordinator OR the group is in a transient unstable phase. Let the member retry + // finding the correct coordinator and rejoin. + responseCallback(JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.COORDINATOR_NOT_AVAILABLE)) + } else if (!group.supportsProtocols(protocolType, MemberMetadata.plainProtocolSet(protocols))) { + responseCallback(JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.INCONSISTENT_GROUP_PROTOCOL)) + } else { + val newMemberId = group.generateMemberId(clientId, groupInstanceId) + groupInstanceId match { + case Some(instanceId) => + doStaticNewMemberJoinGroup( + group, + instanceId, + newMemberId, + clientId, + clientHost, + supportSkippingAssignment, + rebalanceTimeoutMs, + sessionTimeoutMs, + protocolType, + protocols, + responseCallback, + requestLocal, + reason + ) + case None => + doDynamicNewMemberJoinGroup( + group, + requireKnownMemberId, + newMemberId, + clientId, + clientHost, + rebalanceTimeoutMs, + sessionTimeoutMs, + protocolType, + protocols, + responseCallback, + reason + ) + } + } + } + } + + private def doStaticNewMemberJoinGroup( + group: GroupMetadata, + groupInstanceId: String, + newMemberId: String, + clientId: String, + clientHost: String, + supportSkippingAssignment: Boolean, + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + protocolType: String, + protocols: List[(String, Array[Byte])], + responseCallback: JoinCallback, + requestLocal: RequestLocal, + reason: String + ): Unit = { + group.currentStaticMemberId(groupInstanceId) match { + case Some(oldMemberId) => + info(s"Static member with groupInstanceId=$groupInstanceId and unknown member id joins " + + s"group ${group.groupId} in ${group.currentState} state. Replacing previously mapped " + + s"member $oldMemberId with this groupInstanceId.") + updateStaticMemberAndRebalance( + group, + oldMemberId, + newMemberId, + groupInstanceId, + protocols, + rebalanceTimeoutMs, + sessionTimeoutMs, + responseCallback, + requestLocal, + reason, + supportSkippingAssignment + ) + + case None => + info(s"Static member with groupInstanceId=$groupInstanceId and unknown member id joins " + + s"group ${group.groupId} in ${group.currentState} state. Created a new member id $newMemberId " + + s"for this member and add to the group.") + addMemberAndRebalance(rebalanceTimeoutMs, sessionTimeoutMs, newMemberId, Some(groupInstanceId), + clientId, clientHost, protocolType, protocols, group, responseCallback, reason) + } + } + + private def doDynamicNewMemberJoinGroup( + group: GroupMetadata, + requireKnownMemberId: Boolean, + newMemberId: String, + clientId: String, + clientHost: String, + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + protocolType: String, + protocols: List[(String, Array[Byte])], + responseCallback: JoinCallback, + reason: String + ): Unit = { + if (requireKnownMemberId) { + // If member id required, register the member in the pending member list and send + // back a response to call for another join group request with allocated member id. + info(s"Dynamic member with unknown member id joins group ${group.groupId} in " + + s"${group.currentState} state. Created a new member id $newMemberId and request the " + + s"member to rejoin with this id.") + group.addPendingMember(newMemberId) + addPendingMemberExpiration(group, newMemberId, sessionTimeoutMs) + responseCallback(JoinGroupResult(newMemberId, Errors.MEMBER_ID_REQUIRED)) + } else { + info(s"Dynamic Member with unknown member id joins group ${group.groupId} in " + + s"${group.currentState} state. Created a new member id $newMemberId for this member " + + s"and add to the group.") + addMemberAndRebalance(rebalanceTimeoutMs, sessionTimeoutMs, newMemberId, None, + clientId, clientHost, protocolType, protocols, group, responseCallback, reason) + } + } + + private def validateCurrentMember( + group: GroupMetadata, + memberId: String, + groupInstanceId: Option[String], + operation: String + ): Option[Errors] = { + // We are validating two things: + // 1. If `groupInstanceId` is present, then it exists and is mapped to `memberId` + // 2. The `memberId` exists in the group + groupInstanceId.flatMap { instanceId => + group.currentStaticMemberId(instanceId) match { + case Some(currentMemberId) if currentMemberId != memberId => + info(s"Request memberId=$memberId for static member with groupInstanceId=$instanceId " + + s"is fenced by current memberId=$currentMemberId during operation $operation") + Some(Errors.FENCED_INSTANCE_ID) + case Some(_) => + None + case None => + Some(Errors.UNKNOWN_MEMBER_ID) + } + }.orElse { + if (!group.has(memberId)) { + Some(Errors.UNKNOWN_MEMBER_ID) + } else { + None + } + } + } + + private def doCurrentMemberJoinGroup( + group: GroupMetadata, + memberId: String, + groupInstanceId: Option[String], + clientId: String, + clientHost: String, + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + protocolType: String, + protocols: List[(String, Array[Byte])], + responseCallback: JoinCallback, + reason: String + ): Unit = { + group.inLock { + if (group.is(Dead)) { + // if the group is marked as dead, it means some other thread has just removed the group + // from the coordinator metadata; it is likely that the group has migrated to some other + // coordinator OR the group is in a transient unstable phase. Let the member retry + // finding the correct coordinator and rejoin. + responseCallback(JoinGroupResult(memberId, Errors.COORDINATOR_NOT_AVAILABLE)) + } else if (!group.supportsProtocols(protocolType, MemberMetadata.plainProtocolSet(protocols))) { + responseCallback(JoinGroupResult(memberId, Errors.INCONSISTENT_GROUP_PROTOCOL)) + } else if (group.isPendingMember(memberId)) { + // A rejoining pending member will be accepted. Note that pending member cannot be a static member. + groupInstanceId.foreach { instanceId => + throw new IllegalStateException(s"Received unexpected JoinGroup with groupInstanceId=$instanceId " + + s"for pending member with memberId=$memberId") + } + + info(s"Pending dynamic member with id $memberId joins group ${group.groupId} in " + + s"${group.currentState} state. Adding to the group now.") + addMemberAndRebalance(rebalanceTimeoutMs, sessionTimeoutMs, memberId, None, + clientId, clientHost, protocolType, protocols, group, responseCallback, reason) + } else { + val memberErrorOpt = validateCurrentMember( + group, + memberId, + groupInstanceId, + operation = "join-group" + ) + + memberErrorOpt match { + case Some(error) => responseCallback(JoinGroupResult(memberId, error)) + + case None => group.currentState match { + case PreparingRebalance => + val member = group.get(memberId) + updateMemberAndRebalance(group, member, protocols, rebalanceTimeoutMs, sessionTimeoutMs, s"Member ${member.memberId} joining group during ${group.currentState}; client reason: $reason", responseCallback) + + case CompletingRebalance => + val member = group.get(memberId) + if (member.matches(protocols)) { + // member is joining with the same metadata (which could be because it failed to + // receive the initial JoinGroup response), so just return current group information + // for the current generation. + responseCallback(JoinGroupResult( + members = if (group.isLeader(memberId)) { + group.currentMemberMetadata + } else { + List.empty + }, + memberId = memberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = group.leaderOrNull, + skipAssignment = false, + error = Errors.NONE)) + } else { + // member has changed metadata, so force a rebalance + updateMemberAndRebalance(group, member, protocols, rebalanceTimeoutMs, sessionTimeoutMs, s"Updating metadata for member ${member.memberId} during ${group.currentState}; client reason: $reason", responseCallback) + } + + case Stable => + val member = group.get(memberId) + if (group.isLeader(memberId)) { + // force a rebalance if the leader sends JoinGroup; + // This allows the leader to trigger rebalances for changes affecting assignment + // which do not affect the member metadata (such as topic metadata changes for the consumer) + updateMemberAndRebalance(group, member, protocols, rebalanceTimeoutMs, sessionTimeoutMs, s"Leader ${member.memberId} re-joining group during ${group.currentState}; client reason: $reason", responseCallback) + } else if (!member.matches(protocols)) { + updateMemberAndRebalance(group, member, protocols, rebalanceTimeoutMs, sessionTimeoutMs, s"Updating metadata for member ${member.memberId} during ${group.currentState}; client reason: $reason", responseCallback) + } else { + // for followers with no actual change to their metadata, just return group information + // for the current generation which will allow them to issue SyncGroup + responseCallback(JoinGroupResult( + members = List.empty, + memberId = memberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = group.leaderOrNull, + skipAssignment = false, + error = Errors.NONE)) + } + + case Empty | Dead => + // Group reaches unexpected state. Let the joining member reset their generation and rejoin. + warn(s"Attempt to add rejoining member $memberId of group ${group.groupId} in " + + s"unexpected group state ${group.currentState}") + responseCallback(JoinGroupResult(memberId, Errors.UNKNOWN_MEMBER_ID)) + } + } + } + } + } + + def handleSyncGroup(groupId: String, + generation: Int, + memberId: String, + protocolType: Option[String], + protocolName: Option[String], + groupInstanceId: Option[String], + groupAssignment: Map[String, Array[Byte]], + responseCallback: SyncCallback, + requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { + validateGroupStatus(groupId, ApiKeys.SYNC_GROUP) match { + case Some(error) if error == Errors.COORDINATOR_LOAD_IN_PROGRESS => + // The coordinator is loading, which means we've lost the state of the active rebalance and the + // group will need to start over at JoinGroup. By returning rebalance in progress, the consumer + // will attempt to rejoin without needing to rediscover the coordinator. Note that we cannot + // return COORDINATOR_LOAD_IN_PROGRESS since older clients do not expect the error. + responseCallback(SyncGroupResult(Errors.REBALANCE_IN_PROGRESS)) + + case Some(error) => responseCallback(SyncGroupResult(error)) + + case None => + groupManager.getGroup(groupId) match { + case None => responseCallback(SyncGroupResult(Errors.UNKNOWN_MEMBER_ID)) + case Some(group) => doSyncGroup(group, generation, memberId, protocolType, protocolName, + groupInstanceId, groupAssignment, requestLocal, responseCallback) + } + } + } + + private def validateSyncGroup( + group: GroupMetadata, + generationId: Int, + memberId: String, + protocolType: Option[String], + protocolName: Option[String], + groupInstanceId: Option[String], + ): Option[Errors] = { + if (group.is(Dead)) { + // if the group is marked as dead, it means some other thread has just removed the group + // from the coordinator metadata; this is likely that the group has migrated to some other + // coordinator OR the group is in a transient unstable phase. Let the member retry + // finding the correct coordinator and rejoin. + Some(Errors.COORDINATOR_NOT_AVAILABLE) + } else { + validateCurrentMember( + group, + memberId, + groupInstanceId, + operation = "sync-group" + ).orElse { + if (generationId != group.generationId) { + Some(Errors.ILLEGAL_GENERATION) + } else if (protocolType.isDefined && !group.protocolType.contains(protocolType.get)) { + Some(Errors.INCONSISTENT_GROUP_PROTOCOL) + } else if (protocolName.isDefined && !group.protocolName.contains(protocolName.get)) { + Some(Errors.INCONSISTENT_GROUP_PROTOCOL) + } else { + None + } + } + } + } + + private def doSyncGroup(group: GroupMetadata, + generationId: Int, + memberId: String, + protocolType: Option[String], + protocolName: Option[String], + groupInstanceId: Option[String], + groupAssignment: Map[String, Array[Byte]], + requestLocal: RequestLocal, + responseCallback: SyncCallback): Unit = { + group.inLock { + val validationErrorOpt = validateSyncGroup( + group, + generationId, + memberId, + protocolType, + protocolName, + groupInstanceId + ) + + validationErrorOpt match { + case Some(error) => responseCallback(SyncGroupResult(error)) + + case None => group.currentState match { + case Empty => + responseCallback(SyncGroupResult(Errors.UNKNOWN_MEMBER_ID)) + + case PreparingRebalance => + responseCallback(SyncGroupResult(Errors.REBALANCE_IN_PROGRESS)) + + case CompletingRebalance => + group.get(memberId).awaitingSyncCallback = responseCallback + removePendingSyncMember(group, memberId) + + // if this is the leader, then we can attempt to persist state and transition to stable + if (group.isLeader(memberId)) { + info(s"Assignment received from leader $memberId for group ${group.groupId} for generation ${group.generationId}. " + + s"The group has ${group.size} members, ${group.allStaticMembers.size} of which are static.") + + // fill any missing members with an empty assignment + val missing = group.allMembers.diff(groupAssignment.keySet) + val assignment = groupAssignment ++ missing.map(_ -> Array.empty[Byte]).toMap + + if (missing.nonEmpty) { + warn(s"Setting empty assignments for members $missing of ${group.groupId} for generation ${group.generationId}") + } + + groupManager.storeGroup(group, assignment, (error: Errors) => { + group.inLock { + // another member may have joined the group while we were awaiting this callback, + // so we must ensure we are still in the CompletingRebalance state and the same generation + // when it gets invoked. if we have transitioned to another state, then do nothing + if (group.is(CompletingRebalance) && generationId == group.generationId) { + if (error != Errors.NONE) { + resetAndPropagateAssignmentError(group, error) + maybePrepareRebalance(group, s"Error $error when storing group assignment during SyncGroup (member: $memberId)") + } else { + setAndPropagateAssignment(group, assignment) + group.transitionTo(Stable) + } + } + } + }, requestLocal) + groupCompletedRebalanceSensor.record() + } + + case Stable => + removePendingSyncMember(group, memberId) + + // if the group is stable, we just return the current assignment + val memberMetadata = group.get(memberId) + responseCallback(SyncGroupResult(group.protocolType, group.protocolName, memberMetadata.assignment, Errors.NONE)) + completeAndScheduleNextHeartbeatExpiration(group, group.get(memberId)) + + case Dead => + throw new IllegalStateException(s"Reached unexpected condition for Dead group ${group.groupId}") + } + } + } + } + + def handleLeaveGroup(groupId: String, + leavingMembers: List[MemberIdentity], + responseCallback: LeaveGroupResult => Unit): Unit = { + + def removeCurrentMemberFromGroup(group: GroupMetadata, memberId: String, reason: Option[String]): Unit = { + val member = group.get(memberId) + val leaveReason = reason.getOrElse("not provided") + removeMemberAndUpdateGroup(group, member, s"Removing member $memberId on LeaveGroup; client reason: $leaveReason") + removeHeartbeatForLeavingMember(group, member.memberId) + info(s"Member $member has left group $groupId through explicit `LeaveGroup`; client reason: $leaveReason") + } + + validateGroupStatus(groupId, ApiKeys.LEAVE_GROUP) match { + case Some(error) => + responseCallback(leaveError(error, List.empty)) + case None => + groupManager.getGroup(groupId) match { + case None => + responseCallback(leaveError(Errors.NONE, leavingMembers.map {leavingMember => + memberLeaveError(leavingMember, Errors.UNKNOWN_MEMBER_ID) + })) + case Some(group) => + group.inLock { + if (group.is(Dead)) { + responseCallback(leaveError(Errors.COORDINATOR_NOT_AVAILABLE, List.empty)) + } else { + val memberErrors = leavingMembers.map { leavingMember => + val memberId = leavingMember.memberId + val groupInstanceId = Option(leavingMember.groupInstanceId) + val reason = Option(leavingMember.reason) + + // The LeaveGroup API allows administrative removal of members by GroupInstanceId + // in which case we expect the MemberId to be undefined. + if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID) { + groupInstanceId.flatMap(group.currentStaticMemberId) match { + case Some(currentMemberId) => + removeCurrentMemberFromGroup(group, currentMemberId, reason) + memberLeaveError(leavingMember, Errors.NONE) + case None => + memberLeaveError(leavingMember, Errors.UNKNOWN_MEMBER_ID) + } + } else if (group.isPendingMember(memberId)) { + removePendingMemberAndUpdateGroup(group, memberId) + heartbeatPurgatory.checkAndComplete(new MemberKey(group.groupId, memberId)) + info(s"Pending member with memberId=$memberId has left group ${group.groupId} " + + s"through explicit `LeaveGroup` request") + memberLeaveError(leavingMember, Errors.NONE) + } else { + val memberError = validateCurrentMember( + group, + memberId, + groupInstanceId, + operation = "leave-group" + ).getOrElse { + removeCurrentMemberFromGroup(group, memberId, reason) + Errors.NONE + } + memberLeaveError(leavingMember, memberError) + } + } + responseCallback(leaveError(Errors.NONE, memberErrors)) + } + } + } + } + } + + def handleDeleteGroups(groupIds: Set[String], + requestLocal: RequestLocal = RequestLocal.noCaching): Map[String, Errors] = { + val groupErrors = mutable.Map.empty[String, Errors] + val groupsEligibleForDeletion = mutable.ArrayBuffer[GroupMetadata]() + + groupIds.foreach { groupId => + validateGroupStatus(groupId, ApiKeys.DELETE_GROUPS) match { + case Some(error) => + groupErrors += groupId -> error + + case None => + groupManager.getGroup(groupId) match { + case None => + groupErrors += groupId -> + (if (groupManager.groupNotExists(groupId)) Errors.GROUP_ID_NOT_FOUND else Errors.NOT_COORDINATOR) + case Some(group) => + group.inLock { + group.currentState match { + case Dead => + groupErrors += groupId -> + (if (groupManager.groupNotExists(groupId)) Errors.GROUP_ID_NOT_FOUND else Errors.NOT_COORDINATOR) + case Empty => + group.transitionTo(Dead) + groupsEligibleForDeletion += group + case Stable | PreparingRebalance | CompletingRebalance => + groupErrors(groupId) = Errors.NON_EMPTY_GROUP + } + } + } + } + } + + if (groupsEligibleForDeletion.nonEmpty) { + val offsetsRemoved = groupManager.cleanupGroupMetadata(groupsEligibleForDeletion, requestLocal, + _.removeAllOffsets()) + groupErrors ++= groupsEligibleForDeletion.map(_.groupId -> Errors.NONE).toMap + info(s"The following groups were deleted: ${groupsEligibleForDeletion.map(_.groupId).mkString(", ")}. " + + s"A total of $offsetsRemoved offsets were removed.") + } + + groupErrors + } + + def handleDeleteOffsets(groupId: String, partitions: Seq[TopicPartition], + requestLocal: RequestLocal): (Errors, Map[TopicPartition, Errors]) = { + var groupError: Errors = Errors.NONE + var partitionErrors: Map[TopicPartition, Errors] = Map() + var partitionsEligibleForDeletion: Seq[TopicPartition] = Seq() + + validateGroupStatus(groupId, ApiKeys.OFFSET_DELETE) match { + case Some(error) => + groupError = error + + case None => + groupManager.getGroup(groupId) match { + case None => + groupError = if (groupManager.groupNotExists(groupId)) + Errors.GROUP_ID_NOT_FOUND else Errors.NOT_COORDINATOR + + case Some(group) => + group.inLock { + group.currentState match { + case Dead => + groupError = if (groupManager.groupNotExists(groupId)) + Errors.GROUP_ID_NOT_FOUND else Errors.NOT_COORDINATOR + + case Empty => + partitionsEligibleForDeletion = partitions + + case PreparingRebalance | CompletingRebalance | Stable if group.isConsumerGroup => + val (consumed, notConsumed) = + partitions.partition(tp => group.isSubscribedToTopic(tp.topic())) + + partitionsEligibleForDeletion = notConsumed + partitionErrors = consumed.map(_ -> Errors.GROUP_SUBSCRIBED_TO_TOPIC).toMap + + case _ => + groupError = Errors.NON_EMPTY_GROUP + } + } + + if (partitionsEligibleForDeletion.nonEmpty) { + val offsetsRemoved = groupManager.cleanupGroupMetadata(Seq(group), requestLocal, + _.removeOffsets(partitionsEligibleForDeletion)) + + partitionErrors ++= partitionsEligibleForDeletion.map(_ -> Errors.NONE).toMap + + offsetDeletionSensor.record(offsetsRemoved) + + info(s"The following offsets of the group $groupId were deleted: ${partitionsEligibleForDeletion.mkString(", ")}. " + + s"A total of $offsetsRemoved offsets were removed.") + } + } + } + + // If there is a group error, the partition errors is empty + groupError -> partitionErrors + } + + private def validateHeartbeat( + group: GroupMetadata, + generationId: Int, + memberId: String, + groupInstanceId: Option[String] + ): Option[Errors] = { + if (group.is(Dead)) { + Some(Errors.COORDINATOR_NOT_AVAILABLE) + } else { + validateCurrentMember( + group, + memberId, + groupInstanceId, + operation = "heartbeat" + ).orElse { + if (generationId != group.generationId) { + Some(Errors.ILLEGAL_GENERATION) + } else { + None + } + } + } + } + + def handleHeartbeat(groupId: String, + memberId: String, + groupInstanceId: Option[String], + generationId: Int, + responseCallback: Errors => Unit): Unit = { + validateGroupStatus(groupId, ApiKeys.HEARTBEAT).foreach { error => + if (error == Errors.COORDINATOR_LOAD_IN_PROGRESS) + // the group is still loading, so respond just blindly + responseCallback(Errors.NONE) + else + responseCallback(error) + return + } + + val err = groupManager.getGroup(groupId) match { + case None => + Errors.UNKNOWN_MEMBER_ID + + case Some(group) => group.inLock { + val validationErrorOpt = validateHeartbeat( + group, + generationId, + memberId, + groupInstanceId + ) + + if (validationErrorOpt.isDefined) { + validationErrorOpt.get + } else { + group.currentState match { + case Empty => + Errors.UNKNOWN_MEMBER_ID + + case CompletingRebalance => + // consumers may start sending heartbeat after join-group response, in which case + // we should treat them as normal hb request and reset the timer + val member = group.get(memberId) + completeAndScheduleNextHeartbeatExpiration(group, member) + Errors.NONE + + case PreparingRebalance => + val member = group.get(memberId) + completeAndScheduleNextHeartbeatExpiration(group, member) + Errors.REBALANCE_IN_PROGRESS + + case Stable => + val member = group.get(memberId) + completeAndScheduleNextHeartbeatExpiration(group, member) + Errors.NONE + + case Dead => + throw new IllegalStateException(s"Reached unexpected condition for Dead group $groupId") + } + } + } + } + responseCallback(err) + } + + def handleTxnCommitOffsets(groupId: String, + transactionalId: String, + producerId: Long, + producerEpoch: Short, + memberId: String, + groupInstanceId: Option[String], + generationId: Int, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + requestLocal: RequestLocal = RequestLocal.noCaching, + apiVersion: Short): Unit = { + validateGroupStatus(groupId, ApiKeys.TXN_OFFSET_COMMIT) match { + case Some(error) => responseCallback(offsetMetadata.map { case (k, _) => k -> error }) + case None => + val group = groupManager.getGroup(groupId).getOrElse { + groupManager.addGroup(new GroupMetadata(groupId, Empty, time)) + } + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) + + def postVerificationCallback( + newRequestLocal: RequestLocal, + errorAndGuard: (Errors, VerificationGuard) + ): Unit = { + val (error, verificationGuard) = errorAndGuard + if (error != Errors.NONE) { + val finalError = GroupMetadataManager.maybeConvertOffsetCommitError(error) + responseCallback(offsetMetadata.map { case (k, _) => k -> finalError }) + } else { + doTxnCommitOffsets(group, memberId, groupInstanceId, generationId, producerId, producerEpoch, + offsetTopicPartition, offsetMetadata, newRequestLocal, responseCallback, Some(verificationGuard)) + } + } + val transactionSupportedOperation = AddPartitionsToTxnManager.txnOffsetCommitRequestVersionToTransactionSupportedOperation(apiVersion) + groupManager.replicaManager.maybeSendPartitionToTransactionCoordinator( + topicPartition = offsetTopicPartition, + transactionalId, + producerId, + producerEpoch, + RecordBatch.NO_SEQUENCE, + // Wrap the callback to be handled on an arbitrary request handler thread + // when transaction verification is complete. The request local passed in + // is only used when the callback is executed immediately. + KafkaRequestHandler.wrapAsyncCallback( + postVerificationCallback, + requestLocal + ), + transactionSupportedOperation + ) + } + } + + def handleCommitOffsets(groupId: String, + memberId: String, + groupInstanceId: Option[String], + generationId: Int, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { + validateGroupStatus(groupId, ApiKeys.OFFSET_COMMIT) match { + case Some(error) => responseCallback(offsetMetadata.map { case (k, _) => k -> error }) + case None => + groupManager.getGroup(groupId) match { + case None => + if (generationId < 0) { + // the group is not relying on Kafka for group management, so allow the commit + info(s"Creating simple consumer group $groupId via manual offset commit.") + val group = groupManager.addGroup(new GroupMetadata(groupId, Empty, time)) + doCommitOffsets(group, memberId, groupInstanceId, generationId, offsetMetadata, + responseCallback, requestLocal) + } else { + // or this is a request coming from an older generation. either way, reject the commit + responseCallback(offsetMetadata.map { case (k, _) => k -> Errors.ILLEGAL_GENERATION }) + } + + case Some(group) => + doCommitOffsets(group, memberId, groupInstanceId, generationId, offsetMetadata, + responseCallback, requestLocal) + } + } + } + + def scheduleHandleTxnCompletion(producerId: Long, + offsetsPartitions: Iterable[TopicPartition], + transactionResult: TransactionResult): CompletableFuture[Void] = { + require(offsetsPartitions.forall(_.topic == Topic.GROUP_METADATA_TOPIC_NAME)) + val isCommit = transactionResult == TransactionResult.COMMIT + groupManager.scheduleHandleTxnCompletion(producerId, offsetsPartitions.map(_.partition).toSet, isCommit) + } + + private def doTxnCommitOffsets(group: GroupMetadata, + memberId: String, + groupInstanceId: Option[String], + generationId: Int, + producerId: Long, + producerEpoch: Short, + offsetTopicPartition: TopicPartition, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + requestLocal: RequestLocal, + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + verificationGuard: Option[VerificationGuard]): Unit = { + group.inLock { + val validationErrorOpt = validateOffsetCommit( + group, + generationId, + memberId, + groupInstanceId, + isTransactional = true + ) + + if (validationErrorOpt.isDefined) { + responseCallback(offsetMetadata.map { case (k, _) => k -> validationErrorOpt.get }) + } else { + groupManager.storeOffsets(group, memberId, offsetTopicPartition, offsetMetadata, responseCallback, producerId, + producerEpoch, requestLocal, verificationGuard) + } + } + } + + private def validateOffsetCommit( + group: GroupMetadata, + generationId: Int, + memberId: String, + groupInstanceId: Option[String], + isTransactional: Boolean + ): Option[Errors] = { + if (group.is(Dead)) { + Some(Errors.COORDINATOR_NOT_AVAILABLE) + } else if (generationId < 0 && group.is(Empty)) { + // When the generation id is -1, the request comes from either the admin client + // or a consumer which does not use the group management facility. In this case, + // the request can commit offsets if the group is empty. + None + } else if (generationId >= 0 || memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID || groupInstanceId.isDefined) { + validateCurrentMember( + group, + memberId, + groupInstanceId, + operation = if (isTransactional) "txn-offset-commit" else "offset-commit" + ).orElse { + if (generationId != group.generationId) { + Some(Errors.ILLEGAL_GENERATION) + } else { + None + } + } + } else if (!isTransactional && !group.is(Empty)) { + // When the group is non-empty, only members can commit offsets. + // This does not apply to transactional offset commits, since the + // older versions of this protocol do not require memberId and + // generationId. + Some(Errors.UNKNOWN_MEMBER_ID) + } else { + None + } + } + + private def doCommitOffsets(group: GroupMetadata, + memberId: String, + groupInstanceId: Option[String], + generationId: Int, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + requestLocal: RequestLocal): Unit = { + group.inLock { + val validationErrorOpt = validateOffsetCommit( + group, + generationId, + memberId, + groupInstanceId, + isTransactional = false + ) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) + + if (validationErrorOpt.isDefined) { + responseCallback(offsetMetadata.map { case (k, _) => k -> validationErrorOpt.get }) + } else { + group.currentState match { + case Empty => + groupManager.storeOffsets(group, memberId, offsetTopicPartition, offsetMetadata, responseCallback, verificationGuard = None) + + case Stable | PreparingRebalance => + // During PreparingRebalance phase, we still allow a commit request since we rely + // on heartbeat response to eventually notify the rebalance in progress signal to the consumer + val member = group.get(memberId) + completeAndScheduleNextHeartbeatExpiration(group, member) + groupManager.storeOffsets(group, memberId, offsetTopicPartition, offsetMetadata, responseCallback, requestLocal = requestLocal, verificationGuard = None) + + case CompletingRebalance => + // We should not receive a commit request if the group has not completed rebalance; + // but since the consumer's member.id and generation is valid, it means it has received + // the latest group generation information from the JoinResponse. + // So let's return a REBALANCE_IN_PROGRESS to let consumer handle it gracefully. + responseCallback(offsetMetadata.map { case (k, _) => k -> Errors.REBALANCE_IN_PROGRESS }) + + case _ => + throw new RuntimeException(s"Logic error: unexpected group state ${group.currentState}") + } + } + } + } + + def handleFetchOffsets( + groupId: String, + requireStable: Boolean, + partitions: Option[Seq[TopicPartition]] = None + ): (Errors, Map[TopicPartition, OffsetFetchResponse.PartitionData]) = { + + validateGroupStatus(groupId, ApiKeys.OFFSET_FETCH) match { + case Some(error) => error -> Map.empty + case None => + // return offsets blindly regardless the current group state since the group may be using + // Kafka commit storage without automatic group management + (Errors.NONE, groupManager.getOffsets(groupId, requireStable, partitions)) + } + } + + def handleListGroups(states: Set[String], groupTypes: Set[String]): (Errors, List[GroupOverview]) = { + if (!isActive.get) { + (Errors.COORDINATOR_NOT_AVAILABLE, List[GroupOverview]()) + } else { + val errorCode = if (groupManager.isLoading) Errors.COORDINATOR_LOAD_IN_PROGRESS else Errors.NONE + + // Convert state filter strings to lower case and group type strings to the corresponding enum type. + // This is done to ensure a case-insensitive comparison. + val caseInsensitiveStates = states.map(_.toLowerCase) + val enumTypesFilter: Set[Group.GroupType] = groupTypes.map(Group.GroupType.parse) + + // Filter groups based on states and groupTypes. If either is empty, it won't filter on that criterion. + // While using the old group coordinator, all groups are considered classic groups by default. + // An empty list is returned for any other type filter. + val groups = groupManager.currentGroups.filter { g => + (states.isEmpty || g.isInStates(caseInsensitiveStates)) && + (enumTypesFilter.isEmpty || enumTypesFilter.contains(Group.GroupType.CLASSIC)) + } + (errorCode, groups.map(_.overview).toList) + } + } + + def handleDescribeGroup(groupId: String, apiVersion: Short): (Errors, Option[String], GroupSummary) = { + validateGroupStatus(groupId, ApiKeys.DESCRIBE_GROUPS) match { + case Some(error) => (error, None, GroupCoordinator.EmptyGroup) + case None => + groupManager.getGroup(groupId) match { + case None => + if (apiVersion >= 6) { + (Errors.GROUP_ID_NOT_FOUND, Some(s"Group $groupId not found."), GroupCoordinator.DeadGroup) + } else { + (Errors.NONE, None, GroupCoordinator.DeadGroup) + } + case Some(group) => + group.inLock { + (Errors.NONE, None, group.summary) + } + } + } + } + + def handleDeletedPartitions(topicPartitions: Seq[TopicPartition], requestLocal: RequestLocal): Unit = { + val offsetsRemoved = groupManager.cleanupGroupMetadata(groupManager.currentGroups, requestLocal, + _.removeOffsets(topicPartitions)) + info(s"Removed $offsetsRemoved offsets associated with deleted partitions: ${topicPartitions.mkString(", ")}.") + } + + private def isValidGroupId(groupId: String, api: ApiKeys): Boolean = { + api match { + case ApiKeys.OFFSET_COMMIT | ApiKeys.OFFSET_FETCH | ApiKeys.DESCRIBE_GROUPS | ApiKeys.DELETE_GROUPS => + // For backwards compatibility, we support the offset commit APIs for the empty groupId, and also + // in DescribeGroups and DeleteGroups so that users can view and delete state of all groups. + groupId != null + case _ => + // The remaining APIs are groups using Kafka for group coordination and must have a non-empty groupId + groupId != null && groupId.nonEmpty + } + } + + /** + * Check that the groupId is valid, assigned to this coordinator and that the group has been loaded. + */ + private def validateGroupStatus(groupId: String, api: ApiKeys): Option[Errors] = { + if (!isValidGroupId(groupId, api)) + Some(Errors.INVALID_GROUP_ID) + else if (!isActive.get) + Some(Errors.COORDINATOR_NOT_AVAILABLE) + else if (isCoordinatorLoadInProgress(groupId)) + Some(Errors.COORDINATOR_LOAD_IN_PROGRESS) + else if (!isCoordinatorForGroup(groupId)) + Some(Errors.NOT_COORDINATOR) + else + None + } + + private def onGroupUnloaded(group: GroupMetadata): Unit = { + group.inLock { + info(s"Unloading group metadata for ${group.groupId} with generation ${group.generationId}") + val previousState = group.currentState + group.transitionTo(Dead) + + previousState match { + case Empty | Dead => + case PreparingRebalance => + for (member <- group.allMemberMetadata) { + group.maybeInvokeJoinCallback(member, JoinGroupResult(member.memberId, Errors.NOT_COORDINATOR)) + } + + rebalancePurgatory.checkAndComplete(new GroupJoinKey(group.groupId)) + + case Stable | CompletingRebalance => + for (member <- group.allMemberMetadata) { + group.maybeInvokeSyncCallback(member, SyncGroupResult(Errors.NOT_COORDINATOR)) + heartbeatPurgatory.checkAndComplete(new MemberKey(group.groupId, member.memberId)) + } + } + + removeSyncExpiration(group) + } + } + + private def onGroupLoaded(group: GroupMetadata): Unit = { + group.inLock { + info(s"Loading group metadata for ${group.groupId} with generation ${group.generationId}") + assert(group.is(Stable) || group.is(Empty)) + if (groupIsOverCapacity(group)) { + prepareRebalance(group, s"Freshly-loaded group is over capacity (${groupConfig.groupMaxSize}). " + + "Rebalancing in order to give a chance for consumers to commit offsets") + } + + group.allMemberMetadata.foreach(completeAndScheduleNextHeartbeatExpiration(group, _)) + } + } + + /** + * Load cached state from the given partition and begin handling requests for groups which map to it. + * + * @param offsetTopicPartitionId The partition we are now leading + */ + def onElection(offsetTopicPartitionId: Int, coordinatorEpoch: Int): Unit = { + info(s"Elected as the group coordinator for partition $offsetTopicPartitionId in epoch $coordinatorEpoch") + groupManager.scheduleLoadGroupAndOffsets(offsetTopicPartitionId, coordinatorEpoch, onGroupLoaded) + } + + /** + * Unload cached state for the given partition and stop handling requests for groups which map to it. + * + * @param offsetTopicPartitionId The partition we are no longer leading + */ + def onResignation(offsetTopicPartitionId: Int, coordinatorEpoch: OptionalInt): Unit = { + info(s"Resigned as the group coordinator for partition $offsetTopicPartitionId in epoch $coordinatorEpoch") + groupManager.removeGroupsForPartition(offsetTopicPartitionId, coordinatorEpoch, onGroupUnloaded) + } + + private def setAndPropagateAssignment(group: GroupMetadata, assignment: Map[String, Array[Byte]]): Unit = { + assert(group.is(CompletingRebalance)) + group.allMemberMetadata.foreach(member => member.assignment = assignment(member.memberId)) + propagateAssignment(group, Errors.NONE) + } + + private def resetAndPropagateAssignmentError(group: GroupMetadata, error: Errors): Unit = { + assert(group.is(CompletingRebalance)) + group.allMemberMetadata.foreach(_.assignment = Array.empty) + propagateAssignment(group, error) + } + + private def propagateAssignment(group: GroupMetadata, error: Errors): Unit = { + val (protocolType, protocolName) = if (error == Errors.NONE) + (group.protocolType, group.protocolName) + else + (None, None) + for (member <- group.allMemberMetadata) { + if (member.assignment.isEmpty && error == Errors.NONE) { + warn(s"Sending empty assignment to member ${member.memberId} of ${group.groupId} for generation ${group.generationId} with no errors") + } + + if (group.maybeInvokeSyncCallback(member, SyncGroupResult(protocolType, protocolName, member.assignment, error))) { + // reset the session timeout for members after propagating the member's assignment. + // This is because if any member's session expired while we were still awaiting either + // the leader sync group or the storage callback, its expiration will be ignored and no + // future heartbeat expectations will not be scheduled. + completeAndScheduleNextHeartbeatExpiration(group, member) + } + } + } + + /** + * Complete existing DelayedHeartbeats for the given member and schedule the next one + */ + private def completeAndScheduleNextHeartbeatExpiration(group: GroupMetadata, member: MemberMetadata): Unit = { + completeAndScheduleNextExpiration(group, member, member.sessionTimeoutMs) + } + + private def completeAndScheduleNextExpiration(group: GroupMetadata, member: MemberMetadata, timeoutMs: Long): Unit = { + val memberKey = new MemberKey(group.groupId, member.memberId) + + // complete current heartbeat expectation + member.heartbeatSatisfied = true + heartbeatPurgatory.checkAndComplete(memberKey) + + // reschedule the next heartbeat expiration deadline + member.heartbeatSatisfied = false + val delayedHeartbeat = new DelayedHeartbeat(this, group, member.memberId, isPending = false, timeoutMs) + heartbeatPurgatory.tryCompleteElseWatch(delayedHeartbeat, util.Collections.singletonList(memberKey)) + } + + /** + * Add pending member expiration to heartbeat purgatory + */ + private def addPendingMemberExpiration(group: GroupMetadata, pendingMemberId: String, timeoutMs: Long): Unit = { + val pendingMemberKey = new MemberKey(group.groupId, pendingMemberId) + val delayedHeartbeat = new DelayedHeartbeat(this, group, pendingMemberId, isPending = true, timeoutMs) + heartbeatPurgatory.tryCompleteElseWatch(delayedHeartbeat, util.Collections.singletonList(pendingMemberKey)) + } + + private def removeHeartbeatForLeavingMember(group: GroupMetadata, memberId: String): Unit = { + val memberKey = new MemberKey(group.groupId, memberId) + heartbeatPurgatory.checkAndComplete(memberKey) + } + + private def addMemberAndRebalance(rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + memberId: String, + groupInstanceId: Option[String], + clientId: String, + clientHost: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + group: GroupMetadata, + callback: JoinCallback, + reason: String): Unit = { + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, + rebalanceTimeoutMs, sessionTimeoutMs, protocolType, protocols) + + member.isNew = true + + // update the newMemberAdded flag to indicate that the join group can be further delayed + if (group.is(PreparingRebalance) && group.generationId == 0) + group.newMemberAdded = true + + group.add(member, callback) + + // The session timeout does not affect new members since they do not have their memberId and + // cannot send heartbeats. Furthermore, we cannot detect disconnects because sockets are muted + // while the JoinGroup is in purgatory. If the client does disconnect (e.g. because of a request + // timeout during a long rebalance), they may simply retry which will lead to a lot of defunct + // members in the rebalance. To prevent this going on indefinitely, we timeout JoinGroup requests + // for new members. If the new member is still there, we expect it to retry. + completeAndScheduleNextExpiration(group, member, NewMemberJoinTimeoutMs) + + maybePrepareRebalance(group, s"Adding new member $memberId with group instance id $groupInstanceId; client reason: $reason") + } + + private def updateStaticMemberAndRebalance( + group: GroupMetadata, + oldMemberId: String, + newMemberId: String, + groupInstanceId: String, + protocols: List[(String, Array[Byte])], + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + responseCallback: JoinCallback, + requestLocal: RequestLocal, + reason: String, + supportSkippingAssignment: Boolean + ): Unit = { + val currentLeader = group.leaderOrNull + val member = group.replaceStaticMember(groupInstanceId, oldMemberId, newMemberId) + // Heartbeat of old member id will expire without effect since the group no longer contains that member id. + // New heartbeat shall be scheduled with new member id. + completeAndScheduleNextHeartbeatExpiration(group, member) + + val knownStaticMember = group.get(newMemberId) + val oldRebalanceTimeoutMs = knownStaticMember.rebalanceTimeoutMs + val oldSessionTimeoutMs = knownStaticMember.sessionTimeoutMs + group.updateMember(knownStaticMember, protocols, rebalanceTimeoutMs, sessionTimeoutMs, responseCallback) + val oldProtocols = knownStaticMember.supportedProtocols + + group.currentState match { + case Stable => + // check if group's selectedProtocol of next generation will change, if not, simply store group to persist the + // updated static member, if yes, rebalance should be triggered to let the group's assignment and selectProtocol consistent + val selectedProtocolOfNextGeneration = group.selectProtocol + if (group.protocolName.contains(selectedProtocolOfNextGeneration)) { + info(s"Static member which joins during Stable stage and doesn't affect selectProtocol will not trigger rebalance.") + val groupAssignment: Map[String, Array[Byte]] = group.allMemberMetadata.map(member => member.memberId -> member.assignment).toMap + groupManager.storeGroup(group, groupAssignment, error => { + group.inLock { + if (error != Errors.NONE) { + warn(s"Failed to persist metadata for group ${group.groupId}: ${error.message}") + + // Failed to persist member.id of the given static member, revert the update of the static member in the group. + group.updateMember(knownStaticMember, oldProtocols, oldRebalanceTimeoutMs, oldSessionTimeoutMs, null) + val oldMember = group.replaceStaticMember(groupInstanceId, newMemberId, oldMemberId) + completeAndScheduleNextHeartbeatExpiration(group, oldMember) + responseCallback(JoinGroupResult( + List.empty, + memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = currentLeader, + skipAssignment = false, + error = error + )) + } else if (supportSkippingAssignment) { + // Starting from version 9 of the JoinGroup API, static members are able to + // skip running the assignor based on the `SkipAssignment` field. We leverage + // this to tell the leader that it is the leader of the group but by skipping + // running the assignor while the group is in stable state. + // Notes: + // 1) This allows the leader to continue monitoring metadata changes for the + // group. Note that any metadata changes happening while the static leader is + // down won't be noticed. + // 2) The assignors are not idempotent nor free from side effects. This is why + // we skip entirely the assignment step as it could generate a different group + // assignment which would be ignored by the group coordinator because the group + // is the stable state. + val isLeader = group.isLeader(newMemberId) + group.maybeInvokeJoinCallback(member, JoinGroupResult( + members = if (isLeader) { + group.currentMemberMetadata + } else { + List.empty + }, + memberId = newMemberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = group.leaderOrNull, + skipAssignment = isLeader, + error = Errors.NONE + )) + } else { + // Prior to version 9 of the JoinGroup API, we wanted to avoid current leader + // performing trivial assignment while the group is in stable stage, because + // the new assignment in leader's next sync call won't be broadcast by a stable group. + // This could be guaranteed by always returning the old leader id so that the current + // leader won't assume itself as a leader based on the returned message, since the new + // member.id won't match returned leader id, therefore no assignment will be performed. + group.maybeInvokeJoinCallback(member, JoinGroupResult( + members = List.empty, + memberId = newMemberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = currentLeader, + skipAssignment = false, + error = Errors.NONE + )) + } + } + }, requestLocal) + } else { + maybePrepareRebalance(group, s"Group's selectedProtocol will change because static member ${member.memberId} with instance id $groupInstanceId joined with change of protocol; client reason: $reason") + } + case CompletingRebalance => + // if the group is in after-sync stage, upon getting a new join-group of a known static member + // we should still trigger a new rebalance, since the old member may already be sent to the leader + // for assignment, and hence when the assignment gets back there would be a mismatch of the old member id + // with the new replaced member id. As a result the new member id would not get any assignment. + prepareRebalance(group, s"Updating metadata for static member ${member.memberId} with instance id $groupInstanceId; client reason: $reason") + case Empty | Dead => + throw new IllegalStateException(s"Group ${group.groupId} was not supposed to be " + + s"in the state ${group.currentState} when the unknown static member $groupInstanceId rejoins.") + case PreparingRebalance => + } + } + + private def updateMemberAndRebalance(group: GroupMetadata, + member: MemberMetadata, + protocols: List[(String, Array[Byte])], + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + reason: String, + callback: JoinCallback): Unit = { + group.updateMember(member, protocols, rebalanceTimeoutMs, sessionTimeoutMs, callback) + maybePrepareRebalance(group, reason) + } + + private def maybePrepareRebalance(group: GroupMetadata, reason: String): Unit = { + group.inLock { + if (group.canRebalance) + prepareRebalance(group, reason) + } + } + + // package private for testing + private[group] def prepareRebalance(group: GroupMetadata, reason: String): Unit = { + // if any members are awaiting sync, cancel their request and have them rejoin + if (group.is(CompletingRebalance)) + resetAndPropagateAssignmentError(group, Errors.REBALANCE_IN_PROGRESS) + + // if a sync expiration is pending, cancel it. + removeSyncExpiration(group) + + val delayedRebalance = if (group.is(Empty)) + new InitialDelayedJoin(this, + rebalancePurgatory, + group, + groupConfig.groupInitialRebalanceDelayMs, + groupConfig.groupInitialRebalanceDelayMs, + max(group.rebalanceTimeoutMs - groupConfig.groupInitialRebalanceDelayMs, 0)) + else + new DelayedJoin(this, group, group.rebalanceTimeoutMs) + + group.transitionTo(PreparingRebalance) + + info(s"Preparing to rebalance group ${group.groupId} in state ${group.currentState} with old generation " + + s"${group.generationId} (${Topic.GROUP_METADATA_TOPIC_NAME}-${partitionFor(group.groupId)}) (reason: $reason)") + + val groupKey = new GroupJoinKey(group.groupId) + rebalancePurgatory.tryCompleteElseWatch(delayedRebalance, util.Collections.singletonList(groupKey)) + } + + private def removeMemberAndUpdateGroup(group: GroupMetadata, member: MemberMetadata, reason: String): Unit = { + // New members may timeout with a pending JoinGroup while the group is still rebalancing, so we have + // to invoke the callback before removing the member. We return UNKNOWN_MEMBER_ID so that the consumer + // will retry the JoinGroup request if is still active. + group.maybeInvokeJoinCallback(member, JoinGroupResult(JoinGroupRequest.UNKNOWN_MEMBER_ID, Errors.UNKNOWN_MEMBER_ID)) + group.remove(member.memberId) + + group.currentState match { + case Dead | Empty => + case Stable | CompletingRebalance => maybePrepareRebalance(group, reason) + case PreparingRebalance => rebalancePurgatory.checkAndComplete(new GroupJoinKey(group.groupId)) + } + } + + private def removePendingMemberAndUpdateGroup(group: GroupMetadata, memberId: String): Unit = { + group.remove(memberId) + + if (group.is(PreparingRebalance)) { + rebalancePurgatory.checkAndComplete(new GroupJoinKey(group.groupId)) + } + } + + def tryCompleteJoin(group: GroupMetadata, forceComplete: () => Boolean): Boolean = { + group.inLock { + if (group.hasAllMembersJoined) + forceComplete() + else false + } + } + + def onCompleteJoin(group: GroupMetadata): Unit = { + group.inLock { + val notYetRejoinedDynamicMembers = group.notYetRejoinedMembers.filterNot(_._2.isStaticMember) + if (notYetRejoinedDynamicMembers.nonEmpty) { + info(s"Group ${group.groupId} removed dynamic members " + + s"who haven't joined: ${notYetRejoinedDynamicMembers.keySet}") + + notYetRejoinedDynamicMembers.values.foreach { failedMember => + group.remove(failedMember.memberId) + removeHeartbeatForLeavingMember(group, failedMember.memberId) + } + } + + if (group.is(Dead)) { + info(s"Group ${group.groupId} is dead, skipping rebalance stage") + } else if (!group.maybeElectNewJoinedLeader() && group.allMembers.nonEmpty) { + // If all members are not rejoining, we will postpone the completion + // of rebalance preparing stage, and send out another delayed operation + // until session timeout removes all the non-responsive members. + error(s"Group ${group.groupId} could not complete rebalance because no members rejoined") + rebalancePurgatory.tryCompleteElseWatch( + new DelayedJoin(this, group, group.rebalanceTimeoutMs), + util.Collections.singletonList(new GroupJoinKey(group.groupId))) + } else { + group.initNextGeneration() + if (group.is(Empty)) { + info(s"Group ${group.groupId} with generation ${group.generationId} is now empty " + + s"(${Topic.GROUP_METADATA_TOPIC_NAME}-${partitionFor(group.groupId)})") + + groupManager.storeGroup(group, Map.empty, error => { + if (error != Errors.NONE) { + // we failed to write the empty group metadata. If the broker fails before another rebalance, + // the previous generation written to the log will become active again (and most likely timeout). + // This should be safe since there are no active members in an empty generation, so we just warn. + warn(s"Failed to write empty metadata for group ${group.groupId}: ${error.message}") + } + }, RequestLocal.noCaching) + } else { + info(s"Stabilized group ${group.groupId} generation ${group.generationId} " + + s"(${Topic.GROUP_METADATA_TOPIC_NAME}-${partitionFor(group.groupId)}) with ${group.size} members") + + // trigger the awaiting join group response callback for all the members after rebalancing + for (member <- group.allMemberMetadata) { + val joinResult = JoinGroupResult( + members = if (group.isLeader(member.memberId)) { + group.currentMemberMetadata + } else { + List.empty + }, + memberId = member.memberId, + generationId = group.generationId, + protocolType = group.protocolType, + protocolName = group.protocolName, + leaderId = group.leaderOrNull, + skipAssignment = false, + error = Errors.NONE) + + group.maybeInvokeJoinCallback(member, joinResult) + completeAndScheduleNextHeartbeatExpiration(group, member) + member.isNew = false + + group.addPendingSyncMember(member.memberId) + } + + schedulePendingSync(group) + } + } + } + } + + private def removePendingSyncMember( + group: GroupMetadata, + memberId: String + ): Unit = { + group.removePendingSyncMember(memberId) + maybeCompleteSyncExpiration(group) + } + + private def removeSyncExpiration( + group: GroupMetadata + ): Unit = { + group.clearPendingSyncMembers() + maybeCompleteSyncExpiration(group) + } + + private def maybeCompleteSyncExpiration( + group: GroupMetadata + ): Unit = { + val groupKey = new GroupSyncKey(group.groupId) + rebalancePurgatory.checkAndComplete(groupKey) + } + + private def schedulePendingSync( + group: GroupMetadata + ): Unit = { + val delayedSync = new DelayedSync(this, group, group.generationId, group.rebalanceTimeoutMs) + val groupKey = new GroupSyncKey(group.groupId) + rebalancePurgatory.tryCompleteElseWatch(delayedSync, util.Collections.singletonList(groupKey)) + } + + def tryCompletePendingSync( + group: GroupMetadata, + generationId: Int, + forceComplete: () => Boolean + ): Boolean = { + group.inLock { + if (generationId != group.generationId) { + forceComplete() + } else { + group.currentState match { + case Dead | Empty | PreparingRebalance => + forceComplete() + case CompletingRebalance | Stable => + if (group.hasReceivedSyncFromAllMembers) + forceComplete() + else false + } + } + } + } + + def onExpirePendingSync( + group: GroupMetadata, + generationId: Int + ): Unit = { + group.inLock { + if (generationId != group.generationId) { + error(s"Received unexpected notification of sync expiration for ${group.groupId} " + + s"with an old generation $generationId while the group has ${group.generationId}.") + } else { + group.currentState match { + case Dead | Empty | PreparingRebalance => + error(s"Received unexpected notification of sync expiration after group ${group.groupId} " + + s"already transitioned to the ${group.currentState} state.") + + case CompletingRebalance | Stable => + if (!group.hasReceivedSyncFromAllMembers) { + val pendingSyncMembers = group.allPendingSyncMembers + + pendingSyncMembers.foreach { memberId => + group.remove(memberId) + removeHeartbeatForLeavingMember(group, memberId) + } + + debug(s"Group ${group.groupId} removed members who haven't " + + s"sent their sync request: $pendingSyncMembers") + + prepareRebalance(group, s"Removing $pendingSyncMembers on pending sync request expiration") + } + } + } + } + } + + def tryCompleteHeartbeat(group: GroupMetadata, + memberId: String, + isPending: Boolean, + forceComplete: () => Boolean): Boolean = { + group.inLock { + // The group has been unloaded and invalid, we should complete the heartbeat. + if (group.is(Dead)) { + forceComplete() + } else if (isPending) { + // complete the heartbeat if the member has joined the group + if (group.has(memberId)) { + forceComplete() + } else false + } else if (shouldCompleteNonPendingHeartbeat(group, memberId)) { + forceComplete() + } else false + } + } + + def shouldCompleteNonPendingHeartbeat(group: GroupMetadata, memberId: String): Boolean = { + if (group.has(memberId)) { + val member = group.get(memberId) + member.hasSatisfiedHeartbeat + } else { + debug(s"Member id $memberId was not found in ${group.groupId} during heartbeat completion check") + true + } + } + + def onExpireHeartbeat(group: GroupMetadata, memberId: String, isPending: Boolean): Unit = { + group.inLock { + if (group.is(Dead)) { + info(s"Received notification of heartbeat expiration for member $memberId after group ${group.groupId} had already been unloaded or deleted.") + } else if (isPending) { + info(s"Pending member $memberId in group ${group.groupId} has been removed after session timeout expiration.") + removePendingMemberAndUpdateGroup(group, memberId) + } else if (!group.has(memberId)) { + debug(s"Member $memberId has already been removed from the group.") + } else { + val member = group.get(memberId) + if (!member.hasSatisfiedHeartbeat) { + info(s"Member ${member.memberId} in group ${group.groupId} has failed, removing it from the group") + removeMemberAndUpdateGroup(group, member, s"removing member ${member.memberId} on heartbeat expiration") + } + } + } + } + + def partitionFor(group: String): Int = groupManager.partitionFor(group) + + private def groupIsOverCapacity(group: GroupMetadata): Boolean = { + group.size > groupConfig.groupMaxSize + } + + private def isCoordinatorForGroup(groupId: String) = groupManager.isGroupLocal(groupId) + + private def isCoordinatorLoadInProgress(groupId: String) = groupManager.isGroupLoading(groupId) +} + +object GroupCoordinator { + + val NoState = "" + val NoProtocolType = "" + val NoProtocol = "" + val NoLeader = "" + val NoGeneration = -1 + val NoMembers = List[MemberSummary]() + val EmptyGroup = GroupSummary(NoState, NoProtocolType, NoProtocol, NoMembers) + val DeadGroup = GroupSummary(Dead.toString, NoProtocolType, NoProtocol, NoMembers) + val NewMemberJoinTimeoutMs: Int = 5 * 60 * 1000 + + private[group] def apply( + config: KafkaConfig, + replicaManager: ReplicaManager, + time: Time, + metrics: Metrics + ): GroupCoordinator = { + val heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", config.brokerId) + val rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", config.brokerId) + GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, time, metrics) + } + + private[group] def offsetConfig(config: KafkaConfig) = new OffsetConfig( + config.groupCoordinatorConfig.offsetMetadataMaxSize, + config.groupCoordinatorConfig.offsetsLoadBufferSize, + config.groupCoordinatorConfig.offsetsRetentionMs, + config.groupCoordinatorConfig.offsetsRetentionCheckIntervalMs, + config.groupCoordinatorConfig.offsetsTopicPartitions, + config.groupCoordinatorConfig.offsetsTopicSegmentBytes, + config.groupCoordinatorConfig.offsetsTopicReplicationFactor, + config.groupCoordinatorConfig.offsetTopicCompressionType, + config.groupCoordinatorConfig.offsetCommitTimeoutMs + ) + + private[group] def apply( + config: KafkaConfig, + replicaManager: ReplicaManager, + heartbeatPurgatory: DelayedOperationPurgatory[DelayedHeartbeat], + rebalancePurgatory: DelayedOperationPurgatory[DelayedRebalance], + time: Time, + metrics: Metrics + ): GroupCoordinator = { + val offsetConfig = this.offsetConfig(config) + val groupConfig = GroupConfig(groupMinSessionTimeoutMs = config.groupCoordinatorConfig.classicGroupMinSessionTimeoutMs, + groupMaxSessionTimeoutMs = config.groupCoordinatorConfig.classicGroupMaxSessionTimeoutMs, + groupMaxSize = config.groupCoordinatorConfig.classicGroupMaxSize, + groupInitialRebalanceDelayMs = config.groupCoordinatorConfig.classicGroupInitialRebalanceDelayMs) + + val groupMetadataManager = new GroupMetadataManager(config.brokerId, offsetConfig, replicaManager, time, metrics) + new GroupCoordinator(config.brokerId, groupConfig, offsetConfig, groupMetadataManager, heartbeatPurgatory, + rebalancePurgatory, time, metrics) + } + + private def memberLeaveError(memberIdentity: MemberIdentity, + error: Errors): LeaveMemberResponse = { + LeaveMemberResponse( + memberId = memberIdentity.memberId, + groupInstanceId = Option(memberIdentity.groupInstanceId), + error = error) + } + + private def leaveError(topLevelError: Errors, + memberResponses: List[LeaveMemberResponse]): LeaveGroupResult = { + LeaveGroupResult( + topLevelError = topLevelError, + memberResponses = memberResponses) + } +} + +case class GroupConfig(groupMinSessionTimeoutMs: Int, + groupMaxSessionTimeoutMs: Int, + groupMaxSize: Int, + groupInitialRebalanceDelayMs: Int) + +case class JoinGroupResult(members: List[JoinGroupResponseMember], + memberId: String, + generationId: Int, + protocolType: Option[String], + protocolName: Option[String], + leaderId: String, + skipAssignment: Boolean, + error: Errors) + +object JoinGroupResult { + def apply(memberId: String, error: Errors): JoinGroupResult = { + JoinGroupResult( + members = List.empty, + memberId = memberId, + generationId = GroupCoordinator.NoGeneration, + protocolType = None, + protocolName = None, + leaderId = GroupCoordinator.NoLeader, + skipAssignment = false, + error = error) + } +} + +case class SyncGroupResult(protocolType: Option[String], + protocolName: Option[String], + memberAssignment: Array[Byte], + error: Errors) + +object SyncGroupResult { + def apply(error: Errors): SyncGroupResult = { + SyncGroupResult(None, None, Array.empty, error) + } +} + +case class LeaveMemberResponse(memberId: String, + groupInstanceId: Option[String], + error: Errors) + +case class LeaveGroupResult(topLevelError: Errors, + memberResponses : List[LeaveMemberResponse]) diff --git a/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala b/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala new file mode 100644 index 0000000000000..bc775b5f38060 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/GroupCoordinatorAdapter.scala @@ -0,0 +1,664 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import kafka.server.{KafkaConfig, ReplicaManager} +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.message.{ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsResponseData, DescribeGroupsResponseData, HeartbeatRequestData, HeartbeatResponseData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupRequestData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchRequestData, OffsetFetchResponseData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData} +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.requests.{OffsetCommitRequest, RequestContext, TransactionResult} +import org.apache.kafka.common.utils.{BufferSupplier, Time} +import org.apache.kafka.coordinator.group +import org.apache.kafka.coordinator.group.OffsetAndMetadata +import org.apache.kafka.image.{MetadataDelta, MetadataImage} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.util.FutureUtils + +import java.time.Duration +import java.util +import java.util.{Optional, OptionalInt, OptionalLong, Properties} +import java.util.concurrent.CompletableFuture +import java.util.function.IntSupplier +import scala.collection.{immutable, mutable} +import scala.jdk.CollectionConverters._ + +object GroupCoordinatorAdapter { + def apply( + config: KafkaConfig, + replicaManager: ReplicaManager, + time: Time, + metrics: Metrics + ): GroupCoordinatorAdapter = { + new GroupCoordinatorAdapter( + GroupCoordinator( + config, + replicaManager, + time, + metrics + ), + time + ) + } +} + +/** + * GroupCoordinatorAdapter is a thin wrapper around kafka.coordinator.group.GroupCoordinator + * that exposes the new org.apache.kafka.coordinator.group.GroupCoordinator interface. + */ +private[group] class GroupCoordinatorAdapter( + private val coordinator: GroupCoordinator, + private val time: Time +) extends org.apache.kafka.coordinator.group.GroupCoordinator { + + override def isNewGroupCoordinator: Boolean = false + + override def consumerGroupHeartbeat( + context: RequestContext, + request: ConsumerGroupHeartbeatRequestData + ): CompletableFuture[ConsumerGroupHeartbeatResponseData] = { + FutureUtils.failedFuture(Errors.UNSUPPORTED_VERSION.exception( + s"The old group coordinator does not support ${ApiKeys.CONSUMER_GROUP_HEARTBEAT.name} API." + )) + } + + override def shareGroupHeartbeat( + context: RequestContext, + request: ShareGroupHeartbeatRequestData + ): CompletableFuture[ShareGroupHeartbeatResponseData] = { + FutureUtils.failedFuture(Errors.UNSUPPORTED_VERSION.exception( + s"The old group coordinator does not support ${ApiKeys.SHARE_GROUP_HEARTBEAT.name} API." + )) + } + + override def joinGroup( + context: RequestContext, + request: JoinGroupRequestData, + bufferSupplier: BufferSupplier + ): CompletableFuture[JoinGroupResponseData] = { + val future = new CompletableFuture[JoinGroupResponseData]() + + def callback(joinResult: JoinGroupResult): Unit = { + future.complete(new JoinGroupResponseData() + .setErrorCode(joinResult.error.code) + .setGenerationId(joinResult.generationId) + .setProtocolType(joinResult.protocolType.orNull) + .setProtocolName(joinResult.protocolName.orNull) + .setLeader(joinResult.leaderId) + .setSkipAssignment(joinResult.skipAssignment) + .setMemberId(joinResult.memberId) + .setMembers(joinResult.members.asJava) + ) + } + + val groupInstanceId = Option(request.groupInstanceId) + + // Only return MEMBER_ID_REQUIRED error if joinGroupRequest version is >= 4 + // and groupInstanceId is configured to unknown. + val requireKnownMemberId = context.apiVersion >= 4 && groupInstanceId.isEmpty + + val protocols = request.protocols.valuesList.asScala.map { protocol => + (protocol.name, protocol.metadata) + }.toList + + val supportSkippingAssignment = context.apiVersion >= 9 + + coordinator.handleJoinGroup( + request.groupId, + request.memberId, + groupInstanceId, + requireKnownMemberId, + supportSkippingAssignment, + context.clientId, + context.clientAddress.toString, + request.rebalanceTimeoutMs, + request.sessionTimeoutMs, + request.protocolType, + protocols, + callback, + Option(request.reason), + new RequestLocal(bufferSupplier) + ) + + future + } + + override def syncGroup( + context: RequestContext, + request: SyncGroupRequestData, + bufferSupplier: BufferSupplier + ): CompletableFuture[SyncGroupResponseData] = { + val future = new CompletableFuture[SyncGroupResponseData]() + + def callback(syncGroupResult: SyncGroupResult): Unit = { + future.complete(new SyncGroupResponseData() + .setErrorCode(syncGroupResult.error.code) + .setProtocolType(syncGroupResult.protocolType.orNull) + .setProtocolName(syncGroupResult.protocolName.orNull) + .setAssignment(syncGroupResult.memberAssignment) + ) + } + + val assignmentMap = immutable.Map.newBuilder[String, Array[Byte]] + request.assignments.forEach { assignment => + assignmentMap += assignment.memberId -> assignment.assignment + } + + coordinator.handleSyncGroup( + request.groupId, + request.generationId, + request.memberId, + Option(request.protocolType), + Option(request.protocolName), + Option(request.groupInstanceId), + assignmentMap.result(), + callback, + new RequestLocal(bufferSupplier) + ) + + future + } + + override def heartbeat( + context: RequestContext, + request: HeartbeatRequestData + ): CompletableFuture[HeartbeatResponseData] = { + val future = new CompletableFuture[HeartbeatResponseData]() + + coordinator.handleHeartbeat( + request.groupId, + request.memberId, + Option(request.groupInstanceId), + request.generationId, + error => future.complete(new HeartbeatResponseData() + .setErrorCode(error.code)) + ) + + future + } + + override def leaveGroup( + context: RequestContext, + request: LeaveGroupRequestData + ): CompletableFuture[LeaveGroupResponseData] = { + val future = new CompletableFuture[LeaveGroupResponseData]() + + def callback(leaveGroupResult: LeaveGroupResult): Unit = { + future.complete(new LeaveGroupResponseData() + .setErrorCode(leaveGroupResult.topLevelError.code) + .setMembers(leaveGroupResult.memberResponses.map { member => + new LeaveGroupResponseData.MemberResponse() + .setErrorCode(member.error.code) + .setMemberId(member.memberId) + .setGroupInstanceId(member.groupInstanceId.orNull) + }.asJava) + ) + } + + coordinator.handleLeaveGroup( + request.groupId, + request.members.asScala.toList, + callback + ) + + future + } + + override def listGroups( + context: RequestContext, + request: ListGroupsRequestData + ): CompletableFuture[ListGroupsResponseData] = { + // Handle a null array the same as empty. + val (error, groups) = coordinator.handleListGroups( + Option(request.statesFilter).map(_.asScala.toSet).getOrElse(Set.empty), + Option(request.typesFilter).map(_.asScala.toSet).getOrElse(Set.empty) + ) + + val response = new ListGroupsResponseData() + .setErrorCode(error.code) + + groups.foreach { group => + response.groups.add(new ListGroupsResponseData.ListedGroup() + .setGroupId(group.groupId) + .setProtocolType(group.protocolType) + .setGroupState(group.state) + .setGroupType(group.groupType)) + } + + CompletableFuture.completedFuture(response) + } + + override def describeGroups( + context: RequestContext, + groupIds: util.List[String] + ): CompletableFuture[util.List[DescribeGroupsResponseData.DescribedGroup]] = { + + def describeGroup(groupId: String): DescribeGroupsResponseData.DescribedGroup = { + val (error, errorMessage, summary) = coordinator.handleDescribeGroup(groupId, context.apiVersion()) + + new DescribeGroupsResponseData.DescribedGroup() + .setErrorCode(error.code) + .setErrorMessage(errorMessage.orNull) + .setGroupId(groupId) + .setGroupState(summary.state) + .setProtocolType(summary.protocolType) + .setProtocolData(summary.protocol) + .setMembers(summary.members.map { member => + new DescribeGroupsResponseData.DescribedGroupMember() + .setMemberId(member.memberId) + .setGroupInstanceId(member.groupInstanceId.orNull) + .setClientId(member.clientId) + .setClientHost(member.clientHost) + .setMemberAssignment(member.assignment) + .setMemberMetadata(member.metadata) + }.asJava) + } + + CompletableFuture.completedFuture(groupIds.asScala.map(describeGroup).asJava) + } + + override def deleteGroups( + context: RequestContext, + groupIds: util.List[String], + bufferSupplier: BufferSupplier + ): CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection] = { + val results = new DeleteGroupsResponseData.DeletableGroupResultCollection() + coordinator.handleDeleteGroups( + groupIds.asScala.toSet, + new RequestLocal(bufferSupplier) + ).foreachEntry { (groupId, error) => + results.add(new DeleteGroupsResponseData.DeletableGroupResult() + .setGroupId(groupId) + .setErrorCode(error.code)) + } + CompletableFuture.completedFuture(results) + } + + override def fetchAllOffsets( + context: RequestContext, + request: OffsetFetchRequestData.OffsetFetchRequestGroup, + requireStable: Boolean + ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + handleFetchOffset( + request.groupId, + requireStable, + None + ) + } + + override def fetchOffsets( + context: RequestContext, + request: OffsetFetchRequestData.OffsetFetchRequestGroup, + requireStable: Boolean + ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + val topicPartitions = new mutable.ArrayBuffer[TopicPartition]() + request.topics.forEach { topic => + topic.partitionIndexes.forEach { partition => + topicPartitions += new TopicPartition(topic.name, partition) + } + } + + handleFetchOffset( + request.groupId, + requireStable, + Some(topicPartitions.toSeq) + ) + } + + private def handleFetchOffset( + groupId: String, + requireStable: Boolean, + partitions: Option[Seq[TopicPartition]] + ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + val (error, results) = coordinator.handleFetchOffsets( + groupId, + requireStable, + partitions + ) + + val future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() + if (error != Errors.NONE) { + future.completeExceptionally(error.exception) + } else { + val topicsList = new util.ArrayList[OffsetFetchResponseData.OffsetFetchResponseTopics]() + val topicsMap = new mutable.HashMap[String, OffsetFetchResponseData.OffsetFetchResponseTopics]() + + results.foreachEntry { (tp, offset) => + val topic = topicsMap.get(tp.topic) match { + case Some(topic) => + topic + + case None => + val topicOffsets = new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(tp.topic) + topicsMap += tp.topic -> topicOffsets + topicsList.add(topicOffsets) + topicOffsets + } + + topic.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(tp.partition) + .setMetadata(offset.metadata) + .setCommittedOffset(offset.offset) + .setCommittedLeaderEpoch(offset.leaderEpoch.orElse(-1)) + .setErrorCode(offset.error.code)) + } + + future.complete(new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(groupId) + .setTopics(topicsList)) + } + + future + } + + override def commitOffsets( + context: RequestContext, + request: OffsetCommitRequestData, + bufferSupplier: BufferSupplier + ): CompletableFuture[OffsetCommitResponseData] = { + val currentTimeMs = time.milliseconds + val future = new CompletableFuture[OffsetCommitResponseData]() + + def callback(commitStatus: Map[TopicIdPartition, Errors]): Unit = { + val response = new OffsetCommitResponseData() + val byTopics = new mutable.HashMap[String, OffsetCommitResponseData.OffsetCommitResponseTopic]() + + commitStatus.foreachEntry { (tp, error) => + val topic = byTopics.get(tp.topic) match { + case Some(existingTopic) => + existingTopic + case None => + val newTopic = new OffsetCommitResponseData.OffsetCommitResponseTopic().setName(tp.topic) + byTopics += tp.topic -> newTopic + response.topics.add(newTopic) + newTopic + } + + topic.partitions.add(new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(tp.partition) + .setErrorCode(error.code)) + } + + future.complete(response) + } + + // "default" expiration timestamp is defined as now + retention. The retention may be overridden + // in versions from v2 to v4. Otherwise, the retention defined on the broker is used. If an explicit + // commit timestamp is provided (v1 only), the expiration timestamp is computed based on that. + val expireTimeMs = request.retentionTimeMs match { + case OffsetCommitRequest.DEFAULT_RETENTION_TIME => None + case retentionTimeMs => Some(currentTimeMs + retentionTimeMs) + } + + val partitions = new mutable.HashMap[TopicIdPartition, OffsetAndMetadata]() + request.topics.forEach { topic => + topic.partitions.forEach { partition => + val tp = new TopicIdPartition(Uuid.ZERO_UUID, partition.partitionIndex, topic.name) + partitions += tp -> createOffsetAndMetadata( + currentTimeMs, + partition.committedOffset, + partition.committedLeaderEpoch, + partition.committedMetadata, + expireTimeMs + ) + } + } + + coordinator.handleCommitOffsets( + request.groupId, + request.memberId, + Option(request.groupInstanceId), + request.generationIdOrMemberEpoch, + partitions.toMap, + callback, + new RequestLocal(bufferSupplier) + ) + + future + } + + override def commitTransactionalOffsets( + context: RequestContext, + request: TxnOffsetCommitRequestData, + bufferSupplier: BufferSupplier + ): CompletableFuture[TxnOffsetCommitResponseData] = { + val currentTimeMs = time.milliseconds + val future = new CompletableFuture[TxnOffsetCommitResponseData]() + + def callback(results: Map[TopicIdPartition, Errors]): Unit = { + val response = new TxnOffsetCommitResponseData() + val byTopics = new mutable.HashMap[String, TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic]() + + results.foreachEntry { (tp, error) => + val topic = byTopics.get(tp.topic) match { + case Some(existingTopic) => + existingTopic + case None => + val newTopic = new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic().setName(tp.topic) + byTopics += tp.topic -> newTopic + response.topics.add(newTopic) + newTopic + } + + topic.partitions.add(new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() + .setPartitionIndex(tp.partition) + .setErrorCode(error.code)) + } + + future.complete(response) + } + + val partitions = new mutable.HashMap[TopicIdPartition, OffsetAndMetadata]() + request.topics.forEach { topic => + topic.partitions.forEach { partition => + val tp = new TopicIdPartition(Uuid.ZERO_UUID, partition.partitionIndex, topic.name) + partitions += tp -> createOffsetAndMetadata( + currentTimeMs, + partition.committedOffset, + partition.committedLeaderEpoch, + partition.committedMetadata, + None + ) + } + } + + coordinator.handleTxnCommitOffsets( + request.groupId, + request.transactionalId, + request.producerId, + request.producerEpoch, + request.memberId, + Option(request.groupInstanceId), + request.generationId, + partitions.toMap, + callback, + new RequestLocal(bufferSupplier), + context.apiVersion() + ) + + future + } + + private def createOffsetAndMetadata( + currentTimeMs: Long, + offset: Long, + leaderEpoch: Int, + metadata: String, + expireTimestamp: Option[Long] + ): OffsetAndMetadata = { + new OffsetAndMetadata( + offset, + leaderEpoch match { + case RecordBatch.NO_PARTITION_LEADER_EPOCH => OptionalInt.empty + case committedLeaderEpoch => OptionalInt.of(committedLeaderEpoch) + }, + metadata match { + case null => OffsetAndMetadata.NO_METADATA + case metadata => metadata + }, + currentTimeMs, + expireTimestamp match { + case Some(timestamp) => OptionalLong.of(timestamp) + case None => OptionalLong.empty() + } + ) + } + + override def deleteOffsets( + context: RequestContext, + request: OffsetDeleteRequestData, + bufferSupplier: BufferSupplier + ): CompletableFuture[OffsetDeleteResponseData] = { + val future = new CompletableFuture[OffsetDeleteResponseData]() + + val partitions = mutable.ArrayBuffer[TopicPartition]() + request.topics.forEach { topic => + topic.partitions.forEach { partition => + partitions += new TopicPartition(topic.name, partition.partitionIndex) + } + } + + val (groupError, topicPartitionResults) = coordinator.handleDeleteOffsets( + request.groupId, + partitions, + new RequestLocal(bufferSupplier) + ) + + if (groupError != Errors.NONE) { + future.completeExceptionally(groupError.exception) + } else { + val response = new OffsetDeleteResponseData() + topicPartitionResults.foreachEntry { (topicPartition, error) => + var topic = response.topics.find(topicPartition.topic) + if (topic == null) { + topic = new OffsetDeleteResponseData.OffsetDeleteResponseTopic().setName(topicPartition.topic) + response.topics.add(topic) + } + topic.partitions.add(new OffsetDeleteResponseData.OffsetDeleteResponsePartition() + .setPartitionIndex(topicPartition.partition) + .setErrorCode(error.code)) + } + + future.complete(response) + } + + future + } + + override def completeTransaction( + tp: TopicPartition, + producerId: Long, + producerEpoch: Short, + coordinatorEpoch: Int, + result: TransactionResult, + timeout: Duration + ): CompletableFuture[Void] = { + FutureUtils.failedFuture(new IllegalStateException( + s"The old group coordinator does not support `completeTransaction` API." + )) + } + + override def partitionFor(groupId: String): Int = { + coordinator.partitionFor(groupId) + } + + override def onTransactionCompleted( + producerId: Long, + partitions: java.lang.Iterable[TopicPartition], + transactionResult: TransactionResult + ): CompletableFuture[Void] = { + try { + coordinator.scheduleHandleTxnCompletion( + producerId, + partitions.asScala, + transactionResult + ) + } catch { + case e: Throwable => FutureUtils.failedFuture(e) + } + } + + override def onPartitionsDeleted( + topicPartitions: util.List[TopicPartition], + bufferSupplier: BufferSupplier + ): Unit = { + coordinator.handleDeletedPartitions(topicPartitions.asScala, new RequestLocal(bufferSupplier)) + } + + override def onElection( + groupMetadataPartitionIndex: Int, + groupMetadataPartitionLeaderEpoch: Int + ): Unit = { + coordinator.onElection(groupMetadataPartitionIndex, groupMetadataPartitionLeaderEpoch) + } + + override def onResignation( + groupMetadataPartitionIndex: Int, + groupMetadataPartitionLeaderEpoch: OptionalInt + ): Unit = { + coordinator.onResignation(groupMetadataPartitionIndex, groupMetadataPartitionLeaderEpoch) + } + + override def onNewMetadataImage( + newImage: MetadataImage, + delta: MetadataDelta + ): Unit = { + // The metadata image is not used in the old group coordinator. + } + + override def groupMetadataTopicConfigs(): Properties = { + coordinator.offsetsTopicConfigs + } + + override def groupConfig(groupId: String): Optional[group.GroupConfig] = { + throw Errors.UNSUPPORTED_VERSION.exception("The old group coordinator does not support get group config.") + } + + override def updateGroupConfig( + groupId: String, + newGroupConfig: Properties + ): Unit = { + throw Errors.UNSUPPORTED_VERSION.exception("The old group coordinator does not support update group config.") + } + + override def startup(groupMetadataTopicPartitionCount: IntSupplier): Unit = { + coordinator.startup(() => groupMetadataTopicPartitionCount.getAsInt) + } + + override def shutdown(): Unit = { + coordinator.shutdown() + } + + override def consumerGroupDescribe( + context: RequestContext, + groupIds: util.List[String] + ): CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]] = { + FutureUtils.failedFuture(Errors.UNSUPPORTED_VERSION.exception( + s"The old group coordinator does not support ${ApiKeys.CONSUMER_GROUP_DESCRIBE.name} API." + )) + } + + override def shareGroupDescribe( + context: RequestContext, + groupIds: util.List[String] + ): CompletableFuture[util.List[ShareGroupDescribeResponseData.DescribedGroup]] = { + FutureUtils.failedFuture(Errors.UNSUPPORTED_VERSION.exception( + s"The old group coordinator does not support ${ApiKeys.SHARE_GROUP_DESCRIBE.name} API." + )) + } +} diff --git a/core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala b/core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala new file mode 100644 index 0000000000000..2f73577a5dd60 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/GroupMetadata.scala @@ -0,0 +1,857 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import java.nio.ByteBuffer +import java.util.UUID +import java.util.concurrent.locks.ReentrantLock +import kafka.utils.{CoreUtils, Logging, nonthreadsafe} +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.types.SchemaException +import org.apache.kafka.common.utils.Time +import org.apache.kafka.coordinator.group.{Group, OffsetAndMetadata} + +import scala.collection.{Seq, immutable, mutable} +import scala.jdk.CollectionConverters._ + +private[group] sealed trait GroupState { + val validPreviousStates: Set[GroupState] + val toLowerCaseString: String = toString.toLowerCase +} + +/** + * Group is preparing to rebalance + * + * action: respond to heartbeats with REBALANCE_IN_PROGRESS + * respond to sync group with REBALANCE_IN_PROGRESS + * remove member on leave group request + * park join group requests from new or existing members until all expected members have joined + * allow offset commits from previous generation + * allow offset fetch requests + * transition: some members have joined by the timeout => CompletingRebalance + * all members have left the group => Empty + * group is removed by partition emigration => Dead + */ +private[group] case object PreparingRebalance extends GroupState { + val validPreviousStates: Set[GroupState] = Set(Stable, CompletingRebalance, Empty) +} + +/** + * Group is awaiting state assignment from the leader + * + * action: respond to heartbeats with REBALANCE_IN_PROGRESS + * respond to offset commits with REBALANCE_IN_PROGRESS + * park sync group requests from followers until transition to Stable + * allow offset fetch requests + * transition: sync group with state assignment received from leader => Stable + * join group from new member or existing member with updated metadata => PreparingRebalance + * leave group from existing member => PreparingRebalance + * member failure detected => PreparingRebalance + * group is removed by partition emigration => Dead + */ +private[group] case object CompletingRebalance extends GroupState { + val validPreviousStates: Set[GroupState] = Set(PreparingRebalance) +} + +/** + * Group is stable + * + * action: respond to member heartbeats normally + * respond to sync group from any member with current assignment + * respond to join group from followers with matching metadata with current group metadata + * allow offset commits from member of current generation + * allow offset fetch requests + * transition: member failure detected via heartbeat => PreparingRebalance + * leave group from existing member => PreparingRebalance + * leader join-group received => PreparingRebalance + * follower join-group with new metadata => PreparingRebalance + * group is removed by partition emigration => Dead + */ +private[group] case object Stable extends GroupState { + val validPreviousStates: Set[GroupState] = Set(CompletingRebalance) +} + +/** + * Group has no more members and its metadata is being removed + * + * action: respond to join group with UNKNOWN_MEMBER_ID + * respond to sync group with UNKNOWN_MEMBER_ID + * respond to heartbeat with UNKNOWN_MEMBER_ID + * respond to leave group with UNKNOWN_MEMBER_ID + * respond to offset commit with UNKNOWN_MEMBER_ID + * allow offset fetch requests + * transition: Dead is a final state before group metadata is cleaned up, so there are no transitions + */ +private[group] case object Dead extends GroupState { + val validPreviousStates: Set[GroupState] = Set(Stable, PreparingRebalance, CompletingRebalance, Empty, Dead) +} + +/** + * Group has no more members, but lingers until all offsets have expired. This state + * also represents groups which use Kafka only for offset commits and have no members. + * + * action: respond normally to join group from new members + * respond to sync group with UNKNOWN_MEMBER_ID + * respond to heartbeat with UNKNOWN_MEMBER_ID + * respond to leave group with UNKNOWN_MEMBER_ID + * respond to offset commit with UNKNOWN_MEMBER_ID + * allow offset fetch requests + * transition: last offsets removed in periodic expiration task => Dead + * join group from a new member => PreparingRebalance + * group is removed by partition emigration => Dead + * group is removed by expiration => Dead + */ +private[group] case object Empty extends GroupState { + val validPreviousStates: Set[GroupState] = Set(PreparingRebalance) +} + + +private object GroupMetadata extends Logging { + + def loadGroup(groupId: String, + initialState: GroupState, + generationId: Int, + protocolType: String, + protocolName: String, + leaderId: String, + currentStateTimestamp: Option[Long], + members: Iterable[MemberMetadata], + time: Time): GroupMetadata = { + val group = new GroupMetadata(groupId, initialState, time) + group.generationId = generationId + group.protocolType = if (protocolType == null || protocolType.isEmpty) None else Some(protocolType) + group.protocolName = Option(protocolName) + group.leaderId = Option(leaderId) + group.currentStateTimestamp = currentStateTimestamp + members.foreach { member => + group.add(member, null) + info(s"Loaded member $member in group $groupId with generation ${group.generationId}.") + } + group.subscribedTopics = group.computeSubscribedTopics() + group + } + + private val MemberIdDelimiter = "-" +} + +/** + * Case class used to represent group metadata for the ListGroups API + */ +case class GroupOverview(groupId: String, + protocolType: String, + state: String, + groupType: String) + +/** + * Case class used to represent group metadata for the DescribeGroup API + */ +case class GroupSummary(state: String, + protocolType: String, + protocol: String, + members: List[MemberSummary]) + +/** + * We cache offset commits along with their commit record offset. This enables us to ensure that the latest offset + * commit is always materialized when we have a mix of transactional and regular offset commits. Without preserving + * information of the commit record offset, compaction of the offsets topic itself may result in the wrong offset commit + * being materialized. + */ +case class CommitRecordMetadataAndOffset(appendedBatchOffset: Option[Long], offsetAndMetadata: OffsetAndMetadata) { + def olderThan(that: CommitRecordMetadataAndOffset): Boolean = appendedBatchOffset.get < that.appendedBatchOffset.get +} + +/** + * Group contains the following metadata: + * + * Membership metadata: + * 1. Members registered in this group + * 2. Current protocol assigned to the group (e.g. partition assignment strategy for consumers) + * 3. Protocol metadata associated with group members + * + * State metadata: + * 1. group state + * 2. generation id + * 3. leader id + */ +@nonthreadsafe +private[group] class GroupMetadata(val groupId: String, initialState: GroupState, time: Time) extends Logging { + type JoinCallback = JoinGroupResult => Unit + + private[group] val lock = new ReentrantLock + + private var state: GroupState = initialState + var currentStateTimestamp: Option[Long] = Some(time.milliseconds()) + var protocolType: Option[String] = None + var protocolName: Option[String] = None + var generationId = 0 + private var leaderId: Option[String] = None + + private val members = new mutable.HashMap[String, MemberMetadata] + // Static membership mapping [key: group.instance.id, value: member.id] + private val staticMembers = new mutable.HashMap[String, String] + private val pendingMembers = new mutable.HashSet[String] + private var numMembersAwaitingJoin = 0 + private val supportedProtocols = new mutable.HashMap[String, Integer]().withDefaultValue(0) + private val offsets = new mutable.HashMap[TopicPartition, CommitRecordMetadataAndOffset] + private val pendingOffsetCommits = new mutable.HashMap[TopicPartition, OffsetAndMetadata] + private val pendingTransactionalOffsetCommits = new mutable.HashMap[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]() + private var receivedTransactionalOffsetCommits = false + private var receivedConsumerOffsetCommits = false + private val pendingSyncMembers = new mutable.HashSet[String] + + // When protocolType == `consumer`, a set of subscribed topics is maintained. The set is + // computed when a new generation is created or when the group is restored from the log. + private var subscribedTopics: Option[Set[String]] = None + + var newMemberAdded: Boolean = false + + def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun) + + def is(groupState: GroupState): Boolean = state == groupState + def has(memberId: String): Boolean = members.contains(memberId) + def get(memberId: String): MemberMetadata = members(memberId) + def size: Int = members.size + + def isLeader(memberId: String): Boolean = leaderId.contains(memberId) + def leaderOrNull: String = leaderId.orNull + def currentStateTimestampOrDefault: Long = currentStateTimestamp.getOrElse(-1) + + def isConsumerGroup: Boolean = protocolType.contains(ConsumerProtocol.PROTOCOL_TYPE) + + def add(member: MemberMetadata, callback: JoinCallback = null): Unit = { + member.groupInstanceId.foreach { instanceId => + if (staticMembers.contains(instanceId)) + throw new IllegalStateException(s"Static member with groupInstanceId=$instanceId " + + s"cannot be added to group $groupId since it is already a member") + staticMembers.put(instanceId, member.memberId) + } + + if (members.isEmpty) + this.protocolType = Some(member.protocolType) + + assert(this.protocolType.orNull == member.protocolType) + assert(supportsProtocols(member.protocolType, MemberMetadata.plainProtocolSet(member.supportedProtocols))) + + if (leaderId.isEmpty) + leaderId = Some(member.memberId) + + members.put(member.memberId, member) + incSupportedProtocols(member) + member.awaitingJoinCallback = callback + + if (member.isAwaitingJoin) + numMembersAwaitingJoin += 1 + + pendingMembers.remove(member.memberId) + } + + def remove(memberId: String): Unit = { + members.remove(memberId).foreach { member => + decSupportedProtocols(member) + if (member.isAwaitingJoin) + numMembersAwaitingJoin -= 1 + + member.groupInstanceId.foreach(staticMembers.remove) + } + + if (isLeader(memberId)) + leaderId = members.keys.headOption + + pendingMembers.remove(memberId) + pendingSyncMembers.remove(memberId) + } + + /** + * Check whether current leader is rejoined. If not, try to find another joined member to be + * new leader. Return false if + * 1. the group is currently empty (has no designated leader) + * 2. no member rejoined + */ + def maybeElectNewJoinedLeader(): Boolean = { + leaderId.exists { currentLeaderId => + val currentLeader = get(currentLeaderId) + if (!currentLeader.isAwaitingJoin) { + members.find(_._2.isAwaitingJoin) match { + case Some((anyJoinedMemberId, anyJoinedMember)) => + leaderId = Option(anyJoinedMemberId) + info(s"Group leader [member.id: ${currentLeader.memberId}, " + + s"group.instance.id: ${currentLeader.groupInstanceId}] failed to join " + + s"before rebalance timeout, while new leader $anyJoinedMember was elected.") + true + + case None => + info(s"Group leader [member.id: ${currentLeader.memberId}, " + + s"group.instance.id: ${currentLeader.groupInstanceId}] failed to join " + + s"before rebalance timeout, and the group couldn't proceed to next generation" + + s"because no member joined.") + false + } + } else { + true + } + } + } + + /** + * [For static members only]: Replace the old member id with the new one, + * keep everything else unchanged and return the updated member. + */ + def replaceStaticMember( + groupInstanceId: String, + oldMemberId: String, + newMemberId: String + ): MemberMetadata = { + val memberMetadata = members.remove(oldMemberId) + .getOrElse(throw new IllegalArgumentException(s"Cannot replace non-existing member id $oldMemberId")) + + // Fence potential duplicate member immediately if someone awaits join/sync callback. + maybeInvokeJoinCallback(memberMetadata, JoinGroupResult(oldMemberId, Errors.FENCED_INSTANCE_ID)) + maybeInvokeSyncCallback(memberMetadata, SyncGroupResult(Errors.FENCED_INSTANCE_ID)) + + memberMetadata.memberId = newMemberId + members.put(newMemberId, memberMetadata) + + if (isLeader(oldMemberId)) { + leaderId = Some(newMemberId) + } + + staticMembers.put(groupInstanceId, newMemberId) + memberMetadata + } + + def isPendingMember(memberId: String): Boolean = pendingMembers.contains(memberId) + + def addPendingMember(memberId: String): Boolean = { + if (has(memberId)) { + throw new IllegalStateException(s"Attempt to add pending member $memberId which is already " + + s"a stable member of the group") + } + pendingMembers.add(memberId) + } + + def addPendingSyncMember(memberId: String): Boolean = { + if (!has(memberId)) { + throw new IllegalStateException(s"Attempt to add a pending sync for member $memberId which " + + "is not a member of the group") + } + pendingSyncMembers.add(memberId) + } + + def removePendingSyncMember(memberId: String): Boolean = { + if (!has(memberId)) { + throw new IllegalStateException(s"Attempt to remove a pending sync for member $memberId which " + + "is not a member of the group") + } + pendingSyncMembers.remove(memberId) + } + + def hasReceivedSyncFromAllMembers: Boolean = { + pendingSyncMembers.isEmpty + } + + def allPendingSyncMembers: Set[String] = { + pendingSyncMembers.toSet + } + + def clearPendingSyncMembers(): Unit = { + pendingSyncMembers.clear() + } + + def hasStaticMember(groupInstanceId: String): Boolean = { + staticMembers.contains(groupInstanceId) + } + + def currentStaticMemberId(groupInstanceId: String): Option[String] = { + staticMembers.get(groupInstanceId) + } + + def currentState: GroupState = state + + def notYetRejoinedMembers: Map[String, MemberMetadata] = members.filterNot(_._2.isAwaitingJoin).toMap + + def hasAllMembersJoined: Boolean = members.size == numMembersAwaitingJoin && pendingMembers.isEmpty + + def allMembers: collection.Set[String] = members.keySet + + def allStaticMembers: collection.Set[String] = staticMembers.keySet + + // For testing only. + private[group] def allDynamicMembers: Set[String] = { + val dynamicMemberSet = new mutable.HashSet[String] + allMembers.foreach(memberId => dynamicMemberSet.add(memberId)) + staticMembers.values.foreach(memberId => dynamicMemberSet.remove(memberId)) + dynamicMemberSet.toSet + } + + def numPending: Int = pendingMembers.size + + def numAwaiting: Int = numMembersAwaitingJoin + + def allMemberMetadata: List[MemberMetadata] = members.values.toList + + def rebalanceTimeoutMs: Int = members.values.foldLeft(0) { (timeout, member) => + timeout.max(member.rebalanceTimeoutMs) + } + + def generateMemberId(clientId: String, + groupInstanceId: Option[String]): String = { + groupInstanceId match { + case None => + clientId + GroupMetadata.MemberIdDelimiter + UUID.randomUUID().toString + case Some(instanceId) => + instanceId + GroupMetadata.MemberIdDelimiter + UUID.randomUUID().toString + } + } + + def canRebalance: Boolean = PreparingRebalance.validPreviousStates.contains(state) + + def transitionTo(groupState: GroupState): Unit = { + assertValidTransition(groupState) + state = groupState + currentStateTimestamp = Some(time.milliseconds()) + } + + def selectProtocol: String = { + if (members.isEmpty) + throw new IllegalStateException("Cannot select protocol for empty group") + + // select the protocol for this group which is supported by all members + val candidates = candidateProtocols + + // let each member vote for one of the protocols and choose the one with the most votes + val (protocol, _) = allMemberMetadata + .map(_.vote(candidates)) + .groupBy(identity) + .maxBy { case (_, votes) => votes.size } + + protocol + } + + private def incSupportedProtocols(member: MemberMetadata): Unit = { + member.supportedProtocols.foreach { case (protocol, _) => supportedProtocols(protocol) += 1 } + } + + private def decSupportedProtocols(member: MemberMetadata): Unit = { + member.supportedProtocols.foreach { case (protocol, _) => supportedProtocols(protocol) -= 1 } + } + + private def candidateProtocols: Set[String] = { + // get the set of protocols that are commonly supported by all members + val numMembers = members.size + supportedProtocols.filter(_._2 == numMembers).keys.toSet + } + + def supportsProtocols(memberProtocolType: String, memberProtocols: Set[String]): Boolean = { + if (is(Empty)) + memberProtocolType.nonEmpty && memberProtocols.nonEmpty + else + protocolType.contains(memberProtocolType) && memberProtocols.exists(supportedProtocols(_) == members.size) + } + + def getSubscribedTopics: Option[Set[String]] = subscribedTopics + + /** + * Returns true if the consumer group is actively subscribed to the topic. When the consumer + * group does not know, because the information is not available yet or because the it has + * failed to parse the Consumer Protocol, it returns true to be safe. + */ + def isSubscribedToTopic(topic: String): Boolean = subscribedTopics match { + case Some(topics) => topics.contains(topic) + case None => true + } + + /** + * Collects the set of topics that the members are subscribed to when the Protocol Type is equal + * to 'consumer'. None is returned if + * - the protocol type is not equal to 'consumer'; + * - the protocol is not defined yet; or + * - the protocol metadata does not comply with the schema. + */ + private[group] def computeSubscribedTopics(): Option[Set[String]] = { + protocolType match { + case Some(ConsumerProtocol.PROTOCOL_TYPE) if members.nonEmpty && protocolName.isDefined => + try { + Some( + members.map { case (_, member) => + // The consumer protocol is parsed with V0 which is the based prefix of all versions. + // This way the consumer group manager does not depend on any specific existing or + // future versions of the consumer protocol. VO must prefix all new versions. + val buffer = ByteBuffer.wrap(member.metadata(protocolName.get)) + ConsumerProtocol.deserializeVersion(buffer) + ConsumerProtocol.deserializeSubscription(buffer, 0).topics.asScala.toSet + }.reduceLeft(_ ++ _) + ) + } catch { + case e: SchemaException => + warn(s"Failed to parse Consumer Protocol ${ConsumerProtocol.PROTOCOL_TYPE}:${protocolName.get} " + + s"of group $groupId. Consumer group coordinator is not aware of the subscribed topics.", e) + None + } + + case Some(ConsumerProtocol.PROTOCOL_TYPE) if members.isEmpty => + Option(Set.empty) + + case _ => None + } + } + + def updateMember(member: MemberMetadata, + protocols: List[(String, Array[Byte])], + rebalanceTimeoutMs: Int, + sessionTimeoutMs: Int, + callback: JoinCallback): Unit = { + decSupportedProtocols(member) + member.supportedProtocols = protocols + incSupportedProtocols(member) + member.rebalanceTimeoutMs = rebalanceTimeoutMs + member.sessionTimeoutMs = sessionTimeoutMs + + if (callback != null && !member.isAwaitingJoin) { + numMembersAwaitingJoin += 1 + } else if (callback == null && member.isAwaitingJoin) { + numMembersAwaitingJoin -= 1 + } + member.awaitingJoinCallback = callback + } + + def maybeInvokeJoinCallback(member: MemberMetadata, + joinGroupResult: JoinGroupResult): Unit = { + if (member.isAwaitingJoin) { + try { + member.awaitingJoinCallback(joinGroupResult) + } catch { + case t: Throwable => + error(s"Failed to invoke join callback for $member due to ${t.getMessage}.", t) + member.awaitingJoinCallback(JoinGroupResult(member.memberId, Errors.UNKNOWN_SERVER_ERROR)) + } finally { + member.awaitingJoinCallback = null + numMembersAwaitingJoin -= 1 + } + } + } + + /** + * @return true if a sync callback actually performs. + */ + def maybeInvokeSyncCallback(member: MemberMetadata, + syncGroupResult: SyncGroupResult): Boolean = { + if (member.isAwaitingSync) { + try { + member.awaitingSyncCallback(syncGroupResult) + } catch { + case t: Throwable => + error(s"Failed to invoke sync callback for $member due to ${t.getMessage}.", t) + member.awaitingSyncCallback(SyncGroupResult(Errors.UNKNOWN_SERVER_ERROR)) + } finally { + member.awaitingSyncCallback = null + } + true + } else { + false + } + } + + def initNextGeneration(): Unit = { + if (members.nonEmpty) { + generationId += 1 + protocolName = Some(selectProtocol) + subscribedTopics = computeSubscribedTopics() + transitionTo(CompletingRebalance) + } else { + generationId += 1 + protocolName = None + subscribedTopics = computeSubscribedTopics() + transitionTo(Empty) + } + receivedConsumerOffsetCommits = false + receivedTransactionalOffsetCommits = false + clearPendingSyncMembers() + } + + def currentMemberMetadata: List[JoinGroupResponseMember] = { + if (is(Dead) || is(PreparingRebalance)) + throw new IllegalStateException("Cannot obtain member metadata for group in state %s".format(state)) + members.map{ case (memberId, memberMetadata) => new JoinGroupResponseMember() + .setMemberId(memberId) + .setGroupInstanceId(memberMetadata.groupInstanceId.orNull) + .setMetadata(memberMetadata.metadata(protocolName.get)) + }.toList + } + + def summary: GroupSummary = { + if (is(Stable)) { + val protocol = protocolName.orNull + if (protocol == null) + throw new IllegalStateException("Invalid null group protocol for stable group") + + val members = this.members.values.map { member => member.summary(protocol) } + GroupSummary(state.toString, protocolType.getOrElse(""), protocol, members.toList) + } else { + val members = this.members.values.map{ member => member.summaryNoMetadata() } + GroupSummary(state.toString, protocolType.getOrElse(""), GroupCoordinator.NoProtocol, members.toList) + } + } + + def overview: GroupOverview = { + GroupOverview(groupId, protocolType.getOrElse(""), state.toString, Group.GroupType.CLASSIC.toString) + } + + def initializeOffsets(offsets: collection.Map[TopicPartition, CommitRecordMetadataAndOffset], + pendingTxnOffsets: Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]): Unit = { + this.offsets ++= offsets + this.pendingTransactionalOffsetCommits ++= pendingTxnOffsets + } + + def onOffsetCommitAppend(topicIdPartition: TopicIdPartition, offsetWithCommitRecordMetadata: CommitRecordMetadataAndOffset): Unit = { + val topicPartition = topicIdPartition.topicPartition + if (pendingOffsetCommits.contains(topicPartition)) { + if (offsetWithCommitRecordMetadata.appendedBatchOffset.isEmpty) + throw new IllegalStateException("Cannot complete offset commit write without providing the metadata of the record " + + "in the log.") + if (!offsets.contains(topicPartition) || offsets(topicPartition).olderThan(offsetWithCommitRecordMetadata)) + offsets.put(topicPartition, offsetWithCommitRecordMetadata) + } + + pendingOffsetCommits.get(topicPartition) match { + case Some(stagedOffset) if offsetWithCommitRecordMetadata.offsetAndMetadata == stagedOffset => + pendingOffsetCommits.remove(topicPartition) + case _ => + // The pendingOffsetCommits for this partition could be empty if the topic was deleted, in which case + // its entries would be removed from the cache by the `removeOffsets` method. + } + } + + def failPendingOffsetWrite(topicIdPartition: TopicIdPartition, offset: OffsetAndMetadata): Unit = { + val topicPartition = topicIdPartition.topicPartition + pendingOffsetCommits.get(topicPartition) match { + case Some(pendingOffset) if offset == pendingOffset => pendingOffsetCommits.remove(topicPartition) + case _ => + } + } + + def prepareOffsetCommit(offsets: Map[TopicIdPartition, OffsetAndMetadata]): Unit = { + receivedConsumerOffsetCommits = true + offsets.foreachEntry { (topicIdPartition, offsetAndMetadata) => + pendingOffsetCommits += topicIdPartition.topicPartition -> offsetAndMetadata + } + } + + def prepareTxnOffsetCommit(producerId: Long, offsets: Map[TopicIdPartition, OffsetAndMetadata]): Unit = { + trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $offsets is pending") + receivedTransactionalOffsetCommits = true + val producerOffsets = pendingTransactionalOffsetCommits.getOrElseUpdate(producerId, + mutable.Map.empty[TopicPartition, CommitRecordMetadataAndOffset]) + + offsets.foreachEntry { (topicIdPartition, offsetAndMetadata) => + producerOffsets.put(topicIdPartition.topicPartition, CommitRecordMetadataAndOffset(None, offsetAndMetadata)) + } + } + + def hasReceivedConsistentOffsetCommits : Boolean = { + !receivedConsumerOffsetCommits || !receivedTransactionalOffsetCommits + } + + /* Remove a pending transactional offset commit if the actual offset commit record was not written to the log. + * We will return an error and the client will retry the request, potentially to a different coordinator. + */ + def failPendingTxnOffsetCommit(producerId: Long, topicIdPartition: TopicIdPartition): Unit = { + val topicPartition = topicIdPartition.topicPartition + pendingTransactionalOffsetCommits.get(producerId) match { + case Some(pendingOffsets) => + val pendingOffsetCommit = pendingOffsets.remove(topicPartition) + trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetCommit failed " + + s"to be appended to the log") + if (pendingOffsets.isEmpty) + pendingTransactionalOffsetCommits.remove(producerId) + case _ => + // We may hit this case if the partition in question has emigrated already. + } + } + + def onTxnOffsetCommitAppend(producerId: Long, topicIdPartition: TopicIdPartition, + commitRecordMetadataAndOffset: CommitRecordMetadataAndOffset): Unit = { + val topicPartition = topicIdPartition.topicPartition + pendingTransactionalOffsetCommits.get(producerId) match { + case Some(pendingOffset) => + if (pendingOffset.contains(topicPartition) + && pendingOffset(topicPartition).offsetAndMetadata == commitRecordMetadataAndOffset.offsetAndMetadata) + pendingOffset.update(topicPartition, commitRecordMetadataAndOffset) + case _ => + // We may hit this case if the partition in question has emigrated. + } + } + + /* Complete a pending transactional offset commit. This is called after a commit or abort marker is fully written + * to the log. + */ + def completePendingTxnOffsetCommit(producerId: Long, isCommit: Boolean): Unit = { + val pendingOffsetsOpt = pendingTransactionalOffsetCommits.remove(producerId) + if (isCommit) { + pendingOffsetsOpt.foreach { pendingOffsets => + pendingOffsets.foreachEntry { (topicPartition, commitRecordMetadataAndOffset) => + if (commitRecordMetadataAndOffset.appendedBatchOffset.isEmpty) + throw new IllegalStateException(s"Trying to complete a transactional offset commit for producerId $producerId " + + s"and groupId $groupId even though the offset commit record itself hasn't been appended to the log.") + + val currentOffsetOpt = offsets.get(topicPartition) + if (currentOffsetOpt.forall(_.olderThan(commitRecordMetadataAndOffset))) { + trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " + + "committed and loaded into the cache.") + offsets.put(topicPartition, commitRecordMetadataAndOffset) + } else { + trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offset $commitRecordMetadataAndOffset " + + s"committed, but not loaded since its offset is older than current offset $currentOffsetOpt.") + } + } + } + } else { + trace(s"TxnOffsetCommit for producer $producerId and group $groupId with offsets $pendingOffsetsOpt aborted") + } + } + + def activeProducers: collection.Set[Long] = pendingTransactionalOffsetCommits.keySet + + def hasPendingOffsetCommitsFromProducer(producerId: Long): Boolean = + pendingTransactionalOffsetCommits.contains(producerId) + + def hasPendingOffsetCommitsForTopicPartition(topicPartition: TopicPartition): Boolean = { + pendingOffsetCommits.contains(topicPartition) || + pendingTransactionalOffsetCommits.exists( + _._2.contains(topicPartition) + ) + } + + def removeAllOffsets(): immutable.Map[TopicPartition, OffsetAndMetadata] = removeOffsets(offsets.keySet.toSeq) + + def removeOffsets(topicPartitions: Seq[TopicPartition]): immutable.Map[TopicPartition, OffsetAndMetadata] = { + topicPartitions.flatMap { topicPartition => + pendingOffsetCommits.remove(topicPartition) + pendingTransactionalOffsetCommits.foreachEntry { (_, pendingOffsets) => + pendingOffsets.remove(topicPartition) + } + val removedOffset = offsets.remove(topicPartition) + removedOffset.map(topicPartition -> _.offsetAndMetadata) + }.toMap + } + + def removeExpiredOffsets(currentTimestamp: Long, offsetRetentionMs: Long): Map[TopicPartition, OffsetAndMetadata] = { + + def getExpiredOffsets(baseTimestamp: CommitRecordMetadataAndOffset => Long, + subscribedTopics: Set[String] = Set.empty): Map[TopicPartition, OffsetAndMetadata] = { + offsets.filter { + case (topicPartition, commitRecordMetadataAndOffset) => + !subscribedTopics.contains(topicPartition.topic()) && + !pendingOffsetCommits.contains(topicPartition) && { + if (commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestampMs.isEmpty) { + // current version with no per partition retention + currentTimestamp - baseTimestamp(commitRecordMetadataAndOffset) >= offsetRetentionMs + } else { + // older versions with explicit expire_timestamp field => old expiration semantics is used + currentTimestamp >= commitRecordMetadataAndOffset.offsetAndMetadata.expireTimestampMs.getAsLong + } + } + }.map { + case (topicPartition, commitRecordOffsetAndMetadata) => + (topicPartition, commitRecordOffsetAndMetadata.offsetAndMetadata) + }.toMap + } + + val expiredOffsets: Map[TopicPartition, OffsetAndMetadata] = protocolType match { + case Some(_) if is(Empty) => + // no consumer exists in the group => + // - if current state timestamp exists and retention period has passed since group became Empty, + // expire all offsets with no pending offset commit; + // - if there is no current state timestamp (old group metadata schema) and retention period has passed + // since the last commit timestamp, expire the offset + getExpiredOffsets( + commitRecordMetadataAndOffset => currentStateTimestamp + .getOrElse(commitRecordMetadataAndOffset.offsetAndMetadata.commitTimestampMs) + ) + + case Some(ConsumerProtocol.PROTOCOL_TYPE) if subscribedTopics.isDefined && is(Stable) => + // consumers exist in the group and group is stable => + // - if the group is aware of the subscribed topics and retention period had passed since the + // the last commit timestamp, expire the offset. offset with pending offset commit are not + // expired + getExpiredOffsets( + _.offsetAndMetadata.commitTimestampMs, + subscribedTopics.get + ) + + case None => + // protocolType is None => standalone (simple) consumer, that uses Kafka for offset storage only + // expire offsets with no pending offset commit that retention period has passed since their last commit + getExpiredOffsets(_.offsetAndMetadata.commitTimestampMs) + + case _ => + Map() + } + + if (expiredOffsets.nonEmpty) + debug(s"Expired offsets from group '$groupId': ${expiredOffsets.keySet}") + + offsets --= expiredOffsets.keySet + expiredOffsets + } + + def allOffsets: Map[TopicPartition, OffsetAndMetadata] = offsets.map { case (topicPartition, commitRecordMetadataAndOffset) => + (topicPartition, commitRecordMetadataAndOffset.offsetAndMetadata) + }.toMap + + def offset(topicPartition: TopicPartition): Option[OffsetAndMetadata] = offsets.get(topicPartition).map(_.offsetAndMetadata) + + // visible for testing + private[group] def offsetWithRecordMetadata(topicPartition: TopicPartition): Option[CommitRecordMetadataAndOffset] = offsets.get(topicPartition) + + // Used for testing + private[group] def pendingOffsetCommit(topicIdPartition: TopicIdPartition): Option[OffsetAndMetadata] = { + pendingOffsetCommits.get(topicIdPartition.topicPartition) + } + + // Used for testing + private[group] def pendingTxnOffsetCommit(producerId: Long, topicIdPartition: TopicIdPartition): Option[CommitRecordMetadataAndOffset] = { + pendingTransactionalOffsetCommits.get(producerId).flatMap(_.get(topicIdPartition.topicPartition)) + } + + def numOffsets: Int = offsets.size + + def hasOffsets: Boolean = offsets.nonEmpty || pendingOffsetCommits.nonEmpty || pendingTransactionalOffsetCommits.nonEmpty + + private def assertValidTransition(targetState: GroupState): Unit = { + if (!targetState.validPreviousStates.contains(state)) + throw new IllegalStateException("Group %s should be in the %s states before moving to %s state. Instead it is in %s state" + .format(groupId, targetState.validPreviousStates.mkString(","), targetState, state)) + } + + def isInStates(states: collection.Set[String]): Boolean = { + states.contains(state.toLowerCaseString) + } + + override def toString: String = { + "GroupMetadata(" + + s"groupId=$groupId, " + + s"generation=$generationId, " + + s"protocolType=$protocolType, " + + s"currentState=$currentState, " + + s"members=$members)" + } + +} + diff --git a/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala new file mode 100644 index 0000000000000..97ce088d48f2a --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/GroupMetadataManager.scala @@ -0,0 +1,1282 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import java.nio.ByteBuffer +import java.util.{Optional, OptionalInt, OptionalLong} +import java.util.concurrent.atomic.AtomicBoolean +import java.util.concurrent.locks.ReentrantLock +import java.util.concurrent.{CompletableFuture, ConcurrentHashMap} +import java.util.function.Supplier +import com.yammer.metrics.core.Gauge +import kafka.cluster.Partition +import kafka.coordinator.group.GroupMetadataManager.maybeConvertOffsetCommitError +import kafka.server.ReplicaManager +import kafka.utils.CoreUtils.inLock +import kafka.utils._ +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.metrics.{Metrics, Sensor} +import org.apache.kafka.common.metrics.stats.{Avg, Max, Meter} +import org.apache.kafka.common.protocol.{ByteBufferAccessor, Errors, MessageUtil} +import org.apache.kafka.common.record._ +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData +import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse +import org.apache.kafka.common.requests.{OffsetCommitRequest, OffsetFetchResponse} +import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import org.apache.kafka.coordinator.group.{OffsetAndMetadata, OffsetConfig} +import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitKey, OffsetCommitValue, GroupMetadataKey => GroupMetadataKeyData} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.util.KafkaScheduler +import org.apache.kafka.storage.internals.log.{AppendOrigin, VerificationGuard} + +import scala.collection._ +import scala.collection.mutable.ArrayBuffer +import scala.jdk.CollectionConverters._ + +class GroupMetadataManager(brokerId: Int, + config: OffsetConfig, + val replicaManager: ReplicaManager, + time: Time, + metrics: Metrics) extends Logging { + // Visible for test. + private[group] val metricsGroup: KafkaMetricsGroup = new KafkaMetricsGroup(this.getClass) + + private val compression: Compression = Compression.of(config.offsetsTopicCompressionType).build() + + private val groupMetadataCache = new Pool[String, GroupMetadata] + + /* lock protecting access to loading and owned partition sets */ + private val partitionLock = new ReentrantLock() + + /* partitions of consumer groups that are being loaded, its lock should be always called BEFORE the group lock if needed */ + private val loadingPartitions: mutable.Set[Int] = mutable.Set() + + /* partitions of consumer groups that are assigned, using the same loading partition lock */ + private val ownedPartitions: mutable.Set[Int] = mutable.Set() + + /* shutting down flag */ + private val shuttingDown = new AtomicBoolean(false) + + /* number of partitions for the consumer metadata topic */ + @volatile private var groupMetadataTopicPartitionCount: Int = _ + + /* single-thread scheduler to handle offset/group metadata cache loading and unloading */ + private val scheduler = new KafkaScheduler(1, true, "group-metadata-manager-") + + /* The groups with open transactional offsets commits per producer. We need this because when the commit or abort + * marker comes in for a transaction, it is for a particular partition on the offsets topic and a particular producerId. + * We use this structure to quickly find the groups which need to be updated by the commit/abort marker. */ + private val openGroupsForProducer = mutable.HashMap[Long, mutable.Set[String]]() + + /* Track the epoch in which we (un)loaded group state to detect racing LeaderAndIsr requests */ + private [group] val epochForPartitionId = new ConcurrentHashMap[Int, java.lang.Integer]() + + /* setup metrics*/ + private val partitionLoadSensor = metrics.sensor(GroupMetadataManager.LoadTimeSensor) + + partitionLoadSensor.add(metrics.metricName("partition-load-time-max", + GroupMetadataManager.MetricsGroup, + "The max time it took to load the partitions in the last 30sec"), new Max()) + partitionLoadSensor.add(metrics.metricName("partition-load-time-avg", + GroupMetadataManager.MetricsGroup, + "The avg time it took to load the partitions in the last 30sec"), new Avg()) + + val offsetCommitsSensor: Sensor = metrics.sensor(GroupMetadataManager.OffsetCommitsSensor) + + offsetCommitsSensor.add(new Meter( + metrics.metricName("offset-commit-rate", + "group-coordinator-metrics", + "The rate of committed offsets"), + metrics.metricName("offset-commit-count", + "group-coordinator-metrics", + "The total number of committed offsets"))) + + val offsetExpiredSensor: Sensor = metrics.sensor(GroupMetadataManager.OffsetExpiredSensor) + + offsetExpiredSensor.add(new Meter( + metrics.metricName("offset-expiration-rate", + "group-coordinator-metrics", + "The rate of expired offsets"), + metrics.metricName("offset-expiration-count", + "group-coordinator-metrics", + "The total number of expired offsets"))) + + this.logIdent = s"[GroupMetadataManager brokerId=$brokerId] " + + private def recreateGauge[T](name: String, metric: Supplier[T]): Gauge[T] = { + metricsGroup.removeMetric(name) + metricsGroup.newGauge(name, metric) + } + + recreateGauge("NumOffsets", + () => groupMetadataCache.values.map { group => + group.inLock { group.numOffsets } + }.sum + ) + + recreateGauge("NumGroups", + () => groupMetadataCache.size + ) + + recreateGauge("NumGroupsPreparingRebalance", + () => groupMetadataCache.values.count { group => + group synchronized { + group.is(PreparingRebalance) + } + }) + + recreateGauge("NumGroupsCompletingRebalance", + () => groupMetadataCache.values.count { group => + group synchronized { + group.is(CompletingRebalance) + } + }) + + recreateGauge("NumGroupsStable", + () => groupMetadataCache.values.count { group => + group synchronized { + group.is(Stable) + } + }) + + recreateGauge("NumGroupsDead", + () => groupMetadataCache.values.count { group => + group synchronized { + group.is(Dead) + } + }) + + recreateGauge("NumGroupsEmpty", + () => groupMetadataCache.values.count { group => + group synchronized { + group.is(Empty) + } + }) + + def startup(retrieveGroupMetadataTopicPartitionCount: () => Int, enableMetadataExpiration: Boolean): Unit = { + groupMetadataTopicPartitionCount = retrieveGroupMetadataTopicPartitionCount() + scheduler.startup() + if (enableMetadataExpiration) { + scheduler.schedule("delete-expired-group-metadata", + () => cleanupGroupMetadata(), + 0L, + config.offsetsRetentionCheckIntervalMs) + } + } + + def currentGroups: Iterable[GroupMetadata] = groupMetadataCache.values + + def isPartitionOwned(partition: Int): Boolean = inLock(partitionLock) { ownedPartitions.contains(partition) } + + def isPartitionLoading(partition: Int): Boolean = inLock(partitionLock) { loadingPartitions.contains(partition) } + + def partitionFor(groupId: String): Int = Utils.abs(groupId.hashCode) % groupMetadataTopicPartitionCount + + def isGroupLocal(groupId: String): Boolean = isPartitionOwned(partitionFor(groupId)) + + def isGroupLoading(groupId: String): Boolean = isPartitionLoading(partitionFor(groupId)) + + def isLoading: Boolean = inLock(partitionLock) { loadingPartitions.nonEmpty } + + // return true iff group is owned and the group doesn't exist + def groupNotExists(groupId: String): Boolean = inLock(partitionLock) { + isGroupLocal(groupId) && getGroup(groupId).forall { group => + group.inLock(group.is(Dead)) + } + } + + /** + * Get the group associated with the given groupId or null if not found + */ + def getGroup(groupId: String): Option[GroupMetadata] = { + Option(groupMetadataCache.get(groupId)) + } + + /** + * Get the group associated with the given groupId - the group is created if createIfNotExist + * is true - or null if not found + */ + def getOrMaybeCreateGroup(groupId: String, createIfNotExist: Boolean): Option[GroupMetadata] = { + if (createIfNotExist) + Option(groupMetadataCache.getAndMaybePut(groupId, new GroupMetadata(groupId, Empty, time))) + else + Option(groupMetadataCache.get(groupId)) + } + + /** + * Add a group or get the group associated with the given groupId if it already exists + */ + def addGroup(group: GroupMetadata): GroupMetadata = { + val currentGroup = groupMetadataCache.putIfNotExists(group.groupId, group) + if (currentGroup != null) { + currentGroup + } else { + group + } + } + + def storeGroup(group: GroupMetadata, + groupAssignment: Map[String, Array[Byte]], + responseCallback: Errors => Unit, + requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { + if (onlinePartition(partitionFor(group.groupId)).isDefined) { + // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. + val timestampType = TimestampType.CREATE_TIME + val timestamp = time.milliseconds() + val key = GroupMetadataManager.groupMetadataKey(group.groupId) + val value = GroupMetadataManager.groupMetadataValue(group, groupAssignment) + + val records = { + val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(RecordBatch.CURRENT_MAGIC_VALUE, compression.`type`(), + Seq(new SimpleRecord(timestamp, key, value)).asJava)) + val builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, compression, timestampType, 0L) + builder.append(timestamp, key, value) + builder.build() + } + + val groupMetadataPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) + val groupMetadataRecords = Map(groupMetadataPartition -> records) + val generationId = group.generationId + + // set the callback function to insert the created group into cache after log append completed + def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { + // the append response should only contain the topics partition + if (responseStatus.size != 1 || !responseStatus.contains(groupMetadataPartition)) + throw new IllegalStateException("Append status %s should only have one partition %s" + .format(responseStatus, groupMetadataPartition)) + + // construct the error status in the propagated assignment response in the cache + val status = responseStatus(groupMetadataPartition) + + val responseError = if (status.error == Errors.NONE) { + Errors.NONE + } else { + debug(s"Metadata from group ${group.groupId} with generation $generationId failed when appending to log " + + s"due to ${status.error.exceptionName}") + + // transform the log append error code to the corresponding the commit status error code + status.error match { + case Errors.UNKNOWN_TOPIC_OR_PARTITION + | Errors.NOT_ENOUGH_REPLICAS + | Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND => + Errors.COORDINATOR_NOT_AVAILABLE + + case Errors.NOT_LEADER_OR_FOLLOWER + | Errors.KAFKA_STORAGE_ERROR => + Errors.NOT_COORDINATOR + + case Errors.REQUEST_TIMED_OUT => + Errors.REBALANCE_IN_PROGRESS + + case Errors.MESSAGE_TOO_LARGE + | Errors.RECORD_LIST_TOO_LARGE + | Errors.INVALID_FETCH_SIZE => + + error(s"Appending metadata message for group ${group.groupId} generation $generationId failed due to " + + s"${status.error.exceptionName}, returning UNKNOWN error code to the client") + + Errors.UNKNOWN_SERVER_ERROR + + case other => + error(s"Appending metadata message for group ${group.groupId} generation $generationId failed " + + s"due to unexpected error: ${status.error.exceptionName}") + + other + } + } + + responseCallback(responseError) + } + + appendForGroup(group, groupMetadataRecords, requestLocal, putCacheCallback) + } else { + responseCallback(Errors.NOT_COORDINATOR) + } + } + + // This method should be called under the group lock to ensure atomicity of the update to the the in-memory and persisted state. + private def appendForGroup( + group: GroupMetadata, + records: Map[TopicPartition, MemoryRecords], + requestLocal: RequestLocal, + callback: Map[TopicPartition, PartitionResponse] => Unit, + verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty + ): Unit = { + // call replica manager to append the group message + replicaManager.appendRecords( + timeout = config.offsetCommitTimeoutMs.toLong, + requiredAcks = -1, + internalTopicsAllowed = true, + origin = AppendOrigin.COORDINATOR, + entriesPerPartition = records, + delayedProduceLock = Some(group.lock), + responseCallback = callback, + requestLocal = requestLocal, + verificationGuards = verificationGuards + ) + } + + private def generateOffsetRecords(magicValue: Byte, + isTxnOffsetCommit: Boolean, + groupId: String, + offsetTopicPartition: TopicPartition, + filteredOffsetMetadata: Map[TopicIdPartition, OffsetAndMetadata], + producerId: Long, + producerEpoch: Short): Map[TopicPartition, MemoryRecords] = { + // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. + val timestampType = TimestampType.CREATE_TIME + val timestamp = time.milliseconds() + + val records = filteredOffsetMetadata.map { case (topicIdPartition, offsetAndMetadata) => + val key = GroupMetadataManager.offsetCommitKey(groupId, topicIdPartition.topicPartition) + val value = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) + new SimpleRecord(timestamp, key, value) + } + val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, compression.`type`(), records.asJava)) + + if (isTxnOffsetCommit && magicValue < RecordBatch.MAGIC_VALUE_V2) + throw Errors.UNSUPPORTED_FOR_MESSAGE_FORMAT.exception("Attempting to make a transaction offset commit with an invalid magic: " + magicValue) + + val builder = MemoryRecords.builder(buffer, magicValue, compression, timestampType, 0L, time.milliseconds(), + producerId, producerEpoch, 0, isTxnOffsetCommit, RecordBatch.NO_PARTITION_LEADER_EPOCH) + + records.foreach(builder.append) + Map(offsetTopicPartition -> builder.build()) + } + + private def createPutCacheCallback(isTxnOffsetCommit: Boolean, + group: GroupMetadata, + consumerId: String, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + filteredOffsetMetadata: Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + producerId: Long, + records: Map[TopicPartition, MemoryRecords]): Map[TopicPartition, PartitionResponse] => Unit = { + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partitionFor(group.groupId)) + // set the callback function to insert offsets into cache after log append completed + def putCacheCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { + // the append response should only contain the topics partition + if (responseStatus.size != 1 || !responseStatus.contains(offsetTopicPartition)) + throw new IllegalStateException("Append status %s should only have one partition %s" + .format(responseStatus, offsetTopicPartition)) + + // construct the commit response status and insert + // the offset and metadata to cache if the append status has no error + val status = responseStatus(offsetTopicPartition) + + val responseError = group.inLock { + if (status.error == Errors.NONE) { + if (!group.is(Dead)) { + filteredOffsetMetadata.foreachEntry { (topicIdPartition, offsetAndMetadata) => + if (isTxnOffsetCommit) + group.onTxnOffsetCommitAppend(producerId, topicIdPartition, CommitRecordMetadataAndOffset(Some(status.baseOffset), offsetAndMetadata)) + else + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(status.baseOffset), offsetAndMetadata)) + } + } + + // Record the number of offsets committed to the log + offsetCommitsSensor.record(records.size) + + Errors.NONE + } else { + if (!group.is(Dead)) { + if (!group.hasPendingOffsetCommitsFromProducer(producerId)) + removeProducerGroup(producerId, group.groupId) + filteredOffsetMetadata.foreachEntry { (topicIdPartition, offsetAndMetadata) => + if (isTxnOffsetCommit) + group.failPendingTxnOffsetCommit(producerId, topicIdPartition) + else + group.failPendingOffsetWrite(topicIdPartition, offsetAndMetadata) + } + } + + debug(s"Offset commit $filteredOffsetMetadata from group ${group.groupId}, consumer $consumerId " + + s"with generation ${group.generationId} failed when appending to log due to ${status.error.exceptionName}") + + maybeConvertOffsetCommitError(status.error) + } + } + + // compute the final error codes for the commit response + val commitStatus = offsetMetadata.map { case (topicIdPartition, offsetAndMetadata) => + if (!validateOffsetMetadataLength(offsetAndMetadata.metadata)) + (topicIdPartition, Errors.OFFSET_METADATA_TOO_LARGE) + else + (topicIdPartition, responseError) + } + + // finally trigger the callback logic passed from the API layer + responseCallback(commitStatus) + } + putCacheCallback + } + + /** + * Store offsets by appending it to the replicated log and then inserting to cache + * + * This method should be called under the group lock to ensure validations and updates are all performed + * atomically. + */ + def storeOffsets(group: GroupMetadata, + consumerId: String, + offsetTopicPartition: TopicPartition, + offsetMetadata: immutable.Map[TopicIdPartition, OffsetAndMetadata], + responseCallback: immutable.Map[TopicIdPartition, Errors] => Unit, + producerId: Long = RecordBatch.NO_PRODUCER_ID, + producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH, + requestLocal: RequestLocal = RequestLocal.noCaching, + verificationGuard: Option[VerificationGuard]): Unit = { + if (!group.hasReceivedConsistentOffsetCommits) + warn(s"group: ${group.groupId} with leader: ${group.leaderOrNull} has received offset commits from consumers as well " + + s"as transactional producers. Mixing both types of offset commits will generally result in surprises and " + + s"should be avoided.") + + val filteredOffsetMetadata = offsetMetadata.filter { case (_, offsetAndMetadata) => + validateOffsetMetadataLength(offsetAndMetadata.metadata) + } + if (filteredOffsetMetadata.isEmpty) { + // compute the final error codes for the commit response + val commitStatus = offsetMetadata.map { case (k, _) => k -> Errors.OFFSET_METADATA_TOO_LARGE } + responseCallback(commitStatus) + return + } + + if (onlinePartition(partitionFor(group.groupId)).isEmpty) { + val commitStatus = offsetMetadata.map { case (topicIdPartition, _) => + (topicIdPartition, Errors.NOT_COORDINATOR) + } + responseCallback(commitStatus) + return + } + + val isTxnOffsetCommit = producerId != RecordBatch.NO_PRODUCER_ID + val records = generateOffsetRecords(RecordBatch.CURRENT_MAGIC_VALUE, isTxnOffsetCommit, group.groupId, offsetTopicPartition, filteredOffsetMetadata, producerId, producerEpoch) + val putCacheCallback = createPutCacheCallback(isTxnOffsetCommit, group, consumerId, offsetMetadata, filteredOffsetMetadata, responseCallback, producerId, records) + + val verificationGuards = verificationGuard.map(guard => offsetTopicPartition -> guard).toMap + + if (isTxnOffsetCommit) { + addProducerGroup(producerId, group.groupId) + group.prepareTxnOffsetCommit(producerId, filteredOffsetMetadata) + } else { + group.prepareOffsetCommit(filteredOffsetMetadata) + } + + appendForGroup(group, records, requestLocal, putCacheCallback, verificationGuards) + } + + /** + * The most important guarantee that this API provides is that it should never return a stale offset. i.e., it either + * returns the current offset or it begins to sync the cache from the log (and returns an error code). + */ + def getOffsets(groupId: String, requireStable: Boolean, topicPartitionsOpt: Option[Seq[TopicPartition]]): Map[TopicPartition, PartitionData] = { + trace("Getting offsets of %s for group %s.".format(topicPartitionsOpt.getOrElse("all partitions"), groupId)) + val group = groupMetadataCache.get(groupId) + if (group == null) { + topicPartitionsOpt.getOrElse(Seq.empty[TopicPartition]).map { topicPartition => + val partitionData = new PartitionData(OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), "", Errors.NONE) + topicPartition -> partitionData + }.toMap + } else { + group.inLock { + if (group.is(Dead)) { + topicPartitionsOpt.getOrElse(Seq.empty[TopicPartition]).map { topicPartition => + val partitionData = new PartitionData(OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), "", Errors.NONE) + topicPartition -> partitionData + }.toMap + } else { + val topicPartitions = topicPartitionsOpt.getOrElse(group.allOffsets.keySet) + + topicPartitions.map { topicPartition => + if (requireStable && group.hasPendingOffsetCommitsForTopicPartition(topicPartition)) { + topicPartition -> new PartitionData(OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), "", Errors.UNSTABLE_OFFSET_COMMIT) + } else { + val partitionData = group.offset(topicPartition) match { + case None => + new PartitionData(OffsetFetchResponse.INVALID_OFFSET, + Optional.empty(), "", Errors.NONE) + case Some(offsetAndMetadata) => + val leaderEpoch: Optional[Integer] = if (offsetAndMetadata.leaderEpoch.isPresent) { + Optional.of(offsetAndMetadata.leaderEpoch.getAsInt) + } else { + Optional.empty() + } + new PartitionData(offsetAndMetadata.committedOffset, leaderEpoch, offsetAndMetadata.metadata, Errors.NONE) + } + topicPartition -> partitionData + } + }.toMap + } + } + } + } + + /** + * Asynchronously read the partition from the offsets topic and populate the cache + */ + def scheduleLoadGroupAndOffsets(offsetsPartition: Int, coordinatorEpoch: Int, onGroupLoaded: GroupMetadata => Unit): Unit = { + val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, offsetsPartition) + info(s"Scheduling loading of offsets and group metadata from $topicPartition for epoch $coordinatorEpoch") + val startTimeMs = time.milliseconds() + scheduler.scheduleOnce(topicPartition.toString, () => loadGroupsAndOffsets(topicPartition, coordinatorEpoch, onGroupLoaded, startTimeMs)) + } + + private[group] def loadGroupsAndOffsets( + topicPartition: TopicPartition, + coordinatorEpoch: Int, + onGroupLoaded: GroupMetadata => Unit, + startTimeMs: java.lang.Long + ): Unit = { + if (!maybeUpdateCoordinatorEpoch(topicPartition.partition, OptionalInt.of(coordinatorEpoch))) { + info(s"Not loading offsets and group metadata for $topicPartition " + + s"in epoch $coordinatorEpoch since current epoch is ${epochForPartitionId.get(topicPartition.partition)}") + } else if (!addLoadingPartition(topicPartition.partition)) { + info(s"Already loading offsets and group metadata from $topicPartition") + } else { + try { + val schedulerTimeMs = time.milliseconds() - startTimeMs + debug(s"Started loading offsets and group metadata from $topicPartition for epoch $coordinatorEpoch") + doLoadGroupsAndOffsets(topicPartition, onGroupLoaded) + val endTimeMs = time.milliseconds() + val totalLoadingTimeMs = endTimeMs - startTimeMs + partitionLoadSensor.record(totalLoadingTimeMs.toDouble, endTimeMs, false) + info(s"Finished loading offsets and group metadata from $topicPartition " + + s"in $totalLoadingTimeMs milliseconds for epoch $coordinatorEpoch, of which " + + s"$schedulerTimeMs milliseconds was spent in the scheduler.") + } catch { + case t: Throwable => error(s"Error loading offsets from $topicPartition", t) + } finally { + inLock(partitionLock) { + ownedPartitions.add(topicPartition.partition) + loadingPartitions.remove(topicPartition.partition) + } + } + } + } + + private def doLoadGroupsAndOffsets(topicPartition: TopicPartition, onGroupLoaded: GroupMetadata => Unit): Unit = { + def logEndOffset: Long = replicaManager.getLogEndOffset(topicPartition).getOrElse(-1L) + + replicaManager.getLog(topicPartition) match { + case None => + warn(s"Attempted to load offsets and group metadata from $topicPartition, but found no log") + + case Some(log) => + val loadedOffsets = mutable.Map[GroupTopicPartition, CommitRecordMetadataAndOffset]() + val pendingOffsets = mutable.Map[Long, mutable.Map[GroupTopicPartition, CommitRecordMetadataAndOffset]]() + val loadedGroups = mutable.Map[String, GroupMetadata]() + val removedGroups = mutable.Set[String]() + + // buffer may not be needed if records are read from memory + var buffer = ByteBuffer.allocate(0) + + // loop breaks if leader changes at any time during the load, since logEndOffset is -1 + var currOffset = log.logStartOffset + + // loop breaks if no records have been read, since the end of the log has been reached + var readAtLeastOneRecord = true + + while (currOffset < logEndOffset && readAtLeastOneRecord && !shuttingDown.get()) { + val fetchDataInfo = log.read(currOffset, + maxLength = config.loadBufferSize, + isolation = FetchIsolation.LOG_END, + minOneMessage = true) + + readAtLeastOneRecord = fetchDataInfo.records.sizeInBytes > 0 + + val memRecords = (fetchDataInfo.records: @unchecked) match { + case records: MemoryRecords => records + case fileRecords: FileRecords => + val sizeInBytes = fileRecords.sizeInBytes + val bytesNeeded = Math.max(config.loadBufferSize, sizeInBytes) + + // minOneMessage = true in the above log.read means that the buffer may need to be grown to ensure progress can be made + if (buffer.capacity < bytesNeeded) { + if (config.loadBufferSize < bytesNeeded) + warn(s"Loaded offsets and group metadata from $topicPartition with buffer larger ($bytesNeeded bytes) than " + + s"configured offsets.load.buffer.size (${config.loadBufferSize} bytes)") + + buffer = ByteBuffer.allocate(bytesNeeded) + } else { + buffer.clear() + } + + fileRecords.readInto(buffer, 0) + MemoryRecords.readableRecords(buffer) + } + + memRecords.batches.forEach { batch => + val isTxnOffsetCommit = batch.isTransactional + if (batch.isControlBatch) { + val recordIterator = batch.iterator + if (recordIterator.hasNext) { + val record = recordIterator.next() + val controlRecord = ControlRecordType.parse(record.key) + if (controlRecord == ControlRecordType.COMMIT) { + pendingOffsets.getOrElse(batch.producerId, mutable.Map[GroupTopicPartition, CommitRecordMetadataAndOffset]()) + .foreach { + case (groupTopicPartition, commitRecordMetadataAndOffset) => + if (!loadedOffsets.contains(groupTopicPartition) || loadedOffsets(groupTopicPartition).olderThan(commitRecordMetadataAndOffset)) + loadedOffsets.put(groupTopicPartition, commitRecordMetadataAndOffset) + } + } + pendingOffsets.remove(batch.producerId) + } + } else { + var batchBaseOffset: Option[Long] = None + for (record <- batch.asScala) { + require(record.hasKey, "Group metadata/offset entry key should not be null") + if (batchBaseOffset.isEmpty) + batchBaseOffset = Some(record.offset) + GroupMetadataManager.readMessageKey(record.key) match { + case offsetKey: OffsetKey => + if (isTxnOffsetCommit && !pendingOffsets.contains(batch.producerId)) + pendingOffsets.put(batch.producerId, mutable.Map[GroupTopicPartition, CommitRecordMetadataAndOffset]()) + + // load offset + val groupTopicPartition = offsetKey.key + if (!record.hasValue) { + if (isTxnOffsetCommit) + pendingOffsets(batch.producerId).remove(groupTopicPartition) + else + loadedOffsets.remove(groupTopicPartition) + } else { + val offsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(record.value) + if (isTxnOffsetCommit) + pendingOffsets(batch.producerId).put(groupTopicPartition, CommitRecordMetadataAndOffset(batchBaseOffset, offsetAndMetadata)) + else + loadedOffsets.put(groupTopicPartition, CommitRecordMetadataAndOffset(batchBaseOffset, offsetAndMetadata)) + } + + case groupMetadataKey: GroupMetadataKey => + // load group metadata + val groupId = groupMetadataKey.key + val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value, time) + if (groupMetadata != null) { + removedGroups.remove(groupId) + loadedGroups.put(groupId, groupMetadata) + } else { + loadedGroups.remove(groupId) + removedGroups.add(groupId) + } + + case unknownKey: UnknownKey => + warn(s"Unknown message key with version ${unknownKey.version}" + + s" while loading offsets and group metadata from $topicPartition. Ignoring it. " + + "It could be a left over from an aborted upgrade.") + } + } + } + currOffset = batch.nextOffset + } + } + + val (groupOffsets, emptyGroupOffsets) = loadedOffsets + .groupBy(_._1.group) + .map { case (k, v) => + k -> v.map { case (groupTopicPartition, offset) => (groupTopicPartition.topicPartition, offset) } + }.partition { case (group, _) => loadedGroups.contains(group) } + + val pendingOffsetsByGroup = mutable.Map[String, mutable.Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]]() + pendingOffsets.foreachEntry { (producerId, producerOffsets) => + producerOffsets.keySet.map(_.group).foreach(addProducerGroup(producerId, _)) + producerOffsets + .groupBy(_._1.group) + .foreachEntry { (group, offsets) => + val groupPendingOffsets = pendingOffsetsByGroup.getOrElseUpdate(group, mutable.Map.empty[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) + val groupProducerOffsets = groupPendingOffsets.getOrElseUpdate(producerId, mutable.Map.empty[TopicPartition, CommitRecordMetadataAndOffset]) + groupProducerOffsets ++= offsets.map { case (groupTopicPartition, offset) => + (groupTopicPartition.topicPartition, offset) + } + } + } + + val (pendingGroupOffsets, pendingEmptyGroupOffsets) = pendingOffsetsByGroup + .partition { case (group, _) => loadedGroups.contains(group)} + + loadedGroups.values.foreach { group => + val offsets = groupOffsets.getOrElse(group.groupId, Map.empty[TopicPartition, CommitRecordMetadataAndOffset]) + val pendingOffsets = pendingGroupOffsets.getOrElse(group.groupId, Map.empty[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) + debug(s"Loaded group metadata $group with offsets $offsets and pending offsets $pendingOffsets") + loadGroup(group, offsets, pendingOffsets) + onGroupLoaded(group) + } + + // load groups which store offsets in kafka, but which have no active members and thus no group + // metadata stored in the log + (emptyGroupOffsets.keySet ++ pendingEmptyGroupOffsets.keySet).foreach { groupId => + val group = new GroupMetadata(groupId, Empty, time) + val offsets = emptyGroupOffsets.getOrElse(groupId, Map.empty[TopicPartition, CommitRecordMetadataAndOffset]) + val pendingOffsets = pendingEmptyGroupOffsets.getOrElse(groupId, Map.empty[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]) + debug(s"Loaded group metadata $group with offsets $offsets and pending offsets $pendingOffsets") + loadGroup(group, offsets, pendingOffsets) + onGroupLoaded(group) + } + + removedGroups.foreach { groupId => + // if the cache already contains a group which should be removed, raise an error. Note that it + // is possible (however unlikely) for a consumer group to be removed, and then to be used only for + // offset storage (i.e. by "simple" consumers) + if (groupMetadataCache.contains(groupId) && !emptyGroupOffsets.contains(groupId)) + throw new IllegalStateException(s"Unexpected unload of active group $groupId while " + + s"loading partition $topicPartition") + } + } + } + + private def loadGroup(group: GroupMetadata, offsets: Map[TopicPartition, CommitRecordMetadataAndOffset], + pendingTransactionalOffsets: Map[Long, mutable.Map[TopicPartition, CommitRecordMetadataAndOffset]]): Unit = { + // offsets are initialized prior to loading the group into the cache to ensure that clients see a consistent + // view of the group's offsets + trace(s"Initialized offsets $offsets for group ${group.groupId}") + group.initializeOffsets(offsets, pendingTransactionalOffsets.toMap) + + val currentGroup = addGroup(group) + if (group != currentGroup) + debug(s"Attempt to load group ${group.groupId} from log with generation ${group.generationId} failed " + + s"because there is already a cached group with generation ${currentGroup.generationId}") + } + + /** + * When this broker becomes a follower for an offsets topic partition clear out the cache for groups that belong to + * that partition. + * + * @param offsetsPartition Groups belonging to this partition of the offsets topic will be deleted from the cache. + */ + def removeGroupsForPartition(offsetsPartition: Int, + coordinatorEpoch: OptionalInt, + onGroupUnloaded: GroupMetadata => Unit): Unit = { + val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, offsetsPartition) + info(s"Scheduling unloading of offsets and group metadata from $topicPartition") + scheduler.scheduleOnce(topicPartition.toString, () => removeGroupsAndOffsets(topicPartition, coordinatorEpoch, onGroupUnloaded)) + } + + private [group] def removeGroupsAndOffsets(topicPartition: TopicPartition, + coordinatorEpoch: OptionalInt, + onGroupUnloaded: GroupMetadata => Unit): Unit = { + val offsetsPartition = topicPartition.partition + if (maybeUpdateCoordinatorEpoch(offsetsPartition, coordinatorEpoch)) { + var numOffsetsRemoved = 0 + var numGroupsRemoved = 0 + + debug(s"Started unloading offsets and group metadata for $topicPartition for " + + s"coordinator epoch $coordinatorEpoch") + inLock(partitionLock) { + // we need to guard the group removal in cache in the loading partition lock + // to prevent coordinator's check-and-get-group race condition + ownedPartitions.remove(offsetsPartition) + loadingPartitions.remove(offsetsPartition) + + for (group <- groupMetadataCache.values) { + if (partitionFor(group.groupId) == offsetsPartition) { + onGroupUnloaded(group) + groupMetadataCache.remove(group.groupId, group) + removeGroupFromAllProducers(group.groupId) + numGroupsRemoved += 1 + numOffsetsRemoved += group.numOffsets + } + } + } + info(s"Finished unloading $topicPartition for coordinator epoch $coordinatorEpoch. " + + s"Removed $numOffsetsRemoved cached offsets and $numGroupsRemoved cached groups.") + } else { + info(s"Not removing offsets and group metadata for $topicPartition " + + s"in epoch $coordinatorEpoch since current epoch is ${epochForPartitionId.get(topicPartition.partition)}") + } + } + + /** + * Update the cached coordinator epoch if the new value is larger than the old value. + * @return true if `epochOpt` is either empty or contains a value greater than or equal to the current epoch + */ + private def maybeUpdateCoordinatorEpoch( + partitionId: Int, + epochOpt: OptionalInt + ): Boolean = { + val updatedEpoch = epochForPartitionId.compute(partitionId, (_, currentEpoch) => { + if (currentEpoch == null) { + if (epochOpt.isPresent) epochOpt.getAsInt + else null + } else { + if (epochOpt.isPresent && epochOpt.getAsInt > currentEpoch) epochOpt.getAsInt + else currentEpoch + } + }) + if (epochOpt.isPresent) { + epochOpt.getAsInt == updatedEpoch + } else { + true + } + } + + // visible for testing + private[group] def cleanupGroupMetadata(): Unit = { + val currentTimestamp = time.milliseconds() + val numOffsetsRemoved = cleanupGroupMetadata(groupMetadataCache.values, RequestLocal.noCaching, + _.removeExpiredOffsets(currentTimestamp, config.offsetsRetentionMs)) + offsetExpiredSensor.record(numOffsetsRemoved) + if (numOffsetsRemoved > 0) + info(s"Removed $numOffsetsRemoved expired offsets in ${time.milliseconds() - currentTimestamp} milliseconds.") + } + + /** + * This function is used to clean up group offsets given the groups and also a function that performs the offset deletion. + * @param groups Groups whose metadata are to be cleaned up + * @param selector A function that implements deletion of (all or part of) group offsets. This function is called while + * a group lock is held, therefore there is no need for the caller to also obtain a group lock. + * @return The cumulative number of offsets removed + */ + def cleanupGroupMetadata(groups: Iterable[GroupMetadata], requestLocal: RequestLocal, + selector: GroupMetadata => Map[TopicPartition, OffsetAndMetadata]): Int = { + var offsetsRemoved = 0 + + groups.foreach { group => + val groupId = group.groupId + val (removedOffsets, groupIsDead, generation) = group.inLock { + val removedOffsets = selector(group) + if (group.is(Empty) && !group.hasOffsets) { + info(s"Group $groupId transitioned to Dead in generation ${group.generationId}") + group.transitionTo(Dead) + } + (removedOffsets, group.is(Dead), group.generationId) + } + + val offsetsPartition = partitionFor(groupId) + val appendPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, offsetsPartition) + onlinePartition(offsetsPartition) match { + case Some(partition) => + // We always use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. + val timestampType = TimestampType.CREATE_TIME + val timestamp = time.milliseconds() + + val tombstones = ArrayBuffer.empty[SimpleRecord] + removedOffsets.foreachEntry { (topicPartition, offsetAndMetadata) => + trace(s"Removing expired/deleted offset and metadata for $groupId, $topicPartition: $offsetAndMetadata") + val commitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) + tombstones += new SimpleRecord(timestamp, commitKey, null) + } + trace(s"Marked ${removedOffsets.size} offsets in $appendPartition for deletion.") + + // We avoid writing the tombstone when the generationId is 0, since this group is only using + // Kafka for offset storage. + if (groupIsDead && groupMetadataCache.remove(groupId, group) && generation > 0) { + // Append the tombstone messages to the partition. It is okay if the replicas don't receive these (say, + // if we crash or leaders move) since the new leaders will still expire the consumers with heartbeat and + // retry removing this group. + val groupMetadataKey = GroupMetadataManager.groupMetadataKey(group.groupId) + tombstones += new SimpleRecord(timestamp, groupMetadataKey, null) + trace(s"Group $groupId removed from the metadata cache and marked for deletion in $appendPartition.") + } + + if (tombstones.nonEmpty) { + try { + // do not need to require acks since even if the tombstone is lost, + // it will be appended again in the next purge cycle + val records = MemoryRecords.withRecords(RecordBatch.CURRENT_MAGIC_VALUE, 0L, compression, timestampType, tombstones.toArray: _*) + partition.appendRecordsToLeader(records, origin = AppendOrigin.COORDINATOR, requiredAcks = 0, + requestLocal = requestLocal) + + offsetsRemoved += removedOffsets.size + trace(s"Successfully appended ${tombstones.size} tombstones to $appendPartition for expired/deleted " + + s"offsets and/or metadata for group $groupId") + } catch { + case t: Throwable => + error(s"Failed to append ${tombstones.size} tombstones to $appendPartition for expired/deleted " + + s"offsets and/or metadata for group $groupId.", t) + // ignore and continue + } + } + + + case None => + info(s"BrokerId $brokerId is no longer a coordinator for the group $groupId. Proceeding cleanup for other alive groups") + } + } + + offsetsRemoved + } + + /** + * Complete pending transactional offset commits of the groups of `producerId` from the provided + * `completedPartitions`. This method is invoked when a commit or abort marker is fully written + * to the log. It may be invoked when a group lock is held by the caller, for instance when delayed + * operations are completed while appending offsets for a group. Since we need to acquire one or + * more group metadata locks to handle transaction completion, this operation is scheduled on + * the scheduler thread to avoid deadlocks. + */ + def scheduleHandleTxnCompletion(producerId: Long, completedPartitions: Set[Int], isCommit: Boolean): CompletableFuture[Void] = { + val future = new CompletableFuture[Void]() + scheduler.scheduleOnce(s"handleTxnCompletion-$producerId", () => { + try { + handleTxnCompletion(producerId, completedPartitions, isCommit) + future.complete(null) + } catch { + case e: Throwable => future.completeExceptionally(e) + } + }) + future + } + + private[group] def handleTxnCompletion(producerId: Long, completedPartitions: Set[Int], isCommit: Boolean): Unit = { + val pendingGroups = groupsBelongingToPartitions(producerId, completedPartitions) + pendingGroups.foreach { groupId => + getGroup(groupId) match { + case Some(group) => group.inLock { + if (!group.is(Dead)) { + group.completePendingTxnOffsetCommit(producerId, isCommit) + removeProducerGroup(producerId, groupId) + } + } + case _ => + info(s"Group $groupId has moved away from $brokerId after transaction marker was written but before the " + + s"cache was updated. The cache on the new group owner will be updated instead.") + } + } + } + + private def addProducerGroup(producerId: Long, groupId: String) = openGroupsForProducer synchronized { + openGroupsForProducer.getOrElseUpdate(producerId, mutable.Set.empty[String]).add(groupId) + } + + private def removeProducerGroup(producerId: Long, groupId: String) = openGroupsForProducer synchronized { + openGroupsForProducer.getOrElseUpdate(producerId, mutable.Set.empty[String]).remove(groupId) + if (openGroupsForProducer(producerId).isEmpty) + openGroupsForProducer.remove(producerId) + } + + private def groupsBelongingToPartitions(producerId: Long, partitions: Set[Int]) = openGroupsForProducer synchronized { + val (ownedGroups, _) = openGroupsForProducer.getOrElse(producerId, mutable.Set.empty[String]) + .partition(group => partitions.contains(partitionFor(group))) + ownedGroups + } + + private def removeGroupFromAllProducers(groupId: String): Unit = openGroupsForProducer synchronized { + openGroupsForProducer.foreachEntry { (_, groups) => + groups.remove(groupId) + } + } + + /* + * Check if the offset metadata length is valid + */ + private def validateOffsetMetadataLength(metadata: String) : Boolean = { + metadata == null || metadata.length() <= config.maxMetadataSize + } + + + def shutdown(): Unit = { + shuttingDown.set(true) + scheduler.shutdown() + metrics.removeSensor(GroupMetadataManager.LoadTimeSensor) + metrics.removeSensor(GroupMetadataManager.OffsetCommitsSensor) + metrics.removeSensor(GroupMetadataManager.OffsetExpiredSensor) + + // TODO: clear the caches + } + + private def onlinePartition(partition: Int): Option[Partition] = + replicaManager.onlinePartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition)) + + /** + * Add a partition to the owned partition set. + * + * NOTE: this is for test only. + */ + private[group] def addOwnedPartition(partition: Int): Unit = { + inLock(partitionLock) { + ownedPartitions.add(partition) + } + } + + /** + * Add a partition to the loading partitions set. Return true if the partition was not + * already loading. + * + * Visible for testing + */ + private[group] def addLoadingPartition(partition: Int): Boolean = { + inLock(partitionLock) { + if (ownedPartitions.contains(partition)) { + false + } else { + loadingPartitions.add(partition) + } + } + } + +} + +/** + * Messages stored for the group topic has versions for both the key and value fields. Key + * version is used to indicate the type of the message (also to differentiate different types + * of messages from being compacted together if they have the same field values); and value + * version is used to evolve the messages within their data types: + * + * key version 0: group consumption offset + * -> value version 0: [offset, metadata, timestamp] + * + * key version 1: group consumption offset + * -> value version 1: [offset, metadata, commit_timestamp, expire_timestamp] + * + * key version 2: group metadata + * -> value version 0: [protocol_type, generation, protocol, leader, members] + */ +object GroupMetadataManager { + // Metrics names + val MetricsGroup: String = "group-coordinator-metrics" + val LoadTimeSensor: String = "GroupPartitionLoadTime" + val OffsetCommitsSensor: String = "OffsetCommits" + val OffsetExpiredSensor: String = "OffsetExpired" + + /** + * Generates the key for offset commit message for given (group, topic, partition) + * + * @param groupId the ID of the group to generate the key + * @param topicPartition the TopicPartition to generate the key + * @return key for offset commit message + */ + def offsetCommitKey(groupId: String, topicPartition: TopicPartition): Array[Byte] = { + MessageUtil.toVersionPrefixedBytes(OffsetCommitKey.HIGHEST_SUPPORTED_VERSION, + new OffsetCommitKey() + .setGroup(groupId) + .setTopic(topicPartition.topic) + .setPartition(topicPartition.partition)) + } + + /** + * Generates the key for group metadata message for given group + * + * @param groupId the ID of the group to generate the key + * @return key bytes for group metadata message + */ + def groupMetadataKey(groupId: String): Array[Byte] = { + MessageUtil.toVersionPrefixedBytes(GroupMetadataKeyData.HIGHEST_SUPPORTED_VERSION, + new GroupMetadataKeyData() + .setGroup(groupId)) + } + + /** + * Generates the payload for offset commit message from given offset and metadata + * + * @param offsetAndMetadata consumer's current offset and metadata + * @param maxVersion the highest version allowed, we may use a lower version for compatibility reasons + * we serialize with the highest supported non-flexible version until a tagged field is introduced + * or the version is bumped. + * @return payload for offset commit message + */ + def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata, maxVersion: Short = 3): Array[Byte] = { + val version = + if (offsetAndMetadata.expireTimestampMs.isPresent) Math.min(1, maxVersion).toShort + else maxVersion + MessageUtil.toVersionPrefixedBytes(version, new OffsetCommitValue() + .setOffset(offsetAndMetadata.committedOffset) + .setMetadata(offsetAndMetadata.metadata) + .setCommitTimestamp(offsetAndMetadata.commitTimestampMs) + .setLeaderEpoch(offsetAndMetadata.leaderEpoch.orElse(RecordBatch.NO_PARTITION_LEADER_EPOCH)) + // version 1 has a non empty expireTimestamp field + .setExpireTimestamp(offsetAndMetadata.expireTimestampMs.orElse(OffsetCommitRequest.DEFAULT_TIMESTAMP)) + ) + } + + /** + * Generates the payload for group metadata message from given offset and metadata + * assuming the generation id, selected protocol, leader and member assignment are all available + * + * @param groupMetadata current group metadata + * @param assignment the assignment for the rebalancing generation + * @param version the version to serialize it with, the default is `3`, the highest supported non-flexible version + * until a tagged field is introduced or the version is bumped. The default should always be used + * outside of tests + * @return payload for offset commit message + */ + def groupMetadataValue(groupMetadata: GroupMetadata, + assignment: Map[String, Array[Byte]], + version: Short = 3): Array[Byte] = { + MessageUtil.toVersionPrefixedBytes(version, new GroupMetadataValue() + .setProtocolType(groupMetadata.protocolType.getOrElse("")) + .setGeneration(groupMetadata.generationId) + .setProtocol(groupMetadata.protocolName.orNull) + .setLeader(groupMetadata.leaderOrNull) + .setCurrentStateTimestamp(groupMetadata.currentStateTimestampOrDefault) + .setMembers(groupMetadata.allMemberMetadata.map { memberMetadata => + new GroupMetadataValue.MemberMetadata() + .setMemberId(memberMetadata.memberId) + .setClientId(memberMetadata.clientId) + .setClientHost(memberMetadata.clientHost) + .setSessionTimeout(memberMetadata.sessionTimeoutMs) + .setRebalanceTimeout(memberMetadata.rebalanceTimeoutMs) + .setGroupInstanceId(memberMetadata.groupInstanceId.orNull) + // The group is non-empty, so the current protocol must be defined + .setSubscription(groupMetadata.protocolName.map(memberMetadata.metadata) + .getOrElse(throw new IllegalStateException("Attempted to write non-empty group metadata with no defined protocol."))) + .setAssignment(assignment.getOrElse(memberMetadata.memberId, + throw new IllegalStateException(s"Attempted to write member ${memberMetadata.memberId} of group ${groupMetadata.groupId} with no assignment."))) + }.asJava)) + } + + /** + * Decodes the offset messages' key + * + * @param buffer input byte-buffer + * @return an OffsetKey or GroupMetadataKey object from the message + */ + def readMessageKey(buffer: ByteBuffer): BaseKey = { + val version = buffer.getShort + if (version >= OffsetCommitKey.LOWEST_SUPPORTED_VERSION && version <= OffsetCommitKey.HIGHEST_SUPPORTED_VERSION) { + // version 0 and 1 refer to offset + val key = new OffsetCommitKey(new ByteBufferAccessor(buffer), version) + OffsetKey(version, GroupTopicPartition(key.group, new TopicPartition(key.topic, key.partition))) + } else if (version >= GroupMetadataKeyData.LOWEST_SUPPORTED_VERSION && version <= GroupMetadataKeyData.HIGHEST_SUPPORTED_VERSION) { + // version 2 refers to group metadata + val key = new GroupMetadataKeyData(new ByteBufferAccessor(buffer), version) + GroupMetadataKey(version, key.group) + } else { + UnknownKey(version) + } + } + + /** + * Decodes the offset messages' payload and retrieves offset and metadata from it + * + * @param buffer input byte-buffer + * @return an offset-metadata object from the message + */ + def readOffsetMessageValue(buffer: ByteBuffer): OffsetAndMetadata = { + // tombstone + if (buffer == null) null + else { + val version = buffer.getShort + if (version >= OffsetCommitValue.LOWEST_SUPPORTED_VERSION && version <= OffsetCommitValue.HIGHEST_SUPPORTED_VERSION) { + val value = new OffsetCommitValue(new ByteBufferAccessor(buffer), version) + new OffsetAndMetadata( + value.offset, + if (value.leaderEpoch == RecordBatch.NO_PARTITION_LEADER_EPOCH) OptionalInt.empty() else OptionalInt.of(value.leaderEpoch), + value.metadata, + value.commitTimestamp, + if (value.expireTimestamp == OffsetCommitRequest.DEFAULT_TIMESTAMP) OptionalLong.empty() else OptionalLong.of(value.expireTimestamp)) + } else throw new IllegalStateException(s"Unknown offset message version: $version") + } + } + + /** + * Decodes the group metadata messages' payload and retrieves its member metadata from it + * + * @param groupId The ID of the group to be read + * @param buffer input byte-buffer + * @param time the time instance to use + * @return a group metadata object from the message + */ + def readGroupMessageValue(groupId: String, buffer: ByteBuffer, time: Time): GroupMetadata = { + // tombstone + if (buffer == null) null + else { + val version = buffer.getShort + if (version >= GroupMetadataValue.LOWEST_SUPPORTED_VERSION && version <= GroupMetadataValue.HIGHEST_SUPPORTED_VERSION) { + val value = new GroupMetadataValue(new ByteBufferAccessor(buffer), version) + val members = value.members.asScala.map { memberMetadata => + new MemberMetadata( + memberId = memberMetadata.memberId, + groupInstanceId = Option(memberMetadata.groupInstanceId), + clientId = memberMetadata.clientId, + clientHost = memberMetadata.clientHost, + rebalanceTimeoutMs = if (version == 0) memberMetadata.sessionTimeout else memberMetadata.rebalanceTimeout, + sessionTimeoutMs = memberMetadata.sessionTimeout, + protocolType = value.protocolType, + supportedProtocols = List((value.protocol, memberMetadata.subscription)), + assignment = memberMetadata.assignment) + } + GroupMetadata.loadGroup( + groupId = groupId, + initialState = if (members.isEmpty) Empty else Stable, + generationId = value.generation, + protocolType = value.protocolType, + protocolName = value.protocol, + leaderId = value.leader, + currentStateTimestamp = if (value.currentStateTimestamp == -1) None else Some(value.currentStateTimestamp), + members = members, + time = time) + } else throw new IllegalStateException(s"Unknown group metadata message version: $version") + } + } + + def maybeConvertOffsetCommitError(error: Errors) : Errors = { + error match { + case Errors.NETWORK_EXCEPTION => + // When committing offsets transactionally, we now verify the transaction with the + // transaction coordinator. Verification can fail with `NETWORK_EXCEPTION`, a retriable + // error which older clients may not expect and retry correctly. We translate the error to + // `COORDINATOR_LOAD_IN_PROGRESS` because it causes clients to retry the request without an + // unnecessary coordinator lookup. + Errors.COORDINATOR_LOAD_IN_PROGRESS + + case Errors.UNKNOWN_TOPIC_OR_PARTITION + | Errors.NOT_ENOUGH_REPLICAS + | Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND => + Errors.COORDINATOR_NOT_AVAILABLE + + case Errors.NOT_LEADER_OR_FOLLOWER + | Errors.KAFKA_STORAGE_ERROR => + Errors.NOT_COORDINATOR + + case Errors.MESSAGE_TOO_LARGE + | Errors.RECORD_LIST_TOO_LARGE + | Errors.INVALID_FETCH_SIZE => + Errors.INVALID_COMMIT_OFFSET_SIZE + + // We may see INVALID_TXN_STATE or INVALID_PID_MAPPING here due to transaction verification. + // They can be returned without mapping to a new error. + case other => other + } + } + +} + +case class GroupTopicPartition(group: String, topicPartition: TopicPartition) { + + def this(group: String, topic: String, partition: Int) = + this(group, new TopicPartition(topic, partition)) + + override def toString: String = + "[%s,%s,%d]".format(group, topicPartition.topic, topicPartition.partition) +} + +sealed trait BaseKey{ + def version: Short + def key: Any +} + +case class OffsetKey(version: Short, key: GroupTopicPartition) extends BaseKey { + override def toString: String = key.toString +} + +case class GroupMetadataKey(version: Short, key: String) extends BaseKey { + override def toString: String = key +} + +case class UnknownKey(version: Short) extends BaseKey { + override def key: String = null + override def toString: String = key +} diff --git a/core/src/main/scala/kafka/coordinator/group/MemberMetadata.scala b/core/src/main/scala/kafka/coordinator/group/MemberMetadata.scala new file mode 100644 index 0000000000000..6ad6273977052 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/group/MemberMetadata.scala @@ -0,0 +1,153 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import java.util + +import kafka.utils.nonthreadsafe + +case class MemberSummary(memberId: String, + groupInstanceId: Option[String], + clientId: String, + clientHost: String, + metadata: Array[Byte], + assignment: Array[Byte]) + +private object MemberMetadata { + def plainProtocolSet(supportedProtocols: List[(String, Array[Byte])]): Set[String] = supportedProtocols.map(_._1).toSet +} + +/** + * Member metadata contains the following metadata: + * + * Heartbeat metadata: + * 1. negotiated heartbeat session timeout + * 2. timestamp of the latest heartbeat + * + * Protocol metadata: + * 1. the list of supported protocols (ordered by preference) + * 2. the metadata associated with each protocol + * + * In addition, it also contains the following state information: + * + * 1. Awaiting rebalance callback: when the group is in the prepare-rebalance state, + * its rebalance callback will be kept in the metadata if the + * member has sent the join group request + * 2. Awaiting sync callback: when the group is in the awaiting-sync state, its sync callback + * is kept in metadata until the leader provides the group assignment + * and the group transitions to stable + */ +@nonthreadsafe +private[group] class MemberMetadata(var memberId: String, + val groupInstanceId: Option[String], + val clientId: String, + val clientHost: String, + var rebalanceTimeoutMs: Int, + var sessionTimeoutMs: Int, + val protocolType: String, + var supportedProtocols: List[(String, Array[Byte])], + var assignment: Array[Byte] = Array.empty[Byte]) { + + var awaitingJoinCallback: JoinGroupResult => Unit = _ + var awaitingSyncCallback: SyncGroupResult => Unit = _ + var isNew: Boolean = false + + def isStaticMember: Boolean = groupInstanceId.isDefined + + // This variable is used to track heartbeat completion through the delayed + // heartbeat purgatory. When scheduling a new heartbeat expiration, we set + // this value to `false`. Upon receiving the heartbeat (or any other event + // indicating the liveness of the client), we set it to `true` so that the + // delayed heartbeat can be completed. + var heartbeatSatisfied: Boolean = false + + def isAwaitingJoin: Boolean = awaitingJoinCallback != null + def isAwaitingSync: Boolean = awaitingSyncCallback != null + + /** + * Get metadata corresponding to the provided protocol. + */ + def metadata(protocol: String): Array[Byte] = { + supportedProtocols.find(_._1 == protocol) match { + case Some((_, metadata)) => metadata + case None => + throw new IllegalArgumentException("Member does not support protocol") + } + } + + def hasSatisfiedHeartbeat: Boolean = { + if (isNew) { + // New members can be expired while awaiting join, so we have to check this first + heartbeatSatisfied + } else if (isAwaitingJoin || isAwaitingSync) { + // Members that are awaiting a rebalance automatically satisfy expected heartbeats + true + } else { + // Otherwise we require the next heartbeat + heartbeatSatisfied + } + } + + /** + * Check if the provided protocol metadata matches the currently stored metadata. + */ + def matches(protocols: List[(String, Array[Byte])]): Boolean = { + if (protocols.size != this.supportedProtocols.size) + return false + + for (i <- protocols.indices) { + val p1 = protocols(i) + val p2 = supportedProtocols(i) + if (p1._1 != p2._1 || !util.Arrays.equals(p1._2, p2._2)) + return false + } + true + } + + def summary(protocol: String): MemberSummary = { + MemberSummary(memberId, groupInstanceId, clientId, clientHost, metadata(protocol), assignment) + } + + def summaryNoMetadata(): MemberSummary = { + MemberSummary(memberId, groupInstanceId, clientId, clientHost, Array.empty[Byte], Array.empty[Byte]) + } + + /** + * Vote for one of the potential group protocols. This takes into account the protocol preference as + * indicated by the order of supported protocols and returns the first one also contained in the set + */ + def vote(candidates: Set[String]): String = { + supportedProtocols.find({ case (protocol, _) => candidates.contains(protocol)}) match { + case Some((protocol, _)) => protocol + case None => + throw new IllegalArgumentException("Member does not support any of the candidate protocols") + } + } + + override def toString: String = { + "MemberMetadata(" + + s"memberId=$memberId, " + + s"groupInstanceId=$groupInstanceId, " + + s"clientId=$clientId, " + + s"clientHost=$clientHost, " + + s"sessionTimeoutMs=$sessionTimeoutMs, " + + s"rebalanceTimeoutMs=$rebalanceTimeoutMs, " + + s"supportedProtocols=${supportedProtocols.map(_._1)}" + + ")" + } +} diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala index 1e348b19b3e1b..e0019f0d773fd 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionCoordinator.scala @@ -16,7 +16,7 @@ */ package kafka.coordinator.transaction -import kafka.server.{KafkaConfig, ReplicaManager} +import kafka.server.{KafkaConfig, MetadataCache, ReplicaManager} import kafka.utils.Logging import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.internals.Topic @@ -27,15 +27,13 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{AddPartitionsToTxnResponse, TransactionResult} import org.apache.kafka.common.utils.{LogContext, ProducerIdAndEpoch, Time} -import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionLogConfig, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.server.common.{RequestLocal, TransactionVersion} import org.apache.kafka.server.util.Scheduler -import java.util import java.util.Properties import java.util.concurrent.atomic.AtomicBoolean -import scala.jdk.OptionConverters._ +import scala.jdk.CollectionConverters._ object TransactionCoordinator { @@ -47,18 +45,15 @@ object TransactionCoordinator { metadataCache: MetadataCache, time: Time): TransactionCoordinator = { - val transactionLogConfig = new TransactionLogConfig(config) - val transactionStateManagerConfig = new TransactionStateManagerConfig(config) - val txnConfig = TransactionConfig(transactionStateManagerConfig.transactionalIdExpirationMs, - transactionStateManagerConfig.transactionMaxTimeoutMs, - transactionLogConfig.transactionTopicPartitions, - transactionLogConfig.transactionTopicReplicationFactor, - transactionLogConfig.transactionTopicSegmentBytes, - transactionLogConfig.transactionLoadBufferSize, - transactionLogConfig.transactionTopicMinISR, - transactionStateManagerConfig.transactionAbortTimedOutTransactionCleanupIntervalMs, - transactionStateManagerConfig.transactionRemoveExpiredTransactionalIdCleanupIntervalMs, - transactionStateManagerConfig.transaction2PCEnabled, + val txnConfig = TransactionConfig(config.transactionStateManagerConfig.transactionalIdExpirationMs, + config.transactionStateManagerConfig.transactionMaxTimeoutMs, + config.transactionLogConfig.transactionTopicPartitions, + config.transactionLogConfig.transactionTopicReplicationFactor, + config.transactionLogConfig.transactionTopicSegmentBytes, + config.transactionLogConfig.transactionLoadBufferSize, + config.transactionLogConfig.transactionTopicMinISR, + config.transactionStateManagerConfig.transactionAbortTimedOutTransactionCleanupIntervalMs, + config.transactionStateManagerConfig.transactionRemoveExpiredTransactionalIdCleanupIntervalMs, config.requestTimeoutMs) val txnStateManager = new TransactionStateManager(config.brokerId, scheduler, replicaManager, metadataCache, txnConfig, @@ -113,8 +108,6 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleInitProducerId(transactionalId: String, transactionTimeoutMs: Int, - enableTwoPCFlag: Boolean, - keepPreparedTxn: Boolean, expectedProducerIdAndEpoch: Option[ProducerIdAndEpoch], responseCallback: InitProducerIdCallback, requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { @@ -131,35 +124,24 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // if transactional id is empty then return error as invalid request. This is // to make TransactionCoordinator's behavior consistent with producer client responseCallback(initTransactionError(Errors.INVALID_REQUEST)) - } else if (enableTwoPCFlag && !txnManager.isTransaction2pcEnabled()) { - // if the request is to enable two-phase commit but the broker 2PC config is set to false, - // 2PC functionality is disabled, clients that attempt to use this functionality - // would receive an authorization failed error. - responseCallback(initTransactionError(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED)) - } else if (keepPreparedTxn) { - // if the request is to keep the prepared transaction, then return an - // unsupported version error since the feature hasn't been implemented yet. - responseCallback(initTransactionError(Errors.UNSUPPORTED_VERSION)) - } else if (!txnManager.validateTransactionTimeoutMs(enableTwoPCFlag, transactionTimeoutMs)) { + } else if (!txnManager.validateTransactionTimeoutMs(transactionTimeoutMs)) { // check transactionTimeoutMs is not larger than the broker configured maximum allowed value responseCallback(initTransactionError(Errors.INVALID_TRANSACTION_TIMEOUT)) } else { - val resolvedTxnTimeoutMs = if (enableTwoPCFlag) Int.MaxValue else transactionTimeoutMs val coordinatorEpochAndMetadata = txnManager.getTransactionState(transactionalId).flatMap { case None => try { - val createdMetadata = new TransactionMetadata(transactionalId, - producerIdManager.generateProducerId(), - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, - RecordBatch.NO_PRODUCER_EPOCH, - resolvedTxnTimeoutMs, - TransactionState.EMPTY, - util.Set.of(), - -1, - time.milliseconds(), - TransactionVersion.TV_0) + val createdMetadata = new TransactionMetadata(transactionalId = transactionalId, + producerId = producerIdManager.generateProducerId(), + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = transactionTimeoutMs, + state = Empty, + topicPartitions = collection.mutable.Set.empty[TopicPartition], + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TransactionVersion.TV_0) txnManager.putTransactionStateIfNotExists(createdMetadata) } catch { case e: Exception => Left(Errors.forException(e)) @@ -173,10 +155,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val coordinatorEpoch = existingEpochAndMetadata.coordinatorEpoch val txnMetadata = existingEpochAndMetadata.transactionMetadata - txnMetadata.inLock(() => - prepareInitProducerIdTransit(transactionalId, resolvedTxnTimeoutMs, coordinatorEpoch, txnMetadata, + txnMetadata.inLock { + prepareInitProducerIdTransit(transactionalId, transactionTimeoutMs, coordinatorEpoch, txnMetadata, expectedProducerIdAndEpoch) - ) + } } result match { @@ -184,7 +166,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, responseCallback(initTransactionError(error)) case Right((coordinatorEpoch, newMetadata)) => - if (newMetadata.txnState == TransactionState.PREPARE_EPOCH_FENCE) { + if (newMetadata.txnState == PrepareEpochFence) { // abort the ongoing transaction and then return CONCURRENT_TRANSACTIONS to let client wait and retry def sendRetriableErrorCallback(error: Errors, newProducerId: Long, newProducerEpoch: Short): Unit = { if (error != Errors.NONE) { @@ -239,7 +221,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // could be a retry after a valid epoch bump that the producer never received the response for txnMetadata.producerEpoch == RecordBatch.NO_PRODUCER_EPOCH || producerIdAndEpoch.producerId == txnMetadata.producerId || - (producerIdAndEpoch.producerId == txnMetadata.prevProducerId && TransactionMetadata.isEpochExhausted(producerIdAndEpoch.epoch)) + (producerIdAndEpoch.producerId == txnMetadata.previousProducerId && TransactionMetadata.isEpochExhausted(producerIdAndEpoch.epoch)) } if (txnMetadata.pendingTransitionInProgress) { @@ -251,23 +233,24 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } else { // caller should have synchronized on txnMetadata already txnMetadata.state match { - case TransactionState.PREPARE_ABORT | TransactionState.PREPARE_COMMIT => + case PrepareAbort | PrepareCommit => // reply to client and let it backoff and retry Left(Errors.CONCURRENT_TRANSACTIONS) - case TransactionState.COMPLETE_ABORT | TransactionState.COMPLETE_COMMIT | TransactionState.EMPTY => + case CompleteAbort | CompleteCommit | Empty => val transitMetadataResult = // If the epoch is exhausted and the expected epoch (if provided) matches it, generate a new producer ID - try { - if (txnMetadata.isProducerEpochExhausted && - expectedProducerIdAndEpoch.forall(_.epoch == txnMetadata.producerEpoch)) + if (txnMetadata.isProducerEpochExhausted && + expectedProducerIdAndEpoch.forall(_.epoch == txnMetadata.producerEpoch)) { + try { Right(txnMetadata.prepareProducerIdRotation(producerIdManager.generateProducerId(), transactionTimeoutMs, time.milliseconds(), expectedProducerIdAndEpoch.isDefined)) - else - Right(txnMetadata.prepareIncrementProducerEpoch(transactionTimeoutMs, expectedProducerIdAndEpoch.map(e => Short.box(e.epoch)).toJava, - time.milliseconds())) - } catch { - case e: Exception => Left(Errors.forException(e)) + } catch { + case e: Exception => Left(Errors.forException(e)) + } + } else { + txnMetadata.prepareIncrementProducerEpoch(transactionTimeoutMs, expectedProducerIdAndEpoch.map(_.epoch), + time.milliseconds()) } transitMetadataResult match { @@ -275,7 +258,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Left(err) => Left(err) } - case TransactionState.ONGOING => + case Ongoing => // indicate to abort the current ongoing txn first. Note that this epoch is never returned to the // user. We will abort the ongoing transaction and return CONCURRENT_TRANSACTIONS to the client. // This forces the client to retry, which will ensure that the epoch is bumped a second time. In @@ -283,7 +266,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // then when the client retries, we will generate a new producerId. Right(coordinatorEpoch, txnMetadata.prepareFenceProducerEpoch()) - case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) @@ -295,13 +278,12 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleListTransactions( filteredProducerIds: Set[Long], filteredStates: Set[String], - filteredDuration: Long = -1L, - filteredTransactionalIdPattern: String = null + filteredDuration: Long = -1L ): ListTransactionsResponseData = { if (!isActive.get()) { new ListTransactionsResponseData().setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code) } else { - txnManager.listTransactionStates(filteredProducerIds, filteredStates, filteredDuration, filteredTransactionalIdPattern) + txnManager.listTransactionStates(filteredProducerIds, filteredStates, filteredDuration) } } @@ -327,12 +309,12 @@ class TransactionCoordinator(txnConfig: TransactionConfig, transactionState.setErrorCode(Errors.TRANSACTIONAL_ID_NOT_FOUND.code) case Right(Some(coordinatorEpochAndMetadata)) => val txnMetadata = coordinatorEpochAndMetadata.transactionMetadata - txnMetadata.inLock(() => { - if (txnMetadata.state == TransactionState.DEAD) { + txnMetadata.inLock { + if (txnMetadata.state == Dead) { // The transaction state is being expired, so ignore it transactionState.setErrorCode(Errors.TRANSACTIONAL_ID_NOT_FOUND.code) } else { - txnMetadata.topicPartitions.forEach(topicPartition => { + txnMetadata.topicPartitions.foreach { topicPartition => var topicData = transactionState.topics.find(topicPartition.topic) if (topicData == null) { topicData = new DescribeTransactionsResponseData.TopicData() @@ -340,17 +322,17 @@ class TransactionCoordinator(txnConfig: TransactionConfig, transactionState.topics.add(topicData) } topicData.partitions.add(topicPartition.partition) - }) + } transactionState .setErrorCode(Errors.NONE.code) .setProducerId(txnMetadata.producerId) .setProducerEpoch(txnMetadata.producerEpoch) - .setTransactionState(txnMetadata.state.stateName) + .setTransactionState(txnMetadata.state.name) .setTransactionTimeoutMs(txnMetadata.txnTimeoutMs) .setTransactionStartTimeMs(txnMetadata.txnStartTimestamp) } - }) + } } } } @@ -358,15 +340,13 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleVerifyPartitionsInTransaction(transactionalId: String, producerId: Long, producerEpoch: Short, - partitions: util.Set[TopicPartition], + partitions: collection.Set[TopicPartition], responseCallback: VerifyPartitionsCallback): Unit = { if (transactionalId == null || transactionalId.isEmpty) { debug(s"Returning ${Errors.INVALID_REQUEST} error code to client for $transactionalId's AddPartitions request for verification") - val errors = new util.HashMap[TopicPartition, Errors]() - partitions.forEach(partition => errors.put(partition, Errors.INVALID_REQUEST)) - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitions.map(_ -> Errors.INVALID_REQUEST).toMap.asJava)) } else { - val result: ApiResult[util.Map[TopicPartition, Errors]] = + val result: ApiResult[Map[TopicPartition, Errors]] = txnManager.getTransactionState(transactionalId).flatMap { case None => Left(Errors.INVALID_PRODUCER_ID_MAPPING) @@ -376,35 +356,31 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // Given the txnMetadata is valid, we check if the partitions are in the transaction. // Pending state is not checked since there is a final validation on the append to the log. // Partitions are added to metadata when the add partitions state is persisted, and removed when the end marker is persisted. - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.producerId != producerId) { Left(Errors.INVALID_PRODUCER_ID_MAPPING) } else if (txnMetadata.producerEpoch != producerEpoch) { Left(Errors.PRODUCER_FENCED) - } else if (txnMetadata.state == TransactionState.PREPARE_COMMIT || txnMetadata.state == TransactionState.PREPARE_ABORT) { + } else if (txnMetadata.state == PrepareCommit || txnMetadata.state == PrepareAbort) { Left(Errors.CONCURRENT_TRANSACTIONS) } else { - val errors = new util.HashMap[TopicPartition, Errors]() - partitions.forEach(part => { + Right(partitions.map { part => if (txnMetadata.topicPartitions.contains(part)) - errors.put(part, Errors.NONE) + (part, Errors.NONE) else - errors.put(part, Errors.TRANSACTION_ABORTABLE) - }) - Right(errors) + (part, Errors.TRANSACTION_ABORTABLE) + }.toMap) } - }) + } } result match { case Left(err) => debug(s"Returning $err error code to client for $transactionalId's AddPartitions request for verification") - val errors = new util.HashMap[TopicPartition, Errors]() - partitions.forEach(partition => errors.put(partition, err)) - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitions.map(_ -> err).toMap.asJava)) case Right(errors) => - responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors)) + responseCallback(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, errors.asJava)) } } @@ -413,7 +389,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, def handleAddPartitionsToTransaction(transactionalId: String, producerId: Long, producerEpoch: Short, - partitions: util.Set[TopicPartition], + partitions: collection.Set[TopicPartition], responseCallback: AddPartitionsCallback, clientTransactionVersion: TransactionVersion, requestLocal: RequestLocal = RequestLocal.noCaching): Unit = { @@ -431,7 +407,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndMetadata.transactionMetadata // generate the new transaction metadata with added partitions - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.pendingTransitionInProgress) { // return a retriable exception to let the client backoff and retry // This check is performed first so that the pending transition can complete before subsequent checks. @@ -442,15 +418,15 @@ class TransactionCoordinator(txnConfig: TransactionConfig, Left(Errors.INVALID_PRODUCER_ID_MAPPING) } else if (txnMetadata.producerEpoch != producerEpoch) { Left(Errors.PRODUCER_FENCED) - } else if (txnMetadata.state == TransactionState.PREPARE_COMMIT || txnMetadata.state == TransactionState.PREPARE_ABORT) { + } else if (txnMetadata.state == PrepareCommit || txnMetadata.state == PrepareAbort) { Left(Errors.CONCURRENT_TRANSACTIONS) - } else if (txnMetadata.state == TransactionState.ONGOING && txnMetadata.topicPartitions.containsAll(partitions)) { + } else if (txnMetadata.state == Ongoing && partitions.subsetOf(txnMetadata.topicPartitions)) { // this is an optimization: if the partitions are already in the metadata reply OK immediately Left(Errors.NONE) } else { - Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions, time.milliseconds(), clientTransactionVersion)) + Right(coordinatorEpoch, txnMetadata.prepareAddPartitions(partitions.toSet, time.milliseconds(), clientTransactionVersion)) } - }) + } } result match { @@ -556,60 +532,60 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndTxnMetadata.transactionMetadata val coordinatorEpoch = epochAndTxnMetadata.coordinatorEpoch - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) // Strict equality is enforced on the client side requests, as they shouldn't bump the producer epoch. else if ((isFromClient && producerEpoch != txnMetadata.producerEpoch) || producerEpoch < txnMetadata.producerEpoch) Left(Errors.PRODUCER_FENCED) - else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != TransactionState.PREPARE_EPOCH_FENCE) + else if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case TransactionState.ONGOING => + case Ongoing => val nextState = if (txnMarkerResult == TransactionResult.COMMIT) - TransactionState.PREPARE_COMMIT + PrepareCommit else - TransactionState.PREPARE_ABORT + PrepareAbort - if (nextState == TransactionState.PREPARE_ABORT && txnMetadata.pendingState.filter(s => s == TransactionState.PREPARE_EPOCH_FENCE).isPresent) { + if (nextState == PrepareAbort && txnMetadata.pendingState.contains(PrepareEpochFence)) { // We should clear the pending state to make way for the transition to PrepareAbort and also bump // the epoch in the transaction metadata we are about to append. isEpochFence = true - txnMetadata.pendingState(util.Optional.empty()) - txnMetadata.setProducerEpoch(producerEpoch) - txnMetadata.setLastProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) + txnMetadata.pendingState = None + txnMetadata.producerEpoch = producerEpoch + txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH } Right(coordinatorEpoch, txnMetadata.prepareAbortOrCommit(nextState, TransactionVersion.fromFeatureLevel(0), RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)) - case TransactionState.COMPLETE_COMMIT => + case CompleteCommit => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.NONE) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.COMPLETE_ABORT => + case CompleteAbort => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.NONE) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.PREPARE_COMMIT => + case PrepareCommit => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.PREPARE_ABORT => + case PrepareAbort => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.EMPTY => + case Empty => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - }) + } } preAppendResult match { @@ -630,7 +606,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) else if (txnMetadata.producerEpoch != producerEpoch) @@ -638,25 +614,25 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (txnMetadata.pendingTransitionInProgress) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case TransactionState.EMPTY| TransactionState.ONGOING | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => + case Empty| Ongoing | CompleteCommit | CompleteAbort => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.PREPARE_COMMIT => + case PrepareCommit => if (txnMarkerResult != TransactionResult.COMMIT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case TransactionState.PREPARE_ABORT => + case PrepareAbort => if (txnMarkerResult != TransactionResult.ABORT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - }) + } } else { debug(s"The transaction coordinator epoch has changed to ${epochAndMetadata.coordinatorEpoch} after $txnMarkerResult was " + s"successfully appended to the log for $transactionalId with old epoch $coordinatorEpoch") @@ -689,7 +665,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { // This was attempted epoch fence that failed, so mark this state on the metadata - epochAndMetadata.transactionMetadata.hasFailedEpochFence(true) + epochAndMetadata.transactionMetadata.hasFailedEpochFence = true warn(s"The coordinator failed to write an epoch fence transition for producer $transactionalId to the transaction log " + s"with error $error. The epoch was increased to ${newMetadata.producerEpoch} but not returned to the client") } @@ -778,14 +754,14 @@ class TransactionCoordinator(txnConfig: TransactionConfig, val txnMetadata = epochAndTxnMetadata.transactionMetadata val coordinatorEpoch = epochAndTxnMetadata.coordinatorEpoch - txnMetadata.inLock(() => { + txnMetadata.inLock { producerIdCopy = txnMetadata.producerId producerEpochCopy = txnMetadata.producerEpoch // PrepareEpochFence has slightly different epoch bumping logic so don't include it here. // Note that, it can only happen when the current state is Ongoing. - isEpochFence = txnMetadata.pendingState.filter(s => s == TransactionState.PREPARE_EPOCH_FENCE).isPresent + isEpochFence = txnMetadata.pendingState.contains(PrepareEpochFence) // True if the client retried a request that had overflowed the epoch, and a new producer ID is stored in the txnMetadata - val retryOnOverflow = !isEpochFence && txnMetadata.prevProducerId == producerId && + val retryOnOverflow = !isEpochFence && txnMetadata.previousProducerId == producerId && producerEpoch == Short.MaxValue - 1 && txnMetadata.producerEpoch == 0 // True if the client retried an endTxn request, and the bumped producer epoch is stored in the txnMetadata. val retryOnEpochBump = !isEpochFence && txnMetadata.producerEpoch == producerEpoch + 1 @@ -797,11 +773,11 @@ class TransactionCoordinator(txnConfig: TransactionConfig, // Return producer fenced even in the cases where the epoch is higher and could indicate an invalid state transition. // Use the following criteria to determine if a v2 retry is valid: txnMetadata.state match { - case TransactionState.ONGOING | TransactionState.EMPTY | TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Ongoing | Empty | Dead | PrepareEpochFence => producerEpoch == txnMetadata.producerEpoch - case TransactionState.PREPARE_COMMIT | TransactionState.PREPARE_ABORT => + case PrepareCommit | PrepareAbort => retryOnEpochBump - case TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => + case CompleteCommit | CompleteAbort => retryOnEpochBump || retryOnOverflow || producerEpoch == txnMetadata.producerEpoch } } else { @@ -825,10 +801,12 @@ class TransactionCoordinator(txnConfig: TransactionConfig, Right(RecordBatch.NO_PRODUCER_ID) } - if (nextState == TransactionState.PREPARE_ABORT && isEpochFence) { - // We should clear the pending state to make way for the transition to PrepareAbort - txnMetadata.pendingState(util.Optional.empty()) - // For TV2+, don't manually set the epoch - let prepareAbortOrCommit handle it naturally. + if (nextState == PrepareAbort && isEpochFence) { + // We should clear the pending state to make way for the transition to PrepareAbort and also bump + // the epoch in the transaction metadata we are about to append. + txnMetadata.pendingState = None + txnMetadata.producerEpoch = producerEpoch + txnMetadata.lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH } nextProducerIdOrErrors.flatMap { @@ -837,7 +815,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } } - if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != TransactionState.PREPARE_EPOCH_FENCE) { + if (txnMetadata.pendingTransitionInProgress && txnMetadata.pendingState.get != PrepareEpochFence) { // This check is performed first so that the pending transition can complete before the next checks. // With TV2, we may be transitioning over a producer epoch overflow, and the producer may be using the // new producer ID that is still only in pending state. @@ -847,14 +825,14 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (!isValidEpoch) Left(Errors.PRODUCER_FENCED) else txnMetadata.state match { - case TransactionState.ONGOING => + case Ongoing => val nextState = if (txnMarkerResult == TransactionResult.COMMIT) - TransactionState.PREPARE_COMMIT + PrepareCommit else - TransactionState.PREPARE_ABORT + PrepareAbort generateTxnTransitMetadataForTxnCompletion(nextState, false) - case TransactionState.COMPLETE_COMMIT => + case CompleteCommit => if (txnMarkerResult == TransactionResult.COMMIT) { if (isRetry) Left(Errors.NONE) @@ -865,42 +843,42 @@ class TransactionCoordinator(txnConfig: TransactionConfig, if (isRetry) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else - generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) } - case TransactionState.COMPLETE_ABORT => + case CompleteAbort => if (txnMarkerResult == TransactionResult.ABORT) { if (isRetry) Left(Errors.NONE) else - generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) } else { // Commit. logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) } - case TransactionState.PREPARE_COMMIT => + case PrepareCommit => if (txnMarkerResult == TransactionResult.COMMIT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.PREPARE_ABORT => + case PrepareAbort => if (txnMarkerResult == TransactionResult.ABORT) Left(Errors.CONCURRENT_TRANSACTIONS) else logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.EMPTY => + case Empty => if (txnMarkerResult == TransactionResult.ABORT) { - generateTxnTransitMetadataForTxnCompletion(TransactionState.PREPARE_ABORT, true) + generateTxnTransitMetadataForTxnCompletion(PrepareAbort, true) } else { logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) } - case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - }) + } } preAppendResult match { @@ -925,7 +903,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.producerId != producerId) Left(Errors.INVALID_PRODUCER_ID_MAPPING) else if (txnMetadata.producerEpoch != producerEpoch && txnMetadata.producerEpoch != producerEpoch + 1) @@ -933,26 +911,26 @@ class TransactionCoordinator(txnConfig: TransactionConfig, else if (txnMetadata.pendingTransitionInProgress) Left(Errors.CONCURRENT_TRANSACTIONS) else txnMetadata.state match { - case TransactionState.EMPTY | TransactionState.ONGOING | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => + case Empty| Ongoing | CompleteCommit | CompleteAbort => logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) - case TransactionState.PREPARE_COMMIT => + case PrepareCommit => if (txnMarkerResult != TransactionResult.COMMIT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case TransactionState.PREPARE_ABORT => + case PrepareAbort => if (txnMarkerResult != TransactionResult.ABORT) logInvalidStateTransitionAndReturnError(transactionalId, txnMetadata.state, txnMarkerResult) else Right(txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case TransactionState.DEAD | TransactionState.PREPARE_EPOCH_FENCE => + case Dead | PrepareEpochFence => val errorMsg = s"Found transactionalId $transactionalId with state ${txnMetadata.state}. " + s"This is illegal as we should never have transitioned to this state." fatal(errorMsg) throw new IllegalStateException(errorMsg) } - }) + } } else { debug(s"The transaction coordinator epoch has changed to ${epochAndMetadata.coordinatorEpoch} after $txnMarkerResult was " + s"successfully appended to the log for $transactionalId with old epoch $coordinatorEpoch") @@ -984,10 +962,10 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndMetadata) => if (epochAndMetadata.coordinatorEpoch == coordinatorEpoch) { - // For TV2, we allow re-bumping the epoch on retry, since we don't complete the epoch bump. - // Therefore, we don't set hasFailedEpochFence = true. + // This was attempted epoch fence that failed, so mark this state on the metadata + epochAndMetadata.transactionMetadata.hasFailedEpochFence = true warn(s"The coordinator failed to write an epoch fence transition for producer $transactionalId to the transaction log " + - s"with error $error") + s"with error $error. The epoch was increased to ${newMetadata.producerEpoch} but not returned to the client") } } } @@ -1033,7 +1011,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, case Some(epochAndTxnMetadata) => val txnMetadata = epochAndTxnMetadata.transactionMetadata - val transitMetadataOpt = txnMetadata.inLock(() => { + val transitMetadataOpt = txnMetadata.inLock { if (txnMetadata.producerId != txnIdAndPidEpoch.producerId) { error(s"Found incorrect producerId when expiring transactionalId: ${txnIdAndPidEpoch.transactionalId}. " + s"Expected producerId: ${txnIdAndPidEpoch.producerId}. Found producerId: " + @@ -1046,7 +1024,7 @@ class TransactionCoordinator(txnConfig: TransactionConfig, } else { Some(txnMetadata.prepareFenceProducerEpoch()) } - }) + } transitMetadataOpt.foreach { txnTransitMetadata => endTransaction(txnMetadata.transactionalId, diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala index f024e88aa8e2b..2d8a8c5a84044 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionLog.scala @@ -19,14 +19,12 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} -import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.common.record.{Record, RecordBatch} import org.apache.kafka.common.TopicPartition -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} -import org.apache.kafka.coordinator.transaction.generated.{CoordinatorRecordType, TransactionLogKey, TransactionLogValue} +import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} import org.apache.kafka.server.common.TransactionVersion -import java.util - +import scala.collection.mutable import scala.jdk.CollectionConverters._ /** @@ -52,8 +50,9 @@ object TransactionLog { * * @return key bytes */ - def keyToBytes(transactionalId: String): Array[Byte] = { - MessageUtil.toCoordinatorTypePrefixedBytes(new TransactionLogKey().setTransactionalId(transactionalId)) + private[transaction] def keyToBytes(transactionalId: String): Array[Byte] = { + MessageUtil.toVersionPrefixedBytes(TransactionLogKey.HIGHEST_SUPPORTED_VERSION, + new TransactionLogKey().setTransactionalId(transactionalId)) } /** @@ -61,13 +60,13 @@ object TransactionLog { * * @return value payload bytes */ - def valueToBytes(txnMetadata: TxnTransitMetadata, + private[transaction] def valueToBytes(txnMetadata: TxnTransitMetadata, transactionVersionLevel: TransactionVersion): Array[Byte] = { - if (txnMetadata.txnState == TransactionState.EMPTY && !txnMetadata.topicPartitions.isEmpty) + if (txnMetadata.txnState == Empty && txnMetadata.topicPartitions.nonEmpty) throw new IllegalStateException(s"Transaction is not expected to have any partitions since its state is ${txnMetadata.txnState}: $txnMetadata") - val transactionPartitions = if (txnMetadata.txnState == TransactionState.EMPTY) null - else txnMetadata.topicPartitions.asScala + val transactionPartitions = if (txnMetadata.txnState == Empty) null + else txnMetadata.topicPartitions .groupBy(_.topic) .map { case (topic, partitions) => new TransactionLogValue.PartitionsSchema() @@ -92,15 +91,19 @@ object TransactionLog { /** * Decodes the transaction log messages' key * - * @return left with the version if the key is not a transaction log key, right with the transactional id otherwise + * @return the key */ - def readTxnRecordKey(buffer: ByteBuffer): Either[Short, String] = { + def readTxnRecordKey(buffer: ByteBuffer): BaseKey = { val version = buffer.getShort - Either.cond( - version == CoordinatorRecordType.TRANSACTION_LOG.id, - new TransactionLogKey(new ByteBufferAccessor(buffer), 0.toShort).transactionalId, - version - ) + if (version >= TransactionLogKey.LOWEST_SUPPORTED_VERSION && version <= TransactionLogKey.HIGHEST_SUPPORTED_VERSION) { + val value = new TransactionLogKey(new ByteBufferAccessor(buffer), version) + TxnKey( + version = version, + transactionalId = value.transactionalId + ) + } else { + UnknownKey(version) + } } /** @@ -115,26 +118,71 @@ object TransactionLog { val version = buffer.getShort if (version >= TransactionLogValue.LOWEST_SUPPORTED_VERSION && version <= TransactionLogValue.HIGHEST_SUPPORTED_VERSION) { val value = new TransactionLogValue(new ByteBufferAccessor(buffer), version) - val state = TransactionState.fromId(value.transactionStatus) - val tps: util.Set[TopicPartition] = new util.HashSet[TopicPartition]() - if (!state.equals(TransactionState.EMPTY)) - value.transactionPartitions.forEach(partitionsSchema => { - partitionsSchema.partitionIds.forEach(partitionId => tps.add(new TopicPartition(partitionsSchema.topic, partitionId.intValue()))) - }) - Some(new TransactionMetadata( - transactionalId, - value.producerId, - value.previousProducerId, - value.nextProducerId, - value.producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - value.transactionTimeoutMs, - state, - tps, - value.transactionStartTimestampMs, - value.transactionLastUpdateTimestampMs, - TransactionVersion.fromFeatureLevel(value.clientTransactionVersion))) + val transactionMetadata = new TransactionMetadata( + transactionalId = transactionalId, + producerId = value.producerId, + previousProducerId = value.previousProducerId, + nextProducerId = value.nextProducerId, + producerEpoch = value.producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = value.transactionTimeoutMs, + state = TransactionState.fromId(value.transactionStatus), + topicPartitions = mutable.Set.empty[TopicPartition], + txnStartTimestamp = value.transactionStartTimestampMs, + txnLastUpdateTimestamp = value.transactionLastUpdateTimestampMs, + clientTransactionVersion = TransactionVersion.fromFeatureLevel(value.clientTransactionVersion)) + + if (!transactionMetadata.state.equals(Empty)) + value.transactionPartitions.forEach(partitionsSchema => + transactionMetadata.addPartitions(partitionsSchema.partitionIds + .asScala + .map(partitionId => new TopicPartition(partitionsSchema.topic, partitionId)) + .toSet) + ) + Some(transactionMetadata) } else throw new IllegalStateException(s"Unknown version $version from the transaction log message value") } } + + /** + * Exposed for printing records using [[kafka.tools.DumpLogSegments]] + */ + def formatRecordKeyAndValue(record: Record): (Option[String], Option[String]) = { + TransactionLog.readTxnRecordKey(record.key) match { + case txnKey: TxnKey => + val keyString = s"transaction_metadata::transactionalId=${txnKey.transactionalId}" + + val valueString = TransactionLog.readTxnRecordValue(txnKey.transactionalId, record.value) match { + case None => "" + + case Some(txnMetadata) => s"producerId:${txnMetadata.producerId}," + + s"producerEpoch:${txnMetadata.producerEpoch}," + + s"state=${txnMetadata.state}," + + s"partitions=${txnMetadata.topicPartitions.mkString("[", ",", "]")}," + + s"txnLastUpdateTimestamp=${txnMetadata.txnLastUpdateTimestamp}," + + s"txnTimeoutMs=${txnMetadata.txnTimeoutMs}" + } + + (Some(keyString), Some(valueString)) + + case unknownKey: UnknownKey => + (Some(s"unknown::version=${unknownKey.version}"), None) + } + } + } + +sealed trait BaseKey{ + def version: Short + def transactionalId: String +} + +case class TxnKey(version: Short, transactionalId: String) extends BaseKey { + override def toString: String = transactionalId +} + +case class UnknownKey(version: Short) extends BaseKey { + override def transactionalId: String = null + override def toString: String = transactionalId +} + diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala index 6c395feb5827f..2e71a72420957 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerChannelManager.scala @@ -21,7 +21,7 @@ import kafka.coordinator.transaction.TransactionMarkerChannelManager.{LogAppendR import java.util import java.util.concurrent.{BlockingQueue, ConcurrentHashMap, LinkedBlockingQueue} -import kafka.server.KafkaConfig +import kafka.server.{KafkaConfig, MetadataCache} import kafka.utils.Logging import org.apache.kafka.clients._ import org.apache.kafka.common.metrics.Metrics @@ -32,15 +32,12 @@ import org.apache.kafka.common.requests.{TransactionResult, WriteTxnMarkersReque import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{Node, Reconfigurable, TopicPartition} -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TxnTransitMetadata} -import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} import scala.collection.{concurrent, immutable} import scala.jdk.CollectionConverters._ -import scala.jdk.javaapi.OptionConverters object TransactionMarkerChannelManager { private val UnknownDestinationQueueSizeMetricName = "UnknownDestinationQueueSize" @@ -326,16 +323,16 @@ class TransactionMarkerChannelManager( info(s"Replaced an existing pending complete txn $prev with $pendingCompleteTxn while adding markers to send.") } addTxnMarkersToBrokerQueue(txnMetadata.producerId, - txnMetadata.producerEpoch, txnResult, pendingCompleteTxn, txnMetadata.topicPartitions.asScala.toSet) + txnMetadata.producerEpoch, txnResult, pendingCompleteTxn, txnMetadata.topicPartitions.toSet) maybeWriteTxnCompletion(transactionalId) } def numTxnsWithPendingMarkers: Int = transactionsWithPendingMarkers.size private def hasPendingMarkersToWrite(txnMetadata: TransactionMetadata): Boolean = { - txnMetadata.inLock(() => - !txnMetadata.topicPartitions.isEmpty - ) + txnMetadata.inLock { + txnMetadata.topicPartitions.nonEmpty + } } def maybeWriteTxnCompletion(transactionalId: String): Unit = { @@ -385,7 +382,7 @@ class TransactionMarkerChannelManager( topicPartitions: immutable.Set[TopicPartition]): Unit = { val txnTopicPartition = txnStateManager.partitionFor(pendingCompleteTxn.transactionalId) val partitionsByDestination: immutable.Map[Option[Node], immutable.Set[TopicPartition]] = topicPartitions.groupBy { topicPartition: TopicPartition => - OptionConverters.toScala(metadataCache.getPartitionLeaderEndpoint(topicPartition.topic, topicPartition.partition, interBrokerListenerName)) + metadataCache.getPartitionLeaderEndpoint(topicPartition.topic, topicPartition.partition, interBrokerListenerName) } val coordinatorEpoch = pendingCompleteTxn.coordinatorEpoch @@ -422,9 +419,9 @@ class TransactionMarkerChannelManager( val txnMetadata = epochAndMetadata.transactionMetadata - txnMetadata.inLock(() => + txnMetadata.inLock { topicPartitions.foreach(txnMetadata.removePartition) - ) + } maybeWriteTxnCompletion(transactionalId) } diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala index 63990fda9853b..d95dabab6c356 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandler.scala @@ -131,7 +131,7 @@ class TransactionMarkerRequestCompletionHandler(brokerId: Int, txnMarkerChannelManager.removeMarkersForTxn(pendingCompleteTxn) abortSending = true } else { - txnMetadata.inLock(() => { + txnMetadata.inLock { for ((topicPartition, error) <- errors.asScala) { error match { case Errors.NONE => @@ -178,7 +178,7 @@ class TransactionMarkerRequestCompletionHandler(brokerId: Int, throw new IllegalStateException(s"Unexpected error ${other.exceptionName} while sending txn marker for $transactionalId") } } - }) + } } if (!abortSending) { diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala new file mode 100644 index 0000000000000..31daebac76391 --- /dev/null +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionMetadata.scala @@ -0,0 +1,641 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.transaction + +import java.util.concurrent.locks.ReentrantLock +import kafka.utils.{CoreUtils, Logging, nonthreadsafe} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.record.RecordBatch +import org.apache.kafka.server.common.TransactionVersion + +import scala.collection.{immutable, mutable} + + +object TransactionState { + val AllStates: Set[TransactionState] = Set( + Empty, + Ongoing, + PrepareCommit, + PrepareAbort, + CompleteCommit, + CompleteAbort, + Dead, + PrepareEpochFence + ) + + def fromName(name: String): Option[TransactionState] = { + AllStates.find(_.name == name) + } + + def fromId(id: Byte): TransactionState = { + id match { + case 0 => Empty + case 1 => Ongoing + case 2 => PrepareCommit + case 3 => PrepareAbort + case 4 => CompleteCommit + case 5 => CompleteAbort + case 6 => Dead + case 7 => PrepareEpochFence + case _ => throw new IllegalStateException(s"Unknown transaction state id $id from the transaction status message") + } + } +} + +private[transaction] sealed trait TransactionState { + def id: Byte + + /** + * Get the name of this state. This is exposed through the `DescribeTransactions` API. + */ + def name: String + + def validPreviousStates: Set[TransactionState] + + def isExpirationAllowed: Boolean = false +} + +/** + * Transaction has not existed yet + * + * transition: received AddPartitionsToTxnRequest => Ongoing + * received AddOffsetsToTxnRequest => Ongoing + * received EndTxnRequest with abort and TransactionV2 enabled => PrepareAbort + */ +private[transaction] case object Empty extends TransactionState { + val id: Byte = 0 + val name: String = "Empty" + val validPreviousStates: Set[TransactionState] = Set(Empty, CompleteCommit, CompleteAbort) + override def isExpirationAllowed: Boolean = true +} + +/** + * Transaction has started and ongoing + * + * transition: received EndTxnRequest with commit => PrepareCommit + * received EndTxnRequest with abort => PrepareAbort + * received AddPartitionsToTxnRequest => Ongoing + * received AddOffsetsToTxnRequest => Ongoing + */ +private[transaction] case object Ongoing extends TransactionState { + val id: Byte = 1 + val name: String = "Ongoing" + val validPreviousStates: Set[TransactionState] = Set(Ongoing, Empty, CompleteCommit, CompleteAbort) +} + +/** + * Group is preparing to commit + * + * transition: received acks from all partitions => CompleteCommit + */ +private[transaction] case object PrepareCommit extends TransactionState { + val id: Byte = 2 + val name: String = "PrepareCommit" + val validPreviousStates: Set[TransactionState] = Set(Ongoing) +} + +/** + * Group is preparing to abort + * + * transition: received acks from all partitions => CompleteAbort + * + * Note, In transaction v2, we allow Empty, CompleteCommit, CompleteAbort to transition to PrepareAbort. because the + * client may not know the txn state on the server side, it needs to send endTxn request when uncertain. + */ +private[transaction] case object PrepareAbort extends TransactionState { + val id: Byte = 3 + val name: String = "PrepareAbort" + val validPreviousStates: Set[TransactionState] = Set(Ongoing, PrepareEpochFence, Empty, CompleteCommit, CompleteAbort) +} + +/** + * Group has completed commit + * + * Will soon be removed from the ongoing transaction cache + */ +private[transaction] case object CompleteCommit extends TransactionState { + val id: Byte = 4 + val name: String = "CompleteCommit" + val validPreviousStates: Set[TransactionState] = Set(PrepareCommit) + override def isExpirationAllowed: Boolean = true +} + +/** + * Group has completed abort + * + * Will soon be removed from the ongoing transaction cache + */ +private[transaction] case object CompleteAbort extends TransactionState { + val id: Byte = 5 + val name: String = "CompleteAbort" + val validPreviousStates: Set[TransactionState] = Set(PrepareAbort) + override def isExpirationAllowed: Boolean = true +} + +/** + * TransactionalId has expired and is about to be removed from the transaction cache + */ +private[transaction] case object Dead extends TransactionState { + val id: Byte = 6 + val name: String = "Dead" + val validPreviousStates: Set[TransactionState] = Set(Empty, CompleteAbort, CompleteCommit) +} + +/** + * We are in the middle of bumping the epoch and fencing out older producers. + */ + +private[transaction] case object PrepareEpochFence extends TransactionState { + val id: Byte = 7 + val name: String = "PrepareEpochFence" + val validPreviousStates: Set[TransactionState] = Set(Ongoing) +} + +private[transaction] object TransactionMetadata { + def isEpochExhausted(producerEpoch: Short): Boolean = producerEpoch >= Short.MaxValue - 1 +} + +// this is a immutable object representing the target transition of the transaction metadata +private[transaction] case class TxnTransitMetadata(producerId: Long, + prevProducerId: Long, + nextProducerId: Long, + producerEpoch: Short, + lastProducerEpoch: Short, + txnTimeoutMs: Int, + txnState: TransactionState, + topicPartitions: immutable.Set[TopicPartition], + txnStartTimestamp: Long, + txnLastUpdateTimestamp: Long, + clientTransactionVersion: TransactionVersion) { + override def toString: String = { + "TxnTransitMetadata(" + + s"producerId=$producerId, " + + s"previousProducerId=$prevProducerId, " + + s"nextProducerId=$nextProducerId, " + + s"producerEpoch=$producerEpoch, " + + s"lastProducerEpoch=$lastProducerEpoch, " + + s"txnTimeoutMs=$txnTimeoutMs, " + + s"txnState=$txnState, " + + s"topicPartitions=$topicPartitions, " + + s"txnStartTimestamp=$txnStartTimestamp, " + + s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp, " + + s"clientTransactionVersion=$clientTransactionVersion)" + } +} + +/** + * + * @param producerId producer id + * @param previousProducerId producer id for the last committed transaction with this transactional ID + * @param nextProducerId Latest producer ID sent to the producer for the given transactional ID + * @param producerEpoch current epoch of the producer + * @param lastProducerEpoch last epoch of the producer + * @param txnTimeoutMs timeout to be used to abort long running transactions + * @param state current state of the transaction + * @param topicPartitions current set of partitions that are part of this transaction + * @param txnStartTimestamp time the transaction was started, i.e., when first partition is added + * @param txnLastUpdateTimestamp updated when any operation updates the TransactionMetadata. To be used for expiration + * @param clientTransactionVersion TransactionVersion used by the client when the state was transitioned + */ +@nonthreadsafe +private[transaction] class TransactionMetadata(val transactionalId: String, + var producerId: Long, + var previousProducerId: Long, + var nextProducerId: Long, + var producerEpoch: Short, + var lastProducerEpoch: Short, + var txnTimeoutMs: Int, + var state: TransactionState, + val topicPartitions: mutable.Set[TopicPartition], + @volatile var txnStartTimestamp: Long = -1, + @volatile var txnLastUpdateTimestamp: Long, + var clientTransactionVersion: TransactionVersion) extends Logging { + + // pending state is used to indicate the state that this transaction is going to + // transit to, and for blocking future attempts to transit it again if it is not legal; + // initialized as the same as the current state + var pendingState: Option[TransactionState] = None + + // Indicates that during a previous attempt to fence a producer, the bumped epoch may not have been + // successfully written to the log. If this is true, we will not bump the epoch again when fencing + var hasFailedEpochFence: Boolean = false + + private[transaction] val lock = new ReentrantLock + + def inLock[T](fun: => T): T = CoreUtils.inLock(lock)(fun) + + def addPartitions(partitions: collection.Set[TopicPartition]): Unit = { + topicPartitions ++= partitions + } + + def removePartition(topicPartition: TopicPartition): Unit = { + if (state != PrepareCommit && state != PrepareAbort) + throw new IllegalStateException(s"Transaction metadata's current state is $state, and its pending state is $pendingState " + + s"while trying to remove partitions whose txn marker has been sent, this is not expected") + + topicPartitions -= topicPartition + } + + // this is visible for test only + def prepareNoTransit(): TxnTransitMetadata = { + // do not call transitTo as it will set the pending state, a follow-up call to abort the transaction will set its pending state + TxnTransitMetadata(producerId, previousProducerId, nextProducerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, state, topicPartitions.toSet, + txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) + } + + def prepareFenceProducerEpoch(): TxnTransitMetadata = { + if (producerEpoch == Short.MaxValue) + throw new IllegalStateException(s"Cannot fence producer with epoch equal to Short.MaxValue since this would overflow") + + // If we've already failed to fence an epoch (because the write to the log failed), we don't increase it again. + // This is safe because we never return the epoch to client if we fail to fence the epoch + val bumpedEpoch = if (hasFailedEpochFence) producerEpoch else (producerEpoch + 1).toShort + + prepareTransitionTo(PrepareEpochFence, producerId, bumpedEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, + topicPartitions.toSet, txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) + } + + def prepareIncrementProducerEpoch(newTxnTimeoutMs: Int, + expectedProducerEpoch: Option[Short], + updateTimestamp: Long): Either[Errors, TxnTransitMetadata] = { + if (isProducerEpochExhausted) + throw new IllegalStateException(s"Cannot allocate any more producer epochs for producerId $producerId") + + val bumpedEpoch = (producerEpoch + 1).toShort + val epochBumpResult: Either[Errors, (Short, Short)] = expectedProducerEpoch match { + case None => + // If no expected epoch was provided by the producer, bump the current epoch and set the last epoch to -1 + // In the case of a new producer, producerEpoch will be -1 and bumpedEpoch will be 0 + Right(bumpedEpoch, RecordBatch.NO_PRODUCER_EPOCH) + + case Some(expectedEpoch) => + if (producerEpoch == RecordBatch.NO_PRODUCER_EPOCH || expectedEpoch == producerEpoch) + // If the expected epoch matches the current epoch, or if there is no current epoch, the producer is attempting + // to continue after an error and no other producer has been initialized. Bump the current and last epochs. + // The no current epoch case means this is a new producer; producerEpoch will be -1 and bumpedEpoch will be 0 + Right(bumpedEpoch, producerEpoch) + else if (expectedEpoch == lastProducerEpoch) + // If the expected epoch matches the previous epoch, it is a retry of a successful call, so just return the + // current epoch without bumping. There is no danger of this producer being fenced, because a new producer + // calling InitProducerId would have caused the last epoch to be set to -1. + // Note that if the IBP is prior to 2.4.IV1, the lastProducerId and lastProducerEpoch will not be written to + // the transaction log, so a retry that spans a coordinator change will fail. We expect this to be a rare case. + Right(producerEpoch, lastProducerEpoch) + else { + // Otherwise, the producer has a fenced epoch and should receive an PRODUCER_FENCED error + info(s"Expected producer epoch $expectedEpoch does not match current " + + s"producer epoch $producerEpoch or previous producer epoch $lastProducerEpoch") + Left(Errors.PRODUCER_FENCED) + } + } + + epochBumpResult match { + case Right((nextEpoch, lastEpoch)) => Right(prepareTransitionTo(Empty, producerId, nextEpoch, lastEpoch, newTxnTimeoutMs, + immutable.Set.empty[TopicPartition], -1, updateTimestamp, clientTransactionVersion)) + + case Left(err) => Left(err) + } + } + + def prepareProducerIdRotation(newProducerId: Long, + newTxnTimeoutMs: Int, + updateTimestamp: Long, + recordLastEpoch: Boolean): TxnTransitMetadata = { + if (hasPendingTransaction) + throw new IllegalStateException("Cannot rotate producer ids while a transaction is still pending") + + prepareTransitionTo(Empty, newProducerId, 0, if (recordLastEpoch) producerEpoch else RecordBatch.NO_PRODUCER_EPOCH, + newTxnTimeoutMs, immutable.Set.empty[TopicPartition], -1, updateTimestamp, clientTransactionVersion) + } + + def prepareAddPartitions(addedTopicPartitions: immutable.Set[TopicPartition], updateTimestamp: Long, clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { + val newTxnStartTimestamp = state match { + case Empty | CompleteAbort | CompleteCommit => updateTimestamp + case _ => txnStartTimestamp + } + + prepareTransitionTo(Ongoing, producerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, + (topicPartitions ++ addedTopicPartitions).toSet, newTxnStartTimestamp, updateTimestamp, clientTransactionVersion) + } + + def prepareAbortOrCommit(newState: TransactionState, clientTransactionVersion: TransactionVersion, nextProducerId: Long, updateTimestamp: Long, noPartitionAdded: Boolean): TxnTransitMetadata = { + val (updatedProducerEpoch, updatedLastProducerEpoch) = if (clientTransactionVersion.supportsEpochBump()) { + // We already ensured that we do not overflow here. MAX_SHORT is the highest possible value. + ((producerEpoch + 1).toShort, producerEpoch) + } else { + (producerEpoch, lastProducerEpoch) + } + + // With transaction V2, it is allowed to abort the transaction without adding any partitions. Then, the transaction + // start time is uncertain but it is still required. So we can use the update time as the transaction start time. + val newTxnStartTimestamp = if (noPartitionAdded) updateTimestamp else txnStartTimestamp + prepareTransitionTo(newState, producerId, nextProducerId, updatedProducerEpoch, updatedLastProducerEpoch, txnTimeoutMs, topicPartitions.toSet, + newTxnStartTimestamp, updateTimestamp, clientTransactionVersion) + } + + def prepareComplete(updateTimestamp: Long): TxnTransitMetadata = { + val newState = if (state == PrepareCommit) CompleteCommit else CompleteAbort + + // Since the state change was successfully written to the log, unset the flag for a failed epoch fence + hasFailedEpochFence = false + val (updatedProducerId, updatedProducerEpoch) = + // In the prepareComplete transition for the overflow case, the lastProducerEpoch is kept at MAX-1, + // which is the last epoch visible to the client. + // Internally, however, during the transition between prepareAbort/prepareCommit and prepareComplete, the producer epoch + // reaches MAX but the client only sees the transition as MAX-1 followed by 0. + // When an epoch overflow occurs, we set the producerId to nextProducerId and reset the epoch to 0, + // but lastProducerEpoch remains MAX-1 to maintain consistency with what the client last saw. + if (clientTransactionVersion.supportsEpochBump() && nextProducerId != RecordBatch.NO_PRODUCER_ID) { + (nextProducerId, 0.toShort) + } else { + (producerId, producerEpoch) + } + prepareTransitionTo(newState, updatedProducerId, RecordBatch.NO_PRODUCER_ID, updatedProducerEpoch, lastProducerEpoch, txnTimeoutMs, Set.empty[TopicPartition], + txnStartTimestamp, updateTimestamp, clientTransactionVersion) + } + + def prepareDead(): TxnTransitMetadata = { + prepareTransitionTo(Dead, producerId, producerEpoch, lastProducerEpoch, txnTimeoutMs, Set.empty[TopicPartition], + txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) + } + + /** + * Check if the epochs have been exhausted for the current producerId. We do not allow the client to use an + * epoch equal to Short.MaxValue to ensure that the coordinator will always be able to fence an existing producer. + */ + def isProducerEpochExhausted: Boolean = TransactionMetadata.isEpochExhausted(producerEpoch) + + private def hasPendingTransaction: Boolean = { + state match { + case Ongoing | PrepareAbort | PrepareCommit => true + case _ => false + } + } + + private def prepareTransitionTo(updatedState: TransactionState, + updatedProducerId: Long, + updatedEpoch: Short, + updatedLastEpoch: Short, + updatedTxnTimeoutMs: Int, + updatedTopicPartitions: immutable.Set[TopicPartition], + updatedTxnStartTimestamp: Long, + updateTimestamp: Long, + clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { + prepareTransitionTo(updatedState, updatedProducerId, RecordBatch.NO_PRODUCER_ID, updatedEpoch, updatedLastEpoch, updatedTxnTimeoutMs, updatedTopicPartitions, updatedTxnStartTimestamp, updateTimestamp, clientTransactionVersion) + } + + private def prepareTransitionTo(updatedState: TransactionState, + updatedProducerId: Long, + nextProducerId: Long, + updatedEpoch: Short, + updatedLastEpoch: Short, + updatedTxnTimeoutMs: Int, + updatedTopicPartitions: immutable.Set[TopicPartition], + updatedTxnStartTimestamp: Long, + updateTimestamp: Long, + clientTransactionVersion: TransactionVersion): TxnTransitMetadata = { + if (pendingState.isDefined) + throw new IllegalStateException(s"Preparing transaction state transition to $updatedState " + + s"while it already a pending state ${pendingState.get}") + + if (updatedProducerId < 0) + throw new IllegalArgumentException(s"Illegal new producer id $updatedProducerId") + + // The epoch is initialized to NO_PRODUCER_EPOCH when the TransactionMetadata + // is created for the first time and it could stay like this until transitioning + // to Dead. + if (updatedState != Dead && updatedEpoch < 0) + throw new IllegalArgumentException(s"Illegal new producer epoch $updatedEpoch") + + // check that the new state transition is valid and update the pending state if necessary + if (updatedState.validPreviousStates.contains(state)) { + val transitMetadata = TxnTransitMetadata(updatedProducerId, producerId, nextProducerId, updatedEpoch, updatedLastEpoch, updatedTxnTimeoutMs, updatedState, + updatedTopicPartitions, updatedTxnStartTimestamp, updateTimestamp, clientTransactionVersion) + debug(s"TransactionalId $transactionalId prepare transition from $state to $transitMetadata") + pendingState = Some(updatedState) + transitMetadata + } else { + throw new IllegalStateException(s"Preparing transaction state transition to $updatedState failed since the target state" + + s" $updatedState is not a valid previous state of the current state $state") + } + } + + def completeTransitionTo(transitMetadata: TxnTransitMetadata): Unit = { + // metadata transition is valid only if all the following conditions are met: + // + // 1. the new state is already indicated in the pending state. + // 2. the epoch should be either the same value, the old value + 1, or 0 if we have a new producerId. + // 3. the last update time is no smaller than the old value. + // 4. the old partitions set is a subset of the new partitions set. + // + // plus, we should only try to update the metadata after the corresponding log entry has been successfully + // written and replicated (see TransactionStateManager#appendTransactionToLog) + // + // if valid, transition is done via overwriting the whole object to ensure synchronization + + val toState = pendingState.getOrElse { + fatal(s"$this's transition to $transitMetadata failed since pendingState is not defined: this should not happen") + + throw new IllegalStateException(s"TransactionalId $transactionalId " + + "completing transaction state transition while it does not have a pending state") + } + + if (toState != transitMetadata.txnState) { + throwStateTransitionFailure(transitMetadata) + } else { + toState match { + case Empty => // from initPid + if ((producerEpoch != transitMetadata.producerEpoch && !validProducerEpochBump(transitMetadata)) || + transitMetadata.topicPartitions.nonEmpty || + transitMetadata.txnStartTimestamp != -1) { + + throwStateTransitionFailure(transitMetadata) + } else { + txnTimeoutMs = transitMetadata.txnTimeoutMs + producerEpoch = transitMetadata.producerEpoch + lastProducerEpoch = transitMetadata.lastProducerEpoch + producerId = transitMetadata.producerId + previousProducerId = transitMetadata.prevProducerId + } + + case Ongoing => // from addPartitions + if (!validProducerEpoch(transitMetadata) || + !topicPartitions.subsetOf(transitMetadata.topicPartitions) || + txnTimeoutMs != transitMetadata.txnTimeoutMs) { + + throwStateTransitionFailure(transitMetadata) + } else { + txnStartTimestamp = transitMetadata.txnStartTimestamp + addPartitions(transitMetadata.topicPartitions) + } + + case PrepareAbort | PrepareCommit => // from endTxn + // In V2, we allow state transits from Empty, CompleteCommit and CompleteAbort to PrepareAbort. It is possible + // their updated start time is not equal to the current start time. + val allowedEmptyAbort = toState == PrepareAbort && transitMetadata.clientTransactionVersion.supportsEpochBump() && + (state == Empty || state == CompleteCommit || state == CompleteAbort) + val validTimestamp = txnStartTimestamp == transitMetadata.txnStartTimestamp || allowedEmptyAbort + if (!validProducerEpoch(transitMetadata) || + !topicPartitions.toSet.equals(transitMetadata.topicPartitions) || + txnTimeoutMs != transitMetadata.txnTimeoutMs || !validTimestamp) { + + throwStateTransitionFailure(transitMetadata) + } else if (transitMetadata.clientTransactionVersion.supportsEpochBump()) { + producerEpoch = transitMetadata.producerEpoch + lastProducerEpoch = transitMetadata.lastProducerEpoch + nextProducerId = transitMetadata.nextProducerId + txnStartTimestamp = transitMetadata.txnStartTimestamp + } + + case CompleteAbort | CompleteCommit => // from write markers + if (!validProducerEpoch(transitMetadata) || + txnTimeoutMs != transitMetadata.txnTimeoutMs || + transitMetadata.txnStartTimestamp == -1) { + throwStateTransitionFailure(transitMetadata) + } else { + txnStartTimestamp = transitMetadata.txnStartTimestamp + topicPartitions.clear() + if (transitMetadata.clientTransactionVersion.supportsEpochBump()) { + producerEpoch = transitMetadata.producerEpoch + lastProducerEpoch = transitMetadata.lastProducerEpoch + previousProducerId = transitMetadata.prevProducerId + producerId = transitMetadata.producerId + nextProducerId = transitMetadata.nextProducerId + } + } + + case PrepareEpochFence => + // We should never get here, since once we prepare to fence the epoch, we immediately set the pending state + // to PrepareAbort, and then consequently to CompleteAbort after the markers are written.. So we should never + // ever try to complete a transition to PrepareEpochFence, as it is not a valid previous state for any other state, and hence + // can never be transitioned out of. + throwStateTransitionFailure(transitMetadata) + + + case Dead => + // The transactionalId was being expired. The completion of the operation should result in removal of the + // the metadata from the cache, so we should never realistically transition to the dead state. + throw new IllegalStateException(s"TransactionalId $transactionalId is trying to complete a transition to " + + s"$toState. This means that the transactionalId was being expired, and the only acceptable completion of " + + s"this operation is to remove the transaction metadata from the cache, not to persist the $toState in the log.") + } + + debug(s"TransactionalId $transactionalId complete transition from $state to $transitMetadata") + clientTransactionVersion = transitMetadata.clientTransactionVersion + txnLastUpdateTimestamp = transitMetadata.txnLastUpdateTimestamp + pendingState = None + state = toState + } + } + + /** + * Validates the producer epoch and ID based on transaction state and version. + * + * Logic: + * * 1. **Overflow Case in Transactions V2:** + * * - During overflow (epoch reset to 0), we compare both `lastProducerEpoch` values since it + * * does not change during completion. + * * - For PrepareComplete, the producer ID has been updated. We ensure that the `prevProducerID` + * * in the transit metadata matches the current producer ID, confirming the change. + * * + * * 2. **Epoch Bump Case in Transactions V2:** + * * - For PrepareCommit or PrepareAbort, the producer epoch has been bumped. We ensure the `lastProducerEpoch` + * * in transit metadata matches the current producer epoch, confirming the bump. + * * - We also verify that the producer ID remains the same. + * * + * * 3. **Other Cases:** + * * - For other states and versions, check if the producer epoch and ID match the current values. + * + * @param transitMetadata The transaction transition metadata containing state, producer epoch, and ID. + * @return true if the producer epoch and ID are valid; false otherwise. + */ + private def validProducerEpoch(transitMetadata: TxnTransitMetadata): Boolean = { + val isAtLeastTransactionsV2 = transitMetadata.clientTransactionVersion.supportsEpochBump() + val txnState = transitMetadata.txnState + val transitProducerEpoch = transitMetadata.producerEpoch + val transitProducerId = transitMetadata.producerId + val transitLastProducerEpoch = transitMetadata.lastProducerEpoch + + (isAtLeastTransactionsV2, txnState, transitProducerEpoch) match { + case (true, CompleteCommit | CompleteAbort, epoch) if epoch == 0.toShort => + transitLastProducerEpoch == lastProducerEpoch && + transitMetadata.prevProducerId == producerId + + case (true, PrepareCommit | PrepareAbort, _) => + transitLastProducerEpoch == producerEpoch && + transitProducerId == producerId + + case _ => + transitProducerEpoch == producerEpoch && + transitProducerId == producerId + } + } + + private def validProducerEpochBump(transitMetadata: TxnTransitMetadata): Boolean = { + val transitEpoch = transitMetadata.producerEpoch + val transitProducerId = transitMetadata.producerId + transitEpoch == producerEpoch + 1 || (transitEpoch == 0 && transitProducerId != producerId) + } + + private def throwStateTransitionFailure(txnTransitMetadata: TxnTransitMetadata): Unit = { + fatal(s"${this.toString}'s transition to $txnTransitMetadata failed: this should not happen") + + throw new IllegalStateException(s"TransactionalId $transactionalId failed transition to state $txnTransitMetadata " + + "due to unexpected metadata") + } + + def pendingTransitionInProgress: Boolean = pendingState.isDefined + + override def toString: String = { + "TransactionMetadata(" + + s"transactionalId=$transactionalId, " + + s"producerId=$producerId, " + + s"previousProducerId=$previousProducerId, " + + s"nextProducerId=$nextProducerId, " + + s"producerEpoch=$producerEpoch, " + + s"lastProducerEpoch=$lastProducerEpoch, " + + s"txnTimeoutMs=$txnTimeoutMs, " + + s"state=$state, " + + s"pendingState=$pendingState, " + + s"topicPartitions=$topicPartitions, " + + s"txnStartTimestamp=$txnStartTimestamp, " + + s"txnLastUpdateTimestamp=$txnLastUpdateTimestamp, " + + s"clientTransactionVersion=$clientTransactionVersion)" + } + + override def equals(that: Any): Boolean = that match { + case other: TransactionMetadata => + transactionalId == other.transactionalId && + producerId == other.producerId && + producerEpoch == other.producerEpoch && + lastProducerEpoch == other.lastProducerEpoch && + txnTimeoutMs == other.txnTimeoutMs && + state.equals(other.state) && + topicPartitions.equals(other.topicPartitions) && + txnStartTimestamp == other.txnStartTimestamp && + txnLastUpdateTimestamp == other.txnLastUpdateTimestamp && + clientTransactionVersion == other.clientTransactionVersion + case _ => false + } + + override def hashCode(): Int = { + val fields = Seq(transactionalId, producerId, producerEpoch, txnTimeoutMs, state, topicPartitions, + txnStartTimestamp, txnLastUpdateTimestamp, clientTransactionVersion) + fields.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b) + } +} diff --git a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala index 82b960c5ba799..a6e7dd30bf062 100644 --- a/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala +++ b/core/src/main/scala/kafka/coordinator/transaction/TransactionStateManager.scala @@ -18,12 +18,11 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer import java.util.Properties -import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.ReentrantReadWriteLock -import kafka.server.ReplicaManager +import kafka.server.{MetadataCache, ReplicaManager} import kafka.utils.CoreUtils.{inReadLock, inWriteLock} -import kafka.utils.Logging +import kafka.utils.{Logging, Pool} import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.ListTransactionsResponseData @@ -34,19 +33,14 @@ import org.apache.kafka.common.record.{FileRecords, MemoryRecords, MemoryRecords import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.TransactionResult import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{KafkaException, TopicIdPartition, TopicPartition} -import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.server.common.{RequestLocal, TransactionVersion} import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler import org.apache.kafka.storage.internals.log.AppendOrigin -import com.google.re2j.{Pattern, PatternSyntaxException} -import org.apache.kafka.common.errors.InvalidRegularExpression - -import java.util.Optional import scala.jdk.CollectionConverters._ import scala.collection.mutable @@ -113,8 +107,6 @@ class TransactionStateManager(brokerId: Int, version } - private[transaction] def isTransaction2pcEnabled(): Boolean = { config.transaction2PCEnable } - // visible for testing only private[transaction] def addLoadingPartition(partitionId: Int, coordinatorEpoch: Int): Unit = { val partitionAndLeaderEpoch = TransactionPartitionAndLeaderEpoch(partitionId, coordinatorEpoch) @@ -131,15 +123,13 @@ class TransactionStateManager(brokerId: Int, val now = time.milliseconds() inReadLock(stateLock) { transactionMetadataCache.flatMap { case (_, entry) => - entry.metadataPerTransactionalId.asScala.filter { case (_, txnMetadata) => + entry.metadataPerTransactionalId.filter { case (_, txnMetadata) => if (txnMetadata.pendingTransitionInProgress) { false } else { txnMetadata.state match { - case TransactionState.ONGOING => - // Do not apply timeout to distributed two phase commit transactions. - (!txnMetadata.isDistributedTwoPhaseCommitTxn) && - (txnMetadata.txnStartTimestamp + txnMetadata.txnTimeoutMs < now) + case Ongoing => + txnMetadata.txnStartTimestamp + txnMetadata.txnTimeoutMs < now case _ => false } } @@ -161,7 +151,7 @@ class TransactionStateManager(brokerId: Int, val maxBatchSize = logConfig.maxMessageSize val expired = mutable.ListBuffer.empty[TransactionalIdCoordinatorEpochAndMetadata] var recordsBuilder: MemoryRecordsBuilder = null - val stateEntries = txnMetadataCacheEntry.metadataPerTransactionalId.values.asScala.iterator.buffered + val stateEntries = txnMetadataCacheEntry.metadataPerTransactionalId.values.iterator.buffered def flushRecordsBuilder(): Unit = { writeTombstonesForExpiredTransactionalIds( @@ -178,7 +168,7 @@ class TransactionStateManager(brokerId: Int, val transactionalId = txnMetadata.transactionalId var fullBatch = false - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadata.pendingState.isEmpty && shouldExpire(txnMetadata, currentTimeMs)) { if (recordsBuilder == null) { recordsBuilder = MemoryRecords.builder( @@ -201,7 +191,7 @@ class TransactionStateManager(brokerId: Int, fullBatch = true } } - }) + } if (fullBatch) { flushRecordsBuilder() @@ -258,16 +248,16 @@ class TransactionStateManager(brokerId: Int, expiredForPartition: Iterable[TransactionalIdCoordinatorEpochAndMetadata], tombstoneRecords: MemoryRecords ): Unit = { - def removeFromCacheCallback(responses: collection.Map[TopicIdPartition, PartitionResponse]): Unit = { + def removeFromCacheCallback(responses: collection.Map[TopicPartition, PartitionResponse]): Unit = { responses.foreachEntry { (topicPartition, response) => inReadLock(stateLock) { transactionMetadataCache.get(topicPartition.partition).foreach { txnMetadataCacheEntry => expiredForPartition.foreach { idCoordinatorEpochAndMetadata => val transactionalId = idCoordinatorEpochAndMetadata.transactionalId val txnMetadata = txnMetadataCacheEntry.metadataPerTransactionalId.get(transactionalId) - txnMetadata.inLock(() => { + txnMetadata.inLock { if (txnMetadataCacheEntry.coordinatorEpoch == idCoordinatorEpochAndMetadata.coordinatorEpoch - && txnMetadata.pendingState.filter(s => s == TransactionState.DEAD).isPresent + && txnMetadata.pendingState.contains(Dead) && txnMetadata.producerEpoch == idCoordinatorEpochAndMetadata.transitMetadata.producerEpoch && response.error == Errors.NONE) { txnMetadataCacheEntry.metadataPerTransactionalId.remove(transactionalId) @@ -278,9 +268,9 @@ class TransactionStateManager(brokerId: Int, s" expected producerEpoch: ${idCoordinatorEpochAndMetadata.transitMetadata.producerEpoch}," + s" coordinatorEpoch: ${txnMetadataCacheEntry.coordinatorEpoch}, expected coordinatorEpoch: " + s"${idCoordinatorEpochAndMetadata.coordinatorEpoch}") - txnMetadata.pendingState(Optional.empty()) + txnMetadata.pendingState = None } - }) + } } } } @@ -293,7 +283,7 @@ class TransactionStateManager(brokerId: Int, requiredAcks = TransactionLog.EnforcedRequiredAcks, internalTopicsAllowed = true, origin = AppendOrigin.COORDINATOR, - entriesPerPartition = Map(replicaManager.topicIdPartition(transactionPartition) -> tombstoneRecords), + entriesPerPartition = Map(transactionPartition -> tombstoneRecords), responseCallback = removeFromCacheCallback, requestLocal = RequestLocal.noCaching) } @@ -320,8 +310,7 @@ class TransactionStateManager(brokerId: Int, def listTransactionStates( filterProducerIds: Set[Long], filterStateNames: Set[String], - filterDurationMs: Long, - filterTransactionalIdPattern: String + filterDurationMs: Long ): ListTransactionsResponseData = { inReadLock(stateLock) { val response = new ListTransactionsResponseData() @@ -330,15 +319,15 @@ class TransactionStateManager(brokerId: Int, } else { val filterStates = mutable.Set.empty[TransactionState] filterStateNames.foreach { stateName => - TransactionState.fromName(stateName).ifPresentOrElse( - state => filterStates += state, - () => response.unknownStateFilters.add(stateName) - ) + TransactionState.fromName(stateName) match { + case Some(state) => filterStates += state + case None => response.unknownStateFilters.add(stateName) + } } val now : Long = time.milliseconds() - def shouldInclude(txnMetadata: TransactionMetadata, pattern: Pattern): Boolean = { - if (txnMetadata.state == TransactionState.DEAD) { + def shouldInclude(txnMetadata: TransactionMetadata): Boolean = { + if (txnMetadata.state == Dead) { // We filter the `Dead` state since it is a transient state which // indicates that the transactionalId and its metadata are in the // process of expiration and removal. @@ -349,34 +338,23 @@ class TransactionStateManager(brokerId: Int, false } else if (filterDurationMs >= 0 && (now - txnMetadata.txnStartTimestamp) <= filterDurationMs) { false - } else if (pattern != null) { - pattern.matcher(txnMetadata.transactionalId).matches() } else { true } } val states = new java.util.ArrayList[ListTransactionsResponseData.TransactionState] - val pattern = if (filterTransactionalIdPattern != null && filterTransactionalIdPattern.nonEmpty) { - try { - Pattern.compile(filterTransactionalIdPattern) - } - catch { - case e: PatternSyntaxException => - throw new InvalidRegularExpression(String.format("Transaction ID pattern `%s` is not a valid regular expression: %s.", filterTransactionalIdPattern, e.getMessage)) - } - } else null transactionMetadataCache.foreachEntry { (_, cache) => - cache.metadataPerTransactionalId.forEach { (_, txnMetadata) => - txnMetadata.inLock(() => { - if (shouldInclude(txnMetadata, pattern)) { + cache.metadataPerTransactionalId.values.foreach { txnMetadata => + txnMetadata.inLock { + if (shouldInclude(txnMetadata)) { states.add(new ListTransactionsResponseData.TransactionState() .setTransactionalId(txnMetadata.transactionalId) .setProducerId(txnMetadata.producerId) - .setTransactionState(txnMetadata.state.stateName) + .setTransactionState(txnMetadata.state.name) ) } - }) + } } } response.setErrorCode(Errors.NONE.code) @@ -403,7 +381,7 @@ class TransactionStateManager(brokerId: Int, case Some(cacheEntry) => val txnMetadata = Option(cacheEntry.metadataPerTransactionalId.get(transactionalId)).orElse { createdTxnMetadataOpt.map { createdTxnMetadata => - Option(cacheEntry.metadataPerTransactionalId.putIfAbsent(transactionalId, createdTxnMetadata)) + Option(cacheEntry.metadataPerTransactionalId.putIfNotExists(transactionalId, createdTxnMetadata)) .getOrElse(createdTxnMetadata) } } @@ -417,18 +395,10 @@ class TransactionStateManager(brokerId: Int, } /** - * Validates the provided transaction timeout. - * - If 2PC is enabled, the timeout is always valid (set to Int.MAX by default). - * - Otherwise, the timeout must be a positive value and not exceed the - * configured transaction max timeout. - * - * @param enableTwoPC Whether Two-Phase Commit (2PC) is enabled. - * @param txnTimeoutMs The requested transaction timeout in milliseconds. - * @return `true` if the timeout is valid, `false` otherwise. + * Validate the given transaction timeout value */ - def validateTransactionTimeoutMs(enableTwoPC: Boolean, txnTimeoutMs: Int): Boolean = { - enableTwoPC || (txnTimeoutMs <= config.transactionMaxTimeoutMs && txnTimeoutMs > 0) - } + def validateTransactionTimeoutMs(txnTimeoutMs: Int): Boolean = + txnTimeoutMs <= config.transactionMaxTimeoutMs && txnTimeoutMs > 0 def transactionTopicConfigs: Properties = { val props = new Properties @@ -445,10 +415,10 @@ class TransactionStateManager(brokerId: Int, def partitionFor(transactionalId: String): Int = Utils.abs(transactionalId.hashCode) % transactionTopicPartitionCount - private def loadTransactionMetadata(topicPartition: TopicPartition, coordinatorEpoch: Int): ConcurrentMap[String, TransactionMetadata] = { + private def loadTransactionMetadata(topicPartition: TopicPartition, coordinatorEpoch: Int): Pool[String, TransactionMetadata] = { def logEndOffset = replicaManager.getLogEndOffset(topicPartition).getOrElse(-1L) - val loadedTransactions = new ConcurrentHashMap[String, TransactionMetadata] + val loadedTransactions = new Pool[String, TransactionMetadata] replicaManager.getLog(topicPartition) match { case None => @@ -468,7 +438,10 @@ class TransactionStateManager(brokerId: Int, while (currOffset < logEndOffset && readAtLeastOneRecord && !shuttingDown.get() && inReadLock(stateLock) { loadingPartitions.exists { idAndEpoch: TransactionPartitionAndLeaderEpoch => idAndEpoch.txnPartitionId == topicPartition.partition && idAndEpoch.coordinatorEpoch == coordinatorEpoch}}) { - val fetchDataInfo = log.read(currOffset, config.transactionLogLoadBufferSize, FetchIsolation.LOG_END, true) + val fetchDataInfo = log.read(currOffset, + maxLength = config.transactionLogLoadBufferSize, + isolation = FetchIsolation.LOG_END, + minOneMessage = true) readAtLeastOneRecord = fetchDataInfo.records.sizeInBytes > 0 @@ -492,22 +465,25 @@ class TransactionStateManager(brokerId: Int, fileRecords.readInto(buffer, 0) MemoryRecords.readableRecords(buffer) } + memRecords.batches.forEach { batch => for (record <- batch.asScala) { require(record.hasKey, "Transaction state log's key should not be null") TransactionLog.readTxnRecordKey(record.key) match { - case Left(version) => - warn(s"Unknown message key with version $version" + - s" while loading transaction state from $topicPartition. Ignoring it. " + - "It could be a left over from an aborted upgrade.") - case Right(transactionalId) => + case txnKey: TxnKey => // load transaction metadata along with transaction state + val transactionalId = txnKey.transactionalId TransactionLog.readTxnRecordValue(transactionalId, record.value) match { case None => loadedTransactions.remove(transactionalId) case Some(txnMetadata) => loadedTransactions.put(transactionalId, txnMetadata) } + + case unknownKey: UnknownKey => + warn(s"Unknown message key with version ${unknownKey.version}" + + s" while loading transaction state from $topicPartition. Ignoring it. " + + "It could be a left over from an aborted upgrade.") } } currOffset = batch.nextOffset @@ -526,7 +502,7 @@ class TransactionStateManager(brokerId: Int, */ private[transaction] def addLoadedTransactionsToCache(txnTopicPartition: Int, coordinatorEpoch: Int, - loadedTransactions: ConcurrentMap[String, TransactionMetadata]): Unit = { + loadedTransactions: Pool[String, TransactionMetadata]): Unit = { val txnMetadataCacheEntry = TxnMetadataCacheEntry(coordinatorEpoch, loadedTransactions) val previousTxnMetadataCacheEntryOpt = transactionMetadataCache.put(txnTopicPartition, txnMetadataCacheEntry) @@ -566,21 +542,22 @@ class TransactionStateManager(brokerId: Int, addLoadedTransactionsToCache(topicPartition.partition, coordinatorEpoch, loadedTransactions) val transactionsPendingForCompletion = new mutable.ListBuffer[TransactionalIdCoordinatorEpochAndTransitMetadata] - loadedTransactions.forEach((transactionalId, txnMetadata) => { - txnMetadata.inLock(() => { - // if state is PrepareCommit or PrepareAbort we need to complete the transaction - txnMetadata.state match { - case TransactionState.PREPARE_ABORT => - transactionsPendingForCompletion += - TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.ABORT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case TransactionState.PREPARE_COMMIT => - transactionsPendingForCompletion += - TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.COMMIT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) - case _ => - // nothing needs to be done + loadedTransactions.foreach { + case (transactionalId, txnMetadata) => + txnMetadata.inLock { + // if state is PrepareCommit or PrepareAbort we need to complete the transaction + txnMetadata.state match { + case PrepareAbort => + transactionsPendingForCompletion += + TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.ABORT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case PrepareCommit => + transactionsPendingForCompletion += + TransactionalIdCoordinatorEpochAndTransitMetadata(transactionalId, coordinatorEpoch, TransactionResult.COMMIT, txnMetadata, txnMetadata.prepareComplete(time.milliseconds())) + case _ => + // nothing needs to be done + } } - }) - }) + } // we first remove the partition from loading partition then send out the markers for those pending to be // completed transactions, so that when the markers get sent the attempt of appending the complete transaction @@ -664,18 +641,17 @@ class TransactionStateManager(brokerId: Int, val timestamp = time.milliseconds() val records = MemoryRecords.withRecords(TransactionLog.EnforcedCompression, new SimpleRecord(timestamp, keyBytes, valueBytes)) - val transactionStateTopicPartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionFor(transactionalId)) - val transactionStateTopicIdPartition = replicaManager.topicIdPartition(transactionStateTopicPartition) - val recordsPerPartition = Map(transactionStateTopicIdPartition -> records) + val topicPartition = new TopicPartition(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionFor(transactionalId)) + val recordsPerPartition = Map(topicPartition -> records) // set the callback function to update transaction status in cache after log append completed - def updateCacheCallback(responseStatus: collection.Map[TopicIdPartition, PartitionResponse]): Unit = { + def updateCacheCallback(responseStatus: collection.Map[TopicPartition, PartitionResponse]): Unit = { // the append response should only contain the topics partition - if (responseStatus.size != 1 || !responseStatus.contains(transactionStateTopicIdPartition)) + if (responseStatus.size != 1 || !responseStatus.contains(topicPartition)) throw new IllegalStateException("Append status %s should only have one partition %s" - .format(responseStatus, transactionStateTopicPartition)) + .format(responseStatus, topicPartition)) - val status = responseStatus(transactionStateTopicIdPartition) + val status = responseStatus(topicPartition) var responseError = if (status.error == Errors.NONE) { Errors.NONE @@ -715,7 +691,7 @@ class TransactionStateManager(brokerId: Int, case Right(Some(epochAndMetadata)) => val metadata = epochAndMetadata.transactionMetadata - metadata.inLock(() => { + metadata.inLock { if (epochAndMetadata.coordinatorEpoch != coordinatorEpoch) { // the cache may have been changed due to txn topic partition emigration and immigration, // in this case directly return NOT_COORDINATOR to client and let it to re-discover the transaction coordinator @@ -727,7 +703,7 @@ class TransactionStateManager(brokerId: Int, metadata.completeTransitionTo(newMetadata) debug(s"Updating $transactionalId's transaction state to $newMetadata with coordinator epoch $coordinatorEpoch for $transactionalId succeeded") } - }) + } case Right(None) => // this transactional id no longer exists, maybe the corresponding partition has already been migrated out. @@ -742,7 +718,7 @@ class TransactionStateManager(brokerId: Int, getTransactionState(transactionalId) match { case Right(Some(epochAndTxnMetadata)) => val metadata = epochAndTxnMetadata.transactionMetadata - metadata.inLock(() => { + metadata.inLock { if (epochAndTxnMetadata.coordinatorEpoch == coordinatorEpoch) { if (retryOnError(responseError)) { info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + @@ -751,13 +727,13 @@ class TransactionStateManager(brokerId: Int, info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + s"resetting pending state from ${metadata.pendingState}, aborting state transition and returning $responseError in the callback") - metadata.pendingState(Optional.empty()) + metadata.pendingState = None } } else { info(s"TransactionalId ${metadata.transactionalId} append transaction log for $newMetadata transition failed due to $responseError, " + s"aborting state transition and returning the error in the callback since the coordinator epoch has changed from ${epochAndTxnMetadata.coordinatorEpoch} to $coordinatorEpoch") } - }) + } case Right(None) => // Do nothing here, since we want to return the original append error to the user. @@ -792,7 +768,7 @@ class TransactionStateManager(brokerId: Int, case Right(Some(epochAndMetadata)) => val metadata = epochAndMetadata.transactionMetadata - val append: Boolean = metadata.inLock(() => { + val append: Boolean = metadata.inLock { if (epochAndMetadata.coordinatorEpoch != coordinatorEpoch) { // the coordinator epoch has changed, reply to client immediately with NOT_COORDINATOR responseCallback(Errors.NOT_COORDINATOR) @@ -802,7 +778,7 @@ class TransactionStateManager(brokerId: Int, // under the same coordinator epoch, so directly append to txn log now true } - }) + } if (append) { replicaManager.appendRecords( timeout = newMetadata.txnTimeoutMs.toLong, @@ -837,7 +813,7 @@ class TransactionStateManager(brokerId: Int, private[transaction] case class TxnMetadataCacheEntry(coordinatorEpoch: Int, - metadataPerTransactionalId: ConcurrentMap[String, TransactionMetadata]) { + metadataPerTransactionalId: Pool[String, TransactionMetadata]) { override def toString: String = { s"TxnMetadataCacheEntry(coordinatorEpoch=$coordinatorEpoch, numTransactionalEntries=${metadataPerTransactionalId.size})" } @@ -855,7 +831,6 @@ private[transaction] case class TransactionConfig(transactionalIdExpirationMs: I transactionLogMinInsyncReplicas: Int = TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_DEFAULT, abortTimedOutTransactionsIntervalMs: Int = TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, removeExpiredTransactionalIdsIntervalMs: Int = TransactionStateManagerConfig.TRANSACTIONS_REMOVE_EXPIRED_TRANSACTIONAL_ID_CLEANUP_INTERVAL_MS_DEFAULT, - transaction2PCEnable: Boolean = TransactionStateManagerConfig.TRANSACTIONS_2PC_ENABLED_DEFAULT, requestTimeoutMs: Int = ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT) case class TransactionalIdAndProducerIdEpoch(transactionalId: String, producerId: Long, producerEpoch: Short) { diff --git a/core/src/main/scala/kafka/log/LogCleaner.scala b/core/src/main/scala/kafka/log/LogCleaner.scala new file mode 100644 index 0000000000000..a4f96ff7e6303 --- /dev/null +++ b/core/src/main/scala/kafka/log/LogCleaner.scala @@ -0,0 +1,1339 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.log + +import java.io.{File, IOException} +import java.nio._ +import java.util.Date +import java.util.concurrent.TimeUnit +import kafka.common._ +import kafka.log.LogCleaner.{CleanerRecopyPercentMetricName, DeadThreadCountMetricName, MaxBufferUtilizationPercentMetricName, MaxCleanTimeMetricName, MaxCompactionDelayMetricsName} +import kafka.server.{BrokerReconfigurable, KafkaConfig} +import kafka.utils.{Logging, Pool} +import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.common.config.ConfigException +import org.apache.kafka.common.errors.{CorruptRecordException, KafkaStorageException} +import org.apache.kafka.common.record.MemoryRecords.RecordFilter +import org.apache.kafka.common.record.MemoryRecords.RecordFilter.BatchRetention +import org.apache.kafka.common.record._ +import org.apache.kafka.common.utils.{BufferSupplier, Time} +import org.apache.kafka.server.config.ServerConfigs +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.util.ShutdownableThread +import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, LastRecord, LogDirFailureChannel, LogSegment, LogSegmentOffsetOverflowException, OffsetMap, SkimpyOffsetMap, TransactionIndex} +import org.apache.kafka.storage.internals.utils.Throttler + +import scala.jdk.CollectionConverters._ +import scala.collection.mutable.ListBuffer +import scala.collection.{Iterable, Seq, Set, mutable} +import scala.util.control.ControlThrowable + +/** + * The cleaner is responsible for removing obsolete records from logs which have the "compact" retention strategy. + * A message with key K and offset O is obsolete if there exists a message with key K and offset O' such that O < O'. + * + * Each log can be thought of being split into two sections of segments: a "clean" section which has previously been cleaned followed by a + * "dirty" section that has not yet been cleaned. The dirty section is further divided into the "cleanable" section followed by an "uncleanable" section. + * The uncleanable section is excluded from cleaning. The active log segment is always uncleanable. If there is a + * compaction lag time set, segments whose largest message timestamp is within the compaction lag time of the cleaning operation are also uncleanable. + * + * The cleaning is carried out by a pool of background threads. Each thread chooses the dirtiest log that has the "compact" retention policy + * and cleans that. The dirtiness of the log is guessed by taking the ratio of bytes in the dirty section of the log to the total bytes in the log. + * + * To clean a log the cleaner first builds a mapping of key=>last_offset for the dirty section of the log. See {@link OffsetMap} for details of + * the implementation of the mapping. + * + * Once the key=>last_offset map is built, the log is cleaned by recopying each log segment but omitting any key that appears in the offset map with a + * higher offset than what is found in the segment (i.e. messages with a key that appears in the dirty section of the log). + * + * To avoid segments shrinking to very small sizes with repeated cleanings we implement a rule by which if we will merge successive segments when + * doing a cleaning if their log and index size are less than the maximum log and index size prior to the clean beginning. + * + * Cleaned segments are swapped into the log as they become available. + * + * One nuance that the cleaner must handle is log truncation. If a log is truncated while it is being cleaned the cleaning of that log is aborted. + * + * Messages with null payload are treated as deletes for the purpose of log compaction. This means that they receive special treatment by the cleaner. + * The cleaner will only retain delete records for a period of time to avoid accumulating space indefinitely. This period of time is configurable on a per-topic + * basis and is measured from the time the segment enters the clean portion of the log (at which point any prior message with that key has been removed). + * Delete markers in the clean section of the log that are older than this time will not be retained when log segments are being recopied as part of cleaning. + * This time is tracked by setting the base timestamp of a record batch with delete markers when the batch is recopied in the first cleaning that encounters + * it. The relative timestamps of the records in the batch are also modified when recopied in this cleaning according to the new base timestamp of the batch. + * + * Note that cleaning is more complicated with the idempotent/transactional producer capabilities. The following + * are the key points: + * + * 1. In order to maintain sequence number continuity for active producers, we always retain the last batch + * from each producerId, even if all the records from the batch have been removed. The batch will be removed + * once the producer either writes a new batch or is expired due to inactivity. + * 2. We do not clean beyond the last stable offset. This ensures that all records observed by the cleaner have + * been decided (i.e. committed or aborted). In particular, this allows us to use the transaction index to + * collect the aborted transactions ahead of time. + * 3. Records from aborted transactions are removed by the cleaner immediately without regard to record keys. + * 4. Transaction markers are retained until all record batches from the same transaction have been removed and + * a sufficient amount of time has passed to reasonably ensure that an active consumer wouldn't consume any + * data from the transaction prior to reaching the offset of the marker. This follows the same logic used for + * tombstone deletion. + * + * @param initialConfig Initial configuration parameters for the cleaner. Actual config may be dynamically updated. + * @param logDirs The directories where offset checkpoints reside + * @param logs The pool of logs + * @param logDirFailureChannel The channel used to add offline log dirs that may be encountered when cleaning the log + * @param time A way to control the passage of time + */ +class LogCleaner(initialConfig: CleanerConfig, + val logDirs: Seq[File], + val logs: Pool[TopicPartition, UnifiedLog], + val logDirFailureChannel: LogDirFailureChannel, + time: Time = Time.SYSTEM) extends Logging with BrokerReconfigurable { + // Visible for test. + private[log] val metricsGroup = new KafkaMetricsGroup(this.getClass) + + /* Log cleaner configuration which may be dynamically updated */ + @volatile private var config = initialConfig + + /* for managing the state of partitions being cleaned. package-private to allow access in tests */ + private[log] val cleanerManager = new LogCleanerManager(logDirs, logs, logDirFailureChannel) + + /* a throttle used to limit the I/O of all the cleaner threads to a user-specified maximum rate */ + private[log] val throttler = new Throttler(config.maxIoBytesPerSecond, 300, "cleaner-io", "bytes", time) + + private[log] val cleaners = mutable.ArrayBuffer[CleanerThread]() + + /** + * @param f to compute the result + * @return the max value (int value) or 0 if there is no cleaner + */ + private[log] def maxOverCleanerThreads(f: CleanerThread => Double): Int = + cleaners.map(f).maxOption.getOrElse(0.0d).toInt + + /* a metric to track the maximum utilization of any thread's buffer in the last cleaning */ + metricsGroup.newGauge(MaxBufferUtilizationPercentMetricName, + () => maxOverCleanerThreads(_.lastStats.bufferUtilization) * 100) + + /* a metric to track the recopy rate of each thread's last cleaning */ + metricsGroup.newGauge(CleanerRecopyPercentMetricName, () => { + val stats = cleaners.map(_.lastStats) + val recopyRate = stats.iterator.map(_.bytesWritten).sum.toDouble / math.max(stats.iterator.map(_.bytesRead).sum, 1) + (100 * recopyRate).toInt + }) + + /* a metric to track the maximum cleaning time for the last cleaning from each thread */ + metricsGroup.newGauge(MaxCleanTimeMetricName, () => maxOverCleanerThreads(_.lastStats.elapsedSecs)) + + // a metric to track delay between the time when a log is required to be compacted + // as determined by max compaction lag and the time of last cleaner run. + metricsGroup.newGauge(MaxCompactionDelayMetricsName, + () => maxOverCleanerThreads(_.lastPreCleanStats.maxCompactionDelayMs.toDouble) / 1000) + + metricsGroup.newGauge(DeadThreadCountMetricName, () => deadThreadCount) + + private[log] def deadThreadCount: Int = cleaners.count(_.isThreadFailed) + + /** + * Start the background cleaner threads + */ + def startup(): Unit = { + info("Starting the log cleaner") + (0 until config.numThreads).foreach { i => + val cleaner = new CleanerThread(i) + cleaners += cleaner + cleaner.start() + } + } + + /** + * Stop the background cleaner threads + */ + private[this] def shutdownCleaners(): Unit = { + info("Shutting down the log cleaner.") + cleaners.foreach(_.shutdown()) + cleaners.clear() + } + + /** + * Stop the background cleaner threads + */ + def shutdown(): Unit = { + try { + shutdownCleaners() + } finally { + removeMetrics() + } + } + + /** + * Remove metrics + */ + def removeMetrics(): Unit = { + LogCleaner.MetricNames.foreach(metricsGroup.removeMetric) + cleanerManager.removeMetrics() + } + + /** + * @return A set of configs that is reconfigurable in LogCleaner + */ + override def reconfigurableConfigs: Set[String] = { + LogCleaner.ReconfigurableConfigs + } + + /** + * Validate the new cleaner threads num is reasonable + * + * @param newConfig A submitted new KafkaConfig instance that contains new cleaner config + */ + override def validateReconfiguration(newConfig: KafkaConfig): Unit = { + val numThreads = LogCleaner.cleanerConfig(newConfig).numThreads + val currentThreads = config.numThreads + if (numThreads < 1) + throw new ConfigException(s"Log cleaner threads should be at least 1") + if (numThreads < currentThreads / 2) + throw new ConfigException(s"Log cleaner threads cannot be reduced to less than half the current value $currentThreads") + if (numThreads > currentThreads * 2) + throw new ConfigException(s"Log cleaner threads cannot be increased to more than double the current value $currentThreads") + + } + + /** + * Reconfigure log clean config. The will: + * 1. update desiredRatePerSec in Throttler with logCleanerIoMaxBytesPerSecond, if necessary + * 2. stop current log cleaners and create new ones. + * That ensures that if any of the cleaners had failed, new cleaners are created to match the new config. + * + * @param oldConfig the old log cleaner config + * @param newConfig the new log cleaner config reconfigured + */ + override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { + config = LogCleaner.cleanerConfig(newConfig) + + val maxIoBytesPerSecond = config.maxIoBytesPerSecond + if (maxIoBytesPerSecond != oldConfig.logCleanerIoMaxBytesPerSecond) { + info(s"Updating logCleanerIoMaxBytesPerSecond: $maxIoBytesPerSecond") + throttler.updateDesiredRatePerSec(maxIoBytesPerSecond) + } + // call shutdownCleaners() instead of shutdown to avoid unnecessary deletion of metrics + shutdownCleaners() + startup() + } + + /** + * Abort the cleaning of a particular partition, if it's in progress. This call blocks until the cleaning of + * the partition is aborted. + * + * @param topicPartition The topic and partition to abort cleaning + */ + def abortCleaning(topicPartition: TopicPartition): Unit = { + cleanerManager.abortCleaning(topicPartition) + } + + /** + * Update checkpoint file to remove partitions if necessary. + * + * @param dataDir The data dir to be updated if necessary + * @param partitionToRemove The topicPartition to be removed, default none + */ + def updateCheckpoints(dataDir: File, partitionToRemove: Option[TopicPartition] = None): Unit = { + cleanerManager.updateCheckpoints(dataDir, partitionToRemove = partitionToRemove) + } + + /** + * Alter the checkpoint directory for the `topicPartition`, to remove the data in `sourceLogDir`, and add the data in `destLogDir` + * Generally occurs when the disk balance ends and replaces the previous file with the future file + * + * @param topicPartition The topic and partition to alter checkpoint + * @param sourceLogDir The source log dir to remove checkpoint + * @param destLogDir The dest log dir to remove checkpoint + */ + def alterCheckpointDir(topicPartition: TopicPartition, sourceLogDir: File, destLogDir: File): Unit = { + cleanerManager.alterCheckpointDir(topicPartition, sourceLogDir, destLogDir) + } + + /** + * Stop cleaning logs in the provided directory when handling log dir failure + * + * @param dir the absolute path of the log dir + */ + def handleLogDirFailure(dir: String): Unit = { + cleanerManager.handleLogDirFailure(dir) + } + + /** + * Truncate cleaner offset checkpoint for the given partition if its checkpoint offset is larger than the given offset + * + * @param dataDir The data dir to be truncated if necessary + * @param topicPartition The topic and partition to truncate checkpoint offset + * @param offset The given offset to be compared + */ + def maybeTruncateCheckpoint(dataDir: File, topicPartition: TopicPartition, offset: Long): Unit = { + cleanerManager.maybeTruncateCheckpoint(dataDir, topicPartition, offset) + } + + /** + * Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition. + * This call blocks until the cleaning of the partition is aborted and paused. + * + * @param topicPartition The topic and partition to abort and pause cleaning + */ + def abortAndPauseCleaning(topicPartition: TopicPartition): Unit = { + cleanerManager.abortAndPauseCleaning(topicPartition) + } + + /** + * Resume the cleaning of paused partitions. + * + * @param topicPartitions The collection of topicPartitions to be resumed cleaning + */ + def resumeCleaning(topicPartitions: Iterable[TopicPartition]): Unit = { + cleanerManager.resumeCleaning(topicPartitions) + } + + /** + * For testing, a way to know when work has completed. This method waits until the + * cleaner has processed up to the given offset on the specified topic/partition + * + * @param topicPartition The topic and partition to be cleaned + * @param offset The first dirty offset that the cleaner doesn't have to clean + * @param maxWaitMs The maximum time in ms to wait for cleaner + * + * @return A boolean indicating whether the work has completed before timeout + */ + def awaitCleaned(topicPartition: TopicPartition, offset: Long, maxWaitMs: Long = 60000L): Boolean = { + def isCleaned = cleanerManager.allCleanerCheckpoints.get(topicPartition).fold(false)(_ >= offset) + var remainingWaitMs = maxWaitMs + while (!isCleaned && remainingWaitMs > 0) { + val sleepTime = math.min(100, remainingWaitMs) + Thread.sleep(sleepTime) + remainingWaitMs -= sleepTime + } + isCleaned + } + + /** + * To prevent race between retention and compaction, + * retention threads need to make this call to obtain: + * + * @return A list of log partitions that retention threads can safely work on + */ + def pauseCleaningForNonCompactedPartitions(): Iterable[(TopicPartition, UnifiedLog)] = { + cleanerManager.pauseCleaningForNonCompactedPartitions() + } + + // Only for testing + private[kafka] def currentConfig: CleanerConfig = config + + // Only for testing + private[log] def cleanerCount: Int = cleaners.size + + /** + * The cleaner threads do the actual log cleaning. Each thread processes does its cleaning repeatedly by + * choosing the dirtiest log, cleaning it, and then swapping in the cleaned segments. + */ + private[log] class CleanerThread(threadId: Int) + extends ShutdownableThread(s"kafka-log-cleaner-thread-$threadId", false) with Logging { + protected override def loggerName: String = classOf[LogCleaner].getName + + this.logIdent = logPrefix + + if (config.dedupeBufferSize / config.numThreads > Int.MaxValue) + warn("Cannot use more than 2G of cleaner buffer space per cleaner thread, ignoring excess buffer space...") + + val cleaner = new Cleaner(id = threadId, + offsetMap = new SkimpyOffsetMap(math.min(config.dedupeBufferSize / config.numThreads, Int.MaxValue).toInt, + config.hashAlgorithm), + ioBufferSize = config.ioBufferSize / config.numThreads / 2, + maxIoBufferSize = config.maxMessageSize, + dupBufferLoadFactor = config.dedupeBufferLoadFactor, + throttler = throttler, + time = time, + checkDone = checkDone) + + @volatile var lastStats: CleanerStats = new CleanerStats() + @volatile var lastPreCleanStats: PreCleanStats = new PreCleanStats() + + /** + * Check if the cleaning for a partition is aborted. If so, throw an exception. + * + * @param topicPartition The topic and partition to check + */ + private def checkDone(topicPartition: TopicPartition): Unit = { + if (!isRunning) + throw new ThreadShutdownException + cleanerManager.checkCleaningAborted(topicPartition) + } + + /** + * The main loop for the cleaner thread + * Clean a log if there is a dirty log available, otherwise sleep for a bit + */ + override def doWork(): Unit = { + val cleaned = tryCleanFilthiestLog() + if (!cleaned) + pause(config.backoffMs, TimeUnit.MILLISECONDS) + + cleanerManager.maintainUncleanablePartitions() + } + + /** + * Cleans a log if there is a dirty log available + * + * @return whether a log was cleaned + */ + private def tryCleanFilthiestLog(): Boolean = { + try { + cleanFilthiestLog() + } catch { + case e: LogCleaningException => + warn(s"Unexpected exception thrown when cleaning log ${e.log}. Marking its partition (${e.log.topicPartition}) as uncleanable", e) + cleanerManager.markPartitionUncleanable(e.log.parentDir, e.log.topicPartition) + + false + } + } + + @throws(classOf[LogCleaningException]) + private def cleanFilthiestLog(): Boolean = { + val preCleanStats = new PreCleanStats() + val ltc = cleanerManager.grabFilthiestCompactedLog(time, preCleanStats) + val cleaned = ltc match { + case None => + false + case Some(cleanable) => + // there's a log, clean it + this.lastPreCleanStats = preCleanStats + try { + cleanLog(cleanable) + true + } catch { + case e @ (_: ThreadShutdownException | _: ControlThrowable) => throw e + case e: Exception => throw new LogCleaningException(cleanable.log, e.getMessage, e) + } + } + val deletable: Iterable[(TopicPartition, UnifiedLog)] = cleanerManager.deletableLogs() + try { + deletable.foreach { case (_, log) => + try { + log.deleteOldSegments() + } catch { + case e @ (_: ThreadShutdownException | _: ControlThrowable) => throw e + case e: Exception => throw new LogCleaningException(log, e.getMessage, e) + } + } + } finally { + cleanerManager.doneDeleting(deletable.map(_._1)) + } + + cleaned + } + + private def cleanLog(cleanable: LogToClean): Unit = { + val startOffset = cleanable.firstDirtyOffset + var endOffset = startOffset + try { + val (nextDirtyOffset, cleanerStats) = cleaner.clean(cleanable) + endOffset = nextDirtyOffset + recordStats(cleaner.id, cleanable.log.name, startOffset, endOffset, cleanerStats) + } catch { + case _: LogCleaningAbortedException => // task can be aborted, let it go. + case _: KafkaStorageException => // partition is already offline. let it go. + case e: IOException => + val logDirectory = cleanable.log.parentDir + val msg = s"Failed to clean up log for ${cleanable.topicPartition} in dir $logDirectory due to IOException" + logDirFailureChannel.maybeAddOfflineLogDir(logDirectory, msg, e) + } finally { + cleanerManager.doneCleaning(cleanable.topicPartition, cleanable.log.parentDirFile, endOffset) + } + } + + /** + * Log out statistics on a single run of the cleaner. + * + * @param id The cleaner thread id + * @param name The cleaned log name + * @param from The cleaned offset that is the first dirty offset to begin + * @param to The cleaned offset that is the first not cleaned offset to end + * @param stats The statistics for this round of cleaning + */ + private def recordStats(id: Int, name: String, from: Long, to: Long, stats: CleanerStats): Unit = { + this.lastStats = stats + def mb(bytes: Double) = bytes / (1024*1024) + val message = + "%n\tLog cleaner thread %d cleaned log %s (dirty section = [%d, %d])%n".format(id, name, from, to) + + "\t%,.1f MB of log processed in %,.1f seconds (%,.1f MB/sec).%n".format(mb(stats.bytesRead.toDouble), + stats.elapsedSecs, + mb(stats.bytesRead.toDouble / stats.elapsedSecs)) + + "\tIndexed %,.1f MB in %.1f seconds (%,.1f Mb/sec, %.1f%% of total time)%n".format(mb(stats.mapBytesRead.toDouble), + stats.elapsedIndexSecs, + mb(stats.mapBytesRead.toDouble) / stats.elapsedIndexSecs, + 100 * stats.elapsedIndexSecs / stats.elapsedSecs) + + "\tBuffer utilization: %.1f%%%n".format(100 * stats.bufferUtilization) + + "\tCleaned %,.1f MB in %.1f seconds (%,.1f Mb/sec, %.1f%% of total time)%n".format(mb(stats.bytesRead.toDouble), + stats.elapsedSecs - stats.elapsedIndexSecs, + mb(stats.bytesRead.toDouble) / (stats.elapsedSecs - stats.elapsedIndexSecs), 100 * (stats.elapsedSecs - stats.elapsedIndexSecs) / stats.elapsedSecs) + + "\tStart size: %,.1f MB (%,d messages)%n".format(mb(stats.bytesRead.toDouble), stats.messagesRead) + + "\tEnd size: %,.1f MB (%,d messages)%n".format(mb(stats.bytesWritten.toDouble), stats.messagesWritten) + + "\t%.1f%% size reduction (%.1f%% fewer messages)%n".format(100.0 * (1.0 - stats.bytesWritten.toDouble/stats.bytesRead), + 100.0 * (1.0 - stats.messagesWritten.toDouble/stats.messagesRead)) + info(message) + if (lastPreCleanStats.delayedPartitions > 0) { + info("\tCleanable partitions: %d, Delayed partitions: %d, max delay: %d".format(lastPreCleanStats.cleanablePartitions, lastPreCleanStats.delayedPartitions, lastPreCleanStats.maxCompactionDelayMs)) + } + if (stats.invalidMessagesRead > 0) { + warn("\tFound %d invalid messages during compaction.".format(stats.invalidMessagesRead)) + } + } + + } +} + +object LogCleaner { + val ReconfigurableConfigs: Set[String] = Set( + CleanerConfig.LOG_CLEANER_THREADS_PROP, + CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP, + CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_LOAD_FACTOR_PROP, + CleanerConfig.LOG_CLEANER_IO_BUFFER_SIZE_PROP, + ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, + CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP, + CleanerConfig.LOG_CLEANER_BACKOFF_MS_PROP + ) + + def cleanerConfig(config: KafkaConfig): CleanerConfig = { + new CleanerConfig(config.logCleanerThreads, + config.logCleanerDedupeBufferSize, + config.logCleanerDedupeBufferLoadFactor, + config.logCleanerIoBufferSize, + config.messageMaxBytes, + config.logCleanerIoMaxBytesPerSecond, + config.logCleanerBackoffMs, + config.logCleanerEnable) + + } + + private val MaxBufferUtilizationPercentMetricName = "max-buffer-utilization-percent" + private val CleanerRecopyPercentMetricName = "cleaner-recopy-percent" + private val MaxCleanTimeMetricName = "max-clean-time-secs" + private val MaxCompactionDelayMetricsName = "max-compaction-delay-secs" + private val DeadThreadCountMetricName = "DeadThreadCount" + // package private for testing + private[log] val MetricNames = Set( + MaxBufferUtilizationPercentMetricName, + CleanerRecopyPercentMetricName, + MaxCleanTimeMetricName, + MaxCompactionDelayMetricsName, + DeadThreadCountMetricName) +} + +/** + * This class holds the actual logic for cleaning a log + * @param id An identifier used for logging + * @param offsetMap The map used for deduplication + * @param ioBufferSize The size of the buffers to use. Memory usage will be 2x this number as there is a read and write buffer. + * @param maxIoBufferSize The maximum size of a message that can appear in the log + * @param dupBufferLoadFactor The maximum percent full for the deduplication buffer + * @param throttler The throttler instance to use for limiting I/O rate. + * @param time The time instance + * @param checkDone Check if the cleaning for a partition is finished or aborted. + */ +private[log] class Cleaner(val id: Int, + val offsetMap: OffsetMap, + ioBufferSize: Int, + maxIoBufferSize: Int, + dupBufferLoadFactor: Double, + throttler: Throttler, + time: Time, + checkDone: TopicPartition => Unit) extends Logging { + + protected override def loggerName: String = classOf[LogCleaner].getName + + this.logIdent = s"Cleaner $id: " + + /* buffer used for read i/o */ + private var readBuffer = ByteBuffer.allocate(ioBufferSize) + + /* buffer used for write i/o */ + private var writeBuffer = ByteBuffer.allocate(ioBufferSize) + + private val decompressionBufferSupplier = BufferSupplier.create() + + require(offsetMap.slots * dupBufferLoadFactor > 1, "offset map is too small to fit in even a single message, so log cleaning will never make progress. You can increase log.cleaner.dedupe.buffer.size or decrease log.cleaner.threads") + + /** + * Clean the given log + * + * @param cleanable The log to be cleaned + * + * @return The first offset not cleaned and the statistics for this round of cleaning + */ + private[log] def clean(cleanable: LogToClean): (Long, CleanerStats) = { + doClean(cleanable, time.milliseconds()) + } + + /** + * Clean the given log + * + * @param cleanable The log to be cleaned + * @param currentTime The current timestamp for doing cleaning + * + * @return The first offset not cleaned and the statistics for this round of cleaning + * */ + private[log] def doClean(cleanable: LogToClean, currentTime: Long): (Long, CleanerStats) = { + info("Beginning cleaning of log %s".format(cleanable.log.name)) + + // figure out the timestamp below which it is safe to remove delete tombstones + // this position is defined to be a configurable time beneath the last modified time of the last clean segment + // this timestamp is only used on the older message formats older than MAGIC_VALUE_V2 + val legacyDeleteHorizonMs = + cleanable.log.logSegments(0, cleanable.firstDirtyOffset).lastOption match { + case None => 0L + case Some(seg) => seg.lastModified - cleanable.log.config.deleteRetentionMs + } + + val log = cleanable.log + val stats = new CleanerStats() + + // build the offset map + info("Building offset map for %s...".format(cleanable.log.name)) + val upperBoundOffset = cleanable.firstUncleanableOffset + buildOffsetMap(log, cleanable.firstDirtyOffset, upperBoundOffset, offsetMap, stats) + val endOffset = offsetMap.latestOffset + 1 + stats.indexDone() + + // determine the timestamp up to which the log will be cleaned + // this is the lower of the last active segment and the compaction lag + val cleanableHorizonMs = log.logSegments(0, cleanable.firstUncleanableOffset).lastOption.map(_.lastModified).getOrElse(0L) + + // group the segments and clean the groups + info("Cleaning log %s (cleaning prior to %s, discarding tombstones prior to upper bound deletion horizon %s)...".format(log.name, new Date(cleanableHorizonMs), new Date(legacyDeleteHorizonMs))) + val transactionMetadata = new CleanedTransactionMetadata + + val groupedSegments = groupSegmentsBySize(log.logSegments(0, endOffset), log.config.segmentSize, + log.config.maxIndexSize, cleanable.firstUncleanableOffset) + for (group <- groupedSegments) + cleanSegments(log, group, offsetMap, currentTime, stats, transactionMetadata, legacyDeleteHorizonMs, upperBoundOffset) + + // record buffer utilization + stats.bufferUtilization = offsetMap.utilization + + stats.allDone() + + (endOffset, stats) + } + + /** + * Clean a group of segments into a single replacement segment + * + * @param log The log being cleaned + * @param segments The group of segments being cleaned + * @param map The offset map to use for cleaning segments + * @param currentTime The current time in milliseconds + * @param stats Collector for cleaning statistics + * @param transactionMetadata State of ongoing transactions which is carried between the cleaning + * of the grouped segments + * @param legacyDeleteHorizonMs The delete horizon used for tombstones whose version is less than 2 + * @param upperBoundOffsetOfCleaningRound The upper bound offset of this round of cleaning + */ + private[log] def cleanSegments(log: UnifiedLog, + segments: Seq[LogSegment], + map: OffsetMap, + currentTime: Long, + stats: CleanerStats, + transactionMetadata: CleanedTransactionMetadata, + legacyDeleteHorizonMs: Long, + upperBoundOffsetOfCleaningRound: Long): Unit = { + // create a new segment with a suffix appended to the name of the log and indexes + val cleaned = UnifiedLog.createNewCleanedSegment(log.dir, log.config, segments.head.baseOffset) + transactionMetadata.cleanedIndex = Some(cleaned.txnIndex) + + try { + // clean segments into the new destination segment + val iter = segments.iterator + var currentSegmentOpt: Option[LogSegment] = Some(iter.next()) + val lastOffsetOfActiveProducers = log.lastRecordsOfActiveProducers + + while (currentSegmentOpt.isDefined) { + val currentSegment = currentSegmentOpt.get + val nextSegmentOpt = if (iter.hasNext) Some(iter.next()) else None + + // Note that it is important to collect aborted transactions from the full log segment + // range since we need to rebuild the full transaction index for the new segment. + val startOffset = currentSegment.baseOffset + val upperBoundOffset = nextSegmentOpt.map(_.baseOffset).getOrElse(currentSegment.readNextOffset) + val abortedTransactions = log.collectAbortedTransactions(startOffset, upperBoundOffset) + transactionMetadata.addAbortedTransactions(abortedTransactions) + + val retainLegacyDeletesAndTxnMarkers = currentSegment.lastModified > legacyDeleteHorizonMs + info(s"Cleaning $currentSegment in log ${log.name} into ${cleaned.baseOffset} " + + s"with an upper bound deletion horizon $legacyDeleteHorizonMs computed from " + + s"the segment last modified time of ${currentSegment.lastModified}," + + s"${if(retainLegacyDeletesAndTxnMarkers) "retaining" else "discarding"} deletes.") + + try { + cleanInto(log.topicPartition, currentSegment.log, cleaned, map, retainLegacyDeletesAndTxnMarkers, log.config.deleteRetentionMs, + log.config.maxMessageSize, transactionMetadata, lastOffsetOfActiveProducers, + upperBoundOffsetOfCleaningRound, stats, currentTime = currentTime) + } catch { + case e: LogSegmentOffsetOverflowException => + // Split the current segment. It's also safest to abort the current cleaning process, so that we retry from + // scratch once the split is complete. + info(s"Caught segment overflow error during cleaning: ${e.getMessage}") + log.splitOverflowedSegment(currentSegment) + throw new LogCleaningAbortedException() + } + currentSegmentOpt = nextSegmentOpt + } + + cleaned.onBecomeInactiveSegment() + // flush new segment to disk before swap + cleaned.flush() + + // update the modification date to retain the last modified date of the original files + val modified = segments.last.lastModified + cleaned.setLastModified(modified) + + // swap in new segment + info(s"Swapping in cleaned segment $cleaned for segment(s) $segments in log $log") + log.replaceSegments(List(cleaned), segments) + } catch { + case e: LogCleaningAbortedException => + try cleaned.deleteIfExists() + catch { + case deleteException: Exception => + e.addSuppressed(deleteException) + } finally throw e + } + } + + /** + * Clean the given source log segment into the destination segment using the key=>offset mapping + * provided + * + * @param topicPartition The topic and partition of the log segment to clean + * @param sourceRecords The dirty log segment + * @param dest The cleaned log segment + * @param map The key=>offset mapping + * @param retainLegacyDeletesAndTxnMarkers Should tombstones (lower than version 2) and markers be retained while cleaning this segment + * @param deleteRetentionMs Defines how long a tombstone should be kept as defined by log configuration + * @param maxLogMessageSize The maximum message size of the corresponding topic + * @param transactionMetadata The state of ongoing transactions which is carried between the cleaning of the grouped segments + * @param lastRecordsOfActiveProducers The active producers and its last data offset + * @param upperBoundOffsetOfCleaningRound Next offset of the last batch in the source segment + * @param stats Collector for cleaning statistics + * @param currentTime The time at which the clean was initiated + */ + private[log] def cleanInto(topicPartition: TopicPartition, + sourceRecords: FileRecords, + dest: LogSegment, + map: OffsetMap, + retainLegacyDeletesAndTxnMarkers: Boolean, + deleteRetentionMs: Long, + maxLogMessageSize: Int, + transactionMetadata: CleanedTransactionMetadata, + lastRecordsOfActiveProducers: mutable.Map[Long, LastRecord], + upperBoundOffsetOfCleaningRound: Long, + stats: CleanerStats, + currentTime: Long): Unit = { + val logCleanerFilter: RecordFilter = new RecordFilter(currentTime, deleteRetentionMs) { + var discardBatchRecords: Boolean = _ + + override def checkBatchRetention(batch: RecordBatch): RecordFilter.BatchRetentionResult = { + // we piggy-back on the tombstone retention logic to delay deletion of transaction markers. + // note that we will never delete a marker until all the records from that transaction are removed. + val canDiscardBatch = shouldDiscardBatch(batch, transactionMetadata) + + if (batch.isControlBatch) + discardBatchRecords = canDiscardBatch && batch.deleteHorizonMs().isPresent && batch.deleteHorizonMs().getAsLong <= this.currentTime + else + discardBatchRecords = canDiscardBatch + + def isBatchLastRecordOfProducer: Boolean = { + // We retain the batch in order to preserve the state of active producers. There are three cases: + // 1) The producer is no longer active, which means we can delete all records for that producer. + // 2) The producer is still active and has a last data offset. We retain the batch that contains + // this offset since it also contains the last sequence number for this producer. + // 3) The last entry in the log is a transaction marker. We retain this marker since it has the + // last producer epoch, which is needed to ensure fencing. + lastRecordsOfActiveProducers.get(batch.producerId).exists { lastRecord => + if (lastRecord.lastDataOffset.isPresent) { + batch.lastOffset == lastRecord.lastDataOffset.getAsLong + } else { + batch.isControlBatch && batch.producerEpoch == lastRecord.producerEpoch + } + } + } + + val batchRetention: BatchRetention = + if (batch.hasProducerId && isBatchLastRecordOfProducer) + BatchRetention.RETAIN_EMPTY + else if (batch.nextOffset == upperBoundOffsetOfCleaningRound) { + // retain the last batch of the cleaning round, even if it's empty, so that last offset information + // is not lost after cleaning. + BatchRetention.RETAIN_EMPTY + } else if (discardBatchRecords) + BatchRetention.DELETE + else + BatchRetention.DELETE_EMPTY + new RecordFilter.BatchRetentionResult(batchRetention, canDiscardBatch && batch.isControlBatch) + } + + override def shouldRetainRecord(batch: RecordBatch, record: Record): Boolean = { + if (discardBatchRecords) + // The batch is only retained to preserve producer sequence information; the records can be removed + false + else if (batch.isControlBatch) + true + else + Cleaner.this.shouldRetainRecord(map, retainLegacyDeletesAndTxnMarkers, batch, record, stats, currentTime = this.currentTime) + } + } + + var position = 0 + while (position < sourceRecords.sizeInBytes) { + checkDone(topicPartition) + // read a chunk of messages and copy any that are to be retained to the write buffer to be written out + readBuffer.clear() + writeBuffer.clear() + + sourceRecords.readInto(readBuffer, position) + val records = MemoryRecords.readableRecords(readBuffer) + throttler.maybeThrottle(records.sizeInBytes) + val result = records.filterTo(logCleanerFilter, writeBuffer, decompressionBufferSupplier) + + stats.readMessages(result.messagesRead, result.bytesRead) + stats.recopyMessages(result.messagesRetained, result.bytesRetained) + + position += result.bytesRead + + // if any messages are to be retained, write them out + val outputBuffer = result.outputBuffer + if (outputBuffer.position() > 0) { + outputBuffer.flip() + val retained = MemoryRecords.readableRecords(outputBuffer) + // it's OK not to hold the Log's lock in this case, because this segment is only accessed by other threads + // after `Log.replaceSegments` (which acquires the lock) is called + dest.append(result.maxOffset, result.maxTimestamp, result.shallowOffsetOfMaxTimestamp(), retained) + throttler.maybeThrottle(outputBuffer.limit()) + } + + // if we read bytes but didn't get even one complete batch, our I/O buffer is too small, grow it and try again + // `result.bytesRead` contains bytes from `messagesRead` and any discarded batches. + if (readBuffer.limit() > 0 && result.bytesRead == 0) + growBuffersOrFail(sourceRecords, position, maxLogMessageSize, records) + } + restoreBuffers() + } + + + /** + * Grow buffers to process next batch of records from `sourceRecords.` Buffers are doubled in size + * up to a maximum of `maxLogMessageSize`. In some scenarios, a record could be bigger than the + * current maximum size configured for the log. For example: + * 1. A compacted topic using compression may contain a message set slightly larger than max.message.bytes + * 2. max.message.bytes of a topic could have been reduced after writing larger messages + * In these cases, grow the buffer to hold the next batch. + * + * @param sourceRecords The dirty log segment records to process + * @param position The current position in the read buffer to read from + * @param maxLogMessageSize The maximum record size in bytes for the topic + * @param memoryRecords The memory records in read buffer + */ + private def growBuffersOrFail(sourceRecords: FileRecords, + position: Int, + maxLogMessageSize: Int, + memoryRecords: MemoryRecords): Unit = { + + val maxSize = if (readBuffer.capacity >= maxLogMessageSize) { + val nextBatchSize = memoryRecords.firstBatchSize + val logDesc = s"log segment ${sourceRecords.file} at position $position" + if (nextBatchSize == null) + throw new IllegalStateException(s"Could not determine next batch size for $logDesc") + if (nextBatchSize <= 0) + throw new IllegalStateException(s"Invalid batch size $nextBatchSize for $logDesc") + if (nextBatchSize <= readBuffer.capacity) + throw new IllegalStateException(s"Batch size $nextBatchSize < buffer size ${readBuffer.capacity}, but not processed for $logDesc") + val bytesLeft = sourceRecords.channel.size - position + if (nextBatchSize > bytesLeft) + throw new CorruptRecordException(s"Log segment may be corrupt, batch size $nextBatchSize > $bytesLeft bytes left in segment for $logDesc") + nextBatchSize.intValue + } else + maxLogMessageSize + + growBuffers(maxSize) + } + + /** + * Check if a batch should be discard by cleaned transaction state + * + * @param batch The batch of records to check + * @param transactionMetadata The maintained transaction state about cleaning + * + * @return if the batch can be discarded + */ + private def shouldDiscardBatch(batch: RecordBatch, + transactionMetadata: CleanedTransactionMetadata): Boolean = { + if (batch.isControlBatch) + transactionMetadata.onControlBatchRead(batch) + else + transactionMetadata.onBatchRead(batch) + } + + /** + * Check if a record should be retained + * + * @param map The offset map(key=>offset) to use for cleaning segments + * @param retainDeletesForLegacyRecords Should tombstones (lower than version 2) and markers be retained while cleaning this segment + * @param batch The batch of records that the record belongs to + * @param record The record to check + * @param stats The collector for cleaning statistics + * @param currentTime The current time that used to compare with the delete horizon time of the batch when judging a non-legacy record + * + * @return if the record can be retained + */ + private def shouldRetainRecord(map: OffsetMap, + retainDeletesForLegacyRecords: Boolean, + batch: RecordBatch, + record: Record, + stats: CleanerStats, + currentTime: Long): Boolean = { + val pastLatestOffset = record.offset > map.latestOffset + if (pastLatestOffset) + return true + + if (record.hasKey) { + val key = record.key + val foundOffset = map.get(key) + /* First,the message must have the latest offset for the key + * then there are two cases in which we can retain a message: + * 1) The message has value + * 2) The message doesn't has value but it can't be deleted now. + */ + val latestOffsetForKey = record.offset() >= foundOffset + val legacyRecord = batch.magic() < RecordBatch.MAGIC_VALUE_V2 + def shouldRetainDeletes = { + if (!legacyRecord) + !batch.deleteHorizonMs().isPresent || currentTime < batch.deleteHorizonMs().getAsLong + else + retainDeletesForLegacyRecords + } + val isRetainedValue = record.hasValue || shouldRetainDeletes + latestOffsetForKey && isRetainedValue + } else { + stats.invalidMessage() + false + } + } + + /** + * Double the I/O buffer capacity + * + * @param maxLogMessageSize The maximum record size in bytes allowed + */ + private def growBuffers(maxLogMessageSize: Int): Unit = { + val maxBufferSize = math.max(maxLogMessageSize, maxIoBufferSize) + if (readBuffer.capacity >= maxBufferSize || writeBuffer.capacity >= maxBufferSize) + throw new IllegalStateException("This log contains a message larger than maximum allowable size of %s.".format(maxBufferSize)) + val newSize = math.min(this.readBuffer.capacity * 2, maxBufferSize) + info(s"Growing cleaner I/O buffers from ${readBuffer.capacity} bytes to $newSize bytes.") + this.readBuffer = ByteBuffer.allocate(newSize) + this.writeBuffer = ByteBuffer.allocate(newSize) + } + + /** + * Restore the I/O buffer capacity to its original size + */ + private def restoreBuffers(): Unit = { + if (this.readBuffer.capacity > this.ioBufferSize) + this.readBuffer = ByteBuffer.allocate(this.ioBufferSize) + if (this.writeBuffer.capacity > this.ioBufferSize) + this.writeBuffer = ByteBuffer.allocate(this.ioBufferSize) + } + + /** + * Group the segments in a log into groups totaling less than a given size. the size is enforced separately for the log data and the index data. + * We collect a group of such segments together into a single + * destination segment. This prevents segment sizes from shrinking too much. + * + * @param segments The log segments to group + * @param maxSize the maximum size in bytes for the total of all log data in a group + * @param maxIndexSize the maximum size in bytes for the total of all index data in a group + * @param firstUncleanableOffset The upper(exclusive) offset to clean to + * + * @return A list of grouped segments + */ + private[log] def groupSegmentsBySize(segments: Iterable[LogSegment], maxSize: Int, maxIndexSize: Int, firstUncleanableOffset: Long): List[Seq[LogSegment]] = { + var grouped = List[List[LogSegment]]() + var segs = segments.toList + while (segs.nonEmpty) { + var group = List(segs.head) + var logSize = segs.head.size.toLong + var indexSize = segs.head.offsetIndex.sizeInBytes.toLong + var timeIndexSize = segs.head.timeIndex.sizeInBytes.toLong + segs = segs.tail + while (segs.nonEmpty && + logSize + segs.head.size <= maxSize && + indexSize + segs.head.offsetIndex.sizeInBytes <= maxIndexSize && + timeIndexSize + segs.head.timeIndex.sizeInBytes <= maxIndexSize && + //if first segment size is 0, we don't need to do the index offset range check. + //this will avoid empty log left every 2^31 message. + (segs.head.size == 0 || + lastOffsetForFirstSegment(segs, firstUncleanableOffset) - group.last.baseOffset <= Int.MaxValue)) { + group = segs.head :: group + logSize += segs.head.size + indexSize += segs.head.offsetIndex.sizeInBytes + timeIndexSize += segs.head.timeIndex.sizeInBytes + segs = segs.tail + } + grouped ::= group.reverse + } + grouped.reverse + } + + /** + * We want to get the last offset in the first log segment in segs. + * LogSegment.nextOffset() gives the exact last offset in a segment, but can be expensive since it requires + * scanning the segment from the last index entry. + * Therefore, we estimate the last offset of the first log segment by using + * the base offset of the next segment in the list. + * If the next segment doesn't exist, first Uncleanable Offset will be used. + * + * @param segs Remaining segments to group. + * @param firstUncleanableOffset The upper(exclusive) offset to clean to + * @return The estimated last offset for the first segment in segs + */ + private def lastOffsetForFirstSegment(segs: List[LogSegment], firstUncleanableOffset: Long): Long = { + if (segs.size > 1) { + /* if there is a next segment, use its base offset as the bounding offset to guarantee we know + * the worst case offset */ + segs(1).baseOffset - 1 + } else { + //for the last segment in the list, use the first uncleanable offset. + firstUncleanableOffset - 1 + } + } + + /** + * Build a map of key_hash => offset for the keys in the cleanable dirty portion of the log to use in cleaning. + * @param log The log to use + * @param start The offset at which dirty messages begin + * @param end The ending offset for the map that is being built + * @param map The map in which to store the mappings + * @param stats Collector for cleaning statistics + */ + private[log] def buildOffsetMap(log: UnifiedLog, + start: Long, + end: Long, + map: OffsetMap, + stats: CleanerStats): Unit = { + map.clear() + val dirty = log.logSegments(start, end).toBuffer + val nextSegmentStartOffsets = new ListBuffer[Long] + if (dirty.nonEmpty) { + for (nextSegment <- dirty.tail) nextSegmentStartOffsets.append(nextSegment.baseOffset) + nextSegmentStartOffsets.append(end) + } + info("Building offset map for log %s for %d segments in offset range [%d, %d).".format(log.name, dirty.size, start, end)) + + val transactionMetadata = new CleanedTransactionMetadata + val abortedTransactions = log.collectAbortedTransactions(start, end) + transactionMetadata.addAbortedTransactions(abortedTransactions) + + // Add all the cleanable dirty segments. We must take at least map.slots * load_factor, + // but we may be able to fit more (if there is lots of duplication in the dirty section of the log) + var full = false + for ((segment, nextSegmentStartOffset) <- dirty.zip(nextSegmentStartOffsets) if !full) { + checkDone(log.topicPartition) + + full = buildOffsetMapForSegment(log.topicPartition, segment, map, start, nextSegmentStartOffset, log.config.maxMessageSize, + transactionMetadata, stats) + if (full) + debug("Offset map is full, %d segments fully mapped, segment with base offset %d is partially mapped".format(dirty.indexOf(segment), segment.baseOffset)) + } + info("Offset map for log %s complete.".format(log.name)) + } + + /** + * Add the messages in the given segment to the offset map + * + * @param topicPartition The topic and partition of the log segment to build offset + * @param segment The segment to index + * @param map The map in which to store the key=>offset mapping + * @param startOffset The offset at which dirty messages begin + * @param nextSegmentStartOffset The base offset for next segment when building current segment + * @param maxLogMessageSize The maximum size in bytes for record allowed + * @param transactionMetadata The state of ongoing transactions for the log between offset range to build + * @param stats Collector for cleaning statistics + * + * @return If the map was filled whilst loading from this segment + */ + private def buildOffsetMapForSegment(topicPartition: TopicPartition, + segment: LogSegment, + map: OffsetMap, + startOffset: Long, + nextSegmentStartOffset: Long, + maxLogMessageSize: Int, + transactionMetadata: CleanedTransactionMetadata, + stats: CleanerStats): Boolean = { + var position = segment.offsetIndex.lookup(startOffset).position + val maxDesiredMapSize = (map.slots * this.dupBufferLoadFactor).toInt + while (position < segment.log.sizeInBytes) { + checkDone(topicPartition) + readBuffer.clear() + try { + segment.log.readInto(readBuffer, position) + } catch { + case e: Exception => + throw new KafkaException(s"Failed to read from segment $segment of partition $topicPartition " + + "while loading offset map", e) + } + val records = MemoryRecords.readableRecords(readBuffer) + throttler.maybeThrottle(records.sizeInBytes) + + val startPosition = position + for (batch <- records.batches.asScala) { + if (batch.isControlBatch) { + transactionMetadata.onControlBatchRead(batch) + stats.indexMessagesRead(1) + } else { + val isAborted = transactionMetadata.onBatchRead(batch) + if (isAborted) { + // If the batch is aborted, do not bother populating the offset map. + // Note that abort markers are supported in v2 and above, which means count is defined. + stats.indexMessagesRead(batch.countOrNull) + } else { + val recordsIterator = batch.streamingIterator(decompressionBufferSupplier) + try { + for (record <- recordsIterator.asScala) { + if (record.hasKey && record.offset >= startOffset) { + if (map.size < maxDesiredMapSize) + map.put(record.key, record.offset) + else + return true + } + stats.indexMessagesRead(1) + } + } finally recordsIterator.close() + } + } + + if (batch.lastOffset >= startOffset) + map.updateLatestOffset(batch.lastOffset) + } + val bytesRead = records.validBytes + position += bytesRead + stats.indexBytesRead(bytesRead) + + // if we didn't read even one complete message, our read buffer may be too small + if (position == startPosition) + growBuffersOrFail(segment.log, position, maxLogMessageSize, records) + } + + // In the case of offsets gap, fast forward to latest expected offset in this segment. + map.updateLatestOffset(nextSegmentStartOffset - 1L) + + restoreBuffers() + false + } +} + +/** + * A simple struct for collecting pre-clean stats + */ +private class PreCleanStats { + var maxCompactionDelayMs = 0L + var delayedPartitions = 0 + var cleanablePartitions = 0 + + def updateMaxCompactionDelay(delayMs: Long): Unit = { + maxCompactionDelayMs = Math.max(maxCompactionDelayMs, delayMs) + if (delayMs > 0) { + delayedPartitions += 1 + } + } + def recordCleanablePartitions(numOfCleanables: Int): Unit = { + cleanablePartitions = numOfCleanables + } +} + +/** + * A simple struct for collecting stats about log cleaning + */ +private class CleanerStats(time: Time = Time.SYSTEM) { + val startTime = time.milliseconds + var mapCompleteTime: Long = -1L + var endTime: Long = -1L + var bytesRead = 0L + var bytesWritten = 0L + var mapBytesRead = 0L + var mapMessagesRead = 0L + var messagesRead = 0L + var invalidMessagesRead = 0L + var messagesWritten = 0L + var bufferUtilization = 0.0d + + def readMessages(messagesRead: Int, bytesRead: Int): Unit = { + this.messagesRead += messagesRead + this.bytesRead += bytesRead + } + + def invalidMessage(): Unit = { + invalidMessagesRead += 1 + } + + def recopyMessages(messagesWritten: Int, bytesWritten: Int): Unit = { + this.messagesWritten += messagesWritten + this.bytesWritten += bytesWritten + } + + def indexMessagesRead(size: Int): Unit = { + mapMessagesRead += size + } + + def indexBytesRead(size: Int): Unit = { + mapBytesRead += size + } + + def indexDone(): Unit = { + mapCompleteTime = time.milliseconds + } + + def allDone(): Unit = { + endTime = time.milliseconds + } + + def elapsedSecs: Double = (endTime - startTime) / 1000.0 + + def elapsedIndexSecs: Double = (mapCompleteTime - startTime) / 1000.0 + +} + +/** + * Helper class for a log, its topic/partition, the first cleanable position, the first uncleanable dirty position, + * and whether it needs compaction immediately. + */ +private case class LogToClean(topicPartition: TopicPartition, + log: UnifiedLog, + firstDirtyOffset: Long, + uncleanableOffset: Long, + needCompactionNow: Boolean = false) extends Ordered[LogToClean] { + val cleanBytes: Long = log.logSegments(-1, firstDirtyOffset).map(_.size.toLong).sum + val (firstUncleanableOffset, cleanableBytes) = LogCleanerManager.calculateCleanableBytes(log, firstDirtyOffset, uncleanableOffset) + val totalBytes: Long = cleanBytes + cleanableBytes + val cleanableRatio: Double = cleanableBytes / totalBytes.toDouble + override def compare(that: LogToClean): Int = math.signum(this.cleanableRatio - that.cleanableRatio).toInt +} + +/** + * This is a helper class to facilitate tracking transaction state while cleaning the log. It maintains a set + * of the ongoing aborted and committed transactions as the cleaner is working its way through the log. This + * class is responsible for deciding when transaction markers can be removed and is therefore also responsible + * for updating the cleaned transaction index accordingly. + */ +private[log] class CleanedTransactionMetadata { + private val ongoingCommittedTxns = mutable.Set.empty[Long] + private val ongoingAbortedTxns = mutable.Map.empty[Long, AbortedTransactionMetadata] + // Minheap of aborted transactions sorted by the transaction first offset + private val abortedTransactions = mutable.PriorityQueue.empty[AbortedTxn](new Ordering[AbortedTxn] { + override def compare(x: AbortedTxn, y: AbortedTxn): Int = java.lang.Long.compare(x.firstOffset, y.firstOffset) + }.reverse) + + // Output cleaned index to write retained aborted transactions + var cleanedIndex: Option[TransactionIndex] = None + + /** + * Update the cleaned transaction state with the new found aborted transactions that has just been traversed. + * + * @param abortedTransactions The new found aborted transactions to add + */ + def addAbortedTransactions(abortedTransactions: List[AbortedTxn]): Unit = { + this.abortedTransactions ++= abortedTransactions + } + + /** + * Update the cleaned transaction state with a control batch that has just been traversed by the cleaner. + * Return true if the control batch can be discarded. + * + * @param controlBatch The control batch that been traversed + * + * @return True if the control batch can be discarded + */ + def onControlBatchRead(controlBatch: RecordBatch): Boolean = { + consumeAbortedTxnsUpTo(controlBatch.lastOffset) + + val controlRecordIterator = controlBatch.iterator + if (controlRecordIterator.hasNext) { + val controlRecord = controlRecordIterator.next() + val controlType = ControlRecordType.parse(controlRecord.key) + val producerId = controlBatch.producerId + controlType match { + case ControlRecordType.ABORT => + ongoingAbortedTxns.remove(producerId) match { + // Retain the marker until all batches from the transaction have been removed. + case Some(abortedTxnMetadata) if abortedTxnMetadata.lastObservedBatchOffset.isDefined => + cleanedIndex.foreach(_.append(abortedTxnMetadata.abortedTxn)) + false + case _ => true + } + + case ControlRecordType.COMMIT => + // This marker is eligible for deletion if we didn't traverse any batches from the transaction + !ongoingCommittedTxns.remove(producerId) + + case _ => false + } + } else { + // An empty control batch was already cleaned, so it's safe to discard + true + } + } + + private def consumeAbortedTxnsUpTo(offset: Long): Unit = { + while (abortedTransactions.headOption.exists(_.firstOffset <= offset)) { + val abortedTxn = abortedTransactions.dequeue() + ongoingAbortedTxns.getOrElseUpdate(abortedTxn.producerId, new AbortedTransactionMetadata(abortedTxn)) + } + } + + /** + * Update the transactional state for the incoming non-control batch. If the batch is part of + * an aborted transaction, return true to indicate that it is safe to discard. + * + * @param batch The batch to read when updating the transactional state + * + * @return Whether the batch is part of an aborted transaction or not + */ + def onBatchRead(batch: RecordBatch): Boolean = { + consumeAbortedTxnsUpTo(batch.lastOffset) + if (batch.isTransactional) { + ongoingAbortedTxns.get(batch.producerId) match { + case Some(abortedTransactionMetadata) => + abortedTransactionMetadata.lastObservedBatchOffset = Some(batch.lastOffset) + true + case None => + ongoingCommittedTxns += batch.producerId + false + } + } else { + false + } + } + +} + +private class AbortedTransactionMetadata(val abortedTxn: AbortedTxn) { + var lastObservedBatchOffset: Option[Long] = None + + override def toString: String = s"(txn: $abortedTxn, lastOffset: $lastObservedBatchOffset)" +} diff --git a/core/src/main/scala/kafka/log/LogCleanerManager.scala b/core/src/main/scala/kafka/log/LogCleanerManager.scala new file mode 100644 index 0000000000000..ea39f435d4096 --- /dev/null +++ b/core/src/main/scala/kafka/log/LogCleanerManager.scala @@ -0,0 +1,687 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.log + +import java.lang.{Long => JLong} +import java.io.File +import java.util.concurrent.TimeUnit +import java.util.concurrent.locks.ReentrantLock +import kafka.common.LogCleaningAbortedException +import kafka.utils.CoreUtils._ +import kafka.utils.{Logging, Pool} +import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.common.errors.KafkaStorageException +import org.apache.kafka.common.utils.Time +import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile +import org.apache.kafka.storage.internals.log.LogDirFailureChannel +import org.apache.kafka.server.metrics.KafkaMetricsGroup + +import java.util.Comparator +import scala.collection.{Iterable, Seq, mutable} +import scala.jdk.CollectionConverters._ + +private[log] sealed trait LogCleaningState +private[log] case object LogCleaningInProgress extends LogCleaningState +private[log] case object LogCleaningAborted extends LogCleaningState +private[log] case class LogCleaningPaused(pausedCount: Int) extends LogCleaningState + +private[log] class LogCleaningException(val log: UnifiedLog, + private val message: String, + private val cause: Throwable) extends KafkaException(message, cause) + +/** + * This class manages the state of each partition being cleaned. + * LogCleaningState defines the cleaning states that a TopicPartition can be in. + * 1. None : No cleaning state in a TopicPartition. In this state, it can become LogCleaningInProgress + * or LogCleaningPaused(1). Valid previous state are LogCleaningInProgress and LogCleaningPaused(1) + * 2. LogCleaningInProgress : The cleaning is currently in progress. In this state, it can become None when log cleaning is finished + * or become LogCleaningAborted. Valid previous state is None. + * 3. LogCleaningAborted : The cleaning abort is requested. In this state, it can become LogCleaningPaused(1). + * Valid previous state is LogCleaningInProgress. + * 4-a. LogCleaningPaused(1) : The cleaning is paused once. No log cleaning can be done in this state. + * In this state, it can become None or LogCleaningPaused(2). + * Valid previous state is None, LogCleaningAborted or LogCleaningPaused(2). + * 4-b. LogCleaningPaused(i) : The cleaning is paused i times where i>= 2. No log cleaning can be done in this state. + * In this state, it can become LogCleaningPaused(i-1) or LogCleaningPaused(i+1). + * Valid previous state is LogCleaningPaused(i-1) or LogCleaningPaused(i+1). + */ +private[log] class LogCleanerManager(val logDirs: Seq[File], + val logs: Pool[TopicPartition, UnifiedLog], + val logDirFailureChannel: LogDirFailureChannel) extends Logging { + import LogCleanerManager._ + + private val metricsGroup = new KafkaMetricsGroup(this.getClass) + + protected override def loggerName: String = classOf[LogCleaner].getName + + // package-private for testing + private[log] val offsetCheckpointFile = "cleaner-offset-checkpoint" + + /* the offset checkpoints holding the last cleaned point for each log */ + @volatile private var checkpoints = logDirs.map(dir => + (dir, new OffsetCheckpointFile(new File(dir, offsetCheckpointFile), logDirFailureChannel))).toMap + + /* the set of logs currently being cleaned */ + private val inProgress = mutable.HashMap[TopicPartition, LogCleaningState]() + + /* the set of uncleanable partitions (partitions that have raised an unexpected error during cleaning) + * for each log directory */ + private val uncleanablePartitions = mutable.HashMap[String, mutable.Set[TopicPartition]]() + + /* a global lock used to control all access to the in-progress set and the offset checkpoints */ + private val lock = new ReentrantLock + + /* for coordinating the pausing and the cleaning of a partition */ + private val pausedCleaningCond = lock.newCondition() + + // Visible for testing + private[log] val gaugeMetricNameWithTag = new java.util.HashMap[String, java.util.List[java.util.Map[String, String]]]() + + /* gauges for tracking the number of partitions marked as uncleanable for each log directory */ + for (dir <- logDirs) { + val metricTag = Map("logDirectory" -> dir.getAbsolutePath).asJava + metricsGroup.newGauge(UncleanablePartitionsCountMetricName, + () => inLock(lock) { uncleanablePartitions.get(dir.getAbsolutePath).map(_.size).getOrElse(0) }, + metricTag + ) + gaugeMetricNameWithTag.computeIfAbsent(UncleanablePartitionsCountMetricName, _ => new java.util.ArrayList[java.util.Map[String, String]]()) + .add(metricTag) + } + + /* gauges for tracking the number of uncleanable bytes from uncleanable partitions for each log directory */ + for (dir <- logDirs) { + val metricTag = Map("logDirectory" -> dir.getAbsolutePath).asJava + metricsGroup.newGauge(UncleanableBytesMetricName, + () => inLock(lock) { + uncleanablePartitions.get(dir.getAbsolutePath) match { + case Some(partitions) => + val lastClean = allCleanerCheckpoints + val now = Time.SYSTEM.milliseconds + partitions.iterator.map { tp => + Option(logs.get(tp)).map { + log => + val lastCleanOffset: Option[Long] = lastClean.get(tp) + val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now) + val (_, uncleanableBytes) = calculateCleanableBytes(log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset) + uncleanableBytes + }.getOrElse(0L) + }.sum + case None => 0 + } + }, + metricTag + ) + gaugeMetricNameWithTag.computeIfAbsent(UncleanableBytesMetricName, _ => new java.util.ArrayList[java.util.Map[String, String]]()) + .add(metricTag) + } + + /* a gauge for tracking the cleanable ratio of the dirtiest log */ + @volatile private var dirtiestLogCleanableRatio = 0.0 + metricsGroup.newGauge(MaxDirtyPercentMetricName, () => (100 * dirtiestLogCleanableRatio).toInt) + + /* a gauge for tracking the time since the last log cleaner run, in milli seconds */ + @volatile private var timeOfLastRun: Long = Time.SYSTEM.milliseconds + metricsGroup.newGauge(TimeSinceLastRunMsMetricName, () => Time.SYSTEM.milliseconds - timeOfLastRun) + + /** + * @return the position processed for all logs. + */ + def allCleanerCheckpoints: Map[TopicPartition, Long] = { + inLock(lock) { + checkpoints.values.flatMap(checkpoint => { + try { + checkpoint.read().asScala.map{ case (tp, offset) => tp -> Long2long(offset) } + } catch { + case e: KafkaStorageException => + error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e) + Map.empty[TopicPartition, Long] + } + }).toMap + } + } + + /** + * Package private for unit test. Get the cleaning state of the partition. + */ + private[log] def cleaningState(tp: TopicPartition): Option[LogCleaningState] = { + inLock(lock) { + inProgress.get(tp) + } + } + + /** + * Package private for unit test. Set the cleaning state of the partition. + */ + private[log] def setCleaningState(tp: TopicPartition, state: LogCleaningState): Unit = { + inLock(lock) { + inProgress.put(tp, state) + } + } + + /** + * Choose the log to clean next and add it to the in-progress set. We recompute this + * each time from the full set of logs to allow logs to be dynamically added to the pool of logs + * the log manager maintains. + */ + def grabFilthiestCompactedLog(time: Time, preCleanStats: PreCleanStats = new PreCleanStats()): Option[LogToClean] = { + inLock(lock) { + val now = time.milliseconds + this.timeOfLastRun = now + val lastClean = allCleanerCheckpoints + + val dirtyLogs = logs.filter { + case (_, log) => log.config.compact + }.filterNot { + case (topicPartition, log) => + inProgress.contains(topicPartition) || isUncleanablePartition(log, topicPartition) + }.map { + case (topicPartition, log) => // create a LogToClean instance for each + try { + val lastCleanOffset = lastClean.get(topicPartition) + val offsetsToClean = cleanableOffsets(log, lastCleanOffset, now) + // update checkpoint for logs with invalid checkpointed offsets + if (offsetsToClean.forceUpdateCheckpoint) + updateCheckpoints(log.parentDirFile, partitionToUpdateOrAdd = Option(topicPartition, offsetsToClean.firstDirtyOffset)) + val compactionDelayMs = maxCompactionDelay(log, offsetsToClean.firstDirtyOffset, now) + preCleanStats.updateMaxCompactionDelay(compactionDelayMs) + + LogToClean(topicPartition, log, offsetsToClean.firstDirtyOffset, offsetsToClean.firstUncleanableDirtyOffset, compactionDelayMs > 0) + } catch { + case e: Throwable => throw new LogCleaningException(log, + s"Failed to calculate log cleaning stats for partition $topicPartition", e) + } + }.filter(ltc => ltc.totalBytes > 0) // skip any empty logs + + this.dirtiestLogCleanableRatio = if (dirtyLogs.nonEmpty) dirtyLogs.max.cleanableRatio else 0 + // and must meet the minimum threshold for dirty byte ratio or have some bytes required to be compacted + val cleanableLogs = dirtyLogs.filter { ltc => + (ltc.needCompactionNow && ltc.cleanableBytes > 0) || ltc.cleanableRatio > ltc.log.config.minCleanableRatio + } + + if (cleanableLogs.isEmpty) + None + else { + preCleanStats.recordCleanablePartitions(cleanableLogs.size) + val filthiest = cleanableLogs.max + inProgress.put(filthiest.topicPartition, LogCleaningInProgress) + Some(filthiest) + } + } + } + + /** + * Pause logs cleaning for logs that do not have compaction enabled + * and do not have other deletion or compaction in progress. + * This is to handle potential race between retention and cleaner threads when users + * switch topic configuration between compacted and non-compacted topic. + * @return retention logs that have log cleaning successfully paused + */ + def pauseCleaningForNonCompactedPartitions(): Iterable[(TopicPartition, UnifiedLog)] = { + inLock(lock) { + val deletableLogs = logs.filter { + case (_, log) => !log.config.compact // pick non-compacted logs + }.filterNot { + case (topicPartition, _) => inProgress.contains(topicPartition) // skip any logs already in-progress + } + + deletableLogs.foreach { + case (topicPartition, _) => inProgress.put(topicPartition, LogCleaningPaused(1)) + } + deletableLogs + } + } + + /** + * Find any logs that have compaction enabled. Mark them as being cleaned + * Include logs without delete enabled, as they may have segments + * that precede the start offset. + */ + def deletableLogs(): Iterable[(TopicPartition, UnifiedLog)] = { + inLock(lock) { + val toClean = logs.filter { case (topicPartition, log) => + !inProgress.contains(topicPartition) && log.config.compact && + !isUncleanablePartition(log, topicPartition) + } + toClean.foreach { case (tp, _) => inProgress.put(tp, LogCleaningInProgress) } + toClean + } + + } + + /** + * Abort the cleaning of a particular partition, if it's in progress. This call blocks until the cleaning of + * the partition is aborted. + * This is implemented by first abortAndPausing and then resuming the cleaning of the partition. + */ + def abortCleaning(topicPartition: TopicPartition): Unit = { + inLock(lock) { + abortAndPauseCleaning(topicPartition) + resumeCleaning(Seq(topicPartition)) + } + } + + /** + * Abort the cleaning of a particular partition if it's in progress, and pause any future cleaning of this partition. + * This call blocks until the cleaning of the partition is aborted and paused. + * 1. If the partition is not in progress, mark it as paused. + * 2. Otherwise, first mark the state of the partition as aborted. + * 3. The cleaner thread checks the state periodically and if it sees the state of the partition is aborted, it + * throws a LogCleaningAbortedException to stop the cleaning task. + * 4. When the cleaning task is stopped, doneCleaning() is called, which sets the state of the partition as paused. + * 5. abortAndPauseCleaning() waits until the state of the partition is changed to paused. + * 6. If the partition is already paused, a new call to this function + * will increase the paused count by one. + */ + def abortAndPauseCleaning(topicPartition: TopicPartition): Unit = { + inLock(lock) { + inProgress.get(topicPartition) match { + case None => + inProgress.put(topicPartition, LogCleaningPaused(1)) + case Some(LogCleaningInProgress) => + inProgress.put(topicPartition, LogCleaningAborted) + case Some(LogCleaningPaused(count)) => + inProgress.put(topicPartition, LogCleaningPaused(count + 1)) + case Some(s) => + throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be aborted and paused since it is in $s state.") + } + while (!isCleaningInStatePaused(topicPartition)) + pausedCleaningCond.await(100, TimeUnit.MILLISECONDS) + } + } + + /** + * Resume the cleaning of paused partitions. + * Each call of this function will undo one pause. + */ + def resumeCleaning(topicPartitions: Iterable[TopicPartition]): Unit = { + inLock(lock) { + topicPartitions.foreach { + topicPartition => + inProgress.get(topicPartition) match { + case None => + throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is not paused.") + case Some(state) => + state match { + case LogCleaningPaused(count) if count == 1 => + inProgress.remove(topicPartition) + case LogCleaningPaused(count) if count > 1 => + inProgress.put(topicPartition, LogCleaningPaused(count - 1)) + case s => + throw new IllegalStateException(s"Compaction for partition $topicPartition cannot be resumed since it is in $s state.") + } + } + } + } + } + + /** + * Check if the cleaning for a partition is in a particular state. The caller is expected to hold lock while making the call. + */ + private def isCleaningInState(topicPartition: TopicPartition, expectedState: LogCleaningState): Boolean = { + inProgress.get(topicPartition) match { + case None => false + case Some(state) => + if (state == expectedState) + true + else + false + } + } + + /** + * Check if the cleaning for a partition is paused. The caller is expected to hold lock while making the call. + */ + private def isCleaningInStatePaused(topicPartition: TopicPartition): Boolean = { + inProgress.get(topicPartition) match { + case None => false + case Some(state) => + state match { + case _: LogCleaningPaused => + true + case _ => + false + } + } + } + + /** + * Check if the cleaning for a partition is aborted. If so, throw an exception. + */ + def checkCleaningAborted(topicPartition: TopicPartition): Unit = { + inLock(lock) { + if (isCleaningInState(topicPartition, LogCleaningAborted)) + throw new LogCleaningAbortedException() + } + } + + /** + * Update checkpoint file, adding or removing partitions if necessary. + * + * @param dataDir The File object to be updated + * @param partitionToUpdateOrAdd The [TopicPartition, Long] map data to be updated. pass "none" if doing remove, not add + * @param partitionToRemove The TopicPartition to be removed + */ + def updateCheckpoints(dataDir: File, + partitionToUpdateOrAdd: Option[(TopicPartition, JLong)] = None, + partitionToRemove: Option[TopicPartition] = None): Unit = { + inLock(lock) { + val checkpoint = checkpoints(dataDir) + if (checkpoint != null) { + try { + val currentCheckpoint = checkpoint.read().asScala.filter { case (tp, _) => logs.keys.contains(tp) }.toMap + // remove the partition offset if any + var updatedCheckpoint = partitionToRemove match { + case Some(topicPartition) => currentCheckpoint - topicPartition + case None => currentCheckpoint + } + // update or add the partition offset if any + updatedCheckpoint = partitionToUpdateOrAdd match { + case Some(updatedOffset) => updatedCheckpoint + updatedOffset + case None => updatedCheckpoint + } + + checkpoint.write(updatedCheckpoint.asJava) + } catch { + case e: KafkaStorageException => + error(s"Failed to access checkpoint file ${checkpoint.file.getName} in dir ${checkpoint.file.getParentFile.getAbsolutePath}", e) + } + } + } + } + + /** + * alter the checkpoint directory for the topicPartition, to remove the data in sourceLogDir, and add the data in destLogDir + */ + def alterCheckpointDir(topicPartition: TopicPartition, sourceLogDir: File, destLogDir: File): Unit = { + inLock(lock) { + try { + checkpoints.get(sourceLogDir).flatMap(_.read().asScala.get(topicPartition)) match { + case Some(offset) => + debug(s"Removing the partition offset data in checkpoint file for '$topicPartition' " + + s"from ${sourceLogDir.getAbsoluteFile} directory.") + updateCheckpoints(sourceLogDir, partitionToRemove = Option(topicPartition)) + + debug(s"Adding the partition offset data in checkpoint file for '$topicPartition' " + + s"to ${destLogDir.getAbsoluteFile} directory.") + updateCheckpoints(destLogDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) + case None => + } + } catch { + case e: KafkaStorageException => + error(s"Failed to access checkpoint file in dir ${sourceLogDir.getAbsolutePath}", e) + } + + val logUncleanablePartitions = uncleanablePartitions.getOrElse(sourceLogDir.toString, mutable.Set[TopicPartition]()) + if (logUncleanablePartitions.contains(topicPartition)) { + logUncleanablePartitions.remove(topicPartition) + markPartitionUncleanable(destLogDir.toString, topicPartition) + } + } + } + + /** + * Stop cleaning logs in the provided directory + * + * @param dir the absolute path of the log dir + */ + def handleLogDirFailure(dir: String): Unit = { + warn(s"Stopping cleaning logs in dir $dir") + inLock(lock) { + checkpoints = checkpoints.filter { case (k, _) => k.getAbsolutePath != dir } + } + } + + /** + * Truncate the checkpointed offset for the given partition if its checkpointed offset is larger than the given offset + */ + def maybeTruncateCheckpoint(dataDir: File, topicPartition: TopicPartition, offset: JLong): Unit = { + inLock(lock) { + if (logs.get(topicPartition).config.compact) { + val checkpoint = checkpoints(dataDir) + if (checkpoint != null) { + val existing = checkpoint.read() + if (existing.getOrDefault(topicPartition, 0L) > offset) { + existing.put(topicPartition, offset) + checkpoint.write(existing) + } + } + } + } + } + + /** + * Save out the endOffset and remove the given log from the in-progress set, if not aborted. + */ + def doneCleaning(topicPartition: TopicPartition, dataDir: File, endOffset: Long): Unit = { + inLock(lock) { + inProgress.get(topicPartition) match { + case Some(LogCleaningInProgress) => + updateCheckpoints(dataDir, partitionToUpdateOrAdd = Option(topicPartition, endOffset)) + inProgress.remove(topicPartition) + case Some(LogCleaningAborted) => + inProgress.put(topicPartition, LogCleaningPaused(1)) + pausedCleaningCond.signalAll() + case None => + throw new IllegalStateException(s"State for partition $topicPartition should exist.") + case s => + throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.") + } + } + } + + def doneDeleting(topicPartitions: Iterable[TopicPartition]): Unit = { + inLock(lock) { + topicPartitions.foreach { + topicPartition => + inProgress.get(topicPartition) match { + case Some(LogCleaningInProgress) => + inProgress.remove(topicPartition) + case Some(LogCleaningAborted) => + inProgress.put(topicPartition, LogCleaningPaused(1)) + pausedCleaningCond.signalAll() + case None => + throw new IllegalStateException(s"State for partition $topicPartition should exist.") + case s => + throw new IllegalStateException(s"In-progress partition $topicPartition cannot be in $s state.") + } + } + } + } + + /** + * Returns an immutable set of the uncleanable partitions for a given log directory + * Only used for testing + */ + private[log] def uncleanablePartitions(logDir: String): Set[TopicPartition] = { + var partitions: Set[TopicPartition] = Set() + inLock(lock) { partitions ++= uncleanablePartitions.getOrElse(logDir, partitions) } + partitions + } + + def markPartitionUncleanable(logDir: String, partition: TopicPartition): Unit = { + inLock(lock) { + uncleanablePartitions.get(logDir) match { + case Some(partitions) => + partitions.add(partition) + case None => + uncleanablePartitions.put(logDir, mutable.Set(partition)) + } + } + } + + private def isUncleanablePartition(log: UnifiedLog, topicPartition: TopicPartition): Boolean = { + inLock(lock) { + uncleanablePartitions.get(log.parentDir).exists(partitions => partitions.contains(topicPartition)) + } + } + + def maintainUncleanablePartitions(): Unit = { + // Remove deleted partitions from uncleanablePartitions + inLock(lock) { + // Remove deleted partitions + uncleanablePartitions.values.foreach { partitions => + partitions.filterInPlace(logs.contains) + } + + // Remove entries with empty partition set. + uncleanablePartitions.filterInPlace { + case (_, partitions) => partitions.nonEmpty + } + } + } + + def removeMetrics(): Unit = { + GaugeMetricNameNoTag.foreach(metricsGroup.removeMetric) + gaugeMetricNameWithTag.asScala.foreach { metricNameAndTags => + metricNameAndTags._2.asScala.foreach { tag => + metricsGroup.removeMetric(metricNameAndTags._1, tag) + } + } + gaugeMetricNameWithTag.clear() + } +} + +/** + * Helper class for the range of cleanable dirty offsets of a log and whether to update the checkpoint associated with + * the log + * + * @param firstDirtyOffset the lower (inclusive) offset to begin cleaning from + * @param firstUncleanableDirtyOffset the upper(exclusive) offset to clean to + * @param forceUpdateCheckpoint whether to update the checkpoint associated with this log. if true, checkpoint should be + * reset to firstDirtyOffset + */ +private case class OffsetsToClean(firstDirtyOffset: Long, + firstUncleanableDirtyOffset: Long, + forceUpdateCheckpoint: Boolean = false) { +} + +private[log] object LogCleanerManager extends Logging { + private val UncleanablePartitionsCountMetricName = "uncleanable-partitions-count" + private val UncleanableBytesMetricName = "uncleanable-bytes" + private val MaxDirtyPercentMetricName = "max-dirty-percent" + private val TimeSinceLastRunMsMetricName = "time-since-last-run-ms" + + // Visible for testing + private[log] val GaugeMetricNameNoTag = Set( + MaxDirtyPercentMetricName, + TimeSinceLastRunMsMetricName + ) + + private def isCompactAndDelete(log: UnifiedLog): Boolean = { + log.config.compact && log.config.delete + } + + /** + * get max delay between the time when log is required to be compacted as determined + * by maxCompactionLagMs and the current time. + */ + private def maxCompactionDelay(log: UnifiedLog, firstDirtyOffset: Long, now: Long) : Long = { + val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset) + val firstBatchTimestamps = log.getFirstBatchTimestampForSegments(dirtyNonActiveSegments).stream.filter(_ > 0) + + val earliestDirtySegmentTimestamp = firstBatchTimestamps.min(Comparator.naturalOrder()).orElse(Long.MaxValue) + + val maxCompactionLagMs = math.max(log.config.maxCompactionLagMs, 0L) + val cleanUntilTime = now - maxCompactionLagMs + + if (earliestDirtySegmentTimestamp < cleanUntilTime) + cleanUntilTime - earliestDirtySegmentTimestamp + else + 0L + } + + /** + * Returns the range of dirty offsets that can be cleaned. + * + * @param log the log + * @param lastCleanOffset the last checkpointed offset + * @param now the current time in milliseconds of the cleaning operation + * @return OffsetsToClean containing offsets for cleanable portion of log and whether the log checkpoint needs updating + */ + def cleanableOffsets(log: UnifiedLog, lastCleanOffset: Option[Long], now: Long): OffsetsToClean = { + // If the log segments are abnormally truncated and hence the checkpointed offset is no longer valid; + // reset to the log starting offset and log the error + val (firstDirtyOffset, forceUpdateCheckpoint) = { + val logStartOffset = log.logStartOffset + val checkpointDirtyOffset = lastCleanOffset.getOrElse(logStartOffset) + + if (checkpointDirtyOffset < logStartOffset) { + // Don't bother with the warning if compact and delete are enabled. + if (!isCompactAndDelete(log)) + warn(s"Resetting first dirty offset of ${log.name} to log start offset $logStartOffset " + + s"since the checkpointed offset $checkpointDirtyOffset is invalid.") + (logStartOffset, true) + } else if (checkpointDirtyOffset > log.logEndOffset) { + // The dirty offset has gotten ahead of the log end offset. This could happen if there was data + // corruption at the end of the log. We conservatively assume that the full log needs cleaning. + warn(s"The last checkpoint dirty offset for partition ${log.name} is $checkpointDirtyOffset, " + + s"which is larger than the log end offset ${log.logEndOffset}. Resetting to the log start offset $logStartOffset.") + (logStartOffset, true) + } else { + (checkpointDirtyOffset, false) + } + } + + val minCompactionLagMs = math.max(log.config.compactionLagMs, 0L) + + // Find the first segment that cannot be cleaned. We cannot clean past: + // 1. The active segment + // 2. The last stable offset (including the high watermark) + // 3. Any segments closer to the head of the log than the minimum compaction lag time + val firstUncleanableDirtyOffset: Long = Seq( + + // we do not clean beyond the last stable offset + Some(log.lastStableOffset), + + // the active segment is always uncleanable + Option(log.activeSegment.baseOffset), + + // the first segment whose largest message timestamp is within a minimum time lag from now + if (minCompactionLagMs > 0) { + // dirty log segments + val dirtyNonActiveSegments = log.nonActiveLogSegmentsFrom(firstDirtyOffset) + dirtyNonActiveSegments.asScala.find { s => + val isUncleanable = s.largestTimestamp > now - minCompactionLagMs + debug(s"Checking if log segment may be cleaned: log='${log.name}' segment.baseOffset=${s.baseOffset} " + + s"segment.largestTimestamp=${s.largestTimestamp}; now - compactionLag=${now - minCompactionLagMs}; " + + s"is uncleanable=$isUncleanable") + isUncleanable + }.map(_.baseOffset) + } else None + ).flatten.min + + debug(s"Finding range of cleanable offsets for log=${log.name}. Last clean offset=$lastCleanOffset " + + s"now=$now => firstDirtyOffset=$firstDirtyOffset firstUncleanableOffset=$firstUncleanableDirtyOffset " + + s"activeSegment.baseOffset=${log.activeSegment.baseOffset}") + + OffsetsToClean(firstDirtyOffset, math.max(firstDirtyOffset, firstUncleanableDirtyOffset), forceUpdateCheckpoint) + } + + /** + * Given the first dirty offset and an uncleanable offset, calculates the total cleanable bytes for this log + * @return the biggest uncleanable offset and the total amount of cleanable bytes + */ + def calculateCleanableBytes(log: UnifiedLog, firstDirtyOffset: Long, uncleanableOffset: Long): (Long, Long) = { + val firstUncleanableSegment = log.nonActiveLogSegmentsFrom(uncleanableOffset).asScala.headOption.getOrElse(log.activeSegment) + val firstUncleanableOffset = firstUncleanableSegment.baseOffset + val cleanableBytes = log.logSegments(math.min(firstDirtyOffset, firstUncleanableOffset), firstUncleanableOffset).map(_.size.toLong).sum + + (firstUncleanableOffset, cleanableBytes) + } + +} diff --git a/core/src/main/scala/kafka/log/LogManager.scala b/core/src/main/scala/kafka/log/LogManager.scala index d3f64793685d2..0ffe0e0488b3d 100755 --- a/core/src/main/scala/kafka/log/LogManager.scala +++ b/core/src/main/scala/kafka/log/LogManager.scala @@ -22,31 +22,30 @@ import java.io.{File, IOException} import java.nio.file.{Files, NoSuchFileException} import java.util.concurrent._ import java.util.concurrent.atomic.AtomicInteger +import kafka.server.metadata.ConfigRepository import kafka.server.{KafkaConfig, KafkaRaftServer} +import kafka.server.metadata.BrokerMetadataPublisher.info import kafka.utils.threadsafe -import kafka.utils.{CoreUtils, Logging} +import kafka.utils.{CoreUtils, Logging, Pool} import org.apache.kafka.common.{DirectoryId, KafkaException, TopicPartition, Uuid} import org.apache.kafka.common.utils.{Exit, KafkaThread, Time, Utils} import org.apache.kafka.common.errors.{InconsistentTopicIdException, KafkaStorageException, LogDirNotFoundException} -import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import scala.jdk.CollectionConverters._ import scala.collection._ import scala.collection.mutable.ArrayBuffer import scala.util.{Failure, Success, Try} import org.apache.kafka.image.TopicsImage -import org.apache.kafka.metadata.ConfigRepository import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, PropertiesUtils} -import java.util.{Collections, Optional, OptionalLong, Properties} +import java.util.{Collections, OptionalLong, Properties} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.{FileLock, Scheduler} -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, LogManager => JLogManager, LogOffsetsListener, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, RemoteIndexCache} import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, OffsetCheckpointFile} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.util -import java.util.stream.Collectors /** * The entry point to the kafka log management subsystem. The log manager is responsible for log creation, retrieval, and cleaning. @@ -76,26 +75,26 @@ class LogManager(logDirs: Seq[File], brokerTopicStats: BrokerTopicStats, logDirFailureChannel: LogDirFailureChannel, time: Time, + val keepPartitionMetadataFile: Boolean, remoteStorageSystemEnable: Boolean, - val initialTaskDelayMs: Long, - cleanerFactory: (CleanerConfig, util.List[File], ConcurrentMap[TopicPartition, UnifiedLog], LogDirFailureChannel, Time) => LogCleaner = - (cleanerConfig, files, map, logDirFailureChannel, time) => new LogCleaner(cleanerConfig, files, map, logDirFailureChannel, time) - ) extends Logging { + val initialTaskDelayMs: Long) extends Logging { + + import LogManager._ private val metricsGroup = new KafkaMetricsGroup(this.getClass) private val logCreationOrDeletionLock = new Object - private val currentLogs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + private val currentLogs = new Pool[TopicPartition, UnifiedLog]() // Future logs are put in the directory with "-future" suffix. Future log is created when user wants to move replica // from one log directory to another log directory on the same broker. The directory of the future log will be renamed // to replace the current log of the partition after the future log catches up with the current log - private val futureLogs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + private val futureLogs = new Pool[TopicPartition, UnifiedLog]() // Each element in the queue contains the log object to be deleted and the time it is scheduled for deletion. private val logsToBeDeleted = new LinkedBlockingQueue[(UnifiedLog, Long)]() // Map of stray partition to stray log. This holds all stray logs detected on the broker. // Visible for testing - private val strayLogs = new ConcurrentHashMap[TopicPartition, UnifiedLog]() + private val strayLogs = new Pool[TopicPartition, UnifiedLog]() private val _liveLogDirs: ConcurrentLinkedQueue[File] = createAndValidateLogDirs(logDirs, initialOfflineDirs) @volatile private var _currentDefaultConfig = initialDefaultConfig @@ -127,9 +126,9 @@ class LogManager(logDirs: Seq[File], def directoryIdsSet: Predef.Set[Uuid] = directoryIds.values.toSet @volatile private var recoveryPointCheckpoints = liveLogDirs.map(dir => - (dir, new OffsetCheckpointFile(new File(dir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), logDirFailureChannel))).toMap + (dir, new OffsetCheckpointFile(new File(dir, RecoveryPointCheckpointFile), logDirFailureChannel))).toMap @volatile private var logStartOffsetCheckpoints = liveLogDirs.map(dir => - (dir, new OffsetCheckpointFile(new File(dir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE), logDirFailureChannel))).toMap + (dir, new OffsetCheckpointFile(new File(dir, LogStartOffsetCheckpointFile), logDirFailureChannel))).toMap private val preferredLogDirs = new ConcurrentHashMap[TopicPartition, String]() @@ -232,8 +231,8 @@ class LogManager(logDirs: Seq[File], if (cleaner != null) cleaner.handleLogDirFailure(dir) - def removeOfflineLogs(logs: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog]): Iterable[TopicPartition] = { - val offlineTopicPartitions: Iterable[TopicPartition] = logs.asScala.collect { + def removeOfflineLogs(logs: Pool[TopicPartition, UnifiedLog]): Iterable[TopicPartition] = { + val offlineTopicPartitions: Iterable[TopicPartition] = logs.collect { case (tp, log) if log.parentDir == dir => tp } offlineTopicPartitions.foreach { topicPartition => { @@ -261,7 +260,7 @@ class LogManager(logDirs: Seq[File], private def lockLogDirs(dirs: Seq[File]): Seq[FileLock] = { dirs.flatMap { dir => try { - val lock = new FileLock(new File(dir, JLogManager.LOCK_FILE_NAME)) + val lock = new FileLock(new File(dir, LockFileName)) if (!lock.tryLock()) throw new KafkaException("Failed to acquire lock on file .lock in " + lock.file.getParent + ". A Kafka instance in another process or thread is using this directory.") @@ -330,27 +329,27 @@ class LogManager(logDirs: Seq[File], val logRecoveryPoint = recoveryPoints.getOrDefault(topicPartition, 0L) val logStartOffset = logStartOffsets.getOrDefault(topicPartition, 0L) - val log = UnifiedLog.create( - logDir, - config, - logStartOffset, - logRecoveryPoint, - scheduler, - brokerTopicStats, - time, - maxTransactionTimeoutMs, - producerStateManagerConfig, - producerIdExpirationCheckIntervalMs, - logDirFailureChannel, - hadCleanShutdown, - Optional.empty, - numRemainingSegments, - remoteStorageSystemEnable, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) - - if (logDir.getName.endsWith(UnifiedLog.DELETE_DIR_SUFFIX)) { + val log = UnifiedLog( + dir = logDir, + config = config, + logStartOffset = logStartOffset, + recoveryPoint = logRecoveryPoint, + maxTransactionTimeoutMs = maxTransactionTimeoutMs, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs, + scheduler = scheduler, + time = time, + brokerTopicStats = brokerTopicStats, + logDirFailureChannel = logDirFailureChannel, + lastShutdownClean = hadCleanShutdown, + topicId = None, + keepPartitionMetadataFile = keepPartitionMetadataFile, + numRemainingSegments = numRemainingSegments, + remoteStorageSystemEnable = remoteStorageSystemEnable) + + if (logDir.getName.endsWith(UnifiedLog.DeleteDirSuffix)) { addLogToBeDeleted(log) - } else if (logDir.getName.endsWith(UnifiedLog.STRAY_DIR_SUFFIX)) { + } else if (logDir.getName.endsWith(UnifiedLog.StrayDirSuffix)) { addStrayLog(topicPartition, log) warn(s"Loaded stray log: $logDir") } else if (isStray(log)) { @@ -358,7 +357,7 @@ class LogManager(logDirs: Seq[File], // Broker with an offline directory may be unable to detect it still holds a to-be-deleted replica, // and can create a conflicting topic partition for a new incarnation of the topic in one of the remaining online directories. // So upon a restart in which the offline directory is back online we need to clean up the old replica directory. - log.renameDir(UnifiedLog.logStrayDirName(log.topicPartition), false) + log.renameDir(UnifiedLog.logStrayDirName(log.topicPartition), shouldReinitialize = false) addStrayLog(log.topicPartition, log) warn(s"Log in ${logDir.getAbsolutePath} marked stray and renamed to ${log.dir.getAbsolutePath}") } else { @@ -630,11 +629,8 @@ class LogManager(logDirs: Seq[File], initialTaskDelayMs) } if (cleanerConfig.enableCleaner) { - _cleaner = cleanerFactory(cleanerConfig, liveLogDirs.asJava, currentLogs, logDirFailureChannel, time) + _cleaner = new LogCleaner(cleanerConfig, liveLogDirs, currentLogs, logDirFailureChannel, time = time) _cleaner.startup() - } else { - warn("The config `log.cleaner.enable` is deprecated and will be removed in Kafka 5.0. Starting from Kafka 5.0, the log cleaner will always be enabled, and this config will be ignored.") - } } @@ -683,7 +679,7 @@ class LogManager(logDirs: Seq[File], try { jobs.foreachEntry { (dir, dirJobs) => - if (JLogManager.waitForAllToComplete(dirJobs.toList.asJava, + if (waitForAllToComplete(dirJobs, e => warn(s"There was an error in one of the threads during LogManager shutdown: ${e.getCause}"))) { val logs = logsInDir(localLogsByDir, dir) @@ -764,7 +760,7 @@ class LogManager(logDirs: Seq[File], def truncateFullyAndStartAt(topicPartition: TopicPartition, newOffset: Long, isFuture: Boolean, - logStartOffsetOpt: Optional[JLong] = Optional.empty): Unit = { + logStartOffsetOpt: Option[Long] = None): Unit = { val log = { if (isFuture) futureLogs.get(topicPartition) @@ -896,9 +892,9 @@ class LogManager(logDirs: Seq[File], /** * Resume cleaning of the provided partition and log a message about it. */ - def resumeCleaning(topicPartition: TopicPartition): Unit = { + private def resumeCleaning(topicPartition: TopicPartition): Unit = { if (cleaner != null) { - cleaner.resumeCleaning(util.Set.of(topicPartition)) + cleaner.resumeCleaning(Seq(topicPartition)) info(s"Cleaning for partition $topicPartition is resumed") } } @@ -1011,7 +1007,7 @@ class LogManager(logDirs: Seq[File], * @throws InconsistentTopicIdException if the topic ID in the log does not match the topic ID provided */ def getOrCreateLog(topicPartition: TopicPartition, isNew: Boolean = false, isFuture: Boolean = false, - topicId: Optional[Uuid], targetLogDirectoryId: Option[Uuid] = Option.empty): UnifiedLog = { + topicId: Option[Uuid], targetLogDirectoryId: Option[Uuid] = Option.empty): UnifiedLog = { logCreationOrDeletionLock synchronized { val log = getLog(topicPartition, isFuture).getOrElse { // create the log if it has not already been created in another thread @@ -1056,23 +1052,21 @@ class LogManager(logDirs: Seq[File], .get // If Failure, will throw val config = fetchLogConfig(topicPartition.topic) - val log = UnifiedLog.create( - logDir, - config, - 0L, - 0L, - scheduler, - brokerTopicStats, - time, - maxTransactionTimeoutMs, - producerStateManagerConfig, - producerIdExpirationCheckIntervalMs, - logDirFailureChannel, - true, - topicId, - new ConcurrentHashMap[String, Integer](), - remoteStorageSystemEnable, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) + val log = UnifiedLog( + dir = logDir, + config = config, + logStartOffset = 0L, + recoveryPoint = 0L, + maxTransactionTimeoutMs = maxTransactionTimeoutMs, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs, + scheduler = scheduler, + time = time, + brokerTopicStats = brokerTopicStats, + logDirFailureChannel = logDirFailureChannel, + topicId = topicId, + keepPartitionMetadataFile = keepPartitionMetadataFile, + remoteStorageSystemEnable = remoteStorageSystemEnable) if (isFuture) futureLogs.put(topicPartition, log) @@ -1087,8 +1081,8 @@ class LogManager(logDirs: Seq[File], } // Ensure topic IDs are consistent - topicId.ifPresent { topicId => - log.topicId.ifPresent { logTopicId => + topicId.foreach { topicId => + log.topicId.foreach { logTopicId => if (topicId != logTopicId) throw new InconsistentTopicIdException(s"Tried to assign topic ID $topicId to log for topic partition $topicPartition," + s"but log already contained topic ID $logTopicId") @@ -1185,16 +1179,15 @@ class LogManager(logDirs: Seq[File], } private def findAbandonedFutureLogs(brokerId: Int, newTopicsImage: TopicsImage): Iterable[(UnifiedLog, Option[UnifiedLog])] = { - futureLogs.asScala.values.flatMap { futureLog => - val topicId = futureLog.topicId.orElseThrow(() => - new RuntimeException(s"The log dir $futureLog does not have a topic ID, " + + futureLogs.values.flatMap { futureLog => + val topicId = futureLog.topicId.getOrElse { + throw new RuntimeException(s"The log dir $futureLog does not have a topic ID, " + "which is not allowed when running in KRaft mode.") - ) + } val partitionId = futureLog.topicPartition.partition() Option(newTopicsImage.getPartition(topicId, partitionId)) .filter(pr => directoryId(futureLog.parentDir).contains(pr.directory(brokerId))) - .map(_ => (futureLog, Option(currentLogs.get(futureLog.topicPartition)).filter(currentLog => - currentLog.topicId.filter(_ == topicId).isPresent))) + .map(_ => (futureLog, Option(currentLogs.get(futureLog.topicPartition)).filter(currentLog => currentLog.topicId.contains(topicId)))) } } @@ -1223,7 +1216,7 @@ class LogManager(logDirs: Seq[File], def replaceCurrentWithFutureLog(sourceLog: Option[UnifiedLog], destLog: UnifiedLog, updateHighWatermark: Boolean = false): Unit = { val topicPartition = destLog.topicPartition - destLog.renameDir(UnifiedLog.logDirName(topicPartition), true) + destLog.renameDir(UnifiedLog.logDirName(topicPartition), shouldReinitialize = true) // the metrics tags still contain "future", so we have to remove it. // we will add metrics back after sourceLog remove the metrics destLog.removeLogMetrics() @@ -1244,7 +1237,7 @@ class LogManager(logDirs: Seq[File], try { sourceLog.foreach { srcLog => - srcLog.renameDir(UnifiedLog.logDeleteDirName(topicPartition), true) + srcLog.renameDir(UnifiedLog.logDeleteDirName(topicPartition), shouldReinitialize = true) // Now that replica in source log directory has been successfully renamed for deletion. // Close the log, update checkpoint files, and enqueue this log to be deleted. srcLog.close() @@ -1290,15 +1283,15 @@ class LogManager(logDirs: Seq[File], if (cleaner != null && !isFuture) { cleaner.abortCleaning(topicPartition) if (checkpoint) { - cleaner.updateCheckpoints(removedLog.parentDirFile, Optional.of(topicPartition)) + cleaner.updateCheckpoints(removedLog.parentDirFile, partitionToRemove = Option(topicPartition)) } } if (isStray) { // Move aside stray partitions, don't delete them - removedLog.renameDir(UnifiedLog.logStrayDirName(topicPartition), false) + removedLog.renameDir(UnifiedLog.logStrayDirName(topicPartition), shouldReinitialize = false) warn(s"Log for partition ${removedLog.topicPartition} is marked as stray and renamed to ${removedLog.dir.getAbsolutePath}") } else { - removedLog.renameDir(UnifiedLog.logDeleteDirName(topicPartition), false) + removedLog.renameDir(UnifiedLog.logDeleteDirName(topicPartition), shouldReinitialize = false) addLogToBeDeleted(removedLog) info(s"Log for partition ${removedLog.topicPartition} is renamed to ${removedLog.dir.getAbsolutePath} and is scheduled for deletion") } @@ -1348,7 +1341,7 @@ class LogManager(logDirs: Seq[File], val logsByDirCached = logsByDir logDirs.foreach { logDir => - if (cleaner != null) cleaner.updateCheckpoints(logDir, Optional.empty()) + if (cleaner != null) cleaner.updateCheckpoints(logDir) val logsToCheckpoint = logsInDir(logsByDirCached, logDir) checkpointRecoveryOffsetsInDir(logDir, logsToCheckpoint) checkpointLogStartOffsetsInDir(logDir, logsToCheckpoint) @@ -1386,22 +1379,19 @@ class LogManager(logDirs: Seq[File], val startMs = time.milliseconds // clean current logs. - val deletableLogs: util.Map[TopicPartition, UnifiedLog] = { + val deletableLogs = { if (cleaner != null) { // prevent cleaner from working on same partitions when changing cleanup policy cleaner.pauseCleaningForNonCompactedPartitions() } else { - currentLogs.entrySet().stream() - .filter(e => !e.getValue.config.compact) - .collect(Collectors.toMap( - (e: util.Map.Entry[TopicPartition, UnifiedLog]) => e.getKey, - (e: util.Map.Entry[TopicPartition, UnifiedLog]) => e.getValue - )) + currentLogs.filter { + case (_, log) => !log.config.compact + } } } try { - deletableLogs.forEach { + deletableLogs.foreach { case (topicPartition, log) => debug(s"Garbage collecting '${log.name}'") total += log.deleteOldSegments() @@ -1415,7 +1405,7 @@ class LogManager(logDirs: Seq[File], } } finally { if (cleaner != null) { - cleaner.resumeCleaning(deletableLogs.keySet()) + cleaner.resumeCleaning(deletableLogs.map(_._1)) } } @@ -1426,10 +1416,10 @@ class LogManager(logDirs: Seq[File], /** * Get all the partition logs */ - def allLogs: Iterable[UnifiedLog] = currentLogs.asScala.values ++ futureLogs.asScala.values + def allLogs: Iterable[UnifiedLog] = currentLogs.values ++ futureLogs.values def logsByTopic(topic: String): Seq[UnifiedLog] = { - (currentLogs.asScala.toList ++ futureLogs.asScala.toList).collect { + (currentLogs.toList ++ futureLogs.toList).collect { case (topicPartition, log) if topicPartition.topic == topic => log } } @@ -1445,8 +1435,8 @@ class LogManager(logDirs: Seq[File], def addToDir(tp: TopicPartition, log: UnifiedLog): Unit = { byDir.getOrElseUpdate(log.parentDir, new mutable.AnyRefMap[TopicPartition, UnifiedLog]()).put(tp, log) } - currentLogs.asScala.foreachEntry(addToDir) - futureLogs.asScala.foreachEntry(addToDir) + currentLogs.foreachEntry(addToDir) + futureLogs.foreachEntry(addToDir) byDir } @@ -1474,7 +1464,7 @@ class LogManager(logDirs: Seq[File], private def flushDirtyLogs(): Unit = { debug("Checking for dirty logs to flush...") - for ((topicPartition, log) <- currentLogs.asScala.toList ++ futureLogs.asScala.toList) { + for ((topicPartition, log) <- currentLogs.toList ++ futureLogs.toList) { try { val timeSinceLastFlush = time.milliseconds - log.lastFlushTime debug(s"Checking if flush is needed on ${topicPartition.topic} flush interval ${log.config.flushMs}" + @@ -1488,7 +1478,7 @@ class LogManager(logDirs: Seq[File], } } - private def removeLogAndMetrics(logs: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog], tp: TopicPartition): Option[UnifiedLog] = { + private def removeLogAndMetrics(logs: Pool[TopicPartition, UnifiedLog], tp: TopicPartition): Option[UnifiedLog] = { val removedLog = logs.remove(tp) if (removedLog != null) { removedLog.removeLogMetrics() @@ -1523,6 +1513,25 @@ class LogManager(logDirs: Seq[File], } object LogManager { + val LockFileName = ".lock" + + /** + * Wait all jobs to complete + * @param jobs jobs + * @param callback this will be called to handle the exception caused by each Future#get + * @return true if all pass. Otherwise, false + */ + private[log] def waitForAllToComplete(jobs: Seq[Future[_]], callback: Throwable => Unit): Boolean = { + jobs.count(future => Try(future.get) match { + case Success(_) => false + case Failure(e) => + callback(e) + true + }) == 0 + } + + val RecoveryPointCheckpointFile = "recovery-point-offset-checkpoint" + val LogStartOffsetCheckpointFile = "log-start-offset-checkpoint" def apply(config: KafkaConfig, initialOfflineDirs: Seq[String], @@ -1530,16 +1539,16 @@ object LogManager { kafkaScheduler: Scheduler, time: Time, brokerTopicStats: BrokerTopicStats, - logDirFailureChannel: LogDirFailureChannel): LogManager = { + logDirFailureChannel: LogDirFailureChannel, + keepPartitionMetadataFile: Boolean): LogManager = { val defaultProps = config.extractLogConfigMap - LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validateBrokerLogConfigValues(defaultProps, config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) val defaultLogConfig = new LogConfig(defaultProps) - val cleanerConfig = new CleanerConfig(config) - val transactionLogConfig = new TransactionLogConfig(config) + val cleanerConfig = LogCleaner.cleanerConfig(config) - new LogManager(logDirs = config.logDirs.asScala.map(new File(_).getAbsoluteFile), + new LogManager(logDirs = config.logDirs.map(new File(_).getAbsoluteFile), initialOfflineDirs = initialOfflineDirs.map(new File(_).getAbsoluteFile), configRepository = configRepository, initialDefaultConfig = defaultLogConfig, @@ -1549,14 +1558,56 @@ object LogManager { flushRecoveryOffsetCheckpointMs = config.logFlushOffsetCheckpointIntervalMs, flushStartOffsetCheckpointMs = config.logFlushStartOffsetCheckpointIntervalMs, retentionCheckMs = config.logCleanupIntervalMs, - maxTransactionTimeoutMs = new TransactionStateManagerConfig(config).transactionMaxTimeoutMs, - producerStateManagerConfig = new ProducerStateManagerConfig(transactionLogConfig.producerIdExpirationMs, transactionLogConfig.transactionPartitionVerificationEnable), - producerIdExpirationCheckIntervalMs = transactionLogConfig.producerIdExpirationCheckIntervalMs, + maxTransactionTimeoutMs = config.transactionStateManagerConfig.transactionMaxTimeoutMs, + producerStateManagerConfig = new ProducerStateManagerConfig(config.transactionLogConfig.producerIdExpirationMs, config.transactionLogConfig.transactionPartitionVerificationEnable), + producerIdExpirationCheckIntervalMs = config.transactionLogConfig.producerIdExpirationCheckIntervalMs, scheduler = kafkaScheduler, brokerTopicStats = brokerTopicStats, logDirFailureChannel = logDirFailureChannel, time = time, - remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled, + keepPartitionMetadataFile = keepPartitionMetadataFile, + remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), initialTaskDelayMs = config.logInitialTaskDelayMs) } + + /** + * Returns true if the given log should not be on the current broker + * according to the metadata image. + * + * @param brokerId The ID of the current broker. + * @param newTopicsImage The new topics image after broker has been reloaded + * @param log The log object to check + * @return true if the log should not exist on the broker, false otherwise. + */ + def isStrayKraftReplica( + brokerId: Int, + newTopicsImage: TopicsImage, + log: UnifiedLog + ): Boolean = { + if (log.topicId.isEmpty) { + // Missing topic ID could result from storage failure or unclean shutdown after topic creation but before flushing + // data to the `partition.metadata` file. And before appending data to the log, the `partition.metadata` is always + // flushed to disk. So if the topic ID is missing, it mostly means no data was appended, and we can treat this as + // a stray log. + info(s"The topicId does not exist in $log, treat it as a stray log") + return true + } + + val topicId = log.topicId.get + val partitionId = log.topicPartition.partition() + Option(newTopicsImage.getPartition(topicId, partitionId)) match { + case Some(partition) => + if (!partition.replicas.contains(brokerId)) { + info(s"Found stray log dir $log: the current replica assignment ${partition.replicas.mkString("[", ", ", "]")} " + + s"does not contain the local brokerId $brokerId.") + true + } else { + false + } + + case None => + info(s"Found stray log dir $log: the topicId $topicId does not exist in the metadata image") + true + } + } } diff --git a/core/src/main/scala/kafka/log/UnifiedLog.scala b/core/src/main/scala/kafka/log/UnifiedLog.scala new file mode 100644 index 0000000000000..9a977a262b6df --- /dev/null +++ b/core/src/main/scala/kafka/log/UnifiedLog.scala @@ -0,0 +1,2216 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.log + +import kafka.log.remote.RemoteLogManager +import kafka.utils._ +import org.apache.kafka.common.errors._ +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.DescribeProducersResponseData +import org.apache.kafka.common.record.FileRecords.TimestampAndOffset +import org.apache.kafka.common.record._ +import org.apache.kafka.common.requests.ListOffsetsRequest +import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH_OFFSET +import org.apache.kafka.common.requests.ProduceResponse.RecordError +import org.apache.kafka.common.utils.{PrimitiveRef, Time, Utils} +import org.apache.kafka.common.{InvalidRecordException, KafkaException, TopicPartition, Uuid} +import org.apache.kafka.server.common.{OffsetAndEpoch, RequestLocal} +import org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.record.BrokerCompressionType +import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffsetException} +import org.apache.kafka.server.util.Scheduler +import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile} +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache +import org.apache.kafka.storage.internals.log.LocalLog.SplitSegmentResult +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, BatchMetadata, CompletedTxn, FetchDataInfo, LastRecord, LeaderHwChange, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogValidator, OffsetResultHolder, OffsetsOutOfOrderException, ProducerAppendInfo, ProducerStateManager, ProducerStateManagerConfig, RollParams, SegmentDeletionReason, VerificationGuard, UnifiedLog => JUnifiedLog} +import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats} + +import java.io.{File, IOException} +import java.lang.{Long => JLong} +import java.nio.file.{Files, Path} +import java.util +import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, ScheduledFuture} +import java.util.stream.Collectors +import java.util.{Collections, Optional, OptionalInt, OptionalLong} +import scala.collection.mutable.{ArrayBuffer, ListBuffer} +import scala.collection.{Seq, mutable} +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.{RichOption, RichOptional, RichOptionalInt} + +/** + * A log which presents a unified view of local and tiered log segments. + * + * The log consists of tiered and local segments with the tiered portion of the log being optional. There could be an + * overlap between the tiered and local segments. The active segment is always guaranteed to be local. If tiered segments + * are present, they always appear at the beginning of the log, followed by an optional region of overlap, followed by the local + * segments including the active segment. + * + * NOTE: this class handles state and behavior specific to tiered segments as well as any behavior combining both tiered + * and local segments. The state and behavior specific to local segments are handled by the encapsulated LocalLog instance. + * + * @param logStartOffset The earliest offset allowed to be exposed to kafka client. + * The logStartOffset can be updated by : + * - user's DeleteRecordsRequest + * - broker's log retention + * - broker's log truncation + * - broker's log recovery + * The logStartOffset is used to decide the following: + * - Log deletion. LogSegment whose nextOffset <= log's logStartOffset can be deleted. + * It may trigger log rolling if the active segment is deleted. + * - Earliest offset of the log in response to ListOffsetRequest. To avoid OffsetOutOfRange exception after user seeks to earliest offset, + * we make sure that logStartOffset <= log's highWatermark + * Other activities such as log cleaning are not affected by logStartOffset. + * @param localLog The LocalLog instance containing non-empty log segments recovered from disk + * @param brokerTopicStats Container for Broker Topic Yammer Metrics + * @param producerIdExpirationCheckIntervalMs How often to check for producer ids which need to be expired + * @param leaderEpochCache The LeaderEpochFileCache instance (if any) containing state associated + * with the provided logStartOffset and nextOffsetMetadata + * @param producerStateManager The ProducerStateManager instance containing state associated with the provided segments + * @param _topicId optional Uuid to specify the topic ID for the topic if it exists. Should only be specified when + * first creating the log through Partition.makeLeader or Partition.makeFollower. When reloading a log, + * this field will be populated by reading the topic ID value from partition.metadata if it exists. + * @param keepPartitionMetadataFile boolean flag to indicate whether the partition.metadata file should be kept in the + * log directory. A partition.metadata file is only created when the raft controller is used + * or the ZK controller and this broker's inter-broker protocol version is at least 2.8. + * This file will persist the topic ID on the broker. If inter-broker protocol for a ZK controller + * is downgraded below 2.8, a topic ID may be lost and a new ID generated upon re-upgrade. + * If the inter-broker protocol version on a ZK cluster is below 2.8, partition.metadata + * will be deleted to avoid ID conflicts upon re-upgrade. + * @param remoteStorageSystemEnable flag to indicate whether the system level remote log storage is enabled or not. + */ +@threadsafe +class UnifiedLog(@volatile var logStartOffset: Long, + private val localLog: LocalLog, + val brokerTopicStats: BrokerTopicStats, + val producerIdExpirationCheckIntervalMs: Int, + @volatile var leaderEpochCache: LeaderEpochFileCache, + val producerStateManager: ProducerStateManager, + @volatile private var _topicId: Option[Uuid], + val keepPartitionMetadataFile: Boolean, + val remoteStorageSystemEnable: Boolean = false, + @volatile private var logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER) extends Logging with AutoCloseable { + + import kafka.log.UnifiedLog._ + + // For compatibility, metrics are defined to be under `Log` class + private val metricsGroup = new KafkaMetricsGroup(getClass.getPackage.getName, "Log") + + this.logIdent = s"[UnifiedLog partition=$topicPartition, dir=$parentDir] " + + /* A lock that guards all modifications to the log */ + private val lock = new Object + private val validatorMetricsRecorder = newValidatorMetricsRecorder(brokerTopicStats.allTopicsStats) + + /* The earliest offset which is part of an incomplete transaction. This is used to compute the + * last stable offset (LSO) in ReplicaManager. Note that it is possible that the "true" first unstable offset + * gets removed from the log (through record or segment deletion). In this case, the first unstable offset + * will point to the log start offset, which may actually be either part of a completed transaction or not + * part of a transaction at all. However, since we only use the LSO for the purpose of restricting the + * read_committed consumer to fetching decided data (i.e. committed, aborted, or non-transactional), this + * temporary abuse seems justifiable and saves us from scanning the log after deletion to find the first offsets + * of each ongoing transaction in order to compute a new first unstable offset. It is possible, however, + * that this could result in disagreement between replicas depending on when they began replicating the log. + * In the worst case, the LSO could be seen by a consumer to go backwards. + */ + @volatile private var firstUnstableOffsetMetadata: Option[LogOffsetMetadata] = None + + /* Keep track of the current high watermark in order to ensure that segments containing offsets at or above it are + * not eligible for deletion. This means that the active segment is only eligible for deletion if the high watermark + * equals the log end offset (which may never happen for a partition under consistent load). This is needed to + * prevent the log start offset (which is exposed in fetch responses) from getting ahead of the high watermark. + */ + @volatile private var highWatermarkMetadata: LogOffsetMetadata = new LogOffsetMetadata(logStartOffset) + + @volatile var partitionMetadataFile: Option[PartitionMetadataFile] = None + + @volatile private[kafka] var _localLogStartOffset: Long = logStartOffset + + def localLogStartOffset(): Long = _localLogStartOffset + + // This is the offset(inclusive) until which segments are copied to the remote storage. + @volatile private[kafka] var _highestOffsetInRemoteStorage: Long = -1L + + def highestOffsetInRemoteStorage(): Long = _highestOffsetInRemoteStorage + + locally { + def updateLocalLogStartOffset(offset: Long): Unit = { + _localLogStartOffset = offset + + if (highWatermark < offset) { + updateHighWatermark(offset) + } + + if (this.recoveryPoint < offset) { + localLog.updateRecoveryPoint(offset) + } + } + + initializePartitionMetadata() + updateLogStartOffset(logStartOffset) + updateLocalLogStartOffset(math.max(logStartOffset, localLog.segments.firstSegmentBaseOffset.orElse(0L))) + if (!remoteLogEnabled()) + logStartOffset = localLogStartOffset() + maybeIncrementFirstUnstableOffset() + initializeTopicId() + + logOffsetsListener.onHighWatermarkUpdated(highWatermarkMetadata.messageOffset) + } + + def setLogOffsetsListener(listener: LogOffsetsListener): Unit = { + logOffsetsListener = listener + } + + def updateLogStartOffsetFromRemoteTier(remoteLogStartOffset: Long): Unit = { + if (!remoteLogEnabled()) { + error("Ignoring the call as the remote log storage is disabled") + return + } + maybeIncrementLogStartOffset(remoteLogStartOffset, LogStartOffsetIncrementReason.SegmentDeletion) + } + + def remoteLogEnabled(): Boolean = { + UnifiedLog.isRemoteLogEnabled(remoteStorageSystemEnable, config, topicPartition.topic()) + } + + /** + * Initialize topic ID information for the log by maintaining the partition metadata file and setting the in-memory _topicId. + * Delete partition metadata file if the version does not support topic IDs. + * Set _topicId based on a few scenarios: + * - Recover topic ID if present and topic IDs are supported. Ensure we do not try to assign a provided topicId that is inconsistent + * with the ID on file. + * - If we were provided a topic ID when creating the log, partition metadata files are supported, and one does not yet exist + * set _topicId and write to the partition metadata file. + * - Otherwise set _topicId to None + */ + private def initializeTopicId(): Unit = { + val partMetadataFile = partitionMetadataFile.getOrElse( + throw new KafkaException("The partitionMetadataFile should have been initialized")) + + if (partMetadataFile.exists()) { + if (keepPartitionMetadataFile) { + val fileTopicId = partMetadataFile.read().topicId + if (_topicId.isDefined && !_topicId.contains(fileTopicId)) + throw new InconsistentTopicIdException(s"Tried to assign topic ID $topicId to log for topic partition $topicPartition," + + s"but log already contained topic ID $fileTopicId") + + _topicId = Some(fileTopicId) + + } else { + try partMetadataFile.delete() + catch { + case e: IOException => + error(s"Error while trying to delete partition metadata file $partMetadataFile", e) + } + } + } else if (keepPartitionMetadataFile) { + _topicId.foreach(partMetadataFile.record) + scheduler.scheduleOnce("flush-metadata-file", () => maybeFlushMetadataFile()) + } else { + // We want to keep the file and the in-memory topic ID in sync. + _topicId = None + } + } + + def topicId: Option[Uuid] = _topicId + + def dir: File = localLog.dir + + def parentDir: String = localLog.parentDir + + def parentDirFile: File = localLog.parentDirFile + + def name: String = localLog.name + + def recoveryPoint: Long = localLog.recoveryPoint + + def topicPartition: TopicPartition = localLog.topicPartition + + def time: Time = localLog.time + + def scheduler: Scheduler = localLog.scheduler + + def config: LogConfig = localLog.config + + def logDirFailureChannel: LogDirFailureChannel = localLog.logDirFailureChannel + + def updateConfig(newConfig: LogConfig): LogConfig = { + val oldConfig = localLog.config + localLog.updateConfig(newConfig) + oldConfig + } + + def highWatermark: Long = highWatermarkMetadata.messageOffset + + /** + * Update the high watermark to a new offset. The new high watermark will be lower + * bounded by the log start offset and upper bounded by the log end offset. + * + * This is intended to be called by the leader when initializing the high watermark. + * + * @param hw the suggested new value for the high watermark + * @return the updated high watermark offset + */ + def updateHighWatermark(hw: Long): Long = { + updateHighWatermark(new LogOffsetMetadata(hw)) + } + + /** + * Update high watermark with offset metadata. The new high watermark will be lower + * bounded by the log start offset and upper bounded by the log end offset. + * + * @param highWatermarkMetadata the suggested high watermark with offset metadata + * @return the updated high watermark offset + */ + def updateHighWatermark(highWatermarkMetadata: LogOffsetMetadata): Long = { + val endOffsetMetadata = localLog.logEndOffsetMetadata + val newHighWatermarkMetadata = if (highWatermarkMetadata.messageOffset < logStartOffset) { + new LogOffsetMetadata(logStartOffset) + } else if (highWatermarkMetadata.messageOffset >= endOffsetMetadata.messageOffset) { + endOffsetMetadata + } else { + highWatermarkMetadata + } + + updateHighWatermarkMetadata(newHighWatermarkMetadata) + newHighWatermarkMetadata.messageOffset + } + + /** + * Update the high watermark to a new value if and only if it is larger than the old value. It is + * an error to update to a value which is larger than the log end offset. + * + * This method is intended to be used by the leader to update the high watermark after follower + * fetch offsets have been updated. + * + * @return the old high watermark, if updated by the new value + */ + def maybeIncrementHighWatermark(newHighWatermark: LogOffsetMetadata): Option[LogOffsetMetadata] = { + if (newHighWatermark.messageOffset > logEndOffset) + throw new IllegalArgumentException(s"High watermark $newHighWatermark update exceeds current " + + s"log end offset ${localLog.logEndOffsetMetadata}") + + lock.synchronized { + val oldHighWatermark = fetchHighWatermarkMetadata + + // Ensure that the high watermark increases monotonically. We also update the high watermark when the new + // offset metadata is on a newer segment, which occurs whenever the log is rolled to a new segment. + if (oldHighWatermark.messageOffset < newHighWatermark.messageOffset || + (oldHighWatermark.messageOffset == newHighWatermark.messageOffset && oldHighWatermark.onOlderSegment(newHighWatermark))) { + updateHighWatermarkMetadata(newHighWatermark) + Some(oldHighWatermark) + } else { + None + } + } + } + + /** + * Update high watermark with a new value. The new high watermark will be lower + * bounded by the log start offset and upper bounded by the log end offset. + * + * This method is intended to be used by the follower to update its high watermark after + * replication from the leader. + * + * @return the new high watermark if the high watermark changed, None otherwise. + */ + def maybeUpdateHighWatermark(hw: Long): Option[Long] = { + lock.synchronized { + val oldHighWatermark = highWatermarkMetadata + updateHighWatermark(new LogOffsetMetadata(hw)) match { + case oldHighWatermark.messageOffset => + None + case newHighWatermark => + Some(newHighWatermark) + } + } + } + + /** + * Get the offset and metadata for the current high watermark. If offset metadata is not + * known, this will do a lookup in the index and cache the result. + */ + private def fetchHighWatermarkMetadata: LogOffsetMetadata = { + localLog.checkIfMemoryMappedBufferClosed() + + val offsetMetadata = highWatermarkMetadata + if (offsetMetadata.messageOffsetOnly) { + lock.synchronized { + val fullOffset = maybeConvertToOffsetMetadata(highWatermark) + updateHighWatermarkMetadata(fullOffset) + fullOffset + } + } else { + offsetMetadata + } + } + + private def updateHighWatermarkMetadata(newHighWatermark: LogOffsetMetadata): Unit = { + if (newHighWatermark.messageOffset < 0) + throw new IllegalArgumentException("High watermark offset should be non-negative") + + lock synchronized { + if (newHighWatermark.messageOffset < highWatermarkMetadata.messageOffset) { + warn(s"Non-monotonic update of high watermark from $highWatermarkMetadata to $newHighWatermark") + } + + highWatermarkMetadata = newHighWatermark + producerStateManager.onHighWatermarkUpdated(newHighWatermark.messageOffset) + logOffsetsListener.onHighWatermarkUpdated(newHighWatermark.messageOffset) + maybeIncrementFirstUnstableOffset() + } + trace(s"Setting high watermark $newHighWatermark") + } + + /** + * Get the first unstable offset. Unlike the last stable offset, which is always defined, + * the first unstable offset only exists if there are transactions in progress. + * + * @return the first unstable offset, if it exists + */ + private[log] def firstUnstableOffset: Option[Long] = firstUnstableOffsetMetadata.map(_.messageOffset) + + private def fetchLastStableOffsetMetadata: LogOffsetMetadata = { + localLog.checkIfMemoryMappedBufferClosed() + + // cache the current high watermark to avoid a concurrent update invalidating the range check + val highWatermarkMetadata = fetchHighWatermarkMetadata + + firstUnstableOffsetMetadata match { + case Some(offsetMetadata) if offsetMetadata.messageOffset < highWatermarkMetadata.messageOffset => + if (offsetMetadata.messageOffsetOnly) { + lock synchronized { + val fullOffset = maybeConvertToOffsetMetadata(offsetMetadata.messageOffset) + if (firstUnstableOffsetMetadata.contains(offsetMetadata)) + firstUnstableOffsetMetadata = Some(fullOffset) + fullOffset + } + } else { + offsetMetadata + } + case _ => highWatermarkMetadata + } + } + + /** + * The last stable offset (LSO) is defined as the first offset such that all lower offsets have been "decided." + * Non-transactional messages are considered decided immediately, but transactional messages are only decided when + * the corresponding COMMIT or ABORT marker is written. This implies that the last stable offset will be equal + * to the high watermark if there are no transactional messages in the log. Note also that the LSO cannot advance + * beyond the high watermark. + */ + def lastStableOffset: Long = { + firstUnstableOffsetMetadata match { + case Some(offsetMetadata) if offsetMetadata.messageOffset < highWatermark => offsetMetadata.messageOffset + case _ => highWatermark + } + } + + def lastStableOffsetLag: Long = highWatermark - lastStableOffset + + /** + * Fully materialize and return an offset snapshot including segment position info. This method will update + * the LogOffsetMetadata for the high watermark and last stable offset if they are message-only. Throws an + * offset out of range error if the segment info cannot be loaded. + */ + def fetchOffsetSnapshot: LogOffsetSnapshot = { + val lastStable = fetchLastStableOffsetMetadata + val highWatermark = fetchHighWatermarkMetadata + + new LogOffsetSnapshot( + logStartOffset, + localLog.logEndOffsetMetadata, + highWatermark, + lastStable + ) + } + + + private var metricNames: Map[String, java.util.Map[String, String]] = Map.empty + + newMetrics() + private[log] def newMetrics(): Unit = { + val tags = (Map("topic" -> topicPartition.topic, "partition" -> topicPartition.partition.toString) ++ + (if (isFuture) Map("is-future" -> "true") else Map.empty)).asJava + metricsGroup.newGauge(LogMetricNames.NumLogSegments, () => numberOfSegments, tags) + metricsGroup.newGauge(LogMetricNames.LogStartOffset, () => logStartOffset, tags) + metricsGroup.newGauge(LogMetricNames.LogEndOffset, () => logEndOffset, tags) + metricsGroup.newGauge(LogMetricNames.Size, () => size, tags) + metricNames = Map(LogMetricNames.NumLogSegments -> tags, + LogMetricNames.LogStartOffset -> tags, + LogMetricNames.LogEndOffset -> tags, + LogMetricNames.Size -> tags) + + } + + val producerExpireCheck: ScheduledFuture[_] = scheduler.schedule("PeriodicProducerExpirationCheck", () => removeExpiredProducers(time.milliseconds), + producerIdExpirationCheckIntervalMs, producerIdExpirationCheckIntervalMs) + + // Visible for testing + def removeExpiredProducers(currentTimeMs: Long): Unit = { + lock synchronized { + producerStateManager.removeExpiredProducers(currentTimeMs) + } + } + + def loadProducerState(lastOffset: Long): Unit = lock synchronized { + rebuildProducerState(lastOffset, producerStateManager) + maybeIncrementFirstUnstableOffset() + updateHighWatermark(localLog.logEndOffsetMetadata) + } + + private def initializePartitionMetadata(): Unit = lock synchronized { + val partitionMetadata = PartitionMetadataFile.newFile(dir) + partitionMetadataFile = Some(new PartitionMetadataFile(partitionMetadata, logDirFailureChannel)) + } + + private def maybeFlushMetadataFile(): Unit = { + partitionMetadataFile.foreach(_.maybeFlush()) + } + + /** Only used for ZK clusters when we update and start using topic IDs on existing topics */ + def assignTopicId(topicId: Uuid): Unit = { + _topicId match { + case Some(currentId) => + if (!currentId.equals(topicId)) { + throw new InconsistentTopicIdException(s"Tried to assign topic ID $topicId to log for topic partition $topicPartition," + + s"but log already contained topic ID $currentId") + } + + case None => + if (keepPartitionMetadataFile) { + _topicId = Some(topicId) + partitionMetadataFile match { + case Some(partMetadataFile) => + if (!partMetadataFile.exists()) { + partMetadataFile.record(topicId) + scheduler.scheduleOnce("flush-metadata-file", () => maybeFlushMetadataFile()) + } + case _ => warn(s"The topic id $topicId will not be persisted to the partition metadata file " + + "since the partition is deleted") + } + } + } + } + + private def reinitializeLeaderEpochCache(): Unit = lock synchronized { + leaderEpochCache = UnifiedLog.createLeaderEpochCache( + dir, topicPartition, logDirFailureChannel, Option.apply(leaderEpochCache), scheduler) + } + + private def updateHighWatermarkWithLogEndOffset(): Unit = { + // Update the high watermark in case it has gotten ahead of the log end offset following a truncation + // or if a new segment has been rolled and the offset metadata needs to be updated. + if (highWatermark >= localLog.logEndOffset) { + updateHighWatermarkMetadata(localLog.logEndOffsetMetadata) + } + } + + private def updateLogStartOffset(offset: Long): Unit = { + logStartOffset = offset + + if (highWatermark < offset) { + updateHighWatermark(offset) + } + + if (localLog.recoveryPoint < offset) { + localLog.updateRecoveryPoint(offset) + } + } + + def updateHighestOffsetInRemoteStorage(offset: Long): Unit = { + if (!remoteLogEnabled()) + warn(s"Unable to update the highest offset in remote storage with offset $offset since remote storage is not enabled. The existing highest offset is ${highestOffsetInRemoteStorage()}.") + else if (offset > highestOffsetInRemoteStorage()) _highestOffsetInRemoteStorage = offset + } + + // Rebuild producer state until lastOffset. This method may be called from the recovery code path, and thus must be + // free of all side-effects, i.e. it must not update any log-specific state. + private def rebuildProducerState(lastOffset: Long, + producerStateManager: ProducerStateManager): Unit = lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + JUnifiedLog.rebuildProducerState(producerStateManager, localLog.segments, logStartOffset, lastOffset, time, false, logIdent) + } + + @threadsafe + def hasLateTransaction(currentTimeMs: Long): Boolean = { + producerStateManager.hasLateTransaction(currentTimeMs) + } + + @threadsafe + def producerIdCount: Int = producerStateManager.producerIdCount + + def activeProducers: Seq[DescribeProducersResponseData.ProducerState] = { + lock synchronized { + producerStateManager.activeProducers.asScala.map { case (producerId, state) => + new DescribeProducersResponseData.ProducerState() + .setProducerId(producerId) + .setProducerEpoch(state.producerEpoch) + .setLastSequence(state.lastSeq) + .setLastTimestamp(state.lastTimestamp) + .setCoordinatorEpoch(state.coordinatorEpoch) + .setCurrentTxnStartOffset(state.currentTxnFirstOffset.orElse(-1L)) + } + }.toSeq + } + + private[log] def activeProducersWithLastSequence: mutable.Map[Long, Int] = lock synchronized { + val result = mutable.Map[Long, Int]() + producerStateManager.activeProducers.forEach { case (producerId, producerIdEntry) => + result.put(producerId.toLong, producerIdEntry.lastSeq) + } + result + } + + private[log] def lastRecordsOfActiveProducers: mutable.Map[Long, LastRecord] = lock synchronized { + val result = mutable.Map[Long, LastRecord]() + producerStateManager.activeProducers.forEach { case (producerId, producerIdEntry) => + val lastDataOffset = if (producerIdEntry.lastDataOffset >= 0) Some(producerIdEntry.lastDataOffset) else None + val lastRecord = new LastRecord( + if (lastDataOffset.isEmpty) OptionalLong.empty() else OptionalLong.of(lastDataOffset.get), + producerIdEntry.producerEpoch) + result.put(producerId.toLong, lastRecord) + } + result + } + + /** + * Maybe create and return the VerificationGuard for the given producer ID if the transaction is not yet ongoing. + * Creation starts the verification process. Otherwise return the sentinel VerificationGuard. + */ + def maybeStartTransactionVerification(producerId: Long, sequence: Int, epoch: Short, supportsEpochBump: Boolean): VerificationGuard = lock synchronized { + if (hasOngoingTransaction(producerId, epoch)) + VerificationGuard.SENTINEL + else + maybeCreateVerificationGuard(producerId, sequence, epoch, supportsEpochBump) + } + + /** + * Maybe create the VerificationStateEntry for the given producer ID -- always return the VerificationGuard + */ + private def maybeCreateVerificationGuard(producerId: Long, + sequence: Int, + epoch: Short, + supportsEpochBump: Boolean): VerificationGuard = lock synchronized { + producerStateManager.maybeCreateVerificationStateEntry(producerId, sequence, epoch, supportsEpochBump).verificationGuard + } + + /** + * If an VerificationStateEntry is present for the given producer ID, return its VerificationGuard, otherwise, return the + * sentinel VerificationGuard. + */ + def verificationGuard(producerId: Long): VerificationGuard = lock synchronized { + val entry = producerStateManager.verificationStateEntry(producerId) + if (entry != null) entry.verificationGuard else VerificationGuard.SENTINEL + } + + /** + * Return true if the given producer ID has a transaction ongoing. + * Note, if the incoming producer epoch is newer than the stored one, the transaction may have finished. + */ + def hasOngoingTransaction(producerId: Long, producerEpoch: Short): Boolean = lock synchronized { + val entry = producerStateManager.activeProducers.get(producerId) + entry != null && entry.currentTxnFirstOffset.isPresent && entry.producerEpoch() == producerEpoch + } + + /** + * The number of segments in the log. + * Take care! this is an O(n) operation. + */ + def numberOfSegments: Int = localLog.segments.numberOfSegments + + /** + * Close this log. + * The memory mapped buffer for index files of this log will be left open until the log is deleted. + */ + override def close(): Unit = { + debug("Closing log") + lock synchronized { + logOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER + maybeFlushMetadataFile() + localLog.checkIfMemoryMappedBufferClosed() + producerExpireCheck.cancel(true) + maybeHandleIOException(s"Error while renaming dir for $topicPartition in dir ${dir.getParent}") { + // We take a snapshot at the last written offset to hopefully avoid the need to scan the log + // after restarting and to ensure that we cannot inadvertently hit the upgrade optimization + // (the clean shutdown file is written after the logs are all closed). + producerStateManager.takeSnapshot() + } + localLog.close() + } + } + + /** + * Rename the directory of the local log. If the log's directory is being renamed for async deletion due to a + * StopReplica request, then the shouldReinitialize parameter should be set to false, otherwise it should be set to true. + * + * @param name The new name that this log's directory is being renamed to + * @param shouldReinitialize Whether the log's metadata should be reinitialized after renaming + * @throws KafkaStorageException if rename fails + */ + def renameDir(name: String, shouldReinitialize: Boolean): Unit = { + lock synchronized { + maybeHandleIOException(s"Error while renaming dir for $topicPartition in log dir ${dir.getParent}") { + // Flush partitionMetadata file before initializing again + maybeFlushMetadataFile() + if (localLog.renameDir(name)) { + producerStateManager.updateParentDir(dir) + if (shouldReinitialize) { + // re-initialize leader epoch cache so that LeaderEpochCheckpointFile.checkpoint can correctly reference + // the checkpoint file in renamed log directory + reinitializeLeaderEpochCache() + initializePartitionMetadata() + } else { + leaderEpochCache.clear() + partitionMetadataFile = None + } + } + } + } + } + + /** + * Close file handlers used by this log but don't write to disk. This is called if the log directory is offline + */ + def closeHandlers(): Unit = { + debug("Closing handlers") + lock synchronized { + localLog.closeHandlers() + } + } + + /** + * Append this message set to the active segment of the local log, assigning offsets and Partition Leader Epochs + * + * @param records The records to append + * @param origin Declares the origin of the append which affects required validations + * @param requestLocal request local instance + * @throws KafkaStorageException If the append fails due to an I/O error. + * @return Information about the appended messages including the first and last offset. + */ + def appendAsLeader(records: MemoryRecords, + leaderEpoch: Int, + origin: AppendOrigin = AppendOrigin.CLIENT, + requestLocal: RequestLocal = RequestLocal.noCaching, + verificationGuard: VerificationGuard = VerificationGuard.SENTINEL): LogAppendInfo = { + val validateAndAssignOffsets = origin != AppendOrigin.RAFT_LEADER + append(records, origin, validateAndAssignOffsets, leaderEpoch, Some(requestLocal), verificationGuard, ignoreRecordSize = false) + } + + /** + * Even though we always write to disk with record version v2 since Apache Kafka 4.0, older record versions may have + * been persisted to disk before that. In order to test such scenarios, we need the ability to append with older + * record versions. This method exists for that purpose and hence it should only be used from test code. + * + * Also see #appendAsLeader. + */ + private[kafka] def appendAsLeaderWithRecordVersion(records: MemoryRecords, leaderEpoch: Int, recordVersion: RecordVersion): LogAppendInfo = { + append(records, AppendOrigin.CLIENT, true, leaderEpoch, Some(RequestLocal.noCaching), + VerificationGuard.SENTINEL, ignoreRecordSize = false, recordVersion.value) + } + + /** + * Append this message set to the active segment of the local log without assigning offsets or Partition Leader Epochs + * + * @param records The records to append + * @throws KafkaStorageException If the append fails due to an I/O error. + * @return Information about the appended messages including the first and last offset. + */ + def appendAsFollower(records: MemoryRecords): LogAppendInfo = { + append(records, + origin = AppendOrigin.REPLICATION, + validateAndAssignOffsets = false, + leaderEpoch = -1, + requestLocal = None, + verificationGuard = VerificationGuard.SENTINEL, + // disable to check the validation of record size since the record is already accepted by leader. + ignoreRecordSize = true) + } + + /** + * Append this message set to the active segment of the local log, rolling over to a fresh segment if necessary. + * + * This method will generally be responsible for assigning offsets to the messages, + * however if the assignOffsets=false flag is passed we will only check that the existing offsets are valid. + * + * @param records The log records to append + * @param origin Declares the origin of the append which affects required validations + * @param validateAndAssignOffsets Should the log assign offsets to this message set or blindly apply what it is given + * @param leaderEpoch The partition's leader epoch which will be applied to messages when offsets are assigned on the leader + * @param requestLocal The request local instance if validateAndAssignOffsets is true + * @param ignoreRecordSize true to skip validation of record size. + * @throws KafkaStorageException If the append fails due to an I/O error. + * @throws OffsetsOutOfOrderException If out of order offsets found in 'records' + * @throws UnexpectedAppendOffsetException If the first or last offset in append is less than next offset + * @return Information about the appended messages including the first and last offset. + */ + private def append(records: MemoryRecords, + origin: AppendOrigin, + validateAndAssignOffsets: Boolean, + leaderEpoch: Int, + requestLocal: Option[RequestLocal], + verificationGuard: VerificationGuard, + ignoreRecordSize: Boolean, + toMagic: Byte = RecordBatch.CURRENT_MAGIC_VALUE): LogAppendInfo = { + // We want to ensure the partition metadata file is written to the log dir before any log data is written to disk. + // This will ensure that any log data can be recovered with the correct topic ID in the case of failure. + maybeFlushMetadataFile() + + val appendInfo = analyzeAndValidateRecords(records, origin, ignoreRecordSize, !validateAndAssignOffsets, leaderEpoch) + + // return if we have no valid messages or if this is a duplicate of the last appended entry + if (appendInfo.validBytes <= 0) appendInfo + else { + + // trim any invalid bytes or partial messages before appending it to the on-disk log + var validRecords = trimInvalidBytes(records, appendInfo) + + // they are valid, insert them in the log + lock synchronized { + maybeHandleIOException(s"Error while appending records to $topicPartition in dir ${dir.getParent}") { + localLog.checkIfMemoryMappedBufferClosed() + if (validateAndAssignOffsets) { + // assign offsets to the message set + val offset = PrimitiveRef.ofLong(localLog.logEndOffset) + appendInfo.setFirstOffset(offset.value) + val validateAndOffsetAssignResult = try { + val targetCompression = BrokerCompressionType.targetCompression(config.compression, appendInfo.sourceCompression()) + val validator = new LogValidator(validRecords, + topicPartition, + time, + appendInfo.sourceCompression, + targetCompression, + config.compact, + toMagic, + config.messageTimestampType, + config.messageTimestampBeforeMaxMs, + config.messageTimestampAfterMaxMs, + leaderEpoch, + origin + ) + validator.validateMessagesAndAssignOffsets(offset, + validatorMetricsRecorder, + requestLocal.getOrElse(throw new IllegalArgumentException( + "requestLocal should be defined if assignOffsets is true") + ).bufferSupplier + ) + } catch { + case e: IOException => + throw new KafkaException(s"Error validating messages while appending to log $name", e) + } + + validRecords = validateAndOffsetAssignResult.validatedRecords + appendInfo.setMaxTimestamp(validateAndOffsetAssignResult.maxTimestampMs) + appendInfo.setShallowOffsetOfMaxTimestamp(validateAndOffsetAssignResult.shallowOffsetOfMaxTimestamp) + appendInfo.setLastOffset(offset.value - 1) + appendInfo.setRecordValidationStats(validateAndOffsetAssignResult.recordValidationStats) + if (config.messageTimestampType == TimestampType.LOG_APPEND_TIME) + appendInfo.setLogAppendTime(validateAndOffsetAssignResult.logAppendTimeMs) + + // re-validate message sizes if there's a possibility that they have changed (due to re-compression or message + // format conversion) + if (!ignoreRecordSize && validateAndOffsetAssignResult.messageSizeMaybeChanged) { + validRecords.batches.forEach { batch => + if (batch.sizeInBytes > config.maxMessageSize) { + // we record the original message set size instead of the trimmed size + // to be consistent with pre-compression bytesRejectedRate recording + brokerTopicStats.topicStats(topicPartition.topic).bytesRejectedRate.mark(records.sizeInBytes) + brokerTopicStats.allTopicsStats.bytesRejectedRate.mark(records.sizeInBytes) + throw new RecordTooLargeException(s"Message batch size is ${batch.sizeInBytes} bytes in append to" + + s"partition $topicPartition which exceeds the maximum configured size of ${config.maxMessageSize}.") + } + } + } + } else { + // we are taking the offsets we are given + if (appendInfo.firstOrLastOffsetOfFirstBatch < localLog.logEndOffset) { + // we may still be able to recover if the log is empty + // one example: fetching from log start offset on the leader which is not batch aligned, + // which may happen as a result of AdminClient#deleteRecords() + val hasFirstOffset = appendInfo.firstOffset != UnifiedLog.UnknownOffset + val firstOffset = if (hasFirstOffset) appendInfo.firstOffset else records.batches.iterator().next().baseOffset() + + val firstOrLast = if (hasFirstOffset) "First offset" else "Last offset of the first batch" + throw new UnexpectedAppendOffsetException( + s"Unexpected offset in append to $topicPartition. $firstOrLast " + + s"${appendInfo.firstOrLastOffsetOfFirstBatch} is less than the next offset ${localLog.logEndOffset}. " + + s"First 10 offsets in append: ${records.records.asScala.take(10).map(_.offset)}, last offset in" + + s" append: ${appendInfo.lastOffset}. Log start offset = $logStartOffset", + firstOffset, appendInfo.lastOffset) + } + } + + // update the epoch cache with the epoch stamped onto the message by the leader + validRecords.batches.forEach { batch => + if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) { + assignEpochStartOffset(batch.partitionLeaderEpoch, batch.baseOffset) + } else { + // In partial upgrade scenarios, we may get a temporary regression to the message format. In + // order to ensure the safety of leader election, we clear the epoch cache so that we revert + // to truncation by high watermark after the next leader election. + if (leaderEpochCache.nonEmpty) { + warn(s"Clearing leader epoch cache after unexpected append with message format v${batch.magic}") + leaderEpochCache.clearAndFlush() + } + } + } + + // check messages set size may be exceed config.segmentSize + if (validRecords.sizeInBytes > config.segmentSize) { + throw new RecordBatchTooLargeException(s"Message batch size is ${validRecords.sizeInBytes} bytes in append " + + s"to partition $topicPartition, which exceeds the maximum configured segment size of ${config.segmentSize}.") + } + + // maybe roll the log if this segment is full + val segment = maybeRoll(validRecords.sizeInBytes, appendInfo) + + val logOffsetMetadata = new LogOffsetMetadata( + appendInfo.firstOrLastOffsetOfFirstBatch, + segment.baseOffset, + segment.size) + + // now that we have valid records, offsets assigned, and timestamps updated, we need to + // validate the idempotent/transactional state of the producers and collect some metadata + val (updatedProducers, completedTxns, maybeDuplicate) = analyzeAndValidateProducerState( + logOffsetMetadata, validRecords, origin, verificationGuard) + + maybeDuplicate match { + case Some(duplicate) => + appendInfo.setFirstOffset(duplicate.firstOffset) + appendInfo.setLastOffset(duplicate.lastOffset) + appendInfo.setLogAppendTime(duplicate.timestamp) + appendInfo.setLogStartOffset(logStartOffset) + case None => + // Append the records, and increment the local log end offset immediately after the append because a + // write to the transaction index below may fail and we want to ensure that the offsets + // of future appends still grow monotonically. The resulting transaction index inconsistency + // will be cleaned up after the log directory is recovered. Note that the end offset of the + // ProducerStateManager will not be updated and the last stable offset will not advance + // if the append to the transaction index fails. + localLog.append(appendInfo.lastOffset, appendInfo.maxTimestamp, appendInfo.shallowOffsetOfMaxTimestamp, validRecords) + updateHighWatermarkWithLogEndOffset() + + // update the producer state + updatedProducers.values.forEach(producerAppendInfo => producerStateManager.update(producerAppendInfo)) + + // update the transaction index with the true last stable offset. The last offset visible + // to consumers using READ_COMMITTED will be limited by this value and the high watermark. + completedTxns.foreach { completedTxn => + val lastStableOffset = producerStateManager.lastStableOffset(completedTxn) + segment.updateTxnIndex(completedTxn, lastStableOffset) + producerStateManager.completeTxn(completedTxn) + } + + // always update the last producer id map offset so that the snapshot reflects the current offset + // even if there isn't any idempotent data being written + producerStateManager.updateMapEndOffset(appendInfo.lastOffset + 1) + + // update the first unstable offset (which is used to compute LSO) + maybeIncrementFirstUnstableOffset() + + trace(s"Appended message set with last offset: ${appendInfo.lastOffset}, " + + s"first offset: ${appendInfo.firstOffset}, " + + s"next offset: ${localLog.logEndOffset}, " + + s"and messages: $validRecords") + + if (localLog.unflushedMessages >= config.flushInterval) flush(false) + } + appendInfo + } + } + } + } + + def assignEpochStartOffset(leaderEpoch: Int, startOffset: Long): Unit = + leaderEpochCache.assign(leaderEpoch, startOffset) + + def latestEpoch: Option[Int] = leaderEpochCache.latestEpoch.toScala + + def endOffsetForEpoch(leaderEpoch: Int): Option[OffsetAndEpoch] = { + val entry = leaderEpochCache.endOffsetFor(leaderEpoch, logEndOffset) + val (foundEpoch, foundOffset) = (entry.getKey, entry.getValue) + if (foundOffset == UNDEFINED_EPOCH_OFFSET) + None + else + Some(new OffsetAndEpoch(foundOffset, foundEpoch)) + } + + private def maybeIncrementFirstUnstableOffset(): Unit = lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + + val updatedFirstUnstableOffset = producerStateManager.firstUnstableOffset.toScala match { + case Some(logOffsetMetadata) if logOffsetMetadata.messageOffsetOnly || logOffsetMetadata.messageOffset < logStartOffset => + val offset = math.max(logOffsetMetadata.messageOffset, logStartOffset) + Some(maybeConvertToOffsetMetadata(offset)) + case other => other + } + + if (updatedFirstUnstableOffset != this.firstUnstableOffsetMetadata) { + debug(s"First unstable offset updated to $updatedFirstUnstableOffset") + this.firstUnstableOffsetMetadata = updatedFirstUnstableOffset + } + } + + def maybeIncrementLocalLogStartOffset(newLocalLogStartOffset: Long, reason: LogStartOffsetIncrementReason): Unit = { + lock synchronized { + if (newLocalLogStartOffset > localLogStartOffset()) { + _localLogStartOffset = newLocalLogStartOffset + info(s"Incremented local log start offset to ${localLogStartOffset()} due to reason $reason") + } + } + } + + /** + * Increment the log start offset if the provided offset is larger. + * + * If the log start offset changed, then this method also update a few key offset such that + * `logStartOffset <= logStableOffset <= highWatermark`. The leader epoch cache is also updated + * such that all of offsets referenced in that component point to valid offset in this log. + * + * @throws OffsetOutOfRangeException if the log start offset is greater than the high watermark + * @return true if the log start offset was updated; otherwise false + */ + def maybeIncrementLogStartOffset(newLogStartOffset: Long, + reason: LogStartOffsetIncrementReason): Boolean = { + // We don't have to write the log start offset to log-start-offset-checkpoint immediately. + // The deleteRecordsOffset may be lost only if all in-sync replicas of this broker are shutdown + // in an unclean manner within log.flush.start.offset.checkpoint.interval.ms. The chance of this happening is low. + var updatedLogStartOffset = false + maybeHandleIOException(s"Exception while increasing log start offset for $topicPartition to $newLogStartOffset in dir ${dir.getParent}") { + lock synchronized { + if (newLogStartOffset > highWatermark) + throw new OffsetOutOfRangeException(s"Cannot increment the log start offset to $newLogStartOffset of partition $topicPartition " + + s"since it is larger than the high watermark $highWatermark") + + if (remoteLogEnabled()) { + // This should be set log-start-offset is set more than the current local-log-start-offset + _localLogStartOffset = math.max(newLogStartOffset, localLogStartOffset()) + } + + localLog.checkIfMemoryMappedBufferClosed() + if (newLogStartOffset > logStartOffset) { + updatedLogStartOffset = true + updateLogStartOffset(newLogStartOffset) + info(s"Incremented log start offset to $newLogStartOffset due to $reason") + leaderEpochCache.truncateFromStartAsyncFlush(logStartOffset) + producerStateManager.onLogStartOffsetIncremented(newLogStartOffset) + maybeIncrementFirstUnstableOffset() + } + } + } + + updatedLogStartOffset + } + + private def analyzeAndValidateProducerState(appendOffsetMetadata: LogOffsetMetadata, + records: MemoryRecords, + origin: AppendOrigin, + requestVerificationGuard: VerificationGuard): + (util.Map[JLong, ProducerAppendInfo], List[CompletedTxn], Option[BatchMetadata]) = { + val updatedProducers = new util.HashMap[JLong, ProducerAppendInfo] + val completedTxns = ListBuffer.empty[CompletedTxn] + var relativePositionInSegment = appendOffsetMetadata.relativePositionInSegment + + records.batches.forEach { batch => + if (batch.hasProducerId) { + // if this is a client produce request, there will be up to 5 batches which could have been duplicated. + // If we find a duplicate, we return the metadata of the appended batch to the client. + if (origin == AppendOrigin.CLIENT) { + val maybeLastEntry = producerStateManager.lastEntry(batch.producerId) + + val duplicateBatch = maybeLastEntry.flatMap(_.findDuplicateBatch(batch)) + if (duplicateBatch.isPresent) { + return (updatedProducers, completedTxns.toList, Some(duplicateBatch.get())) + } + } + + if (origin == AppendOrigin.CLIENT || origin == AppendOrigin.COORDINATOR) { + // Verify that if the record is transactional & the append origin is client/coordinator, that we either have an ongoing transaction or verified transaction state. + // This guarantees that transactional records are never written to the log outside of the transaction coordinator's knowledge of an open transaction on + // the partition. If we do not have an ongoing transaction or correct guard, return an error and do not append. + // There are two phases -- the first append to the log and subsequent appends. + // + // 1. First append: Verification starts with creating a VerificationGuard, sending a verification request to the transaction coordinator, and + // given a "verified" response, continuing the append path. (A non-verified response throws an error.) We create the unique VerificationGuard for the transaction + // to ensure there is no race between the transaction coordinator response and an abort marker getting written to the log. We need a unique guard because we could + // have a sequence of events where we start a transaction verification, have the transaction coordinator send a verified response, write an abort marker, + // start a new transaction not aware of the partition, and receive the stale verification (ABA problem). With a unique VerificationGuard, this sequence would not + // result in appending to the log and would return an error. The guard is removed after the first append to the transaction and from then, we can rely on phase 2. + // + // 2. Subsequent appends: Once we write to the transaction, the in-memory state currentTxnFirstOffset is populated. This field remains until the + // transaction is completed or aborted. We can guarantee the transaction coordinator knows about the transaction given step 1 and that the transaction is still + // ongoing. If the transaction is expected to be ongoing, we will not set a VerificationGuard. If the transaction is aborted, hasOngoingTransaction is false and + // requestVerificationGuard is the sentinel, so we will throw an error. A subsequent produce request (retry) should create verification state and return to phase 1. + if (batch.isTransactional && !hasOngoingTransaction(batch.producerId, batch.producerEpoch()) && batchMissingRequiredVerification(batch, requestVerificationGuard)) + throw new InvalidTxnStateException("Record was not part of an ongoing transaction") + } + + // We cache offset metadata for the start of each transaction. This allows us to + // compute the last stable offset without relying on additional index lookups. + val firstOffsetMetadata = if (batch.isTransactional) + Optional.of(new LogOffsetMetadata(batch.baseOffset, appendOffsetMetadata.segmentBaseOffset, relativePositionInSegment)) + else + Optional.empty[LogOffsetMetadata] + + val maybeCompletedTxn = JUnifiedLog.updateProducers(producerStateManager, batch, updatedProducers, firstOffsetMetadata, origin) + maybeCompletedTxn.ifPresent(ct => completedTxns += ct) + } + + relativePositionInSegment += batch.sizeInBytes + } + (updatedProducers, completedTxns.toList, None) + } + + private def batchMissingRequiredVerification(batch: MutableRecordBatch, requestVerificationGuard: VerificationGuard): Boolean = { + producerStateManager.producerStateManagerConfig().transactionVerificationEnabled() && !batch.isControlBatch && + !verificationGuard(batch.producerId).verify(requestVerificationGuard) + } + + /** + * Validate the following: + *

            + *
          1. each message matches its CRC + *
          2. each message size is valid (if ignoreRecordSize is false) + *
          3. that the sequence numbers of the incoming record batches are consistent with the existing state and with each other + *
          4. that the offsets are monotonically increasing (if requireOffsetsMonotonic is true) + *
          + * + * Also compute the following quantities: + *
            + *
          1. First offset in the message set + *
          2. Last offset in the message set + *
          3. Number of messages + *
          4. Number of valid bytes + *
          5. Whether the offsets are monotonically increasing + *
          6. Whether any compression codec is used (if many are used, then the last one is given) + *
          + */ + private def analyzeAndValidateRecords(records: MemoryRecords, + origin: AppendOrigin, + ignoreRecordSize: Boolean, + requireOffsetsMonotonic: Boolean, + leaderEpoch: Int): LogAppendInfo = { + var validBytesCount = 0 + var firstOffset = UnifiedLog.UnknownOffset + var lastOffset = -1L + var lastLeaderEpoch = RecordBatch.NO_PARTITION_LEADER_EPOCH + var sourceCompression = CompressionType.NONE + var monotonic = true + var maxTimestamp = RecordBatch.NO_TIMESTAMP + var shallowOffsetOfMaxTimestamp = -1L + var readFirstMessage = false + var lastOffsetOfFirstBatch = -1L + + records.batches.forEach { batch => + if (origin == AppendOrigin.RAFT_LEADER && batch.partitionLeaderEpoch != leaderEpoch) { + throw new InvalidRecordException("Append from Raft leader did not set the batch epoch correctly") + } + // we only validate V2 and higher to avoid potential compatibility issues with older clients + if (batch.magic >= RecordBatch.MAGIC_VALUE_V2 && origin == AppendOrigin.CLIENT && batch.baseOffset != 0) + throw new InvalidRecordException(s"The baseOffset of the record batch in the append to $topicPartition should " + + s"be 0, but it is ${batch.baseOffset}") + + // update the first offset if on the first message. For magic versions older than 2, we use the last offset + // to avoid the need to decompress the data (the last offset can be obtained directly from the wrapper message). + // For magic version 2, we can get the first offset directly from the batch header. + // When appending to the leader, we will update LogAppendInfo.baseOffset with the correct value. In the follower + // case, validation will be more lenient. + // Also indicate whether we have the accurate first offset or not + if (!readFirstMessage) { + if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) + firstOffset = batch.baseOffset + lastOffsetOfFirstBatch = batch.lastOffset + readFirstMessage = true + } + + // check that offsets are monotonically increasing + if (lastOffset >= batch.lastOffset) + monotonic = false + + // update the last offset seen + lastOffset = batch.lastOffset + lastLeaderEpoch = batch.partitionLeaderEpoch + + // Check if the message sizes are valid. + val batchSize = batch.sizeInBytes + if (!ignoreRecordSize && batchSize > config.maxMessageSize) { + brokerTopicStats.topicStats(topicPartition.topic).bytesRejectedRate.mark(records.sizeInBytes) + brokerTopicStats.allTopicsStats.bytesRejectedRate.mark(records.sizeInBytes) + throw new RecordTooLargeException(s"The record batch size in the append to $topicPartition is $batchSize bytes " + + s"which exceeds the maximum configured value of ${config.maxMessageSize}.") + } + + // check the validity of the message by checking CRC + if (!batch.isValid) { + brokerTopicStats.allTopicsStats.invalidMessageCrcRecordsPerSec.mark() + throw new CorruptRecordException(s"Record is corrupt (stored crc = ${batch.checksum()}) in topic partition $topicPartition.") + } + + if (batch.maxTimestamp > maxTimestamp) { + maxTimestamp = batch.maxTimestamp + shallowOffsetOfMaxTimestamp = lastOffset + } + + validBytesCount += batchSize + + val batchCompression = CompressionType.forId(batch.compressionType.id) + // sourceCompression is only used on the leader path, which only contains one batch if version is v2 or messages are compressed + if (batchCompression != CompressionType.NONE) + sourceCompression = batchCompression + } + + if (requireOffsetsMonotonic && !monotonic) + throw new OffsetsOutOfOrderException(s"Out of order offsets found in append to $topicPartition: " + + records.records.asScala.map(_.offset)) + + val lastLeaderEpochOpt: OptionalInt = if (lastLeaderEpoch != RecordBatch.NO_PARTITION_LEADER_EPOCH) + OptionalInt.of(lastLeaderEpoch) + else + OptionalInt.empty() + + new LogAppendInfo(firstOffset, lastOffset, lastLeaderEpochOpt, maxTimestamp, shallowOffsetOfMaxTimestamp, + RecordBatch.NO_TIMESTAMP, logStartOffset, RecordValidationStats.EMPTY, sourceCompression, + validBytesCount, lastOffsetOfFirstBatch, Collections.emptyList[RecordError], LeaderHwChange.NONE) + } + + /** + * Trim any invalid bytes from the end of this message set (if there are any) + * + * @param records The records to trim + * @param info The general information of the message set + * @return A trimmed message set. This may be the same as what was passed in or it may not. + */ + private def trimInvalidBytes(records: MemoryRecords, info: LogAppendInfo): MemoryRecords = { + val validBytes = info.validBytes + if (validBytes < 0) + throw new CorruptRecordException(s"Cannot append record batch with illegal length $validBytes to " + + s"log for $topicPartition. A possible cause is a corrupted produce request.") + if (validBytes == records.sizeInBytes) { + records + } else { + // trim invalid bytes + val validByteBuffer = records.buffer.duplicate() + validByteBuffer.limit(validBytes) + MemoryRecords.readableRecords(validByteBuffer) + } + } + + private def checkLogStartOffset(offset: Long): Unit = { + if (offset < logStartOffset) + throw new OffsetOutOfRangeException(s"Received request for offset $offset for partition $topicPartition, " + + s"but we only have log segments starting from offset: $logStartOffset.") + } + + /** + * Read messages from the log. + * + * @param startOffset The offset to begin reading at + * @param maxLength The maximum number of bytes to read + * @param isolation The fetch isolation, which controls the maximum offset we are allowed to read + * @param minOneMessage If this is true, the first message will be returned even if it exceeds `maxLength` (if one exists) + * @throws OffsetOutOfRangeException If startOffset is beyond the log end offset or before the log start offset + * @return The fetch data information including fetch starting offset metadata and messages read. + */ + def read(startOffset: Long, + maxLength: Int, + isolation: FetchIsolation, + minOneMessage: Boolean): FetchDataInfo = { + checkLogStartOffset(startOffset) + val maxOffsetMetadata = isolation match { + case FetchIsolation.LOG_END => localLog.logEndOffsetMetadata + case FetchIsolation.HIGH_WATERMARK => fetchHighWatermarkMetadata + case FetchIsolation.TXN_COMMITTED => fetchLastStableOffsetMetadata + } + localLog.read(startOffset, maxLength, minOneMessage, maxOffsetMetadata, isolation == FetchIsolation.TXN_COMMITTED) + } + + private[log] def collectAbortedTransactions(startOffset: Long, upperBoundOffset: Long): List[AbortedTxn] = { + localLog.collectAbortedTransactions(logStartOffset, startOffset, upperBoundOffset).asScala.toList + } + + /** + * Get an offset based on the given timestamp + * The offset returned is the offset of the first message whose timestamp is greater than or equals to the + * given timestamp. + * + * If no such message is found, the log end offset is returned. + * + * `NOTE:` OffsetRequest V0 does not use this method, the behavior of OffsetRequest V0 remains the same as before + * , i.e. it only gives back the timestamp based on the last modification time of the log segments. + * + * @param targetTimestamp The given timestamp for offset fetching. + * @param remoteLogManager Optional RemoteLogManager instance if it exists. + * @return the offset-result holder + *
            + *
          • When the partition is not enabled with remote storage, then it contains offset of the first message + * whose timestamp is greater than or equals to the given timestamp; None if no such message is found. + *
          • When the partition is enabled with remote storage, then it contains the job/task future and gets + * completed in the async fashion. + *
          • All special timestamp offset results are returned immediately irrespective of the remote storage. + *
          + */ + def fetchOffsetByTimestamp(targetTimestamp: Long, remoteLogManager: Option[RemoteLogManager] = None): OffsetResultHolder = { + maybeHandleIOException(s"Error while fetching offset by timestamp for $topicPartition in dir ${dir.getParent}") { + debug(s"Searching offset for timestamp $targetTimestamp") + + // For the earliest and latest, we do not need to return the timestamp. + if (targetTimestamp == ListOffsetsRequest.EARLIEST_TIMESTAMP || + (!remoteLogEnabled() && targetTimestamp == ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)) { + // The first cached epoch usually corresponds to the log start offset, but we have to verify this since + // it may not be true following a message format version bump as the epoch will not be available for + // log entries written in the older format. + val earliestEpochEntry = leaderEpochCache.earliestEntry() + val epochOpt = if (earliestEpochEntry.isPresent && earliestEpochEntry.get().startOffset <= logStartOffset) { + Optional.of[Integer](earliestEpochEntry.get().epoch) + } else Optional.empty[Integer]() + + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logStartOffset, epochOpt)) + } else if (targetTimestamp == ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) { + val curLocalLogStartOffset = localLogStartOffset() + + val epochResult: Optional[Integer] = { + val epochOpt = leaderEpochCache.epochForOffset(curLocalLogStartOffset) + if (epochOpt.isPresent) Optional.of(epochOpt.getAsInt) else Optional.empty() + } + + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curLocalLogStartOffset, epochResult)) + } else if (targetTimestamp == ListOffsetsRequest.LATEST_TIMESTAMP) { + val latestEpoch = leaderEpochCache.latestEpoch() + val epoch = if (latestEpoch.isPresent) Optional.of[Integer](latestEpoch.getAsInt) else Optional.empty[Integer]() + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, logEndOffset, epoch)) + } else if (targetTimestamp == ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) { + if (remoteLogEnabled()) { + val curHighestRemoteOffset = highestOffsetInRemoteStorage() + val epochOpt = leaderEpochCache.epochForOffset(curHighestRemoteOffset) + val epochResult: Optional[Integer] = + if (epochOpt.isPresent) Optional.of(epochOpt.getAsInt) + else if (curHighestRemoteOffset == -1) Optional.of(RecordBatch.NO_PARTITION_LEADER_EPOCH) + else Optional.empty() + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, curHighestRemoteOffset, epochResult)) + } else { + new OffsetResultHolder(new TimestampAndOffset(RecordBatch.NO_TIMESTAMP, -1L, Optional.of(-1))) + } + } else if (targetTimestamp == ListOffsetsRequest.MAX_TIMESTAMP) { + // Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides + // constant time access while being safe to use with concurrent collections unlike `toArray`. + val latestTimestampSegment = logSegments.asScala.toBuffer.maxBy[Long](_.maxTimestampSoFar) + // cache the timestamp and offset + val maxTimestampSoFar = latestTimestampSegment.readMaxTimestampAndOffsetSoFar + // lookup the position of batch to avoid extra I/O + val position = latestTimestampSegment.offsetIndex.lookup(maxTimestampSoFar.offset) + val timestampAndOffsetOpt = latestTimestampSegment.log.batchesFrom(position.position).asScala + .find(_.maxTimestamp() == maxTimestampSoFar.timestamp) + .flatMap(batch => batch.offsetOfMaxTimestamp().toScala.map(new TimestampAndOffset(batch.maxTimestamp(), _, + Optional.of[Integer](batch.partitionLeaderEpoch()).filter(_ >= 0)))) + new OffsetResultHolder(timestampAndOffsetOpt.toJava) + } else { + // We need to search the first segment whose largest timestamp is >= the target timestamp if there is one. + if (remoteLogEnabled() && !isEmpty) { + if (remoteLogManager.isEmpty) { + throw new KafkaException("RemoteLogManager is empty even though the remote log storage is enabled.") + } + + val asyncOffsetReadFutureHolder = remoteLogManager.get.asyncOffsetRead(topicPartition, targetTimestamp, + logStartOffset, leaderEpochCache, () => searchOffsetInLocalLog(targetTimestamp, localLogStartOffset())) + + new OffsetResultHolder(Optional.empty(), Optional.of(asyncOffsetReadFutureHolder)) + } else { + new OffsetResultHolder(searchOffsetInLocalLog(targetTimestamp, logStartOffset).toJava) + } + } + } + } + + /** + * Checks if the log is empty. + * @return Returns True when the log is empty. Otherwise, false. + */ + private[log] def isEmpty = { + logStartOffset == logEndOffset + } + + private def searchOffsetInLocalLog(targetTimestamp: Long, startOffset: Long): Option[TimestampAndOffset] = { + // Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides + // constant time access while being safe to use with concurrent collections unlike `toArray`. + val segmentsCopy = logSegments.asScala.toBuffer + val targetSeg = segmentsCopy.find(_.largestTimestamp >= targetTimestamp) + targetSeg.flatMap(_.findOffsetByTimestamp(targetTimestamp, startOffset).toScala) + } + + def legacyFetchOffsetsBefore(timestamp: Long, maxNumOffsets: Int): Seq[Long] = { + // Cache to avoid race conditions. `toBuffer` is faster than most alternatives and provides + // constant time access while being safe to use with concurrent collections unlike `toArray`. + val allSegments = logSegments.asScala.toBuffer + val lastSegmentHasSize = allSegments.last.size > 0 + + val offsetTimeArray = + if (lastSegmentHasSize) + new Array[(Long, Long)](allSegments.length + 1) + else + new Array[(Long, Long)](allSegments.length) + + for (i <- allSegments.indices) + offsetTimeArray(i) = (math.max(allSegments(i).baseOffset, logStartOffset), allSegments(i).lastModified) + if (lastSegmentHasSize) + offsetTimeArray(allSegments.length) = (logEndOffset, time.milliseconds) + + var startIndex = -1 + timestamp match { + case ListOffsetsRequest.LATEST_TIMESTAMP => + startIndex = offsetTimeArray.length - 1 + case ListOffsetsRequest.EARLIEST_TIMESTAMP => + startIndex = 0 + case _ => + var isFound = false + debug("Offset time array = " + offsetTimeArray.foreach(o => "%d, %d".format(o._1, o._2))) + startIndex = offsetTimeArray.length - 1 + while (startIndex >= 0 && !isFound) { + if (offsetTimeArray(startIndex)._2 <= timestamp) + isFound = true + else + startIndex -= 1 + } + } + + val retSize = maxNumOffsets.min(startIndex + 1) + val ret = new Array[Long](retSize) + for (j <- 0 until retSize) { + ret(j) = offsetTimeArray(startIndex)._1 + startIndex -= 1 + } + // ensure that the returned seq is in descending order of offsets + ret.toSeq.sortBy(-_) + } + + /** + * Given a message offset, find its corresponding offset metadata in the log. + * 1. If the message offset is less than the log-start-offset (or) local-log-start-offset, then it returns the + * message-only metadata. + * 2. If the message offset is beyond the log-end-offset, then it returns the message-only metadata. + * 3. For all other cases, it returns the offset metadata from the log. + */ + private[log] def maybeConvertToOffsetMetadata(offset: Long): LogOffsetMetadata = { + try { + localLog.convertToOffsetMetadataOrThrow(offset) + } catch { + case _: OffsetOutOfRangeException => + new LogOffsetMetadata(offset) + } + } + + /** + * Delete any local log segments starting with the oldest segment and moving forward until until + * the user-supplied predicate is false or the segment containing the current high watermark is reached. + * We do not delete segments with offsets at or beyond the high watermark to ensure that the log start + * offset can never exceed it. If the high watermark has not yet been initialized, no segments are eligible + * for deletion. + * + * @param predicate A function that takes in a candidate log segment and the next higher segment + * (if there is one) and returns true iff it is deletable + * @param reason The reason for the segment deletion + * @return The number of segments deleted + */ + private def deleteOldSegments(predicate: (LogSegment, Option[LogSegment]) => Boolean, + reason: SegmentDeletionReason): Int = { + lock synchronized { + val deletable = deletableSegments(predicate) + if (deletable.nonEmpty) + deleteSegments(deletable, reason) + else + 0 + } + } + + /** + * @return true if this topic enables tiered storage and remote log copy is enabled (i.e. remote.log.copy.disable=false) + */ + private def remoteLogEnabledAndRemoteCopyEnabled(): Boolean = { + remoteLogEnabled() && !config.remoteLogCopyDisable() + } + + /** + * Find segments starting from the oldest until the user-supplied predicate is false. + * A final segment that is empty will never be returned. + * + * @param predicate A function that takes in a candidate log segment, the next higher segment + * (if there is one). It returns true iff the segment is deletable. + * @return the segments ready to be deleted + */ + private[log] def deletableSegments(predicate: (LogSegment, Option[LogSegment]) => Boolean): Iterable[LogSegment] = { + def isSegmentEligibleForDeletion(nextSegmentOpt: Option[LogSegment], upperBoundOffset: Long): Boolean = { + val allowDeletionDueToLogStartOffsetIncremented = nextSegmentOpt.isDefined && logStartOffset >= nextSegmentOpt.get.baseOffset + // Segments are eligible for deletion when: + // 1. they are uploaded to the remote storage + // 2. log-start-offset was incremented higher than the largest offset in the candidate segment + // Note: when remote log copy is disabled, we will fall back to local log check using retention.ms/bytes + if (remoteLogEnabledAndRemoteCopyEnabled()) { + (upperBoundOffset > 0 && upperBoundOffset - 1 <= highestOffsetInRemoteStorage()) || + allowDeletionDueToLogStartOffsetIncremented + } else { + true + } + } + + if (localLog.segments.isEmpty) { + Seq.empty + } else { + val deletable = ArrayBuffer.empty[LogSegment] + val segmentsIterator = localLog.segments.values.iterator + var segmentOpt = nextOption(segmentsIterator) + var shouldRoll = false + while (segmentOpt.isDefined) { + val segment = segmentOpt.get + val nextSegmentOpt = nextOption(segmentsIterator) + val isLastSegmentAndEmpty = nextSegmentOpt.isEmpty && segment.size == 0 + val upperBoundOffset = if (nextSegmentOpt.nonEmpty) nextSegmentOpt.get.baseOffset() else logEndOffset + // We don't delete segments with offsets at or beyond the high watermark to ensure that the log start + // offset can never exceed it. + val predicateResult = highWatermark >= upperBoundOffset && predicate(segment, nextSegmentOpt) + + // Roll the active segment when it breaches the configured retention policy. The rolled segment will be + // eligible for deletion and gets removed in the next iteration. + if (predicateResult && remoteLogEnabled() && nextSegmentOpt.isEmpty && segment.size > 0) { + shouldRoll = true + } + if (predicateResult && !isLastSegmentAndEmpty && isSegmentEligibleForDeletion(nextSegmentOpt, upperBoundOffset)) { + deletable += segment + segmentOpt = nextSegmentOpt + } else { + segmentOpt = Option.empty + } + } + if (shouldRoll) { + info("Rolling the active segment to make it eligible for deletion") + roll() + } + deletable + } + } + + private def incrementStartOffset(startOffset: Long, reason: LogStartOffsetIncrementReason): Unit = { + if (remoteLogEnabledAndRemoteCopyEnabled()) maybeIncrementLocalLogStartOffset(startOffset, reason) + else maybeIncrementLogStartOffset(startOffset, reason) + } + + private def deleteSegments(deletable: Iterable[LogSegment], reason: SegmentDeletionReason): Int = { + maybeHandleIOException(s"Error while deleting segments for $topicPartition in dir ${dir.getParent}") { + val numToDelete = deletable.size + if (numToDelete > 0) { + // we must always have at least one segment, so if we are going to delete all the segments, create a new one first + var segmentsToDelete = deletable + if (localLog.segments.numberOfSegments == numToDelete) { + val newSegment = roll() + if (deletable.last.baseOffset == newSegment.baseOffset) { + warn(s"Empty active segment at ${deletable.last.baseOffset} was deleted and recreated due to $reason") + segmentsToDelete = deletable.dropRight(1) + } + } + localLog.checkIfMemoryMappedBufferClosed() + if (segmentsToDelete.nonEmpty) { + // increment the local-log-start-offset or log-start-offset before removing the segment for lookups + val newLocalLogStartOffset = localLog.segments.higherSegment(segmentsToDelete.last.baseOffset()).get.baseOffset() + incrementStartOffset(newLocalLogStartOffset, LogStartOffsetIncrementReason.SegmentDeletion) + // remove the segments for lookups + localLog.removeAndDeleteSegments(segmentsToDelete.toList.asJava, true, reason) + } + deleteProducerSnapshots(deletable.toList.asJava, asyncDelete = true) + } + numToDelete + } + } + + /** + * If topic deletion is enabled, delete any local log segments that have either expired due to time based retention + * or because the log size is > retentionSize. + * + * Whether or not deletion is enabled, delete any local log segments that are before the log start offset + */ + def deleteOldSegments(): Int = { + if (config.delete) { + deleteLogStartOffsetBreachedSegments() + + deleteRetentionSizeBreachedSegments() + + deleteRetentionMsBreachedSegments() + } else { + deleteLogStartOffsetBreachedSegments() + } + } + + private def deleteRetentionMsBreachedSegments(): Int = { + val retentionMs = localRetentionMs(config, remoteLogEnabledAndRemoteCopyEnabled()) + if (retentionMs < 0) return 0 + val startMs = time.milliseconds + + def shouldDelete(segment: LogSegment, nextSegmentOpt: Option[LogSegment]): Boolean = { + val shouldDelete = startMs - segment.largestTimestamp > retentionMs + debug(s"$segment retentionMs breached: $shouldDelete, startMs=$startMs, retentionMs=$retentionMs") + shouldDelete + } + + deleteOldSegments(shouldDelete, RetentionMsBreach(this, remoteLogEnabledAndRemoteCopyEnabled())) + } + + private def deleteRetentionSizeBreachedSegments(): Int = { + val retentionSize: Long = localRetentionSize(config, remoteLogEnabledAndRemoteCopyEnabled()) + if (retentionSize < 0 || size < retentionSize) return 0 + var diff = size - retentionSize + def shouldDelete(segment: LogSegment, nextSegmentOpt: Option[LogSegment]): Boolean = { + val segmentSize = segment.size + val shouldDelete = diff - segmentSize >= 0 + debug(s"$segment retentionSize breached: $shouldDelete, log size before delete segment=$diff, after delete segment=${diff - segmentSize}") + if (shouldDelete) { + diff -= segmentSize + } + shouldDelete + } + + deleteOldSegments(shouldDelete, RetentionSizeBreach(this, remoteLogEnabledAndRemoteCopyEnabled())) + } + + private def deleteLogStartOffsetBreachedSegments(): Int = { + def shouldDelete(segment: LogSegment, nextSegmentOpt: Option[LogSegment]): Boolean = { + val isRemoteLogEnabled = remoteLogEnabled() + val localLSO = localLogStartOffset() + val shouldDelete = nextSegmentOpt.exists(_.baseOffset <= (if (isRemoteLogEnabled) localLSO else logStartOffset)) + debug(s"$segment logStartOffset breached: $shouldDelete, nextSegmentOpt=$nextSegmentOpt, " + + s"${if (isRemoteLogEnabled) s"localLogStartOffset=$localLSO" else s"logStartOffset=$logStartOffset"}") + shouldDelete + } + + deleteOldSegments(shouldDelete, StartOffsetBreach(this, remoteLogEnabled())) + } + + def isFuture: Boolean = localLog.isFuture + + /** + * The size of the log in bytes + */ + def size: Long = localLog.segments.sizeInBytes + + /** + * The log size in bytes for all segments that are only in local log but not yet in remote log. + */ + def onlyLocalLogSegmentsSize: Long = + UnifiedLog.sizeInBytes(logSegments.stream.filter(_.baseOffset >= highestOffsetInRemoteStorage()).collect(Collectors.toList[LogSegment])) + + /** + * The number of segments that are only in local log but not yet in remote log. + */ + def onlyLocalLogSegmentsCount: Long = + logSegments.stream().filter(_.baseOffset >= highestOffsetInRemoteStorage()).count() + + /** + * The offset of the next message that will be appended to the log + */ + def logEndOffset: Long = localLog.logEndOffset + + /** + * The offset metadata of the next message that will be appended to the log + */ + def logEndOffsetMetadata: LogOffsetMetadata = localLog.logEndOffsetMetadata + + /** + * Roll the log over to a new empty log segment if necessary. + * The segment will be rolled if one of the following conditions met: + * 1. The logSegment is full + * 2. The maxTime has elapsed since the timestamp of first message in the segment (or since the + * create time if the first message does not have a timestamp) + * 3. The index is full + * + * @param messagesSize The messages set size in bytes. + * @param appendInfo log append information + * + * @return The currently active segment after (perhaps) rolling to a new segment + */ + private def maybeRoll(messagesSize: Int, appendInfo: LogAppendInfo): LogSegment = lock synchronized { + val segment = localLog.segments.activeSegment + val now = time.milliseconds + + val maxTimestampInMessages = appendInfo.maxTimestamp + val maxOffsetInMessages = appendInfo.lastOffset + + if (segment.shouldRoll(new RollParams(config.maxSegmentMs, config.segmentSize, appendInfo.maxTimestamp, appendInfo.lastOffset, messagesSize, now))) { + debug(s"Rolling new log segment (log_size = ${segment.size}/${config.segmentSize}}, " + + s"offset_index_size = ${segment.offsetIndex.entries}/${segment.offsetIndex.maxEntries}, " + + s"time_index_size = ${segment.timeIndex.entries}/${segment.timeIndex.maxEntries}, " + + s"inactive_time_ms = ${segment.timeWaitedForRoll(now, maxTimestampInMessages)}/${config.segmentMs - segment.rollJitterMs}).") + + /* + maxOffsetInMessages - Integer.MAX_VALUE is a heuristic value for the first offset in the set of messages. + Since the offset in messages will not differ by more than Integer.MAX_VALUE, this is guaranteed <= the real + first offset in the set. Determining the true first offset in the set requires decompression, which the follower + is trying to avoid during log append. Prior behavior assigned new baseOffset = logEndOffset from old segment. + This was problematic in the case that two consecutive messages differed in offset by + Integer.MAX_VALUE.toLong + 2 or more. In this case, the prior behavior would roll a new log segment whose + base offset was too low to contain the next message. This edge case is possible when a replica is recovering a + highly compacted topic from scratch. + Note that this is only required for pre-V2 message formats because these do not store the first message offset + in the header. + */ + val rollOffset = if (appendInfo.firstOffset == UnifiedLog.UnknownOffset) + maxOffsetInMessages - Integer.MAX_VALUE + else + appendInfo.firstOffset + + roll(Some(rollOffset)) + } else { + segment + } + } + + /** + * Roll the local log over to a new active segment starting with the expectedNextOffset (when provided), + * or localLog.logEndOffset otherwise. This will trim the index to the exact size of the number of entries + * it currently contains. + * + * @return The newly rolled segment + */ + def roll(expectedNextOffset: Option[Long] = None): LogSegment = lock synchronized { + val nextOffset : JLong = expectedNextOffset match { + case Some(offset) => offset + case None => 0L + } + val newSegment = localLog.roll(nextOffset) + // Take a snapshot of the producer state to facilitate recovery. It is useful to have the snapshot + // offset align with the new segment offset since this ensures we can recover the segment by beginning + // with the corresponding snapshot file and scanning the segment data. Because the segment base offset + // may actually be ahead of the current producer state end offset (which corresponds to the log end offset), + // we manually override the state offset here prior to taking the snapshot. + producerStateManager.updateMapEndOffset(newSegment.baseOffset) + // We avoid potentially-costly fsync call, since we acquire UnifiedLog#lock here + // which could block subsequent produces in the meantime. + // flush is done in the scheduler thread along with segment flushing below + val maybeSnapshot = producerStateManager.takeSnapshot(false) + updateHighWatermarkWithLogEndOffset() + // Schedule an asynchronous flush of the old segment + scheduler.scheduleOnce("flush-log", () => { + maybeSnapshot.ifPresent(f => flushProducerStateSnapshot(f.toPath)) + flushUptoOffsetExclusive(newSegment.baseOffset) + }) + newSegment + } + + /** + * Flush all local log segments + * + * @param forceFlushActiveSegment should be true during a clean shutdown, and false otherwise. The reason is that + * we have to pass logEndOffset + 1 to the `localLog.flush(offset: Long): Unit` function to flush empty + * active segments, which is important to make sure we persist the active segment file during shutdown, particularly + * when it's empty. + */ + def flush(forceFlushActiveSegment: Boolean): Unit = flush(logEndOffset, forceFlushActiveSegment) + + /** + * Flush local log segments for all offsets up to offset-1 + * + * @param offset The offset to flush up to (non-inclusive); the new recovery point + */ + def flushUptoOffsetExclusive(offset: Long): Unit = flush(offset, includingOffset = false) + + /** + * Flush local log segments for all offsets up to offset-1 if includingOffset=false; up to offset + * if includingOffset=true. The recovery point is set to offset. + * + * @param offset The offset to flush up to; the new recovery point + * @param includingOffset Whether the flush includes the provided offset. + */ + private def flush(offset: Long, includingOffset: Boolean): Unit = { + val flushOffset = if (includingOffset) offset + 1 else offset + val newRecoveryPoint = offset + val includingOffsetStr = if (includingOffset) "inclusive" else "exclusive" + maybeHandleIOException(s"Error while flushing log for $topicPartition in dir ${dir.getParent} with offset $offset " + + s"($includingOffsetStr) and recovery point $newRecoveryPoint") { + if (flushOffset > localLog.recoveryPoint) { + debug(s"Flushing log up to offset $offset ($includingOffsetStr)" + + s"with recovery point $newRecoveryPoint, last flushed: $lastFlushTime, current time: ${time.milliseconds()}," + + s"unflushed: ${localLog.unflushedMessages}") + localLog.flush(flushOffset) + lock synchronized { + localLog.markFlushed(newRecoveryPoint) + } + } + } + } + + /** + * Completely delete the local log directory and all contents from the file system with no delay + */ + private[log] def delete(): Unit = { + maybeHandleIOException(s"Error while deleting log for $topicPartition in dir ${dir.getParent}") { + lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + producerExpireCheck.cancel(true) + leaderEpochCache.clear() + val deletedSegments = localLog.deleteAllSegments() + deleteProducerSnapshots(deletedSegments, asyncDelete = false) + localLog.deleteEmptyDir() + } + } + } + + // visible for testing + private[log] def takeProducerSnapshot(): Unit = lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + producerStateManager.takeSnapshot() + } + + // visible for testing + private[log] def latestProducerSnapshotOffset: OptionalLong = lock synchronized { + producerStateManager.latestSnapshotOffset + } + + // visible for testing + private[log] def oldestProducerSnapshotOffset: OptionalLong = lock synchronized { + producerStateManager.oldestSnapshotOffset + } + + // visible for testing + private[log] def latestProducerStateEndOffset: Long = lock synchronized { + producerStateManager.mapEndOffset + } + + private[log] def flushProducerStateSnapshot(snapshot: Path): Unit = { + maybeHandleIOException(s"Error while deleting producer state snapshot $snapshot for $topicPartition in dir ${dir.getParent}") { + Utils.flushFileIfExists(snapshot) + } + } + + /** + * Truncate this log so that it ends with the greatest offset < targetOffset. + * + * @param targetOffset The offset to truncate to, an upper bound on all offsets in the log after truncation is complete. + * @return True iff targetOffset < logEndOffset + */ + private[kafka] def truncateTo(targetOffset: Long): Boolean = { + maybeHandleIOException(s"Error while truncating log to offset $targetOffset for $topicPartition in dir ${dir.getParent}") { + if (targetOffset < 0) + throw new IllegalArgumentException(s"Cannot truncate partition $topicPartition to a negative offset (%d).".format(targetOffset)) + if (targetOffset >= localLog.logEndOffset) { + info(s"Truncating to $targetOffset has no effect as the largest offset in the log is ${localLog.logEndOffset - 1}") + + // Always truncate epoch cache since we may have a conflicting epoch entry at the + // end of the log from the leader. This could happen if this broker was a leader + // and inserted the first start offset entry, but then failed to append any entries + // before another leader was elected. + lock synchronized { + leaderEpochCache.truncateFromEndAsyncFlush(logEndOffset) + } + + false + } else { + info(s"Truncating to offset $targetOffset") + lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + if (localLog.segments.firstSegmentBaseOffset.getAsLong > targetOffset) { + truncateFullyAndStartAt(targetOffset) + } else { + val deletedSegments = localLog.truncateTo(targetOffset) + deleteProducerSnapshots(deletedSegments, asyncDelete = true) + leaderEpochCache.truncateFromEndAsyncFlush(targetOffset) + logStartOffset = math.min(targetOffset, logStartOffset) + rebuildProducerState(targetOffset, producerStateManager) + if (highWatermark >= localLog.logEndOffset) + updateHighWatermark(localLog.logEndOffsetMetadata) + } + true + } + } + } + } + + /** + * Delete all data in the log and start at the new offset + * + * @param newOffset The new offset to start the log with + * @param logStartOffsetOpt The log start offset to set for the log. If None, the new offset will be used. + */ + def truncateFullyAndStartAt(newOffset: Long, + logStartOffsetOpt: Option[Long] = None): Unit = { + maybeHandleIOException(s"Error while truncating the entire log for $topicPartition in dir ${dir.getParent}") { + debug(s"Truncate and start at offset $newOffset, logStartOffset: ${logStartOffsetOpt.getOrElse(newOffset)}") + lock synchronized { + localLog.truncateFullyAndStartAt(newOffset) + leaderEpochCache.clearAndFlush() + producerStateManager.truncateFullyAndStartAt(newOffset) + logStartOffset = logStartOffsetOpt.getOrElse(newOffset) + if (remoteLogEnabled()) _localLogStartOffset = newOffset + rebuildProducerState(newOffset, producerStateManager) + updateHighWatermark(localLog.logEndOffsetMetadata) + } + } + } + + /** + * The time this log is last known to have been fully flushed to disk + */ + def lastFlushTime: Long = localLog.lastFlushTime + + /** + * The active segment that is currently taking appends + */ + def activeSegment: LogSegment = localLog.segments.activeSegment + + /** + * All the log segments in this log ordered from oldest to newest + */ + def logSegments: util.Collection[LogSegment] = localLog.segments.values + + /** + * Get all segments beginning with the segment that includes "from" and ending with the segment + * that includes up to "to-1" or the end of the log (if to > logEndOffset). + */ + def logSegments(from: Long, to: Long): Iterable[LogSegment] = lock synchronized { + localLog.segments.values(from, to).asScala + } + + def nonActiveLogSegmentsFrom(from: Long): util.Collection[LogSegment] = lock synchronized { + localLog.segments.nonActiveLogSegmentsFrom(from) + } + + override def toString: String = { + val logString = new StringBuilder + logString.append(s"Log(dir=$dir") + topicId.foreach(id => logString.append(s", topicId=$id")) + logString.append(s", topic=${topicPartition.topic}") + logString.append(s", partition=${topicPartition.partition}") + logString.append(s", highWatermark=$highWatermark") + logString.append(s", lastStableOffset=$lastStableOffset") + logString.append(s", logStartOffset=$logStartOffset") + logString.append(s", logEndOffset=$logEndOffset") + logString.append(")") + logString.toString + } + + private[log] def replaceSegments(newSegments: Seq[LogSegment], oldSegments: Seq[LogSegment]): Unit = { + lock synchronized { + localLog.checkIfMemoryMappedBufferClosed() + val deletedSegments = UnifiedLog.replaceSegments(localLog.segments, newSegments, oldSegments, dir, topicPartition, + config, scheduler, logDirFailureChannel, logIdent) + deleteProducerSnapshots(deletedSegments.toList.asJava, asyncDelete = true) + } + } + + /** + * This function does not acquire Log.lock. The caller has to make sure log segments don't get deleted during + * this call, and also protects against calling this function on the same segment in parallel. + * + * Currently, it is used by LogCleaner threads on log compact non-active segments only with LogCleanerManager's lock + * to ensure no other logcleaner threads and retention thread can work on the same segment. + */ + private[log] def getFirstBatchTimestampForSegments(segments: util.Collection[LogSegment]): util.Collection[JLong] = { + segments.stream().map[JLong](s => s.getFirstBatchTimestamp).collect(Collectors.toList()) + } + + /** + * remove deleted log metrics + */ + private[log] def removeLogMetrics(): Unit = { + metricNames.foreach { + case (name, tags) => metricsGroup.removeMetric(name, tags) + } + metricNames = Map.empty + } + + /** + * Add the given segment to the segments in this log. If this segment replaces an existing segment, delete it. + * @param segment The segment to add + */ + @threadsafe + private[log] def addSegment(segment: LogSegment): LogSegment = localLog.segments.add(segment) + + private def maybeHandleIOException[T](msg: => String)(fun: => T): T = { + LocalLog.maybeHandleIOException(logDirFailureChannel, parentDir, () => msg, () => fun) + } + + private[log] def splitOverflowedSegment(segment: LogSegment): List[LogSegment] = lock synchronized { + val result = UnifiedLog.splitOverflowedSegment(segment, localLog.segments, dir, topicPartition, config, scheduler, logDirFailureChannel, logIdent) + deleteProducerSnapshots(result.deletedSegments, asyncDelete = true) + result.newSegments.asScala.toList + } + + private[log] def deleteProducerSnapshots(segments: util.Collection[LogSegment], asyncDelete: Boolean): Unit = { + JUnifiedLog.deleteProducerSnapshots(segments, producerStateManager, asyncDelete, scheduler, config, logDirFailureChannel, parentDir, topicPartition) + } +} + +object UnifiedLog extends Logging { + val LogFileSuffix: String = LogFileUtils.LOG_FILE_SUFFIX + + val IndexFileSuffix: String = LogFileUtils.INDEX_FILE_SUFFIX + + val TimeIndexFileSuffix: String = LogFileUtils.TIME_INDEX_FILE_SUFFIX + + val TxnIndexFileSuffix: String = LogFileUtils.TXN_INDEX_FILE_SUFFIX + + val CleanedFileSuffix: String = LogFileUtils.CLEANED_FILE_SUFFIX + + val SwapFileSuffix: String = LogFileUtils.SWAP_FILE_SUFFIX + + val DeleteDirSuffix: String = LogFileUtils.DELETE_DIR_SUFFIX + + val StrayDirSuffix: String = LogFileUtils.STRAY_DIR_SUFFIX + + val UnknownOffset: Long = LocalLog.UNKNOWN_OFFSET + + def isRemoteLogEnabled(remoteStorageSystemEnable: Boolean, + config: LogConfig, + topic: String): Boolean = { + // Remote log is enabled only for non-compact and non-internal topics + remoteStorageSystemEnable && + !(config.compact || Topic.isInternal(topic) + || TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_TOPIC_NAME.equals(topic) + || Topic.CLUSTER_METADATA_TOPIC_NAME.equals(topic)) && + config.remoteStorageEnable() + } + + def apply(dir: File, + config: LogConfig, + logStartOffset: Long, + recoveryPoint: Long, + scheduler: Scheduler, + brokerTopicStats: BrokerTopicStats, + time: Time, + maxTransactionTimeoutMs: Int, + producerStateManagerConfig: ProducerStateManagerConfig, + producerIdExpirationCheckIntervalMs: Int, + logDirFailureChannel: LogDirFailureChannel, + lastShutdownClean: Boolean = true, + topicId: Option[Uuid], + keepPartitionMetadataFile: Boolean, + numRemainingSegments: ConcurrentMap[String, Integer] = new ConcurrentHashMap[String, Integer], + remoteStorageSystemEnable: Boolean = false, + logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER): UnifiedLog = { + // create the log directory if it doesn't exist + Files.createDirectories(dir.toPath) + val topicPartition = UnifiedLog.parseTopicPartitionName(dir) + val segments = new LogSegments(topicPartition) + // The created leaderEpochCache will be truncated by LogLoader if necessary + // so it is guaranteed that the epoch entries will be correct even when on-disk + // checkpoint was stale (due to async nature of LeaderEpochFileCache#truncateFromStart/End). + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + dir, + topicPartition, + logDirFailureChannel, + None, + scheduler) + val producerStateManager = new ProducerStateManager(topicPartition, dir, + maxTransactionTimeoutMs, producerStateManagerConfig, time) + val isRemoteLogEnabled = UnifiedLog.isRemoteLogEnabled(remoteStorageSystemEnable, config, topicPartition.topic) + val offsets = new LogLoader( + dir, + topicPartition, + config, + scheduler, + time, + logDirFailureChannel, + lastShutdownClean, + segments, + logStartOffset, + recoveryPoint, + leaderEpochCache, + producerStateManager, + numRemainingSegments, + isRemoteLogEnabled, + ).load() + val localLog = new LocalLog(dir, config, segments, offsets.recoveryPoint, + offsets.nextOffsetMetadata, scheduler, time, topicPartition, logDirFailureChannel) + new UnifiedLog(offsets.logStartOffset, + localLog, + brokerTopicStats, + producerIdExpirationCheckIntervalMs, + leaderEpochCache, + producerStateManager, + topicId, + keepPartitionMetadataFile, + remoteStorageSystemEnable, + logOffsetsListener) + } + + def logDeleteDirName(topicPartition: TopicPartition): String = LocalLog.logDeleteDirName(topicPartition) + + def logFutureDirName(topicPartition: TopicPartition): String = LocalLog.logFutureDirName(topicPartition) + + def logStrayDirName(topicPartition: TopicPartition): String = LocalLog.logStrayDirName(topicPartition) + + def logDirName(topicPartition: TopicPartition): String = LocalLog.logDirName(topicPartition) + + def transactionIndexFile(dir: File, offset: Long, suffix: String = ""): File = LogFileUtils.transactionIndexFile(dir, offset, suffix) + + def offsetFromFile(file: File): Long = LogFileUtils.offsetFromFile(file) + + def sizeInBytes(segments: util.Collection[LogSegment]): Long = LogSegments.sizeInBytes(segments) + + def parseTopicPartitionName(dir: File): TopicPartition = LocalLog.parseTopicPartitionName(dir) + + /** + * Create a new LeaderEpochFileCache instance and load the epoch entries from the backing checkpoint file or + * the provided currentCache (if not empty). + * + * @param dir The directory in which the log will reside + * @param topicPartition The topic partition + * @param logDirFailureChannel The LogDirFailureChannel to asynchronously handle log dir failure + * @param currentCache The current LeaderEpochFileCache instance (if any) + * @param scheduler The scheduler for executing asynchronous tasks + * @return The new LeaderEpochFileCache instance (if created), none otherwise + */ + def createLeaderEpochCache(dir: File, + topicPartition: TopicPartition, + logDirFailureChannel: LogDirFailureChannel, + currentCache: Option[LeaderEpochFileCache], + scheduler: Scheduler): LeaderEpochFileCache = { + val leaderEpochFile = LeaderEpochCheckpointFile.newFile(dir) + val checkpointFile = new LeaderEpochCheckpointFile(leaderEpochFile, logDirFailureChannel) + currentCache.map(_.withCheckpoint(checkpointFile)).getOrElse(new LeaderEpochFileCache(topicPartition, checkpointFile, scheduler)) + } + + private[log] def replaceSegments(existingSegments: LogSegments, + newSegments: Seq[LogSegment], + oldSegments: Seq[LogSegment], + dir: File, + topicPartition: TopicPartition, + config: LogConfig, + scheduler: Scheduler, + logDirFailureChannel: LogDirFailureChannel, + logPrefix: String, + isRecoveredSwapFile: Boolean = false): Iterable[LogSegment] = { + LocalLog.replaceSegments(existingSegments, + newSegments.asJava, + oldSegments.asJava, + dir, + topicPartition, + config, + scheduler, + logDirFailureChannel, + logPrefix, + isRecoveredSwapFile).asScala + } + + private[log] def splitOverflowedSegment(segment: LogSegment, + existingSegments: LogSegments, + dir: File, + topicPartition: TopicPartition, + config: LogConfig, + scheduler: Scheduler, + logDirFailureChannel: LogDirFailureChannel, + logPrefix: String): SplitSegmentResult = { + LocalLog.splitOverflowedSegment(segment, existingSegments, dir, topicPartition, config, scheduler, logDirFailureChannel, logPrefix) + } + + private[log] def createNewCleanedSegment(dir: File, logConfig: LogConfig, baseOffset: Long): LogSegment = { + LocalLog.createNewCleanedSegment(dir, logConfig, baseOffset) + } + + // Visible for benchmarking + def newValidatorMetricsRecorder(allTopicsStats: BrokerTopicMetrics): LogValidator.MetricsRecorder = { + new LogValidator.MetricsRecorder { + def recordInvalidMagic(): Unit = + allTopicsStats.invalidMagicNumberRecordsPerSec.mark() + + def recordInvalidOffset(): Unit = + allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() + + def recordInvalidSequence(): Unit = + allTopicsStats.invalidOffsetOrSequenceRecordsPerSec.mark() + + def recordInvalidChecksums(): Unit = + allTopicsStats.invalidMessageCrcRecordsPerSec.mark() + + def recordNoKeyCompactedTopic(): Unit = + allTopicsStats.noKeyCompactedTopicRecordsPerSec.mark() + } + } + + private[log] def localRetentionMs(config: LogConfig, remoteLogEnabledAndRemoteCopyEnabled: Boolean): Long = { + if (remoteLogEnabledAndRemoteCopyEnabled) config.localRetentionMs else config.retentionMs + } + + private[log] def localRetentionSize(config: LogConfig, remoteLogEnabledAndRemoteCopyEnabled: Boolean): Long = { + if (remoteLogEnabledAndRemoteCopyEnabled) config.localRetentionBytes else config.retentionSize + } + + /** + * Wraps the value of iterator.next() in an option. + * Note: this facility is a part of the Iterator class starting from scala v2.13. + * + * @param iterator the iterator + * @tparam T the type of object held within the iterator + * @return Some(iterator.next) if a next element exists, None otherwise. + */ + private[log] def nextOption[T](iterator: util.Iterator[T]): Option[T] = { + if (iterator.hasNext) + Some(iterator.next()) + else + None + } + +} + +object LogMetricNames { + val NumLogSegments: String = "NumLogSegments" + val LogStartOffset: String = "LogStartOffset" + val LogEndOffset: String = "LogEndOffset" + val Size: String = "Size" + + def allMetricNames: List[String] = { + List(NumLogSegments, LogStartOffset, LogEndOffset, Size) + } +} + +case class RetentionMsBreach(log: UnifiedLog, remoteLogEnabledAndRemoteCopyEnabled: Boolean) extends SegmentDeletionReason { + override def logReason(toDelete: util.List[LogSegment]): Unit = { + val retentionMs = UnifiedLog.localRetentionMs(log.config, remoteLogEnabledAndRemoteCopyEnabled) + toDelete.forEach { segment => + if (segment.largestRecordTimestamp.isPresent) + if (remoteLogEnabledAndRemoteCopyEnabled) + log.info(s"Deleting segment $segment due to local log retention time ${retentionMs}ms breach based on the largest " + + s"record timestamp in the segment") + else + log.info(s"Deleting segment $segment due to log retention time ${retentionMs}ms breach based on the largest " + + s"record timestamp in the segment") + else { + if (remoteLogEnabledAndRemoteCopyEnabled) + log.info(s"Deleting segment $segment due to local log retention time ${retentionMs}ms breach based on the " + + s"last modified time of the segment") + else + log.info(s"Deleting segment $segment due to log retention time ${retentionMs}ms breach based on the " + + s"last modified time of the segment") + } + } + } +} + +case class RetentionSizeBreach(log: UnifiedLog, remoteLogEnabledAndRemoteCopyEnabled: Boolean) extends SegmentDeletionReason { + override def logReason(toDelete: util.List[LogSegment]): Unit = { + var size = log.size + toDelete.forEach { segment => + size -= segment.size + if (remoteLogEnabledAndRemoteCopyEnabled) log.info(s"Deleting segment $segment due to local log retention size ${UnifiedLog.localRetentionSize(log.config, remoteLogEnabledAndRemoteCopyEnabled)} breach. " + + s"Local log size after deletion will be $size.") + else log.info(s"Deleting segment $segment due to log retention size ${log.config.retentionSize} breach. Log size " + + s"after deletion will be $size.") + } + } +} + +case class StartOffsetBreach(log: UnifiedLog, remoteLogEnabled: Boolean) extends SegmentDeletionReason { + override def logReason(toDelete: util.List[LogSegment]): Unit = { + if (remoteLogEnabled) + log.info(s"Deleting segments due to local log start offset ${log.localLogStartOffset()} breach: ${toDelete.asScala.mkString(",")}") + else + log.info(s"Deleting segments due to log start offset ${log.logStartOffset} breach: ${toDelete.asScala.mkString(",")}") + } +} diff --git a/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala b/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala index fe1050222b12b..83973ede23dbf 100755 --- a/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala +++ b/core/src/main/scala/kafka/metrics/KafkaMetricsConfig.scala @@ -33,8 +33,8 @@ class KafkaMetricsConfig(props: VerifiableProperties) { * Comma-separated list of reporter types. These classes should be on the * classpath and will be instantiated at run-time. */ - val reporters: Seq[String] = (if (props.containsKey(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG)) - Csv.parseCsvList(props.getString(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG)) else MetricConfigs.KAFKA_METRIC_REPORTER_CLASSES_DEFAULT).asScala + val reporters: Seq[String] = Csv.parseCsvList(props.getString(MetricConfigs.KAFKA_METRICS_REPORTER_CLASSES_CONFIG, + MetricConfigs.KAFKA_METRIC_REPORTER_CLASSES_DEFAULT)).asScala /** * The metrics polling interval (in seconds). diff --git a/core/src/main/scala/kafka/metrics/KafkaMetricsReporter.scala b/core/src/main/scala/kafka/metrics/KafkaMetricsReporter.scala index eb6bae3ced657..136bb88b289ce 100755 --- a/core/src/main/scala/kafka/metrics/KafkaMetricsReporter.scala +++ b/core/src/main/scala/kafka/metrics/KafkaMetricsReporter.scala @@ -21,9 +21,8 @@ package kafka.metrics import kafka.utils.{CoreUtils, VerifiableProperties} -import org.apache.kafka.common.utils.Utils - import java.util.concurrent.atomic.AtomicBoolean + import scala.collection.Seq import scala.collection.mutable.ArrayBuffer @@ -63,7 +62,7 @@ object KafkaMetricsReporter { val metricsConfig = new KafkaMetricsConfig(verifiableProps) if (metricsConfig.reporters.nonEmpty) { metricsConfig.reporters.foreach(reporterType => { - val reporter = Utils.newInstance(reporterType, classOf[KafkaMetricsReporter]) + val reporter = CoreUtils.createObject[KafkaMetricsReporter](reporterType) reporter.init(verifiableProps) reporters += reporter reporter match { diff --git a/core/src/main/scala/kafka/network/RequestChannel.scala b/core/src/main/scala/kafka/network/RequestChannel.scala index 473ab172a093e..a16e03a1916d4 100644 --- a/core/src/main/scala/kafka/network/RequestChannel.scala +++ b/core/src/main/scala/kafka/network/RequestChannel.scala @@ -227,8 +227,6 @@ object RequestChannel extends Logging { Seq(specifiedMetricName, header.apiKey.name) } else if (header.apiKey == ApiKeys.ADD_PARTITIONS_TO_TXN && body[AddPartitionsToTxnRequest].allVerifyOnlyRequest) { Seq(RequestMetrics.VERIFY_PARTITIONS_IN_TXN_METRIC_NAME) - } else if (header.apiKey == ApiKeys.LIST_CONFIG_RESOURCES && header.apiVersion == 0) { - Seq(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME, header.apiKey.name) } else { Seq(header.apiKey.name) } @@ -342,6 +340,7 @@ object RequestChannel extends Logging { } class RequestChannel(val queueSize: Int, + val metricNamePrefix: String, time: Time, val metrics: RequestChannelMetrics) { import RequestChannel._ @@ -350,11 +349,13 @@ class RequestChannel(val queueSize: Int, private val requestQueue = new ArrayBlockingQueue[BaseRequest](queueSize) private val processors = new ConcurrentHashMap[Int, Processor]() + private val requestQueueSizeMetricName = metricNamePrefix.concat(RequestQueueSizeMetric) + private val responseQueueSizeMetricName = metricNamePrefix.concat(ResponseQueueSizeMetric) private val callbackQueue = new ArrayBlockingQueue[BaseRequest](queueSize) - metricsGroup.newGauge(RequestQueueSizeMetric, () => requestQueue.size) + metricsGroup.newGauge(requestQueueSizeMetricName, () => requestQueue.size) - metricsGroup.newGauge(ResponseQueueSizeMetric, () => { + metricsGroup.newGauge(responseQueueSizeMetricName, () => { processors.values.asScala.foldLeft(0) {(total, processor) => total + processor.responseQueueSize } @@ -364,13 +365,13 @@ class RequestChannel(val queueSize: Int, if (processors.putIfAbsent(processor.id, processor) != null) warn(s"Unexpected processor with processorId ${processor.id}") - metricsGroup.newGauge(ResponseQueueSizeMetric, () => processor.responseQueueSize, + metricsGroup.newGauge(responseQueueSizeMetricName, () => processor.responseQueueSize, Map(ProcessorMetricTag -> processor.id.toString).asJava) } def removeProcessor(processorId: Int): Unit = { processors.remove(processorId) - metricsGroup.removeMetric(ResponseQueueSizeMetric, Map(ProcessorMetricTag -> processorId.toString).asJava) + metricsGroup.removeMetric(responseQueueSizeMetricName, Map(ProcessorMetricTag -> processorId.toString).asJava) } /** Send a request to be handled, potentially blocking until there is room in the queue for the request */ diff --git a/core/src/main/scala/kafka/network/SocketServer.scala b/core/src/main/scala/kafka/network/SocketServer.scala index 884c00002c5b5..de658a82da0ae 100644 --- a/core/src/main/scala/kafka/network/SocketServer.scala +++ b/core/src/main/scala/kafka/network/SocketServer.scala @@ -25,10 +25,11 @@ import java.util import java.util.Optional import java.util.concurrent._ import java.util.concurrent.atomic._ +import kafka.cluster.EndPoint import kafka.network.Processor._ import kafka.network.RequestChannel.{CloseConnectionResponse, EndThrottlingResponse, NoOpResponse, SendResponse, StartThrottlingResponse} import kafka.network.SocketServer._ -import kafka.server.{BrokerReconfigurable, KafkaConfig} +import kafka.server.{ApiVersionManager, BrokerReconfigurable, KafkaConfig} import org.apache.kafka.common.message.ApiMessageType.ListenerType import kafka.utils._ import org.apache.kafka.common.config.ConfigException @@ -45,7 +46,7 @@ import org.apache.kafka.common.utils.{KafkaThread, LogContext, Time, Utils} import org.apache.kafka.common.{Endpoint, KafkaException, MetricName, Reconfigurable} import org.apache.kafka.network.{ConnectionQuotaEntity, ConnectionThrottledException, SocketServerConfigs, TooManyConnectionsException} import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.{ApiVersionManager, ServerSocketFactory} +import org.apache.kafka.server.ServerSocketFactory import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.network.ConnectionDisconnectListener @@ -95,8 +96,8 @@ class SocketServer( memoryPoolSensor.add(new Meter(TimeUnit.MILLISECONDS, memoryPoolDepletedPercentMetricName, memoryPoolDepletedTimeMetricName)) private val memoryPool = if (config.queuedMaxBytes > 0) new SimpleMemoryPool(config.queuedMaxBytes, config.socketRequestMaxBytes, false, memoryPoolSensor) else MemoryPool.NONE // data-plane - private[network] val dataPlaneAcceptors = new ConcurrentHashMap[Endpoint, DataPlaneAcceptor]() - val dataPlaneRequestChannel = new RequestChannel(maxQueuedRequests, time, apiVersionManager.newRequestMetrics) + private[network] val dataPlaneAcceptors = new ConcurrentHashMap[EndPoint, DataPlaneAcceptor]() + val dataPlaneRequestChannel = new RequestChannel(maxQueuedRequests, DataPlaneAcceptor.MetricPrefix, time, apiVersionManager.newRequestMetrics) private[this] val nextProcessorId: AtomicInteger = new AtomicInteger(0) val connectionQuotas = new ConnectionQuotas(config, time, metrics) @@ -112,7 +113,7 @@ class SocketServer( private var stopped = false // Socket server metrics - metricsGroup.newGauge(s"NetworkProcessorAvgIdlePercent", () => SocketServer.this.synchronized { + metricsGroup.newGauge(s"${DataPlaneAcceptor.MetricPrefix}NetworkProcessorAvgIdlePercent", () => SocketServer.this.synchronized { val dataPlaneProcessors = dataPlaneAcceptors.asScala.values.flatMap(a => a.processors) val ioWaitRatioMetricNames = dataPlaneProcessors.map { p => metrics.metricName("io-wait-ratio", MetricsGroup, p.metricTags) @@ -128,7 +129,7 @@ class SocketServer( metricsGroup.newGauge("MemoryPoolAvailable", () => memoryPool.availableMemory) metricsGroup.newGauge("MemoryPoolUsed", () => memoryPool.size() - memoryPool.availableMemory) - metricsGroup.newGauge(s"ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized { + metricsGroup.newGauge(s"${DataPlaneAcceptor.MetricPrefix}ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized { val dataPlaneProcessors = dataPlaneAcceptors.asScala.values.flatMap(a => a.processors) val expiredConnectionsKilledCountMetricNames = dataPlaneProcessors.map { p => metrics.metricName("expired-connections-killed-count", MetricsGroup, p.metricTags) @@ -160,8 +161,8 @@ class SocketServer( * Therefore, we do not know that any particular request processor will be running by the end of * this function -- just that it might be running. * - * @param authorizerFutures Future per [[Endpoint]] used to wait before starting the - * processor corresponding to the [[Endpoint]]. Any endpoint + * @param authorizerFutures Future per [[EndPoint]] used to wait before starting the + * processor corresponding to the [[EndPoint]]. Any endpoint * that does not appear in this map will be started once all * authorizerFutures are complete. * @@ -180,7 +181,7 @@ class SocketServer( // Because of ephemeral ports, we need to match acceptors to futures by looking at // the listener name, rather than the endpoint object. val authorizerFuture = authorizerFutures.find { - case (endpoint, _) => acceptor.endPoint.listener.equals(endpoint.listener()) + case (endpoint, _) => acceptor.endPoint.listenerName.value().equals(endpoint.listenerName().get()) } match { case None => allAuthorizerFuturesComplete case Some((_, future)) => future @@ -209,24 +210,23 @@ class SocketServer( enableFuture } - private def createDataPlaneAcceptorAndProcessors(endpoint: Endpoint): Unit = synchronized { + private def createDataPlaneAcceptorAndProcessors(endpoint: EndPoint): Unit = synchronized { if (stopped) { throw new RuntimeException("Can't create new data plane acceptor and processors: SocketServer is stopped.") } - val listenerName = ListenerName.normalised(endpoint.listener) - val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(listenerName.configPrefix) - connectionQuotas.addListener(config, listenerName) - val isPrivilegedListener = config.interBrokerListenerName == listenerName + val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(endpoint.listenerName.configPrefix) + connectionQuotas.addListener(config, endpoint.listenerName) + val isPrivilegedListener = config.interBrokerListenerName == endpoint.listenerName val dataPlaneAcceptor = createDataPlaneAcceptor(endpoint, isPrivilegedListener, dataPlaneRequestChannel) config.addReconfigurable(dataPlaneAcceptor) dataPlaneAcceptor.configure(parsedConfigs) dataPlaneAcceptors.put(endpoint, dataPlaneAcceptor) - info(s"Created data-plane acceptor and processors for endpoint : ${listenerName}") + info(s"Created data-plane acceptor and processors for endpoint : ${endpoint.listenerName}") } - private def endpoints = config.listeners.map(l => ListenerName.normalised(l.listener) -> l).toMap + private def endpoints = config.listeners.map(l => l.listenerName -> l).toMap - protected def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { + protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { new DataPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, isPrivilegedListener, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager) } @@ -277,7 +277,7 @@ class SocketServer( /** * This method is called to dynamically add listeners. */ - def addListeners(listenersAdded: Seq[Endpoint]): Unit = synchronized { + def addListeners(listenersAdded: Seq[EndPoint]): Unit = synchronized { if (stopped) { throw new RuntimeException("can't add new listeners: SocketServer is stopped.") } @@ -297,10 +297,10 @@ class SocketServer( } } - def removeListeners(listenersRemoved: Seq[Endpoint]): Unit = synchronized { + def removeListeners(listenersRemoved: Seq[EndPoint]): Unit = synchronized { info(s"Removing data-plane listeners for endpoints $listenersRemoved") listenersRemoved.foreach { endpoint => - connectionQuotas.removeListener(config, ListenerName.normalised(endpoint.listener)) + connectionQuotas.removeListener(config, endpoint.listenerName) dataPlaneAcceptors.asScala.remove(endpoint).foreach { acceptor => acceptor.beginShutdown() acceptor.close() @@ -345,7 +345,7 @@ class SocketServer( // For test usage def dataPlaneAcceptor(listenerName: String): Option[DataPlaneAcceptor] = { dataPlaneAcceptors.asScala.foreach { case (endPoint, acceptor) => - if (endPoint.listener == listenerName) + if (endPoint.listenerName.value() == listenerName) return Some(acceptor) } None @@ -370,11 +370,13 @@ object SocketServer { } object DataPlaneAcceptor { + val ThreadPrefix: String = "data-plane" + val MetricPrefix: String = "" val ListenerReconfigurableConfigs: Set[String] = Set(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG) } class DataPlaneAcceptor(socketServer: SocketServer, - endPoint: Endpoint, + endPoint: EndPoint, config: KafkaConfig, nodeId: Int, connectionQuotas: ConnectionQuotas, @@ -400,11 +402,14 @@ class DataPlaneAcceptor(socketServer: SocketServer, memoryPool, apiVersionManager) with ListenerReconfigurable { + override def metricPrefix(): String = DataPlaneAcceptor.MetricPrefix + override def threadPrefix(): String = DataPlaneAcceptor.ThreadPrefix + /** * Returns the listener name associated with this reconfigurable. Listener-specific * configs corresponding to this listener name are provided for reconfiguration. */ - override def listenerName(): ListenerName = ListenerName.normalised(endPoint.listener) + override def listenerName(): ListenerName = endPoint.listenerName /** * Returns the names of configs that may be reconfigured. @@ -451,7 +456,7 @@ class DataPlaneAcceptor(socketServer: SocketServer, val newNumNetworkThreads = configs.get(SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG).asInstanceOf[Int] if (newNumNetworkThreads != processors.length) { - info(s"Resizing network thread pool size for ${endPoint.listener} listener from ${processors.length} to $newNumNetworkThreads") + info(s"Resizing network thread pool size for ${endPoint.listenerName} listener from ${processors.length} to $newNumNetworkThreads") if (newNumNetworkThreads > processors.length) { addProcessors(newNumNetworkThreads - processors.length) } else if (newNumNetworkThreads < processors.length) { @@ -472,7 +477,7 @@ class DataPlaneAcceptor(socketServer: SocketServer, * Thread that accepts and configures new connections. There is one of these per endpoint. */ private[kafka] abstract class Acceptor(val socketServer: SocketServer, - val endPoint: Endpoint, + val endPoint: EndPoint, var config: KafkaConfig, nodeId: Int, val connectionQuotas: ConnectionQuotas, @@ -490,6 +495,9 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, val shouldRun = new AtomicBoolean(true) + def metricPrefix(): String + def threadPrefix(): String + private val sendBufferSize = config.socketSendBufferBytes private val recvBufferSize = config.socketReceiveBufferBytes private val listenBacklogSize = config.socketListenBacklogSize @@ -514,8 +522,8 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, // Build the metric name explicitly in order to keep the existing name for compatibility private val backwardCompatibilityMetricGroup = new KafkaMetricsGroup("kafka.network", "Acceptor") private val blockedPercentMeterMetricName = backwardCompatibilityMetricGroup.metricName( - "AcceptorBlockedPercent", - Map(ListenerMetricTag -> endPoint.listener).asJava) + s"${metricPrefix()}AcceptorBlockedPercent", + Map(ListenerMetricTag -> endPoint.listenerName.value).asJava) private val blockedPercentMeter = metricsGroup.newMeter(blockedPercentMeterMetricName,"blocked time", TimeUnit.NANOSECONDS) private var currentProcessorIndex = 0 private[network] val throttledSockets = new mutable.PriorityQueue[DelayedCloseSocket]() @@ -523,7 +531,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, private[network] val startedFuture = new CompletableFuture[Void]() val thread: KafkaThread = KafkaThread.nonDaemon( - s"data-plane-kafka-socket-acceptor-${endPoint.listener}-${endPoint.securityProtocol}-${endPoint.port}", + s"${threadPrefix()}-kafka-socket-acceptor-${endPoint.listenerName}-${endPoint.securityProtocol}-${endPoint.port}", this) def start(): Unit = synchronized { @@ -535,19 +543,19 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, serverChannel = openServerSocket(endPoint.host, endPoint.port, listenBacklogSize) debug(s"Opened endpoint ${endPoint.host}:${endPoint.port}") } - debug(s"Starting processors for listener ${endPoint.listener}") + debug(s"Starting processors for listener ${endPoint.listenerName}") processors.foreach(_.start()) - debug(s"Starting acceptor thread for listener ${endPoint.listener}") + debug(s"Starting acceptor thread for listener ${endPoint.listenerName}") thread.start() startedFuture.complete(null) started.set(true) } catch { case e: ClosedChannelException => - debug(s"Refusing to start acceptor for ${endPoint.listener} since the acceptor has already been shut down.") + debug(s"Refusing to start acceptor for ${endPoint.listenerName} since the acceptor has already been shut down.") startedFuture.completeExceptionally(e) case t: Throwable => - error(s"Unable to start acceptor for ${endPoint.listener}", t) - startedFuture.completeExceptionally(new RuntimeException(s"Unable to start acceptor for ${endPoint.listener}", t)) + error(s"Unable to start acceptor for ${endPoint.listenerName}", t) + startedFuture.completeExceptionally(new RuntimeException(s"Unable to start acceptor for ${endPoint.listenerName}", t)) } } @@ -628,7 +636,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, new InetSocketAddress(host, port) } val serverChannel = socketServer.socketFactory.openServerSocket( - endPoint.listener, + endPoint.listenerName.value(), socketAddress, listenBacklogSize, recvBufferSize) @@ -682,15 +690,14 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, private def accept(key: SelectionKey): Option[SocketChannel] = { val serverSocketChannel = key.channel().asInstanceOf[ServerSocketChannel] val socketChannel = serverSocketChannel.accept() - val listenerName = ListenerName.normalised(endPoint.listener) try { - connectionQuotas.inc(listenerName, socketChannel.socket.getInetAddress, blockedPercentMeter) + connectionQuotas.inc(endPoint.listenerName, socketChannel.socket.getInetAddress, blockedPercentMeter) configureAcceptedSocketChannel(socketChannel) Some(socketChannel) } catch { case e: TooManyConnectionsException => info(s"Rejected connection from ${e.ip}, address already has the configured maximum of ${e.count} connections.") - connectionQuotas.closeChannel(this, listenerName, socketChannel) + connectionQuotas.closeChannel(this, endPoint.listenerName, socketChannel) None case e: ConnectionThrottledException => val ip = socketChannel.socket.getInetAddress @@ -700,7 +707,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, None case e: IOException => error(s"Encountered an error while configuring the connection, closing it.", e) - connectionQuotas.closeChannel(this, listenerName, socketChannel) + connectionQuotas.closeChannel(this, endPoint.listenerName, socketChannel) None } } @@ -742,7 +749,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, def wakeup(): Unit = nioSelector.wakeup() def addProcessors(toCreate: Int): Unit = synchronized { - val listenerName = ListenerName.normalised(endPoint.listener) + val listenerName = endPoint.listenerName val securityProtocol = endPoint.securityProtocol val listenerProcessors = new ArrayBuffer[Processor]() @@ -762,7 +769,7 @@ private[kafka] abstract class Acceptor(val socketServer: SocketServer, listenerName: ListenerName, securityProtocol: SecurityProtocol, connectionDisconnectListeners: Seq[ConnectionDisconnectListener]): Processor = { - val name = s"data-plane-kafka-network-thread-$nodeId-${endPoint.listener}-${endPoint.securityProtocol}-$id" + val name = s"${threadPrefix()}-kafka-network-thread-$nodeId-${endPoint.listenerName}-${endPoint.securityProtocol}-$id" new Processor(id, time, config.socketRequestMaxBytes, @@ -873,7 +880,7 @@ private[kafka] class Processor( credentialProvider.tokenCache, time, logContext, - version => apiVersionManager.apiVersionResponse(0, version < 4) + version => apiVersionManager.apiVersionResponse(throttleTimeMs = 0, version < 4) ) ) @@ -1152,14 +1159,14 @@ private[kafka] class Processor( */ def accept(socketChannel: SocketChannel, mayBlock: Boolean, - acceptorBlockedPercentMeter: com.yammer.metrics.core.Meter): Boolean = { + acceptorIdlePercentMeter: com.yammer.metrics.core.Meter): Boolean = { val accepted = { if (newConnections.offer(socketChannel)) true else if (mayBlock) { val startNs = time.nanoseconds newConnections.put(socketChannel) - acceptorBlockedPercentMeter.mark(time.nanoseconds() - startNs) + acceptorIdlePercentMeter.mark(time.nanoseconds() - startNs) true } else false diff --git a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala index 0ee16a1cd78ed..bd80c0aca4d10 100644 --- a/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala +++ b/core/src/main/scala/kafka/raft/KafkaMetadataLog.scala @@ -16,6 +16,7 @@ */ package kafka.raft +import kafka.log.UnifiedLog import kafka.raft.KafkaMetadataLog.FullTruncation import kafka.raft.KafkaMetadataLog.RetentionMsBreach import kafka.raft.KafkaMetadataLog.RetentionSizeBreach @@ -24,14 +25,12 @@ import kafka.raft.KafkaMetadataLog.UnknownReason import kafka.utils.Logging import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.InvalidConfigurationException -import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record.{MemoryRecords, Records} -import org.apache.kafka.common.utils.LogContext import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} -import org.apache.kafka.raft.{Isolation, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, MetadataLogConfig, OffsetMetadata, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} -import org.apache.kafka.server.common.OffsetAndEpoch -import org.apache.kafka.server.config.ServerLogConfigs +import org.apache.kafka.raft.{Isolation, KafkaRaftClient, LogAppendInfo, LogFetchInfo, LogOffsetMetadata, OffsetAndEpoch, OffsetMetadata, ReplicatedLog, ValidOffsetAndEpoch} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler import org.apache.kafka.snapshot.FileRawSnapshotReader @@ -42,7 +41,7 @@ import org.apache.kafka.snapshot.RawSnapshotWriter import org.apache.kafka.snapshot.SnapshotPath import org.apache.kafka.snapshot.Snapshots import org.apache.kafka.storage.internals -import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, LogDirFailureChannel, LogStartOffsetIncrementReason, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, LogDirFailureChannel, LogStartOffsetIncrementReason, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.io.File @@ -60,11 +59,10 @@ final class KafkaMetadataLog private ( // polling thread when snapshots are created. This object is also used to store any opened snapshot reader. snapshots: mutable.TreeMap[OffsetAndEpoch, Option[FileRawSnapshotReader]], topicPartition: TopicPartition, - config: MetadataLogConfig, - nodeId: Int + config: MetadataLogConfig ) extends ReplicatedLog with Logging { - this.logIdent = s"[MetadataLog partition=$topicPartition, nodeId=$nodeId] " + this.logIdent = s"[MetadataLog partition=$topicPartition, nodeId=${config.nodeId}] " override def read(startOffset: Long, readIsolation: Isolation): LogFetchInfo = { val isolation = readIsolation match { @@ -73,14 +71,17 @@ final class KafkaMetadataLog private ( case _ => throw new IllegalArgumentException(s"Unhandled read isolation $readIsolation") } - val fetchInfo = log.read(startOffset, config.internalMaxFetchSizeInBytes, isolation, true) + val fetchInfo = log.read(startOffset, + maxLength = config.maxFetchSizeInBytes, + isolation = isolation, + minOneMessage = true) new LogFetchInfo( fetchInfo.records, new LogOffsetMetadata( fetchInfo.fetchOffsetMetadata.messageOffset, - Optional.of(new SegmentPosition( + Optional.of(SegmentPosition( fetchInfo.fetchOffsetMetadata.segmentBaseOffset, fetchInfo.fetchOffsetMetadata.relativePositionInSegment)) ) @@ -88,39 +89,34 @@ final class KafkaMetadataLog private ( } override def appendAsLeader(records: Records, epoch: Int): LogAppendInfo = { - if (records.sizeInBytes == 0) { + if (records.sizeInBytes == 0) throw new IllegalArgumentException("Attempt to append an empty record set") - } handleAndConvertLogAppendInfo( log.appendAsLeader(records.asInstanceOf[MemoryRecords], - epoch, - AppendOrigin.RAFT_LEADER + leaderEpoch = epoch, + origin = AppendOrigin.RAFT_LEADER, + requestLocal = RequestLocal.noCaching ) ) } - override def appendAsFollower(records: Records, epoch: Int): LogAppendInfo = { - if (records.sizeInBytes == 0) { + override def appendAsFollower(records: Records): LogAppendInfo = { + if (records.sizeInBytes == 0) throw new IllegalArgumentException("Attempt to append an empty record set") - } - handleAndConvertLogAppendInfo(log.appendAsFollower(records.asInstanceOf[MemoryRecords], epoch)) + handleAndConvertLogAppendInfo(log.appendAsFollower(records.asInstanceOf[MemoryRecords])) } private def handleAndConvertLogAppendInfo(appendInfo: internals.log.LogAppendInfo): LogAppendInfo = { - if (appendInfo.firstOffset == UnifiedLog.UNKNOWN_OFFSET) { - throw new CorruptRecordException(s"Append failed unexpectedly $appendInfo") - } else { + if (appendInfo.firstOffset != UnifiedLog.UnknownOffset) new LogAppendInfo(appendInfo.firstOffset, appendInfo.lastOffset) - } + else + throw new KafkaException(s"Append failed unexpectedly") } override def lastFetchedEpoch: Int = { - val latestEpoch = log.latestEpoch - if (latestEpoch.isPresent) - latestEpoch.get() - else { + log.latestEpoch.getOrElse { latestSnapshotId().map[Int] { snapshotId => val logEndOffset = endOffset().offset if (snapshotId.offset == startOffset && snapshotId.offset == logEndOffset) { @@ -138,17 +134,17 @@ final class KafkaMetadataLog private ( } override def endOffsetForEpoch(epoch: Int): OffsetAndEpoch = { - (log.endOffsetForEpoch(epoch).toScala, earliestSnapshotId().toScala) match { + (log.endOffsetForEpoch(epoch), earliestSnapshotId().toScala) match { case (Some(offsetAndEpoch), Some(snapshotId)) if ( offsetAndEpoch.offset == snapshotId.offset && - offsetAndEpoch.epoch() == epoch) => + offsetAndEpoch.leaderEpoch == epoch) => // The epoch is smaller than the smallest epoch on the log. Override the diverging // epoch to the oldest snapshot which should be the snapshot at the log start offset new OffsetAndEpoch(snapshotId.offset, snapshotId.epoch) case (Some(offsetAndEpoch), _) => - new OffsetAndEpoch(offsetAndEpoch.offset, offsetAndEpoch.epoch()) + new OffsetAndEpoch(offsetAndEpoch.offset, offsetAndEpoch.leaderEpoch) case (None, _) => new OffsetAndEpoch(endOffset.offset, lastFetchedEpoch) @@ -159,7 +155,7 @@ final class KafkaMetadataLog private ( val endOffsetMetadata = log.logEndOffsetMetadata new LogOffsetMetadata( endOffsetMetadata.messageOffset, - Optional.of(new SegmentPosition( + Optional.of(SegmentPosition( endOffsetMetadata.segmentBaseOffset, endOffsetMetadata.relativePositionInSegment) ) @@ -179,14 +175,14 @@ final class KafkaMetadataLog private ( } override def truncateToLatestSnapshot(): Boolean = { - val latestEpoch = log.latestEpoch.orElse(0) + val latestEpoch = log.latestEpoch.getOrElse(0) val (truncated, forgottenSnapshots) = latestSnapshotId().toScala match { case Some(snapshotId) if ( snapshotId.epoch > latestEpoch || (snapshotId.epoch == latestEpoch && snapshotId.offset > endOffset().offset) ) => // Truncate the log fully if the latest snapshot is greater than the log end offset - log.truncateFullyAndStartAt(snapshotId.offset, Optional.empty) + log.truncateFullyAndStartAt(snapshotId.offset) // Forget snapshots less than the log start offset snapshots synchronized { @@ -230,7 +226,7 @@ final class KafkaMetadataLog private ( override def highWatermark: LogOffsetMetadata = { val hwm = log.fetchOffsetSnapshot.highWatermark val segmentPosition: Optional[OffsetMetadata] = if (!hwm.messageOffsetOnly) { - Optional.of(new SegmentPosition(hwm.segmentBaseOffset, hwm.relativePositionInSegment)) + Optional.of(SegmentPosition(hwm.segmentBaseOffset, hwm.relativePositionInSegment)) } else { Optional.empty() } @@ -419,7 +415,7 @@ final class KafkaMetadataLog private ( */ private def readSnapshotTimestamp(snapshotId: OffsetAndEpoch): Option[Long] = { readSnapshot(snapshotId).toScala.map { reader => - Snapshots.lastContainedLogTimestamp(reader, new LogContext(logIdent)) + Snapshots.lastContainedLogTimestamp(reader) } } @@ -510,11 +506,10 @@ final class KafkaMetadataLog private ( // Keep deleting snapshots and segments as long as we exceed the retention size def shouldClean(snapshotId: OffsetAndEpoch): Option[SnapshotDeletionReason] = { snapshotSizes.get(snapshotId).flatMap { snapshotSize => - val logSize = log.size - if (logSize + snapshotTotalSize > config.retentionMaxBytes) { + if (log.size + snapshotTotalSize > config.retentionMaxBytes) { val oldSnapshotTotalSize = snapshotTotalSize snapshotTotalSize -= snapshotSize - Some(RetentionSizeBreach(logSize, oldSnapshotTotalSize, config.retentionMaxBytes)) + Some(RetentionSizeBreach(log.size, oldSnapshotTotalSize, config.retentionMaxBytes)) } else { None } @@ -556,8 +551,8 @@ final class KafkaMetadataLog private ( if (expiredSnapshots.nonEmpty) { scheduler.scheduleOnce( "delete-snapshot-files", - () => KafkaMetadataLog.deleteSnapshotFiles(log.dir.toPath, expiredSnapshots), - config.internalDeleteDelayMillis + () => KafkaMetadataLog.deleteSnapshotFiles(log.dir.toPath, expiredSnapshots, this), + config.fileDeleteDelayMs ) } } @@ -584,15 +579,11 @@ object KafkaMetadataLog extends Logging { dataDir: File, time: Time, scheduler: Scheduler, - config: MetadataLogConfig, - nodeId: Int + config: MetadataLogConfig ): KafkaMetadataLog = { val props = new Properties() - props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.internalMaxBatchSizeInBytes.toString) - if (config.internalSegmentBytes() != null) - props.setProperty(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, config.internalSegmentBytes().toString) - else - props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString) + props.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, config.maxBatchSizeInBytes.toString) + props.setProperty(TopicConfig.SEGMENT_BYTES_CONFIG, config.logSegmentBytes.toString) props.setProperty(TopicConfig.SEGMENT_MS_CONFIG, config.logSegmentMillis.toString) props.setProperty(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT.toString) @@ -602,7 +593,11 @@ object KafkaMetadataLog extends Logging { LogConfig.validate(props) val defaultLogConfig = new LogConfig(props) - if (defaultLogConfig.retentionMs >= 0) { + if (config.logSegmentBytes < config.logSegmentMinBytes) { + throw new InvalidConfigurationException( + s"Cannot set ${KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG} below ${config.logSegmentMinBytes}: ${config.logSegmentBytes}" + ) + } else if (defaultLogConfig.retentionMs >= 0) { throw new InvalidConfigurationException( s"Cannot set ${TopicConfig.RETENTION_MS_CONFIG} above -1: ${defaultLogConfig.retentionMs}." ) @@ -612,20 +607,21 @@ object KafkaMetadataLog extends Logging { ) } - val log = UnifiedLog.create( - dataDir, - defaultLogConfig, - 0L, - 0L, - scheduler, - new BrokerTopicStats, - time, - Integer.MAX_VALUE, - new ProducerStateManagerConfig(Integer.MAX_VALUE, false), - Integer.MAX_VALUE, - new LogDirFailureChannel(5), - false, - Optional.of(topicId) + val log = UnifiedLog( + dir = dataDir, + config = defaultLogConfig, + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = scheduler, + brokerTopicStats = new BrokerTopicStats, + time = time, + maxTransactionTimeoutMs = Int.MaxValue, + producerStateManagerConfig = new ProducerStateManagerConfig(Int.MaxValue, false), + producerIdExpirationCheckIntervalMs = Int.MaxValue, + logDirFailureChannel = new LogDirFailureChannel(5), + lastShutdownClean = false, + topicId = Some(topicId), + keepPartitionMetadataFile = true ) val metadataLog = new KafkaMetadataLog( @@ -634,12 +630,12 @@ object KafkaMetadataLog extends Logging { scheduler, recoverSnapshots(log), topicPartition, - config, - nodeId + config ) - if (defaultLogConfig.segmentSize() < config.logSegmentBytes()) { - metadataLog.error(s"Overriding ${MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG} is only supported for testing. Setting " + + // Print a warning if users have overridden the internal config + if (config.logSegmentMinBytes != KafkaRaftClient.MAX_BATCH_SIZE_BYTES) { + metadataLog.error(s"Overriding ${KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG} is only supported for testing. Setting " + s"this value too low may lead to an inability to write batches of metadata records.") } @@ -701,10 +697,12 @@ object KafkaMetadataLog extends Logging { private def deleteSnapshotFiles( logDir: Path, - expiredSnapshots: mutable.TreeMap[OffsetAndEpoch, Option[FileRawSnapshotReader]]): Unit = { + expiredSnapshots: mutable.TreeMap[OffsetAndEpoch, Option[FileRawSnapshotReader]], + logging: Logging + ): Unit = { expiredSnapshots.foreach { case (snapshotId, snapshotReader) => snapshotReader.foreach { reader => - Utils.closeQuietly(reader, "FileRawSnapshotReader") + Utils.closeQuietly(reader, "reader") } Snapshots.deleteIfExists(logDir, snapshotId) } diff --git a/core/src/main/scala/kafka/raft/RaftManager.scala b/core/src/main/scala/kafka/raft/RaftManager.scala new file mode 100644 index 0000000000000..baaccab9d19b2 --- /dev/null +++ b/core/src/main/scala/kafka/raft/RaftManager.scala @@ -0,0 +1,301 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.raft + +import java.io.File +import java.net.InetSocketAddress +import java.nio.file.Files +import java.nio.file.Paths +import java.util.OptionalInt +import java.util.concurrent.CompletableFuture +import java.util.{Map => JMap} +import java.util.{Collection => JCollection} +import kafka.log.LogManager +import kafka.log.UnifiedLog +import kafka.server.KafkaConfig +import kafka.utils.CoreUtils +import kafka.utils.Logging +import org.apache.kafka.clients.{ApiVersions, ManualMetadataUpdater, MetadataRecoveryStrategy, NetworkClient} +import org.apache.kafka.common.KafkaException +import org.apache.kafka.common.Node +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.Uuid +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.network.{ChannelBuilders, ListenerName, NetworkReceive, Selectable, Selector} +import org.apache.kafka.common.protocol.ApiMessage +import org.apache.kafka.common.requests.RequestContext +import org.apache.kafka.common.requests.RequestHeader +import org.apache.kafka.common.security.JaasContext +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.utils.{LogContext, Time, Utils} +import org.apache.kafka.raft.{ExternalKRaftMetrics, Endpoints, FileQuorumStateStore, KafkaNetworkChannel, KafkaRaftClient, KafkaRaftClientDriver, LeaderAndEpoch, QuorumConfig, RaftClient, ReplicatedLog} +import org.apache.kafka.server.ProcessRole +import org.apache.kafka.server.common.Feature +import org.apache.kafka.server.common.serialization.RecordSerde +import org.apache.kafka.server.util.{FileLock, KafkaScheduler} +import org.apache.kafka.server.fault.FaultHandler +import org.apache.kafka.server.util.timer.SystemTimer + +import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters._ + +object KafkaRaftManager { + private def createLogDirectory(logDir: File, logDirName: String): File = { + val logDirPath = logDir.getAbsolutePath + val dir = new File(logDirPath, logDirName) + Files.createDirectories(dir.toPath) + dir + } + + private def lockDataDir(dataDir: File): FileLock = { + val lock = new FileLock(new File(dataDir, LogManager.LockFileName)) + + if (!lock.tryLock()) { + throw new KafkaException( + s"Failed to acquire lock on file .lock in ${lock.file.getParent}. A Kafka instance in another process or " + + "thread is using this directory." + ) + } + + lock + } + + /** + * Test if the configured metadata log dir is one of the data log dirs. + */ + private def hasDifferentLogDir(config: KafkaConfig): Boolean = { + !config + .logDirs + .map(Paths.get(_).toAbsolutePath) + .contains(Paths.get(config.metadataLogDir).toAbsolutePath) + } +} + +trait RaftManager[T] { + def handleRequest( + context: RequestContext, + header: RequestHeader, + request: ApiMessage, + createdTimeMs: Long + ): CompletableFuture[ApiMessage] + + def register( + listener: RaftClient.Listener[T] + ): Unit + + def leaderAndEpoch: LeaderAndEpoch + + def client: RaftClient[T] + + def replicatedLog: ReplicatedLog + + def voterNode(id: Int, listener: ListenerName): Option[Node] +} + +class KafkaRaftManager[T]( + clusterId: String, + config: KafkaConfig, + metadataLogDirUuid: Uuid, + recordSerde: RecordSerde[T], + topicPartition: TopicPartition, + topicId: Uuid, + time: Time, + metrics: Metrics, + externalKRaftMetrics: ExternalKRaftMetrics, + threadNamePrefixOpt: Option[String], + val controllerQuorumVotersFuture: CompletableFuture[JMap[Integer, InetSocketAddress]], + bootstrapServers: JCollection[InetSocketAddress], + localListeners: Endpoints, + fatalFaultHandler: FaultHandler +) extends RaftManager[T] with Logging { + + val apiVersions = new ApiVersions() + private val raftConfig = new QuorumConfig(config) + private val threadNamePrefix = threadNamePrefixOpt.getOrElse("kafka-raft") + private val logContext = new LogContext(s"[RaftManager id=${config.nodeId}] ") + this.logIdent = logContext.logPrefix() + + private val scheduler = new KafkaScheduler(1, true, threadNamePrefix + "-scheduler") + scheduler.startup() + + private val dataDir = createDataDir() + + private val dataDirLock = { + // Acquire the log dir lock if the metadata log dir is different from the log dirs + val differentMetadataLogDir = KafkaRaftManager.hasDifferentLogDir(config) + + // Or this node is only a controller + val isOnlyController = config.processRoles == Set(ProcessRole.ControllerRole) + + if (differentMetadataLogDir || isOnlyController) { + Some(KafkaRaftManager.lockDataDir(new File(config.metadataLogDir))) + } else { + None + } + } + + override val replicatedLog: ReplicatedLog = buildMetadataLog() + private val netChannel = buildNetworkChannel() + private val expirationTimer = new SystemTimer("raft-expiration-executor") + private val expirationService = new TimingWheelExpirationService(expirationTimer) + override val client: KafkaRaftClient[T] = buildRaftClient() + private val clientDriver = new KafkaRaftClientDriver[T](client, threadNamePrefix, fatalFaultHandler, logContext) + + def startup(): Unit = { + client.initialize( + controllerQuorumVotersFuture.get(), + new FileQuorumStateStore(new File(dataDir, FileQuorumStateStore.DEFAULT_FILE_NAME)), + metrics, + externalKRaftMetrics + ) + netChannel.start() + clientDriver.start() + } + + def shutdown(): Unit = { + CoreUtils.swallow(expirationService.shutdown(), this) + Utils.closeQuietly(expirationTimer, "expiration timer") + CoreUtils.swallow(clientDriver.shutdown(), this) + CoreUtils.swallow(scheduler.shutdown(), this) + Utils.closeQuietly(netChannel, "net channel") + Utils.closeQuietly(replicatedLog, "replicated log") + CoreUtils.swallow(dataDirLock.foreach(_.destroy()), this) + } + + override def register( + listener: RaftClient.Listener[T] + ): Unit = { + client.register(listener) + } + + override def handleRequest( + context: RequestContext, + header: RequestHeader, + request: ApiMessage, + createdTimeMs: Long + ): CompletableFuture[ApiMessage] = { + clientDriver.handleRequest(context, header, request, createdTimeMs) + } + + private def buildRaftClient(): KafkaRaftClient[T] = { + new KafkaRaftClient( + OptionalInt.of(config.nodeId), + metadataLogDirUuid, + recordSerde, + netChannel, + replicatedLog, + time, + expirationService, + logContext, + // Controllers should always flush the log on replication because they may become voters + config.processRoles.contains(ProcessRole.ControllerRole), + clusterId, + bootstrapServers, + localListeners, + Feature.KRAFT_VERSION.supportedVersionRange(), + raftConfig + ) + } + + private def buildNetworkChannel(): KafkaNetworkChannel = { + val (listenerName, netClient) = buildNetworkClient() + new KafkaNetworkChannel(time, listenerName, netClient, config.quorumConfig.requestTimeoutMs, threadNamePrefix) + } + + private def createDataDir(): File = { + val logDirName = UnifiedLog.logDirName(topicPartition) + KafkaRaftManager.createLogDirectory(new File(config.metadataLogDir), logDirName) + } + + private def buildMetadataLog(): KafkaMetadataLog = { + KafkaMetadataLog( + topicPartition, + topicId, + dataDir, + time, + scheduler, + config = MetadataLogConfig(config, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, KafkaRaftClient.MAX_FETCH_SIZE_BYTES) + ) + } + + private def buildNetworkClient(): (ListenerName, NetworkClient) = { + val controllerListenerName = new ListenerName(config.controllerListenerNames.head) + val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse( + controllerListenerName, + SecurityProtocol.forName(controllerListenerName.value()) + ) + val channelBuilder = ChannelBuilders.clientChannelBuilder( + controllerSecurityProtocol, + JaasContext.Type.SERVER, + config, + controllerListenerName, + config.saslMechanismControllerProtocol, + time, + logContext + ) + + val metricGroupPrefix = "raft-channel" + val collectPerConnectionMetrics = false + + val selector = new Selector( + NetworkReceive.UNLIMITED, + config.connectionsMaxIdleMs, + metrics, + time, + metricGroupPrefix, + Map.empty[String, String].asJava, + collectPerConnectionMetrics, + channelBuilder, + logContext + ) + + val clientId = s"raft-client-${config.nodeId}" + val maxInflightRequestsPerConnection = 1 + val reconnectBackoffMs = 50 + val reconnectBackoffMsMs = 500 + val discoverBrokerVersions = true + + val networkClient = new NetworkClient( + selector, + new ManualMetadataUpdater(), + clientId, + maxInflightRequestsPerConnection, + reconnectBackoffMs, + reconnectBackoffMsMs, + Selectable.USE_DEFAULT_BUFFER_SIZE, + config.socketReceiveBufferBytes, + config.quorumConfig.requestTimeoutMs, + config.connectionSetupTimeoutMs, + config.connectionSetupTimeoutMaxMs, + time, + discoverBrokerVersions, + apiVersions, + logContext, + MetadataRecoveryStrategy.NONE + ) + + (controllerListenerName, networkClient) + } + + override def leaderAndEpoch: LeaderAndEpoch = { + client.leaderAndEpoch + } + + override def voterNode(id: Int, listener: ListenerName): Option[Node] = { + client.voterNode(id, listener).toScala + } +} diff --git a/core/src/main/scala/kafka/raft/SegmentPosition.scala b/core/src/main/scala/kafka/raft/SegmentPosition.scala new file mode 100644 index 0000000000000..eb6a59f35d3bc --- /dev/null +++ b/core/src/main/scala/kafka/raft/SegmentPosition.scala @@ -0,0 +1,23 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.raft + +import org.apache.kafka.raft.OffsetMetadata + +case class SegmentPosition(baseOffset: Long, relativePosition: Int) extends OffsetMetadata { + override def toString: String = s"(segmentBaseOffset=$baseOffset,relativePositionInSegment=$relativePosition)" +} diff --git a/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala b/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala new file mode 100644 index 0000000000000..3c330fb6f2ec2 --- /dev/null +++ b/core/src/main/scala/kafka/raft/TimingWheelExpirationService.scala @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.raft + +import java.util.concurrent.CompletableFuture +import org.apache.kafka.common.errors.TimeoutException +import org.apache.kafka.raft.ExpirationService +import org.apache.kafka.server.util.ShutdownableThread +import org.apache.kafka.server.util.timer.{Timer, TimerTask} + +object TimingWheelExpirationService { + private val WorkTimeoutMs: Long = 200L + + private class TimerTaskCompletableFuture[T](delayMs: Long) extends TimerTask(delayMs) { + val future = new CompletableFuture[T] + override def run(): Unit = { + future.completeExceptionally(new TimeoutException( + s"Future failed to be completed before timeout of $delayMs ms was reached")) + } + } +} + +class TimingWheelExpirationService(timer: Timer) extends ExpirationService { + import TimingWheelExpirationService._ + + private val expirationReaper = new ExpiredOperationReaper() + + expirationReaper.start() + + override def failAfter[T](timeoutMs: Long): CompletableFuture[T] = { + val task = new TimerTaskCompletableFuture[T](timeoutMs) + task.future.whenComplete { (_, _) => + task.cancel() + } + timer.add(task) + task.future + } + + private class ExpiredOperationReaper extends ShutdownableThread("raft-expiration-reaper", false) { + + override def doWork(): Unit = { + timer.advanceClock(WorkTimeoutMs) + } + } + + def shutdown(): Unit = { + expirationReaper.shutdown() + } +} diff --git a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala index 42580250b5b2f..764693ca3ace8 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherManager.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherManager.scala @@ -22,11 +22,9 @@ import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.common.utils.Utils import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.PartitionFetchState import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters._ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: String, clientId: String, numFetchers: Int) extends Logging { @@ -45,7 +43,8 @@ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: Stri metricsGroup.newGauge("MaxLag", () => { // current max lag across all fetchers/topics/partitions fetcherThreadMap.values.foldLeft(0L) { (curMaxLagAll, fetcherThread) => - val maxLagThread = fetcherThread.fetcherLagStats.stats.values.stream().mapToLong(v => v.lag).max().orElse(0L) + val maxLagThread = fetcherThread.fetcherLagStats.stats.values.foldLeft(0L)((curMaxLagThread, lagMetrics) => + math.max(curMaxLagThread, lagMetrics.lag)) math.max(curMaxLagAll, maxLagThread) } }, tags) @@ -71,7 +70,7 @@ abstract class AbstractFetcherManager[T <: AbstractFetcherThread](val name: Stri if (id.fetcherId >= newSize) thread.shutdown() partitionStates.foreachEntry { (topicPartition, currentFetchState) => - val initialFetchState = InitialFetchState(currentFetchState.topicId.toScala, thread.leader.brokerEndPoint(), + val initialFetchState = InitialFetchState(currentFetchState.topicId, thread.leader.brokerEndPoint(), currentLeaderEpoch = currentFetchState.currentLeaderEpoch, initOffset = currentFetchState.fetchOffset) allRemovedPartitionsMap += topicPartition -> initialFetchState diff --git a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala index 8dd621d19509f..be663d19ec808 100755 --- a/core/src/main/scala/kafka/server/AbstractFetcherThread.scala +++ b/core/src/main/scala/kafka/server/AbstractFetcherThread.scala @@ -18,8 +18,9 @@ package kafka.server import com.yammer.metrics.core.Meter +import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.CoreUtils.inLock -import kafka.utils.Logging +import kafka.utils.{Logging, Pool} import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.PartitionStates import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset @@ -29,14 +30,9 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{FileRecords, MemoryRecords, Records} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.common.requests._ - +import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{ClientIdAndBroker, InvalidRecordException, TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch -import org.apache.kafka.server.LeaderEndPoint -import org.apache.kafka.server.ResultWithPartitions -import org.apache.kafka.server.ReplicaState -import org.apache.kafka.server.PartitionFetchState -import org.apache.kafka.server.log.remote.storage.RetriableRemoteStorageException import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.util.ShutdownableThread import org.apache.kafka.storage.internals.log.LogAppendInfo @@ -45,12 +41,12 @@ import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.nio.ByteBuffer import java.util import java.util.Optional -import java.util.concurrent.{ConcurrentHashMap, TimeUnit} +import java.util.concurrent.TimeUnit import java.util.concurrent.atomic.AtomicLong import java.util.concurrent.locks.ReentrantLock import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional} +import scala.jdk.OptionConverters.{RichOption, RichOptionalInt} import scala.math._ /** @@ -82,24 +78,21 @@ abstract class AbstractFetcherThread(name: String, /* callbacks to be defined in subclass */ // process fetched data - protected def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] + protected def processPartitionData(topicPartition: TopicPartition, + fetchOffset: Long, + partitionData: FetchData): Option[LogAppendInfo] protected def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit protected def truncateFullyAndStartAt(topicPartition: TopicPartition, offset: Long): Unit - protected def latestEpoch(topicPartition: TopicPartition): Optional[Integer] + protected def latestEpoch(topicPartition: TopicPartition): Option[Int] protected def logStartOffset(topicPartition: TopicPartition): Long protected def logEndOffset(topicPartition: TopicPartition): Long - protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] + protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] override def shutdown(): Unit = { initiateShutdown() @@ -120,11 +113,9 @@ abstract class AbstractFetcherThread(name: String, private def maybeFetch(): Unit = { val fetchRequestOpt = inLock(partitionMapLock) { - val result = leader.buildFetch(partitionStates.partitionStateMap) - val fetchRequestOpt = result.result - val partitionsWithError = result.partitionsWithError + val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = leader.buildFetch(partitionStates.partitionStateMap.asScala) - handlePartitionsWithErrors(partitionsWithError.asScala, "maybeFetch") + handlePartitionsWithErrors(partitionsWithError, "maybeFetch") if (fetchRequestOpt.isEmpty) { trace(s"There are no active partitions. Back off for $fetchBackOffMs ms before sending a fetch request") @@ -134,9 +125,9 @@ abstract class AbstractFetcherThread(name: String, fetchRequestOpt } - fetchRequestOpt.ifPresent(replicaFetch => - processFetchRequest(replicaFetch.partitionData, replicaFetch.fetchRequest) - ) + fetchRequestOpt.foreach { case ReplicaFetch(sessionPartitions, fetchRequest) => + processFetchRequest(sessionPartitions, fetchRequest) + } } // deal with partitions with errors, potentially due to leadership changes @@ -157,7 +148,7 @@ abstract class AbstractFetcherThread(name: String, partitionStates.partitionStateMap.forEach { (tp, state) => if (state.isTruncating) { - latestEpoch(tp).toScala match { + latestEpoch(tp) match { case Some(epoch) => partitionsWithEpochs += tp -> new EpochData() .setPartition(tp.partition) @@ -210,13 +201,11 @@ abstract class AbstractFetcherThread(name: String, * occur during truncation. */ private def truncateToEpochEndOffsets(latestEpochsForPartitions: Map[TopicPartition, EpochData]): Unit = { - val endOffsets = leader.fetchEpochEndOffsets(latestEpochsForPartitions.asJava) - // Ensure we hold a lock during truncation - + val endOffsets = leader.fetchEpochEndOffsets(latestEpochsForPartitions) + //Ensure we hold a lock during truncation. inLock(partitionMapLock) { //Check no leadership and no leader epoch changes happened whilst we were unlocked, fetching epochs - - val epochEndOffsets = endOffsets.asScala.filter { case (tp, _) => + val epochEndOffsets = endOffsets.filter { case (tp, _) => val curPartitionState = partitionStates.stateValue(tp) val partitionEpochRequest = latestEpochsForPartitions.getOrElse(tp, { throw new IllegalStateException( @@ -226,18 +215,18 @@ abstract class AbstractFetcherThread(name: String, curPartitionState != null && leaderEpochInRequest == curPartitionState.currentLeaderEpoch } - val result = maybeTruncateToEpochEndOffsets(epochEndOffsets, latestEpochsForPartitions) - handlePartitionsWithErrors(result.partitionsWithError.asScala, "truncateToEpochEndOffsets") - updateFetchOffsetAndMaybeMarkTruncationComplete(result.result) + val ResultWithPartitions(fetchOffsets, partitionsWithError) = maybeTruncateToEpochEndOffsets(epochEndOffsets, latestEpochsForPartitions) + handlePartitionsWithErrors(partitionsWithError, "truncateToEpochEndOffsets") + updateFetchOffsetAndMaybeMarkTruncationComplete(fetchOffsets) } } // Visibility for unit tests protected[server] def truncateOnFetchResponse(epochEndOffsets: Map[TopicPartition, EpochEndOffset]): Unit = { inLock(partitionMapLock) { - val result = maybeTruncateToEpochEndOffsets(epochEndOffsets, Map.empty) - handlePartitionsWithErrors(result.partitionsWithError.asScala, "truncateOnFetchResponse") - updateFetchOffsetAndMaybeMarkTruncationComplete(result.result) + val ResultWithPartitions(fetchOffsets, partitionsWithError) = maybeTruncateToEpochEndOffsets(epochEndOffsets, Map.empty) + handlePartitionsWithErrors(partitionsWithError, "truncateOnFetchResponse") + updateFetchOffsetAndMaybeMarkTruncationComplete(fetchOffsets) } } @@ -292,7 +281,7 @@ abstract class AbstractFetcherThread(name: String, } } - new ResultWithPartitions(fetchOffsets, partitionsWithError.asJava) + ResultWithPartitions(fetchOffsets, partitionsWithError) } /** @@ -315,8 +304,7 @@ abstract class AbstractFetcherThread(name: String, } } - // visible for testing - private[server] def processFetchRequest(sessionPartitions: util.Map[TopicPartition, FetchRequest.PartitionData], + private def processFetchRequest(sessionPartitions: util.Map[TopicPartition, FetchRequest.PartitionData], fetchRequest: FetchRequest.Builder): Unit = { val partitionsWithError = mutable.Set[TopicPartition]() val divergingEndOffsets = mutable.Map.empty[TopicPartition, EpochEndOffset] @@ -324,7 +312,7 @@ abstract class AbstractFetcherThread(name: String, try { trace(s"Sending fetch request $fetchRequest") - responseData = leader.fetch(fetchRequest).asScala + responseData = leader.fetch(fetchRequest) } catch { case t: Throwable => if (isRunning) { @@ -342,15 +330,10 @@ abstract class AbstractFetcherThread(name: String, responseData.foreachEntry { (topicPartition, partitionData) => Option(partitionStates.stateValue(topicPartition)).foreach { currentFetchState => // It's possible that a partition is removed and re-added or truncated when there is a pending fetch request. - // In this case, we only want to process the fetch response if: - // - the partition state is ready for fetch - // - the current offset is the same as the offset requested - // - the current leader epoch is the same as the leader epoch requested + // In this case, we only want to process the fetch response if the partition state is ready for fetch and + // the current offset is the same as the offset requested. val fetchPartitionData = sessionPartitions.get(topicPartition) - if (fetchPartitionData != null && - fetchPartitionData.fetchOffset == currentFetchState.fetchOffset && - fetchPartitionData.currentLeaderEpoch.map[Boolean](_ == currentFetchState.currentLeaderEpoch).orElse(true) && - currentFetchState.isReadyForFetch) { + if (fetchPartitionData != null && fetchPartitionData.fetchOffset == currentFetchState.fetchOffset && currentFetchState.isReadyForFetch) { Errors.forCode(partitionData.errorCode) match { case Errors.NONE => try { @@ -365,16 +348,10 @@ abstract class AbstractFetcherThread(name: String, .setLeaderEpoch(partitionData.divergingEpoch.epoch) .setEndOffset(partitionData.divergingEpoch.endOffset) } else { - /* Once we hand off the partition data to the subclass, we can't mess with it any more in this thread - * - * When appending batches to the log only append record batches up to the leader epoch when the FETCH - * request was handled. This is done to make sure that logs are not inconsistent because of log - * truncation and append after the FETCH request was handled. See KAFKA-18723 for more details. - */ + // Once we hand off the partition data to the subclass, we can't mess with it any more in this thread val logAppendInfoOpt = processPartitionData( topicPartition, currentFetchState.fetchOffset, - currentFetchState.currentLeaderEpoch, partitionData ) @@ -387,10 +364,10 @@ abstract class AbstractFetcherThread(name: String, // ReplicaDirAlterThread may have removed topicPartition from the partitionStates after processing the partition data if ((validBytes > 0 || currentFetchState.lag.isEmpty) && partitionStates.contains(topicPartition)) { val lastFetchedEpoch = - if (logAppendInfo.lastLeaderEpoch.isPresent) logAppendInfo.lastLeaderEpoch else currentFetchState.lastFetchedEpoch + if (logAppendInfo.lastLeaderEpoch.isPresent) logAppendInfo.lastLeaderEpoch.toScala else currentFetchState.lastFetchedEpoch // Update partitionStates only if there is no exception during processPartitionData - val newFetchState = new PartitionFetchState(currentFetchState.topicId, nextOffset, Optional.of(lag), - currentFetchState.currentLeaderEpoch, ReplicaState.FETCHING, lastFetchedEpoch) + val newFetchState = PartitionFetchState(currentFetchState.topicId, nextOffset, Some(lag), + currentFetchState.currentLeaderEpoch, state = Fetching, lastFetchedEpoch) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) if (validBytes > 0) fetcherStats.byteRate.mark(validBytes) } @@ -483,9 +460,9 @@ abstract class AbstractFetcherThread(name: String, partitionMapLock.lockInterruptibly() try { Option(partitionStates.stateValue(topicPartition)).foreach { state => - val newState = new PartitionFetchState(state.topicId, math.min(truncationOffset, state.fetchOffset), - state.lag, state.currentLeaderEpoch, state.delay, ReplicaState.TRUNCATING, - Optional.empty()) + val newState = PartitionFetchState(state.topicId, math.min(truncationOffset, state.fetchOffset), + state.lag, state.currentLeaderEpoch, state.delay, state = Truncating, + lastFetchedEpoch = None) partitionStates.updateAndMoveToEnd(topicPartition, newState) partitionMapCond.signalAll() } @@ -515,12 +492,12 @@ abstract class AbstractFetcherThread(name: String, // With old message format, `latestEpoch` will be empty and we use Truncating state // to truncate to high watermark. val lastFetchedEpoch = latestEpoch(tp) - val state = if (lastFetchedEpoch.isPresent) ReplicaState.FETCHING else ReplicaState.TRUNCATING - new PartitionFetchState(initialFetchState.topicId.toJava, initialFetchState.initOffset, Optional.empty(), initialFetchState.currentLeaderEpoch, + val state = if (lastFetchedEpoch.nonEmpty) Fetching else Truncating + PartitionFetchState(initialFetchState.topicId, initialFetchState.initOffset, None, initialFetchState.currentLeaderEpoch, state, lastFetchedEpoch) } else { - new PartitionFetchState(initialFetchState.topicId.toJava, initialFetchState.initOffset, Optional.empty(), initialFetchState.currentLeaderEpoch, - ReplicaState.TRUNCATING, Optional.empty()) + PartitionFetchState(initialFetchState.topicId, initialFetchState.initOffset, None, initialFetchState.currentLeaderEpoch, + state = Truncating, lastFetchedEpoch = None) } } @@ -546,7 +523,7 @@ abstract class AbstractFetcherThread(name: String, partitions.foreach { tp => val currentState = partitionStates.stateValue(tp) if (currentState != null) { - val updatedState = currentState.updateTopicId(topicIds(tp.topic).toJava) + val updatedState = currentState.updateTopicId(topicIds(tp.topic)) partitionStates.update(tp, updatedState) } } @@ -567,10 +544,10 @@ abstract class AbstractFetcherThread(name: String, case Some(offsetTruncationState) => val lastFetchedEpoch = latestEpoch(topicPartition) val state = if (leader.isTruncationOnFetchSupported || offsetTruncationState.truncationCompleted) - ReplicaState.FETCHING + Fetching else - ReplicaState.TRUNCATING - new PartitionFetchState(currentFetchState.topicId, offsetTruncationState.offset, currentFetchState.lag, + Truncating + PartitionFetchState(currentFetchState.topicId, offsetTruncationState.offset, currentFetchState.lag, currentFetchState.currentLeaderEpoch, currentFetchState.delay, state, lastFetchedEpoch) case None => currentFetchState } @@ -623,34 +600,32 @@ abstract class AbstractFetcherThread(name: String, // get (leader epoch, end offset) pair that corresponds to the largest leader epoch // less than or equal to the requested epoch. - val endOffsetForEpochOpt = endOffsetForEpoch(tp, leaderEpochOffset.leaderEpoch) - if (endOffsetForEpochOpt.isPresent) { - val offsetAndEpoch = endOffsetForEpochOpt.get - val followerEndOffset = offsetAndEpoch.offset - val followerEpoch = offsetAndEpoch.epoch() - if (followerEpoch != leaderEpochOffset.leaderEpoch) { - // the follower does not know about the epoch that leader replied with - // we truncate to the end offset of the largest epoch that is smaller than the - // epoch the leader replied with, and send another offset for leader epoch request - val intermediateOffsetToTruncateTo = min(followerEndOffset, replicaEndOffset) - info(s"Based on replica's leader epoch, leader replied with epoch ${leaderEpochOffset.leaderEpoch} " + - s"unknown to the replica for $tp. " + - s"Will truncate to $intermediateOffsetToTruncateTo and send another leader epoch request to the leader.") - OffsetTruncationState(intermediateOffsetToTruncateTo, truncationCompleted = false) - } else { - val offsetToTruncateTo = min(followerEndOffset, leaderEpochOffset.endOffset) - OffsetTruncationState(min(offsetToTruncateTo, replicaEndOffset), truncationCompleted = true) - } - } else { - // This can happen if the follower was not tracking leader epochs at that point (before the - // upgrade, or if this broker is new). Since the leader replied with epoch < - // requested epoch from follower, so should be safe to truncate to leader's - // offset (this is the same behavior as post-KIP-101 and pre-KIP-279) - warn(s"Based on replica's leader epoch, leader replied with epoch ${leaderEpochOffset.leaderEpoch} " + - s"below any replica's tracked epochs for $tp. " + - s"The leader's offset only ${leaderEpochOffset.endOffset} will be used for truncation.") - - OffsetTruncationState(min(leaderEpochOffset.endOffset, replicaEndOffset), truncationCompleted = true) + endOffsetForEpoch(tp, leaderEpochOffset.leaderEpoch) match { + case Some(offsetAndEpoch) => + val followerEndOffset = offsetAndEpoch.offset + val followerEpoch = offsetAndEpoch.leaderEpoch + if (followerEpoch != leaderEpochOffset.leaderEpoch) { + // the follower does not know about the epoch that leader replied with + // we truncate to the end offset of the largest epoch that is smaller than the + // epoch the leader replied with, and send another offset for leader epoch request + val intermediateOffsetToTruncateTo = min(followerEndOffset, replicaEndOffset) + info(s"Based on replica's leader epoch, leader replied with epoch ${leaderEpochOffset.leaderEpoch} " + + s"unknown to the replica for $tp. " + + s"Will truncate to $intermediateOffsetToTruncateTo and send another leader epoch request to the leader.") + OffsetTruncationState(intermediateOffsetToTruncateTo, truncationCompleted = false) + } else { + val offsetToTruncateTo = min(followerEndOffset, leaderEpochOffset.endOffset) + OffsetTruncationState(min(offsetToTruncateTo, replicaEndOffset), truncationCompleted = true) + } + case None => + // This can happen if the follower was not tracking leader epochs at that point (before the + // upgrade, or if this broker is new). Since the leader replied with epoch < + // requested epoch from follower, so should be safe to truncate to leader's + // offset (this is the same behavior as post-KIP-101 and pre-KIP-279) + warn(s"Based on replica's leader epoch, leader replied with epoch ${leaderEpochOffset.leaderEpoch} " + + s"below any replica's tracked epochs for $tp. " + + s"The leader's offset only ${leaderEpochOffset.endOffset} will be used for truncation.") + OffsetTruncationState(min(leaderEpochOffset.endOffset, replicaEndOffset), truncationCompleted = true) } } } @@ -679,8 +654,8 @@ abstract class AbstractFetcherThread(name: String, truncate(topicPartition, OffsetTruncationState(leaderEndOffset, truncationCompleted = true)) fetcherLagStats.getAndMaybePut(topicPartition).lag = 0 - new PartitionFetchState(topicId.toJava, leaderEndOffset, Optional.of(0L), currentLeaderEpoch, - ReplicaState.FETCHING, latestEpoch(topicPartition)) + PartitionFetchState(topicId, leaderEndOffset, Some(0), currentLeaderEpoch, + state = Fetching, lastFetchedEpoch = latestEpoch(topicPartition)) } else { /** * If the leader's log end offset is greater than the follower's log end offset, there are two possibilities: @@ -719,8 +694,8 @@ abstract class AbstractFetcherThread(name: String, val initialLag = leaderEndOffset - offsetToFetch fetcherLagStats.getAndMaybePut(topicPartition).lag = initialLag - new PartitionFetchState(topicId.toJava, offsetToFetch, Optional.of(initialLag), currentLeaderEpoch, - ReplicaState.FETCHING, latestEpoch(topicPartition)) + PartitionFetchState(topicId, offsetToFetch, Some(initialLag), currentLeaderEpoch, + state = Fetching, lastFetchedEpoch = latestEpoch(topicPartition)) } } @@ -742,7 +717,7 @@ abstract class AbstractFetcherThread(name: String, fetchState: PartitionFetchState, leaderEpochInRequest: Optional[Integer]): Boolean = { try { - val newFetchState = fetchOffsetAndTruncate(topicPartition, fetchState.topicId.toScala, fetchState.currentLeaderEpoch) + val newFetchState = fetchOffsetAndTruncate(topicPartition, fetchState.topicId, fetchState.currentLeaderEpoch) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) info(s"Current offset ${fetchState.fetchOffset} for partition $topicPartition is " + s"out of range, which typically implies a leader change. Reset fetch offset to ${newFetchState.fetchOffset}") @@ -787,7 +762,7 @@ abstract class AbstractFetcherThread(name: String, // TODO: use fetchTierStateMachine.maybeAdvanceState when implementing async tiering logic in KAFKA-13560 - fetcherLagStats.getAndMaybePut(topicPartition).lag = newFetchState.lag.orElse(0L) + fetcherLagStats.getAndMaybePut(topicPartition).lag = newFetchState.lag.getOrElse(0) partitionStates.updateAndMoveToEnd(topicPartition, newFetchState) debug(s"Current offset ${fetchState.fetchOffset} for partition $topicPartition is " + s"out of range or moved to remote tier. Reset fetch offset to ${newFetchState.fetchOffset}") @@ -797,8 +772,7 @@ abstract class AbstractFetcherThread(name: String, onPartitionFenced(topicPartition, leaderEpochInRequest) case e@(_: UnknownTopicOrPartitionException | _: UnknownLeaderEpochException | - _: NotLeaderOrFollowerException | - _: RetriableRemoteStorageException) => + _: NotLeaderOrFollowerException) => info(s"Could not build remote log auxiliary state for $topicPartition due to error: ${e.getMessage}") false case e: Throwable => @@ -813,15 +787,9 @@ abstract class AbstractFetcherThread(name: String, for (partition <- partitions) { Option(partitionStates.stateValue(partition)).foreach { currentFetchState => if (!currentFetchState.isDelayed) { - partitionStates.updateAndMoveToEnd(partition, - new PartitionFetchState( - currentFetchState.topicId, - currentFetchState.fetchOffset, - currentFetchState.lag, - currentFetchState.currentLeaderEpoch, - Optional.of(delay), - currentFetchState.state, - currentFetchState.lastFetchedEpoch)) + partitionStates.updateAndMoveToEnd(partition, PartitionFetchState(currentFetchState.topicId, currentFetchState.fetchOffset, + currentFetchState.lag, currentFetchState.currentLeaderEpoch, Some(delay), + currentFetchState.state, currentFetchState.lastFetchedEpoch)) } } } @@ -881,6 +849,14 @@ abstract class AbstractFetcherThread(name: String, } } +object AbstractFetcherThread { + + case class ReplicaFetch(partitionData: util.Map[TopicPartition, FetchRequest.PartitionData], fetchRequest: FetchRequest.Builder) + + case class ResultWithPartitions[R](result: R, partitionsWithError: Set[TopicPartition]) + +} + object FetcherMetrics { val ConsumerLag = "ConsumerLag" val RequestsPerSec = "RequestsPerSec" @@ -910,10 +886,11 @@ class FetcherLagMetrics(metricId: ClientIdTopicPartition) { } class FetcherLagStats(metricId: ClientIdAndBroker) { - val stats = new ConcurrentHashMap[TopicPartition, FetcherLagMetrics] + private val valueFactory = (k: TopicPartition) => new FetcherLagMetrics(ClientIdTopicPartition(metricId.clientId, k)) + val stats = new Pool[TopicPartition, FetcherLagMetrics](Some(valueFactory)) def getAndMaybePut(topicPartition: TopicPartition): FetcherLagMetrics = { - stats.computeIfAbsent(topicPartition, k => new FetcherLagMetrics(ClientIdTopicPartition(metricId.clientId, k))) + stats.getAndMaybePut(topicPartition) } def unregister(topicPartition: TopicPartition): Unit = { @@ -922,7 +899,9 @@ class FetcherLagStats(metricId: ClientIdAndBroker) { } def unregister(): Unit = { - stats.forEach((key, _) => unregister(key)) + stats.keys.toBuffer.foreach { key: TopicPartition => + unregister(key) + } } } @@ -948,6 +927,61 @@ case class ClientIdTopicPartition(clientId: String, topicPartition: TopicPartiti override def toString: String = s"$clientId-$topicPartition" } +sealed trait ReplicaState + +case object Truncating extends ReplicaState + +case object Fetching extends ReplicaState + +object PartitionFetchState { + def apply(topicId: Option[Uuid], offset: Long, lag: Option[Long], currentLeaderEpoch: Int, state: ReplicaState, + lastFetchedEpoch: Option[Int]): PartitionFetchState = { + PartitionFetchState(topicId, offset, lag, currentLeaderEpoch, None, state, lastFetchedEpoch) + } +} + + +/** + * case class to keep partition offset and its state(truncatingLog, delayed) + * This represents a partition as being either: + * (1) Truncating its log, for example, having recently become a follower + * (2) Delayed, for example, due to an error, where we subsequently back off a bit + * (3) ReadyForFetch, the active state where the thread is actively fetching data. + */ +case class PartitionFetchState(topicId: Option[Uuid], + fetchOffset: Long, + lag: Option[Long], + currentLeaderEpoch: Int, + delay: Option[Long], + state: ReplicaState, + lastFetchedEpoch: Option[Int]) { + + private val dueMs = delay.map(_ + Time.SYSTEM.milliseconds) + + def isReadyForFetch: Boolean = state == Fetching && !isDelayed + + def isReplicaInSync: Boolean = lag.isDefined && lag.get <= 0 + + def isTruncating: Boolean = state == Truncating && !isDelayed + + def isDelayed: Boolean = dueMs.exists(_ > Time.SYSTEM.milliseconds) + + override def toString: String = { + s"FetchState(topicId=$topicId" + + s", fetchOffset=$fetchOffset" + + s", currentLeaderEpoch=$currentLeaderEpoch" + + s", lastFetchedEpoch=$lastFetchedEpoch" + + s", state=$state" + + s", lag=$lag" + + s", delay=${delay.getOrElse(0)}ms" + + s")" + } + + def updateTopicId(topicId: Option[Uuid]): PartitionFetchState = { + this.copy(topicId = topicId) + } +} + case class OffsetTruncationState(offset: Long, truncationCompleted: Boolean) { def this(offset: Long) = this(offset, true) diff --git a/core/src/main/scala/kafka/server/AclApis.scala b/core/src/main/scala/kafka/server/AclApis.scala index 549180dc030f4..fe4adf5f937b5 100644 --- a/core/src/main/scala/kafka/server/AclApis.scala +++ b/core/src/main/scala/kafka/server/AclApis.scala @@ -22,7 +22,6 @@ import kafka.utils.Logging import org.apache.kafka.common.acl.AclOperation._ import org.apache.kafka.common.acl.AclBinding import org.apache.kafka.common.errors._ -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.CreateAclsResponseData.AclCreationResult import org.apache.kafka.common.message.DeleteAclsResponseData.DeleteAclsFilterResult import org.apache.kafka.common.message._ @@ -31,13 +30,10 @@ import org.apache.kafka.common.requests._ import org.apache.kafka.common.resource.Resource.CLUSTER_NAME import org.apache.kafka.common.resource.ResourceType import org.apache.kafka.security.authorizer.AuthorizerUtils -import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.authorizer._ -import org.apache.kafka.server.purgatory.DelayedFuturePurgatory import java.util import java.util.concurrent.CompletableFuture -import java.util.stream.Collectors import scala.collection.mutable.ArrayBuffer import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -47,13 +43,13 @@ import scala.jdk.OptionConverters.RichOptional * Logic to handle ACL requests. */ class AclApis(authHelper: AuthHelper, - authorizerPlugin: Option[Plugin[Authorizer]], + authorizer: Option[Authorizer], requestHelper: RequestHandlerHelper, - role: ProcessRole, + name: String, config: KafkaConfig) extends Logging { - this.logIdent = "[AclApis-%s-%s] ".format(role, config.nodeId) + this.logIdent = "[AclApis-%s-%s] ".format(name, config.nodeId) private val alterAclsPurgatory = - new DelayedFuturePurgatory("AlterAcls", config.nodeId) + new DelayedFuturePurgatory(purgatoryName = "AlterAcls", brokerId = config.nodeId) def isClosed: Boolean = alterAclsPurgatory.isShutdown @@ -62,7 +58,7 @@ class AclApis(authHelper: AuthHelper, def handleDescribeAcls(request: RequestChannel.Request): CompletableFuture[Unit] = { authHelper.authorizeClusterOperation(request, DESCRIBE) val describeAclsRequest = request.body[DescribeAclsRequest] - authorizerPlugin match { + authorizer match { case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeAclsResponse(new DescribeAclsResponseData() @@ -75,7 +71,7 @@ class AclApis(authHelper: AuthHelper, requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeAclsResponse(new DescribeAclsResponseData() .setThrottleTimeMs(requestThrottleMs) - .setResources(DescribeAclsResponse.aclsResources(auth.get.acls(filter))), + .setResources(DescribeAclsResponse.aclsResources(auth.acls(filter))), describeAclsRequest.version)) } CompletableFuture.completedFuture[Unit](()) @@ -85,7 +81,7 @@ class AclApis(authHelper: AuthHelper, authHelper.authorizeClusterOperation(request, ALTER) val createAclsRequest = request.body[CreateAclsRequest] - authorizerPlugin match { + authorizer match { case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => createAclsRequest.getErrorResponse(requestThrottleMs, new SecurityDisabledException("No Authorizer is configured."))) @@ -110,11 +106,11 @@ class AclApis(authHelper: AuthHelper, } val future = new CompletableFuture[util.List[AclCreationResult]]() - val createResults = auth.get.createAcls(request.context, validBindings.asJava).stream().map(_.toCompletableFuture).toList + val createResults = auth.createAcls(request.context, validBindings.asJava).asScala.map(_.toCompletableFuture) def sendResponseCallback(): Unit = { val aclCreationResults = allBindings.map { acl => - val result = errorResults.getOrElse(acl, createResults.get(validBindings.indexOf(acl)).get) + val result = errorResults.getOrElse(acl, createResults(validBindings.indexOf(acl)).get) val creationResult = new AclCreationResult() result.exception.toScala.foreach { throwable => val apiError = ApiError.fromThrowable(throwable) @@ -126,7 +122,7 @@ class AclApis(authHelper: AuthHelper, } future.complete(aclCreationResults.asJava) } - alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, createResults, () => sendResponseCallback()) + alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, createResults, sendResponseCallback) future.thenApply[Unit] { aclCreationResults => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => @@ -140,7 +136,7 @@ class AclApis(authHelper: AuthHelper, def handleDeleteAcls(request: RequestChannel.Request): CompletableFuture[Unit] = { authHelper.authorizeClusterOperation(request, ALTER) val deleteAclsRequest = request.body[DeleteAclsRequest] - authorizerPlugin match { + authorizer match { case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => deleteAclsRequest.getErrorResponse(requestThrottleMs, @@ -149,16 +145,15 @@ class AclApis(authHelper: AuthHelper, case Some(auth) => val future = new CompletableFuture[util.List[DeleteAclsFilterResult]]() - val deleteResults = auth.get.deleteAcls(request.context, deleteAclsRequest.filters) - .stream().map(_.toCompletableFuture).toList + val deleteResults = auth.deleteAcls(request.context, deleteAclsRequest.filters) + .asScala.map(_.toCompletableFuture).toList def sendResponseCallback(): Unit = { - val filterResults: util.List[DeleteAclsFilterResult] = deleteResults.stream().map(_.get) - .map(DeleteAclsResponse.filterResult).collect(Collectors.toList()) + val filterResults = deleteResults.map(_.get).map(DeleteAclsResponse.filterResult).asJava future.complete(filterResults) } - alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, deleteResults, () => sendResponseCallback()) + alterAclsPurgatory.tryCompleteElseWatch(config.connectionsMaxIdleMs, deleteResults, sendResponseCallback) future.thenApply[Unit] { filterResults => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DeleteAclsResponse( diff --git a/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala b/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala new file mode 100644 index 0000000000000..b461b866cbc55 --- /dev/null +++ b/core/src/main/scala/kafka/server/AddPartitionsToTxnManager.scala @@ -0,0 +1,314 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.server.AddPartitionsToTxnManager.{VerificationFailureRateMetricName, VerificationTimeMsMetricName} +import kafka.utils.Logging +import org.apache.kafka.clients.{ClientResponse, NetworkClient, RequestCompletionHandler} +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.{Node, TopicPartition} +import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, MetadataResponse} +import org.apache.kafka.common.utils.Time +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} + +import java.util +import java.util.concurrent.TimeUnit +import scala.collection.{Seq, mutable} +import scala.jdk.CollectionConverters._ + +object AddPartitionsToTxnManager { + type AppendCallback = Map[TopicPartition, Errors] => Unit + + val VerificationFailureRateMetricName = "VerificationFailureRate" + val VerificationTimeMsMetricName = "VerificationTimeMs" + + def produceRequestVersionToTransactionSupportedOperation(version: Short): TransactionSupportedOperation = { + if (version > 11) { + addPartition + } else if (version > 10) { + genericErrorSupported + } else { + defaultError + } + } + + def txnOffsetCommitRequestVersionToTransactionSupportedOperation(version: Short): TransactionSupportedOperation = { + if (version > 4) { + addPartition + } else if (version > 3) { + genericErrorSupported + } else { + defaultError + } + } +} + +/** + * This is an enum which handles the Partition Response based on the Request Version and the exact operation + * defaultError: This is the default workflow which maps to cases when the Produce Request Version or the Txn_offset_commit request was lower than the first version supporting the new Error Class + * genericErrorSupported: This maps to the case when the clients are updated to handle the TransactionAbortableException + * addPartition: This allows the partition to be added to the transactions inflight with the Produce and TxnOffsetCommit requests. Plus the behaviors in genericErrorSupported. + */ +sealed trait TransactionSupportedOperation { + val supportsEpochBump = false; +} +case object defaultError extends TransactionSupportedOperation +case object genericErrorSupported extends TransactionSupportedOperation +case object addPartition extends TransactionSupportedOperation { + override val supportsEpochBump = true +} + +/* + * Data structure to hold the transactional data to send to a node. Note -- at most one request per transactional ID + * will exist at a time in the map. If a given transactional ID exists in the map, and a new request with the same ID + * comes in, one request will be in the map and one will return to the producer with a response depending on the epoch. + */ +class TransactionDataAndCallbacks(val transactionData: AddPartitionsToTxnTransactionCollection, + val callbacks: mutable.Map[String, AddPartitionsToTxnManager.AppendCallback], + val startTimeMs: mutable.Map[String, Long], + val transactionSupportedOperation: TransactionSupportedOperation) + +class AddPartitionsToTxnManager( + config: KafkaConfig, + client: NetworkClient, + metadataCache: MetadataCache, + partitionFor: String => Int, + time: Time +) extends InterBrokerSendThread( + "AddPartitionsToTxnSenderThread-" + config.brokerId, + client, + config.requestTimeoutMs, + time +) with Logging { + + this.logIdent = logPrefix + + private val interBrokerListenerName = config.interBrokerListenerName + private val inflightNodes = mutable.HashSet[Node]() + private val nodesToTransactions = mutable.Map[Node, TransactionDataAndCallbacks]() + + private val metricsGroup = new KafkaMetricsGroup(this.getClass) + private val verificationFailureRate = metricsGroup.newMeter(VerificationFailureRateMetricName, "failures", TimeUnit.SECONDS) + private val verificationTimeMs = metricsGroup.newHistogram(VerificationTimeMsMetricName) + + def addOrVerifyTransaction( + transactionalId: String, + producerId: Long, + producerEpoch: Short, + topicPartitions: Seq[TopicPartition], + callback: AddPartitionsToTxnManager.AppendCallback, + transactionSupportedOperation: TransactionSupportedOperation + ): Unit = { + val coordinatorNode = getTransactionCoordinator(partitionFor(transactionalId)) + if (coordinatorNode.isEmpty) { + callback(topicPartitions.map(tp => tp -> Errors.COORDINATOR_NOT_AVAILABLE).toMap) + } else { + val topicCollection = new AddPartitionsToTxnTopicCollection() + topicPartitions.groupBy(_.topic).foreachEntry { (topic, tps) => + topicCollection.add(new AddPartitionsToTxnTopic() + .setName(topic) + .setPartitions(tps.map(tp => Int.box(tp.partition)).toList.asJava)) + } + + val transactionData = new AddPartitionsToTxnTransaction() + .setTransactionalId(transactionalId) + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setVerifyOnly(!transactionSupportedOperation.supportsEpochBump) + .setTopics(topicCollection) + + addTxnData(coordinatorNode.get, transactionData, callback, transactionSupportedOperation) + + } + } + + private def addTxnData( + node: Node, + transactionData: AddPartitionsToTxnTransaction, + callback: AddPartitionsToTxnManager.AppendCallback, + transactionSupportedOperation: TransactionSupportedOperation + ): Unit = { + nodesToTransactions.synchronized { + val curTime = time.milliseconds() + // Check if we have already have either node or individual transaction. Add the Node if it isn't there. + val existingNodeAndTransactionData = nodesToTransactions.getOrElseUpdate(node, + new TransactionDataAndCallbacks( + new AddPartitionsToTxnTransactionCollection(1), + mutable.Map[String, AddPartitionsToTxnManager.AppendCallback](), + mutable.Map[String, Long](), + transactionSupportedOperation)) + + val existingTransactionData = existingNodeAndTransactionData.transactionData.find(transactionData.transactionalId) + + // There are 3 cases if we already have existing data + // 1. Incoming data has a higher epoch -- return INVALID_PRODUCER_EPOCH for existing data since it is fenced + // 2. Incoming data has the same epoch -- return NETWORK_EXCEPTION for existing data, since the client is likely retrying and we want another retriable exception + // 3. Incoming data has a lower epoch -- return INVALID_PRODUCER_EPOCH for the incoming data since it is fenced, do not add incoming data to verify + if (existingTransactionData != null) { + if (existingTransactionData.producerEpoch <= transactionData.producerEpoch) { + val error = if (existingTransactionData.producerEpoch < transactionData.producerEpoch) + Errors.INVALID_PRODUCER_EPOCH + else + Errors.NETWORK_EXCEPTION + val oldCallback = existingNodeAndTransactionData.callbacks(transactionData.transactionalId) + existingNodeAndTransactionData.transactionData.remove(transactionData) + sendCallback(oldCallback, topicPartitionsToError(existingTransactionData, error), existingNodeAndTransactionData.startTimeMs(transactionData.transactionalId)) + } else { + // If the incoming transactionData's epoch is lower, we can return with INVALID_PRODUCER_EPOCH immediately. + sendCallback(callback, topicPartitionsToError(transactionData, Errors.INVALID_PRODUCER_EPOCH), curTime) + return + } + } + + existingNodeAndTransactionData.transactionData.add(transactionData) + existingNodeAndTransactionData.callbacks.put(transactionData.transactionalId, callback) + existingNodeAndTransactionData.startTimeMs.put(transactionData.transactionalId, curTime) + wakeup() + } + } + + private def getTransactionCoordinator(partition: Int): Option[Node] = { + metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, partition) + .filter(_.leader != MetadataResponse.NO_LEADER_ID) + .flatMap(metadata => metadataCache.getAliveBrokerNode(metadata.leader, interBrokerListenerName)) + } + + private def topicPartitionsToError(transactionData: AddPartitionsToTxnTransaction, error: Errors): Map[TopicPartition, Errors] = { + val topicPartitionsToError = mutable.Map[TopicPartition, Errors]() + transactionData.topics.forEach { topic => + topic.partitions.forEach { partition => + topicPartitionsToError.put(new TopicPartition(topic.name, partition), error) + } + } + verificationFailureRate.mark(topicPartitionsToError.size) + topicPartitionsToError.toMap + } + + private def sendCallback(callback: AddPartitionsToTxnManager.AppendCallback, errorMap: Map[TopicPartition, Errors], startTimeMs: Long): Unit = { + verificationTimeMs.update(time.milliseconds() - startTimeMs) + callback(errorMap) + } + + private class AddPartitionsToTxnHandler(node: Node, transactionDataAndCallbacks: TransactionDataAndCallbacks) extends RequestCompletionHandler { + override def onComplete(response: ClientResponse): Unit = { + // Note: Synchronization is not needed on inflightNodes since it is always accessed from this thread. + inflightNodes.remove(node) + if (response.authenticationException != null) { + error(s"AddPartitionsToTxnRequest failed for node ${response.destination} with an " + + "authentication exception.", response.authenticationException) + sendCallbacksToAll(Errors.forException(response.authenticationException).code) + } else if (response.versionMismatch != null) { + // We may see unsupported version exception if we try to send a verify only request to a broker that can't handle it. + // In this case, skip verification. + warn(s"AddPartitionsToTxnRequest failed for node ${response.destination} with invalid version exception. This suggests verification is not supported." + + s"Continuing handling the produce request.") + transactionDataAndCallbacks.callbacks.foreach { case (txnId, callback) => + sendCallback(callback, Map.empty, transactionDataAndCallbacks.startTimeMs(txnId)) + } + } else if (response.wasDisconnected || response.wasTimedOut) { + warn(s"AddPartitionsToTxnRequest failed for node ${response.destination} with a network exception.") + sendCallbacksToAll(Errors.NETWORK_EXCEPTION.code) + } else { + val addPartitionsToTxnResponseData = response.responseBody.asInstanceOf[AddPartitionsToTxnResponse].data + if (addPartitionsToTxnResponseData.errorCode != 0) { + error(s"AddPartitionsToTxnRequest for node ${response.destination} returned with error ${Errors.forCode(addPartitionsToTxnResponseData.errorCode)}.") + // The client should not be exposed to CLUSTER_AUTHORIZATION_FAILED so modify the error to signify the verification did not complete. + // Return INVALID_TXN_STATE. + val finalError = if (addPartitionsToTxnResponseData.errorCode == Errors.CLUSTER_AUTHORIZATION_FAILED.code) + Errors.INVALID_TXN_STATE.code + else + addPartitionsToTxnResponseData.errorCode + + sendCallbacksToAll(finalError) + } else { + addPartitionsToTxnResponseData.resultsByTransaction.forEach { transactionResult => + val unverified = mutable.Map[TopicPartition, Errors]() + transactionResult.topicResults.forEach { topicResult => + topicResult.resultsByPartition.forEach { partitionResult => + val tp = new TopicPartition(topicResult.name, partitionResult.partitionIndex) + if (partitionResult.partitionErrorCode != Errors.NONE.code) { + // Producers expect to handle INVALID_PRODUCER_EPOCH in this scenario. + val code = + if (partitionResult.partitionErrorCode == Errors.PRODUCER_FENCED.code) + Errors.INVALID_PRODUCER_EPOCH.code + else if (partitionResult.partitionErrorCode() == Errors.TRANSACTION_ABORTABLE.code + && transactionDataAndCallbacks.transactionSupportedOperation == defaultError) // For backward compatibility with clients. + Errors.INVALID_TXN_STATE.code + else + partitionResult.partitionErrorCode + unverified.put(tp, Errors.forCode(code)) + } + } + } + verificationFailureRate.mark(unverified.size) + val callback = transactionDataAndCallbacks.callbacks(transactionResult.transactionalId) + sendCallback(callback, unverified.toMap, transactionDataAndCallbacks.startTimeMs(transactionResult.transactionalId)) + } + } + } + wakeup() + } + + private def buildErrorMap(transactionalId: String, errorCode: Short): Map[TopicPartition, Errors] = { + val transactionData = transactionDataAndCallbacks.transactionData.find(transactionalId) + topicPartitionsToError(transactionData, Errors.forCode(errorCode)) + } + + private def sendCallbacksToAll(errorCode: Short): Unit = { + transactionDataAndCallbacks.callbacks.foreach { case (txnId, callback) => + sendCallback(callback, buildErrorMap(txnId, errorCode), transactionDataAndCallbacks.startTimeMs(txnId)) + } + } + } + + override def generateRequests(): util.Collection[RequestAndCompletionHandler] = { + // build and add requests to queue + val list = new util.ArrayList[RequestAndCompletionHandler]() + val currentTimeMs = time.milliseconds() + val removedNodes = mutable.Set[Node]() + nodesToTransactions.synchronized { + nodesToTransactions.foreach { case (node, transactionDataAndCallbacks) => + if (!inflightNodes.contains(node)) { + list.add(new RequestAndCompletionHandler( + currentTimeMs, + node, + AddPartitionsToTxnRequest.Builder.forBroker(transactionDataAndCallbacks.transactionData), + new AddPartitionsToTxnHandler(node, transactionDataAndCallbacks) + )) + + removedNodes.add(node) + } + } + removedNodes.foreach { node => + inflightNodes.add(node) + nodesToTransactions.remove(node) + } + } + list + } + + override def shutdown(): Unit = { + super.shutdown() + metricsGroup.removeMetric(VerificationFailureRateMetricName) + metricsGroup.removeMetric(VerificationTimeMsMetricName) + } + +} diff --git a/core/src/main/scala/kafka/server/ApiVersionManager.scala b/core/src/main/scala/kafka/server/ApiVersionManager.scala new file mode 100644 index 0000000000000..e286bc9352ac0 --- /dev/null +++ b/core/src/main/scala/kafka/server/ApiVersionManager.scala @@ -0,0 +1,175 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import org.apache.kafka.common.feature.SupportedVersionRange +import org.apache.kafka.common.message.ApiMessageType.ListenerType +import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.common.requests.ApiVersionsResponse +import org.apache.kafka.network.metrics.RequestChannelMetrics +import org.apache.kafka.server.{BrokerFeatures, ClientMetricsManager} +import org.apache.kafka.server.common.FinalizedFeatures + +import scala.collection.mutable +import scala.jdk.CollectionConverters._ + +trait ApiVersionManager { + def enableUnstableLastVersion: Boolean + def listenerType: ListenerType + def enabledApis: collection.Set[ApiKeys] + + def apiVersionResponse(throttleTimeMs: Int, alterFeatureLevel0: Boolean): ApiVersionsResponse + + def isApiEnabled(apiKey: ApiKeys, apiVersion: Short): Boolean = { + apiKey != null && apiKey.inScope(listenerType) && apiKey.isVersionEnabled(apiVersion, enableUnstableLastVersion) + } + def newRequestMetrics: RequestChannelMetrics = new RequestChannelMetrics(enabledApis.asJava) + + def features: FinalizedFeatures +} + +object ApiVersionManager { + def apply( + listenerType: ListenerType, + config: KafkaConfig, + forwardingManager: ForwardingManager, + supportedFeatures: BrokerFeatures, + metadataCache: MetadataCache, + clientMetricsManager: Option[ClientMetricsManager] + ): ApiVersionManager = { + new DefaultApiVersionManager( + listenerType, + forwardingManager, + supportedFeatures, + metadataCache, + config.unstableApiVersionsEnabled, + clientMetricsManager + ) + } +} + +/** + * A simple ApiVersionManager that does not support forwarding and does not have metadata cache, used in kraft controller. + * its enabled apis are determined by the listener type, its finalized features are dynamically determined by the controller. + * + * @param listenerType the listener type + * @param enabledApis the enabled apis, which are computed by the listener type + * @param brokerFeatures the broker features + * @param enableUnstableLastVersion whether to enable unstable last version, see [[KafkaConfig.unstableApiVersionsEnabled]] + * @param featuresProvider a provider to the finalized features supported + */ +class SimpleApiVersionManager( + val listenerType: ListenerType, + val enabledApis: collection.Set[ApiKeys], + brokerFeatures: org.apache.kafka.common.feature.Features[SupportedVersionRange], + val enableUnstableLastVersion: Boolean, + val featuresProvider: () => FinalizedFeatures +) extends ApiVersionManager { + + def this( + listenerType: ListenerType, + enableUnstableLastVersion: Boolean, + featuresProvider: () => FinalizedFeatures + ) = { + this( + listenerType, + ApiKeys.apisForListener(listenerType).asScala, + BrokerFeatures.defaultSupportedFeatures(enableUnstableLastVersion), + enableUnstableLastVersion, + featuresProvider + ) + } + + private val apiVersions = ApiVersionsResponse.collectApis(listenerType, enabledApis.asJava, enableUnstableLastVersion) + + override def apiVersionResponse( + throttleTimeMs: Int, + alterFeatureLevel0: Boolean + ): ApiVersionsResponse = { + val currentFeatures = features + new ApiVersionsResponse.Builder(). + setThrottleTimeMs(throttleTimeMs). + setApiVersions(apiVersions). + setSupportedFeatures(brokerFeatures). + setFinalizedFeatures(currentFeatures.finalizedFeatures()). + setFinalizedFeaturesEpoch(currentFeatures.finalizedFeaturesEpoch()). + setZkMigrationEnabled(false). + setAlterFeatureLevel0(alterFeatureLevel0). + build() + } + + override def features: FinalizedFeatures = featuresProvider.apply() +} + +/** + * The default ApiVersionManager that supports forwarding and has metadata cache, used in broker and zk controller. + * When forwarding is enabled, the enabled apis are determined by the broker listener type and the controller apis, + * otherwise the enabled apis are determined by the broker listener type, which is the same with SimpleApiVersionManager. + * + * @param listenerType the listener type + * @param forwardingManager the forwarding manager, + * @param brokerFeatures the broker features + * @param metadataCache the metadata cache, used to get the finalized features and the metadata version + * @param enableUnstableLastVersion whether to enable unstable last version, see [[KafkaConfig.unstableApiVersionsEnabled]] + * @param clientMetricsManager the client metrics manager, helps to determine whether client telemetry is enabled + */ +class DefaultApiVersionManager( + val listenerType: ListenerType, + forwardingManager: ForwardingManager, + brokerFeatures: BrokerFeatures, + metadataCache: MetadataCache, + val enableUnstableLastVersion: Boolean, + val clientMetricsManager: Option[ClientMetricsManager] = None +) extends ApiVersionManager { + + val enabledApis: mutable.Set[ApiKeys] = ApiKeys.apisForListener(listenerType).asScala + + override def apiVersionResponse( + throttleTimeMs: Int, + alterFeatureLevel0: Boolean + ): ApiVersionsResponse = { + val finalizedFeatures = metadataCache.features() + val controllerApiVersions = forwardingManager.controllerApiVersions + val clientTelemetryEnabled = clientMetricsManager match { + case Some(manager) => manager.isTelemetryReceiverConfigured + case None => false + } + val apiVersions = if (controllerApiVersions.isDefined) { + ApiVersionsResponse.controllerApiVersions( + controllerApiVersions.get, + listenerType, + enableUnstableLastVersion, + clientTelemetryEnabled) + } else { + ApiVersionsResponse.brokerApiVersions( + listenerType, + enableUnstableLastVersion, + clientTelemetryEnabled) + } + new ApiVersionsResponse.Builder(). + setThrottleTimeMs(throttleTimeMs). + setApiVersions(apiVersions). + setSupportedFeatures(brokerFeatures.supportedFeatures). + setFinalizedFeatures(finalizedFeatures.finalizedFeatures()). + setFinalizedFeaturesEpoch(finalizedFeatures.finalizedFeaturesEpoch()). + setZkMigrationEnabled(false). + setAlterFeatureLevel0(alterFeatureLevel0). + build() + } + + override def features: FinalizedFeatures = metadataCache.features() +} diff --git a/core/src/main/scala/kafka/server/AuthHelper.scala b/core/src/main/scala/kafka/server/AuthHelper.scala index 60140a14dc2d2..4d21fb4385959 100644 --- a/core/src/main/scala/kafka/server/AuthHelper.scala +++ b/core/src/main/scala/kafka/server/AuthHelper.scala @@ -24,7 +24,6 @@ import org.apache.kafka.clients.admin.EndpointType import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.acl.AclOperation.DESCRIBE import org.apache.kafka.common.errors.ClusterAuthorizationException -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.DescribeClusterResponseData import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection import org.apache.kafka.common.protocol.Errors @@ -39,7 +38,7 @@ import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authoriz import scala.collection.Seq import scala.jdk.CollectionConverters._ -class AuthHelper(authorizer: Option[Plugin[Authorizer]]) { +class AuthHelper(authorizer: Option[Authorizer]) { def authorize(requestContext: RequestContext, operation: AclOperation, resourceType: ResourceType, @@ -50,13 +49,13 @@ class AuthHelper(authorizer: Option[Plugin[Authorizer]]) { authorizer.forall { authZ => val resource = new ResourcePattern(resourceType, resourceName, PatternType.LITERAL) val actions = Collections.singletonList(new Action(operation, resource, refCount, logIfAllowed, logIfDenied)) - authZ.get.authorize(requestContext, actions).get(0) == AuthorizationResult.ALLOWED + authZ.authorize(requestContext, actions).get(0) == AuthorizationResult.ALLOWED } } def authorizeClusterOperation(request: RequestChannel.Request, operation: AclOperation): Unit = { if (!authorize(request.context, operation, CLUSTER, CLUSTER_NAME)) - throw new ClusterAuthorizationException(s"Request $request needs $operation permission.") + throw new ClusterAuthorizationException(s"Request $request is not authorized.") } def authorizedOperations(request: RequestChannel.Request, resource: Resource): Int = { @@ -65,7 +64,7 @@ class AuthHelper(authorizer: Option[Plugin[Authorizer]]) { case Some(authZ) => val resourcePattern = new ResourcePattern(resource.resourceType, resource.name, PatternType.LITERAL) val actions = supportedOps.map { op => new Action(op, resourcePattern, 1, false, false) } - authZ.get.authorize(request.context, actions.asJava).asScala + authZ.authorize(request.context, actions.asJava).asScala .zip(supportedOps) .filter(_._1 == AuthorizationResult.ALLOWED) .map(_._2).toSet @@ -78,7 +77,7 @@ class AuthHelper(authorizer: Option[Plugin[Authorizer]]) { def authorizeByResourceType(requestContext: RequestContext, operation: AclOperation, resourceType: ResourceType): Boolean = { authorizer.forall { authZ => - authZ.get.authorizeByResourceType(requestContext, operation, resourceType) == AuthorizationResult.ALLOWED + authZ.authorizeByResourceType(requestContext, operation, resourceType) == AuthorizationResult.ALLOWED } } @@ -110,7 +109,7 @@ class AuthHelper(authorizer: Option[Plugin[Authorizer]]) { val resource = new ResourcePattern(resourceType, resourceName, PatternType.LITERAL) new Action(operation, resource, count, logIfAllowed, logIfDenied) }.toBuffer - authZ.get.authorize(requestContext, actions.asJava).asScala + authZ.authorize(requestContext, actions.asJava).asScala .zip(resourceNameToCount.keySet) .collect { case (authzResult, resourceName) if authzResult == AuthorizationResult.ALLOWED => resourceName diff --git a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala index 1398a8ad7c17a..e3abde0bda42e 100644 --- a/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala +++ b/core/src/main/scala/kafka/server/AutoTopicCreationManager.scala @@ -18,7 +18,6 @@ package kafka.server import java.util.concurrent.ConcurrentHashMap -import java.util.concurrent.locks.ReentrantLock import java.util.{Collections, Properties} import kafka.coordinator.transaction.TransactionCoordinator import kafka.utils.Logging @@ -30,13 +29,10 @@ import org.apache.kafka.common.message.CreateTopicsRequestData import org.apache.kafka.common.message.CreateTopicsRequestData.{CreatableTopic, CreatableTopicConfig, CreatableTopicConfigCollection} import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{AbstractResponse, CreateTopicsRequest, CreateTopicsResponse, EnvelopeResponse, RequestContext, RequestHeader} +import org.apache.kafka.common.requests.{CreateTopicsRequest, RequestContext, RequestHeader} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} -import org.apache.kafka.server.quota.ControllerMutationQuota -import org.apache.kafka.common.utils.Time import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ @@ -49,99 +45,18 @@ trait AutoTopicCreationManager { controllerMutationQuota: ControllerMutationQuota, metadataRequestContext: Option[RequestContext] ): Seq[MetadataResponseTopic] - - def createStreamsInternalTopics( - topics: Map[String, CreatableTopic], - requestContext: RequestContext, - timeoutMs: Long - ): Unit - - def getStreamsInternalTopicCreationErrors( - topicNames: Set[String], - currentTimeMs: Long - ): Map[String, String] - - def close(): Unit = {} - } -/** - * Thread-safe cache that stores topic creation errors with per-entry expiration. - * - Expiration: maintained by a min-heap (priority queue) on expiration time - * - Capacity: enforced by evicting entries with earliest expiration time (not LRU) - * - Updates: old entries remain in queue but are ignored via reference equality check - */ -private[server] class ExpiringErrorCache(maxSize: Int, time: Time) { - - private case class Entry(topicName: String, errorMessage: String, expirationTimeMs: Long) - - private val byTopic = new ConcurrentHashMap[String, Entry]() - private val expiryQueue = new java.util.PriorityQueue[Entry](11, new java.util.Comparator[Entry] { - override def compare(a: Entry, b: Entry): Int = java.lang.Long.compare(a.expirationTimeMs, b.expirationTimeMs) - }) - private val lock = new ReentrantLock() - - def put(topicName: String, errorMessage: String, ttlMs: Long): Unit = { - lock.lock() - try { - val currentTimeMs = time.milliseconds() - val expirationTimeMs = currentTimeMs + ttlMs - val entry = Entry(topicName, errorMessage, expirationTimeMs) - byTopic.put(topicName, entry) - expiryQueue.add(entry) - - // Clean up expired entries and enforce capacity - while (!expiryQueue.isEmpty && - (expiryQueue.peek().expirationTimeMs <= currentTimeMs || byTopic.size() > maxSize)) { - val evicted = expiryQueue.poll() - val current = byTopic.get(evicted.topicName) - if (current != null && (current eq evicted)) { - byTopic.remove(evicted.topicName) - } - } - } finally { - lock.unlock() - } - } - - def getErrorsForTopics(topicNames: Set[String], currentTimeMs: Long): Map[String, String] = { - val result = mutable.Map.empty[String, String] - topicNames.foreach { topicName => - val entry = byTopic.get(topicName) - if (entry != null && entry.expirationTimeMs > currentTimeMs) { - result.put(topicName, entry.errorMessage) - } - } - result.toMap - } - - private[server] def clear(): Unit = { - lock.lock() - try { - byTopic.clear() - expiryQueue.clear() - } finally { - lock.unlock() - } - } -} - - class DefaultAutoTopicCreationManager( config: KafkaConfig, channelManager: NodeToControllerChannelManager, groupCoordinator: GroupCoordinator, txnCoordinator: TransactionCoordinator, - shareCoordinator: ShareCoordinator, - time: Time, - topicErrorCacheCapacity: Int = 1000 + shareCoordinator: Option[ShareCoordinator] ) extends AutoTopicCreationManager with Logging { private val inflightTopics = Collections.newSetFromMap(new ConcurrentHashMap[String, java.lang.Boolean]()) - // Hardcoded default capacity; can be overridden in tests via constructor param - private val topicCreationErrorCache = new ExpiringErrorCache(topicErrorCacheCapacity, time) - /** * Initiate auto topic creation for the given topics. * @@ -168,26 +83,9 @@ class DefaultAutoTopicCreationManager( uncreatableTopicResponses ++ creatableTopicResponses } - override def createStreamsInternalTopics( - topics: Map[String, CreatableTopic], - requestContext: RequestContext, - timeoutMs: Long - ): Unit = { - if (topics.nonEmpty) { - sendCreateTopicRequestWithErrorCaching(topics, Some(requestContext), timeoutMs) - } - } - - override def getStreamsInternalTopicCreationErrors( - topicNames: Set[String], - currentTimeMs: Long - ): Map[String, String] = { - topicCreationErrorCache.getErrorsForTopics(topicNames, currentTimeMs) - } - private def sendCreateTopicRequest( creatableTopics: Map[String, CreatableTopic], - requestContext: Option[RequestContext] + metadataRequestContext: Option[RequestContext] ): Seq[MetadataResponseTopic] = { val topicsToCreate = new CreateTopicsRequestData.CreatableTopicCollection(creatableTopics.size) topicsToCreate.addAll(creatableTopics.values.asJavaCollection) @@ -198,22 +96,6 @@ class DefaultAutoTopicCreationManager( .setTopics(topicsToCreate) ) - // Capture request header information for proper envelope response parsing - val requestHeaderForParsing = requestContext.map { context => - val requestVersion = - channelManager.controllerApiVersions.toScala match { - case None => - ApiKeys.CREATE_TOPICS.latestVersion() - case Some(nodeApiVersions) => - nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) - } - - new RequestHeader(ApiKeys.CREATE_TOPICS, - requestVersion, - context.clientId, - context.correlationId) - } - val requestCompletionHandler = new ControllerRequestCompletionHandler { override def onTimeout(): Unit = { clearInflightRequests(creatableTopics) @@ -227,58 +109,31 @@ class DefaultAutoTopicCreationManager( } else if (response.versionMismatch() != null) { warn(s"Auto topic creation failed for ${creatableTopics.keys} with invalid version exception") } else { - if (response.hasResponse) { - response.responseBody() match { - case envelopeResponse: EnvelopeResponse => - // Unwrap the envelope response to get the actual CreateTopicsResponse - val envelopeError = envelopeResponse.error() - if (envelopeError != Errors.NONE) { - warn(s"Auto topic creation failed for ${creatableTopics.keys} with envelope error: ${envelopeError}") - } else { - requestHeaderForParsing match { - case Some(requestHeader) => - try { - // Use the captured request header for proper envelope response parsing - val createTopicsResponse = AbstractResponse.parseResponse( - envelopeResponse.responseData(), requestHeader).asInstanceOf[CreateTopicsResponse] - - createTopicsResponse.data().topics().forEach(topicResult => { - val error = Errors.forCode(topicResult.errorCode) - if (error != Errors.NONE) { - warn(s"Auto topic creation failed for ${topicResult.name} with error '${error.name}': ${topicResult.errorMessage}") - } - }) - } catch { - case e: Exception => - warn(s"Failed to parse envelope response for auto topic creation of ${creatableTopics.keys}", e) - } - case None => - warn(s"Cannot parse envelope response without original request header information") - } - } - case createTopicsResponse: CreateTopicsResponse => - createTopicsResponse.data().topics().forEach(topicResult => { - val error = Errors.forCode(topicResult.errorCode) - if (error != Errors.NONE) { - warn(s"Auto topic creation failed for ${topicResult.name} with error '${error.name}': ${topicResult.errorMessage}") - } - }) - case other => - warn(s"Auto topic creation request received unexpected response type: ${other.getClass.getSimpleName}") - } - } debug(s"Auto topic creation completed for ${creatableTopics.keys} with response ${response.responseBody}.") } } } - val request = (requestContext, requestHeaderForParsing) match { - case (Some(context), Some(requestHeader)) => - ForwardingManager.buildEnvelopeRequest(context, - createTopicsRequest.build(requestHeader.apiVersion()).serializeWithHeader(requestHeader)) - case _ => - createTopicsRequest - } + val request = metadataRequestContext.map { context => + val requestVersion = + channelManager.controllerApiVersions.toScala match { + case None => + // We will rely on the Metadata request to be retried in the case + // that the latest version is not usable by the controller. + ApiKeys.CREATE_TOPICS.latestVersion() + case Some(nodeApiVersions) => + nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) + } + + // Borrow client information such as client id and correlation id from the original request, + // in order to correlate the create request with the original metadata request. + val requestHeader = new RequestHeader(ApiKeys.CREATE_TOPICS, + requestVersion, + context.clientId, + context.correlationId) + ForwardingManager.buildEnvelopeRequest(context, + createTopicsRequest.build(requestVersion).serializeWithHeader(requestHeader)) + }.getOrElse(createTopicsRequest) channelManager.sendRequest(request, requestCompletionHandler) @@ -307,19 +162,22 @@ class DefaultAutoTopicCreationManager( .setReplicationFactor(config.groupCoordinatorConfig.offsetsTopicReplicationFactor) .setConfigs(convertToTopicConfigCollections(groupCoordinator.groupMetadataTopicConfigs)) case TRANSACTION_STATE_TOPIC_NAME => - val transactionLogConfig = new TransactionLogConfig(config) new CreatableTopic() .setName(topic) - .setNumPartitions(transactionLogConfig.transactionTopicPartitions) - .setReplicationFactor(transactionLogConfig.transactionTopicReplicationFactor) + .setNumPartitions(config.transactionLogConfig.transactionTopicPartitions) + .setReplicationFactor(config.transactionLogConfig.transactionTopicReplicationFactor) .setConfigs(convertToTopicConfigCollections( txnCoordinator.transactionTopicConfigs)) case SHARE_GROUP_STATE_TOPIC_NAME => + val props = shareCoordinator match { + case Some(coordinator) => coordinator.shareGroupStateTopicConfigs() + case None => new Properties() + } new CreatableTopic() .setName(topic) .setNumPartitions(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions()) .setReplicationFactor(config.shareCoordinatorConfig.shareCoordinatorStateTopicReplicationFactor()) - .setConfigs(convertToTopicConfigCollections(shareCoordinator.shareGroupStateTopicConfigs())) + .setConfigs(convertToTopicConfigCollections(props)) case topicName => new CreatableTopic() .setName(topicName) @@ -379,133 +237,4 @@ class DefaultAutoTopicCreationManager( (creatableTopics, uncreatableTopics) } - - private def sendCreateTopicRequestWithErrorCaching( - creatableTopics: Map[String, CreatableTopic], - requestContext: Option[RequestContext], - timeoutMs: Long - ): Seq[MetadataResponseTopic] = { - val topicsToCreate = new CreateTopicsRequestData.CreatableTopicCollection(creatableTopics.size) - topicsToCreate.addAll(creatableTopics.values.asJavaCollection) - - val createTopicsRequest = new CreateTopicsRequest.Builder( - new CreateTopicsRequestData() - .setTimeoutMs(config.requestTimeoutMs) - .setTopics(topicsToCreate) - ) - - // Capture request header information for proper envelope response parsing - val requestHeaderForParsing = requestContext.map { context => - val requestVersion = - channelManager.controllerApiVersions.toScala match { - case None => - ApiKeys.CREATE_TOPICS.latestVersion() - case Some(nodeApiVersions) => - nodeApiVersions.latestUsableVersion(ApiKeys.CREATE_TOPICS) - } - - new RequestHeader(ApiKeys.CREATE_TOPICS, - requestVersion, - context.clientId, - context.correlationId) - } - - val requestCompletionHandler = new ControllerRequestCompletionHandler { - override def onTimeout(): Unit = { - clearInflightRequests(creatableTopics) - debug(s"Auto topic creation timed out for ${creatableTopics.keys}.") - cacheTopicCreationErrors(creatableTopics.keys.toSet, "Auto topic creation timed out.", timeoutMs) - } - - override def onComplete(response: ClientResponse): Unit = { - clearInflightRequests(creatableTopics) - if (response.authenticationException() != null) { - val authException = response.authenticationException() - warn(s"Auto topic creation failed for ${creatableTopics.keys} with authentication exception: ${authException.getMessage}") - cacheTopicCreationErrors(creatableTopics.keys.toSet, authException.getMessage, timeoutMs) - } else if (response.versionMismatch() != null) { - val versionException = response.versionMismatch() - warn(s"Auto topic creation failed for ${creatableTopics.keys} with version mismatch exception: ${versionException.getMessage}") - cacheTopicCreationErrors(creatableTopics.keys.toSet, versionException.getMessage, timeoutMs) - } else { - if (response.hasResponse) { - response.responseBody() match { - case envelopeResponse: EnvelopeResponse => - // Unwrap the envelope response to get the actual CreateTopicsResponse - val envelopeError = envelopeResponse.error() - if (envelopeError != Errors.NONE) { - warn(s"Auto topic creation failed for ${creatableTopics.keys} with envelope error: ${envelopeError}") - cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Envelope error: ${envelopeError}", timeoutMs) - } else { - requestHeaderForParsing match { - case Some(requestHeader) => - try { - // Use the captured request header for proper envelope response parsing - val createTopicsResponse = AbstractResponse.parseResponse( - envelopeResponse.responseData(), requestHeader).asInstanceOf[CreateTopicsResponse] - - cacheTopicCreationErrorsFromResponse(createTopicsResponse, timeoutMs) - } catch { - case e: Exception => - warn(s"Failed to parse envelope response for auto topic creation of ${creatableTopics.keys}", e) - cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Response parsing error: ${e.getMessage}", timeoutMs) - } - case None => - warn(s"Cannot parse envelope response without original request header information") - cacheTopicCreationErrors(creatableTopics.keys.toSet, "Missing request header for envelope parsing", timeoutMs) - } - } - case createTopicsResponse: CreateTopicsResponse => - cacheTopicCreationErrorsFromResponse(createTopicsResponse, timeoutMs) - case unexpectedResponse => - warn(s"Auto topic creation request received unexpected response type: ${unexpectedResponse.getClass.getSimpleName}") - cacheTopicCreationErrors(creatableTopics.keys.toSet, s"Unexpected response type: ${unexpectedResponse.getClass.getSimpleName}", timeoutMs) - } - debug(s"Auto topic creation completed for ${creatableTopics.keys} with response ${response.responseBody}.") - } - } - } - } - - val request = (requestContext, requestHeaderForParsing) match { - case (Some(context), Some(requestHeader)) => - ForwardingManager.buildEnvelopeRequest(context, - createTopicsRequest.build(requestHeader.apiVersion()).serializeWithHeader(requestHeader)) - case _ => - createTopicsRequest - } - - channelManager.sendRequest(request, requestCompletionHandler) - - val creatableTopicResponses = creatableTopics.keySet.toSeq.map { topic => - new MetadataResponseTopic() - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - .setName(topic) - .setIsInternal(Topic.isInternal(topic)) - } - - creatableTopicResponses - } - - private def cacheTopicCreationErrors(topicNames: Set[String], errorMessage: String, ttlMs: Long): Unit = { - topicNames.foreach { topicName => - topicCreationErrorCache.put(topicName, errorMessage, ttlMs) - } - } - - private def cacheTopicCreationErrorsFromResponse(response: CreateTopicsResponse, ttlMs: Long): Unit = { - response.data().topics().forEach { topicResult => - if (topicResult.errorCode() != Errors.NONE.code()) { - val errorMessage = Option(topicResult.errorMessage()) - .filter(_.nonEmpty) - .getOrElse(Errors.forCode(topicResult.errorCode()).message()) - topicCreationErrorCache.put(topicResult.name(), errorMessage, ttlMs) - debug(s"Cached topic creation error for ${topicResult.name()}: $errorMessage") - } - } - } - - override def close(): Unit = { - topicCreationErrorCache.clear() - } } diff --git a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala index 2368ebc21ccd2..36c666b6467be 100644 --- a/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala +++ b/core/src/main/scala/kafka/server/BrokerLifecycleManager.scala @@ -28,7 +28,7 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{BrokerHeartbeatRequest, BrokerHeartbeatResponse, BrokerRegistrationRequest, BrokerRegistrationResponse} import org.apache.kafka.metadata.{BrokerState, VersionRange} import org.apache.kafka.queue.EventQueue.DeadlineFunction -import org.apache.kafka.common.utils.{LogContext, Time} +import org.apache.kafka.common.utils.{ExponentialBackoff, LogContext, Time} import org.apache.kafka.queue.{EventQueue, KafkaEventQueue} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} @@ -58,6 +58,7 @@ class BrokerLifecycleManager( val config: KafkaConfig, val time: Time, val threadNamePrefix: String, + val isZkBroker: Boolean, val logDirs: Set[Uuid], val shutdownHook: () => Unit = () => {} ) extends Logging { @@ -65,6 +66,9 @@ class BrokerLifecycleManager( private def logPrefix(): String = { val builder = new StringBuilder("[BrokerLifecycleManager") builder.append(" id=").append(config.nodeId) + if (isZkBroker) { + builder.append(" isZkBroker=true") + } builder.append("] ") builder.toString() } @@ -89,6 +93,18 @@ class BrokerLifecycleManager( private val initialTimeoutNs = MILLISECONDS.toNanos(config.initialRegistrationTimeoutMs.longValue()) + /** + * The exponential backoff to use for resending communication. + */ + private val resendExponentialBackoff = + new ExponentialBackoff(100, 2, config.brokerSessionTimeoutMs.toLong / 2, 0.02) + + /** + * The number of times we've tried and failed to communicate. This variable can only be + * read or written from the BrokerToControllerRequestThread. + */ + private var failedAttempts = 0L + /** * The broker incarnation ID. This ID uniquely identifies each time we start the broker */ @@ -142,7 +158,7 @@ class BrokerLifecycleManager( private var offlineDirs = mutable.Map[Uuid, Boolean]() /** - * True if we sent an event queue to the active controller requesting controlled + * True if we sent a event queue to the active controller requesting controlled * shutdown. This variable can only be read or written from the event queue thread. */ private var gotControlledShutdownResponse = false @@ -248,14 +264,16 @@ class BrokerLifecycleManager( new OfflineDirBrokerFailureEvent(directory)) } - def resendBrokerRegistration(): Unit = { - eventQueue.append(new ResendBrokerRegistrationEvent()) + def resendBrokerRegistrationUnlessZkMode(): Unit = { + eventQueue.append(new ResendBrokerRegistrationUnlessZkModeEvent()) } - private class ResendBrokerRegistrationEvent extends EventQueue.Event { + private class ResendBrokerRegistrationUnlessZkModeEvent extends EventQueue.Event { override def run(): Unit = { - registered = false - scheduleNextCommunicationImmediately() + if (!isZkBroker) { + registered = false + scheduleNextCommunicationImmediately() + } } } @@ -348,9 +366,12 @@ class BrokerLifecycleManager( _clusterId = clusterId _advertisedListeners = advertisedListeners.duplicate() _supportedFeatures = new util.HashMap[String, VersionRange](supportedFeatures) - eventQueue.scheduleDeferred("initialRegistrationTimeout", - new DeadlineFunction(time.nanoseconds() + initialTimeoutNs), - new RegistrationTimeoutEvent()) + if (!isZkBroker) { + // Only KRaft brokers block on registration during startup + eventQueue.scheduleDeferred("initialRegistrationTimeout", + new DeadlineFunction(time.nanoseconds() + initialTimeoutNs), + new RegistrationTimeoutEvent()) + } sendBrokerRegistration() info(s"Incarnation $incarnationId of broker $nodeId in cluster $clusterId " + "is now STARTING.") @@ -372,7 +393,7 @@ class BrokerLifecycleManager( }) val data = new BrokerRegistrationRequestData(). setBrokerId(nodeId). - setIsMigratingZkBroker(false). + setIsMigratingZkBroker(isZkBroker). setClusterId(_clusterId). setFeatures(features). setIncarnationId(incarnationId). @@ -428,6 +449,7 @@ class BrokerLifecycleManager( val message = response.responseBody().asInstanceOf[BrokerRegistrationResponse] val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { + failedAttempts = 0 _brokerEpoch = message.data().brokerEpoch() registered = true initialRegistrationSucceeded = true @@ -501,6 +523,7 @@ class BrokerLifecycleManager( val errorCode = Errors.forCode(message.data().errorCode()) if (errorCode == Errors.NONE) { val responseData = message.data() + failedAttempts = 0 currentOfflineDirs.foreach(cur => offlineDirs.put(cur, true)) _state match { case BrokerState.STARTING => @@ -563,9 +586,10 @@ class BrokerLifecycleManager( } private def scheduleNextCommunicationAfterFailure(): Unit = { + val delayMs = resendExponentialBackoff.backoff(failedAttempts) + failedAttempts = failedAttempts + 1 nextSchedulingShouldBeImmediate = false // never immediately reschedule after a failure - scheduleNextCommunication(NANOSECONDS.convert( - config.brokerHeartbeatIntervalMs.longValue() , MILLISECONDS)) + scheduleNextCommunication(NANOSECONDS.convert(delayMs, MILLISECONDS)) } private def scheduleNextCommunicationAfterSuccess(): Unit = { diff --git a/core/src/main/scala/kafka/server/BrokerServer.scala b/core/src/main/scala/kafka/server/BrokerServer.scala index 689c62b868749..6d2d1089088cf 100644 --- a/core/src/main/scala/kafka/server/BrokerServer.scala +++ b/core/src/main/scala/kafka/server/BrokerServer.scala @@ -17,16 +17,16 @@ package kafka.server -import kafka.coordinator.group.CoordinatorPartitionWriter +import kafka.coordinator.group.{CoordinatorLoaderImpl, CoordinatorPartitionWriter, GroupCoordinatorAdapter} import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager -import kafka.network.SocketServer +import kafka.log.remote.RemoteLogManager +import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.raft.KafkaRaftManager import kafka.server.metadata._ -import kafka.server.share.{ShareCoordinatorMetadataCacheHelperImpl, SharePartitionManager} +import kafka.server.share.SharePartitionManager import kafka.utils.CoreUtils import org.apache.kafka.common.config.ConfigException -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName @@ -34,7 +34,7 @@ import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{LogContext, Time, Utils} import org.apache.kafka.common.{ClusterResource, TopicPartition, Uuid} -import org.apache.kafka.coordinator.common.runtime.{CoordinatorLoaderImpl, CoordinatorRecord} +import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord import org.apache.kafka.coordinator.group.metrics.{GroupCoordinatorMetrics, GroupCoordinatorRuntimeMetrics} import org.apache.kafka.coordinator.group.{GroupConfigManager, GroupCoordinator, GroupCoordinatorRecordSerde, GroupCoordinatorService} import org.apache.kafka.coordinator.share.metrics.{ShareCoordinatorMetrics, ShareCoordinatorRuntimeMetrics} @@ -42,26 +42,25 @@ import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorRec import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.image.publisher.{BrokerRegistrationTracker, MetadataPublisher} import org.apache.kafka.metadata.{BrokerState, ListenerInfo} -import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} -import org.apache.kafka.security.{CredentialProvider, DelegationTokenManager} +import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.{ApiMessageAndVersion, DirectoryEventHandler, NodeToControllerChannelManager, TopicIdPartition} -import org.apache.kafka.server.config.{ConfigType, DelegationTokenManagerConfigs} -import org.apache.kafka.server.log.remote.storage.{RemoteLogManager, RemoteLogManagerConfig} +import org.apache.kafka.server.config.ConfigType +import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.{ClientMetricsReceiverPlugin, KafkaYammerMetrics} import org.apache.kafka.server.network.{EndpointReadyFutures, KafkaAuthorizerServerInfo} -import org.apache.kafka.server.share.persister.{DefaultStatePersister, NoOpStatePersister, Persister, PersisterStateManager} +import org.apache.kafka.server.share.persister.{DefaultStatePersister, NoOpShareStatePersister, Persister, PersisterStateManager} import org.apache.kafka.server.share.session.ShareSessionCache import org.apache.kafka.server.util.timer.{SystemTimer, SystemTimerReaper} import org.apache.kafka.server.util.{Deadline, FutureUtils, KafkaScheduler} -import org.apache.kafka.server.{AssignmentsManager, BrokerFeatures, ClientMetricsManager, DefaultApiVersionManager, DelayedActionQueue, ProcessRole} -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager +import org.apache.kafka.server.{AssignmentsManager, BrokerFeatures, ClientMetricsManager, DelayedActionQueue} import org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.time.Duration import java.util import java.util.Optional +import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.locks.{Condition, ReentrantLock} import java.util.concurrent.{CompletableFuture, ExecutionException, TimeUnit, TimeoutException} import scala.collection.Map @@ -95,13 +94,15 @@ class BrokerServer( private var assignmentsManager: AssignmentsManager = _ + private val isShuttingDown = new AtomicBoolean(false) + val lock: ReentrantLock = new ReentrantLock() val awaitShutdownCond: Condition = lock.newCondition() var status: ProcessStatus = SHUTDOWN @volatile var dataPlaneRequestProcessor: KafkaApis = _ - var authorizerPlugin: Option[Plugin[Authorizer]] = None + var authorizer: Option[Authorizer] = None @volatile var socketServer: SocketServer = _ var dataPlaneRequestHandlerPool: KafkaRequestHandlerPool = _ @@ -111,7 +112,7 @@ class BrokerServer( var tokenManager: DelegationTokenManager = _ - var dynamicConfigHandlers: Map[ConfigType, ConfigHandler] = _ + var dynamicConfigHandlers: Map[String, ConfigHandler] = _ @volatile private[this] var _replicaManager: ReplicaManager = _ @@ -124,7 +125,7 @@ class BrokerServer( var transactionCoordinator: TransactionCoordinator = _ - var shareCoordinator: ShareCoordinator = _ + var shareCoordinator: Option[ShareCoordinator] = None var clientToControllerChannelManager: NodeToControllerChannelManager = _ @@ -169,7 +170,10 @@ class BrokerServer( info(s"Transition from $status to $to") status = to - if (to == SHUTDOWN) { + if (to == SHUTTING_DOWN) { + isShuttingDown.set(true) + } else if (to == SHUTDOWN) { + isShuttingDown.set(false) awaitShutdownCond.signalAll() } } finally { @@ -189,10 +193,7 @@ class BrokerServer( info("Starting broker") val clientMetricsReceiverPlugin = new ClientMetricsReceiverPlugin() - config.dynamicConfig.initialize(Some(clientMetricsReceiverPlugin)) - quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-", ProcessRole.BrokerRole.toString) - DynamicBrokerConfig.readDynamicBrokerConfigsFromSnapshot(raftManager, config, quotaManagers, logContext) /* start scheduler */ kafkaScheduler = new KafkaScheduler(config.backgroundThreads) @@ -201,9 +202,11 @@ class BrokerServer( /* register broker metrics */ brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + quotaManagers = QuotaFactory.instantiate(config, metrics, time, s"broker-${config.nodeId}-") + logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size) - metadataCache = new KRaftMetadataCache(config.nodeId, () => raftManager.client.kraftVersion()) + metadataCache = MetadataCache.kRaftMetadataCache(config.nodeId, () => raftManager.client.kraftVersion()) // Create log manager, but don't start it because we need to delay any potential unclean shutdown log recovery // until we catch up on the metadata log and have up-to-date topic and broker configs. @@ -213,11 +216,15 @@ class BrokerServer( kafkaScheduler, time, brokerTopicStats, - logDirFailureChannel) + logDirFailureChannel, + keepPartitionMetadataFile = true) + + remoteLogManagerOpt = createRemoteLogManager() lifecycleManager = new BrokerLifecycleManager(config, time, s"broker-${config.nodeId}-", + isZkBroker = false, logDirs = logManager.directoryIdsSet, () => new Thread(() => shutdown(), "kafka-shutdown-thread").start()) @@ -245,24 +252,16 @@ class BrokerServer( forwardingManager = new ForwardingManagerImpl(clientToControllerChannelManager, metrics) clientMetricsManager = new ClientMetricsManager(clientMetricsReceiverPlugin, config.clientTelemetryMaxBytes, time, metrics) - val apiVersionManager = new DefaultApiVersionManager( + val apiVersionManager = ApiVersionManager( ListenerType.BROKER, - () => forwardingManager.controllerApiVersions, + config, + forwardingManager, brokerFeatures, metadataCache, - config.unstableApiVersionsEnabled, - Optional.of(clientMetricsManager) - ) - - val shareFetchSessionCache : ShareSessionCache = new ShareSessionCache( - config.shareGroupConfig.shareGroupMaxShareSessions() - ) - - val connectionDisconnectListeners = Seq( - clientMetricsManager.connectionDisconnectListener(), - shareFetchSessionCache.connectionDisconnectListener() + Some(clientMetricsManager) ) + val connectionDisconnectListeners = Seq(clientMetricsManager.connectionDisconnectListener()) // Create and start the socket server acceptor threads so that the bound port is known. // Delay starting processors until the end of the initialization sequence to ensure // that credentials have been loaded before processing authentications. @@ -277,12 +276,10 @@ class BrokerServer( clientQuotaMetadataManager = new ClientQuotaMetadataManager(quotaManagers, socketServer.connectionQuotas) val listenerInfo = ListenerInfo.create(Optional.of(config.interBrokerListenerName.value()), - config.effectiveAdvertisedBrokerListeners.asJava). + config.effectiveAdvertisedBrokerListeners.map(_.toJava).asJava). withWildcardHostnamesResolved(). withEphemeralPortsCorrected(name => socketServer.boundPort(new ListenerName(name))) - remoteLogManagerOpt = createRemoteLogManager(listenerInfo) - alterPartitionManager = AlterPartitionManager( config, scheduler = kafkaScheduler, @@ -348,6 +345,8 @@ class BrokerServer( logDirFailureChannel = logDirFailureChannel, alterPartitionManager = alterPartitionManager, brokerTopicStats = brokerTopicStats, + isShuttingDown = isShuttingDown, + threadNamePrefix = None, // The ReplicaManager only runs on the broker, and already includes the ID in thread names. delayedRemoteFetchPurgatoryParam = None, brokerEpochSupplier = () => lifecycleManager.brokerEpoch, addPartitionsToTxnManager = Some(addPartitionsToTxnManager), @@ -356,10 +355,8 @@ class BrokerServer( ) /* start token manager */ - tokenManager = new DelegationTokenManager(new DelegationTokenManagerConfigs(config), tokenCache) - - // Create and initialize an authorizer if one is configured. - authorizerPlugin = config.createNewAuthorizer(metrics, ProcessRole.BrokerRole.toString) + tokenManager = new DelegationTokenManager(config, tokenCache, time) + tokenManager.startup() /* initializing the groupConfigManager */ groupConfigManager = new GroupConfigManager(config.groupCoordinatorConfig.extractGroupConfigMap(config.shareGroupConfig)) @@ -387,9 +384,9 @@ class BrokerServer( autoTopicCreationManager = new DefaultAutoTopicCreationManager( config, clientToControllerChannelManager, groupCoordinator, - transactionCoordinator, shareCoordinator, time) + transactionCoordinator, shareCoordinator) - dynamicConfigHandlers = Map[ConfigType, ConfigHandler]( + dynamicConfigHandlers = Map[String, ConfigHandler]( ConfigType.TOPIC -> new TopicConfigHandler(replicaManager, config, quotaManagers), ConfigType.BROKER -> new BrokerConfigHandler(config, quotaManagers), ConfigType.CLIENT_METRICS -> new ClientMetricsConfigHandler(clientMetricsManager), @@ -404,7 +401,7 @@ class BrokerServer( config, "heartbeat", s"broker-${config.nodeId}-", - config.brokerHeartbeatIntervalMs + config.brokerSessionTimeoutMs / 2 // KAFKA-14392 ) lifecycleManager.start( () => sharedServer.loader.lastAppliedOffset(), @@ -415,6 +412,10 @@ class BrokerServer( logManager.readBrokerEpochFromCleanShutdownFiles() ) + // Create and initialize an authorizer if one is configured. + authorizer = config.createNewAuthorizer() + authorizer.foreach(_.configure(config.originals)) + // The FetchSessionCache is divided into config.numIoThreads shards, each responsible // for Math.max(1, shardNum * sessionIdRange) <= sessionId < (shardNum + 1) * sessionIdRange val sessionIdRange = Int.MaxValue / NumFetchSessionCacheShards @@ -427,6 +428,10 @@ class BrokerServer( )) val fetchManager = new FetchManager(Time.SYSTEM, new FetchSessionCache(fetchSessionCacheShards)) + val shareFetchSessionCache : ShareSessionCache = new ShareSessionCache( + config.shareGroupConfig.shareGroupMaxGroups * config.groupCoordinatorConfig.shareGroupMaxSize, + KafkaBroker.MIN_INCREMENTAL_FETCH_SESSION_EVICTION_MS) + sharePartitionManager = new SharePartitionManager( replicaManager, time, @@ -434,10 +439,10 @@ class BrokerServer( config.shareGroupConfig.shareGroupRecordLockDurationMs, config.shareGroupConfig.shareGroupDeliveryCountLimit, config.shareGroupConfig.shareGroupPartitionMaxRecordLocks, - config.remoteLogManagerConfig.remoteFetchMaxWaitMs().toLong, + config.shareGroupConfig.shareFetchMaxFetchRecords, persister, groupConfigManager, - brokerTopicStats + metrics ) dataPlaneRequestProcessor = new KafkaApis( @@ -453,7 +458,7 @@ class BrokerServer( configRepository = metadataCache, metadataCache = metadataCache, metrics = metrics, - authorizerPlugin = authorizerPlugin, + authorizer = authorizer, quotas = quotaManagers, fetchManager = fetchManager, sharePartitionManager = sharePartitionManager, @@ -462,12 +467,29 @@ class BrokerServer( time = time, tokenManager = tokenManager, apiVersionManager = apiVersionManager, - clientMetricsManager = clientMetricsManager, - groupConfigManager = groupConfigManager) + clientMetricsManager = clientMetricsManager) dataPlaneRequestHandlerPool = new KafkaRequestHandlerPool(config.nodeId, socketServer.dataPlaneRequestChannel, dataPlaneRequestProcessor, time, - config.numIoThreads, "RequestHandlerAvgIdlePercent") + config.numIoThreads, s"${DataPlaneAcceptor.MetricPrefix}RequestHandlerAvgIdlePercent", + DataPlaneAcceptor.ThreadPrefix) + + // Start RemoteLogManager before initializing broker metadata publishers. + remoteLogManagerOpt.foreach { rlm => + val listenerName = config.remoteLogManagerConfig.remoteLogMetadataManagerListenerName() + if (listenerName != null) { + val endpoint = listenerInfo.listeners().values().stream + .filter(e => + e.listenerName().isPresent && + ListenerName.normalised(e.listenerName().get()).equals(ListenerName.normalised(listenerName)) + ) + .findFirst() + .orElseThrow(() => new ConfigException(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, + listenerName, "Should be set as a listener name within valid broker listener name list: " + listenerInfo.listeners().values())) + rlm.onEndPointCreated(endpoint) + } + rlm.startup() + } metadataPublishers.add(new MetadataVersionConfigValidator(config, sharedServer.metadataPublishingFaultHandler)) brokerMetadataPublisher = new BrokerMetadataPublisher(config, @@ -477,7 +499,6 @@ class BrokerServer( groupCoordinator, transactionCoordinator, shareCoordinator, - sharePartitionManager, new DynamicConfigPublisher( config, sharedServer.metadataPublishingFaultHandler, @@ -497,12 +518,12 @@ class BrokerServer( quotaManagers, ), new ScramPublisher( - config.nodeId, + config, sharedServer.metadataPublishingFaultHandler, "broker", credentialProvider), new DelegationTokenPublisher( - config.nodeId, + config, sharedServer.metadataPublishingFaultHandler, "broker", tokenManager), @@ -510,7 +531,7 @@ class BrokerServer( config.nodeId, sharedServer.metadataPublishingFaultHandler, "broker", - authorizerPlugin.toJava + authorizer ), sharedServer.initialBrokerMetadataLoadFaultHandler, sharedServer.metadataPublishingFaultHandler @@ -522,7 +543,7 @@ class BrokerServer( }) metadataPublishers.add(brokerMetadataPublisher) brokerRegistrationTracker = new BrokerRegistrationTracker(config.brokerId, - () => lifecycleManager.resendBrokerRegistration()) + () => lifecycleManager.resendBrokerRegistrationUnlessZkMode()) metadataPublishers.add(brokerRegistrationTracker) @@ -567,7 +588,7 @@ class BrokerServer( // authorizer future is completed. val endpointReadyFutures = { val builder = new EndpointReadyFutures.Builder() - builder.build(authorizerPlugin.toJava, + builder.build(authorizer.toJava, new KafkaAuthorizerServerInfo( new ClusterResource(clusterId), config.nodeId, @@ -602,68 +623,77 @@ class BrokerServer( // Create group coordinator, but don't start it until we've started replica manager. // Hardcode Time.SYSTEM for now as some Streams tests fail otherwise, it would be good // to fix the underlying issue. - val time = Time.SYSTEM - val serde = new GroupCoordinatorRecordSerde - val timer = new SystemTimerReaper( - "group-coordinator-reaper", - new SystemTimer("group-coordinator") - ) - val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( - time, - tp => replicaManager.getLog(tp).toJava, - tp => replicaManager.getLogEndOffset(tp).map(Long.box).toJava, - serde, - config.groupCoordinatorConfig.offsetsLoadBufferSize, - CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS - ) - val writer = new CoordinatorPartitionWriter( - replicaManager - ) - new GroupCoordinatorService.Builder(config.brokerId, config.groupCoordinatorConfig) - .withTime(time) - .withTimer(timer) - .withLoader(loader) - .withWriter(writer) - .withCoordinatorRuntimeMetrics(new GroupCoordinatorRuntimeMetrics(metrics)) - .withGroupCoordinatorMetrics(new GroupCoordinatorMetrics(KafkaYammerMetrics.defaultRegistry, metrics)) - .withGroupConfigManager(groupConfigManager) - .withPersister(persister) - .withAuthorizerPlugin(authorizerPlugin.toJava) - .build() + if (config.isNewGroupCoordinatorEnabled) { + val time = Time.SYSTEM + val serde = new GroupCoordinatorRecordSerde + val timer = new SystemTimerReaper( + "group-coordinator-reaper", + new SystemTimer("group-coordinator") + ) + val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( + time, + replicaManager, + serde, + config.groupCoordinatorConfig.offsetsLoadBufferSize + ) + val writer = new CoordinatorPartitionWriter( + replicaManager + ) + new GroupCoordinatorService.Builder(config.brokerId, config.groupCoordinatorConfig) + .withTime(time) + .withTimer(timer) + .withLoader(loader) + .withWriter(writer) + .withCoordinatorRuntimeMetrics(new GroupCoordinatorRuntimeMetrics(metrics)) + .withGroupCoordinatorMetrics(new GroupCoordinatorMetrics(KafkaYammerMetrics.defaultRegistry, metrics)) + .withGroupConfigManager(groupConfigManager) + .withAuthorizer(authorizer.toJava) + .build() + } else { + GroupCoordinatorAdapter( + config, + replicaManager, + Time.SYSTEM, + metrics + ) + } } - private def createShareCoordinator(): ShareCoordinator = { - val time = Time.SYSTEM - val timer = new SystemTimerReaper( - "share-coordinator-reaper", - new SystemTimer("share-coordinator") - ) - - val serde = new ShareCoordinatorRecordSerde - val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( - time, - tp => replicaManager.getLog(tp).toJava, - tp => replicaManager.getLogEndOffset(tp).map(Long.box).toJava, - serde, - config.shareCoordinatorConfig.shareCoordinatorLoadBufferSize(), - CoordinatorLoaderImpl.DEFAULT_COMMIT_INTERVAL_OFFSETS - ) - val writer = new CoordinatorPartitionWriter( - replicaManager - ) - new ShareCoordinatorService.Builder(config.brokerId, config.shareCoordinatorConfig) - .withTimer(timer) - .withTime(time) - .withLoader(loader) - .withWriter(writer) - .withCoordinatorRuntimeMetrics(new ShareCoordinatorRuntimeMetrics(metrics)) - .withCoordinatorMetrics(new ShareCoordinatorMetrics(metrics)) - .withShareGroupEnabledConfigSupplier(() => config.shareGroupConfig.isShareGroupEnabled) - .build() + private def createShareCoordinator(): Option[ShareCoordinator] = { + if (config.shareGroupConfig.isShareGroupEnabled && + config.shareGroupConfig.shareGroupPersisterClassName().nonEmpty) { + val time = Time.SYSTEM + val timer = new SystemTimerReaper( + "share-coordinator-reaper", + new SystemTimer("share-coordinator") + ) + + val serde = new ShareCoordinatorRecordSerde + val loader = new CoordinatorLoaderImpl[CoordinatorRecord]( + time, + replicaManager, + serde, + config.shareCoordinatorConfig.shareCoordinatorLoadBufferSize() + ) + val writer = new CoordinatorPartitionWriter( + replicaManager + ) + Some(new ShareCoordinatorService.Builder(config.brokerId, config.shareCoordinatorConfig) + .withTimer(timer) + .withTime(time) + .withLoader(loader) + .withWriter(writer) + .withCoordinatorRuntimeMetrics(new ShareCoordinatorRuntimeMetrics(metrics)) + .withCoordinatorMetrics(new ShareCoordinatorMetrics(metrics)) + .build()) + } else { + None + } } private def createShareStatePersister(): Persister = { - if (config.shareGroupConfig.shareGroupPersisterClassName.nonEmpty) { + if (config.shareGroupConfig.isShareGroupEnabled && + config.shareGroupConfig.shareGroupPersisterClassName.nonEmpty) { val klass = Utils.loadClass(config.shareGroupConfig.shareGroupPersisterClassName, classOf[Object]).asInstanceOf[Class[Persister]] if (klass.getName.equals(classOf[DefaultStatePersister].getName)) { @@ -671,7 +701,7 @@ class BrokerServer( .newInstance( new PersisterStateManager( NetworkUtils.buildNetworkClient("Persister", config, metrics, Time.SYSTEM, new LogContext(s"[Persister broker=${config.brokerId}]")), - new ShareCoordinatorMetadataCacheHelperImpl(metadataCache, key => shareCoordinator.partitionFor(key), config.interBrokerListenerName), + new ShareCoordinatorMetadataCacheHelperImpl(metadataCache, key => shareCoordinator.get.partitionFor(key), config.interBrokerListenerName), Time.SYSTEM, new SystemTimerReaper( "persister-state-manager-reaper", @@ -679,42 +709,31 @@ class BrokerServer( ) ) ) - } else if (klass.getName.equals(classOf[NoOpStatePersister].getName)) { - info("Using no-op persister") - new NoOpStatePersister() + } else if (klass.getName.equals(classOf[NoOpShareStatePersister].getName)) { + info("Using no op persister") + new NoOpShareStatePersister() } else { - error("Unknown persister specified. Persister is only factory-pluggable!") - throw new IllegalArgumentException("Unknown persister specified " + config.shareGroupConfig.shareGroupPersisterClassName) + error("Unknown persister specified. Persister is only factory pluggable!") + throw new IllegalArgumentException("Unknown persiser specified " + config.shareGroupConfig.shareGroupPersisterClassName) } } else { - // in case share coordinator not enabled or persister class name deliberately empty (key=) - info("Using no-op persister") - new NoOpStatePersister() + // in case share coordinator not enabled or + // persister class name deliberately empty (key=) + info("Using no op persister") + new NoOpShareStatePersister() } } - protected def createRemoteLogManager(listenerInfo: ListenerInfo): Option[RemoteLogManager] = { - if (config.remoteLogManagerConfig.isRemoteStorageSystemEnabled) { - val listenerName = config.remoteLogManagerConfig.remoteLogMetadataManagerListenerName() - val endpoint = if (listenerName != null) { - Some(listenerInfo.listeners().values().stream - .filter(e => ListenerName.normalised(e.listener()).equals(ListenerName.normalised(listenerName))) - .findFirst() - .orElseThrow(() => new ConfigException(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP, - listenerName, "Should be set as a listener name within valid broker listener name list: " + listenerInfo.listeners().values()))) - } else { - None - } - - val rlm = new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.get(0), clusterId, time, + protected def createRemoteLogManager(): Option[RemoteLogManager] = { + if (config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) { + Some(new RemoteLogManager(config.remoteLogManagerConfig, config.brokerId, config.logDirs.head, clusterId, time, (tp: TopicPartition) => logManager.getLog(tp).toJava, (tp: TopicPartition, remoteLogStartOffset: java.lang.Long) => { logManager.getLog(tp).foreach { log => log.updateLogStartOffsetFromRemoteTier(remoteLogStartOffset) } }, - brokerTopicStats, metrics, endpoint.toJava) - Some(rlm) + brokerTopicStats, metrics)) } else { None } @@ -757,7 +776,7 @@ class BrokerServer( CoreUtils.swallow(dataPlaneRequestHandlerPool.shutdown(), this) if (dataPlaneRequestProcessor != null) CoreUtils.swallow(dataPlaneRequestProcessor.close(), this) - authorizerPlugin.foreach(Utils.closeQuietly(_, "authorizer plugin")) + authorizer.foreach(Utils.closeQuietly(_, "authorizer")) /** * We must shutdown the scheduler early because otherwise, the scheduler could touch other @@ -779,11 +798,11 @@ class BrokerServer( CoreUtils.swallow(groupConfigManager.close(), this) if (groupCoordinator != null) CoreUtils.swallow(groupCoordinator.shutdown(), this) - if (shareCoordinator != null) - CoreUtils.swallow(shareCoordinator.shutdown(), this) + if (shareCoordinator.isDefined) + CoreUtils.swallow(shareCoordinator.get.shutdown(), this) - if (autoTopicCreationManager != null) - CoreUtils.swallow(autoTopicCreationManager.close(), this) + if (tokenManager != null) + CoreUtils.swallow(tokenManager.shutdown(), this) if (assignmentsManager != null) CoreUtils.swallow(assignmentsManager.close(), this) @@ -821,6 +840,8 @@ class BrokerServer( if (persister != null) CoreUtils.swallow(persister.stop(), this) + isShuttingDown.set(false) + if (lifecycleManager != null) CoreUtils.swallow(lifecycleManager.close(), this) diff --git a/core/src/main/scala/kafka/server/ClientQuotaManager.scala b/core/src/main/scala/kafka/server/ClientQuotaManager.scala new file mode 100644 index 0000000000000..6ffc8e82d0f88 --- /dev/null +++ b/core/src/main/scala/kafka/server/ClientQuotaManager.scala @@ -0,0 +1,647 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import java.{lang, util} +import java.util.concurrent.{ConcurrentHashMap, DelayQueue, TimeUnit} +import java.util.concurrent.locks.ReentrantReadWriteLock +import java.util.function.Consumer +import kafka.network.RequestChannel +import kafka.server.ClientQuotaManager._ +import kafka.utils.Logging +import org.apache.kafka.common.{Cluster, MetricName} +import org.apache.kafka.common.metrics._ +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.metrics.stats.{Avg, CumulativeSum, Rate} +import org.apache.kafka.common.security.auth.KafkaPrincipal +import org.apache.kafka.common.utils.{Sanitizer, Time} +import org.apache.kafka.server.config.ClientQuotaManagerConfig +import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaEntity, ClientQuotaType, QuotaType, QuotaUtils, SensorAccess, ThrottleCallback, ThrottledChannel} +import org.apache.kafka.server.util.ShutdownableThread +import org.apache.kafka.network.Session + +import scala.jdk.CollectionConverters._ + +/** + * Represents the sensors aggregated per client + * @param metricTags Quota metric tags for the client + * @param quotaSensor @Sensor that tracks the quota + * @param throttleTimeSensor @Sensor that tracks the throttle time + */ +case class ClientSensors(metricTags: Map[String, String], quotaSensor: Sensor, throttleTimeSensor: Sensor) + +object QuotaTypes { + val NoQuotas = 0 + val ClientIdQuotaEnabled = 1 + val UserQuotaEnabled = 2 + val UserClientIdQuotaEnabled = 4 + val CustomQuotas = 8 // No metric update optimizations are used with custom quotas +} + +object ClientQuotaManager { + // Purge sensors after 1 hour of inactivity + val InactiveSensorExpirationTimeSeconds = 3600 + private val DefaultName = "" + val DefaultClientIdQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(None, Some(DefaultClientIdEntity)) + val DefaultUserQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(Some(DefaultUserEntity), None) + val DefaultUserClientIdQuotaEntity: KafkaQuotaEntity = KafkaQuotaEntity(Some(DefaultUserEntity), Some(DefaultClientIdEntity)) + + sealed trait BaseUserEntity extends ClientQuotaEntity.ConfigEntity + + case class UserEntity(sanitizedUser: String) extends BaseUserEntity { + override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.USER + override def name: String = Sanitizer.desanitize(sanitizedUser) + override def toString: String = s"user $sanitizedUser" + } + + case class ClientIdEntity(clientId: String) extends ClientQuotaEntity.ConfigEntity { + override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.CLIENT_ID + override def name: String = clientId + override def toString: String = s"client-id $clientId" + } + + case object DefaultUserEntity extends BaseUserEntity { + override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.DEFAULT_USER + override def name: String = DefaultName + override def toString: String = "default user" + } + + case object DefaultClientIdEntity extends ClientQuotaEntity.ConfigEntity { + override def entityType: ClientQuotaEntity.ConfigEntityType = ClientQuotaEntity.ConfigEntityType.DEFAULT_CLIENT_ID + override def name: String = DefaultName + override def toString: String = "default client-id" + } + + case class KafkaQuotaEntity(userEntity: Option[BaseUserEntity], + clientIdEntity: Option[ClientQuotaEntity.ConfigEntity]) extends ClientQuotaEntity { + override def configEntities: util.List[ClientQuotaEntity.ConfigEntity] = + (userEntity.toList ++ clientIdEntity.toList).asJava + + def sanitizedUser: String = userEntity.map { + case entity: UserEntity => entity.sanitizedUser + case DefaultUserEntity => DefaultName + }.getOrElse("") + + def clientId: String = clientIdEntity.map(_.name).getOrElse("") + + override def toString: String = { + val user = userEntity.map(_.toString).getOrElse("") + val clientId = clientIdEntity.map(_.toString).getOrElse("") + s"$user $clientId".trim + } + } + + object DefaultTags { + val User = "user" + val ClientId = "client-id" + } +} + +/** + * Helper class that records per-client metrics. It is also responsible for maintaining Quota usage statistics + * for all clients. + *

          + * Quotas can be set at , user or client-id levels. For a given client connection, + * the most specific quota matching the connection will be applied. For example, if both a + * and a user quota match a connection, the quota will be used. Otherwise, user quota takes + * precedence over client-id quota. The order of precedence is: + *

            + *
          • /config/users//clients/ + *
          • /config/users//clients/ + *
          • /config/users/ + *
          • /config/users//clients/ + *
          • /config/users//clients/ + *
          • /config/users/ + *
          • /config/clients/ + *
          • /config/clients/ + *
          + * Quota limits including defaults may be updated dynamically. The implementation is optimized for the case + * where a single level of quotas is configured. + * + * @param config @ClientQuotaManagerConfig quota configs + * @param metrics @Metrics Metrics instance + * @param quotaType Quota type of this quota manager + * @param time @Time object to use + * @param threadNamePrefix The thread prefix to use + * @param clientQuotaCallback An optional @ClientQuotaCallback + */ +class ClientQuotaManager(private val config: ClientQuotaManagerConfig, + private val metrics: Metrics, + private val quotaType: QuotaType, + private val time: Time, + private val threadNamePrefix: String, + private val clientQuotaCallback: Option[ClientQuotaCallback] = None) extends Logging { + + private val lock = new ReentrantReadWriteLock() + private val sensorAccessor = new SensorAccess(lock, metrics) + private val quotaCallback = clientQuotaCallback.getOrElse(new DefaultQuotaCallback) + private val clientQuotaType = QuotaType.toClientQuotaType(quotaType) + + @volatile + private var quotaTypesEnabled = clientQuotaCallback match { + case Some(_) => QuotaTypes.CustomQuotas + case None => QuotaTypes.NoQuotas + } + + private val delayQueueSensor = metrics.sensor(quotaType.toString + "-delayQueue") + delayQueueSensor.add(metrics.metricName("queue-size", quotaType.toString, + "Tracks the size of the delay queue"), new CumulativeSum()) + + private val delayQueue = new DelayQueue[ThrottledChannel]() + private[server] val throttledChannelReaper = new ThrottledChannelReaper(delayQueue, threadNamePrefix) + start() // Use start method to keep spotbugs happy + private def start(): Unit = { + throttledChannelReaper.start() + } + + /** + * Reaper thread that triggers channel unmute callbacks on all throttled channels + * @param delayQueue DelayQueue to dequeue from + */ + class ThrottledChannelReaper(delayQueue: DelayQueue[ThrottledChannel], prefix: String) extends ShutdownableThread( + s"${prefix}ThrottledChannelReaper-$quotaType", false) { + + override def doWork(): Unit = { + val throttledChannel: ThrottledChannel = delayQueue.poll(1, TimeUnit.SECONDS) + if (throttledChannel != null) { + // Decrement the size of the delay queue + delayQueueSensor.record(-1) + // Notify the socket server that throttling is done for this channel, so that it can try to unmute the channel. + throttledChannel.notifyThrottlingDone() + } + } + } + + /** + * Returns true if any quotas are enabled for this quota manager. This is used + * to determine if quota related metrics should be created. + * Note: If any quotas (static defaults, dynamic defaults or quota overrides) have + * been configured for this broker at any time for this quota type, quotasEnabled will + * return true until the next broker restart, even if all quotas are subsequently deleted. + */ + def quotasEnabled: Boolean = quotaTypesEnabled != QuotaTypes.NoQuotas + + /** + * See {recordAndGetThrottleTimeMs}. + */ + def maybeRecordAndGetThrottleTimeMs(request: RequestChannel.Request, value: Double, timeMs: Long): Int = { + maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId, value, timeMs) + } + + /** + * See {recordAndGetThrottleTimeMs}. + */ + def maybeRecordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { + // Record metrics only if quotas are enabled. + if (quotasEnabled) { + recordAndGetThrottleTimeMs(session, clientId, value, timeMs) + } else { + 0 + } + } + + /** + * Records that a user/clientId accumulated or would like to accumulate the provided amount at the + * the specified time, returns throttle time in milliseconds. + * + * @param session The session from which the user is extracted + * @param clientId The client id + * @param value The value to accumulate + * @param timeMs The time at which to accumulate the value + * @return The throttle time in milliseconds defines as the time to wait until the average + * rate gets back to the defined quota + */ + def recordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + try { + clientSensors.quotaSensor.record(value, timeMs, true) + 0 + } catch { + case e: QuotaViolationException => + val throttleTimeMs = throttleTime(e, timeMs).toInt + debug(s"Quota violated for sensor (${clientSensors.quotaSensor.name}). Delay time: ($throttleTimeMs)") + throttleTimeMs + } + } + + /** + * Records that a user/clientId changed some metric being throttled without checking for + * quota violation. The aggregate value will subsequently be used for throttling when the + * next request is processed. + */ + def recordNoThrottle(session: Session, clientId: String, value: Double): Unit = { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + clientSensors.quotaSensor.record(value, time.milliseconds(), false) + } + + /** + * "Unrecord" the given value that has already been recorded for the given user/client by recording a negative value + * of the same quantity. + * + * For a throttled fetch, the broker should return an empty response and thus should not record the value. Ideally, + * we would like to compute the throttle time before actually recording the value, but the current Sensor code + * couples value recording and quota checking very tightly. As a workaround, we will unrecord the value for the fetch + * in case of throttling. Rate keeps the sum of values that fall in each time window, so this should bring the + * overall sum back to the previous value. + */ + def unrecordQuotaSensor(request: RequestChannel.Request, value: Double, timeMs: Long): Unit = { + val clientSensors = getOrCreateQuotaSensors(request.session, request.header.clientId) + clientSensors.quotaSensor.record(value * -1, timeMs, false) + } + + /** + * Returns maximum value that could be recorded without guaranteed throttling. + * Recording any larger value will always be throttled, even if no other values were recorded in the quota window. + * This is used for deciding the maximum bytes that can be fetched at once + */ + def getMaxValueInQuotaWindow(session: Session, clientId: String): Double = { + if (quotasEnabled) { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + Option(quotaCallback.quotaLimit(clientQuotaType, clientSensors.metricTags.asJava)) + .map(_.toDouble * (config.numQuotaSamples - 1) * config.quotaWindowSizeSeconds) + .getOrElse(Double.MaxValue) + } else { + Double.MaxValue + } + } + + /** + * Throttle a client by muting the associated channel for the given throttle time. + * + * @param request client request + * @param throttleTimeMs Duration in milliseconds for which the channel is to be muted. + * @param throttleCallback Callback for channel throttling + */ + def throttle( + request: RequestChannel.Request, + throttleCallback: ThrottleCallback, + throttleTimeMs: Int + ): Unit = { + if (throttleTimeMs > 0) { + val clientSensors = getOrCreateQuotaSensors(request.session, request.headerForLoggingOrThrottling().clientId) + clientSensors.throttleTimeSensor.record(throttleTimeMs) + val throttledChannel = new ThrottledChannel(time, throttleTimeMs, throttleCallback) + delayQueue.add(throttledChannel) + delayQueueSensor.record() + debug("Channel throttled for sensor (%s). Delay time: (%d)".format(clientSensors.quotaSensor.name(), throttleTimeMs)) + } + } + + /** + * Returns the quota for the client with the specified (non-encoded) user principal and client-id. + * + * Note: this method is expensive, it is meant to be used by tests only + */ + def quota(user: String, clientId: String): Quota = { + val userPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, user) + quota(userPrincipal, clientId) + } + + /** + * Returns the quota for the client with the specified user principal and client-id. + * + * Note: this method is expensive, it is meant to be used by tests only + */ + def quota(userPrincipal: KafkaPrincipal, clientId: String): Quota = { + val metricTags = quotaCallback.quotaMetricTags(clientQuotaType, userPrincipal, clientId) + Quota.upperBound(quotaLimit(metricTags)) + } + + private def quotaLimit(metricTags: util.Map[String, String]): Double = { + Option(quotaCallback.quotaLimit(clientQuotaType, metricTags)).map(_.toDouble).getOrElse(Long.MaxValue) + } + + /** + * This calculates the amount of time needed to bring the metric within quota + * assuming that no new metrics are recorded. + * + * See {QuotaUtils.throttleTime} for the details. + */ + protected def throttleTime(e: QuotaViolationException, timeMs: Long): Long = { + QuotaUtils.throttleTime(e, timeMs) + } + + /** + * This function either returns the sensors for a given client id or creates them if they don't exist + * First sensor of the tuple is the quota enforcement sensor. Second one is the throttle time sensor + */ + def getOrCreateQuotaSensors(session: Session, clientId: String): ClientSensors = { + // Use cached sanitized principal if using default callback + val metricTags = quotaCallback match { + case callback: DefaultQuotaCallback => callback.quotaMetricTags(session.sanitizedUser, clientId) + case _ => quotaCallback.quotaMetricTags(clientQuotaType, session.principal, clientId).asScala.toMap + } + // Names of the sensors to access + val sensors = ClientSensors( + metricTags, + sensorAccessor.getOrCreate( + getQuotaSensorName(metricTags), + ClientQuotaManager.InactiveSensorExpirationTimeSeconds, + sensor => registerQuotaMetrics(metricTags)(sensor) + ), + sensorAccessor.getOrCreate( + getThrottleTimeSensorName(metricTags), + ClientQuotaManager.InactiveSensorExpirationTimeSeconds, + sensor => sensor.add(throttleMetricName(metricTags), new Avg) + ) + ) + if (quotaCallback.quotaResetRequired(clientQuotaType)) + updateQuotaMetricConfigs() + sensors + } + + protected def registerQuotaMetrics(metricTags: Map[String, String])(sensor: Sensor): Unit = { + sensor.add( + clientQuotaMetricName(metricTags), + new Rate, + getQuotaMetricConfig(metricTags) + ) + } + + private def metricTagsToSensorSuffix(metricTags: Map[String, String]): String = + metricTags.values.mkString(":") + + private def getThrottleTimeSensorName(metricTags: Map[String, String]): String = + s"${quotaType}ThrottleTime-${metricTagsToSensorSuffix(metricTags)}" + + private def getQuotaSensorName(metricTags: Map[String, String]): String = + s"$quotaType-${metricTagsToSensorSuffix(metricTags)}" + + protected def getQuotaMetricConfig(metricTags: Map[String, String]): MetricConfig = { + getQuotaMetricConfig(quotaLimit(metricTags.asJava)) + } + + private def getQuotaMetricConfig(quotaLimit: Double): MetricConfig = { + new MetricConfig() + .timeWindow(config.quotaWindowSizeSeconds, TimeUnit.SECONDS) + .samples(config.numQuotaSamples) + .quota(new Quota(quotaLimit, true)) + } + + protected def getOrCreateSensor(sensorName: String, expirationTimeSeconds: Long, registerMetrics: Consumer[Sensor]): Sensor = { + sensorAccessor.getOrCreate( + sensorName, + expirationTimeSeconds, + registerMetrics) + } + + /** + * Overrides quotas for , or or the dynamic defaults + * for any of these levels. + * + * @param userEntity user to override if quota applies to or + * @param clientEntity sanitized client entity to override if quota applies to or + * @param quota custom quota to apply or None if quota override is being removed + */ + def updateQuota( + userEntity: Option[BaseUserEntity], + clientEntity: Option[ClientQuotaEntity.ConfigEntity], + quota: Option[Quota] + ): Unit = { + /* + * Acquire the write lock to apply changes in the quota objects. + * This method changes the quota in the overriddenQuota map and applies the update on the actual KafkaMetric object (if it exists). + * If the KafkaMetric hasn't been created, the most recent value will be used from the overriddenQuota map. + * The write lock prevents quota update and creation at the same time. It also guards against concurrent quota change + * notifications + */ + lock.writeLock().lock() + try { + val quotaEntity = KafkaQuotaEntity(userEntity, clientEntity) + + if (userEntity.nonEmpty) { + if (quotaEntity.clientIdEntity.nonEmpty) + quotaTypesEnabled |= QuotaTypes.UserClientIdQuotaEnabled + else + quotaTypesEnabled |= QuotaTypes.UserQuotaEnabled + } else if (clientEntity.nonEmpty) + quotaTypesEnabled |= QuotaTypes.ClientIdQuotaEnabled + + quota match { + case Some(newQuota) => quotaCallback.updateQuota(clientQuotaType, quotaEntity, newQuota.bound) + case None => quotaCallback.removeQuota(clientQuotaType, quotaEntity) + } + val updatedEntity = if (userEntity.contains(DefaultUserEntity) || clientEntity.contains(DefaultClientIdEntity)) + None // more than one entity may need updating, so `updateQuotaMetricConfigs` will go through all metrics + else + Some(quotaEntity) + updateQuotaMetricConfigs(updatedEntity) + + } finally { + lock.writeLock().unlock() + } + } + + /** + * Updates metrics configs. This is invoked when quota configs are updated when partitions leaders change + * and custom callbacks that implement partition-based quotas have updated quotas. + * + * @param updatedQuotaEntity If set to one entity and quotas have only been enabled at one + * level, then an optimized update is performed with a single metric update. If None is provided, + * or if custom callbacks are used or if multi-level quotas have been enabled, all metric configs + * are checked and updated if required. + */ + def updateQuotaMetricConfigs(updatedQuotaEntity: Option[KafkaQuotaEntity] = None): Unit = { + val allMetrics = metrics.metrics() + + // If using custom quota callbacks or if multiple-levels of quotas are defined or + // if this is a default quota update, traverse metrics to find all affected values. + // Otherwise, update just the single matching one. + val singleUpdate = quotaTypesEnabled match { + case QuotaTypes.NoQuotas | QuotaTypes.ClientIdQuotaEnabled | QuotaTypes.UserQuotaEnabled | QuotaTypes.UserClientIdQuotaEnabled => + updatedQuotaEntity.nonEmpty + case _ => false + } + if (singleUpdate) { + val quotaEntity = updatedQuotaEntity.getOrElse(throw new IllegalStateException("Quota entity not specified")) + val user = quotaEntity.sanitizedUser + val clientId = quotaEntity.clientId + val metricTags = Map(DefaultTags.User -> user, DefaultTags.ClientId -> clientId) + + val quotaMetricName = clientQuotaMetricName(metricTags) + // Change the underlying metric config if the sensor has been created + val metric = allMetrics.get(quotaMetricName) + if (metric != null) { + Option(quotaLimit(metricTags.asJava)).foreach { newQuota => + info(s"Sensor for $quotaEntity already exists. Changing quota to $newQuota in MetricConfig") + metric.config(getQuotaMetricConfig(newQuota)) + } + } + } else { + val quotaMetricName = clientQuotaMetricName(Map.empty) + allMetrics.forEach { (metricName, metric) => + if (metricName.name == quotaMetricName.name && metricName.group == quotaMetricName.group) { + val metricTags = metricName.tags + Option(quotaLimit(metricTags)).foreach { newQuota => + if (newQuota != metric.config.quota.bound) { + info(s"Sensor for quota-id $metricTags already exists. Setting quota to $newQuota in MetricConfig") + metric.config(getQuotaMetricConfig(newQuota)) + } + } + } + } + } + } + + /** + * Returns the MetricName of the metric used for the quota. The name is used to create the + * metric but also to find the metric when the quota is changed. + */ + protected def clientQuotaMetricName(quotaMetricTags: Map[String, String]): MetricName = { + metrics.metricName("byte-rate", quotaType.toString, + "Tracking byte-rate per user/client-id", + quotaMetricTags.asJava) + } + + private def throttleMetricName(quotaMetricTags: Map[String, String]): MetricName = { + metrics.metricName("throttle-time", + quotaType.toString, + "Tracking average throttle-time per user/client-id", + quotaMetricTags.asJava) + } + + def initiateShutdown(): Unit = { + throttledChannelReaper.initiateShutdown() + // improve shutdown time by waking up any ShutdownableThread(s) blocked on poll by sending a no-op + delayQueue.add(new ThrottledChannel(time, 0, new ThrottleCallback { + override def startThrottling(): Unit = {} + override def endThrottling(): Unit = {} + })) + } + + def shutdown(): Unit = { + initiateShutdown() + throttledChannelReaper.awaitShutdown() + } + + private class DefaultQuotaCallback extends ClientQuotaCallback { + private val overriddenQuotas = new ConcurrentHashMap[ClientQuotaEntity, Quota]() + + override def configure(configs: util.Map[String, _]): Unit = {} + + override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = { + quotaMetricTags(Sanitizer.sanitize(principal.getName), clientId).asJava + } + + override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = { + val sanitizedUser = metricTags.get(DefaultTags.User) + val clientId = metricTags.get(DefaultTags.ClientId) + var quota: Quota = null + + if (sanitizedUser != null && clientId != null) { + val userEntity = Some(UserEntity(sanitizedUser)) + val clientIdEntity = Some(ClientIdEntity(clientId)) + if (sanitizedUser.nonEmpty && clientId.nonEmpty) { + // /config/users//clients/ + quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, clientIdEntity)) + if (quota == null) { + // /config/users//clients/ + quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, Some(DefaultClientIdEntity))) + } + if (quota == null) { + // /config/users//clients/ + quota = overriddenQuotas.get(KafkaQuotaEntity(Some(DefaultUserEntity), clientIdEntity)) + } + if (quota == null) { + // /config/users//clients/ + quota = overriddenQuotas.get(DefaultUserClientIdQuotaEntity) + } + } else if (sanitizedUser.nonEmpty) { + // /config/users/ + quota = overriddenQuotas.get(KafkaQuotaEntity(userEntity, None)) + if (quota == null) { + // /config/users/ + quota = overriddenQuotas.get(DefaultUserQuotaEntity) + } + } else if (clientId.nonEmpty) { + // /config/clients/ + quota = overriddenQuotas.get(KafkaQuotaEntity(None, clientIdEntity)) + if (quota == null) { + // /config/clients/ + quota = overriddenQuotas.get(DefaultClientIdQuotaEntity) + } + } + } + if (quota == null) null else quota.bound + } + + override def updateClusterMetadata(cluster: Cluster): Boolean = { + // Default quota callback does not use any cluster metadata + false + } + + override def updateQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity, newValue: Double): Unit = { + val quotaEntity = entity.asInstanceOf[KafkaQuotaEntity] + info(s"Changing $quotaType quota for $quotaEntity to $newValue") + overriddenQuotas.put(quotaEntity, new Quota(newValue, true)) + } + + override def removeQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity): Unit = { + val quotaEntity = entity.asInstanceOf[KafkaQuotaEntity] + info(s"Removing $quotaType quota for $quotaEntity") + overriddenQuotas.remove(quotaEntity) + } + + override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = false + + def quotaMetricTags(sanitizedUser: String, clientId: String) : Map[String, String] = { + val (userTag, clientIdTag) = quotaTypesEnabled match { + case QuotaTypes.NoQuotas | QuotaTypes.ClientIdQuotaEnabled => + ("", clientId) + case QuotaTypes.UserQuotaEnabled => + (sanitizedUser, "") + case QuotaTypes.UserClientIdQuotaEnabled => + (sanitizedUser, clientId) + case _ => + val userEntity = Some(UserEntity(sanitizedUser)) + val clientIdEntity = Some(ClientIdEntity(clientId)) + + var metricTags = (sanitizedUser, clientId) + // 1) /config/users//clients/ + if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, clientIdEntity))) { + // 2) /config/users//clients/ + metricTags = (sanitizedUser, clientId) + if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, Some(DefaultClientIdEntity)))) { + // 3) /config/users/ + metricTags = (sanitizedUser, "") + if (!overriddenQuotas.containsKey(KafkaQuotaEntity(userEntity, None))) { + // 4) /config/users//clients/ + metricTags = (sanitizedUser, clientId) + if (!overriddenQuotas.containsKey(KafkaQuotaEntity(Some(DefaultUserEntity), clientIdEntity))) { + // 5) /config/users//clients/ + metricTags = (sanitizedUser, clientId) + if (!overriddenQuotas.containsKey(DefaultUserClientIdQuotaEntity)) { + // 6) /config/users/ + metricTags = (sanitizedUser, "") + if (!overriddenQuotas.containsKey(DefaultUserQuotaEntity)) { + // 7) /config/clients/ + // 8) /config/clients/ + metricTags = ("", clientId) + } + } + } + } + } + } + metricTags + } + Map(DefaultTags.User -> userTag, DefaultTags.ClientId -> clientIdTag) + } + + override def close(): Unit = {} + } +} diff --git a/core/src/main/scala/kafka/server/ConfigAdminManager.scala b/core/src/main/scala/kafka/server/ConfigAdminManager.scala index 7394d2cfc43c6..319b45020d45d 100644 --- a/core/src/main/scala/kafka/server/ConfigAdminManager.scala +++ b/core/src/main/scala/kafka/server/ConfigAdminManager.scala @@ -20,6 +20,7 @@ import kafka.server.logger.RuntimeLoggerManager import java.util import java.util.Properties +import kafka.server.metadata.ConfigRepository import kafka.utils._ import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} import org.apache.kafka.clients.admin.AlterConfigOp.OpType @@ -35,7 +36,6 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.{Alte import org.apache.kafka.common.protocol.Errors.{INVALID_REQUEST, UNKNOWN_SERVER_ERROR} import org.apache.kafka.common.requests.ApiError import org.apache.kafka.common.resource.{Resource, ResourceType} -import org.apache.kafka.metadata.ConfigRepository import org.slf4j.{Logger, LoggerFactory} import scala.collection.{Map, Seq} @@ -58,8 +58,9 @@ import scala.jdk.CollectionConverters._ * KIP-412 added support for changing log4j log levels via IncrementalAlterConfigs, but * not via the original AlterConfigs. In retrospect, this would have been better off as a * separate RPC, since the semantics are quite different. In particular, KIP-226 configs - * are stored durably and persist across broker restarts, but KIP-412 log4j levels do not. - * However, we have to handle it here now in order to maintain compatibility. + * are stored durably (in ZK or KRaft) and persist across broker restarts, but KIP-412 + * log4j levels do not. However, we have to handle it here now in order to maintain + * compatibility. * * Configuration processing is split into two parts. * - The first step, called "preprocessing," handles setting KIP-412 log levels, validating @@ -68,10 +69,14 @@ import scala.jdk.CollectionConverters._ * - The second step is "persistence," and handles storing the configurations durably to our * metadata store. * - * The active controller performs its own configuration validation step in + * When KIP-590 forwarding is active (such as in KRaft mode), preprocessing will happen + * on the broker, while persistence will happen on the active controller. (If KIP-590 + * forwarding is not active, then both steps are done on the same broker.) + * + * In KRaft mode, the active controller performs its own configuration validation step in * [[kafka.server.ControllerConfigurationValidator]]. This is mainly important for * TOPIC resources, since we already validated changes to BROKER resources on the - * forwarding broker. The controller is also responsible for enforcing the configured + * forwarding broker. The KRaft controller is also responsible for enforcing the configured * [[org.apache.kafka.server.policy.AlterConfigPolicy]]. */ class ConfigAdminManager(nodeId: Int, @@ -151,7 +156,7 @@ class ConfigAdminManager(nodeId: Int, } catch { case t: Throwable => val err = ApiError.fromThrowable(t) - error(s"Error preprocessing incrementalAlterConfigs request on $configResource", t) + info(s"Error preprocessing incrementalAlterConfigs request on $configResource", t) results.put(resource, err) } } @@ -252,7 +257,7 @@ class ConfigAdminManager(nodeId: Int, } catch { case t: Throwable => val err = ApiError.fromThrowable(t) - error(s"Error preprocessing alterConfigs request on ${configResource}: ${err}") + info(s"Error preprocessing alterConfigs request on $configResource: $err") results.put(resource, err) } } diff --git a/core/src/main/scala/kafka/server/ConfigHandler.scala b/core/src/main/scala/kafka/server/ConfigHandler.scala index 3e82db290925e..0892afcfc6f0e 100644 --- a/core/src/main/scala/kafka/server/ConfigHandler.scala +++ b/core/src/main/scala/kafka/server/ConfigHandler.scala @@ -18,6 +18,7 @@ package kafka.server import java.util.{Collections, Properties} +import kafka.log.UnifiedLog import kafka.server.QuotaFactory.QuotaManagers import kafka.utils.Logging import org.apache.kafka.server.config.QuotaConfig @@ -25,8 +26,7 @@ import org.apache.kafka.common.metrics.Quota._ import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.server.ClientMetricsManager import org.apache.kafka.server.common.StopPartition -import org.apache.kafka.server.log.remote.TopicPartitionLog -import org.apache.kafka.storage.internals.log.{LogStartOffsetIncrementReason, ThrottledReplicaListValidator, UnifiedLog} +import org.apache.kafka.storage.internals.log.{LogStartOffsetIncrementReason, ThrottledReplicaListValidator} import scala.jdk.CollectionConverters._ import scala.collection.Seq @@ -54,7 +54,7 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, val wasRemoteLogEnabled = logs.exists(_.remoteLogEnabled()) val wasCopyDisabled = logs.exists(_.config.remoteLogCopyDisable()) - logManager.updateTopicConfig(topic, topicConfig, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled, + logManager.updateTopicConfig(topic, topicConfig, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), wasRemoteLogEnabled) maybeUpdateRemoteLogComponents(topic, logs, wasRemoteLogEnabled, wasCopyDisabled) } @@ -75,13 +75,13 @@ class TopicConfigHandler(private val replicaManager: ReplicaManager, if (isRemoteLogEnabled && (!wasRemoteLogEnabled || (wasCopyDisabled && !isCopyDisabled))) { val topicIds = Collections.singletonMap(topic, replicaManager.metadataCache.getTopicId(topic)) replicaManager.remoteLogManager.foreach(rlm => - rlm.onLeadershipChange((leaderPartitions.toSet: Set[TopicPartitionLog]).asJava, (followerPartitions.toSet: Set[TopicPartitionLog]).asJava, topicIds)) + rlm.onLeadershipChange(leaderPartitions.toSet.asJava, followerPartitions.toSet.asJava, topicIds)) } // When copy disabled, we should stop leaderCopyRLMTask, but keep expirationTask if (isRemoteLogEnabled && !wasCopyDisabled && isCopyDisabled) { replicaManager.remoteLogManager.foreach(rlm => { - rlm.stopLeaderCopyRLMTasks((leaderPartitions.toSet: Set[TopicPartitionLog] ).asJava) + rlm.stopLeaderCopyRLMTasks(leaderPartitions.toSet.asJava) }) } diff --git a/core/src/main/scala/kafka/server/ConfigHelper.scala b/core/src/main/scala/kafka/server/ConfigHelper.scala index 743937b54fca5..095a474441a13 100644 --- a/core/src/main/scala/kafka/server/ConfigHelper.scala +++ b/core/src/main/scala/kafka/server/ConfigHelper.scala @@ -20,9 +20,10 @@ package kafka.server import kafka.network.RequestChannel import java.util.{Collections, Properties} -import kafka.utils.Logging +import kafka.server.metadata.ConfigRepository +import kafka.utils.{Log4jController, Logging} import org.apache.kafka.common.acl.AclOperation.DESCRIBE_CONFIGS -import org.apache.kafka.common.config.{ConfigDef, ConfigResource} +import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigResource} import org.apache.kafka.common.errors.{ApiException, InvalidRequestException} import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DescribeConfigsRequestData.DescribeConfigsResource @@ -33,19 +34,20 @@ import org.apache.kafka.common.requests.DescribeConfigsResponse.ConfigSource import org.apache.kafka.common.resource.Resource.CLUSTER_NAME import org.apache.kafka.common.resource.ResourceType.{CLUSTER, GROUP, TOPIC} import org.apache.kafka.coordinator.group.GroupConfig -import org.apache.kafka.metadata.{ConfigRepository, MetadataCache} -import org.apache.kafka.server.ConfigHelperUtils.createResponseConfig import org.apache.kafka.server.config.ServerTopicConfigSynonyms -import org.apache.kafka.server.logger.LoggingController -import org.apache.kafka.server.metrics.ClientMetricsConfigs import org.apache.kafka.storage.internals.log.LogConfig +import scala.collection.mutable.ListBuffer import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOptional class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepository: ConfigRepository) extends Logging { + def allConfigs(config: AbstractConfig): mutable.Map[String, Any] = { + config.originals.asScala.filter(_._2 != null) ++ config.nonInternalValues.asScala + } + def handleDescribeConfigsRequest( request: RequestChannel.Request, authHelper: AuthHelper @@ -83,6 +85,21 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo includeSynonyms: Boolean, includeDocumentation: Boolean): List[DescribeConfigsResponseData.DescribeConfigsResult] = { resourceToConfigNames.map { resource => + + def createResponseConfig(configs: Map[String, Any], + createConfigEntry: (String, Any) => DescribeConfigsResponseData.DescribeConfigsResourceResult): DescribeConfigsResponseData.DescribeConfigsResult = { + val filteredConfigPairs = if (resource.configurationKeys == null || resource.configurationKeys.isEmpty) + configs.toBuffer + else + configs.filter { case (configName, _) => + resource.configurationKeys.asScala.contains(configName) + }.toBuffer + + val configEntries = filteredConfigPairs.map { case (name, value) => createConfigEntry(name, value) } + new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(Errors.NONE.code) + .setConfigs(configEntries.asJava) + } + try { val configResult = ConfigResource.Type.forId(resource.resourceType) match { case ConfigResource.Type.TOPIC => @@ -91,7 +108,7 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo if (metadataCache.contains(topic)) { val topicProps = configRepository.topicConfig(topic) val logConfig = LogConfig.fromProps(config.extractLogConfigMap, topicProps) - createResponseConfig(resource, logConfig, createTopicConfigEntry(logConfig, topicProps, includeSynonyms, includeDocumentation)(_, _)) + createResponseConfig(allConfigs(logConfig), createTopicConfigEntry(logConfig, topicProps, includeSynonyms, includeDocumentation)) } else { new DescribeConfigsResponseData.DescribeConfigsResult().setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) .setConfigs(Collections.emptyList[DescribeConfigsResponseData.DescribeConfigsResourceResult]) @@ -99,11 +116,11 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo case ConfigResource.Type.BROKER => if (resource.resourceName == null || resource.resourceName.isEmpty) - createResponseConfig(resource, config.dynamicConfig.currentDynamicDefaultConfigs.asJava, - createBrokerConfigEntry(perBrokerConfig = false, includeSynonyms, includeDocumentation)(_, _)) + createResponseConfig(config.dynamicConfig.currentDynamicDefaultConfigs, + createBrokerConfigEntry(perBrokerConfig = false, includeSynonyms, includeDocumentation)) else if (resourceNameToBrokerId(resource.resourceName) == config.brokerId) - createResponseConfig(resource, config, - createBrokerConfigEntry(perBrokerConfig = true, includeSynonyms, includeDocumentation)(_, _)) + createResponseConfig(allConfigs(config), + createBrokerConfigEntry(perBrokerConfig = true, includeSynonyms, includeDocumentation)) else throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} or empty string, but received ${resource.resourceName}") @@ -113,18 +130,27 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo else if (resourceNameToBrokerId(resource.resourceName) != config.brokerId) throw new InvalidRequestException(s"Unexpected broker id, expected ${config.brokerId} but received ${resource.resourceName}") else - createResponseConfig(resource, LoggingController.loggers, - (name: String, value: Object) => new DescribeConfigsResponseData.DescribeConfigsResourceResult().setName(name) + createResponseConfig(Log4jController.loggers, + (name, value) => new DescribeConfigsResponseData.DescribeConfigsResourceResult().setName(name) .setValue(value.toString).setConfigSource(ConfigSource.DYNAMIC_BROKER_LOGGER_CONFIG.id) .setIsSensitive(false).setReadOnly(false).setSynonyms(List.empty.asJava)) case ConfigResource.Type.CLIENT_METRICS => - if (resource.resourceName == null || resource.resourceName.isEmpty) { + val subscriptionName = resource.resourceName + if (subscriptionName == null || subscriptionName.isEmpty) { throw new InvalidRequestException("Client metrics subscription name must not be empty") } else { - val clientMetricsProps = configRepository.config(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, resource.resourceName)) - val clientMetricsConfig = ClientMetricsConfigs.fromProps(ClientMetricsConfigs.defaultConfigsMap(), clientMetricsProps) - createResponseConfig(resource, clientMetricsConfig, createClientMetricsConfigEntry(clientMetricsConfig, clientMetricsProps, includeSynonyms, includeDocumentation)(_, _)) + val entityProps = configRepository.config(new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName)) + val configEntries = new ListBuffer[DescribeConfigsResponseData.DescribeConfigsResourceResult]() + entityProps.forEach((name, value) => { + configEntries += new DescribeConfigsResponseData.DescribeConfigsResourceResult().setName(name.toString) + .setValue(value.toString).setConfigSource(ConfigSource.CLIENT_METRICS_CONFIG.id()) + .setIsSensitive(false).setReadOnly(false).setSynonyms(List.empty.asJava) + }) + + new DescribeConfigsResponseData.DescribeConfigsResult() + .setErrorCode(Errors.NONE.code) + .setConfigs(configEntries.asJava) } case ConfigResource.Type.GROUP => @@ -134,7 +160,7 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo } else { val groupProps = configRepository.groupConfig(group) val groupConfig = GroupConfig.fromProps(config.groupCoordinatorConfig.extractGroupConfigMap(config.shareGroupConfig), groupProps) - createResponseConfig(resource, groupConfig, createGroupConfigEntry(groupConfig, groupProps, includeSynonyms, includeDocumentation)(_, _)) + createResponseConfig(allConfigs(groupConfig), createGroupConfigEntry(groupConfig, groupProps, includeSynonyms, includeDocumentation)) } case resourceType => throw new InvalidRequestException(s"Unsupported resource type: $resourceType") @@ -159,8 +185,8 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo } } - private def createGroupConfigEntry(groupConfig: GroupConfig, groupProps: Properties, includeSynonyms: Boolean, includeDocumentation: Boolean) - (name: String, value: Any): DescribeConfigsResponseData.DescribeConfigsResourceResult = { + def createGroupConfigEntry(groupConfig: GroupConfig, groupProps: Properties, includeSynonyms: Boolean, includeDocumentation: Boolean) + (name: String, value: Any): DescribeConfigsResponseData.DescribeConfigsResourceResult = { val allNames = brokerSynonyms(name) val configEntryType = GroupConfig.configType(name).toScala val isSensitive = KafkaConfig.maybeSensitive(configEntryType) @@ -184,28 +210,6 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo .setDocumentation(configDocumentation).setConfigType(dataType.id) } - private def createClientMetricsConfigEntry(clientMetricsConfig: ClientMetricsConfigs, clientMetricsProps: Properties, includeSynonyms: Boolean, includeDocumentation: Boolean) - (name: String, value: Any): DescribeConfigsResponseData.DescribeConfigsResourceResult = { - val configEntryType = ClientMetricsConfigs.configType(name).toScala - val valueAsString = ConfigDef.convertToString(value, configEntryType.orNull) - val allSynonyms = { - if (!clientMetricsProps.containsKey(name)) { - List.empty - } else { - List(new DescribeConfigsResponseData.DescribeConfigsSynonym().setName(name).setValue(valueAsString) - .setSource(ConfigSource.CLIENT_METRICS_CONFIG.id)) - } - } - val source = if (allSynonyms.isEmpty) ConfigSource.DEFAULT_CONFIG.id else allSynonyms.head.source - val synonyms = if (!includeSynonyms) List.empty else allSynonyms - val dataType = configResponseType(configEntryType) - val configDocumentation = if (includeDocumentation) clientMetricsConfig.documentationOf(name) else null - new DescribeConfigsResponseData.DescribeConfigsResourceResult() - .setName(name).setValue(valueAsString).setConfigSource(source) - .setIsSensitive(false).setReadOnly(false).setSynonyms(synonyms.asJava) - .setDocumentation(configDocumentation).setConfigType(dataType.id) - } - def createTopicConfigEntry(logConfig: LogConfig, topicProps: Properties, includeSynonyms: Boolean, includeDocumentation: Boolean) (name: String, value: Any): DescribeConfigsResponseData.DescribeConfigsResourceResult = { val configEntryType = LogConfig.configType(name).toScala @@ -304,4 +308,4 @@ class ConfigHelper(metadataCache: MetadataCache, config: KafkaConfig, configRepo throw new InvalidRequestException(s"Broker id must be an integer, but it is: $resourceName") } } -} \ No newline at end of file +} diff --git a/core/src/main/scala/kafka/server/ControllerApis.scala b/core/src/main/scala/kafka/server/ControllerApis.scala index f10b769d9c12f..1e343e776d512 100644 --- a/core/src/main/scala/kafka/server/ControllerApis.scala +++ b/core/src/main/scala/kafka/server/ControllerApis.scala @@ -24,6 +24,7 @@ import java.util.Map.Entry import java.util.concurrent.CompletableFuture import java.util.function.Consumer import kafka.network.RequestChannel +import kafka.raft.RaftManager import kafka.server.QuotaFactory.QuotaManagers import kafka.server.logger.RuntimeLoggerManager import kafka.server.metadata.KRaftMetadataCache @@ -33,14 +34,15 @@ import org.apache.kafka.common.Uuid.ZERO_UUID import org.apache.kafka.common.acl.AclOperation.{ALTER, ALTER_CONFIGS, CLUSTER_ACTION, CREATE, CREATE_TOKENS, DELETE, DESCRIBE, DESCRIBE_CONFIGS} import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.{ApiException, ClusterAuthorizationException, InvalidRequestException, TopicDeletionDisabledException, UnsupportedVersionException} -import org.apache.kafka.common.internals.{FatalExitError, Plugin, Topic} +import org.apache.kafka.common.internals.FatalExitError +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => OldAlterConfigsResourceResponse} import org.apache.kafka.common.message.CreatePartitionsRequestData.CreatePartitionsTopic import org.apache.kafka.common.message.CreatePartitionsResponseData.CreatePartitionsTopicResult import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult import org.apache.kafka.common.message.DeleteTopicsResponseData.{DeletableTopicResult, DeletableTopicResultCollection} import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.AlterConfigsResourceResponse -import org.apache.kafka.common.message._ +import org.apache.kafka.common.message.{CreateTopicsRequestData, _} import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage, Errors} import org.apache.kafka.common.requests._ @@ -54,12 +56,8 @@ import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.metadata.{BrokerHeartbeatReply, BrokerRegistrationReply} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.raft.RaftManager -import org.apache.kafka.security.DelegationTokenManager -import org.apache.kafka.server.{ApiVersionManager, ProcessRole} import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.{ApiMessageAndVersion, RequestLocal} -import org.apache.kafka.server.quota.ControllerMutationQuota import scala.jdk.CollectionConverters._ @@ -69,7 +67,7 @@ import scala.jdk.CollectionConverters._ */ class ControllerApis( val requestChannel: RequestChannel, - val authorizerPlugin: Option[Plugin[Authorizer]], + val authorizer: Option[Authorizer], val quotas: QuotaManagers, val time: Time, val controller: Controller, @@ -82,11 +80,11 @@ class ControllerApis( ) extends ApiRequestHandler with Logging { this.logIdent = s"[ControllerApis nodeId=${config.nodeId}] " - val authHelper = new AuthHelper(authorizerPlugin) + val authHelper = new AuthHelper(authorizer) val configHelper = new ConfigHelper(metadataCache, config, metadataCache) val requestHelper = new RequestHandlerHelper(requestChannel, quotas, time) val runtimeLoggerManager = new RuntimeLoggerManager(config.nodeId, logger.underlying) - private val aclApis = new AclApis(authHelper, authorizerPlugin, requestHelper, ProcessRole.ControllerRole, config) + private val aclApis = new AclApis(authHelper, authorizer, requestHelper, "controller", config) def isClosed: Boolean = aclApis.isClosed @@ -188,7 +186,7 @@ class ControllerApis( def handleFetch(request: RequestChannel.Request): CompletableFuture[Unit] = { authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - handleRaftRequest(request, response => FetchResponse.of(response.asInstanceOf[FetchResponseData])) + handleRaftRequest(request, response => new FetchResponse(response.asInstanceOf[FetchResponseData])) } def handleFetchSnapshot(request: RequestChannel.Request): CompletableFuture[Unit] = { @@ -198,7 +196,7 @@ class ControllerApis( private def handleDeleteTopics(request: RequestChannel.Request): CompletableFuture[Unit] = { val deleteTopicsRequest = request.body[DeleteTopicsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 5) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 5) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, deleteTopicsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -231,9 +229,9 @@ class ControllerApis( // Check if topic deletion is enabled at all. if (!config.deleteTopicEnable) { if (apiVersion < 3) { - return CompletableFuture.failedFuture(new InvalidRequestException("This version does not support topic deletion.")) + throw new InvalidRequestException("Topic deletion is disabled.") } else { - return CompletableFuture.failedFuture(new TopicDeletionDisabledException()) + throw new TopicDeletionDisabledException() } } // The first step is to load up the names and IDs that have been provided by the @@ -362,7 +360,7 @@ class ControllerApis( private def handleCreateTopics(request: RequestChannel.Request): CompletableFuture[Unit] = { val createTopicsRequest = request.body[CreateTopicsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 6) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 6) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, createTopicsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -644,7 +642,9 @@ class ControllerApis( def createResponseCallback(requestThrottleMs: Int, e: Throwable): UnregisterBrokerResponse = { if (e != null) { - decommissionRequest.getErrorResponse(requestThrottleMs, e) + new UnregisterBrokerResponse(new UnregisterBrokerResponseData(). + setThrottleTimeMs(requestThrottleMs). + setErrorCode(Errors.forException(e).code)) } else { new UnregisterBrokerResponse(new UnregisterBrokerResponseData(). setThrottleTimeMs(requestThrottleMs)) @@ -797,7 +797,7 @@ class ControllerApis( authHelper.filterByAuthorized(request.context, ALTER, TOPIC, topics)(n => n) } val createPartitionsRequest = request.body[CreatePartitionsRequest] - val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request.session, request.header, 3) + val controllerMutationQuota = quotas.controllerMutation.newQuotaFor(request, strictSinceVersion = 3) val context = new ControllerRequestContext(request.context.header.data, request.context.principal, requestTimeoutMsToDeadlineNs(time, createPartitionsRequest.data.timeoutMs), controllerMutationQuotaRecorderFor(controllerMutationQuota)) @@ -971,7 +971,7 @@ class ControllerApis( new RenewDelegationTokenResponseData() .setThrottleTimeMs(requestThrottleMs) .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) - .setExpiryTimestampMs(DelegationTokenManager.ERROR_TIMESTAMP))) + .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) CompletableFuture.completedFuture[Unit](()) } else { val context = new ControllerRequestContext( @@ -995,7 +995,7 @@ class ControllerApis( new ExpireDelegationTokenResponseData() .setThrottleTimeMs(requestThrottleMs) .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) - .setExpiryTimestampMs(DelegationTokenManager.ERROR_TIMESTAMP))) + .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) CompletableFuture.completedFuture[Unit](()) } else { val context = new ControllerRequestContext( @@ -1071,7 +1071,7 @@ class ControllerApis( EndpointType.CONTROLLER, clusterId, () => registrationsPublisher.describeClusterControllers(request.context.listenerName()), - () => raftManager.client.leaderAndEpoch.leaderId().orElse(-1) + () => raftManager.leaderAndEpoch.leaderId().orElse(-1) ) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeClusterResponse(response.setThrottleTimeMs(requestThrottleMs))) diff --git a/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala b/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala new file mode 100644 index 0000000000000..1a644e30d9adc --- /dev/null +++ b/core/src/main/scala/kafka/server/ControllerMutationQuotaManager.scala @@ -0,0 +1,282 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import kafka.network.RequestChannel +import org.apache.kafka.common.MetricName +import org.apache.kafka.common.errors.ThrottlingQuotaExceededException +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.metrics.QuotaViolationException +import org.apache.kafka.common.metrics.Sensor +import org.apache.kafka.common.metrics.stats.Rate +import org.apache.kafka.common.metrics.stats.TokenBucket +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.utils.Time +import org.apache.kafka.network.Session +import org.apache.kafka.server.quota.{ClientQuotaCallback, QuotaType} +import org.apache.kafka.server.config.ClientQuotaManagerConfig + +import scala.jdk.CollectionConverters._ + +/** + * The ControllerMutationQuota trait defines a quota for a given user/clientId pair. Such + * quota is not meant to be cached forever but rather during the lifetime of processing + * a request. + */ +trait ControllerMutationQuota { + def isExceeded: Boolean + def record(permits: Double): Unit + def throttleTime: Int +} + +/** + * Default quota used when quota is disabled. + */ +object UnboundedControllerMutationQuota extends ControllerMutationQuota { + override def isExceeded: Boolean = false + override def record(permits: Double): Unit = () + override def throttleTime: Int = 0 +} + +/** + * The AbstractControllerMutationQuota is the base class of StrictControllerMutationQuota and + * PermissiveControllerMutationQuota. + * + * @param time @Time object to use + */ +abstract class AbstractControllerMutationQuota(private val time: Time) extends ControllerMutationQuota { + protected var lastThrottleTimeMs = 0L + protected var lastRecordedTimeMs = 0L + + protected def updateThrottleTime(e: QuotaViolationException, timeMs: Long): Unit = { + lastThrottleTimeMs = ControllerMutationQuotaManager.throttleTimeMs(e, timeMs) + lastRecordedTimeMs = timeMs + } + + override def throttleTime: Int = { + // If a throttle time has been recorded, we adjust it by deducting the time elapsed + // between the recording and now. We do this because `throttleTime` may be called + // long after having recorded it, especially when a request waits in the purgatory. + val deltaTimeMs = time.milliseconds - lastRecordedTimeMs + Math.max(0, lastThrottleTimeMs - deltaTimeMs).toInt + } +} + +/** + * The StrictControllerMutationQuota defines a strict quota for a given user/clientId pair. The + * quota is strict meaning that 1) it does not accept any mutations once the quota is exhausted + * until it gets back to the defined rate; and 2) it does not throttle for any number of mutations + * if quota is not already exhausted. + * + * @param time @Time object to use + * @param quotaSensor @Sensor object with a defined quota for a given user/clientId pair + */ +class StrictControllerMutationQuota(private val time: Time, + private val quotaSensor: Sensor) + extends AbstractControllerMutationQuota(time) { + + override def isExceeded: Boolean = lastThrottleTimeMs > 0 + + override def record(permits: Double): Unit = { + val timeMs = time.milliseconds + try { + quotaSensor synchronized { + quotaSensor.checkQuotas(timeMs) + quotaSensor.record(permits, timeMs, false) + } + } catch { + case e: QuotaViolationException => + updateThrottleTime(e, timeMs) + throw new ThrottlingQuotaExceededException(lastThrottleTimeMs.toInt, + Errors.THROTTLING_QUOTA_EXCEEDED.message) + } + } +} + +/** + * The PermissiveControllerMutationQuota defines a permissive quota for a given user/clientId pair. + * The quota is permissive meaning that 1) it does accept any mutations even if the quota is + * exhausted; and 2) it does throttle as soon as the quota is exhausted. + * + * @param time @Time object to use + * @param quotaSensor @Sensor object with a defined quota for a given user/clientId pair + */ +class PermissiveControllerMutationQuota(private val time: Time, + private val quotaSensor: Sensor) + extends AbstractControllerMutationQuota(time) { + + override def isExceeded: Boolean = false + + override def record(permits: Double): Unit = { + val timeMs = time.milliseconds + try { + quotaSensor.record(permits, timeMs, true) + } catch { + case e: QuotaViolationException => + updateThrottleTime(e, timeMs) + } + } +} + +object ControllerMutationQuotaManager { + + /** + * This calculates the amount of time needed to bring the TokenBucket within quota + * assuming that no new metrics are recorded. + * + * Basically, if a value < 0 is observed, the time required to bring it to zero is + * -value / refill rate (quota bound) * 1000. + */ + def throttleTimeMs(e: QuotaViolationException, timeMs: Long): Long = { + e.metric().measurable() match { + case _: TokenBucket => + Math.round(-e.value() / e.bound() * 1000) + case _ => throw new IllegalArgumentException( + s"Metric ${e.metric().metricName()} is not a TokenBucket metric, value ${e.metric().measurable()}") + } + } +} + +/** + * The ControllerMutationQuotaManager is a specialized ClientQuotaManager used in the context + * of throttling controller's operations/mutations. + * + * @param config @ClientQuotaManagerConfig quota configs + * @param metrics @Metrics Metrics instance + * @param time @Time object to use + * @param threadNamePrefix The thread prefix to use + * @param quotaCallback @ClientQuotaCallback ClientQuotaCallback to use + */ +class ControllerMutationQuotaManager(private val config: ClientQuotaManagerConfig, + private val metrics: Metrics, + private val time: Time, + private val threadNamePrefix: String, + private val quotaCallback: Option[ClientQuotaCallback]) + extends ClientQuotaManager(config, metrics, QuotaType.CONTROLLER_MUTATION, time, threadNamePrefix, quotaCallback) { + + override protected def clientQuotaMetricName(quotaMetricTags: Map[String, String]): MetricName = { + metrics.metricName("tokens", QuotaType.CONTROLLER_MUTATION.toString, + "Tracking remaining tokens in the token bucket per user/client-id", + quotaMetricTags.asJava) + } + + private def clientRateMetricName(quotaMetricTags: Map[String, String]): MetricName = { + metrics.metricName("mutation-rate", QuotaType.CONTROLLER_MUTATION.toString, + "Tracking mutation-rate per user/client-id", + quotaMetricTags.asJava) + } + + override protected def registerQuotaMetrics(metricTags: Map[String, String])(sensor: Sensor): Unit = { + sensor.add( + clientRateMetricName(metricTags), + new Rate + ) + sensor.add( + clientQuotaMetricName(metricTags), + new TokenBucket, + getQuotaMetricConfig(metricTags) + ) + } + + /** + * Records that a user/clientId accumulated or would like to accumulate the provided amount at the + * the specified time, returns throttle time in milliseconds. The quota is strict meaning that it + * does not accept any mutations once the quota is exhausted until it gets back to the defined rate. + * + * @param session The session from which the user is extracted + * @param clientId The client id + * @param value The value to accumulate + * @param timeMs The time at which to accumulate the value + * @return The throttle time in milliseconds defines as the time to wait until the average + * rate gets back to the defined quota + */ + override def recordAndGetThrottleTimeMs(session: Session, clientId: String, value: Double, timeMs: Long): Int = { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + val quotaSensor = clientSensors.quotaSensor + try { + quotaSensor synchronized { + quotaSensor.checkQuotas(timeMs) + quotaSensor.record(value, timeMs, false) + } + 0 + } catch { + case e: QuotaViolationException => + val throttleTimeMs = ControllerMutationQuotaManager.throttleTimeMs(e, timeMs).toInt + debug(s"Quota violated for sensor (${quotaSensor.name}). Delay time: ($throttleTimeMs)") + throttleTimeMs + } + } + + /** + * Returns a StrictControllerMutationQuota for the given user/clientId pair or + * a UnboundedControllerMutationQuota$ if the quota is disabled. + * + * @param session The session from which the user is extracted + * @param clientId The client id + * @return ControllerMutationQuota + */ + def newStrictQuotaFor(session: Session, clientId: String): ControllerMutationQuota = { + if (quotasEnabled) { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + new StrictControllerMutationQuota(time, clientSensors.quotaSensor) + } else { + UnboundedControllerMutationQuota + } + } + + def newStrictQuotaFor(request: RequestChannel.Request): ControllerMutationQuota = + newStrictQuotaFor(request.session, request.header.clientId) + + /** + * Returns a PermissiveControllerMutationQuota for the given user/clientId pair or + * a UnboundedControllerMutationQuota$ if the quota is disabled. + * + * @param session The session from which the user is extracted + * @param clientId The client id + * @return ControllerMutationQuota + */ + def newPermissiveQuotaFor(session: Session, clientId: String): ControllerMutationQuota = { + if (quotasEnabled) { + val clientSensors = getOrCreateQuotaSensors(session, clientId) + new PermissiveControllerMutationQuota(time, clientSensors.quotaSensor) + } else { + UnboundedControllerMutationQuota + } + } + + def newPermissiveQuotaFor(request: RequestChannel.Request): ControllerMutationQuota = + newPermissiveQuotaFor(request.session, request.header.clientId) + + /** + * Returns a ControllerMutationQuota based on `strictSinceVersion`. It returns a strict + * quota if the version is equal to or above of the `strictSinceVersion`, a permissive + * quota if the version is below, and a unbounded quota if the quota is disabled. + * + * When the quota is strictly enforced. Any operation above the quota is not allowed + * and rejected with a THROTTLING_QUOTA_EXCEEDED error. + * + * @param request The request to extract the user and the clientId from + * @param strictSinceVersion The version since quota is strict + * @return + */ + def newQuotaFor(request: RequestChannel.Request, strictSinceVersion: Short): ControllerMutationQuota = { + if (request.header.apiVersion() >= strictSinceVersion) + newStrictQuotaFor(request) + else + newPermissiveQuotaFor(request) + } +} diff --git a/core/src/main/scala/kafka/server/ControllerServer.scala b/core/src/main/scala/kafka/server/ControllerServer.scala index e41705ed3bae9..1933a55dfeb88 100644 --- a/core/src/main/scala/kafka/server/ControllerServer.scala +++ b/core/src/main/scala/kafka/server/ControllerServer.scala @@ -17,17 +17,15 @@ package kafka.server -import kafka.network.SocketServer +import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.raft.KafkaRaftManager import kafka.server.QuotaFactory.QuotaManagers import scala.collection.immutable -import kafka.server.metadata.{ClientQuotaMetadataManager, DynamicClientQuotaPublisher, DynamicConfigPublisher, DynamicTopicClusterQuotaPublisher, KRaftMetadataCache, KRaftMetadataCachePublisher} +import kafka.server.metadata.{AclPublisher, ClientQuotaMetadataManager, DelegationTokenPublisher, DynamicClientQuotaPublisher, DynamicConfigPublisher, DynamicTopicClusterQuotaPublisher, KRaftMetadataCache, KRaftMetadataCachePublisher, ScramPublisher} import kafka.utils.{CoreUtils, Logging} -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{LogContext, Utils} @@ -38,15 +36,13 @@ import org.apache.kafka.image.publisher.{ControllerRegistrationsPublisher, Metad import org.apache.kafka.metadata.{KafkaConfigSchema, ListenerInfo} import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer import org.apache.kafka.metadata.bootstrap.BootstrapMetadata -import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, FeaturesPublisher, ScramPublisher} +import org.apache.kafka.metadata.publisher.FeaturesPublisher import org.apache.kafka.raft.QuorumConfig -import org.apache.kafka.security.{CredentialProvider, DelegationTokenManager} -import org.apache.kafka.server.{ProcessRole, SimpleApiVersionManager} +import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.config.ServerLogConfigs.{ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG} import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, NodeToControllerChannelManager} import org.apache.kafka.server.config.ConfigType -import org.apache.kafka.server.config.DelegationTokenManagerConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.server.network.{EndpointReadyFutures, KafkaAuthorizerServerInfo} import org.apache.kafka.server.policy.{AlterConfigPolicy, CreateTopicPolicy} @@ -84,7 +80,7 @@ class ControllerServer( var status: ProcessStatus = SHUTDOWN var linuxIoMetricsCollector: LinuxIoMetricsCollector = _ - @volatile var authorizerPlugin: Option[Plugin[Authorizer]] = None + @volatile var authorizer: Option[Authorizer] = None var tokenCache: DelegationTokenCache = _ var credentialProvider: CredentialProvider = _ var socketServer: SocketServer = _ @@ -140,13 +136,14 @@ class ControllerServer( metricsGroup.newGauge("linux-disk-write-bytes", () => linuxIoMetricsCollector.writeBytes()) } - authorizerPlugin = config.createNewAuthorizer(metrics, ProcessRole.ControllerRole.toString) + authorizer = config.createNewAuthorizer() + authorizer.foreach(_.configure(config.originals)) - metadataCache = new KRaftMetadataCache(config.nodeId, () => raftManager.client.kraftVersion()) + metadataCache = MetadataCache.kRaftMetadataCache(config.nodeId, () => raftManager.client.kraftVersion()) metadataCachePublisher = new KRaftMetadataCachePublisher(metadataCache) - featuresPublisher = new FeaturesPublisher(logContext, sharedServer.metadataPublishingFaultHandler) + featuresPublisher = new FeaturesPublisher(logContext) registrationsPublisher = new ControllerRegistrationsPublisher() @@ -160,11 +157,6 @@ class ControllerServer( raftManager.client.kraftVersion().featureLevel()) ) - // metrics will be set to null when closing a controller, so we should recreate it for testing - if (sharedServer.metrics == null){ - sharedServer.metrics = new Metrics() - } - tokenCache = new DelegationTokenCache(ScramMechanism.mechanismNames) credentialProvider = new CredentialProvider(ScramMechanism.mechanismNames, tokenCache) socketServer = new SocketServer(config, @@ -175,14 +167,14 @@ class ControllerServer( sharedServer.socketFactory) val listenerInfo = ListenerInfo - .create(config.effectiveAdvertisedControllerListeners.asJava) + .create(config.effectiveAdvertisedControllerListeners.map(_.toJava).asJava) .withWildcardHostnamesResolved() .withEphemeralPortsCorrected(name => socketServer.boundPort(new ListenerName(name))) socketServerFirstBoundPortFuture.complete(listenerInfo.firstListener().port()) val endpointReadyFutures = { val builder = new EndpointReadyFutures.Builder() - builder.build(authorizerPlugin.toJava, + builder.build(authorizer.toJava, new KafkaAuthorizerServerInfo( new ClusterResource(clusterId), config.nodeId, @@ -207,10 +199,9 @@ class ControllerServer( QuorumFeatures.defaultSupportedFeatureMap(config.unstableFeatureVersionsEnabled), controllerNodes.asScala.map(node => Integer.valueOf(node.id())).asJava) - val delegationTokenManagerConfigs = new DelegationTokenManagerConfigs(config) val delegationTokenKeyString = { - if (delegationTokenManagerConfigs.tokenAuthEnabled) { - delegationTokenManagerConfigs.delegationTokenSecretKey.value + if (config.tokenAuthEnabled) { + config.delegationTokenSecretKey.value } else { null } @@ -225,7 +216,7 @@ class ControllerServer( val maxIdleIntervalNs = config.metadataMaxIdleIntervalNs.fold(OptionalLong.empty)(OptionalLong.of) - quorumControllerMetrics = new QuorumControllerMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry), time, config.brokerSessionTimeoutMs) + quorumControllerMetrics = new QuorumControllerMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry), time) new QuorumController.Builder(config.nodeId, sharedServer.clusterId). setTime(time). @@ -249,10 +240,11 @@ class ControllerServer( setNonFatalFaultHandler(sharedServer.nonFatalQuorumControllerFaultHandler). setDelegationTokenCache(tokenCache). setDelegationTokenSecretKey(delegationTokenKeyString). - setDelegationTokenMaxLifeMs(delegationTokenManagerConfigs.delegationTokenMaxLifeMs). - setDelegationTokenExpiryTimeMs(delegationTokenManagerConfigs.delegationTokenExpiryTimeMs). - setDelegationTokenExpiryCheckIntervalMs(delegationTokenManagerConfigs.delegationTokenExpiryCheckIntervalMs). + setDelegationTokenMaxLifeMs(config.delegationTokenMaxLifeMs). + setDelegationTokenExpiryTimeMs(config.delegationTokenExpiryTimeMs). + setDelegationTokenExpiryCheckIntervalMs(config.delegationTokenExpiryCheckIntervalMs). setUncleanLeaderElectionCheckIntervalMs(config.uncleanLeaderElectionCheckIntervalMs). + setInterBrokerListenerName(config.interBrokerListenerName.value()). setControllerPerformanceSamplePeriodMs(config.controllerPerformanceSamplePeriodMs). setControllerPerformanceAlwaysLogThresholdMs(config.controllerPerformanceAlwaysLogThresholdMs) } @@ -260,20 +252,18 @@ class ControllerServer( // If we are using a ClusterMetadataAuthorizer, requests to add or remove ACLs must go // through the controller. - authorizerPlugin.foreach { plugin => - plugin.get match { - case a: ClusterMetadataAuthorizer => a.setAclMutator(controller) - case _ => - } + authorizer match { + case Some(a: ClusterMetadataAuthorizer) => a.setAclMutator(controller) + case _ => } quotaManagers = QuotaFactory.instantiate(config, metrics, time, - s"controller-${config.nodeId}-", ProcessRole.ControllerRole.toString) + s"controller-${config.nodeId}-") clientQuotaMetadataManager = new ClientQuotaMetadataManager(quotaManagers, socketServer.connectionQuotas) controllerApis = new ControllerApis(socketServer.dataPlaneRequestChannel, - authorizerPlugin, + authorizer, quotaManagers, time, controller, @@ -288,7 +278,8 @@ class ControllerServer( controllerApis, time, config.numIoThreads, - "RequestHandlerAvgIdlePercent", + s"${DataPlaneAcceptor.MetricPrefix}RequestHandlerAvgIdlePercent", + DataPlaneAcceptor.ThreadPrefix, "controller") // Set up the metadata cache publisher. @@ -318,7 +309,7 @@ class ControllerServer( metadataPublishers.add(new DynamicConfigPublisher( config, sharedServer.metadataPublishingFaultHandler, - immutable.Map[ConfigType, ConfigHandler]( + immutable.Map[String, ConfigHandler]( // controllers don't host topics, so no need to do anything with dynamic topic config changes here ConfigType.BROKER -> new BrokerConfigHandler(config, quotaManagers) ), @@ -351,7 +342,7 @@ class ControllerServer( // Set up the SCRAM publisher. metadataPublishers.add(new ScramPublisher( - config.nodeId, + config, sharedServer.metadataPublishingFaultHandler, "controller", credentialProvider @@ -361,10 +352,10 @@ class ControllerServer( // We need a tokenManager for the Publisher // The tokenCache in the tokenManager is the same used in DelegationTokenControlManager metadataPublishers.add(new DelegationTokenPublisher( - config.nodeId, + config, sharedServer.metadataPublishingFaultHandler, "controller", - new DelegationTokenManager(delegationTokenManagerConfigs, tokenCache) + new DelegationTokenManager(config, tokenCache, time) )) // Set up the metrics publisher. @@ -378,7 +369,7 @@ class ControllerServer( config.nodeId, sharedServer.metadataPublishingFaultHandler, "controller", - authorizerPlugin.toJava + authorizer )) // Install all metadata publishers. @@ -470,7 +461,7 @@ class ControllerServer( CoreUtils.swallow(quotaManagers.shutdown(), this) Utils.closeQuietly(controller, "controller") Utils.closeQuietly(quorumControllerMetrics, "quorum controller metrics") - authorizerPlugin.foreach(Utils.closeQuietly(_, "authorizer plugin")) + authorizer.foreach(Utils.closeQuietly(_, "authorizer")) createTopicPolicy.foreach(policy => Utils.closeQuietly(policy, "create topic policy")) alterConfigPolicy.foreach(policy => Utils.closeQuietly(policy, "alter config policy")) socketServerFirstBoundPortFuture.completeExceptionally(new RuntimeException("shutting down")) diff --git a/core/src/main/scala/kafka/server/DelayedDeleteRecords.scala b/core/src/main/scala/kafka/server/DelayedDeleteRecords.scala new file mode 100644 index 0000000000000..c2cffb6a07796 --- /dev/null +++ b/core/src/main/scala/kafka/server/DelayedDeleteRecords.scala @@ -0,0 +1,134 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + + +import kafka.utils.Logging + +import java.util.concurrent.TimeUnit +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.message.DeleteRecordsResponseData +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.DeleteRecordsResponse +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.purgatory.DelayedOperation + +import scala.collection._ + + +case class DeleteRecordsPartitionStatus(requiredOffset: Long, + responseStatus: DeleteRecordsResponseData.DeleteRecordsPartitionResult) { + @volatile var acksPending = false + + override def toString: String = "[acksPending: %b, error: %s, lowWatermark: %d, requiredOffset: %d]" + .format(acksPending, Errors.forCode(responseStatus.errorCode).toString, responseStatus.lowWatermark, requiredOffset) +} + +/** + * A delayed delete records operation that can be created by the replica manager and watched + * in the delete records operation purgatory + */ +class DelayedDeleteRecords(delayMs: Long, + deleteRecordsStatus: Map[TopicPartition, DeleteRecordsPartitionStatus], + replicaManager: ReplicaManager, + responseCallback: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult] => Unit) + extends DelayedOperation(delayMs) with Logging { + + // first update the acks pending variable according to the error code + deleteRecordsStatus.foreachEntry { (topicPartition, status) => + if (status.responseStatus.errorCode == Errors.NONE.code) { + // Timeout error state will be cleared when required acks are received + status.acksPending = true + status.responseStatus.setErrorCode(Errors.REQUEST_TIMED_OUT.code) + } else { + status.acksPending = false + } + + trace("Initial partition status for %s is %s".format(topicPartition, status)) + } + + /** + * The delayed delete records operation can be completed if every partition specified in the request satisfied one of the following: + * + * 1) There was an error while checking if all replicas have caught up to the deleteRecordsOffset: set an error in response + * 2) The low watermark of the partition has caught up to the deleteRecordsOffset. set the low watermark in response + * + */ + override def tryComplete(): Boolean = { + // check for each partition if it still has pending acks + deleteRecordsStatus.foreachEntry { (topicPartition, status) => + trace(s"Checking delete records satisfaction for $topicPartition, current status $status") + // skip those partitions that have already been satisfied + if (status.acksPending) { + val (lowWatermarkReached, error, lw) = replicaManager.getPartition(topicPartition) match { + case HostedPartition.Online(partition) => + partition.leaderLogIfLocal match { + case Some(_) => + val leaderLW = partition.lowWatermarkIfLeader + (leaderLW >= status.requiredOffset, Errors.NONE, leaderLW) + case None => + (false, Errors.NOT_LEADER_OR_FOLLOWER, DeleteRecordsResponse.INVALID_LOW_WATERMARK) + } + + case HostedPartition.Offline(_) => + (false, Errors.KAFKA_STORAGE_ERROR, DeleteRecordsResponse.INVALID_LOW_WATERMARK) + + case HostedPartition.None => + (false, Errors.UNKNOWN_TOPIC_OR_PARTITION, DeleteRecordsResponse.INVALID_LOW_WATERMARK) + } + if (error != Errors.NONE || lowWatermarkReached) { + status.acksPending = false + status.responseStatus.setErrorCode(error.code) + status.responseStatus.setLowWatermark(lw) + } + } + } + + // check if every partition has satisfied at least one of case A or B + if (!deleteRecordsStatus.values.exists(_.acksPending)) + forceComplete() + else + false + } + + override def onExpiration(): Unit = { + deleteRecordsStatus.foreachEntry { (topicPartition, status) => + if (status.acksPending) { + DelayedDeleteRecordsMetrics.recordExpiration(topicPartition) + } + } + } + + /** + * Upon completion, return the current response status along with the error code per partition + */ + override def onComplete(): Unit = { + val responseStatus = deleteRecordsStatus.map { case (k, status) => k -> status.responseStatus } + responseCallback(responseStatus) + } +} + +object DelayedDeleteRecordsMetrics { + private val metricsGroup = new KafkaMetricsGroup(DelayedDeleteRecordsMetrics.getClass) + + private val aggregateExpirationMeter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS) + + def recordExpiration(partition: TopicPartition): Unit = { + aggregateExpirationMeter.mark() + } +} diff --git a/core/src/main/scala/kafka/server/DelayedElectLeader.scala b/core/src/main/scala/kafka/server/DelayedElectLeader.scala new file mode 100644 index 0000000000000..890f81526eb4d --- /dev/null +++ b/core/src/main/scala/kafka/server/DelayedElectLeader.scala @@ -0,0 +1,85 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.utils.Logging +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.ApiError +import org.apache.kafka.server.purgatory.DelayedOperation + +import scala.collection.{Map, mutable} + +/** A delayed elect leader operation that can be created by the replica manager and watched + * in the elect leader purgatory + */ +class DelayedElectLeader( + delayMs: Long, + expectedLeaders: Map[TopicPartition, Int], + results: Map[TopicPartition, ApiError], + replicaManager: ReplicaManager, + responseCallback: Map[TopicPartition, ApiError] => Unit +) extends DelayedOperation(delayMs) with Logging { + + private val waitingPartitions = mutable.Map() ++= expectedLeaders + private val fullResults = mutable.Map() ++= results + + + /** + * Call-back to execute when a delayed operation gets expired and hence forced to complete. + */ + override def onExpiration(): Unit = {} + + /** + * Process for completing an operation; This function needs to be defined + * in subclasses and will be called exactly once in forceComplete() + */ + override def onComplete(): Unit = { + // This could be called to force complete, so I need the full list of partitions, so I can time them all out. + updateWaiting() + val timedOut = waitingPartitions.map { + case (tp, _) => tp -> new ApiError(Errors.REQUEST_TIMED_OUT, null) + } + responseCallback(timedOut ++ fullResults) + } + + /** + * Try to complete the delayed operation by first checking if the operation + * can be completed by now. If yes execute the completion logic by calling + * forceComplete() and return true iff forceComplete returns true; otherwise return false + * + * This function needs to be defined in subclasses + */ + override def tryComplete(): Boolean = { + updateWaiting() + debug(s"tryComplete() waitingPartitions: $waitingPartitions") + waitingPartitions.isEmpty && forceComplete() + } + + private def updateWaiting(): Unit = { + val metadataCache = replicaManager.metadataCache + val completedPartitions = waitingPartitions.collect { + case (tp, leader) if metadataCache.getLeaderAndIsr(tp.topic, tp.partition).exists(_.leader == leader) => tp + } + completedPartitions.foreach { tp => + waitingPartitions -= tp + fullResults += tp -> ApiError.NONE + } + } + +} diff --git a/core/src/main/scala/kafka/server/DelayedFuture.scala b/core/src/main/scala/kafka/server/DelayedFuture.scala new file mode 100644 index 0000000000000..a24bc3870895a --- /dev/null +++ b/core/src/main/scala/kafka/server/DelayedFuture.scala @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.utils.Logging + +import java.util +import java.util.concurrent._ +import java.util.function.BiConsumer +import org.apache.kafka.common.errors.TimeoutException +import org.apache.kafka.common.utils.KafkaThread +import org.apache.kafka.server.purgatory.{DelayedOperation, DelayedOperationKey, DelayedOperationPurgatory} + +import scala.collection.Seq + +/** + * A delayed operation using CompletionFutures that can be created by KafkaApis and watched + * in a DelayedFuturePurgatory purgatory. This is used for ACL updates using async Authorizers. + */ +class DelayedFuture[T](timeoutMs: Long, + futures: Seq[CompletableFuture[T]], + responseCallback: () => Unit) + extends DelayedOperation(timeoutMs) with Logging { + + /** + * The operation can be completed if all the futures have completed successfully + * or failed with exceptions. + */ + override def tryComplete() : Boolean = { + trace(s"Trying to complete operation for ${futures.size} futures") + + val pending = futures.count(future => !future.isDone) + if (pending == 0) { + trace("All futures have been completed or have errors, completing the delayed operation") + forceComplete() + } else { + trace(s"$pending future still pending, not completing the delayed operation") + false + } + } + + /** + * Timeout any pending futures and invoke responseCallback. This is invoked when all + * futures have completed or the operation has timed out. + */ + override def onComplete(): Unit = { + val pendingFutures = futures.filterNot(_.isDone) + trace(s"Completing operation for ${futures.size} futures, expired ${pendingFutures.size}") + pendingFutures.foreach(_.completeExceptionally(new TimeoutException(s"Request has been timed out after $timeoutMs ms"))) + responseCallback.apply() + } + + /** + * This is invoked after onComplete(), so no actions required. + */ + override def onExpiration(): Unit = { + } +} + +class DelayedFuturePurgatory(purgatoryName: String, brokerId: Int) { + private val purgatory = new DelayedOperationPurgatory[DelayedFuture[_]](purgatoryName, brokerId) + private val executor = new ThreadPoolExecutor(1, 1, 0, TimeUnit.MILLISECONDS, + new LinkedBlockingQueue[Runnable](), + new ThreadFactory { + override def newThread(r: Runnable): Thread = new KafkaThread(s"DelayedExecutor-$purgatoryName", r, true) + }) + private val purgatoryKey = new DelayedOperationKey() { + override def keyLabel(): String = "delayed-future-key" + } + + def tryCompleteElseWatch[T](timeoutMs: Long, + futures: Seq[CompletableFuture[T]], + responseCallback: () => Unit): DelayedFuture[T] = { + val delayedFuture = new DelayedFuture[T](timeoutMs, futures, responseCallback) + val done = purgatory.tryCompleteElseWatch(delayedFuture, util.Collections.singletonList(purgatoryKey)) + if (!done) { + val callbackAction = new BiConsumer[Void, Throwable]() { + override def accept(result: Void, exception: Throwable): Unit = delayedFuture.forceComplete() + } + CompletableFuture.allOf(futures.toArray: _*).whenCompleteAsync(callbackAction, executor) + } + delayedFuture + } + + def shutdown(): Unit = { + executor.shutdownNow() + executor.awaitTermination(60, TimeUnit.SECONDS) + purgatory.shutdown() + } + + def isShutdown: Boolean = executor.isShutdown +} diff --git a/core/src/main/scala/kafka/server/DelayedProduce.scala b/core/src/main/scala/kafka/server/DelayedProduce.scala index 1d21ec78e4c63..9c212416ce6b7 100644 --- a/core/src/main/scala/kafka/server/DelayedProduce.scala +++ b/core/src/main/scala/kafka/server/DelayedProduce.scala @@ -17,11 +17,12 @@ package kafka.server -import java.util.concurrent.{ConcurrentHashMap, TimeUnit} +import java.util.concurrent.TimeUnit +import java.util.concurrent.locks.Lock import com.typesafe.scalalogging.Logger import com.yammer.metrics.core.Meter -import kafka.utils.Logging -import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import kafka.utils.{Logging, Pool} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.server.metrics.KafkaMetricsGroup @@ -29,6 +30,7 @@ import org.apache.kafka.server.purgatory.DelayedOperation import scala.collection._ import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.RichOption case class ProducePartitionStatus(requiredOffset: Long, responseStatus: PartitionResponse) { @volatile var acksPending = false @@ -41,7 +43,7 @@ case class ProducePartitionStatus(requiredOffset: Long, responseStatus: Partitio * The produce metadata maintained by the delayed produce operation */ case class ProduceMetadata(produceRequiredAcks: Short, - produceStatus: Map[TopicIdPartition, ProducePartitionStatus]) { + produceStatus: Map[TopicPartition, ProducePartitionStatus]) { override def toString = s"[requiredAcks: $produceRequiredAcks, partitionStatus: $produceStatus]" } @@ -57,8 +59,9 @@ object DelayedProduce { class DelayedProduce(delayMs: Long, produceMetadata: ProduceMetadata, replicaManager: ReplicaManager, - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit) - extends DelayedOperation(delayMs) with Logging { + responseCallback: Map[TopicPartition, PartitionResponse] => Unit, + lockOpt: Option[Lock]) + extends DelayedOperation(delayMs, lockOpt.toJava) with Logging { override lazy val logger: Logger = DelayedProduce.logger @@ -88,11 +91,11 @@ class DelayedProduce(delayMs: Long, */ override def tryComplete(): Boolean = { // check for each partition if it still has pending acks - produceMetadata.produceStatus.foreachEntry { (topicIdPartition, status) => - trace(s"Checking produce satisfaction for $topicIdPartition, current status $status") + produceMetadata.produceStatus.foreachEntry { (topicPartition, status) => + trace(s"Checking produce satisfaction for $topicPartition, current status $status") // skip those partitions that have already been satisfied if (status.acksPending) { - val (hasEnough, error) = replicaManager.getPartitionOrError(topicIdPartition.topicPartition()) match { + val (hasEnough, error) = replicaManager.getPartitionOrError(topicPartition) match { case Left(err) => // Case A (false, err) @@ -117,10 +120,10 @@ class DelayedProduce(delayMs: Long, } override def onExpiration(): Unit = { - produceMetadata.produceStatus.foreachEntry { (topicIdPartition, status) => + produceMetadata.produceStatus.foreachEntry { (topicPartition, status) => if (status.acksPending) { - debug(s"Expiring produce request for partition $topicIdPartition with status $status") - DelayedProduceMetrics.recordExpiration(topicIdPartition.topicPartition()) + debug(s"Expiring produce request for partition $topicPartition with status $status") + DelayedProduceMetrics.recordExpiration(topicPartition) } } } @@ -139,13 +142,15 @@ object DelayedProduceMetrics { private val aggregateExpirationMeter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS) - private val partitionExpirationMeters = new ConcurrentHashMap[TopicPartition, Meter] + private val partitionExpirationMeterFactory = (key: TopicPartition) => + metricsGroup.newMeter("ExpiresPerSec", + "requests", + TimeUnit.SECONDS, + Map("topic" -> key.topic, "partition" -> key.partition.toString).asJava) + private val partitionExpirationMeters = new Pool[TopicPartition, Meter](valueFactory = Some(partitionExpirationMeterFactory)) def recordExpiration(partition: TopicPartition): Unit = { aggregateExpirationMeter.mark() - partitionExpirationMeters.computeIfAbsent(partition, key => metricsGroup.newMeter("ExpiresPerSec", - "requests", - TimeUnit.SECONDS, - Map("topic" -> key.topic, "partition" -> key.partition.toString).asJava)).mark() + partitionExpirationMeters.getAndMaybePut(partition).mark() } } diff --git a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala index cb14a14b3e902..45bfe69844ad8 100644 --- a/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala +++ b/core/src/main/scala/kafka/server/DelayedRemoteFetch.scala @@ -22,13 +22,11 @@ import kafka.utils.Logging import org.apache.kafka.common.TopicIdPartition import org.apache.kafka.common.errors._ import org.apache.kafka.common.protocol.Errors -import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.purgatory.DelayedOperation import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log.{LogOffsetMetadata, RemoteLogReadResult, RemoteStorageFetchInfo} -import java.util import java.util.concurrent.{CompletableFuture, Future, TimeUnit} import java.util.{Optional, OptionalInt, OptionalLong} import scala.collection._ @@ -37,9 +35,9 @@ import scala.collection._ * A remote fetch operation that can be created by the replica manager and watched * in the remote fetch operation purgatory */ -class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Void]], - remoteFetchResults: util.Map[TopicIdPartition, CompletableFuture[RemoteLogReadResult]], - remoteFetchInfos: util.Map[TopicIdPartition, RemoteStorageFetchInfo], +class DelayedRemoteFetch(remoteFetchTask: Future[Void], + remoteFetchResult: CompletableFuture[RemoteLogReadResult], + remoteFetchInfo: RemoteStorageFetchInfo, remoteFetchMaxWaitMs: Long, fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)], fetchParams: FetchParams, @@ -57,7 +55,7 @@ class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Voi * * Case a: This broker is no longer the leader of the partition it tries to fetch * Case b: This broker does not know the partition it tries to fetch - * Case c: All the remote storage read request completed (succeeded or failed) + * Case c: The remote storage read request completed (succeeded or failed) * Case d: The partition is in an offline log directory on this broker * * Upon completion, should return whatever data is available for each valid partition @@ -82,23 +80,16 @@ class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Voi return forceComplete() } } - // Case c - if (remoteFetchResults.values().stream().allMatch(taskResult => taskResult.isDone)) + if (remoteFetchResult.isDone) // Case c forceComplete() else false } override def onExpiration(): Unit = { - // cancel the remote storage read task, if it has not been executed yet and - // avoid interrupting the task if it is already running as it may force closing opened/cached resources as transaction index. - remoteFetchTasks.forEach { (topicIdPartition, task) => - if (task != null && !task.isDone) { - if (!task.cancel(false)) { - debug(s"Remote fetch task for remoteFetchInfo: ${remoteFetchInfos.get(topicIdPartition)} could not be cancelled.") - } - } - } + // cancel the remote storage read task, if it has not been executed yet + val cancelled = remoteFetchTask.cancel(true) + if (!cancelled) debug(s"Remote fetch task for RemoteStorageFetchInfo: $remoteFetchInfo could not be cancelled and its isDone value is ${remoteFetchTask.isDone}") DelayedRemoteFetchMetrics.expiredRequestMeter.mark() } @@ -108,8 +99,7 @@ class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Voi */ override def onComplete(): Unit = { val fetchPartitionData = localReadResults.map { case (tp, result) => - val remoteFetchResult = remoteFetchResults.get(tp) - if (remoteFetchInfos.containsKey(tp) + if (tp.topicPartition().equals(remoteFetchInfo.topicPartition) && remoteFetchResult.isDone && result.error == Errors.NONE && result.info.delayedRemoteStorageFetch.isPresent) { @@ -123,9 +113,9 @@ class DelayedRemoteFetch(remoteFetchTasks: util.Map[TopicIdPartition, Future[Voi result.leaderLogStartOffset, info.records, Optional.empty(), - if (result.lastStableOffset.isPresent) OptionalLong.of(result.lastStableOffset.getAsLong) else OptionalLong.empty(), + if (result.lastStableOffset.isDefined) OptionalLong.of(result.lastStableOffset.get) else OptionalLong.empty(), info.abortedTransactions, - if (result.preferredReadReplica.isPresent) OptionalInt.of(result.preferredReadReplica.getAsInt) else OptionalInt.empty(), + if (result.preferredReadReplica.isDefined) OptionalInt.of(result.preferredReadReplica.get) else OptionalInt.empty(), false) } } else { diff --git a/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala b/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala new file mode 100644 index 0000000000000..f2bb8c37d85a6 --- /dev/null +++ b/core/src/main/scala/kafka/server/DelayedRemoteListOffsets.scala @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import com.yammer.metrics.core.Meter +import kafka.utils.{Logging, Pool} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.ApiException +import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.ListOffsetsResponse +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.purgatory.DelayedOperation +import org.apache.kafka.storage.internals.log.OffsetResultHolder.FileRecordsOrError + +import java.util.Optional +import java.util.concurrent.TimeUnit +import scala.collection.{Map, mutable} +import scala.jdk.CollectionConverters._ + +class DelayedRemoteListOffsets(delayMs: Long, + version: Int, + statusByPartition: mutable.Map[TopicPartition, ListOffsetsPartitionStatus], + replicaManager: ReplicaManager, + responseCallback: List[ListOffsetsTopicResponse] => Unit) + extends DelayedOperation(delayMs) with Logging { + // Mark the status as completed, if there is no async task to track. + // If there is a task to track, then build the response as REQUEST_TIMED_OUT by default. + statusByPartition.foreachEntry { (topicPartition, status) => + status.completed = status.futureHolderOpt.isEmpty + if (status.futureHolderOpt.isPresent) { + status.responseOpt = Some(buildErrorResponse(Errors.REQUEST_TIMED_OUT, topicPartition.partition())) + } + trace(s"Initial partition status for $topicPartition is $status") + } + + /** + * Call-back to execute when a delayed operation gets expired and hence forced to complete. + */ + override def onExpiration(): Unit = { + statusByPartition.foreachEntry { (topicPartition, status) => + if (!status.completed) { + debug(s"Expiring list offset request for partition $topicPartition with status $status") + status.futureHolderOpt.ifPresent(futureHolder => futureHolder.jobFuture.cancel(true)) + DelayedRemoteListOffsetsMetrics.recordExpiration(topicPartition) + } + } + } + + /** + * Process for completing an operation; This function needs to be defined + * in subclasses and will be called exactly once in forceComplete() + */ + override def onComplete(): Unit = { + val responseTopics = statusByPartition.groupBy(e => e._1.topic()).map { + case (topic, status) => + new ListOffsetsTopicResponse().setName(topic).setPartitions(status.values.flatMap(s => s.responseOpt).toList.asJava) + }.toList + responseCallback(responseTopics) + } + + /** + * Try to complete the delayed operation by first checking if the operation + * can be completed by now. If yes execute the completion logic by calling + * forceComplete() and return true iff forceComplete returns true; otherwise return false + * + * This function needs to be defined in subclasses + */ + override def tryComplete(): Boolean = { + var completable = true + statusByPartition.foreachEntry { (partition, status) => + if (!status.completed) { + try { + replicaManager.getPartitionOrException(partition) + } catch { + case e: ApiException => + status.futureHolderOpt.ifPresent { futureHolder => + futureHolder.jobFuture.cancel(false) + futureHolder.taskFuture.complete(new FileRecordsOrError(Optional.of(e), Optional.empty())) + } + } + + status.futureHolderOpt.ifPresent { futureHolder => + if (futureHolder.taskFuture.isDone) { + val taskFuture = futureHolder.taskFuture.get() + val response = { + if (taskFuture.hasException) { + buildErrorResponse(Errors.forException(taskFuture.exception().get()), partition.partition()) + } else if (!taskFuture.hasTimestampAndOffset) { + val error = status.maybeOffsetsError + .map(e => if (version >= 5) Errors.forException(e) else Errors.LEADER_NOT_AVAILABLE) + .getOrElse(Errors.NONE) + buildErrorResponse(error, partition.partition()) + } else { + var partitionResponse = buildErrorResponse(Errors.NONE, partition.partition()) + val found = taskFuture.timestampAndOffset().get() + if (status.lastFetchableOffset.isDefined && found.offset >= status.lastFetchableOffset.get) { + if (status.maybeOffsetsError.isDefined) { + val error = if (version >= 5) Errors.forException(status.maybeOffsetsError.get) else Errors.LEADER_NOT_AVAILABLE + partitionResponse.setErrorCode(error.code()) + } + } else { + partitionResponse = new ListOffsetsPartitionResponse() + .setPartitionIndex(partition.partition()) + .setErrorCode(Errors.NONE.code()) + .setTimestamp(found.timestamp) + .setOffset(found.offset) + + if (found.leaderEpoch.isPresent && version >= 4) { + partitionResponse.setLeaderEpoch(found.leaderEpoch.get) + } + } + partitionResponse + } + } + status.responseOpt = Some(response) + status.completed = true + } + completable = completable && futureHolder.taskFuture.isDone + } + } + } + if (completable) { + forceComplete() + } else { + false + } + } + + private def buildErrorResponse(e: Errors, partitionIndex: Int): ListOffsetsPartitionResponse = { + new ListOffsetsPartitionResponse() + .setPartitionIndex(partitionIndex) + .setErrorCode(e.code) + .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) + .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) + } +} + +object DelayedRemoteListOffsetsMetrics { + private val metricsGroup = new KafkaMetricsGroup(DelayedRemoteListOffsetsMetrics.getClass) + private[server] val aggregateExpirationMeter = metricsGroup.newMeter("ExpiresPerSec", "requests", TimeUnit.SECONDS) + private val partitionExpirationMeterFactory = (key: TopicPartition) => + metricsGroup.newMeter("ExpiresPerSec", + "requests", + TimeUnit.SECONDS, + Map("topic" -> key.topic, "partition" -> key.partition.toString).asJava) + private[server] val partitionExpirationMeters = new Pool[TopicPartition, Meter](valueFactory = Some(partitionExpirationMeterFactory)) + + def recordExpiration(partition: TopicPartition): Unit = { + aggregateExpirationMeter.mark() + partitionExpirationMeters.getAndMaybePut(partition).mark() + } +} \ No newline at end of file diff --git a/core/src/main/scala/kafka/server/DelegationTokenManager.scala b/core/src/main/scala/kafka/server/DelegationTokenManager.scala new file mode 100644 index 0000000000000..4fa4cae209f11 --- /dev/null +++ b/core/src/main/scala/kafka/server/DelegationTokenManager.scala @@ -0,0 +1,282 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets +import java.security.InvalidKeyException + +import javax.crypto.spec.SecretKeySpec +import javax.crypto.{Mac, SecretKey} +import kafka.utils.Logging +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.security.auth.KafkaPrincipal +import org.apache.kafka.common.security.scram.internals.{ScramFormatter, ScramMechanism} +import org.apache.kafka.common.security.scram.ScramCredential +import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache +import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation} +import org.apache.kafka.common.utils.Time + +import scala.jdk.CollectionConverters._ +import scala.collection.mutable + +object DelegationTokenManager { + private val DefaultHmacAlgorithm = "HmacSHA512" + val CurrentVersion = 3 + val ErrorTimestamp = -1 + + /** + * + * @param tokenId + * @param secretKey + * @return + */ + def createHmac(tokenId: String, secretKey: String) : Array[Byte] = { + createHmac(tokenId, createSecretKey(secretKey.getBytes(StandardCharsets.UTF_8))) + } + + /** + * Convert the byte[] to a secret key + * @param keybytes the byte[] to create the secret key from + * @return the secret key + */ + private def createSecretKey(keybytes: Array[Byte]) : SecretKey = { + new SecretKeySpec(keybytes, DefaultHmacAlgorithm) + } + + /** + * Compute HMAC of the identifier using the secret key + * @param tokenId the bytes of the identifier + * @param secretKey the secret key + * @return String of the generated hmac + */ + def createHmac(tokenId: String, secretKey: SecretKey) : Array[Byte] = { + val mac = Mac.getInstance(DefaultHmacAlgorithm) + try + mac.init(secretKey) + catch { + case ike: InvalidKeyException => throw new IllegalArgumentException("Invalid key to HMAC computation", ike) + } + mac.doFinal(tokenId.getBytes(StandardCharsets.UTF_8)) + } + + def filterToken(requesterPrincipal: KafkaPrincipal, owners : Option[List[KafkaPrincipal]], token: TokenInformation, + authorizeToken: String => Boolean, authorizeRequester: KafkaPrincipal => Boolean) : Boolean = { + + val allow = + //exclude tokens which are not requested + if (owners.isDefined && !owners.get.exists(owner => token.ownerOrRenewer(owner))) { + false + //Owners and the renewers can describe their own tokens + } else if (token.ownerOrRenewer(requesterPrincipal)) { + true + // Check permission for non-owned tokens + } else if (authorizeToken(token.tokenId) || authorizeRequester(token.owner)) { + true + } + else { + false + } + + allow + } +} + +class DelegationTokenManager(val config: KafkaConfig, + val tokenCache: DelegationTokenCache, + val time: Time) extends Logging { + this.logIdent = s"[Token Manager on Node ${config.brokerId}]: " + + protected val lock = new Object() + + import DelegationTokenManager._ + + type CreateResponseCallback = CreateTokenResult => Unit + type RenewResponseCallback = (Errors, Long) => Unit + type ExpireResponseCallback = (Errors, Long) => Unit + + val secretKey: SecretKey = { + val keyBytes = if (config.tokenAuthEnabled) config.delegationTokenSecretKey.value.getBytes(StandardCharsets.UTF_8) else null + if (keyBytes == null || keyBytes.isEmpty) null + else + createSecretKey(keyBytes) + } + + val tokenMaxLifetime: Long = config.delegationTokenMaxLifeMs + val defaultTokenRenewTime: Long = config.delegationTokenExpiryTimeMs + + def startup(): Unit = { + // Nothing to do. Overridden for Zk case + } + + def shutdown(): Unit = { + // Nothing to do. Overridden for Zk case + } + + /** + * + * @param token + */ + protected def updateCache(token: DelegationToken): Unit = { + val hmacString = token.hmacAsBase64String + val scramCredentialMap = prepareScramCredentials(hmacString) + tokenCache.updateCache(token, scramCredentialMap.asJava) + } + /** + * @param hmacString + */ + private def prepareScramCredentials(hmacString: String) : Map[String, ScramCredential] = { + val scramCredentialMap = mutable.Map[String, ScramCredential]() + + def scramCredential(mechanism: ScramMechanism): ScramCredential = { + new ScramFormatter(mechanism).generateCredential(hmacString, mechanism.minIterations) + } + + for (mechanism <- ScramMechanism.values) + scramCredentialMap(mechanism.mechanismName) = scramCredential(mechanism) + + scramCredentialMap.toMap + } + + /** + * @param token + */ + def updateToken(token: DelegationToken): Unit = { + updateCache(token) + } + + /** + * + * @param owner + * @param renewers + * @param maxLifeTimeMs + * @param responseCallback + */ + def createToken(owner: KafkaPrincipal, + tokenRequester: KafkaPrincipal, + renewers: List[KafkaPrincipal], + maxLifeTimeMs: Long, + responseCallback: CreateResponseCallback): Unit = { + // Must be forwarded to KRaft Controller or handled in DelegationTokenManagerZk + throw new IllegalStateException("API createToken was not forwarded to a handler.") + } + + /** + * + * @param principal + * @param hmac + * @param renewLifeTimeMs + * @param renewCallback + */ + def renewToken(principal: KafkaPrincipal, + hmac: ByteBuffer, + renewLifeTimeMs: Long, + renewCallback: RenewResponseCallback): Unit = { + // Must be forwarded to KRaft Controller or handled in DelegationTokenManagerZk + throw new IllegalStateException("API renewToken was not forwarded to a handler.") + } + + def getDelegationToken(tokenInfo: TokenInformation): DelegationToken = { + val hmac = createHmac(tokenInfo.tokenId, secretKey) + new DelegationToken(tokenInfo, hmac) + } + + /** + * + * @param principal + * @param hmac + * @param expireLifeTimeMs + * @param expireResponseCallback + */ + def expireToken(principal: KafkaPrincipal, + hmac: ByteBuffer, + expireLifeTimeMs: Long, + expireResponseCallback: ExpireResponseCallback): Unit = { + // Must be forwarded to KRaft Controller or handled in DelegationTokenManagerZk + throw new IllegalStateException("API expireToken was not forwarded to a handler.") + } + + /** + * + * @param tokenId + */ + def removeToken(tokenId: String): Unit = { + removeCache(tokenId) + } + + /** + * + * @param tokenId + */ + protected def removeCache(tokenId: String): Unit = { + tokenCache.removeCache(tokenId) + } + + /** + * + * @return + */ + def expireTokens(): Unit = { + lock.synchronized { + for (tokenInfo <- getAllTokenInformation) { + val now = time.milliseconds + if (tokenInfo.maxTimestamp < now || tokenInfo.expiryTimestamp < now) { + info(s"Delegation token expired for token: ${tokenInfo.tokenId} for owner: ${tokenInfo.owner}") + removeToken(tokenInfo.tokenId) + } + } + } + } + + def getAllTokenInformation: List[TokenInformation] = tokenCache.tokens.asScala.toList + + def getTokens(filterToken: TokenInformation => Boolean): List[DelegationToken] = { + getAllTokenInformation.filter(filterToken).map(token => getDelegationToken(token)) + } + +} + +case class CreateTokenResult(owner: KafkaPrincipal, + tokenRequester: KafkaPrincipal, + issueTimestamp: Long, + expiryTimestamp: Long, + maxTimestamp: Long, + tokenId: String, + hmac: Array[Byte], + error: Errors) { + + override def equals(other: Any): Boolean = { + other match { + case that: CreateTokenResult => + error.equals(that.error) && + owner.equals(that.owner) && + tokenRequester.equals(that.tokenRequester) && + tokenId.equals(that.tokenId) && + issueTimestamp.equals(that.issueTimestamp) && + expiryTimestamp.equals(that.expiryTimestamp) && + maxTimestamp.equals(that.maxTimestamp) && + (hmac sameElements that.hmac) + case _ => false + } + } + + override def hashCode(): Int = { + val fields = Seq(owner, tokenRequester, issueTimestamp, expiryTimestamp, maxTimestamp, tokenId, hmac, error) + fields.map(_.hashCode()).foldLeft(0)((a, b) => 31 * a + b) + } +} diff --git a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala index 124a4c7b78f4c..4f0e574a079e5 100755 --- a/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala +++ b/core/src/main/scala/kafka/server/DynamicBrokerConfig.scala @@ -21,44 +21,38 @@ import java.util import java.util.{Collections, Properties} import java.util.concurrent.CopyOnWriteArrayList import java.util.concurrent.locks.ReentrantReadWriteLock -import kafka.log.LogManager +import kafka.cluster.EndPoint +import kafka.log.{LogCleaner, LogManager} import kafka.network.{DataPlaneAcceptor, SocketServer} -import kafka.raft.KafkaRaftManager import kafka.server.DynamicBrokerConfig._ import kafka.utils.{CoreUtils, Logging} import org.apache.kafka.common.Reconfigurable -import org.apache.kafka.common.Endpoint import org.apache.kafka.common.config.internals.BrokerSecurityConfigs -import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, ConfigResource, SaslConfigs, SslConfigs} -import org.apache.kafka.common.metadata.{ConfigRecord, MetadataRecordType} +import org.apache.kafka.common.config.{AbstractConfig, ConfigDef, ConfigException, SaslConfigs, SslConfigs} import org.apache.kafka.common.metrics.{Metrics, MetricsReporter} +import org.apache.kafka.common.config.types.Password import org.apache.kafka.common.network.{ListenerName, ListenerReconfigurable} import org.apache.kafka.common.security.authenticator.LoginManager -import org.apache.kafka.common.utils.LogContext -import org.apache.kafka.common.utils.{BufferSupplier, ConfigUtils, Utils} -import org.apache.kafka.config +import org.apache.kafka.common.utils.{ConfigUtils, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.KafkaRaftClient -import org.apache.kafka.server.{DynamicThreadPool, ProcessRole} -import org.apache.kafka.server.common.ApiMessageAndVersion -import org.apache.kafka.server.config.{DynamicProducerStateManagerConfig, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} +import org.apache.kafka.security.PasswordEncoder +import org.apache.kafka.server.ProcessRole +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.{ClientMetricsReceiverPlugin, MetricConfigs} import org.apache.kafka.server.telemetry.ClientTelemetry -import org.apache.kafka.snapshot.RecordsSnapshotReader -import org.apache.kafka.storage.internals.log.{LogCleaner, LogConfig} +import org.apache.kafka.storage.internals.log.{LogConfig, ProducerStateManagerConfig} -import scala.util.Using import scala.collection._ import scala.jdk.CollectionConverters._ /** * Dynamic broker configurations may be defined at two levels: *
            - *
          • Per-broker configurations are persisted at the controller and can be described + *
          • Per-broker configurations are persisted at the controller and can be described * or altered using AdminClient with the resource name brokerId.
          • - *
          • Cluster-wide default configurations are persisted at the cluster level and can be + *
          • Cluster-wide default configurations are persisted at the cluster level and can be * described or altered using AdminClient with an empty resource name.
          • *
          * The order of precedence for broker configs is: @@ -91,15 +85,14 @@ object DynamicBrokerConfig { private[server] val DynamicProducerStateManagerConfig = Set(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG, TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG) val AllDynamicConfigs = DynamicSecurityConfigs ++ - LogCleaner.RECONFIGURABLE_CONFIGS.asScala ++ + LogCleaner.ReconfigurableConfigs ++ DynamicLogConfig.ReconfigurableConfigs ++ - DynamicThreadPool.RECONFIGURABLE_CONFIGS.asScala ++ + DynamicThreadPool.ReconfigurableConfigs ++ Set(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG) ++ DynamicListenerConfig.ReconfigurableConfigs ++ SocketServer.ReconfigurableConfigs ++ DynamicProducerStateManagerConfig ++ - DynamicRemoteLogConfig.ReconfigurableConfigs ++ - Set(AbstractConfig.CONFIG_PROVIDERS_CONFIG) + DynamicRemoteLogConfig.ReconfigurableConfigs private val ClusterLevelListenerConfigs = Set(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG) private val PerBrokerConfigs = (DynamicSecurityConfigs ++ DynamicListenerConfig.ReconfigurableConfigs).diff( @@ -114,6 +107,13 @@ object DynamicBrokerConfig { private val ListenerConfigRegex = """listener\.name\.[^.]*\.(.*)""".r + private val DynamicPasswordConfigs = { + val passwordConfigs = KafkaConfig.configKeys.filter(_._2.`type` == ConfigDef.Type.PASSWORD).keySet + AllDynamicConfigs.intersect(passwordConfigs) + } + + def isPasswordConfig(name: String): Boolean = DynamicBrokerConfig.DynamicPasswordConfigs.exists(name.endsWith) + def brokerConfigSynonyms(name: String, matchListenerOverride: Boolean): List[String] = { name match { case ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG | ServerLogConfigs.LOG_ROLL_TIME_HOURS_CONFIG => @@ -194,57 +194,6 @@ object DynamicBrokerConfig { } props } - - private[server] def readDynamicBrokerConfigsFromSnapshot( - raftManager: KafkaRaftManager[ApiMessageAndVersion], - config: KafkaConfig, - quotaManagers: QuotaFactory.QuotaManagers, - logContext: LogContext - ): Unit = { - def putOrRemoveIfNull(props: Properties, key: String, value: String): Unit = { - if (value == null) { - props.remove(key) - } else { - props.put(key, value) - } - } - raftManager.replicatedLog.latestSnapshotId().ifPresent { latestSnapshotId => - raftManager.replicatedLog.readSnapshot(latestSnapshotId).ifPresent { rawSnapshotReader => - Using.resource( - RecordsSnapshotReader.of( - rawSnapshotReader, - raftManager.recordSerde, - BufferSupplier.create(), - KafkaRaftClient.MAX_BATCH_SIZE_BYTES, - true, - logContext - ) - ) { reader => - val dynamicPerBrokerConfigs = new Properties() - val dynamicDefaultConfigs = new Properties() - while (reader.hasNext) { - val batch = reader.next() - batch.forEach { record => - if (record.message().apiKey() == MetadataRecordType.CONFIG_RECORD.id) { - val configRecord = record.message().asInstanceOf[ConfigRecord] - if (DynamicBrokerConfig.AllDynamicConfigs.contains(configRecord.name()) && - configRecord.resourceType() == ConfigResource.Type.BROKER.id()) { - if (configRecord.resourceName().isEmpty) { - putOrRemoveIfNull(dynamicDefaultConfigs, configRecord.name(), configRecord.value()) - } else if (configRecord.resourceName() == config.brokerId.toString) { - putOrRemoveIfNull(dynamicPerBrokerConfigs, configRecord.name(), configRecord.value()) - } - } - } - } - } - val configHandler = new BrokerConfigHandler(config, quotaManagers) - configHandler.processConfigChanges("", dynamicDefaultConfigs) - configHandler.processConfigChanges(config.brokerId.toString, dynamicPerBrokerConfigs) - } - } - } - } } class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging { @@ -261,6 +210,7 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging private val lock = new ReentrantReadWriteLock private var metricsReceiverPluginOpt: Option[ClientMetricsReceiverPlugin] = _ private var currentConfig: KafkaConfig = _ + private val dynamicConfigPasswordEncoder = Some(PasswordEncoder.NOOP) private[server] def initialize(clientMetricsReceiverPluginOpt: Option[ClientMetricsReceiverPlugin]): Unit = { currentConfig = new KafkaConfig(kafkaConfig.props, false) @@ -291,11 +241,9 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging * directly. They are provided both old and new configs. */ def addReconfigurables(kafkaServer: KafkaBroker): Unit = { - kafkaServer.authorizerPlugin.foreach { plugin => - plugin.get match { - case authz: Reconfigurable => addReconfigurable(authz) - case _ => - } + kafkaServer.authorizer match { + case Some(authz: Reconfigurable) => addReconfigurable(authz) + case _ => } addReconfigurable(kafkaServer.kafkaYammerMetrics) addReconfigurable(new DynamicMetricsReporters(kafkaConfig.brokerId, kafkaServer.config, kafkaServer.metrics, kafkaServer.clusterId)) @@ -313,11 +261,9 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging * Add reconfigurables to be notified when a dynamic controller config is updated. */ def addReconfigurables(controller: ControllerServer): Unit = { - controller.authorizerPlugin.foreach { plugin => - plugin.get match { - case authz: Reconfigurable => addReconfigurable(authz) - case _ => - } + controller.authorizer match { + case Some(authz: Reconfigurable) => addReconfigurable(authz) + case _ => } if (!kafkaConfig.processRoles.contains(ProcessRole.BrokerRole)) { // only add these if the controller isn't also running the broker role @@ -336,17 +282,6 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging reconfigurables.add(reconfigurable) } - def addBrokerReconfigurable(reconfigurable: config.BrokerReconfigurable): Unit = { - verifyReconfigurableConfigs(reconfigurable.reconfigurableConfigs.asScala) - brokerReconfigurables.add(new BrokerReconfigurable { - override def reconfigurableConfigs: Set[String] = reconfigurable.reconfigurableConfigs().asScala - - override def validateReconfiguration(newConfig: KafkaConfig): Unit = reconfigurable.validateReconfiguration(newConfig) - - override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = reconfigurable.reconfigure(oldConfig, newConfig) - }) - } - def addBrokerReconfigurable(reconfigurable: BrokerReconfigurable): Unit = { verifyReconfigurableConfigs(reconfigurable.reconfigurableConfigs) brokerReconfigurables.add(reconfigurable) @@ -401,12 +336,12 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging } /** - * Config updates are triggered through actual changes in stored values. + * All config updates through ZooKeeper are triggered through actual changes in values stored in ZooKeeper. * For some configs like SSL keystores and truststores, we also want to reload the store if it was modified - * in-place, even though the actual value of the file path and password haven't changed. This scenario is - * handled when a config update request using admin client is processed by the AdminManager. If any of - * the SSL configs have changed, then the update will be handled when configuration changes are processed. - * At the moment, only listener configs are considered for reloading. + * in-place, even though the actual value of the file path and password haven't changed. This scenario alone + * is handled here when a config update request using admin client is processed by ZkAdminManager. If any of + * the SSL configs have changed, then the update will not be done here, but will be handled later when ZK + * changes are processed. At the moment, only listener configs are considered for reloading. */ private[server] def reloadUpdatedFilesWithoutConfigChange(newProps: Properties): Unit = CoreUtils.inWriteLock(lock) { reconfigurables.forEach(r => { @@ -423,6 +358,27 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging }) } + private def passwordEncoder: PasswordEncoder = { + dynamicConfigPasswordEncoder.getOrElse(throw new ConfigException("Password encoder secret not configured")) + } + + private[server] def toPersistentProps(configProps: Properties, perBrokerConfig: Boolean): Properties = { + val props = configProps.clone().asInstanceOf[Properties] + + def encodePassword(configName: String, value: String): Unit = { + if (value != null) { + if (!perBrokerConfig) + throw new ConfigException("Password config can be defined only at broker level") + props.setProperty(configName, passwordEncoder.encode(new Password(value))) + } + } + configProps.asScala.foreachEntry { (name, value) => + if (isPasswordConfig(name)) + encodePassword(name, value) + } + props + } + private[server] def fromPersistentProps(persistentProps: Properties, perBrokerConfig: Boolean): Properties = { val props = persistentProps.clone().asInstanceOf[Properties] @@ -441,6 +397,22 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging if (!perBrokerConfig) removeInvalidProps(perBrokerConfigs(props), "Per-broker configs defined at default cluster level will be ignored") + def decodePassword(configName: String, value: String): Unit = { + if (value != null) { + try { + props.setProperty(configName, passwordEncoder.decode(value).value) + } catch { + case e: Exception => + error(s"Dynamic password config $configName could not be decoded, ignoring.", e) + props.remove(configName) + } + } + } + + props.asScala.foreachEntry { (name, value) => + if (isPasswordConfig(name)) + decodePassword(name, value) + } props } @@ -629,9 +601,6 @@ class DynamicBrokerConfig(private val kafkaConfig: KafkaConfig) extends Logging } } -/** - * Implement [[config.BrokerReconfigurable]] instead. - */ trait BrokerReconfigurable { def reconfigurableConfigs: Set[String] @@ -719,6 +688,42 @@ class DynamicLogConfig(logManager: LogManager) extends BrokerReconfigurable with } } +object DynamicThreadPool { + val ReconfigurableConfigs = Set( + ServerConfigs.NUM_IO_THREADS_CONFIG, + ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, + ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_CONFIG, + ServerConfigs.BACKGROUND_THREADS_CONFIG) + + def validateReconfiguration(currentConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { + newConfig.values.forEach { (k, v) => + if (ReconfigurableConfigs.contains(k)) { + val newValue = v.asInstanceOf[Int] + val oldValue = getValue(currentConfig, k) + if (newValue != oldValue) { + val errorMsg = s"Dynamic thread count update validation failed for $k=$v" + if (newValue <= 0) + throw new ConfigException(s"$errorMsg, value should be at least 1") + if (newValue < oldValue / 2) + throw new ConfigException(s"$errorMsg, value should be at least half the current value $oldValue") + if (newValue > oldValue * 2) + throw new ConfigException(s"$errorMsg, value should not be greater than double the current value $oldValue") + } + } + } + } + + def getValue(config: KafkaConfig, name: String): Int = { + name match { + case ServerConfigs.NUM_IO_THREADS_CONFIG => config.numIoThreads + case ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG => config.numReplicaFetchers + case ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_CONFIG => config.numRecoveryThreadsPerDataDir + case ServerConfigs.BACKGROUND_THREADS_CONFIG => config.backgroundThreads + case n => throw new IllegalStateException(s"Unexpected config $n") + } + } +} + class ControllerDynamicThreadPool(controller: ControllerServer) extends BrokerReconfigurable { override def reconfigurableConfigs: Set[String] = { @@ -738,7 +743,7 @@ class ControllerDynamicThreadPool(controller: ControllerServer) extends BrokerRe class BrokerDynamicThreadPool(server: KafkaBroker) extends BrokerReconfigurable { override def reconfigurableConfigs: Set[String] = { - DynamicThreadPool.RECONFIGURABLE_CONFIGS.asScala + DynamicThreadPool.ReconfigurableConfigs } override def validateReconfiguration(newConfig: KafkaConfig): Unit = { @@ -932,31 +937,25 @@ class DynamicClientQuotaCallback( override def reconfigurableConfigs(): util.Set[String] = { val configs = new util.HashSet[String]() - quotaManagers.clientQuotaCallbackPlugin.ifPresent { plugin => - plugin.get() match { - case callback: Reconfigurable => configs.addAll(callback.reconfigurableConfigs) - case _ => - } + quotaManagers.clientQuotaCallback.ifPresent { + case callback: Reconfigurable => configs.addAll(callback.reconfigurableConfigs) + case _ => } configs } override def validateReconfiguration(configs: util.Map[String, _]): Unit = { - quotaManagers.clientQuotaCallbackPlugin.ifPresent { plugin => - plugin.get() match { - case callback: Reconfigurable => callback.validateReconfiguration(configs) - case _ => - } + quotaManagers.clientQuotaCallback.ifPresent { + case callback: Reconfigurable => callback.validateReconfiguration(configs) + case _ => } } override def reconfigure(configs: util.Map[String, _]): Unit = { - quotaManagers.clientQuotaCallbackPlugin.ifPresent { plugin => - plugin.get() match { - case callback: Reconfigurable => - serverConfig.dynamicConfig.maybeReconfigure(callback, serverConfig.dynamicConfig.currentKafkaConfig, configs) - case _ => - } + quotaManagers.clientQuotaCallback.ifPresent { + case callback: Reconfigurable => + serverConfig.dynamicConfig.maybeReconfigure(callback, serverConfig.dynamicConfig.currentKafkaConfig, configs) + case _ => } } } @@ -969,12 +968,12 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi def validateReconfiguration(newConfig: KafkaConfig): Unit = { val oldConfig = server.config - val newListeners = newConfig.listeners.map(l => ListenerName.normalised(l.listener)).toSet - val oldAdvertisedListeners = oldConfig.effectiveAdvertisedBrokerListeners.map(l => ListenerName.normalised(l.listener)).toSet - val oldListeners = oldConfig.listeners.map(l => ListenerName.normalised(l.listener)).toSet + val newListeners = newConfig.listeners.map(_.listenerName).toSet + val oldAdvertisedListeners = oldConfig.effectiveAdvertisedBrokerListeners.map(_.listenerName).toSet + val oldListeners = oldConfig.listeners.map(_.listenerName).toSet if (!oldAdvertisedListeners.subsetOf(newListeners)) throw new ConfigException(s"Advertised listeners '$oldAdvertisedListeners' must be a subset of listeners '$newListeners'") - if (!newListeners.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet.asScala)) + if (!newListeners.subsetOf(newConfig.effectiveListenerSecurityProtocolMap.keySet)) throw new ConfigException(s"Listeners '$newListeners' must be subset of listener map '${newConfig.effectiveListenerSecurityProtocolMap}'") newListeners.intersect(oldListeners).foreach { listenerName => def immutableListenerConfigs(kafkaConfig: KafkaConfig, prefix: String): Map[String, AnyRef] = { @@ -986,7 +985,7 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi if (immutableListenerConfigs(newConfig, listenerName.configPrefix) != immutableListenerConfigs(oldConfig, listenerName.configPrefix)) throw new ConfigException(s"Configs cannot be updated dynamically for existing listener $listenerName, " + "restart broker or create a new listener for update") - if (oldConfig.effectiveListenerSecurityProtocolMap.get(listenerName) != newConfig.effectiveListenerSecurityProtocolMap.get(listenerName)) + if (oldConfig.effectiveListenerSecurityProtocolMap(listenerName) != newConfig.effectiveListenerSecurityProtocolMap(listenerName)) throw new ConfigException(s"Security protocol cannot be updated for existing listener $listenerName") } } @@ -996,8 +995,8 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi val newListenerMap = listenersToMap(newListeners) val oldListeners = oldConfig.listeners val oldListenerMap = listenersToMap(oldListeners) - val listenersRemoved = oldListeners.filterNot(e => newListenerMap.contains(ListenerName.normalised(e.listener))) - val listenersAdded = newListeners.filterNot(e => oldListenerMap.contains(ListenerName.normalised(e.listener))) + val listenersRemoved = oldListeners.filterNot(e => newListenerMap.contains(e.listenerName)) + val listenersAdded = newListeners.filterNot(e => oldListenerMap.contains(e.listenerName)) if (listenersRemoved.nonEmpty || listenersAdded.nonEmpty) { LoginManager.closeAll() // Clear SASL login cache to force re-login if (listenersRemoved.nonEmpty) server.socketServer.removeListeners(listenersRemoved) @@ -1005,8 +1004,29 @@ class DynamicListenerConfig(server: KafkaBroker) extends BrokerReconfigurable wi } } - private def listenersToMap(listeners: Seq[Endpoint]): Map[ListenerName, Endpoint] = - listeners.map(e => (ListenerName.normalised(e.listener), e)).toMap + private def listenersToMap(listeners: Seq[EndPoint]): Map[ListenerName, EndPoint] = + listeners.map(e => (e.listenerName, e)).toMap + +} + +class DynamicProducerStateManagerConfig(val producerStateManagerConfig: ProducerStateManagerConfig) extends BrokerReconfigurable with Logging { + def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { + if (producerStateManagerConfig.producerIdExpirationMs != newConfig.transactionLogConfig.producerIdExpirationMs) { + info(s"Reconfigure ${TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG} from ${producerStateManagerConfig.producerIdExpirationMs} to ${newConfig.transactionLogConfig.producerIdExpirationMs}") + producerStateManagerConfig.setProducerIdExpirationMs(newConfig.transactionLogConfig.producerIdExpirationMs) + } + if (producerStateManagerConfig.transactionVerificationEnabled != newConfig.transactionLogConfig.transactionPartitionVerificationEnable) { + info(s"Reconfigure ${TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG} from ${producerStateManagerConfig.transactionVerificationEnabled} to ${newConfig.transactionLogConfig.transactionPartitionVerificationEnable}") + producerStateManagerConfig.setTransactionVerificationEnabled(newConfig.transactionLogConfig.transactionPartitionVerificationEnable) + } + } + + def validateReconfiguration(newConfig: KafkaConfig): Unit = { + if (newConfig.transactionLogConfig.producerIdExpirationMs < 0) + throw new ConfigException(s"${TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG} cannot be less than 0, current value is ${producerStateManagerConfig.producerIdExpirationMs}, and new value is ${newConfig.transactionLogConfig.producerIdExpirationMs}") + } + + override def reconfigurableConfigs: Set[String] = DynamicProducerStateManagerConfig } @@ -1030,19 +1050,9 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w if (RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP.equals(k) || RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP.equals(k) || - RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP.equals(k) || - RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP.equals(k)) { + RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP.equals(k)) { val newValue = v.asInstanceOf[Int] - val oldValue: Int = { - // This logic preserves backward compatibility in scenarios where - // `remote.log.manager.thread.pool.size` is configured in config file, - // but `remote.log.manager.follower.thread.pool.size` is set dynamically. - // This can be removed once `remote.log.manager.thread.pool.size` is removed. - if (RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP.equals(k)) - server.config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize() - else - server.config.getInt(k) - } + val oldValue = server.config.getInt(k) if (newValue != oldValue) { val errorMsg = s"Dynamic thread count update validation failed for $k=$v" if (newValue <= 0) @@ -1094,9 +1104,6 @@ class DynamicRemoteLogConfig(server: KafkaBroker) extends BrokerReconfigurable w if (newRLMConfig.remoteLogManagerExpirationThreadPoolSize() != oldRLMConfig.remoteLogManagerExpirationThreadPoolSize()) remoteLogManager.resizeExpirationThreadPool(newRLMConfig.remoteLogManagerExpirationThreadPoolSize()) - if (newRLMConfig.remoteLogManagerFollowerThreadPoolSize() != oldRLMConfig.remoteLogManagerFollowerThreadPoolSize()) - remoteLogManager.resizeFollowerThreadPool(newRLMConfig.remoteLogManagerFollowerThreadPoolSize()) - if (newRLMConfig.remoteLogReaderThreads() != oldRLMConfig.remoteLogReaderThreads()) remoteLogManager.resizeReaderThreadPool(newRLMConfig.remoteLogReaderThreads()) } @@ -1122,7 +1129,6 @@ object DynamicRemoteLogConfig { RemoteLogManagerConfig.REMOTE_LIST_OFFSETS_REQUEST_TIMEOUT_MS_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP, RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP, - RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP ) } diff --git a/core/src/main/scala/kafka/server/DynamicConfig.scala b/core/src/main/scala/kafka/server/DynamicConfig.scala index ad48b904c13bd..7a401ec1eb426 100644 --- a/core/src/main/scala/kafka/server/DynamicConfig.scala +++ b/core/src/main/scala/kafka/server/DynamicConfig.scala @@ -21,6 +21,7 @@ import kafka.server.DynamicBrokerConfig.AllDynamicConfigs import java.util.Properties import org.apache.kafka.common.config.ConfigDef +import org.apache.kafka.coordinator.group.GroupConfig import org.apache.kafka.server.config.QuotaConfig import java.util @@ -53,6 +54,47 @@ object DynamicConfig { def validate(props: Properties): util.Map[String, AnyRef] = DynamicConfig.validate(brokerConfigs, props, customPropsAllowed = true) } + object Client { + private val clientConfigs = QuotaConfig.userAndClientQuotaConfigs() + + def configKeys: util.Map[String, ConfigDef.ConfigKey] = clientConfigs.configKeys + + def names: util.Set[String] = clientConfigs.names + + def validate(props: Properties): util.Map[String, AnyRef] = DynamicConfig.validate(clientConfigs, props, customPropsAllowed = false) + } + + object User { + private val userConfigs = QuotaConfig.scramMechanismsPlusUserAndClientQuotaConfigs() + + def configKeys: util.Map[String, ConfigDef.ConfigKey] = userConfigs.configKeys + + def names: util.Set[String] = userConfigs.names + + def validate(props: Properties): util.Map[String, AnyRef] = DynamicConfig.validate(userConfigs, props, customPropsAllowed = false) + } + + object Ip { + private val ipConfigs = QuotaConfig.ipConfigs + + def configKeys: util.Map[String, ConfigDef.ConfigKey] = ipConfigs.configKeys + + def names: util.Set[String] = ipConfigs.names + + def validate(props: Properties): util.Map[String, AnyRef] = DynamicConfig.validate(ipConfigs, props, customPropsAllowed = false) + } + + object ClientMetrics { + private val clientConfigs = org.apache.kafka.server.metrics.ClientMetricsConfigs.configDef() + + def names: util.Set[String] = clientConfigs.names + } + + object Group { + private val groupConfigs = GroupConfig.configDef() + + def names: util.Set[String] = groupConfigs.names + } private def validate(configDef: ConfigDef, props: Properties, customPropsAllowed: Boolean) = { // Validate Names diff --git a/core/src/main/scala/kafka/server/FetchSession.scala b/core/src/main/scala/kafka/server/FetchSession.scala index 51db1fcb092fe..773958cd4318b 100644 --- a/core/src/main/scala/kafka/server/FetchSession.scala +++ b/core/src/main/scala/kafka/server/FetchSession.scala @@ -224,7 +224,7 @@ class CachedPartition(var topic: String, * fields are read or modified. This includes modification of the session partition map. * * @param id The unique fetch session ID. - * @param privileged True if this session is privileged. Sessions created by followers + * @param privileged True if this session is privileged. Sessions crated by followers * are privileged; session created by consumers are not. * @param partitionMap The CachedPartitionMap. * @param usesTopicIds True if this session is using topic IDs @@ -270,7 +270,8 @@ class FetchSession(val id: Int, // Update the cached partition data based on the request. def update(fetchData: FetchSession.REQ_MAP, - toForget: util.List[TopicIdPartition]): (TL, TL, TL) = synchronized { + toForget: util.List[TopicIdPartition], + reqMetadata: JFetchMetadata): (TL, TL, TL) = synchronized { val added = new TL val updated = new TL val removed = new TL @@ -879,7 +880,7 @@ class FetchManager(private val time: Time, s", but request version $reqVersion means that we can not.") new SessionErrorContext(Errors.FETCH_SESSION_TOPIC_ID_ERROR, reqMetadata) } else { - val (added, updated, removed) = session.update(fetchData, toForget) + val (added, updated, removed) = session.update(fetchData, toForget, reqMetadata) if (session.isEmpty) { debug(s"Created a new sessionless FetchContext and closing session id ${session.id}, " + s"epoch ${session.epoch}: after removing ${partitionsToLogString(removed)}, " + diff --git a/core/src/main/scala/kafka/server/ForwardingManager.scala b/core/src/main/scala/kafka/server/ForwardingManager.scala index 7737d2d2171f2..45c95e38db82f 100644 --- a/core/src/main/scala/kafka/server/ForwardingManager.scala +++ b/core/src/main/scala/kafka/server/ForwardingManager.scala @@ -26,9 +26,7 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, EnvelopeRequest, EnvelopeResponse, RequestContext, RequestHeader} import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager} -import org.apache.kafka.server.metrics.ForwardingManagerMetrics -import java.util.Optional import java.util.concurrent.TimeUnit import scala.jdk.OptionConverters.RichOptional @@ -87,7 +85,7 @@ trait ForwardingManager { responseCallback: Option[AbstractResponse] => Unit ): Unit - def controllerApiVersions: Optional[NodeApiVersions] + def controllerApiVersions: Option[NodeApiVersions] } object ForwardingManager { @@ -118,7 +116,7 @@ class ForwardingManagerImpl( metrics: Metrics ) extends ForwardingManager with AutoCloseable with Logging { - val forwardingManagerMetrics: ForwardingManagerMetrics = new ForwardingManagerMetrics(metrics, channelManager.getTimeoutMs) + val forwardingManagerMetrics: ForwardingManagerMetrics = ForwardingManagerMetrics(metrics, channelManager.getTimeoutMs) override def forwardRequest( requestContext: RequestContext, @@ -134,7 +132,7 @@ class ForwardingManagerImpl( class ForwardingResponseHandler extends ControllerRequestCompletionHandler { override def onComplete(clientResponse: ClientResponse): Unit = { - forwardingManagerMetrics.decrementQueueLength() + forwardingManagerMetrics.queueLength.getAndDecrement() forwardingManagerMetrics.remoteTimeMsHist.record(clientResponse.requestLatencyMs()) forwardingManagerMetrics.queueTimeMsHist.record(clientResponse.receivedTimeMs() - clientResponse.requestLatencyMs() - requestCreationTimeMs) @@ -175,22 +173,22 @@ class ForwardingManagerImpl( override def onTimeout(): Unit = { debug(s"Forwarding of the request ${requestToString()} failed due to timeout exception") - forwardingManagerMetrics.decrementQueueLength() + forwardingManagerMetrics.queueLength.getAndDecrement() forwardingManagerMetrics.queueTimeMsHist.record(channelManager.getTimeoutMs) val response = requestBody.getErrorResponse(new TimeoutException()) responseCallback(Option(response)) } } - forwardingManagerMetrics.incrementQueueLength() + forwardingManagerMetrics.queueLength.getAndIncrement() channelManager.sendRequest(envelopeRequest, new ForwardingResponseHandler) } override def close(): Unit = forwardingManagerMetrics.close() - override def controllerApiVersions: Optional[NodeApiVersions] = - channelManager.controllerApiVersions + override def controllerApiVersions: Option[NodeApiVersions] = + channelManager.controllerApiVersions.toScala private def parseResponse( buffer: ByteBuffer, diff --git a/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala b/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala new file mode 100644 index 0000000000000..a846f8c49551d --- /dev/null +++ b/core/src/main/scala/kafka/server/ForwardingManagerMetrics.scala @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import org.apache.kafka.common.MetricName +import org.apache.kafka.common.metrics.{Gauge, MetricConfig, Metrics} +import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing +import org.apache.kafka.common.metrics.stats.{Percentile, Percentiles} + +import java.util.concurrent.atomic.AtomicInteger + +final class ForwardingManagerMetrics private ( + metrics: Metrics, + timeoutMs: Long, +) extends AutoCloseable { + import ForwardingManagerMetrics._ + + /** + * A histogram describing the amount of time in milliseconds each admin request spends in the broker's forwarding manager queue, waiting to be sent to the controller. + * This does not include the time that the request spends waiting for a response from the controller. + */ + val queueTimeMsHist: LatencyHistogram = new LatencyHistogram(metrics, queueTimeMsName, metricGroupName, timeoutMs) + + /** + * A histogram describing the amount of time in milliseconds each request sent by the ForwardingManager spends waiting for a response. + * This does not include the time spent in the queue. + */ + val remoteTimeMsHist: LatencyHistogram = new LatencyHistogram(metrics, remoteTimeMsName, metricGroupName, timeoutMs) + + val queueLengthName: MetricName = metrics.metricName( + "QueueLength", + metricGroupName, + "The current number of RPCs that are waiting in the broker's forwarding manager queue, waiting to be sent to the controller." + ) + val queueLength: AtomicInteger = new AtomicInteger(0) + metrics.addMetric(queueLengthName, new FuncGauge(_ => queueLength.get())) + + override def close(): Unit = { + queueTimeMsHist.close() + remoteTimeMsHist.close() + metrics.removeMetric(queueLengthName) + } +} + +object ForwardingManagerMetrics { + + val metricGroupName = "ForwardingManager" + val queueTimeMsName = "QueueTimeMs" + val remoteTimeMsName = "RemoteTimeMs" + + final class LatencyHistogram ( + metrics: Metrics, + name: String, + group: String, + maxLatency: Long + ) extends AutoCloseable { + private val sensor = metrics.sensor(name) + val latencyP99Name: MetricName = metrics.metricName(s"$name.p99", group) + val latencyP999Name: MetricName = metrics.metricName(s"$name.p999", group) + + sensor.add(new Percentiles( + 4000, + maxLatency, + BucketSizing.CONSTANT, + new Percentile(latencyP99Name, 99), + new Percentile(latencyP999Name, 99.9) + )) + + override def close(): Unit = { + metrics.removeSensor(name) + metrics.removeMetric(latencyP99Name) + metrics.removeMetric(latencyP999Name) + } + + def record(latencyMs: Long): Unit = sensor.record(latencyMs) + } + + private final class FuncGauge[T](func: Long => T) extends Gauge[T] { + override def value(config: MetricConfig, now: Long): T = { + func(now) + } + } + + def apply(metrics: Metrics, timeoutMs: Long): ForwardingManagerMetrics = new ForwardingManagerMetrics(metrics, timeoutMs) +} diff --git a/core/src/main/scala/kafka/server/KafkaApis.scala b/core/src/main/scala/kafka/server/KafkaApis.scala index 6ca9014f65410..195a42c719270 100644 --- a/core/src/main/scala/kafka/server/KafkaApis.scala +++ b/core/src/main/scala/kafka/server/KafkaApis.scala @@ -21,20 +21,20 @@ import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinat import kafka.network.RequestChannel import kafka.server.QuotaFactory.{QuotaManagers, UNBOUNDED_QUOTA} import kafka.server.handlers.DescribeTopicPartitionsRequestHandler -import kafka.server.share.{ShareFetchUtils, SharePartitionManager} +import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache} +import kafka.server.share.SharePartitionManager import kafka.utils.Logging +import org.apache.kafka.admin.AdminUtils import org.apache.kafka.clients.CommonClientConfigs import org.apache.kafka.clients.admin.EndpointType import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.acl.AclOperation._ -import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic.{GROUP_METADATA_TOPIC_NAME, SHARE_GROUP_STATE_TOPIC_NAME, TRANSACTION_STATE_TOPIC_NAME, isInternal} -import org.apache.kafka.common.internals.{FatalExitError, Plugin, Topic} +import org.apache.kafka.common.internals.{FatalExitError, Topic} import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.{AddPartitionsToTxnResult, AddPartitionsToTxnResultCollection} import org.apache.kafka.common.message.DeleteRecordsResponseData.{DeleteRecordsPartitionResult, DeleteRecordsTopicResult} -import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic -import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource import org.apache.kafka.common.message.ListOffsetsRequestData.ListOffsetsPartition import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.message.MetadataResponseData.{MetadataResponsePartition, MetadataResponseTopic} @@ -57,18 +57,15 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.security.token.delegation.{DelegationToken, TokenInformation} import org.apache.kafka.common.utils.{ProducerIdAndEpoch, Time} import org.apache.kafka.common.{Node, TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.coordinator.group.{Group, GroupConfig, GroupConfigManager, GroupCoordinator} +import org.apache.kafka.coordinator.group.{Group, GroupCoordinator} import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.metadata.{ConfigRepository, MetadataCache} -import org.apache.kafka.security.DelegationTokenManager -import org.apache.kafka.server.{ApiVersionManager, ClientMetricsManager, ProcessRole} +import org.apache.kafka.server.ClientMetricsManager import org.apache.kafka.server.authorizer._ -import org.apache.kafka.server.common.{GroupVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} +import org.apache.kafka.server.common.{GroupVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.share.context.ShareFetchContext import org.apache.kafka.server.share.{ErroneousAndValidPartitionData, SharePartitionKey} import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager import org.apache.kafka.storage.internals.log.AppendOrigin import org.apache.kafka.storage.log.metrics.BrokerTopicStats @@ -82,7 +79,6 @@ import scala.annotation.nowarn import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.javaapi.OptionConverters /** * Logic to handle the various Kafka requests @@ -92,14 +88,14 @@ class KafkaApis(val requestChannel: RequestChannel, val replicaManager: ReplicaManager, val groupCoordinator: GroupCoordinator, val txnCoordinator: TransactionCoordinator, - val shareCoordinator: ShareCoordinator, + val shareCoordinator: Option[ShareCoordinator], val autoTopicCreationManager: AutoTopicCreationManager, val brokerId: Int, val config: KafkaConfig, val configRepository: ConfigRepository, val metadataCache: MetadataCache, val metrics: Metrics, - val authorizerPlugin: Option[Plugin[Authorizer]], + val authorizer: Option[Authorizer], val quotas: QuotaManagers, val fetchManager: FetchManager, val sharePartitionManager: SharePartitionManager, @@ -108,19 +104,21 @@ class KafkaApis(val requestChannel: RequestChannel, time: Time, val tokenManager: DelegationTokenManager, val apiVersionManager: ApiVersionManager, - val clientMetricsManager: ClientMetricsManager, - val groupConfigManager: GroupConfigManager + val clientMetricsManager: ClientMetricsManager ) extends ApiRequestHandler with Logging { - type ProduceResponseStats = Map[TopicIdPartition, RecordValidationStats] + type FetchResponseStats = Map[TopicPartition, RecordValidationStats] this.logIdent = "[KafkaApi-%d] ".format(brokerId) val configHelper = new ConfigHelper(metadataCache, config, configRepository) - val authHelper = new AuthHelper(authorizerPlugin) + val authHelper = new AuthHelper(authorizer) val requestHelper = new RequestHandlerHelper(requestChannel, quotas, time) - val aclApis = new AclApis(authHelper, authorizerPlugin, requestHelper, ProcessRole.BrokerRole, config) + val aclApis = new AclApis(authHelper, authorizer, requestHelper, "broker", config) val configManager = new ConfigAdminManager(brokerId, config, configRepository) - val describeTopicPartitionsRequestHandler = new DescribeTopicPartitionsRequestHandler( - metadataCache, authHelper, config) + val describeTopicPartitionsRequestHandler : Option[DescribeTopicPartitionsRequestHandler] = metadataCache match { + case kRaftMetadataCache: KRaftMetadataCache => + Some(new DescribeTopicPartitionsRequestHandler(kRaftMetadataCache, authHelper, config)) + case _ => None + } def close(): Unit = { aclApis.close() @@ -228,23 +226,18 @@ class KafkaApis(val requestChannel: RequestChannel, case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => handleDescribeTopicPartitionsRequest(request) case ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS => handleGetTelemetrySubscriptionsRequest(request) case ApiKeys.PUSH_TELEMETRY => handlePushTelemetryRequest(request) - case ApiKeys.LIST_CONFIG_RESOURCES => handleListConfigResources(request) + case ApiKeys.LIST_CLIENT_METRICS_RESOURCES => handleListClientMetricsResources(request) case ApiKeys.ADD_RAFT_VOTER => forwardToController(request) case ApiKeys.REMOVE_RAFT_VOTER => forwardToController(request) case ApiKeys.SHARE_GROUP_HEARTBEAT => handleShareGroupHeartbeat(request).exceptionally(handleError) case ApiKeys.SHARE_GROUP_DESCRIBE => handleShareGroupDescribe(request).exceptionally(handleError) - case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request).exceptionally(handleError) - case ApiKeys.SHARE_ACKNOWLEDGE => handleShareAcknowledgeRequest(request).exceptionally(handleError) - case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => handleInitializeShareGroupStateRequest(request).exceptionally(handleError) - case ApiKeys.READ_SHARE_GROUP_STATE => handleReadShareGroupStateRequest(request).exceptionally(handleError) - case ApiKeys.WRITE_SHARE_GROUP_STATE => handleWriteShareGroupStateRequest(request).exceptionally(handleError) - case ApiKeys.DELETE_SHARE_GROUP_STATE => handleDeleteShareGroupStateRequest(request).exceptionally(handleError) - case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => handleReadShareGroupStateSummaryRequest(request).exceptionally(handleError) - case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => handleDescribeShareGroupOffsetsRequest(request).exceptionally(handleError) - case ApiKeys.ALTER_SHARE_GROUP_OFFSETS => handleAlterShareGroupOffsetsRequest(request).exceptionally(handleError) - case ApiKeys.DELETE_SHARE_GROUP_OFFSETS => handleDeleteShareGroupOffsetsRequest(request).exceptionally(handleError) - case ApiKeys.STREAMS_GROUP_DESCRIBE => handleStreamsGroupDescribe(request).exceptionally(handleError) - case ApiKeys.STREAMS_GROUP_HEARTBEAT => handleStreamsGroupHeartbeat(request).exceptionally(handleError) + case ApiKeys.SHARE_FETCH => handleShareFetchRequest(request) + case ApiKeys.SHARE_ACKNOWLEDGE => handleShareAcknowledgeRequest(request) + case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => handleInitializeShareGroupStateRequest(request) + case ApiKeys.READ_SHARE_GROUP_STATE => handleReadShareGroupStateRequest(request) + case ApiKeys.WRITE_SHARE_GROUP_STATE => handleWriteShareGroupStateRequest(request) + case ApiKeys.DELETE_SHARE_GROUP_STATE => handleDeleteShareGroupStateRequest(request) + case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => handleReadShareGroupStateSummaryRequest(request) case _ => throw new IllegalStateException(s"No handler for request api key ${request.header.apiKey}") } } catch { @@ -275,21 +268,11 @@ class KafkaApis(val requestChannel: RequestChannel, ): CompletableFuture[Unit] = { val offsetCommitRequest = request.body[OffsetCommitRequest] - // Reject the request if not authorized to the group. + // Reject the request if not authorized to the group if (!authHelper.authorize(request.context, READ, GROUP, offsetCommitRequest.data.groupId)) { requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) } else { - val useTopicIds = OffsetCommitResponse.useTopicIds(request.header.apiVersion) - - if (useTopicIds) { - offsetCommitRequest.data.topics.forEach { topic => - if (topic.topicId != Uuid.ZERO_UUID) { - metadataCache.getTopicName(topic.topicId).ifPresent(name => topic.setName(name)) - } - } - } - val authorizedTopics = authHelper.filterByAuthorized( request.context, READ, @@ -297,40 +280,28 @@ class KafkaApis(val requestChannel: RequestChannel, offsetCommitRequest.data.topics.asScala )(_.name) - val responseBuilder = OffsetCommitResponse.newBuilder(useTopicIds) + val responseBuilder = new OffsetCommitResponse.Builder() val authorizedTopicsRequest = new mutable.ArrayBuffer[OffsetCommitRequestData.OffsetCommitRequestTopic]() offsetCommitRequest.data.topics.forEach { topic => - if (useTopicIds && topic.name.isEmpty) { - // If the topic name is undefined, it means that the topic id is unknown so we add - // the topic and all its partitions to the response with UNKNOWN_TOPIC_ID. - responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( - topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_ID) - } else if (!authorizedTopics.contains(topic.name)) { + if (!authorizedTopics.contains(topic.name)) { // If the topic is not authorized, we add the topic and all its partitions // to the response with TOPIC_AUTHORIZATION_FAILED. responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( - topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.TOPIC_AUTHORIZATION_FAILED) + topic.name, topic.partitions, _.partitionIndex, Errors.TOPIC_AUTHORIZATION_FAILED) } else if (!metadataCache.contains(topic.name)) { // If the topic is unknown, we add the topic and all its partitions // to the response with UNKNOWN_TOPIC_OR_PARTITION. responseBuilder.addPartitions[OffsetCommitRequestData.OffsetCommitRequestPartition]( - topic.topicId, topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) + topic.name, topic.partitions, _.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) } else { // Otherwise, we check all partitions to ensure that they all exist. - val topicWithValidPartitions = new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(topic.topicId) - .setName(topic.name) + val topicWithValidPartitions = new OffsetCommitRequestData.OffsetCommitRequestTopic().setName(topic.name) topic.partitions.forEach { partition => - if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).isPresent) { + if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).nonEmpty) { topicWithValidPartitions.partitions.add(partition) } else { - responseBuilder.addPartition( - topic.topicId, - topic.name, - partition.partitionIndex, - Errors.UNKNOWN_TOPIC_OR_PARTITION - ) + responseBuilder.addPartition(topic.name, partition.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) } } @@ -344,23 +315,42 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendMaybeThrottle(request, responseBuilder.build()) CompletableFuture.completedFuture(()) } else { - groupCoordinator.commitOffsets( - request.context, - new OffsetCommitRequestData() - .setGroupId(offsetCommitRequest.data.groupId) - .setMemberId(offsetCommitRequest.data.memberId) - .setGenerationIdOrMemberEpoch(offsetCommitRequest.data.generationIdOrMemberEpoch) - .setRetentionTimeMs(offsetCommitRequest.data.retentionTimeMs) - .setGroupInstanceId(offsetCommitRequest.data.groupInstanceId) - .setTopics(authorizedTopicsRequest.asJava), - requestLocal.bufferSupplier - ).handle[Unit] { (results, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, responseBuilder.merge(results).build()) - } - } + // For version > 0, store offsets in Coordinator. + commitOffsetsToCoordinator( + request, + offsetCommitRequest, + authorizedTopicsRequest, + responseBuilder, + requestLocal + ) + } + } + } + + private def commitOffsetsToCoordinator( + request: RequestChannel.Request, + offsetCommitRequest: OffsetCommitRequest, + authorizedTopicsRequest: mutable.ArrayBuffer[OffsetCommitRequestData.OffsetCommitRequestTopic], + responseBuilder: OffsetCommitResponse.Builder, + requestLocal: RequestLocal + ): CompletableFuture[Unit] = { + val offsetCommitRequestData = new OffsetCommitRequestData() + .setGroupId(offsetCommitRequest.data.groupId) + .setMemberId(offsetCommitRequest.data.memberId) + .setGenerationIdOrMemberEpoch(offsetCommitRequest.data.generationIdOrMemberEpoch) + .setRetentionTimeMs(offsetCommitRequest.data.retentionTimeMs) + .setGroupInstanceId(offsetCommitRequest.data.groupInstanceId) + .setTopics(authorizedTopicsRequest.asJava) + + groupCoordinator.commitOffsets( + request.context, + offsetCommitRequestData, + requestLocal.bufferSupplier + ).handle[Unit] { (results, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, offsetCommitRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, responseBuilder.merge(results).build()) } } } @@ -374,12 +364,12 @@ class KafkaApis(val requestChannel: RequestChannel, (x.leaderReplicaIdOpt.getOrElse(-1), x.getLeaderEpoch) case Left(x) => debug(s"Unable to retrieve local leaderId and Epoch with error $x, falling back to metadata cache") - OptionConverters.toScala(metadataCache.getLeaderAndIsr(tp.topic, tp.partition)) match { + metadataCache.getLeaderAndIsr(tp.topic, tp.partition) match { case Some(pinfo) => (pinfo.leader(), pinfo.leaderEpoch()) case None => (-1, -1) } } - LeaderNode(leaderId, leaderEpoch, OptionConverters.toScala(metadataCache.getAliveBrokerNode(leaderId, ln))) + LeaderNode(leaderId, leaderEpoch, metadataCache.getAliveBrokerNode(leaderId, ln)) } /** @@ -397,73 +387,57 @@ class KafkaApis(val requestChannel: RequestChannel, } } - val unauthorizedTopicResponses = mutable.Map[TopicIdPartition, PartitionResponse]() - val nonExistingTopicResponses = mutable.Map[TopicIdPartition, PartitionResponse]() - val invalidRequestResponses = mutable.Map[TopicIdPartition, PartitionResponse]() - val authorizedRequestInfo = mutable.Map[TopicIdPartition, MemoryRecords]() - val topicIdToPartitionData = new mutable.ArrayBuffer[(TopicIdPartition, ProduceRequestData.PartitionProduceData)] - - produceRequest.data.topicData.forEach { topic => - topic.partitionData.forEach { partition => - val (topicName, topicId) = if (topic.topicId().equals(Uuid.ZERO_UUID)) { - (topic.name(), metadataCache.getTopicId(topic.name())) - } else { - (metadataCache.getTopicName(topic.topicId).orElse(topic.name), topic.topicId()) - } - - val topicPartition = new TopicPartition(topicName, partition.index()) - // To be compatible with the old version, only return UNKNOWN_TOPIC_ID if request version uses topicId, but the corresponding topic name can't be found. - if (topicName.isEmpty && request.header.apiVersion > 12) - nonExistingTopicResponses += new TopicIdPartition(topicId, topicPartition) -> new PartitionResponse(Errors.UNKNOWN_TOPIC_ID) - else - topicIdToPartitionData += new TopicIdPartition(topicId, topicPartition) -> partition - } - } + val unauthorizedTopicResponses = mutable.Map[TopicPartition, PartitionResponse]() + val nonExistingTopicResponses = mutable.Map[TopicPartition, PartitionResponse]() + val invalidRequestResponses = mutable.Map[TopicPartition, PartitionResponse]() + val authorizedRequestInfo = mutable.Map[TopicPartition, MemoryRecords]() // cache the result to avoid redundant authorization calls - val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC, topicIdToPartitionData)(_._1.topic) + val authorizedTopics = authHelper.filterByAuthorized(request.context, WRITE, TOPIC, + produceRequest.data().topicData().asScala)(_.name()) - topicIdToPartitionData.foreach { case (topicIdPartition, partition) => + produceRequest.data.topicData.forEach(topic => topic.partitionData.forEach { partition => + val topicPartition = new TopicPartition(topic.name, partition.index) // This caller assumes the type is MemoryRecords and that is true on current serialization // We cast the type to avoid causing big change to code base. // https://issues.apache.org/jira/browse/KAFKA-10698 val memoryRecords = partition.records.asInstanceOf[MemoryRecords] - if (!authorizedTopics.contains(topicIdPartition.topic)) - unauthorizedTopicResponses += topicIdPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED) - else if (!metadataCache.contains(topicIdPartition.topicPartition)) - nonExistingTopicResponses += topicIdPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION) + if (!authorizedTopics.contains(topicPartition.topic)) + unauthorizedTopicResponses += topicPartition -> new PartitionResponse(Errors.TOPIC_AUTHORIZATION_FAILED) + else if (!metadataCache.contains(topicPartition)) + nonExistingTopicResponses += topicPartition -> new PartitionResponse(Errors.UNKNOWN_TOPIC_OR_PARTITION) else try { ProduceRequest.validateRecords(request.header.apiVersion, memoryRecords) - authorizedRequestInfo += (topicIdPartition -> memoryRecords) + authorizedRequestInfo += (topicPartition -> memoryRecords) } catch { case e: ApiException => - invalidRequestResponses += topicIdPartition -> new PartitionResponse(Errors.forException(e)) + invalidRequestResponses += topicPartition -> new PartitionResponse(Errors.forException(e)) } - } + }) // the callback for sending a produce response // The construction of ProduceResponse is able to accept auto-generated protocol data so // KafkaApis#handleProduceRequest should apply auto-generated protocol to avoid extra conversion. // https://issues.apache.org/jira/browse/KAFKA-10730 @nowarn("cat=deprecation") - def sendResponseCallback(responseStatus: Map[TopicIdPartition, PartitionResponse]): Unit = { + def sendResponseCallback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { val mergedResponseStatus = responseStatus ++ unauthorizedTopicResponses ++ nonExistingTopicResponses ++ invalidRequestResponses var errorInResponse = false val nodeEndpoints = new mutable.HashMap[Int, Node] - mergedResponseStatus.foreachEntry { (topicIdPartition, status) => + mergedResponseStatus.foreachEntry { (topicPartition, status) => if (status.error != Errors.NONE) { errorInResponse = true debug("Produce request with correlation id %d from client %s on partition %s failed due to %s".format( request.header.correlationId, request.header.clientId, - topicIdPartition, + topicPartition, status.error.exceptionName)) if (request.header.apiVersion >= 10) { status.error match { case Errors.NOT_LEADER_OR_FOLLOWER => - val leaderNode = getCurrentLeader(topicIdPartition.topicPartition(), request.context.listenerName) + val leaderNode = getCurrentLeader(topicPartition, request.context.listenerName) leaderNode.node.foreach { node => nodeEndpoints.put(node.id(), node) } @@ -481,7 +455,7 @@ class KafkaApis(val requestChannel: RequestChannel, // that the request quota is not enforced if acks == 0. val timeMs = time.milliseconds() val requestSize = request.sizeInBytes - val bandwidthThrottleTimeMs = quotas.produce.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), requestSize, timeMs) + val bandwidthThrottleTimeMs = quotas.produce.maybeRecordAndGetThrottleTimeMs(request, requestSize, timeMs) val requestThrottleTimeMs = if (produceRequest.acks == 0) 0 else quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) @@ -520,16 +494,16 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def processingStatsCallback(processingStats: ProduceResponseStats): Unit = { - processingStats.foreachEntry { (topicIdPartition, info) => - updateRecordConversionStats(request, topicIdPartition.topicPartition(), info) + def processingStatsCallback(processingStats: FetchResponseStats): Unit = { + processingStats.foreachEntry { (tp, info) => + updateRecordConversionStats(request, tp, info) } } if (authorizedRequestInfo.isEmpty) sendResponseCallback(Map.empty) else { - val internalTopicsAllowed = request.header.clientId == "__admin_client" + val internalTopicsAllowed = request.header.clientId == AdminUtils.ADMIN_CLIENT_ID val transactionSupportedOperation = AddPartitionsToTxnManager.produceRequestVersionToTransactionSupportedOperation(request.header.apiVersion()) // call the replica manager to append messages to the replicas replicaManager.handleProduceAppend( @@ -628,12 +602,12 @@ class KafkaApis(val requestChannel: RequestChannel, val partitions = new util.LinkedHashMap[TopicIdPartition, FetchResponseData.PartitionData] val reassigningPartitions = mutable.Set[TopicIdPartition]() val nodeEndpoints = new mutable.HashMap[Int, Node] - responsePartitionData.foreach { case (topicIdPartition, data) => + responsePartitionData.foreach { case (tp, data) => val abortedTransactions = data.abortedTransactions.orElse(null) val lastStableOffset: Long = data.lastStableOffset.orElse(FetchResponse.INVALID_LAST_STABLE_OFFSET) - if (data.isReassignmentFetch) reassigningPartitions.add(topicIdPartition) + if (data.isReassignmentFetch) reassigningPartitions.add(tp) val partitionData = new FetchResponseData.PartitionData() - .setPartitionIndex(topicIdPartition.partition) + .setPartitionIndex(tp.partition) .setErrorCode(maybeDownConvertStorageError(data.error).code) .setHighWatermark(data.highWatermark) .setLastStableOffset(lastStableOffset) @@ -645,7 +619,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (versionId >= 16) { data.error match { case Errors.NOT_LEADER_OR_FOLLOWER | Errors.FENCED_LEADER_EPOCH => - val leaderNode = getCurrentLeader(topicIdPartition.topicPartition(), request.context.listenerName) + val leaderNode = getCurrentLeader(tp.topicPartition(), request.context.listenerName) leaderNode.node.foreach { node => nodeEndpoints.put(node.id(), node) } @@ -656,8 +630,8 @@ class KafkaApis(val requestChannel: RequestChannel, } } - data.divergingEpoch.ifPresent(epoch => partitionData.setDivergingEpoch(epoch)) - partitions.put(topicIdPartition, partitionData) + data.divergingEpoch.ifPresent(partitionData.setDivergingEpoch(_)) + partitions.put(tp, partitionData) } erroneous.foreach { case (tp, data) => partitions.put(tp, data) } @@ -691,14 +665,14 @@ class KafkaApis(val requestChannel: RequestChannel, val responseSize = fetchContext.getResponseSize(partitions, versionId) val timeMs = time.milliseconds() val requestThrottleTimeMs = quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) - val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), responseSize, timeMs) + val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs) val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs) val fetchResponse = if (maxThrottleTimeMs > 0) { request.apiThrottleTimeMs = maxThrottleTimeMs // Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value // from the fetch quota because we are going to return an empty response. - quotas.fetch.unrecordQuotaSensor(request.session, request.header.clientId(), responseSize, timeMs) + quotas.fetch.unrecordQuotaSensor(request, responseSize, timeMs) if (bandwidthThrottleTimeMs > requestThrottleTimeMs) { requestHelper.throttle(quotas.fetch, request, bandwidthThrottleTimeMs) } else { @@ -730,7 +704,7 @@ class KafkaApis(val requestChannel: RequestChannel, val maxQuotaWindowBytes = if (fetchRequest.isFromFollower) Int.MaxValue else - quotas.fetch.maxValueInQuotaWindow(request.session, clientId).toInt + quotas.fetch.getMaxValueInQuotaWindow(request.session, clientId).toInt val fetchMaxBytes = Math.min(Math.min(fetchRequest.maxBytes, config.fetchMaxBytes), maxQuotaWindowBytes) val fetchMinBytes = Math.min(fetchRequest.minBytes, fetchMaxBytes) @@ -748,6 +722,7 @@ class KafkaApis(val requestChannel: RequestChannel, } val params = new FetchParams( + versionId, fetchRequest.replicaId, fetchRequest.replicaEpoch, fetchRequest.maxWait, @@ -792,20 +767,18 @@ class KafkaApis(val requestChannel: RequestChannel, .setName(topic.name) .setPartitions(topic.partitions.asScala.map(partition => buildErrorResponse(Errors.TOPIC_AUTHORIZATION_FAILED, partition)).asJava) - ).asJava + ) - def sendResponseCallback(response: util.Collection[ListOffsetsTopicResponse]): Void = { - val mergedResponses = new util.ArrayList(response) - mergedResponses.addAll(unauthorizedResponseStatus) + def sendResponseCallback(response: Seq[ListOffsetsTopicResponse]): Unit = { + val mergedResponses = response ++ unauthorizedResponseStatus requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new ListOffsetsResponse(new ListOffsetsResponseData() .setThrottleTimeMs(requestThrottleMs) - .setTopics(mergedResponses))) - null + .setTopics(mergedResponses.asJava))) } if (authorizedRequestInfo.isEmpty) { - sendResponseCallback(util.List.of) + sendResponseCallback(Seq.empty) } else { replicaManager.fetchOffset(authorizedRequestInfo, offsetRequest.duplicatePartitions().asScala, offsetRequest.isolationLevel(), offsetRequest.replicaId(), clientId, correlationId, version, @@ -835,15 +808,15 @@ class KafkaApis(val requestChannel: RequestChannel, errorUnavailableEndpoints: Boolean, errorUnavailableListeners: Boolean ): Seq[MetadataResponseTopic] = { - val topicResponses = metadataCache.getTopicMetadata(topics.asJava, listenerName, + val topicResponses = metadataCache.getTopicMetadata(topics, listenerName, errorUnavailableEndpoints, errorUnavailableListeners) if (topics.isEmpty || topicResponses.size == topics.size || fetchAllTopics) { - topicResponses.asScala + topicResponses } else { - val nonExistingTopics = topics.diff(topicResponses.asScala.map(_.name).toSet) + val nonExistingTopics = topics.diff(topicResponses.map(_.name).toSet) val nonExistingTopicResponses = if (allowAutoTopicCreation) { - val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request.session, request.header.clientId()) + val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request) autoTopicCreationManager.createTopics(nonExistingTopics, controllerMutationQuota, Some(request.context)) } else { nonExistingTopics.map { topic => @@ -865,7 +838,7 @@ class KafkaApis(val requestChannel: RequestChannel, } } - topicResponses.asScala ++ nonExistingTopicResponses + topicResponses ++ nonExistingTopicResponses } } @@ -890,13 +863,13 @@ class KafkaApis(val requestChannel: RequestChannel, // Only get topicIds and topicNames when supporting topicId val unknownTopicIds = topicIds.filter(metadataCache.getTopicName(_).isEmpty) - val knownTopicNames = topicIds.flatMap(id => OptionConverters.toScala(metadataCache.getTopicName(id))) + val knownTopicNames = topicIds.flatMap(metadataCache.getTopicName) val unknownTopicIdsTopicMetadata = unknownTopicIds.map(topicId => metadataResponseTopic(Errors.UNKNOWN_TOPIC_ID, null, topicId, isInternal = false, util.Collections.emptyList())).toSeq val topics = if (metadataRequest.isAllTopics) - metadataCache.getAllTopics.asScala + metadataCache.getAllTopics() else if (useTopicId) knownTopicNames else @@ -978,30 +951,42 @@ class KafkaApis(val requestChannel: RequestChannel, val brokers = metadataCache.getAliveBrokerNodes(request.context.listenerName) trace("Sending topic metadata %s and brokers %s for correlation id %d to client %s".format(completeTopicMetadata.mkString(","), - brokers.asScala.mkString(","), request.header.correlationId, request.header.clientId)) - val controllerId = metadataCache.getRandomAliveBrokerId.orElse(MetadataResponse.NO_CONTROLLER_ID) + brokers.mkString(","), request.header.correlationId, request.header.clientId)) + val controllerId = { + metadataCache.getControllerId.flatMap { + case ZkCachedControllerId(id) => Some(id) + case KRaftCachedControllerId(_) => metadataCache.getRandomAliveBrokerId + } + } requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => MetadataResponse.prepareResponse( requestVersion, requestThrottleMs, - brokers, + brokers.toList.asJava, clusterId, - controllerId, + controllerId.getOrElse(MetadataResponse.NO_CONTROLLER_ID), completeTopicMetadata.asJava, clusterAuthorizedOperations )) } def handleDescribeTopicPartitionsRequest(request: RequestChannel.Request): Unit = { - val response = describeTopicPartitionsRequestHandler.handleDescribeTopicPartitionsRequest(request) - trace("Sending topic partitions metadata %s for correlation id %d to client %s".format(response.topics().asScala.mkString(","), - request.header.correlationId, request.header.clientId)) - - requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { - response.setThrottleTimeMs(requestThrottleMs) - new DescribeTopicPartitionsResponse(response) - }) + describeTopicPartitionsRequestHandler match { + case Some(handler) => { + val response = handler.handleDescribeTopicPartitionsRequest(request) + trace("Sending topic partitions metadata %s for correlation id %d to client %s".format(response.topics().asScala.mkString(","), + request.header.correlationId, request.header.clientId)) + + requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { + response.setThrottleTimeMs(requestThrottleMs) + new DescribeTopicPartitionsResponse(response) + }) + } + case None => { + requestHelper.sendMaybeThrottle(request, request.body[DescribeTopicPartitionsRequest].getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + } + } } /** @@ -1016,11 +1001,9 @@ class KafkaApis(val requestChannel: RequestChannel, groups.forEach { groupOffsetFetch => val isAllPartitions = groupOffsetFetch.topics == null if (!authHelper.authorize(request.context, DESCRIBE, GROUP, groupOffsetFetch.groupId)) { - futures += CompletableFuture.completedFuture(OffsetFetchResponse.groupError( - groupOffsetFetch, - Errors.GROUP_AUTHORIZATION_FAILED, - request.header.apiVersion() - )) + futures += CompletableFuture.completedFuture(new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(groupOffsetFetch.groupId) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)) } else if (isAllPartitions) { futures += fetchAllOffsetsForGroup( request.context, @@ -1039,139 +1022,83 @@ class KafkaApis(val requestChannel: RequestChannel, CompletableFuture.allOf(futures.toArray: _*).handle[Unit] { (_, _) => val groupResponses = new ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseGroup](futures.size) futures.foreach(future => groupResponses += future.get()) - requestHelper.sendMaybeThrottle(request, new OffsetFetchResponse.Builder(groupResponses.asJava).build(request.context.apiVersion)) + requestHelper.sendMaybeThrottle(request, new OffsetFetchResponse(groupResponses.asJava, request.context.apiVersion)) } } private def fetchAllOffsetsForGroup( requestContext: RequestContext, - groupFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, + offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, requireStable: Boolean ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { - val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion) - groupCoordinator.fetchAllOffsets( requestContext, - groupFetchRequest, + offsetFetchRequest, requireStable - ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (groupFetchResponse, exception) => + ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (offsetFetchResponse, exception) => if (exception != null) { - OffsetFetchResponse.groupError( - groupFetchRequest, - Errors.forException(exception), - requestContext.apiVersion() - ) - } else if (groupFetchResponse.errorCode() != Errors.NONE.code) { - groupFetchResponse + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(offsetFetchRequest.groupId) + .setErrorCode(Errors.forException(exception).code) + } else if (offsetFetchResponse.errorCode() != Errors.NONE.code) { + offsetFetchResponse } else { // Clients are not allowed to see offsets for topics that are not authorized for Describe. - val authorizedNames = authHelper.filterByAuthorized( + val (authorizedOffsets, _) = authHelper.partitionSeqByAuthorized( requestContext, DESCRIBE, TOPIC, - groupFetchResponse.topics.asScala + offsetFetchResponse.topics.asScala )(_.name) - - val topics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics] - groupFetchResponse.topics.forEach { topic => - if (authorizedNames.contains(topic.name)) { - if (useTopicIds) { - // If the topic is not provided by the group coordinator, we set it - // using the metadata cache. - if (topic.topicId == Uuid.ZERO_UUID) { - topic.setTopicId(metadataCache.getTopicId(topic.name)) - } - // If we don't have the topic id at all, we skip the topic because - // we can not serialize it without it. - if (topic.topicId != Uuid.ZERO_UUID) { - topics += topic - } - } else { - topics += topic - } - } - } - groupFetchResponse.setTopics(topics.asJava) + offsetFetchResponse.setTopics(authorizedOffsets.asJava) } } } private def fetchOffsetsForGroup( requestContext: RequestContext, - groupFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, + offsetFetchRequest: OffsetFetchRequestData.OffsetFetchRequestGroup, requireStable: Boolean ): CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup] = { - val useTopicIds = OffsetFetchRequest.useTopicIds(requestContext.apiVersion) - - if (useTopicIds) { - groupFetchRequest.topics.forEach { topic => - if (topic.topicId != Uuid.ZERO_UUID) { - metadataCache.getTopicName(topic.topicId).ifPresent(name => topic.setName(name)) - } - } - } - // Clients are not allowed to see offsets for topics that are not authorized for Describe. - val authorizedTopicNames = authHelper.filterByAuthorized( + val (authorizedTopics, unauthorizedTopics) = authHelper.partitionSeqByAuthorized( requestContext, DESCRIBE, TOPIC, - groupFetchRequest.topics.asScala + offsetFetchRequest.topics.asScala )(_.name) - val authorizedTopics = new mutable.ArrayBuffer[OffsetFetchRequestData.OffsetFetchRequestTopics] - val errorTopics = new mutable.ArrayBuffer[OffsetFetchResponseData.OffsetFetchResponseTopics] - - def buildErrorResponse( - topic: OffsetFetchRequestData.OffsetFetchRequestTopics, - error: Errors - ): OffsetFetchResponseData.OffsetFetchResponseTopics = { - val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(topic.topicId) - .setName(topic.name) - topic.partitionIndexes.forEach { partitionIndex => - topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(partitionIndex) - .setCommittedOffset(-1) - .setErrorCode(error.code)) - } - topicResponse - } - - groupFetchRequest.topics.forEach { topic => - if (useTopicIds && topic.name.isEmpty) { - errorTopics += buildErrorResponse(topic, Errors.UNKNOWN_TOPIC_ID) - } else if (!authorizedTopicNames.contains(topic.name)) { - errorTopics += buildErrorResponse(topic, Errors.TOPIC_AUTHORIZATION_FAILED) - } else { - authorizedTopics += topic - } - } - groupCoordinator.fetchOffsets( requestContext, new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupFetchRequest.groupId) - .setMemberId(groupFetchRequest.memberId) - .setMemberEpoch(groupFetchRequest.memberEpoch) + .setGroupId(offsetFetchRequest.groupId) + .setMemberId(offsetFetchRequest.memberId) + .setMemberEpoch(offsetFetchRequest.memberEpoch) .setTopics(authorizedTopics.asJava), requireStable - ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (groupFetchResponse, exception) => + ).handle[OffsetFetchResponseData.OffsetFetchResponseGroup] { (offsetFetchResponse, exception) => if (exception != null) { - OffsetFetchResponse.groupError( - groupFetchRequest, - Errors.forException(exception), - requestContext.apiVersion() - ) - } else if (groupFetchResponse.errorCode() != Errors.NONE.code) { - groupFetchResponse + new OffsetFetchResponseData.OffsetFetchResponseGroup() + .setGroupId(offsetFetchRequest.groupId) + .setErrorCode(Errors.forException(exception).code) + } else if (offsetFetchResponse.errorCode() != Errors.NONE.code) { + offsetFetchResponse } else { val topics = new util.ArrayList[OffsetFetchResponseData.OffsetFetchResponseTopics]( - groupFetchRequest.topics.size + errorTopics.size + offsetFetchResponse.topics.size + unauthorizedTopics.size ) - topics.addAll(groupFetchResponse.topics) - topics.addAll(errorTopics.asJava) - groupFetchResponse.setTopics(topics) + topics.addAll(offsetFetchResponse.topics) + unauthorizedTopics.foreach { topic => + val topicResponse = new OffsetFetchResponseData.OffsetFetchResponseTopics().setName(topic.name) + topic.partitionIndexes.forEach { partitionIndex => + topicResponse.partitions.add(new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(partitionIndex) + .setCommittedOffset(-1) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code)) + } + topics.add(topicResponse) + } + offsetFetchResponse.setTopics(topics) } } } @@ -1245,12 +1172,15 @@ class KafkaApis(val requestChannel: RequestChannel, else { if (keyType == CoordinatorType.SHARE.id) { authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) + if (shareCoordinator.isEmpty) { + return (Errors.INVALID_REQUEST, Node.noNode) + } try { SharePartitionKey.validate(key) } catch { case e: IllegalArgumentException => error(s"Share coordinator key is invalid", e) - return (Errors.INVALID_REQUEST, Node.noNode) + (Errors.INVALID_REQUEST, Node.noNode()) } } val (partition, internalTopicName) = CoordinatorType.forId(keyType) match { @@ -1261,30 +1191,30 @@ class KafkaApis(val requestChannel: RequestChannel, (txnCoordinator.partitionFor(key), TRANSACTION_STATE_TOPIC_NAME) case CoordinatorType.SHARE => - // We know that shareCoordinator is defined at this stage. - (shareCoordinator.partitionFor(SharePartitionKey.getInstance(key)), SHARE_GROUP_STATE_TOPIC_NAME) + (shareCoordinator.foreach(coordinator => coordinator.partitionFor(SharePartitionKey.getInstance(key))), SHARE_GROUP_STATE_TOPIC_NAME) } - val topicMetadata = metadataCache.getTopicMetadata(Set(internalTopicName).asJava, request.context.listenerName, false, false).asScala + val topicMetadata = metadataCache.getTopicMetadata(Set(internalTopicName), request.context.listenerName) if (topicMetadata.headOption.isEmpty) { - val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request.session, request.header.clientId) + val controllerMutationQuota = quotas.controllerMutation.newPermissiveQuotaFor(request) autoTopicCreationManager.createTopics(Seq(internalTopicName).toSet, controllerMutationQuota, None) (Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode) } else { if (topicMetadata.head.errorCode != Errors.NONE.code) { (Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode) } else { - val coordinatorEndpoint = topicMetadata.head.partitions.stream() - .filter(_.partitionIndex() == partition) + val coordinatorEndpoint = topicMetadata.head.partitions.asScala + .find(_.partitionIndex == partition) .filter(_.leaderId != MetadataResponse.NO_LEADER_ID) - .flatMap(metadata => metadataCache.getAliveBrokerNode(metadata.leaderId, request.context.listenerName).stream()) - .findFirst() + .flatMap(metadata => metadataCache. + getAliveBrokerNode(metadata.leaderId, request.context.listenerName)) - if (coordinatorEndpoint.isPresent) { - (Errors.NONE, coordinatorEndpoint.get) - } else { - (Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode) + coordinatorEndpoint match { + case Some(endpoint) => + (Errors.NONE, endpoint) + case _ => + (Errors.COORDINATOR_NOT_AVAILABLE, Node.noNode) } } } @@ -1601,10 +1531,6 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception) return } - if (initProducerIdRequest.enable2Pc() && !authHelper.authorize(request.context, TWO_PHASE_COMMIT, TRANSACTIONAL_ID, transactionalId)) { - requestHelper.sendErrorResponseMaybeThrottle(request, Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.exception) - return - } } else if (!authHelper.authorize(request.context, IDEMPOTENT_WRITE, CLUSTER, CLUSTER_NAME, true, false) && !authHelper.authorizeByResourceType(request.context, AclOperation.WRITE, ResourceType.TOPIC)) { requestHelper.sendErrorResponseMaybeThrottle(request, Errors.CLUSTER_AUTHORIZATION_FAILED.exception) @@ -1640,19 +1566,8 @@ class KafkaApis(val requestChannel: RequestChannel, } producerIdAndEpoch match { - case Right(producerIdAndEpoch) => - val enableTwoPC = initProducerIdRequest.enable2Pc() - val keepPreparedTxn = initProducerIdRequest.keepPreparedTxn() - - txnCoordinator.handleInitProducerId( - transactionalId, - initProducerIdRequest.data.transactionTimeoutMs, - enableTwoPC, - keepPreparedTxn, - producerIdAndEpoch, - sendResponseCallback, - requestLocal - ) + case Right(producerIdAndEpoch) => txnCoordinator.handleInitProducerId(transactionalId, initProducerIdRequest.data.transactionTimeoutMs, + producerIdAndEpoch, sendResponseCallback, requestLocal) case Left(error) => requestHelper.sendErrorResponseMaybeThrottle(request, error.exception) } } @@ -1732,8 +1647,40 @@ class KafkaApis(val requestChannel: RequestChannel, trace(s"End transaction marker append for producer id $producerId completed with status: $currentErrors") updateErrors(producerId, currentErrors) - if (numAppends.decrementAndGet() == 0) { - requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) + def maybeSendResponse(): Unit = { + if (numAppends.decrementAndGet() == 0) { + requestHelper.sendResponseExemptThrottle(request, new WriteTxnMarkersResponse(errors)) + } + } + + // The new group coordinator uses GroupCoordinator#completeTransaction so we do + // not need to call GroupCoordinator#onTransactionCompleted here. + if (config.isNewGroupCoordinatorEnabled) { + maybeSendResponse() + return + } + + val successfulOffsetsPartitions = currentErrors.asScala.filter { case (topicPartition, error) => + topicPartition.topic == GROUP_METADATA_TOPIC_NAME && error == Errors.NONE + }.keys + + // If no end transaction marker has been written to a __consumer_offsets partition, we do not + // need to call GroupCoordinator#onTransactionCompleted. + if (successfulOffsetsPartitions.isEmpty) { + maybeSendResponse() + return + } + + // Otherwise, we call GroupCoordinator#onTransactionCompleted to materialize the offsets + // into the cache and we wait until the meterialization is completed. + groupCoordinator.onTransactionCompleted(producerId, successfulOffsetsPartitions.asJava, result).whenComplete { (_, exception) => + if (exception != null) { + error(s"Received an exception while trying to update the offsets cache on transaction marker append", exception) + val updatedErrors = new ConcurrentHashMap[TopicPartition, Errors]() + successfulOffsetsPartitions.foreach(updatedErrors.put(_, Errors.UNKNOWN_SERVER_ERROR)) + updateErrors(producerId, updatedErrors) + } + maybeSendResponse() } } @@ -1779,9 +1726,11 @@ class KafkaApis(val requestChannel: RequestChannel, } } - val controlRecords = mutable.Map.empty[TopicIdPartition, MemoryRecords] + val controlRecords = mutable.Map.empty[TopicPartition, MemoryRecords] partitionsWithCompatibleMessageFormat.foreach { partition => - if (partition.topic == GROUP_METADATA_TOPIC_NAME) { + if (groupCoordinator.isNewGroupCoordinator && partition.topic == GROUP_METADATA_TOPIC_NAME) { + // When the new group coordinator is used, writing the end marker is fully delegated + // to the group coordinator. groupCoordinator.completeTransaction( partition, marker.producerId, @@ -1807,8 +1756,7 @@ class KafkaApis(val requestChannel: RequestChannel, } else { // Otherwise, the regular appendRecords path is used for all the non __consumer_offsets // partitions or for all partitions when the new group coordinator is disabled. - // If topicIdPartition contains Uuid.ZERO_UUid all functionality will fall back on topic name. - controlRecords += replicaManager.topicIdPartition(partition) -> MemoryRecords.withEndTransactionMarker( + controlRecords += partition -> MemoryRecords.withEndTransactionMarker( producerId, marker.producerEpoch, new EndTransactionMarker(controlRecordType, marker.coordinatorEpoch) @@ -1825,8 +1773,8 @@ class KafkaApis(val requestChannel: RequestChannel, entriesPerPartition = controlRecords, requestLocal = requestLocal, responseCallback = errors => { - errors.foreachEntry { (topicIdPartition, partitionResponse) => - addResultAndMaybeComplete(topicIdPartition.topicPartition(), partitionResponse.error) + errors.foreachEntry { (tp, partitionResponse) => + addResultAndMaybeComplete(tp, partitionResponse.error) } } ) @@ -1894,7 +1842,7 @@ class KafkaApis(val requestChannel: RequestChannel, } else { val unauthorizedTopicErrors = mutable.Map[TopicPartition, Errors]() val nonExistingTopicErrors = mutable.Map[TopicPartition, Errors]() - val authorizedPartitions = new util.HashSet[TopicPartition]() + val authorizedPartitions = mutable.Set[TopicPartition]() // Only request versions less than 4 need write authorization since they come from clients. val authorizedTopics = @@ -1916,7 +1864,7 @@ class KafkaApis(val requestChannel: RequestChannel, // partitions which failed, and an 'OPERATION_NOT_ATTEMPTED' error code for the partitions which succeeded // the authorization check to indicate that they were not added to the transaction. val partitionErrors = unauthorizedTopicErrors ++ nonExistingTopicErrors ++ - authorizedPartitions.asScala.map(_ -> Errors.OPERATION_NOT_ATTEMPTED) + authorizedPartitions.map(_ -> Errors.OPERATION_NOT_ATTEMPTED) addResultAndMaybeSendResponse(AddPartitionsToTxnResponse.resultForTransaction(transactionalId, partitionErrors.asJava)) } else { def sendResponseCallback(error: Errors): Unit = { @@ -1995,7 +1943,7 @@ class KafkaApis(val requestChannel: RequestChannel, txnCoordinator.handleAddPartitionsToTransaction(transactionalId, addOffsetsToTxnRequest.data.producerId, addOffsetsToTxnRequest.data.producerEpoch, - util.Set.of(offsetTopicPartition), + Set(offsetTopicPartition), sendResponseCallback, TransactionVersion.TV_0, // This request will always come from the client not using TV 2. requestLocal) @@ -2061,7 +2009,7 @@ class KafkaApis(val requestChannel: RequestChannel, val topicWithValidPartitions = new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic().setName(topic.name) topic.partitions.forEach { partition => - if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).isPresent()) { + if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).nonEmpty) { topicWithValidPartitions.partitions.add(partition) } else { responseBuilder.addPartition(topic.name, partition.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) @@ -2235,12 +2183,12 @@ class KafkaApis(val requestChannel: RequestChannel, (replicaManager.describeLogDirs(partitions), Errors.NONE) } else { - (util.Collections.emptyList[DescribeLogDirsResponseData.DescribeLogDirsResult], Errors.CLUSTER_AUTHORIZATION_FAILED) + (List.empty[DescribeLogDirsResponseData.DescribeLogDirsResult], Errors.CLUSTER_AUTHORIZATION_FAILED) } } requestHelper.sendResponseMaybeThrottle(request, throttleTimeMs => new DescribeLogDirsResponse(new DescribeLogDirsResponseData() .setThrottleTimeMs(throttleTimeMs) - .setResults(logDirInfos) + .setResults(logDirInfos.asJava) .setErrorCode(error.code))) } @@ -2281,7 +2229,7 @@ class KafkaApis(val requestChannel: RequestChannel, new ExpireDelegationTokenResponseData() .setThrottleTimeMs(requestThrottleMs) .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) - .setExpiryTimestampMs(DelegationTokenManager.ERROR_TIMESTAMP))) + .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) } else { forwardToController(request) } @@ -2294,7 +2242,7 @@ class KafkaApis(val requestChannel: RequestChannel, new RenewDelegationTokenResponseData() .setThrottleTimeMs(requestThrottleMs) .setErrorCode(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED.code) - .setExpiryTimestampMs(DelegationTokenManager.ERROR_TIMESTAMP))) + .setExpiryTimestampMs(DelegationTokenManager.ErrorTimestamp))) } else { forwardToController(request) } @@ -2304,28 +2252,28 @@ class KafkaApis(val requestChannel: RequestChannel, val describeTokenRequest = request.body[DescribeDelegationTokenRequest] // the callback for sending a describe token response - def sendResponseCallback(error: Errors, tokenDetails: util.List[DelegationToken]): Unit = { + def sendResponseCallback(error: Errors, tokenDetails: List[DelegationToken]): Unit = { requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => - new DescribeDelegationTokenResponse(request.context.requestVersion(), requestThrottleMs, error, tokenDetails)) + new DescribeDelegationTokenResponse(request.context.requestVersion(), requestThrottleMs, error, tokenDetails.asJava)) trace("Sending describe token response for correlation id %d to client %s." .format(request.header.correlationId, request.header.clientId)) } if (!allowTokenRequests(request)) - sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, Collections.emptyList) - else if (!tokenManager.isEnabled) - sendResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, Collections.emptyList) + sendResponseCallback(Errors.DELEGATION_TOKEN_REQUEST_NOT_ALLOWED, List.empty) + else if (!config.tokenAuthEnabled) + sendResponseCallback(Errors.DELEGATION_TOKEN_AUTH_DISABLED, List.empty) else { val requestPrincipal = request.context.principal if (describeTokenRequest.ownersListEmpty()) { - sendResponseCallback(Errors.NONE, Collections.emptyList) + sendResponseCallback(Errors.NONE, List()) } else { - val owners: Optional[util.List[KafkaPrincipal]] = if (describeTokenRequest.data.owners == null) - Optional.empty() + val owners = if (describeTokenRequest.data.owners == null) + None else - Optional.of(describeTokenRequest.data.owners.stream().map(p => new KafkaPrincipal(p.principalType(), p.principalName)).toList) + Some(describeTokenRequest.data.owners.asScala.map(p => new KafkaPrincipal(p.principalType(), p.principalName)).toList) def authorizeToken(tokenId: String) = authHelper.authorize(request.context, DESCRIBE, DELEGATION_TOKEN, tokenId) def authorizeRequester(owner: KafkaPrincipal) = authHelper.authorize(request.context, DESCRIBE_TOKENS, USER, owner.toString) def eligible(token: TokenInformation) = DelegationTokenManager @@ -2382,7 +2330,7 @@ class KafkaApis(val requestChannel: RequestChannel, val topicWithValidPartitions = new OffsetDeleteRequestData.OffsetDeleteRequestTopic().setName(topic.name) topic.partitions.forEach { partition => - if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).isPresent) { + if (metadataCache.getLeaderAndIsr(topic.name, partition.partitionIndex).nonEmpty) { topicWithValidPartitions.partitions.add(partition) } else { responseBuilder.addPartition(topic.name, partition.partitionIndex, Errors.UNKNOWN_TOPIC_OR_PARTITION) @@ -2420,7 +2368,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => describeClientQuotasRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - val result = metadataCache.describeClientQuotas(describeClientQuotasRequest.data()) + val result = metadataCache.asInstanceOf[KRaftMetadataCache].describeClientQuotas(describeClientQuotasRequest.data()) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => { result.setThrottleTimeMs(requestThrottleMs) new DescribeClientQuotasResponse(result) @@ -2435,7 +2383,7 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => describeUserScramCredentialsRequest.getErrorResponse(requestThrottleMs, Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - val result = metadataCache.describeScramCredentials(describeUserScramCredentialsRequest.data()) + val result = metadataCache.asInstanceOf[KRaftMetadataCache].describeScramCredentials(describeUserScramCredentialsRequest.data()) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => new DescribeUserScramCredentialsResponse(result.setThrottleTimeMs(requestThrottleMs))) } @@ -2449,7 +2397,7 @@ class KafkaApis(val requestChannel: RequestChannel, () => { val brokers = new DescribeClusterResponseData.DescribeClusterBrokerCollection() val describeClusterRequest = request.body[DescribeClusterRequest] - metadataCache.getBrokerNodes(request.context.listenerName).forEach { node => + metadataCache.getBrokerNodes(request.context.listenerName).foreach { node => if (!node.isFenced || describeClusterRequest.data().includeFencedBrokers()) { brokers.add(new DescribeClusterResponseData.DescribeClusterBroker(). setBrokerId(node.id). @@ -2462,7 +2410,14 @@ class KafkaApis(val requestChannel: RequestChannel, brokers }, () => { - metadataCache.getRandomAliveBrokerId.orElse(-1) + metadataCache.getControllerId match { + case Some(value) => + value match { + case ZkCachedControllerId (id) => id + case KRaftCachedControllerId (_) => metadataCache.getRandomAliveBrokerId.getOrElse(- 1) + } + case None => -1 + } } ) requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => @@ -2557,13 +2512,7 @@ class KafkaApis(val requestChannel: RequestChannel, val filteredProducerIds = listTransactionsRequest.data.producerIdFilters.asScala.map(Long.unbox).toSet val filteredStates = listTransactionsRequest.data.stateFilters.asScala.toSet val durationFilter = listTransactionsRequest.data.durationFilter() - val transactionalIdPatternFilter = listTransactionsRequest.data.transactionalIdPattern - val response = txnCoordinator.handleListTransactions( - filteredProducerIds, - filteredStates, - durationFilter, - transactionalIdPatternFilter - ) + val response = txnCoordinator.handleListTransactions(filteredProducerIds, filteredStates, durationFilter) // The response should contain only transactionalIds that the principal // has `Describe` permission to access. @@ -2584,6 +2533,7 @@ class KafkaApis(val requestChannel: RequestChannel, } def isConsumerGroupProtocolEnabled(): Boolean = { + groupCoordinator.isNewGroupCoordinator && config.groupCoordinatorRebalanceProtocols.contains(Group.GroupType.CONSUMER) && groupVersion().isConsumerRebalanceProtocolSupported } @@ -2679,7 +2629,7 @@ class KafkaApis(val requestChannel: RequestChannel, } // Clients are not allowed to see topics that are not authorized for Describe. - if (authorizerPlugin.isDefined) { + if (!authorizer.isEmpty) { val topicsToCheck = response.groups.stream() .flatMap(group => group.members.stream) .flatMap(member => util.stream.Stream.of(member.assignment, member.targetAssignment)) @@ -2712,238 +2662,6 @@ class KafkaApis(val requestChannel: RequestChannel, } } } - } - - private def streamsVersion(): StreamsVersion = { - StreamsVersion.fromFeatureLevel(metadataCache.features.finalizedFeatures.getOrDefault(StreamsVersion.FEATURE_NAME, 0.toShort)) - } - - private def isStreamsGroupProtocolEnabled: Boolean = { - config.groupCoordinatorRebalanceProtocols.contains(Group.GroupType.STREAMS) && - streamsVersion().streamsGroupSupported - } - - def handleStreamsGroupHeartbeat(request: RequestChannel.Request): CompletableFuture[Unit] = { - val streamsGroupHeartbeatRequest = request.body[StreamsGroupHeartbeatRequest] - - if (!isStreamsGroupProtocolEnabled) { - // The API is not enabled by default. If it is not enabled, we fail directly here. - requestHelper.sendMaybeThrottle(request, streamsGroupHeartbeatRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } else if (!authHelper.authorize(request.context, READ, GROUP, streamsGroupHeartbeatRequest.data.groupId)) { - requestHelper.sendMaybeThrottle(request, streamsGroupHeartbeatRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) - CompletableFuture.completedFuture[Unit](()) - } else { - val requestContext = request.context - - if (streamsGroupHeartbeatRequest.data().topology() != null) { - val requiredTopics: Seq[String] = - streamsGroupHeartbeatRequest.data().topology().subtopologies().iterator().asScala.flatMap(subtopology => - (subtopology.sourceTopics().iterator().asScala:Iterator[String]) - ++ (subtopology.repartitionSinkTopics().iterator().asScala:Iterator[String]) - ++ (subtopology.repartitionSourceTopics().iterator().asScala.map(_.name()):Iterator[String]) - ++ (subtopology.stateChangelogTopics().iterator().asScala.map(_.name()):Iterator[String]) - ).distinct.toSeq - - // While correctness of the heartbeat request is checked inside the group coordinator, - // we are checking early that topics in the topology have valid names and are not internal - // kafka topics, since we need to pass it to the authorization helper before passing the - // request to the group coordinator. - - val prohibitedTopics = requiredTopics.filter(Topic.isInternal) - if (prohibitedTopics.nonEmpty) { - val errorResponse = new StreamsGroupHeartbeatResponseData() - errorResponse.setErrorCode(Errors.STREAMS_INVALID_TOPOLOGY.code) - errorResponse.setErrorMessage(f"Use of Kafka internal topics ${prohibitedTopics.mkString(",")} in a Kafka Streams topology is prohibited.") - requestHelper.sendMaybeThrottle(request, new StreamsGroupHeartbeatResponse(errorResponse)) - return CompletableFuture.completedFuture[Unit](()) - } - - val invalidTopics = requiredTopics.filterNot(Topic.isValid) - if (invalidTopics.nonEmpty) { - val errorResponse = new StreamsGroupHeartbeatResponseData() - errorResponse.setErrorCode(Errors.STREAMS_INVALID_TOPOLOGY.code) - errorResponse.setErrorMessage(f"Topic names ${invalidTopics.mkString(",")} are not valid topic names.") - requestHelper.sendMaybeThrottle(request, new StreamsGroupHeartbeatResponse(errorResponse)) - return CompletableFuture.completedFuture[Unit](()) - } - - if (requiredTopics.nonEmpty) { - val authorizedTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, requiredTopics)(identity) - if (authorizedTopics.size < requiredTopics.size) { - val responseData = new StreamsGroupHeartbeatResponseData().setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - requestHelper.sendMaybeThrottle(request, new StreamsGroupHeartbeatResponse(responseData)) - return CompletableFuture.completedFuture[Unit](()) - } - } - } - - groupCoordinator.streamsGroupHeartbeat( - request.context, - streamsGroupHeartbeatRequest.data - ).handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, streamsGroupHeartbeatRequest.getErrorResponse(exception)) - } else { - val responseData = response.data() - val topicsToCreate = response.creatableTopics().asScala - if (topicsToCreate.nonEmpty) { - - val createTopicUnauthorized = - if(!authHelper.authorize(request.context, CREATE, CLUSTER, CLUSTER_NAME, logIfDenied = false)) - authHelper.partitionSeqByAuthorized(request.context, CREATE, TOPIC, topicsToCreate.keys.toSeq)(identity[String])._2 - else Set.empty - - if (createTopicUnauthorized.nonEmpty) { - if (responseData.status() == null) { - responseData.setStatus(new util.ArrayList()); - } - val missingInternalTopicStatus = - responseData.status().stream().filter(x => x.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()).findFirst() - if (missingInternalTopicStatus.isPresent) { - missingInternalTopicStatus.get().setStatusDetail( - missingInternalTopicStatus.get().statusDetail() + "; Unauthorized to CREATE on topics " + createTopicUnauthorized.mkString(", ") + "." - ) - } else { - responseData.status().add( - new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Unauthorized to CREATE on topics " + createTopicUnauthorized.mkString(", ") + ".") - ) - } - } else { - // Compute group-specific timeout for caching errors (2 * heartbeat interval) - val heartbeatIntervalMs = Option(groupConfigManager.groupConfig(streamsGroupHeartbeatRequest.data.groupId).orElse(null)) - .map(_.streamsHeartbeatIntervalMs().toLong) - .getOrElse(config.groupCoordinatorConfig.streamsGroupHeartbeatIntervalMs().toLong) - val timeoutMs = heartbeatIntervalMs * 2 - - autoTopicCreationManager.createStreamsInternalTopics(topicsToCreate, requestContext, timeoutMs) - - // Check for cached topic creation errors only if there's already a MISSING_INTERNAL_TOPICS status - val hasMissingInternalTopicsStatus = responseData.status() != null && - responseData.status().stream().anyMatch(s => s.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - - if (hasMissingInternalTopicsStatus) { - val currentTimeMs = time.milliseconds() - val cachedErrors = autoTopicCreationManager.getStreamsInternalTopicCreationErrors(topicsToCreate.keys.toSet, currentTimeMs) - if (cachedErrors.nonEmpty) { - val missingInternalTopicStatus = - responseData.status().stream().filter(x => x.statusCode() == StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()).findFirst() - val creationErrorDetails = cachedErrors.map { case (topic, error) => s"$topic ($error)" }.mkString(", ") - if (missingInternalTopicStatus.isPresent) { - val existingDetail = Option(missingInternalTopicStatus.get().statusDetail()).getOrElse("") - missingInternalTopicStatus.get().setStatusDetail( - existingDetail + s"; Creation failed: $creationErrorDetails." - ) - } - } - } - } - } - requestHelper.sendMaybeThrottle(request, new StreamsGroupHeartbeatResponse(responseData)) - } - } - } - } - - def handleStreamsGroupDescribe(request: RequestChannel.Request): CompletableFuture[Unit] = { - val streamsGroupDescribeRequest = request.body[StreamsGroupDescribeRequest] - val includeAuthorizedOperations = streamsGroupDescribeRequest.data.includeAuthorizedOperations - - if (!isStreamsGroupProtocolEnabled) { - // The API is not enabled by default. If it is not enabled, we fail directly here. - requestHelper.sendMaybeThrottle(request, request.body[StreamsGroupDescribeRequest].getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - CompletableFuture.completedFuture[Unit](()) - } else { - val response = new StreamsGroupDescribeResponseData() - - val authorizedGroups = new ArrayBuffer[String]() - streamsGroupDescribeRequest.data.groupIds.forEach { groupId => - if (!authHelper.authorize(request.context, DESCRIBE, GROUP, groupId)) { - response.groups.add(new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupId) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code) - ) - } else { - authorizedGroups += groupId - } - } - - groupCoordinator.streamsGroupDescribe( - request.context, - authorizedGroups.asJava - ).handle[Unit] { (results, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, streamsGroupDescribeRequest.getErrorResponse(exception)) - } else { - if (includeAuthorizedOperations) { - results.forEach { groupResult => - if (groupResult.errorCode == Errors.NONE.code) { - groupResult.setAuthorizedOperations(authHelper.authorizedOperations( - request, - new Resource(ResourceType.GROUP, groupResult.groupId) - )) - } - } - } - - if (response.groups.isEmpty) { - // If the response is empty, we can directly reuse the results. - response.setGroups(results) - } else { - // Otherwise, we have to copy the results into the existing ones. - response.groups.addAll(results) - } - - // Clients are not allowed to see topics that are not authorized for Describe. - if (authorizerPlugin.isDefined) { - val topicsToCheck = response.groups.stream() - .filter(group => group.topology != null) - .flatMap(group => group.topology.subtopologies.stream) - .flatMap(subtopology => java.util.stream.Stream.concat( - java.util.stream.Stream.concat( - java.util.stream.Stream.concat( - subtopology.sourceTopics.stream, - subtopology.repartitionSinkTopics.stream), - subtopology.repartitionSourceTopics.stream.map(_.name)), - subtopology.stateChangelogTopics.stream.map(_.name))) - .collect(Collectors.toSet[String]) - .asScala - - val authorizedTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, - topicsToCheck)(identity) - - val updatedGroups = response.groups.stream.map { group => - val hasUnauthorizedTopic = if (group.topology == null) false else - group.topology.subtopologies.stream() - .flatMap(subtopology => java.util.stream.Stream.concat( - java.util.stream.Stream.concat( - java.util.stream.Stream.concat( - subtopology.sourceTopics.stream, - subtopology.repartitionSinkTopics.stream), - subtopology.repartitionSourceTopics.stream.map(_.name)), - subtopology.stateChangelogTopics.stream.map(_.name))) - .anyMatch(topic => !authorizedTopics.contains(topic)) - - if (hasUnauthorizedTopic) { - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(group.groupId) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage("The described group uses topics that the client is not authorized to describe.") - .setMembers(List.empty.asJava) - .setTopology(null) - } else { - group - } - }.collect(Collectors.toList[StreamsGroupDescribeResponseData.DescribedGroup]) - response.setGroups(updatedGroups) - } - - requestHelper.sendMaybeThrottle(request, new StreamsGroupDescribeResponse(response)) - } - } - } } @@ -2967,60 +2685,16 @@ class KafkaApis(val requestChannel: RequestChannel, } } - /** - * Handle ListConfigResourcesRequest. If resourceTypes are not specified, it uses ListConfigResourcesRequest#supportedResourceTypes - * to retrieve config resources. If resourceTypes are specified, it returns matched config resources. - * If a config resource type is not supported, the handler returns UNSUPPORTED_VERSION. - */ - private def handleListConfigResources(request: RequestChannel.Request): Unit = { - val listConfigResourcesRequest = request.body[ListConfigResourcesRequest] + def handleListClientMetricsResources(request: RequestChannel.Request): Unit = { + val listClientMetricsResourcesRequest = request.body[ListClientMetricsResourcesRequest] if (!authHelper.authorize(request.context, DESCRIBE_CONFIGS, CLUSTER, CLUSTER_NAME)) { - requestHelper.sendMaybeThrottle(request, listConfigResourcesRequest.getErrorResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) + requestHelper.sendMaybeThrottle(request, listClientMetricsResourcesRequest.getErrorResponse(Errors.CLUSTER_AUTHORIZATION_FAILED.exception)) } else { - val data = new ListConfigResourcesResponseData() - - val supportedResourceTypes = listConfigResourcesRequest.supportedResourceTypes() - var resourceTypes = listConfigResourcesRequest.data().resourceTypes() - if (resourceTypes.isEmpty) { - resourceTypes = supportedResourceTypes.stream().toList - } - - resourceTypes.forEach(resourceType => - if (!supportedResourceTypes.contains(resourceType)) { - requestHelper.sendMaybeThrottle(request, new ListConfigResourcesResponse(data.setErrorCode(Errors.UNSUPPORTED_VERSION.code()))) - return - } - ) - - val result = new util.ArrayList[ListConfigResourcesResponseData.ConfigResource]() - if (resourceTypes.contains(ConfigResource.Type.GROUP.id)) { - groupConfigManager.groupIds().forEach(id => - result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(id).setResourceType(ConfigResource.Type.GROUP.id)) - ) - } - if (resourceTypes.contains(ConfigResource.Type.CLIENT_METRICS.id)) { - clientMetricsManager.listClientMetricsResources.forEach(name => - result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(name).setResourceType(ConfigResource.Type.CLIENT_METRICS.id)) - ) - } - if (resourceTypes.contains(ConfigResource.Type.BROKER_LOGGER.id)) { - metadataCache.getBrokerNodes(request.context.listenerName).forEach(node => - result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(node.id.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id)) - ) - } - if (resourceTypes.contains(ConfigResource.Type.BROKER.id)) { - metadataCache.getBrokerNodes(request.context.listenerName).forEach(node => - result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(node.id.toString).setResourceType(ConfigResource.Type.BROKER.id)) - ) - } - if (resourceTypes.contains(ConfigResource.Type.TOPIC.id)) { - metadataCache.getAllTopics.forEach(name => - result.add(new ListConfigResourcesResponseData.ConfigResource().setResourceName(name).setResourceType(ConfigResource.Type.TOPIC.id)) - ) - } - data.setConfigResources(result) - requestHelper.sendMaybeThrottle(request, new ListConfigResourcesResponse(data)) + val data = new ListClientMetricsResourcesResponseData().setClientMetricsResources( + clientMetricsManager.listClientMetricsResources.stream.map( + name => new ClientMetricsResource().setName(name)).toList) + requestHelper.sendMaybeThrottle(request, new ListClientMetricsResourcesResponse(data)) } } @@ -3034,24 +2708,9 @@ class KafkaApis(val requestChannel: RequestChannel, requestHelper.sendMaybeThrottle(request, shareGroupHeartbeatRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) CompletableFuture.completedFuture[Unit](()) } else { - if (shareGroupHeartbeatRequest.data.subscribedTopicNames != null && - !shareGroupHeartbeatRequest.data.subscribedTopicNames.isEmpty) { - // Check the authorization if the subscribed topic names are provided. - // Clients are not allowed to see topics that are not authorized for Describe. - val subscribedTopicSet = shareGroupHeartbeatRequest.data.subscribedTopicNames.asScala.toSet - val authorizedTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, - subscribedTopicSet)(identity) - if (authorizedTopics.size < subscribedTopicSet.size) { - val responseData = new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - requestHelper.sendMaybeThrottle(request, new ShareGroupHeartbeatResponse(responseData)) - return CompletableFuture.completedFuture[Unit](()) - } - } - groupCoordinator.shareGroupHeartbeat( request.context, - shareGroupHeartbeatRequest.data + shareGroupHeartbeatRequest.data, ).handle[Unit] { (response, exception) => if (exception != null) { @@ -3111,34 +2770,6 @@ class KafkaApis(val requestChannel: RequestChannel, response.groups.addAll(results) } - // Clients are not allowed to see topics that are not authorized for Describe. - if (authorizerPlugin.isDefined) { - val topicsToCheck = response.groups.stream() - .flatMap(group => group.members.stream) - .flatMap(member => member.assignment.topicPartitions.stream) - .map(topicPartition => topicPartition.topicName) - .collect(Collectors.toSet[String]) - .asScala - val authorizedTopics = authHelper.filterByAuthorized(request.context, DESCRIBE, TOPIC, - topicsToCheck)(identity) - val updatedGroups = response.groups.stream().map { group => - val hasUnauthorizedTopic = group.members.stream() - .flatMap(member => member.assignment.topicPartitions.stream) - .anyMatch(tp => !authorizedTopics.contains(tp.topicName)) - - if (hasUnauthorizedTopic) { - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(group.groupId) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage("The group has described topic(s) that the client is not authorized to describe.") - .setMembers(List.empty.asJava) - } else { - group - } - }.collect(Collectors.toList[ShareGroupDescribeResponseData.DescribedGroup]) - response.setGroups(updatedGroups) - } - requestHelper.sendMaybeThrottle(request, new ShareGroupDescribeResponse(response)) } } @@ -3148,12 +2779,12 @@ class KafkaApis(val requestChannel: RequestChannel, /** * Handle a shareFetch request */ - def handleShareFetchRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { + def handleShareFetchRequest(request: RequestChannel.Request): Unit = { val shareFetchRequest = request.body[ShareFetchRequest] if (!isShareGroupProtocolEnabled) { requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return CompletableFuture.completedFuture[Unit](()) + return } val groupId = shareFetchRequest.data.groupId @@ -3161,7 +2792,7 @@ class KafkaApis(val requestChannel: RequestChannel, // Share Fetch needs permission to perform the READ action on the named group resource (groupId) if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return CompletableFuture.completedFuture[Unit](()) + return } val memberId = shareFetchRequest.data.memberId @@ -3184,22 +2815,11 @@ class KafkaApis(val requestChannel: RequestChannel, try { // Creating the shareFetchContext for Share Session Handling. if context creation fails, the request is failed directly here. - shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent, request.context.connectionId) + shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent) } catch { - case _: ShareSessionLimitReachedException => - sharePartitionManager.createIdleShareFetchTimerTask(shareFetchRequest.maxWait).handle( - (_, exception) => { - if (exception != null) { - requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception)) - } else { - requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.SHARE_SESSION_LIMIT_REACHED.exception)) - } - } - ) - return CompletableFuture.completedFuture[Unit](()) case e: Exception => requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) - return CompletableFuture.completedFuture[Unit](()) + return } val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = shareFetchContext.getErroneousAndValidTopicIdPartitions @@ -3207,8 +2827,12 @@ class KafkaApis(val requestChannel: RequestChannel, erroneousAndValidPartitionData.erroneous.forEach { case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp } - erroneousAndValidPartitionData.validTopicIdPartitions.forEach(tp => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp) - shareFetchData.forEach { tp => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp} + erroneousAndValidPartitionData.validTopicIdPartitions.forEach { + case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp + } + shareFetchData.forEach { + case(tp, _) => if (!topicIdPartitionSeq.contains(tp)) topicIdPartitionSeq += tp + } // Kafka share consumers need READ permission on each topic they are fetching. val authorizedTopics = authHelper.filterByAuthorized( @@ -3239,9 +2863,9 @@ class KafkaApis(val requestChannel: RequestChannel, // Handling the Fetch from the ShareFetchRequest. // Variable to store the topic partition wise result of fetching. - val fetchResult: CompletableFuture[Map[TopicIdPartition, ShareFetchResponseData.PartitionData]] = handleFetchFromShareFetchRequest( + val fetchResult: CompletableFuture[Map[TopicIdPartition, ShareFetchResponseData.PartitionData]] = + handleFetchFromShareFetchRequest( request, - shareSessionEpoch, erroneousAndValidPartitionData, sharePartitionManager, authorizedTopics @@ -3289,7 +2913,6 @@ class KafkaApis(val requestChannel: RequestChannel, .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(value) - .setRecords(MemoryRecords.EMPTY) topic.partitions.add(fetchPartitionData) } topicPartitionAcknowledgements.remove(topicId) @@ -3305,7 +2928,6 @@ class KafkaApis(val requestChannel: RequestChannel, .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(value) - .setRecords(MemoryRecords.EMPTY) topicData.partitions.add(fetchPartitionData) } shareFetchResponse.data.responses.add(topicData) @@ -3338,7 +2960,6 @@ class KafkaApis(val requestChannel: RequestChannel, // Visible for Testing def handleFetchFromShareFetchRequest(request: RequestChannel.Request, - shareSessionEpoch: Int, erroneousAndValidPartitionData: ErroneousAndValidPartitionData, sharePartitionManagerInstance: SharePartitionManager, authorizedTopics: Set[String] @@ -3347,29 +2968,30 @@ class KafkaApis(val requestChannel: RequestChannel, val erroneous = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] erroneousAndValidPartitionData.erroneous.forEach { (topicIdPartition, partitionData) => erroneous.put(topicIdPartition, partitionData) } - val interestedTopicPartitions = new util.ArrayList[TopicIdPartition] + val interestedWithMaxBytes = new util.LinkedHashMap[TopicIdPartition, Integer] - erroneousAndValidPartitionData.validTopicIdPartitions.forEach { topicIdPartition => + erroneousAndValidPartitionData.validTopicIdPartitions.forEach { case (topicIdPartition, sharePartitionData) => if (!authorizedTopics.contains(topicIdPartition.topicPartition.topic)) erroneous += topicIdPartition -> ShareFetchResponse.partitionResponse(topicIdPartition, Errors.TOPIC_AUTHORIZATION_FAILED) else if (!metadataCache.contains(topicIdPartition.topicPartition)) erroneous += topicIdPartition -> ShareFetchResponse.partitionResponse(topicIdPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) else - interestedTopicPartitions.add(topicIdPartition) + interestedWithMaxBytes.put(topicIdPartition, sharePartitionData.maxBytes) } val shareFetchRequest = request.body[ShareFetchRequest] val clientId = request.header.clientId + val versionId = request.header.apiVersion val groupId = shareFetchRequest.data.groupId - if (interestedTopicPartitions.isEmpty) { + if (interestedWithMaxBytes.isEmpty) { CompletableFuture.completedFuture(erroneous) } else { // for share fetch from consumer, cap fetchMaxBytes to the maximum bytes that could be fetched without being // throttled given no bytes were recorded in the recent quota window. Trying to fetch more bytes would result // in a guaranteed throttling potentially blocking consumer progress. - val maxQuotaWindowBytes = quotas.fetch.maxValueInQuotaWindow(request.session, clientId).toInt + val maxQuotaWindowBytes = quotas.fetch.getMaxValueInQuotaWindow(request.session, clientId).toInt val fetchMaxBytes = Math.min(Math.min(shareFetchRequest.maxBytes, config.fetchMaxBytes), maxQuotaWindowBytes) val fetchMinBytes = Math.min(shareFetchRequest.minBytes, fetchMaxBytes) @@ -3383,12 +3005,13 @@ class KafkaApis(val requestChannel: RequestChannel, request.context.listenerName.value)) val params = new FetchParams( + versionId, FetchRequest.CONSUMER_REPLICA_ID, -1, shareFetchRequest.maxWait, fetchMinBytes, fetchMaxBytes, - FetchIsolation.of(FetchRequest.CONSUMER_REPLICA_ID, groupConfigManager.groupConfig(groupId).map(_.shareIsolationLevel()).orElse(GroupConfig.defaultShareIsolationLevel)), + FetchIsolation.HIGH_WATERMARK, clientMetadata, true ) @@ -3398,10 +3021,7 @@ class KafkaApis(val requestChannel: RequestChannel, groupId, shareFetchRequest.data.memberId, params, - shareSessionEpoch, - shareFetchRequest.data.maxRecords, - shareFetchRequest.data.batchSize, - interestedTopicPartitions + interestedWithMaxBytes ).thenApply{ result => val combinedResult = mutable.Map.empty[TopicIdPartition, ShareFetchResponseData.PartitionData] result.asScala.foreach { case (tp, data) => @@ -3469,13 +3089,13 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleShareAcknowledgeRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { + def handleShareAcknowledgeRequest(request: RequestChannel.Request): Unit = { val shareAcknowledgeRequest = request.body[ShareAcknowledgeRequest] if (!isShareGroupProtocolEnabled) { requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return CompletableFuture.completedFuture[Unit](()) + return } val groupId = shareAcknowledgeRequest.data.groupId @@ -3484,7 +3104,7 @@ class KafkaApis(val requestChannel: RequestChannel, if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return CompletableFuture.completedFuture[Unit](()) + return } val memberId = shareAcknowledgeRequest.data.memberId @@ -3497,7 +3117,7 @@ class KafkaApis(val requestChannel: RequestChannel, } catch { case e: Exception => requestHelper.sendMaybeThrottle(request, shareAcknowledgeRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, e)) - return CompletableFuture.completedFuture[Unit](()) + return } val topicIdPartitionSeq: mutable.Set[TopicIdPartition] = mutable.Set() @@ -3543,344 +3163,67 @@ class KafkaApis(val requestChannel: RequestChannel, } } - def handleInitializeShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { + def handleInitializeShareGroupStateRequest(request: RequestChannel.Request): Unit = { val initializeShareGroupStateRequest = request.body[InitializeShareGroupStateRequest] - // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, - // hence requests won't reach Persister. - - if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { - requestHelper.sendMaybeThrottle(request, new InitializeShareGroupStateResponse( - InitializeShareGroupStateResponse.toGlobalErrorResponse( - initializeShareGroupStateRequest.data(), - Errors.CLUSTER_AUTHORIZATION_FAILED - ))) - return CompletableFuture.completedFuture[Unit](()) - } - - shareCoordinator.initializeState(request.context, initializeShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, initializeShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new InitializeShareGroupStateResponse(response)) - } - } + // TODO: Implement the InitializeShareGroupStateRequest handling + requestHelper.sendMaybeThrottle(request, initializeShareGroupStateRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + CompletableFuture.completedFuture[Unit](()) } def handleReadShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { val readShareGroupStateRequest = request.body[ReadShareGroupStateRequest] - // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, - // hence requests won't reach Persister. - - if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse( - ReadShareGroupStateResponse.toGlobalErrorResponse( - readShareGroupStateRequest.data(), - Errors.CLUSTER_AUTHORIZATION_FAILED - ))) - return CompletableFuture.completedFuture[Unit](()) - } - - shareCoordinator.readState(request.context, readShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, readShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse(response)) - } - } - } - - def handleWriteShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val writeShareGroupStateRequest = request.body[WriteShareGroupStateRequest] - // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, - // hence requests won't reach Persister. - - if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { - requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse( - WriteShareGroupStateResponse.toGlobalErrorResponse( - writeShareGroupStateRequest.data(), - Errors.CLUSTER_AUTHORIZATION_FAILED - ))) - return CompletableFuture.completedFuture[Unit](()) - } - - shareCoordinator.writeState(request.context, writeShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, writeShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse(response)) - } - } - } - - def handleDeleteShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val deleteShareGroupStateRequest = request.body[DeleteShareGroupStateRequest] - // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, - // hence requests won't reach Persister. - - if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { - requestHelper.sendMaybeThrottle(request, new DeleteShareGroupStateResponse( - DeleteShareGroupStateResponse.toGlobalErrorResponse( - deleteShareGroupStateRequest.data(), - Errors.CLUSTER_AUTHORIZATION_FAILED - ))) - return CompletableFuture.completedFuture[Unit](()) - } - - shareCoordinator.deleteState(request.context, deleteShareGroupStateRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupStateRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new DeleteShareGroupStateResponse(response)) - } - } - } - def handleReadShareGroupStateSummaryRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val readShareGroupStateSummaryRequest = request.body[ReadShareGroupStateSummaryRequest] - // We do not need a check for isShareGroupProtocolEnabled in this RPC since there is a check for it in ShareFetch/ShareAcknowledge RPCs, - // hence requests won't reach Persister. - - if (!authorizeClusterOperation(request, CLUSTER_ACTION)) { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse( - ReadShareGroupStateSummaryResponse.toGlobalErrorResponse( - readShareGroupStateSummaryRequest.data(), - Errors.CLUSTER_AUTHORIZATION_FAILED - ))) - return CompletableFuture.completedFuture[Unit](()) - } + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - shareCoordinator.readStateSummary(request.context, readShareGroupStateSummaryRequest.data) - .handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(exception)) - } else { - requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateSummaryResponse(response)) + shareCoordinator match { + case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + readShareGroupStateRequest.getErrorResponse(requestThrottleMs, + new ApiException("Share coordinator is not enabled."))) + CompletableFuture.completedFuture[Unit](()) + case Some(coordinator) => coordinator.readState(request.context, readShareGroupStateRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, readShareGroupStateRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new ReadShareGroupStateResponse(response)) + } } - } - } - - def handleDescribeShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val describeShareGroupOffsetsRequest = request.body[DescribeShareGroupOffsetsRequest] - val groups = describeShareGroupOffsetsRequest.groups() - - val futures = new mutable.ArrayBuffer[CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup]](groups.size) - groups.forEach { groupDescribeOffsets => - val isAllPartitions = groupDescribeOffsets.topics == null - if (!isShareGroupProtocolEnabled) { - futures += CompletableFuture.completedFuture(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId(groupDescribeOffsets.groupId) - .setErrorCode(Errors.UNSUPPORTED_VERSION.code)) - } else if (!authHelper.authorize(request.context, DESCRIBE, GROUP, groupDescribeOffsets.groupId)) { - futures += CompletableFuture.completedFuture(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId(groupDescribeOffsets.groupId) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)) - } else if (isAllPartitions) { - futures += describeShareGroupAllOffsetsForGroup( - request.context, - groupDescribeOffsets - ) - } else if (groupDescribeOffsets.topics.isEmpty) { - futures += CompletableFuture.completedFuture(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId(groupDescribeOffsets.groupId)) - } else { - futures += describeShareGroupOffsetsForGroup( - request.context, - groupDescribeOffsets - ) - } - } - - CompletableFuture.allOf(futures.toArray: _*).handle[Unit] { (_, _) => - val groupResponses = new ArrayBuffer[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup](futures.size) - val responseData = new DescribeShareGroupOffsetsResponseData().setGroups(groupResponses.asJava) - futures.foreach(future => groupResponses += future.join) - requestHelper.sendMaybeThrottle(request, new DescribeShareGroupOffsetsResponse(responseData)) } } - private def describeShareGroupAllOffsetsForGroup(requestContext: RequestContext, - groupDescribeOffsetsRequest: DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup - ): CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] = { - groupCoordinator.describeShareGroupAllOffsets( - requestContext, - groupDescribeOffsetsRequest - ).handle[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] { (groupDescribeOffsetsResponse, exception) => - if (exception != null) { - val error = Errors.forException(exception) - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId(groupDescribeOffsetsRequest.groupId) - .setErrorCode(error.code) - .setErrorMessage(error.message) - } else { - // Clients are not allowed to see offsets for topics that are not authorized for Describe. - val (authorizedOffsets, _) = authHelper.partitionSeqByAuthorized( - requestContext, - DESCRIBE, - TOPIC, - groupDescribeOffsetsResponse.topics.asScala - )(_.topicName) - groupDescribeOffsetsResponse.setTopics(authorizedOffsets.asJava) - } - } - } + def handleWriteShareGroupStateRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { + val writeShareRequest = request.body[WriteShareGroupStateRequest] - private def describeShareGroupOffsetsForGroup(requestContext: RequestContext, - groupDescribeOffsetsRequest: DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup - ): CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] = { - // Clients are not allowed to see offsets for topics that are not authorized for Describe. - val (authorizedTopics, unauthorizedTopics) = authHelper.partitionSeqByAuthorized( - requestContext, - DESCRIBE, - TOPIC, - groupDescribeOffsetsRequest.topics.asScala - )(_.topicName) + authHelper.authorizeClusterOperation(request, CLUSTER_ACTION) - groupCoordinator.describeShareGroupOffsets( - requestContext, - new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup() - .setGroupId(groupDescribeOffsetsRequest.groupId) - .setTopics(authorizedTopics.asJava) - ).handle[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] { (groupDescribeOffsetsResponse, exception) => - if (exception != null) { - val error = Errors.forException(exception) - new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup() - .setGroupId(groupDescribeOffsetsRequest.groupId) - .setErrorCode(error.code) - .setErrorMessage(error.message) - } else if (groupDescribeOffsetsResponse.errorCode() != Errors.NONE.code) { - groupDescribeOffsetsResponse - } else { - val topics = new util.ArrayList[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic]( - groupDescribeOffsetsResponse.topics.size + unauthorizedTopics.size - ) - topics.addAll(groupDescribeOffsetsResponse.topics) - unauthorizedTopics.foreach { topic => - val topicResponse = new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topic.topicName) - .setTopicId(Uuid.ZERO_UUID) - topic.partitions().forEach { partitionIndex => - topicResponse.partitions.add(new DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(partitionIndex) - .setStartOffset(-1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message)) + shareCoordinator match { + case None => requestHelper.sendResponseMaybeThrottle(request, requestThrottleMs => + writeShareRequest.getErrorResponse(requestThrottleMs, + new ApiException("Share coordinator is not enabled."))) + CompletableFuture.completedFuture[Unit](()) + case Some(coordinator) => coordinator.writeState(request.context, writeShareRequest.data) + .handle[Unit] { (response, exception) => + if (exception != null) { + requestHelper.sendMaybeThrottle(request, writeShareRequest.getErrorResponse(exception)) + } else { + requestHelper.sendMaybeThrottle(request, new WriteShareGroupStateResponse(response)) } - topics.add(topicResponse) } - groupDescribeOffsetsResponse.setTopics(topics) - } } } - def handleAlterShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val alterShareGroupOffsetsRequest = request.body[AlterShareGroupOffsetsRequest] - val groupId = alterShareGroupOffsetsRequest.data.groupId - - if (!isShareGroupProtocolEnabled) { - requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) - return CompletableFuture.completedFuture[Unit](()) - } else if (!authHelper.authorize(request.context, READ, GROUP, groupId)) { - requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(Errors.GROUP_AUTHORIZATION_FAILED.exception)) - } else { - val responseBuilder = new AlterShareGroupOffsetsResponse.Builder() - val authorizedTopicPartitions = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection() - - alterShareGroupOffsetsRequest.data.topics.forEach(topic => { - val topicError = { - if (!authHelper.authorize(request.context, READ, TOPIC, topic.topicName)) { - Some(new ApiError(Errors.TOPIC_AUTHORIZATION_FAILED)) - } else if (!metadataCache.contains(topic.topicName)) { - Some(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION)) - } else { - None - } - } - topicError match { - case Some(error) => - topic.partitions.forEach(partition => responseBuilder.addPartition(topic.topicName, partition.partitionIndex, metadataCache.topicNamesToIds, error)) - case None => - authorizedTopicPartitions.add(topic.duplicate) - } - }) - - val data = new AlterShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(authorizedTopicPartitions) - groupCoordinator.alterShareGroupOffsets( - request.context, - groupId, - data - ).handle[Unit] { (response, exception) => - if (exception != null) { - requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(exception)) - } else if (response.errorCode != Errors.NONE.code) { - requestHelper.sendMaybeThrottle(request, alterShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, response.errorCode, response.errorMessage)) - } else { - requestHelper.sendMaybeThrottle(request, responseBuilder.merge(response, metadataCache.topicNamesToIds).build()) - } - } - } + def handleDeleteShareGroupStateRequest(request: RequestChannel.Request): Unit = { + val deleteShareGroupStateRequest = request.body[DeleteShareGroupStateRequest] + // TODO: Implement the DeleteShareGroupStateRequest handling + requestHelper.sendMaybeThrottle(request, deleteShareGroupStateRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) CompletableFuture.completedFuture[Unit](()) } - def handleDeleteShareGroupOffsetsRequest(request: RequestChannel.Request): CompletableFuture[Unit] = { - val deleteShareGroupOffsetsRequest = request.body[DeleteShareGroupOffsetsRequest] - - val groupId = deleteShareGroupOffsetsRequest.data.groupId - - if (!isShareGroupProtocolEnabled) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.UNSUPPORTED_VERSION.exception)) - return CompletableFuture.completedFuture[Unit](()) - } else if (!authHelper.authorize(request.context, DELETE, GROUP, groupId)) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, Errors.GROUP_AUTHORIZATION_FAILED.exception)) - return CompletableFuture.completedFuture[Unit](()) - } - - val deleteShareGroupOffsetsResponseTopics: util.List[DeleteShareGroupOffsetsResponseTopic] = new util.ArrayList[DeleteShareGroupOffsetsResponseTopic]() - - val authorizedTopics: util.List[DeleteShareGroupOffsetsRequestTopic] = - new util.ArrayList[DeleteShareGroupOffsetsRequestTopic] - - deleteShareGroupOffsetsRequest.data.topics.forEach{ topic => - if (!authHelper.authorize(request.context, READ, TOPIC, topic.topicName)) { - deleteShareGroupOffsetsResponseTopics.add( - new DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topic.topicName) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message) - ) - } else { - authorizedTopics.add(topic) - } - } - - groupCoordinator.deleteShareGroupOffsets( - request.context, - new DeleteShareGroupOffsetsRequestData().setGroupId(groupId).setTopics(authorizedTopics) - ).handle[Unit] {(responseData, exception) => { - if (exception != null) { - requestHelper.sendMaybeThrottle(request, deleteShareGroupOffsetsRequest.getErrorResponse( - AbstractResponse.DEFAULT_THROTTLE_TIME, - Errors.forException(exception).code, - exception.getMessage)) - } else if (responseData.errorCode() != Errors.NONE.code) { - requestHelper.sendMaybeThrottle( - request, - deleteShareGroupOffsetsRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, responseData.errorCode, responseData.errorMessage) - ) - } else { - responseData.responses.forEach { topic => { - deleteShareGroupOffsetsResponseTopics.add(topic) - }} - val deleteShareGroupStateResponse = new DeleteShareGroupOffsetsResponse(new DeleteShareGroupOffsetsResponseData() - .setResponses(deleteShareGroupOffsetsResponseTopics)) - requestHelper.sendMaybeThrottle(request, deleteShareGroupStateResponse) - } - }} + def handleReadShareGroupStateSummaryRequest(request: RequestChannel.Request): Unit = { + val readShareGroupStateSummaryRequest = request.body[ReadShareGroupStateSummaryRequest] + // TODO: Implement the ReadShareGroupStateSummaryRequest handling + requestHelper.sendMaybeThrottle(request, readShareGroupStateSummaryRequest.getErrorResponse(Errors.UNSUPPORTED_VERSION.exception)) + CompletableFuture.completedFuture[Unit](()) } // Visible for Testing @@ -4070,8 +3413,7 @@ class KafkaApis(val requestChannel: RequestChannel, // Prepare share fetch response val response = - ShareFetchResponse.of(shareFetchResponse.error, throttleTimeMs, responseData, nodeEndpoints.values.toList.asJava, - ShareFetchUtils.recordLockDurationMsOrDefault(groupConfigManager, groupId, config.shareGroupConfig.shareGroupRecordLockDurationMs)) + ShareFetchResponse.of(shareFetchResponse.error, throttleTimeMs, responseData, nodeEndpoints.values.toList.asJava) // record the bytes out metrics only when the response is being sent. response.data.responses.forEach { topicResponse => topicResponse.partitions.forEach { data => @@ -4094,14 +3436,14 @@ class KafkaApis(val requestChannel: RequestChannel, val responseSize = shareFetchContext.responseSize(partitions, versionId) val timeMs = time.milliseconds() val requestThrottleTimeMs = quotas.request.maybeRecordAndGetThrottleTimeMs(request, timeMs) - val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request.session, request.header.clientId(), responseSize, timeMs) + val bandwidthThrottleTimeMs = quotas.fetch.maybeRecordAndGetThrottleTimeMs(request, responseSize, timeMs) val maxThrottleTimeMs = math.max(bandwidthThrottleTimeMs, requestThrottleTimeMs) if (maxThrottleTimeMs > 0) { request.apiThrottleTimeMs = maxThrottleTimeMs // Even if we need to throttle for request quota violation, we should "unrecord" the already recorded value // from the fetch quota because we are going to return an empty response. - quotas.fetch.unrecordQuotaSensor(request.session, request.header.clientId(), responseSize, timeMs) + quotas.fetch.unrecordQuotaSensor(request, responseSize, timeMs) if (bandwidthThrottleTimeMs > requestThrottleTimeMs) { requestHelper.throttle(quotas.fetch, request, bandwidthThrottleTimeMs) } else { @@ -4129,12 +3471,8 @@ class KafkaApis(val requestChannel: RequestChannel, .setCurrentLeader(partitionData.currentLeader) } - private def shareVersion(): ShareVersion = { - ShareVersion.fromFeatureLevel(metadataCache.features.finalizedFeatures.getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort)) - } - private def isShareGroupProtocolEnabled: Boolean = { - config.shareGroupConfig.isShareGroupEnabled || shareVersion().supportsShareGroups + groupCoordinator.isNewGroupCoordinator && config.shareGroupConfig.isShareGroupEnabled } private def updateRecordConversionStats(request: RequestChannel.Request, @@ -4156,10 +3494,6 @@ class KafkaApis(val requestChannel: RequestChannel, } request.temporaryMemoryBytes = conversionStats.temporaryMemoryBytes } - - def authorizeClusterOperation(request: RequestChannel.Request, operation: AclOperation): Boolean = { - authHelper.authorize(request.context, operation, CLUSTER, CLUSTER_NAME) - } } object KafkaApis { diff --git a/core/src/main/scala/kafka/server/KafkaBroker.scala b/core/src/main/scala/kafka/server/KafkaBroker.scala index 46576d97d338a..f2e71b68fa15f 100644 --- a/core/src/main/scala/kafka/server/KafkaBroker.scala +++ b/core/src/main/scala/kafka/server/KafkaBroker.scala @@ -18,20 +18,20 @@ package kafka.server import kafka.log.LogManager +import kafka.log.remote.RemoteLogManager import kafka.network.SocketServer import kafka.utils.Logging import org.apache.kafka.common.ClusterResource -import org.apache.kafka.common.internals.{ClusterResourceListeners, Plugin} +import org.apache.kafka.common.internals.ClusterResourceListeners import org.apache.kafka.common.metrics.{Metrics, MetricsReporter} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.Time import org.apache.kafka.coordinator.group.GroupCoordinator -import org.apache.kafka.metadata.{BrokerState, MetadataCache} +import org.apache.kafka.metadata.BrokerState import org.apache.kafka.security.CredentialProvider import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.NodeToControllerChannelManager -import org.apache.kafka.server.log.remote.storage.RemoteLogManager import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.server.util.Scheduler import org.apache.kafka.storage.internals.log.LogDirFailureChannel @@ -78,7 +78,7 @@ trait KafkaBroker extends Logging { // acquire lock while handling Fetch requests. val NumFetchSessionCacheShards: Int = 8 - def authorizerPlugin: Option[Plugin[Authorizer]] + def authorizer: Option[Authorizer] def brokerState: BrokerState def clusterId: String def config: KafkaConfig diff --git a/core/src/main/scala/kafka/server/KafkaConfig.scala b/core/src/main/scala/kafka/server/KafkaConfig.scala index 671bee32d2839..37f60691c6efd 100755 --- a/core/src/main/scala/kafka/server/KafkaConfig.scala +++ b/core/src/main/scala/kafka/server/KafkaConfig.scala @@ -20,15 +20,14 @@ package kafka.server import java.util import java.util.concurrent.TimeUnit import java.util.Properties +import kafka.cluster.EndPoint import kafka.utils.{CoreUtils, Logging} import kafka.utils.Implicits._ -import org.apache.kafka.common.{Endpoint, Reconfigurable} +import org.apache.kafka.common.Reconfigurable import org.apache.kafka.common.config.{ConfigDef, ConfigException, ConfigResource, TopicConfig} import org.apache.kafka.common.config.ConfigDef.ConfigKey import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.config.types.Password -import org.apache.kafka.common.internals.Plugin -import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.record.TimestampType import org.apache.kafka.common.security.auth.KafkaPrincipalSerde @@ -38,16 +37,17 @@ import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} import org.apache.kafka.coordinator.share.ShareCoordinatorConfig +import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.security.authorizer.AuthorizerUtils import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.authorizer.Authorizer import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.server.config.AbstractKafkaConfig.getMap -import org.apache.kafka.server.config.{AbstractKafkaConfig, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{AbstractKafkaConfig, DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs +import org.apache.kafka.server.util.Csv import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} import scala.jdk.CollectionConverters._ @@ -204,10 +204,18 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) private val _shareCoordinatorConfig = new ShareCoordinatorConfig(this) def shareCoordinatorConfig: ShareCoordinatorConfig = _shareCoordinatorConfig + private val _transactionLogConfig = new TransactionLogConfig(this) + private val _transactionStateManagerConfig = new TransactionStateManagerConfig(this) + private val _addPartitionsToTxnConfig = new AddPartitionsToTxnConfig(this) + def transactionLogConfig: TransactionLogConfig = _transactionLogConfig + def transactionStateManagerConfig: TransactionStateManagerConfig = _transactionStateManagerConfig + def addPartitionsToTxnConfig: AddPartitionsToTxnConfig = _addPartitionsToTxnConfig + private val _quotaConfig = new QuotaConfig(this) def quotaConfig: QuotaConfig = _quotaConfig /** ********* General Configuration ***********/ + var brokerId: Int = getInt(ServerConfigs.BROKER_ID_CONFIG) val nodeId: Int = getInt(KRaftConfigs.NODE_ID_CONFIG) val initialRegistrationTimeoutMs: Int = getInt(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG) val brokerHeartbeatIntervalMs: Int = getInt(KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG) @@ -222,7 +230,14 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) case role => throw new ConfigException(s"Unknown process role '$role'" + " (only 'broker' and 'controller' are allowed roles)") } - roles.toSet + + val distinctRoles: Set[ProcessRole] = roles.toSet + + if (distinctRoles.size != roles.size) { + throw new ConfigException(s"Duplicate role names found in `${KRaftConfigs.PROCESS_ROLES_CONFIG}`: $roles") + } + + distinctRoles } def isKRaftCombinedMode: Boolean = { @@ -230,15 +245,24 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } def metadataLogDir: String = { - Option(getString(MetadataLogConfig.METADATA_LOG_DIR_CONFIG)) match { + Option(getString(KRaftConfigs.METADATA_LOG_DIR_CONFIG)) match { case Some(dir) => dir - case None => logDirs.get(0) + case None => logDirs.head } } + def metadataLogSegmentBytes = getInt(KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG) + def metadataLogSegmentMillis = getLong(KRaftConfigs.METADATA_LOG_SEGMENT_MILLIS_CONFIG) + def metadataRetentionBytes = getLong(KRaftConfigs.METADATA_MAX_RETENTION_BYTES_CONFIG) + def metadataRetentionMillis = getLong(KRaftConfigs.METADATA_MAX_RETENTION_MILLIS_CONFIG) + def metadataNodeIDConfig = getInt(KRaftConfigs.NODE_ID_CONFIG) + def metadataLogSegmentMinBytes = getInt(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG) val serverMaxStartupTimeMs = getLong(KRaftConfigs.SERVER_MAX_STARTUP_TIME_MS_CONFIG) + def backgroundThreads = getInt(ServerConfigs.BACKGROUND_THREADS_CONFIG) + def numIoThreads = getInt(ServerConfigs.NUM_IO_THREADS_CONFIG) def messageMaxBytes = getInt(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG) + val requestTimeoutMs = getInt(ServerConfigs.REQUEST_TIMEOUT_MS_CONFIG) val connectionSetupTimeoutMs = getLong(ServerConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG) val connectionSetupTimeoutMaxMs = getLong(ServerConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG) @@ -248,30 +272,30 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } /************* Metadata Configuration ***********/ - val metadataSnapshotMaxNewRecordBytes = getLong(MetadataLogConfig.METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG) - val metadataSnapshotMaxIntervalMs = getLong(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG) + val metadataSnapshotMaxNewRecordBytes = getLong(KRaftConfigs.METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG) + val metadataSnapshotMaxIntervalMs = getLong(KRaftConfigs.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG) val metadataMaxIdleIntervalNs: Option[Long] = { - val value = TimeUnit.NANOSECONDS.convert(getInt(MetadataLogConfig.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG).toLong, TimeUnit.MILLISECONDS) + val value = TimeUnit.NANOSECONDS.convert(getInt(KRaftConfigs.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG).toLong, TimeUnit.MILLISECONDS) if (value > 0) Some(value) else None } /************* Authorizer Configuration ***********/ - def createNewAuthorizer(metrics: Metrics, role: String): Option[Plugin[Authorizer]] = { + def createNewAuthorizer(): Option[Authorizer] = { val className = getString(ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG) if (className == null || className.isEmpty) None else { - Some(AuthorizerUtils.createAuthorizer(className, originals, metrics, ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, role)) + Some(AuthorizerUtils.createAuthorizer(className)) } } val earlyStartListeners: Set[ListenerName] = { - val listenersSet = listeners.map(l => ListenerName.normalised(l.listener)).toSet - val controllerListenersSet = controllerListeners.map(l => ListenerName.normalised(l.listener)).toSet - Option(getList(ServerConfigs.EARLY_START_LISTENERS_CONFIG)) match { + val listenersSet = listeners.map(_.listenerName).toSet + val controllerListenersSet = controllerListeners.map(_.listenerName).toSet + Option(getString(ServerConfigs.EARLY_START_LISTENERS_CONFIG)) match { case None => controllerListenersSet - case Some(list) => - list.asScala.map(_.trim()).filterNot(_.isEmpty).map { str => + case Some(str) => + str.split(",").map(_.trim()).filterNot(_.isEmpty).map { str => val listenerName = new ListenerName(str) if (!listenersSet.contains(listenerName) && !controllerListenersSet.contains(listenerName)) throw new ConfigException(s"${ServerConfigs.EARLY_START_LISTENERS_CONFIG} contains " + @@ -287,9 +311,9 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) val socketReceiveBufferBytes = getInt(SocketServerConfigs.SOCKET_RECEIVE_BUFFER_BYTES_CONFIG) val socketRequestMaxBytes = getInt(SocketServerConfigs.SOCKET_REQUEST_MAX_BYTES_CONFIG) val socketListenBacklogSize = getInt(SocketServerConfigs.SOCKET_LISTEN_BACKLOG_SIZE_CONFIG) - def maxConnectionsPerIp = getInt(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG) - def maxConnectionsPerIpOverrides: Map[String, Int] = - getMap(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, getString(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG)).asScala.map { case (k, v) => (k, v.toInt)} + val maxConnectionsPerIp = getInt(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG) + val maxConnectionsPerIpOverrides: Map[String, Int] = + getMap(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, getString(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG)).map { case (k, v) => (k, v.toInt)} def maxConnections = getInt(SocketServerConfigs.MAX_CONNECTIONS_CONFIG) def maxConnectionCreationRate = getInt(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG) val connectionsMaxIdleMs = getLong(SocketServerConfigs.CONNECTIONS_MAX_IDLE_MS_CONFIG) @@ -305,9 +329,11 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) /** ********* Log Configuration ***********/ val autoCreateTopicsEnable = getBoolean(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG) val numPartitions = getInt(ServerLogConfigs.NUM_PARTITIONS_CONFIG) + val logDirs: Seq[String] = Csv.parseCsvList(Option(getString(ServerLogConfigs.LOG_DIRS_CONFIG)).getOrElse(getString(ServerLogConfigs.LOG_DIR_CONFIG))).asScala def logSegmentBytes = getInt(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG) def logFlushIntervalMessages = getLong(ServerLogConfigs.LOG_FLUSH_INTERVAL_MESSAGES_CONFIG) def logCleanerThreads = getInt(CleanerConfig.LOG_CLEANER_THREADS_PROP) + def numRecoveryThreadsPerDataDir = getInt(ServerLogConfigs.NUM_RECOVERY_THREADS_PER_DATA_DIR_CONFIG) val logFlushSchedulerIntervalMs = getLong(ServerLogConfigs.LOG_FLUSH_SCHEDULER_INTERVAL_MS_CONFIG) val logFlushOffsetCheckpointIntervalMs = getInt(ServerLogConfigs.LOG_FLUSH_OFFSET_CHECKPOINT_INTERVAL_MS_CONFIG).toLong val logFlushStartOffsetCheckpointIntervalMs = getInt(ServerLogConfigs.LOG_FLUSH_START_OFFSET_CHECKPOINT_INTERVAL_MS_CONFIG).toLong @@ -316,10 +342,15 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) def logRetentionBytes = getLong(ServerLogConfigs.LOG_RETENTION_BYTES_CONFIG) def logCleanerDedupeBufferSize = getLong(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_SIZE_PROP) + def logCleanerDedupeBufferLoadFactor = getDouble(CleanerConfig.LOG_CLEANER_DEDUPE_BUFFER_LOAD_FACTOR_PROP) + def logCleanerIoBufferSize = getInt(CleanerConfig.LOG_CLEANER_IO_BUFFER_SIZE_PROP) + def logCleanerIoMaxBytesPerSecond = getDouble(CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP) def logCleanerDeleteRetentionMs = getLong(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP) def logCleanerMinCompactionLagMs = getLong(CleanerConfig.LOG_CLEANER_MIN_COMPACTION_LAG_MS_PROP) def logCleanerMaxCompactionLagMs = getLong(CleanerConfig.LOG_CLEANER_MAX_COMPACTION_LAG_MS_PROP) + def logCleanerBackoffMs = getLong(CleanerConfig.LOG_CLEANER_BACKOFF_MS_PROP) def logCleanerMinCleanRatio = getDouble(CleanerConfig.LOG_CLEANER_MIN_CLEAN_RATIO_PROP) + val logCleanerEnable = getBoolean(CleanerConfig.LOG_CLEANER_ENABLE_PROP) def logIndexSizeMaxBytes = getInt(ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_CONFIG) def logIndexIntervalBytes = getInt(ServerLogConfigs.LOG_INDEX_INTERVAL_BYTES_CONFIG) def logDeleteDelayMs = getLong(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG) @@ -349,6 +380,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) val replicaFetchMinBytes = getInt(ReplicationConfigs.REPLICA_FETCH_MIN_BYTES_CONFIG) val replicaFetchResponseMaxBytes = getInt(ReplicationConfigs.REPLICA_FETCH_RESPONSE_MAX_BYTES_CONFIG) val replicaFetchBackoffMs = getInt(ReplicationConfigs.REPLICA_FETCH_BACKOFF_MS_CONFIG) + def numReplicaFetchers = getInt(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG) val replicaHighWatermarkCheckpointIntervalMs = getLong(ReplicationConfigs.REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS_CONFIG) val fetchPurgatoryPurgeIntervalRequests = getInt(ReplicationConfigs.FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG) val producerPurgatoryPurgeIntervalRequests = getInt(ReplicationConfigs.PRODUCER_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG) @@ -361,13 +393,24 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) /** ********* Controlled shutdown configuration ***********/ val controlledShutdownEnable = getBoolean(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG) - /** Group coordinator configs */ + /** New group coordinator configs */ + val isNewGroupCoordinatorEnabled = getBoolean(GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG) val groupCoordinatorRebalanceProtocols = { val protocols = getList(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG) .asScala.map(_.toUpperCase).map(GroupType.valueOf).toSet if (!protocols.contains(GroupType.CLASSIC)) { throw new ConfigException(s"Disabling the '${GroupType.CLASSIC}' protocol is not supported.") } + if (protocols.contains(GroupType.CONSUMER) && !isNewGroupCoordinatorEnabled) { + warn(s"The new '${GroupType.CONSUMER}' rebalance protocol is only supported with the new group coordinator.") + } + if (protocols.contains(GroupType.SHARE)) { + if (!isNewGroupCoordinatorEnabled) { + warn(s"The new '${GroupType.SHARE}' rebalance protocol is only supported with the new group coordinator.") + } + warn(s"Share groups and the new '${GroupType.SHARE}' rebalance protocol are enabled. " + + "This is part of the early access of KIP-932 and MUST NOT be used in production.") + } protocols } @@ -391,8 +434,17 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) Set.empty[String] } + def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1 + def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2 def saslMechanismInterBrokerProtocol = getString(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG) + /** ********* DelegationToken Configuration **************/ + val delegationTokenSecretKey = getPassword(DelegationTokenManagerConfigs.DELEGATION_TOKEN_SECRET_KEY_CONFIG) + val tokenAuthEnabled = delegationTokenSecretKey != null && delegationTokenSecretKey.value.nonEmpty + val delegationTokenMaxLifeMs = getLong(DelegationTokenManagerConfigs.DELEGATION_TOKEN_MAX_LIFETIME_CONFIG) + val delegationTokenExpiryTimeMs = getLong(DelegationTokenManagerConfigs.DELEGATION_TOKEN_EXPIRY_TIME_MS_CONFIG) + val delegationTokenExpiryCheckIntervalMs = getLong(DelegationTokenManagerConfigs.DELEGATION_TOKEN_EXPIRY_CHECK_INTERVAL_MS_CONFIG) + /** ********* Fetch Configuration **************/ val maxIncrementalFetchSessionCacheSlots = getInt(ServerConfigs.MAX_INCREMENTAL_FETCH_SESSION_CACHE_SLOTS_CONFIG) val fetchMaxBytes = getInt(ServerConfigs.FETCH_MAX_BYTES_CONFIG) @@ -434,43 +486,60 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) millis } - def listeners: Seq[Endpoint] = - CoreUtils.listenerListToEndPoints(getList(SocketServerConfigs.LISTENERS_CONFIG), effectiveListenerSecurityProtocolMap) + private def getMap(propName: String, propValue: String): Map[String, String] = { + try { + Csv.parseCsvMap(propValue).asScala + } catch { + case e: Exception => throw new IllegalArgumentException("Error parsing configuration property '%s': %s".format(propName, e.getMessage)) + } + } + + def listeners: Seq[EndPoint] = + CoreUtils.listenerListToEndPoints(getString(SocketServerConfigs.LISTENERS_CONFIG), effectiveListenerSecurityProtocolMap) + + def controllerListenerNames: Seq[String] = { + val value = Option(getString(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG)).getOrElse("") + if (value.isEmpty) { + Seq.empty + } else { + value.split(",") + } + } - def controllerListeners: Seq[Endpoint] = - listeners.filter(l => controllerListenerNames.contains(l.listener)) + def controllerListeners: Seq[EndPoint] = + listeners.filter(l => controllerListenerNames.contains(l.listenerName.value())) def saslMechanismControllerProtocol: String = getString(KRaftConfigs.SASL_MECHANISM_CONTROLLER_PROTOCOL_CONFIG) - def dataPlaneListeners: Seq[Endpoint] = { + def dataPlaneListeners: Seq[EndPoint] = { listeners.filterNot { listener => - val name = listener.listener + val name = listener.listenerName.value() controllerListenerNames.contains(name) } } - def effectiveAdvertisedControllerListeners: Seq[Endpoint] = { - val advertisedListenersProp = getList(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) + def effectiveAdvertisedControllerListeners: Seq[EndPoint] = { + val advertisedListenersProp = getString(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) val controllerAdvertisedListeners = if (advertisedListenersProp != null) { CoreUtils.listenerListToEndPoints(advertisedListenersProp, effectiveListenerSecurityProtocolMap, requireDistinctPorts=false) - .filter(l => controllerListenerNames.contains(l.listener)) + .filter(l => controllerListenerNames.contains(l.listenerName.value())) } else { Seq.empty } val controllerListenersValue = controllerListeners - controllerListenerNames.asScala.flatMap { name => + controllerListenerNames.flatMap { name => controllerAdvertisedListeners - .find(endpoint => ListenerName.normalised(endpoint.listener).equals(ListenerName.normalised(name))) + .find(endpoint => endpoint.listenerName.equals(ListenerName.normalised(name))) .orElse( // If users don't define advertised.listeners, the advertised controller listeners inherit from listeners configuration // which match listener names in controller.listener.names. // Removing "0.0.0.0" host to avoid validation errors. This is to be compatible with the old behavior before 3.9. // The null or "" host does a reverse lookup in ListenerInfo#withWildcardHostnamesResolved. controllerListenersValue - .find(endpoint => ListenerName.normalised(endpoint.listener).equals(ListenerName.normalised(name))) + .find(endpoint => endpoint.listenerName.equals(ListenerName.normalised(name))) .map(endpoint => if (endpoint.host == "0.0.0.0") { - new Endpoint(endpoint.listener, endpoint.securityProtocol, null, endpoint.port) + new EndPoint(null, endpoint.port, endpoint.listenerName, endpoint.securityProtocol) } else { endpoint }) @@ -478,16 +547,67 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) } } - def effectiveAdvertisedBrokerListeners: Seq[Endpoint] = { + def effectiveAdvertisedBrokerListeners: Seq[EndPoint] = { // Use advertised listeners if defined, fallback to listeners otherwise - val advertisedListenersProp = getList(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) + val advertisedListenersProp = getString(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG) val advertisedListeners = if (advertisedListenersProp != null) { CoreUtils.listenerListToEndPoints(advertisedListenersProp, effectiveListenerSecurityProtocolMap, requireDistinctPorts=false) } else { listeners } // Only expose broker listeners - advertisedListeners.filterNot(l => controllerListenerNames.contains(l.listener)) + advertisedListeners.filterNot(l => controllerListenerNames.contains(l.listenerName.value())) + } + + private def getInterBrokerListenerNameAndSecurityProtocol: (ListenerName, SecurityProtocol) = { + Option(getString(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)) match { + case Some(_) if originals.containsKey(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG) => + throw new ConfigException(s"Only one of ${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} and " + + s"${ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG} should be set.") + case Some(name) => + val listenerName = ListenerName.normalised(name) + val securityProtocol = effectiveListenerSecurityProtocolMap.getOrElse(listenerName, + throw new ConfigException(s"Listener with name ${listenerName.value} defined in " + + s"${ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG}.")) + (listenerName, securityProtocol) + case None => + val securityProtocol = getSecurityProtocol(getString(ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG), + ReplicationConfigs.INTER_BROKER_SECURITY_PROTOCOL_CONFIG) + (ListenerName.forSecurityProtocol(securityProtocol), securityProtocol) + } + } + + private def getSecurityProtocol(protocolName: String, configName: String): SecurityProtocol = { + try SecurityProtocol.forName(protocolName) + catch { + case _: IllegalArgumentException => + throw new ConfigException(s"Invalid security protocol `$protocolName` defined in $configName") + } + } + + def effectiveListenerSecurityProtocolMap: Map[ListenerName, SecurityProtocol] = { + val mapValue = getMap(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, getString(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) + .map { case (listenerName, protocolName) => + ListenerName.normalised(listenerName) -> getSecurityProtocol(protocolName, SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG) + } + if (!originals.containsKey(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) { + // Nothing was specified explicitly for listener.security.protocol.map, so we are using the default value, + // and we are using KRaft. + // Add PLAINTEXT mappings for controller listeners as long as there is no SSL or SASL_{PLAINTEXT,SSL} in use + def isSslOrSasl(name: String): Boolean = name.equals(SecurityProtocol.SSL.name) || name.equals(SecurityProtocol.SASL_SSL.name) || name.equals(SecurityProtocol.SASL_PLAINTEXT.name) + // check controller listener names (they won't appear in listeners when process.roles=broker) + // as well as listeners for occurrences of SSL or SASL_* + if (controllerListenerNames.exists(isSslOrSasl) || + Csv.parseCsvList(getString(SocketServerConfigs.LISTENERS_CONFIG)).asScala.exists(listenerValue => isSslOrSasl(EndPoint.parseListenerName(listenerValue)))) { + mapValue // don't add default mappings since we found something that is SSL or SASL_* + } else { + // add the PLAINTEXT mappings for all controller listener names that are not explicitly PLAINTEXT + mapValue ++ controllerListenerNames.filterNot(SecurityProtocol.PLAINTEXT.name.equals(_)).map( + new ListenerName(_) -> SecurityProtocol.PLAINTEXT) + } + } else { + mapValue + } } validateValues() @@ -499,21 +619,14 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) require(logRollTimeMillis >= 1, "log.roll.ms must be greater than or equal to 1") require(logRollTimeJitterMillis >= 0, "log.roll.jitter.ms must be greater than or equal to 0") require(logRetentionTimeMillis >= 1 || logRetentionTimeMillis == -1, "log.retention.ms must be unlimited (-1) or, greater than or equal to 1") - require(logDirs.size > 0, "At least one log directory must be defined via log.dirs or log.dir.") + require(logDirs.nonEmpty, "At least one log directory must be defined via log.dirs or log.dir.") require(logCleanerDedupeBufferSize / logCleanerThreads > 1024 * 1024, "log.cleaner.dedupe.buffer.size must be at least 1MB per cleaner thread.") require(replicaFetchWaitMaxMs <= replicaSocketTimeoutMs, "replica.socket.timeout.ms should always be at least replica.fetch.wait.max.ms" + " to prevent unnecessary socket timeouts") require(replicaFetchWaitMaxMs <= replicaLagTimeMaxMs, "replica.fetch.wait.max.ms should always be less than or equal to replica.lag.time.max.ms" + " to prevent frequent changes in ISR") - if (brokerHeartbeatIntervalMs * 2 > brokerSessionTimeoutMs) { - error(s"${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG} ($brokerHeartbeatIntervalMs ms) must be less than or equal to half of the ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} ($brokerSessionTimeoutMs ms). " + - s"The ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} is configured on controller. The ${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG} is configured on broker. " + - s"If a broker doesn't send heartbeat request within ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG}, it loses broker lease. " + - s"Please increase ${KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG} or decrease ${KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG}.") - } - - val advertisedBrokerListenerNames = effectiveAdvertisedBrokerListeners.map(l => ListenerName.normalised(l.listener)).toSet + val advertisedBrokerListenerNames = effectiveAdvertisedBrokerListeners.map(_.listenerName).toSet // validate KRaft-related configs val voterIds = QuorumConfig.parseVoterIds(quorumConfig.voters) @@ -536,8 +649,8 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must contain at least one value appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running the KRaft controller role") } def validateControllerListenerNamesMustAppearInListenersForKRaftController(): Unit = { - val listenerNameValues = listeners.map(_.listener).toSet - require(controllerListenerNames.stream().allMatch(cln => listenerNameValues.contains(cln)), + val listenerNameValues = listeners.map(_.listenerName.value).toSet + require(controllerListenerNames.forall(cln => listenerNameValues.contains(cln)), s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must only contain values appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running the KRaft controller role") } def validateAdvertisedBrokerListenersNonEmptyForBroker(): Unit = { @@ -557,25 +670,22 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) require(!voterIds.contains(nodeId), s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains just the 'broker' role, the node id $nodeId must not be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}") // controller.listener.names must be non-empty... - require(controllerListenerNames.size() > 0, + require(controllerListenerNames.nonEmpty, s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must contain at least one value when running KRaft with just the broker role") // controller.listener.names are forbidden in listeners... require(controllerListeners.isEmpty, s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} must not contain a value appearing in the '${SocketServerConfigs.LISTENERS_CONFIG}' configuration when running KRaft with just the broker role") // controller.listener.names must all appear in listener.security.protocol.map - controllerListenerNames.forEach { name => + controllerListenerNames.foreach { name => val listenerName = ListenerName.normalised(name) - if (!effectiveListenerSecurityProtocolMap.containsKey(listenerName)) { + if (!effectiveListenerSecurityProtocolMap.contains(listenerName)) { throw new ConfigException(s"Controller listener with name ${listenerName.value} defined in " + s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG} (an explicit security mapping for each controller listener is required if ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG} is non-empty, or if there are security protocols other than PLAINTEXT in use)") } } - // controller.quorum.auto.join.enable must be false for KRaft broker-only - require(!quorumConfig.autoJoin, - s"${QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG} is only supported when ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains the 'controller' role.") // warn that only the first controller listener is used if there is more than one if (controllerListenerNames.size > 1) { - warn(s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} has multiple entries; only the first will be used since ${KRaftConfigs.PROCESS_ROLES_CONFIG}=broker: ${controllerListenerNames}") + warn(s"${KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG} has multiple entries; only the first will be used since ${KRaftConfigs.PROCESS_ROLES_CONFIG}=broker: ${controllerListenerNames.asJava}") } // warn if create.topic.policy.class.name or alter.config.policy.class.name is defined in the broker role warnIfConfigDefinedInWrongRole(ProcessRole.ControllerRole, ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG) @@ -606,7 +716,7 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _]) validateControllerListenerNamesMustAppearInListenersForKRaftController() } - val listenerNames = listeners.map(l => ListenerName.normalised(l.listener)).toSet + val listenerNames = listeners.map(_.listenerName).toSet if (processRoles.contains(ProcessRole.BrokerRole)) { validateAdvertisedBrokerListenersNonEmptyForBroker() require(advertisedBrokerListenerNames.contains(interBrokerListenerName), diff --git a/core/src/main/scala/kafka/server/KafkaRaftServer.scala b/core/src/main/scala/kafka/server/KafkaRaftServer.scala index e3497a6ff88aa..5d31f72db83ef 100644 --- a/core/src/main/scala/kafka/server/KafkaRaftServer.scala +++ b/core/src/main/scala/kafka/server/KafkaRaftServer.scala @@ -18,6 +18,7 @@ package kafka.server import java.io.File import java.util.concurrent.CompletableFuture +import kafka.log.UnifiedLog import kafka.utils.{CoreUtils, Logging, Mx4jLoader} import org.apache.kafka.common.config.{ConfigDef, ConfigResource} import org.apache.kafka.common.internals.Topic @@ -30,7 +31,7 @@ import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsem import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.{ProcessRole, ServerSocketFactory} import org.apache.kafka.server.config.ServerTopicConfigSynonyms -import org.apache.kafka.storage.internals.log.{LogConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.LogConfig import org.slf4j.Logger import java.util @@ -134,7 +135,7 @@ object KafkaRaftServer { // Load and verify the original ensemble. val loader = new MetaPropertiesEnsemble.Loader() loader.addMetadataLogDir(config.metadataLogDir) - .addLogDirs(config.logDirs) + .addLogDirs(config.logDirs.asJava) val initialMetaPropsEnsemble = loader.load() val verificationFlags = util.EnumSet.of(REQUIRE_AT_LEAST_ONE_VALID, REQUIRE_METADATA_LOG_DIR) initialMetaPropsEnsemble.verify(Optional.empty(), OptionalInt.of(config.nodeId), verificationFlags) diff --git a/core/src/main/scala/kafka/server/KafkaRequestHandler.scala b/core/src/main/scala/kafka/server/KafkaRequestHandler.scala index 815fe4966eb81..b65bd41e5a7d9 100755 --- a/core/src/main/scala/kafka/server/KafkaRequestHandler.scala +++ b/core/src/main/scala/kafka/server/KafkaRequestHandler.scala @@ -95,7 +95,7 @@ class KafkaRequestHandler( time: Time, nodeName: String = "broker" ) extends Runnable with Logging { - this.logIdent = s"[Kafka Request Handler $id on ${nodeName.capitalize} $brokerId] " + this.logIdent = s"[Kafka Request Handler $id on ${nodeName.capitalize} $brokerId], " private val shutdownComplete = new CountDownLatch(1) private val requestLocal = RequestLocal.withThreadConfinedCaching @volatile private var stopped = false @@ -199,6 +199,7 @@ class KafkaRequestHandlerPool( time: Time, numThreads: Int, requestHandlerAvgIdleMetricName: String, + logAndThreadNamePrefix : String, nodeName: String = "broker" ) extends Logging { private val metricsGroup = new KafkaMetricsGroup(this.getClass) @@ -207,7 +208,7 @@ class KafkaRequestHandlerPool( /* a meter to track the average free capacity of the request handlers */ private val aggregateIdleMeter = metricsGroup.newMeter(requestHandlerAvgIdleMetricName, "percent", TimeUnit.NANOSECONDS) - this.logIdent = s"[data-plane Kafka Request Handler on ${nodeName.capitalize} $brokerId] " + this.logIdent = "[" + logAndThreadNamePrefix + " Kafka Request Handler on Broker " + brokerId + "], " val runnables = new mutable.ArrayBuffer[KafkaRequestHandler](numThreads) for (i <- 0 until numThreads) { createHandler(i) @@ -215,7 +216,7 @@ class KafkaRequestHandlerPool( def createHandler(id: Int): Unit = synchronized { runnables += new KafkaRequestHandler(id, brokerId, aggregateIdleMeter, threadPoolSize, requestChannel, apis, time, nodeName) - KafkaThread.daemon("data-plane-kafka-request-handler-" + id, runnables(id)).start() + KafkaThread.daemon(logAndThreadNamePrefix + "-kafka-request-handler-" + id, runnables(id)).start() } def resizeThreadPool(newSize: Int): Unit = synchronized { diff --git a/core/src/main/scala/kafka/server/LeaderEndPoint.scala b/core/src/main/scala/kafka/server/LeaderEndPoint.scala new file mode 100644 index 0000000000000..889fb6472160c --- /dev/null +++ b/core/src/main/scala/kafka/server/LeaderEndPoint.scala @@ -0,0 +1,117 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.requests.FetchRequest +import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset +import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} +import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.network.BrokerEndPoint + +import scala.collection.Map + +/** + * This trait defines the APIs to be used to access a broker that is a leader. + */ +trait LeaderEndPoint { + + type FetchData = FetchResponseData.PartitionData + type EpochData = OffsetForLeaderEpochRequestData.OffsetForLeaderPartition + + /** + * A boolean specifying if truncation when fetching from the leader is supported + */ + def isTruncationOnFetchSupported: Boolean + + /** + * Initiate closing access to fetches from leader. + */ + def initiateClose(): Unit + + /** + * Closes access to fetches from leader. + * `initiateClose` must be called prior to invoking `close`. + */ + def close(): Unit + + /** + * The specific broker (host:port) we want to connect to. + */ + def brokerEndPoint(): BrokerEndPoint + + /** + * Given a fetchRequest, carries out the expected request and returns + * the results from fetching from the leader. + * + * @param fetchRequest The fetch request we want to carry out + * + * @return A map of topic partition -> fetch data + */ + def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] + + /** + * Fetches the epoch and log start offset of the given topic partition from the leader. + * + * @param topicPartition The topic partition that we want to fetch from + * @param currentLeaderEpoch An int representing the current leader epoch of the requester + * + * @return An OffsetAndEpoch object representing the earliest offset and epoch in the leader's topic partition. + */ + def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch + + /** + * Fetches the epoch and log end offset of the given topic partition from the leader. + * + * @param topicPartition The topic partition that we want to fetch from + * @param currentLeaderEpoch An int representing the current leader epoch of the requester + * + * @return An OffsetAndEpoch object representing the latest offset and epoch in the leader's topic partition. + */ + def fetchLatestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch + + /** + * Fetches offset for leader epoch from the leader for each given topic partition + * + * @param partitions A map of topic partition -> leader epoch of the replica + * + * @return A map of topic partition -> end offset for a requested leader epoch + */ + def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] + + /** + * Fetches the epoch and local log start offset from the leader for the given partition and the current leader-epoch + * + * @param topicPartition The topic partition that we want to fetch from + * @param currentLeaderEpoch An int representing the current leader epoch of the requester + * + * @return An OffsetAndEpoch object representing the earliest local offset and epoch in the leader's topic partition. + */ + def fetchEarliestLocalOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch + + /** + * Builds a fetch request, given a partition map. + * + * @param partitions A map of topic partitions to their respective partition fetch state + * + * @return A ResultWithPartitions, used to create the fetchRequest for fetch. + */ + def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] + +} diff --git a/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala b/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala new file mode 100644 index 0000000000000..51507e12043c4 --- /dev/null +++ b/core/src/main/scala/kafka/server/ListOffsetsPartitionStatus.scala @@ -0,0 +1,48 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import org.apache.kafka.common.errors.ApiException +import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder +import org.apache.kafka.storage.internals.log.OffsetResultHolder.FileRecordsOrError + +import java.util.Optional + +class ListOffsetsPartitionStatus(val futureHolderOpt: Optional[AsyncOffsetReadFutureHolder[FileRecordsOrError]], + val lastFetchableOffset: Option[Long], + val maybeOffsetsError: Option[ApiException]) { + + @volatile var responseOpt: Option[ListOffsetsPartitionResponse] = None + @volatile var completed = false + + override def toString: String = { + s"[responseOpt: $responseOpt, lastFetchableOffset: $lastFetchableOffset, " + + s"maybeOffsetsError: $maybeOffsetsError, completed: $completed]" + } +} + +object ListOffsetsPartitionStatus { + def apply(responseOpt: Option[ListOffsetsPartitionResponse], + futureHolderOpt: Optional[AsyncOffsetReadFutureHolder[FileRecordsOrError]] = Optional.empty(), + lastFetchableOffset: Option[Long] = None, + maybeOffsetsError: Option[ApiException] = None): ListOffsetsPartitionStatus = { + val status = new ListOffsetsPartitionStatus(futureHolderOpt, lastFetchableOffset, maybeOffsetsError) + status.responseOpt = responseOpt + status + } +} diff --git a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala index f32d9f8037adb..1e2a6cd033e48 100644 --- a/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/LocalLeaderEndPoint.scala @@ -17,10 +17,11 @@ package kafka.server +import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.utils.Logging import org.apache.kafka.common.errors.KafkaStorageException -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} +import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_EPOCH @@ -28,14 +29,13 @@ import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, RequestUti import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.LeaderEndPoint -import org.apache.kafka.server.{PartitionFetchState, ReplicaFetch, ResultWithPartitions} import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import java.util import java.util.Optional -import scala.collection.{Map, Seq, mutable} +import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.RichOption /** * Facilitates fetches from a local replica leader. @@ -63,8 +63,8 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { - var partitionData: Seq[(TopicPartition, FetchResponseData.PartitionData)] = null + override def fetch(fetchRequest: FetchRequest.Builder): collection.Map[TopicPartition, FetchData] = { + var partitionData: Seq[(TopicPartition, FetchData)] = null val request = fetchRequest.build() // We can build the map from the request since it contains topic IDs and names. @@ -92,6 +92,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, val fetchData = request.fetchData(topicNames.asJava) val fetchParams = new FetchParams( + request.version, FetchRequest.FUTURE_LOCAL_REPLICA_ID, -1, 0L, // timeout is 0 so that the callback will be executed immediately @@ -111,7 +112,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, if (partitionData == null) throw new IllegalStateException(s"Failed to fetch data for partitions ${fetchData.keySet().toArray.mkString(",")}") - partitionData.toMap.asJava + partitionData.toMap } override def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = { @@ -135,8 +136,8 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, new OffsetAndEpoch(localLogStartOffset, epoch.orElse(0)) } - override def fetchEpochEndOffsets(partitions: util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): util.Map[TopicPartition, EpochEndOffset] = { - partitions.asScala.map { case (tp, epochData) => + override def fetchEpochEndOffsets(partitions: collection.Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + partitions.map { case (tp, epochData) => try { val endOffset = if (epochData.leaderEpoch == UNDEFINED_EPOCH) { new EpochEndOffset() @@ -157,62 +158,58 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, .setPartition(tp.partition) .setErrorCode(Errors.forException(t).code) } - }.asJava + } } - override def buildFetch(partitions: util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[util.Optional[ReplicaFetch]] = { + override def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { // Only include replica in the fetch request if it is not throttled. if (quota.isQuotaExceeded) { - new ResultWithPartitions(util.Optional.empty(), util.Set.of()) + ResultWithPartitions(None, Set.empty) } else { - val selectPartition = selectPartitionToFetch(partitions) - if (selectPartition.isPresent) { - val (tp, fetchState) = selectPartition.get() - buildFetchForPartition(tp, fetchState) - } else { - new ResultWithPartitions(util.Optional.empty(), util.Set.of()) + selectPartitionToFetch(partitions) match { + case Some((tp, fetchState)) => + buildFetchForPartition(tp, fetchState) + case None => + ResultWithPartitions(None, Set.empty) } } } - private def selectPartitionToFetch(partitions: util.Map[TopicPartition, PartitionFetchState]): Optional[(TopicPartition, PartitionFetchState)] = { + private def selectPartitionToFetch(partitions: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = { // Only move one partition at a time to increase its catch-up rate and thus reduce the time spent on // moving any given replica. Replicas are selected in ascending order (lexicographically by topic) from the // partitions that are ready to fetch. Once selected, we will continue fetching the same partition until it // becomes unavailable or is removed. inProgressPartition.foreach { tp => - val fetchStateOpt = Option(partitions.get(tp)) + val fetchStateOpt = partitions.get(tp) fetchStateOpt.filter(_.isReadyForFetch).foreach { fetchState => - return Optional.of((tp, fetchState)) + return Some((tp, fetchState)) } } inProgressPartition = None - val nextPartitionOpt = nextReadyPartition(partitions.asScala.toMap) + val nextPartitionOpt = nextReadyPartition(partitions) nextPartitionOpt.foreach { case (tp, fetchState) => inProgressPartition = Some(tp) info(s"Beginning/resuming copy of partition $tp from offset ${fetchState.fetchOffset}. " + s"Including this partition, there are ${partitions.size} remaining partitions to copy by this thread.") } - nextPartitionOpt match { - case Some((tp, fetchState)) => Optional.of((tp, fetchState)) - case None => Optional.empty() - } + nextPartitionOpt } - private def buildFetchForPartition(topicPartition: TopicPartition, fetchState: PartitionFetchState): ResultWithPartitions[Optional[ReplicaFetch]] = { + private def buildFetchForPartition(topicPartition: TopicPartition, fetchState: PartitionFetchState): ResultWithPartitions[Option[ReplicaFetch]] = { val requestMap = new util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData] val partitionsWithError = mutable.Set[TopicPartition]() try { val logStartOffset = replicaManager.futureLocalLogOrException(topicPartition).logStartOffset val lastFetchedEpoch = if (isTruncationOnFetchSupported) - fetchState.lastFetchedEpoch + fetchState.lastFetchedEpoch.map(_.asInstanceOf[Integer]).toJava else Optional.empty[Integer] - val topicId = fetchState.topicId.orElse(Uuid.ZERO_UUID) + val topicId = fetchState.topicId.getOrElse(Uuid.ZERO_UUID) requestMap.put(topicPartition, new FetchRequest.PartitionData(topicId, fetchState.fetchOffset, logStartOffset, fetchSize, Optional.of(fetchState.currentLeaderEpoch), lastFetchedEpoch)) } catch { @@ -222,7 +219,7 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, } val fetchRequestOpt = if (requestMap.isEmpty) { - Optional.empty[ReplicaFetch]() + None } else { val version: Short = if (fetchState.topicId.isEmpty) 12 @@ -231,10 +228,10 @@ class LocalLeaderEndPoint(sourceBroker: BrokerEndPoint, // Set maxWait and minBytes to 0 because the response should return immediately if // the future log has caught up with the current log of the partition val requestBuilder = FetchRequest.Builder.forReplica(version, replicaId, -1, 0, 0, requestMap).setMaxBytes(maxBytes) - Optional.of(new ReplicaFetch(requestMap, requestBuilder)) + Some(ReplicaFetch(requestMap, requestBuilder)) } - new ResultWithPartitions(fetchRequestOpt, partitionsWithError.asJava) + ResultWithPartitions(fetchRequestOpt, partitionsWithError) } private def nextReadyPartition(partitions: Map[TopicPartition, PartitionFetchState]): Option[(TopicPartition, PartitionFetchState)] = { diff --git a/core/src/main/scala/kafka/server/MetadataCache.scala b/core/src/main/scala/kafka/server/MetadataCache.scala new file mode 100644 index 0000000000000..d8fd26c2b3b23 --- /dev/null +++ b/core/src/main/scala/kafka/server/MetadataCache.scala @@ -0,0 +1,222 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.server.metadata.KRaftMetadataCache +import org.apache.kafka.admin.BrokerMetadata +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.MetadataResponseData +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common._ +import org.apache.kafka.image.MetadataImage +import org.apache.kafka.metadata.{BrokerRegistration, LeaderAndIsr, PartitionRegistration} +import org.apache.kafka.server.common.{FinalizedFeatures, KRaftVersion, MetadataVersion} + +import java.util +import java.util.Collections +import java.util.concurrent.ThreadLocalRandom +import java.util.function.Supplier +import scala.collection._ +import scala.jdk.CollectionConverters.CollectionHasAsScala + +/** + * Used to represent the controller id cached in the metadata cache of the broker. This trait is + * extended to represent if the controller is KRaft controller or Zk controller. + */ +sealed trait CachedControllerId { + val id: Int +} + +case class ZkCachedControllerId(id: Int) extends CachedControllerId +case class KRaftCachedControllerId(id: Int) extends CachedControllerId + +trait MetadataCache { + /** + * Return topic metadata for a given set of topics and listener. See KafkaApis#handleTopicMetadataRequest for details + * on the use of the two boolean flags. + * + * @param topics The set of topics. + * @param listenerName The listener name. + * @param errorUnavailableEndpoints If true, we return an error on unavailable brokers. This is used to support + * MetadataResponse version 0. + * @param errorUnavailableListeners If true, return LEADER_NOT_AVAILABLE if the listener is not found on the leader. + * This is used for MetadataResponse versions 0-5. + * @return A collection of topic metadata. + */ + def getTopicMetadata( + topics: collection.Set[String], + listenerName: ListenerName, + errorUnavailableEndpoints: Boolean = false, + errorUnavailableListeners: Boolean = false): collection.Seq[MetadataResponseData.MetadataResponseTopic] + + def getAllTopics(): collection.Set[String] + + def getTopicPartitions(topicName: String): collection.Set[TopicPartition] + + def hasAliveBroker(brokerId: Int): Boolean + + def getAliveBrokers(): Iterable[BrokerMetadata] + + def getTopicId(topicName: String): Uuid + + def getTopicName(topicId: Uuid): Option[String] + + def getAliveBrokerNode(brokerId: Int, listenerName: ListenerName): Option[Node] + + def getAliveBrokerNodes(listenerName: ListenerName): Iterable[Node] + + def getBrokerNodes(listenerName: ListenerName): Iterable[Node] + + def getLeaderAndIsr(topic: String, partitionId: Int): Option[LeaderAndIsr] + + /** + * Return the number of partitions in the given topic, or None if the given topic does not exist. + */ + def numPartitions(topic: String): Option[Int] + + def topicNamesToIds(): util.Map[String, Uuid] + + def topicIdsToNames(): util.Map[Uuid, String] + + def topicIdInfo(): (util.Map[String, Uuid], util.Map[Uuid, String]) + + /** + * Get a partition leader's endpoint + * + * @return If the leader is known, and the listener name is available, return Some(node). If the leader is known, + * but the listener is unavailable, return Some(Node.NO_NODE). Otherwise, if the leader is not known, + * return None + */ + def getPartitionLeaderEndpoint(topic: String, partitionId: Int, listenerName: ListenerName): Option[Node] + + def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node] + + def getControllerId: Option[CachedControllerId] + + def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster + + def contains(topic: String): Boolean + + def contains(tp: TopicPartition): Boolean + + def metadataVersion(): MetadataVersion + + def getRandomAliveBrokerId: Option[Int] + + def features(): FinalizedFeatures +} + +object MetadataCache { + def kRaftMetadataCache( + brokerId: Int, + kraftVersionSupplier: Supplier[KRaftVersion] + ): KRaftMetadataCache = { + new KRaftMetadataCache(brokerId, kraftVersionSupplier) + } + + def toCluster(clusterId: String, image: MetadataImage): Cluster = { + val brokerToNodes = new util.HashMap[Integer, util.List[Node]] + image.cluster().brokers() + .values().stream() + .filter(broker => !broker.fenced()) + .forEach { broker => brokerToNodes.put(broker.id(), broker.nodes()) } + + def getNodes(id: Int): util.List[Node] = brokerToNodes.get(id) + + val partitionInfos = new util.ArrayList[PartitionInfo] + val internalTopics = new util.HashSet[String] + + def toArray(replicas: Array[Int]): Array[Node] = { + util.Arrays.stream(replicas) + .mapToObj(replica => getNodes(replica)) + .flatMap(replica => replica.stream()).toArray(size => new Array[Node](size)) + } + + val topicImages = image.topics().topicsByName().values() + if (topicImages != null) { + topicImages.forEach { topic => + topic.partitions().forEach { (key, value) => + val partitionId = key + val partition = value + val nodes = getNodes(partition.leader) + if (nodes != null) { + nodes.forEach(node => { + partitionInfos.add(new PartitionInfo(topic.name(), + partitionId, + node, + toArray(partition.replicas), + toArray(partition.isr), + getOfflineReplicas(image, partition).stream() + .map(replica => getNodes(replica)) + .flatMap(replica => replica.stream()).toArray(size => new Array[Node](size)))) + }) + if (Topic.isInternal(topic.name())) { + internalTopics.add(topic.name()) + } + } + } + } + } + + val controllerNode = getNodes(getRandomAliveBroker(image).getOrElse(-1)) match { + case null => Node.noNode() + case nodes => nodes.get(0) + } + // Note: the constructor of Cluster does not allow us to reference unregistered nodes. + // So, for example, if partition foo-0 has replicas [1, 2] but broker 2 is not + // registered, we pass its replicas as [1, -1]. This doesn't make a lot of sense, but + // we are duplicating the behavior of ZkMetadataCache, for now. + new Cluster(clusterId, brokerToNodes.values().stream().flatMap(n => n.stream()).collect(util.stream.Collectors.toList()), + partitionInfos, Collections.emptySet(), internalTopics, controllerNode) + } + + private def getOfflineReplicas(image: MetadataImage, + partition: PartitionRegistration, + listenerName: ListenerName = null): util.List[Integer] = { + val offlineReplicas = new util.ArrayList[Integer](0) + for (brokerId <- partition.replicas) { + Option(image.cluster().broker(brokerId)) match { + case None => offlineReplicas.add(brokerId) + case Some(broker) => if (listenerName == null || isReplicaOffline(partition, listenerName, broker)) { + offlineReplicas.add(brokerId) + } + } + } + offlineReplicas + } + + private def isReplicaOffline(partition: PartitionRegistration, listenerName: ListenerName, broker: BrokerRegistration) = + broker.fenced() || !broker.listeners().containsKey(listenerName.value()) || isReplicaInOfflineDir(broker, partition) + + private def isReplicaInOfflineDir(broker: BrokerRegistration, partition: PartitionRegistration): Boolean = + !broker.hasOnlineDir(partition.directory(broker.id())) + + private def getRandomAliveBroker(image: MetadataImage): Option[Int] = { + val aliveBrokers = getAliveBrokers(image).toList + if (aliveBrokers.isEmpty) { + None + } else { + Some(aliveBrokers(ThreadLocalRandom.current().nextInt(aliveBrokers.size)).id) + } + } + + private def getAliveBrokers(image: MetadataImage): Iterable[BrokerMetadata] = { + image.cluster().brokers().values().asScala.filterNot(_.fenced()). + map(b => new BrokerMetadata(b.id, b.rack)) + } +} diff --git a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala index 0caa03ec05299..c353a82550316 100644 --- a/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala +++ b/core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala @@ -17,6 +17,7 @@ package kafka.server +import kafka.raft.RaftManager import kafka.utils.Logging import org.apache.kafka.clients._ import org.apache.kafka.common.metrics.Metrics @@ -27,7 +28,6 @@ import org.apache.kafka.common.security.JaasContext import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.common.{Node, Reconfigurable} -import org.apache.kafka.raft.RaftManager import org.apache.kafka.server.common.{ApiMessageAndVersion, ControllerRequestCompletionHandler, NodeToControllerChannelManager} import org.apache.kafka.server.util.{InterBrokerSendThread, RequestAndCompletionHandler} @@ -37,7 +37,7 @@ import java.util.concurrent.LinkedBlockingDeque import java.util.concurrent.atomic.AtomicReference import scala.collection.Seq import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional, RichOptionalInt} +import scala.jdk.OptionConverters.{RichOption, RichOptionalInt} case class ControllerInformation( node: Option[Node], @@ -55,9 +55,8 @@ object RaftControllerNodeProvider { raftManager: RaftManager[ApiMessageAndVersion], config: KafkaConfig, ): RaftControllerNodeProvider = { - val controllerListenerName = new ListenerName(config.controllerListenerNames.get(0)) - val controllerSecurityProtocol = Option(config.effectiveListenerSecurityProtocolMap.get(controllerListenerName)) - .getOrElse(SecurityProtocol.forName(controllerListenerName.value())) + val controllerListenerName = new ListenerName(config.controllerListenerNames.head) + val controllerSecurityProtocol = config.effectiveListenerSecurityProtocolMap.getOrElse(controllerListenerName, SecurityProtocol.forName(controllerListenerName.value())) val controllerSaslMechanism = config.saslMechanismControllerProtocol new RaftControllerNodeProvider( raftManager, @@ -79,10 +78,10 @@ class RaftControllerNodeProvider( val saslMechanism: String ) extends ControllerNodeProvider with Logging { - private def idToNode(id: Int): Option[Node] = raftManager.client.voterNode(id, listenerName).toScala + private def idToNode(id: Int): Option[Node] = raftManager.voterNode(id, listenerName) override def getControllerInfo(): ControllerInformation = - ControllerInformation(raftManager.client.leaderAndEpoch.leaderId.toScala.flatMap(idToNode), + ControllerInformation(raftManager.leaderAndEpoch.leaderId.toScala.flatMap(idToNode), listenerName, securityProtocol, saslMechanism) } diff --git a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala index 80d41e3b0cf13..94bb6f5140dca 100644 --- a/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala +++ b/core/src/main/scala/kafka/server/RemoteLeaderEndPoint.scala @@ -18,11 +18,11 @@ package kafka.server import java.util.{Collections, Optional} +import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.Logging import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetForLeaderTopic, OffsetForLeaderTopicCollection} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset @@ -30,11 +30,10 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse, OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.LeaderEndPoint -import org.apache.kafka.server.{PartitionFetchState, ReplicaFetch, ResultWithPartitions} import scala.jdk.CollectionConverters._ -import scala.collection.mutable +import scala.collection.{Map, mutable} +import scala.jdk.OptionConverters.RichOption /** * Facilitates fetches from a remote replica leader. @@ -72,7 +71,7 @@ class RemoteLeaderEndPoint(logPrefix: String, override def brokerEndPoint(): BrokerEndPoint = blockingSender.brokerEndPoint() - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + override def fetch(fetchRequest: FetchRequest.Builder): collection.Map[TopicPartition, FetchData] = { val clientResponse = try { blockingSender.sendRequest(fetchRequest) } catch { @@ -86,10 +85,10 @@ class RemoteLeaderEndPoint(logPrefix: String, if (fetchResponse.error == Errors.FETCH_SESSION_TOPIC_ID_ERROR) { throw Errors.forCode(fetchResponse.error().code()).exception() } else { - java.util.Map.of() + Map.empty } } else { - fetchResponse.responseData(fetchSessionHandler.sessionTopicNames, clientResponse.requestHeader().apiVersion()) + fetchResponse.responseData(fetchSessionHandler.sessionTopicNames, clientResponse.requestHeader().apiVersion()).asScala } } @@ -128,14 +127,14 @@ class RemoteLeaderEndPoint(logPrefix: String, } } - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { if (partitions.isEmpty) { debug("Skipping leaderEpoch request since all partitions do not have an epoch") - return java.util.Map.of() + return Map.empty } val topics = new OffsetForLeaderTopicCollection(partitions.size) - partitions.forEach { (topicPartition, epochData) => + partitions.foreachEntry { (topicPartition, epochData) => var topic = topics.find(topicPartition.topic) if (topic == null) { topic = new OffsetForLeaderTopic().setTopic(topicPartition.topic) @@ -156,39 +155,40 @@ class RemoteLeaderEndPoint(logPrefix: String, val tp = new TopicPartition(offsetForLeaderTopicResult.topic, offsetForLeaderPartitionResult.partition) tp -> offsetForLeaderPartitionResult } - }.toMap.asJava + }.toMap } catch { case t: Throwable => warn(s"Error when sending leader epoch request for $partitions", t) // if we get any unexpected exception, mark all partitions with an error val error = Errors.forException(t) - partitions.asScala.map { case (tp, _) => + partitions.map { case (tp, _) => tp -> new EpochEndOffset() .setPartition(tp.partition) .setErrorCode(error.code) - }.asJava + } } } - override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = { + override def buildFetch(partitions: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { val partitionsWithError = mutable.Set[TopicPartition]() + val builder = fetchSessionHandler.newBuilder(partitions.size, false) - partitions.forEach { (topicPartition, fetchState) => + partitions.foreachEntry { (topicPartition, fetchState) => // We will not include a replica in the fetch request if it should be throttled. if (fetchState.isReadyForFetch && !shouldFollowerThrottle(quota, fetchState, topicPartition)) { try { val logStartOffset = replicaManager.localLogOrException(topicPartition).logStartOffset val lastFetchedEpoch = if (isTruncationOnFetchSupported) - fetchState.lastFetchedEpoch() + fetchState.lastFetchedEpoch.map(_.asInstanceOf[Integer]).toJava else Optional.empty[Integer] builder.add(topicPartition, new FetchRequest.PartitionData( - fetchState.topicId().orElse(Uuid.ZERO_UUID), - fetchState.fetchOffset(), + fetchState.topicId.getOrElse(Uuid.ZERO_UUID), + fetchState.fetchOffset, logStartOffset, fetchSize, - Optional.of(fetchState.currentLeaderEpoch()), + Optional.of(fetchState.currentLeaderEpoch), lastFetchedEpoch)) } catch { case _: KafkaStorageException => @@ -201,10 +201,10 @@ class RemoteLeaderEndPoint(logPrefix: String, val fetchData = builder.build() val fetchRequestOpt = if (fetchData.sessionPartitions.isEmpty && fetchData.toForget.isEmpty) { - Optional.empty[ReplicaFetch] + None } else { val metadataVersion = metadataVersionSupplier() - val version: Short = if (!fetchData.canUseTopicIds) { + val version: Short = if (metadataVersion.fetchRequestVersion >= 13 && !fetchData.canUseTopicIds) { 12 } else { metadataVersion.fetchRequestVersion @@ -215,10 +215,10 @@ class RemoteLeaderEndPoint(logPrefix: String, .removed(fetchData.toForget) .replaced(fetchData.toReplace) .metadata(fetchData.metadata) - Optional.of(new ReplicaFetch(fetchData.sessionPartitions(), requestBuilder)) + Some(ReplicaFetch(fetchData.sessionPartitions(), requestBuilder)) } - new ResultWithPartitions(fetchRequestOpt, partitionsWithError.asJava) + ResultWithPartitions(fetchRequestOpt, partitionsWithError) } /** diff --git a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala index e0473166b365d..7b2d7863077ea 100644 --- a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsManager.scala @@ -20,7 +20,6 @@ package kafka.server import org.apache.kafka.common.TopicPartition import org.apache.kafka.server.common.DirectoryEventHandler import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.storage.log.metrics.BrokerTopicStats class ReplicaAlterLogDirsManager(brokerConfig: KafkaConfig, @@ -36,7 +35,7 @@ class ReplicaAlterLogDirsManager(brokerConfig: KafkaConfig, override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaAlterLogDirsThread = { val threadName = s"ReplicaAlterLogDirsThread-$fetcherId" - val leader: LeaderEndPoint = new LocalLeaderEndPoint(sourceBroker, brokerConfig, replicaManager, quotaManager) + val leader = new LocalLeaderEndPoint(sourceBroker, brokerConfig, replicaManager, quotaManager) new ReplicaAlterLogDirsThread(threadName, leader, failedPartitions, replicaManager, quotaManager, brokerTopicStats, brokerConfig.replicaFetchBackoffMs, directoryEventHandler) } diff --git a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala index 81bb41100f78a..56492de34856f 100644 --- a/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaAlterLogDirsThread.scala @@ -24,10 +24,7 @@ import org.apache.kafka.common.requests.FetchResponse import org.apache.kafka.server.common.{DirectoryEventHandler, OffsetAndEpoch, TopicIdPartition} import org.apache.kafka.storage.internals.log.{LogAppendInfo, LogStartOffsetIncrementReason} import org.apache.kafka.storage.log.metrics.BrokerTopicStats -import org.apache.kafka.server.LeaderEndPoint -import org.apache.kafka.server.PartitionFetchState -import java.util.Optional import java.util.concurrent.ConcurrentHashMap import scala.collection.{Map, Set} @@ -52,7 +49,7 @@ class ReplicaAlterLogDirsThread(name: String, // Visible for testing private[server] val promotionStates: ConcurrentHashMap[TopicPartition, PromotionState] = new ConcurrentHashMap() - override protected def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = { + override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = { replicaMgr.futureLocalLogOrException(topicPartition).latestEpoch } @@ -64,17 +61,14 @@ class ReplicaAlterLogDirsThread(name: String, replicaMgr.futureLocalLogOrException(topicPartition).logEndOffset } - override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] = { + override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = { replicaMgr.futureLocalLogOrException(topicPartition).endOffsetForEpoch(epoch) } // process fetched data - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = { + override def processPartitionData(topicPartition: TopicPartition, + fetchOffset: Long, + partitionData: FetchData): Option[LogAppendInfo] = { val partition = replicaMgr.getPartitionOrException(topicPartition) val futureLog = partition.futureLocalLogOrException val records = toMemoryRecords(FetchResponse.recordsOrFail(partitionData)) @@ -84,7 +78,7 @@ class ReplicaAlterLogDirsThread(name: String, topicPartition, fetchOffset, futureLog.logEndOffset)) val logAppendInfo = if (records.sizeInBytes() > 0) - partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = true, partitionLeaderEpoch) + partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = true) else None diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala index 96308fb400f2d..621f50f9168cd 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherManager.scala @@ -22,12 +22,12 @@ import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.LeaderEndPoint class ReplicaFetcherManager(brokerConfig: KafkaConfig, protected val replicaManager: ReplicaManager, metrics: Metrics, time: Time, + threadNamePrefix: Option[String] = None, quotaManager: ReplicationQuotaManager, metadataVersionSupplier: () => MetadataVersion, brokerEpochSupplier: () => Long) @@ -37,16 +37,17 @@ class ReplicaFetcherManager(brokerConfig: KafkaConfig, numFetchers = brokerConfig.numReplicaFetchers) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { - val threadName = s"ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" + val prefix = threadNamePrefix.map(tp => s"$tp:").getOrElse("") + val threadName = s"${prefix}ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" val logContext = new LogContext(s"[ReplicaFetcher replicaId=${brokerConfig.brokerId}, leaderId=${sourceBroker.id}, " + s"fetcherId=$fetcherId] ") val endpoint = new BrokerBlockingSender(sourceBroker, brokerConfig, metrics, time, fetcherId, s"broker-${brokerConfig.brokerId}-fetcher-$fetcherId", logContext) val fetchSessionHandler = new FetchSessionHandler(logContext, sourceBroker.id) - val leader: LeaderEndPoint = new RemoteLeaderEndPoint(logContext.logPrefix, endpoint, fetchSessionHandler, brokerConfig, + val leader = new RemoteLeaderEndPoint(logContext.logPrefix, endpoint, fetchSessionHandler, brokerConfig, replicaManager, quotaManager, metadataVersionSupplier, brokerEpochSupplier) new ReplicaFetcherThread(threadName, leader, brokerConfig, failedPartitions, replicaManager, - quotaManager, logContext.logPrefix) + quotaManager, logContext.logPrefix, metadataVersionSupplier) } def shutdown(): Unit = { diff --git a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala index fa2f6bb7f35bd..7f0c6d41dbdf6 100644 --- a/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala +++ b/core/src/main/scala/kafka/server/ReplicaFetcherThread.scala @@ -18,12 +18,11 @@ package kafka.server import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchResponse -import org.apache.kafka.server.common.OffsetAndEpoch +import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.storage.internals.log.{LogAppendInfo, LogStartOffsetIncrementReason} -import org.apache.kafka.server.LeaderEndPoint -import java.util.Optional import scala.collection.mutable class ReplicaFetcherThread(name: String, @@ -32,7 +31,8 @@ class ReplicaFetcherThread(name: String, failedPartitions: FailedPartitions, replicaMgr: ReplicaManager, quota: ReplicaQuota, - logPrefix: String) + logPrefix: String, + metadataVersionSupplier: () => MetadataVersion) extends AbstractFetcherThread(name = name, clientId = name, leader = leader, @@ -47,7 +47,7 @@ class ReplicaFetcherThread(name: String, // Visible for testing private[server] val partitionsWithNewHighWatermark = mutable.Buffer[TopicPartition]() - override protected def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = { + override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = { replicaMgr.localLogOrException(topicPartition).latestEpoch } @@ -59,7 +59,7 @@ class ReplicaFetcherThread(name: String, replicaMgr.localLogOrException(topicPartition).logEndOffset } - override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] = { + override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = { replicaMgr.localLogOrException(topicPartition).endOffsetForEpoch(epoch) } @@ -98,17 +98,16 @@ class ReplicaFetcherThread(name: String, } // process fetched data - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = { + override def processPartitionData(topicPartition: TopicPartition, + fetchOffset: Long, + partitionData: FetchData): Option[LogAppendInfo] = { val logTrace = isTraceEnabled val partition = replicaMgr.getPartitionOrException(topicPartition) val log = partition.localLogOrException val records = toMemoryRecords(FetchResponse.recordsOrFail(partitionData)) + maybeWarnIfOversizedRecords(records, topicPartition) + if (fetchOffset != log.logEndOffset) throw new IllegalStateException("Offset mismatch for partition %s: fetched offset = %d, log end offset = %d.".format( topicPartition, fetchOffset, log.logEndOffset)) @@ -118,7 +117,7 @@ class ReplicaFetcherThread(name: String, .format(log.logEndOffset, topicPartition, records.sizeInBytes, partitionData.highWatermark)) // Append the leader's messages to the log - val logAppendInfo = partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false, partitionLeaderEpoch) + val logAppendInfo = partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false) if (logTrace) trace("Follower has replica log end offset %d after appending %d bytes of messages for partition %s" @@ -128,7 +127,7 @@ class ReplicaFetcherThread(name: String, // For the follower replica, we do not need to keep its segment base offset and physical position. // These values will be computed upon becoming leader or handling a preferred read replica fetch. var maybeUpdateHighWatermarkMessage = s"but did not update replica high watermark" - log.maybeUpdateHighWatermark(partitionData.highWatermark).ifPresent { newHighWatermark => + log.maybeUpdateHighWatermark(partitionData.highWatermark).foreach { newHighWatermark => maybeUpdateHighWatermarkMessage = s"and updated replica high watermark to $newHighWatermark" partitionsWithNewHighWatermark += topicPartition } @@ -158,15 +157,29 @@ class ReplicaFetcherThread(name: String, } } + private def maybeWarnIfOversizedRecords(records: MemoryRecords, topicPartition: TopicPartition): Unit = { + // oversized messages don't cause replication to fail from fetch request version 3 (KIP-74) + if (metadataVersionSupplier().fetchRequestVersion <= 2 && records.sizeInBytes > 0 && records.validBytes <= 0) + error(s"Replication is failing due to a message that is greater than replica.fetch.max.bytes for partition $topicPartition. " + + "This generally occurs when the max.message.bytes has been overridden to exceed this value and a suitably large " + + "message has also been sent. To fix this problem increase replica.fetch.max.bytes in your broker config to be " + + "equal or larger than your settings for max.message.bytes, both at a broker and topic level.") + } + /** * Truncate the log for each partition's epoch based on leader's returned epoch and offset. * The logic for finding the truncation offset is implemented in AbstractFetcherThread.getOffsetTruncationState */ override def truncate(tp: TopicPartition, offsetTruncationState: OffsetTruncationState): Unit = { val partition = replicaMgr.getPartitionOrException(tp) + val log = partition.localLogOrException partition.truncateTo(offsetTruncationState.offset, isFuture = false) + if (offsetTruncationState.offset < log.highWatermark) + warn(s"Truncating $tp to offset ${offsetTruncationState.offset} below high watermark " + + s"${log.highWatermark}") + // mark the future replica for truncation only when we do last truncation if (offsetTruncationState.truncationCompleted) replicaMgr.replicaAlterLogDirsManager.markPartitionsForTruncation(brokerConfig.brokerId, tp, diff --git a/core/src/main/scala/kafka/server/ReplicaManager.scala b/core/src/main/scala/kafka/server/ReplicaManager.scala index 10b41b88bd092..57a2d9a658b1d 100644 --- a/core/src/main/scala/kafka/server/ReplicaManager.scala +++ b/core/src/main/scala/kafka/server/ReplicaManager.scala @@ -19,22 +19,22 @@ package kafka.server import com.yammer.metrics.core.Meter import kafka.cluster.{Partition, PartitionListener} import kafka.controller.StateChangeLogger -import kafka.log.LogManager +import kafka.log.remote.RemoteLogManager +import kafka.log.{LogManager, UnifiedLog} import kafka.server.HostedPartition.Online import kafka.server.QuotaFactory.QuotaManagers import kafka.server.ReplicaManager.{AtMinIsrPartitionCountMetricName, FailedIsrUpdatesPerSecMetricName, IsrExpandsPerSecMetricName, IsrShrinksPerSecMetricName, LeaderCountMetricName, OfflineReplicaCountMetricName, PartitionCountMetricName, PartitionsWithLateTransactionsCountMetricName, ProducerIdCountMetricName, ReassigningPartitionsMetricName, UnderMinIsrPartitionCountMetricName, UnderReplicatedPartitionsMetricName, createLogReadResult, isListOffsetsTimestampUnsupported} import kafka.server.share.DelayedShareFetch import kafka.utils._ -import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.errors._ -import org.apache.kafka.common.internals.{Plugin, Topic} +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult import org.apache.kafka.common.message.DescribeLogDirsResponseData.DescribeLogDirsTopic import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderTopic import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.{EpochEndOffset, OffsetForLeaderTopicResult} -import org.apache.kafka.common.message.{DescribeLogDirsResponseData, DescribeProducersResponseData} +import org.apache.kafka.common.message.{DescribeLogDirsResponseData, DescribeProducersResponseData, FetchResponseData} import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors @@ -45,27 +45,20 @@ import org.apache.kafka.common.replica._ import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests._ -import org.apache.kafka.common.utils.{Exit, Time, Utils} -import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig} +import org.apache.kafka.common.utils.{Exit, Time} +import org.apache.kafka.common.{IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.image.{LocalReplicaChanges, MetadataImage, TopicsDelta} import org.apache.kafka.metadata.LeaderConstants.NO_LEADER -import org.apache.kafka.metadata.MetadataCache -import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition} -import org.apache.kafka.server.log.remote.TopicPartitionLog -import org.apache.kafka.server.config.ReplicationConfigs -import org.apache.kafka.server.log.remote.storage.RemoteLogManager +import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, common} +import org.apache.kafka.server.common.{DirectoryEventHandler, RequestLocal, StopPartition, TopicOptionalIdPartition} import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets, DeleteRecordsPartitionStatus, ListOffsetsPartitionStatus, TopicPartitionOperationKey} +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.{DelayedShareFetchKey, DelayedShareFetchPartitionKey} import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation -import org.apache.kafka.server.util.timer.{SystemTimer, TimerTask} import org.apache.kafka.server.util.{Scheduler, ShutdownableThread} -import org.apache.kafka.server.{ActionQueue, DelayedActionQueue, LogReadResult, common} import org.apache.kafka.storage.internals.checkpoint.{LazyOffsetCheckpoints, OffsetCheckpointFile, OffsetCheckpoints} -import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogReadInfo, OffsetResultHolder, RecordValidationException, RemoteLogReadResult, RemoteStorageFetchInfo, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LeaderHwChange, LogAppendInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogReadInfo, OffsetResultHolder, RecordValidationException, RemoteLogReadResult, RemoteStorageFetchInfo, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import java.io.File @@ -73,12 +66,12 @@ import java.lang.{Long => JLong} import java.nio.file.{Files, Paths} import java.util import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.{CompletableFuture, ConcurrentHashMap, Future, RejectedExecutionException, TimeUnit} +import java.util.concurrent.locks.Lock +import java.util.concurrent.{CompletableFuture, Future, RejectedExecutionException, TimeUnit} import java.util.{Collections, Optional, OptionalInt, OptionalLong} -import java.util.function.Consumer import scala.collection.{Map, Seq, Set, immutable, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOptional +import scala.jdk.OptionConverters.{RichOption, RichOptional} /* * Result metadata of a log append operation on the log @@ -106,6 +99,64 @@ case class LogDeleteRecordsResult(requestedOffset: Long, lowWatermark: Long, exc } } +/** + * Result metadata of a log read operation on the log + * @param info @FetchDataInfo returned by the @Log read + * @param divergingEpoch Optional epoch and end offset which indicates the largest epoch such + * that subsequent records are known to diverge on the follower/consumer + * @param highWatermark high watermark of the local replica + * @param leaderLogStartOffset The log start offset of the leader at the time of the read + * @param leaderLogEndOffset The log end offset of the leader at the time of the read + * @param followerLogStartOffset The log start offset of the follower taken from the Fetch request + * @param fetchTimeMs The time the fetch was received + * @param lastStableOffset Current LSO or None if the result has an exception + * @param preferredReadReplica the preferred read replica to be used for future fetches + * @param exception Exception if error encountered while reading from the log + */ +case class LogReadResult(info: FetchDataInfo, + divergingEpoch: Option[FetchResponseData.EpochEndOffset], + highWatermark: Long, + leaderLogStartOffset: Long, + leaderLogEndOffset: Long, + followerLogStartOffset: Long, + fetchTimeMs: Long, + lastStableOffset: Option[Long], + preferredReadReplica: Option[Int] = None, + exception: Option[Throwable] = None) { + + def error: Errors = exception match { + case None => Errors.NONE + case Some(e) => Errors.forException(e) + } + + def toFetchPartitionData(isReassignmentFetch: Boolean): FetchPartitionData = new FetchPartitionData( + this.error, + this.highWatermark, + this.leaderLogStartOffset, + this.info.records, + this.divergingEpoch.toJava, + if (this.lastStableOffset.isDefined) OptionalLong.of(this.lastStableOffset.get) else OptionalLong.empty(), + this.info.abortedTransactions, + if (this.preferredReadReplica.isDefined) OptionalInt.of(this.preferredReadReplica.get) else OptionalInt.empty(), + isReassignmentFetch) + + override def toString: String = { + "LogReadResult(" + + s"info=$info, " + + s"divergingEpoch=$divergingEpoch, " + + s"highWatermark=$highWatermark, " + + s"leaderLogStartOffset=$leaderLogStartOffset, " + + s"leaderLogEndOffset=$leaderLogEndOffset, " + + s"followerLogStartOffset=$followerLogStartOffset, " + + s"fetchTimeMs=$fetchTimeMs, " + + s"preferredReadReplica=$preferredReadReplica, " + + s"lastStableOffset=$lastStableOffset, " + + s"error=$error" + + ")" + } + +} + /** * Trait to represent the state of hosted partitions. We create a concrete (active) Partition * instance when the broker receives a LeaderAndIsr request from the controller or a metadata @@ -172,35 +223,34 @@ object ReplicaManager { ListOffsetsRequest.LATEST_TIMESTAMP -> 1.toShort, ListOffsetsRequest.MAX_TIMESTAMP -> 7.toShort, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP -> 8.toShort, - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort, - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP -> 11.toShort + ListOffsetsRequest.LATEST_TIERED_TIMESTAMP -> 9.toShort ) def createLogReadResult(highWatermark: Long, leaderLogStartOffset: Long, leaderLogEndOffset: Long, e: Throwable): LogReadResult = { - new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - Optional.empty(), + LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, highWatermark, leaderLogStartOffset, leaderLogEndOffset, - -1L, - -1L, - OptionalLong.empty(), - Optional.of(e)) + followerLogStartOffset = -1L, + fetchTimeMs = -1L, + lastStableOffset = None, + exception = Some(e)) } def createLogReadResult(e: Throwable): LogReadResult = { - new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - Optional.empty(), - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - -1L, - OptionalLong.empty(), - Optional.of(e)) + LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, + highWatermark = UnifiedLog.UnknownOffset, + leaderLogStartOffset = UnifiedLog.UnknownOffset, + leaderLogEndOffset = UnifiedLog.UnknownOffset, + followerLogStartOffset = UnifiedLog.UnknownOffset, + fetchTimeMs = -1L, + lastStableOffset = None, + exception = Some(e)) } private[server] def isListOffsetsTimestampUnsupported(timestamp: JLong, version: Short): Boolean = { @@ -220,21 +270,20 @@ class ReplicaManager(val config: KafkaConfig, logDirFailureChannel: LogDirFailureChannel, val alterPartitionManager: AlterPartitionManager, val brokerTopicStats: BrokerTopicStats = new BrokerTopicStats(), + val isShuttingDown: AtomicBoolean = new AtomicBoolean(false), delayedProducePurgatoryParam: Option[DelayedOperationPurgatory[DelayedProduce]] = None, delayedFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedFetch]] = None, delayedDeleteRecordsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedDeleteRecords]] = None, delayedRemoteFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteFetch]] = None, delayedRemoteListOffsetsPurgatoryParam: Option[DelayedOperationPurgatory[DelayedRemoteListOffsets]] = None, delayedShareFetchPurgatoryParam: Option[DelayedOperationPurgatory[DelayedShareFetch]] = None, + threadNamePrefix: Option[String] = None, val brokerEpochSupplier: () => Long = () => -1, addPartitionsToTxnManager: Option[AddPartitionsToTxnManager] = None, val directoryEventHandler: DirectoryEventHandler = DirectoryEventHandler.NOOP, val defaultActionQueue: ActionQueue = new DelayedActionQueue ) extends Logging { private val metricsGroup = new KafkaMetricsGroup(this.getClass) - private val addPartitionsToTxnConfig = new AddPartitionsToTxnConfig(config) - private val shareFetchPurgatoryName = "ShareFetch" - private val delayedShareFetchTimer = new SystemTimer(shareFetchPurgatoryName) val delayedProducePurgatory = delayedProducePurgatoryParam.getOrElse( new DelayedOperationPurgatory[DelayedProduce]( @@ -256,14 +305,17 @@ class ReplicaManager(val config: KafkaConfig, "RemoteListOffsets", config.brokerId)) val delayedShareFetchPurgatory = delayedShareFetchPurgatoryParam.getOrElse( new DelayedOperationPurgatory[DelayedShareFetch]( - shareFetchPurgatoryName, delayedShareFetchTimer, config.brokerId, + "ShareFetch", config.brokerId, config.shareGroupConfig.shareFetchPurgatoryPurgeIntervalRequests)) /* epoch of the controller that last changed the leader */ + @volatile private[server] var controllerEpoch: Int = 0 protected val localBrokerId = config.brokerId - protected val allPartitions = new ConcurrentHashMap[TopicPartition, HostedPartition] + protected val allPartitions = new Pool[TopicPartition, HostedPartition]( + valueFactory = Some(tp => HostedPartition.Online(Partition(tp, time, this))) + ) private val replicaStateChangeLock = new Object - val replicaFetcherManager = createReplicaFetcherManager(metrics, time, quotaManagers.follower) + val replicaFetcherManager = createReplicaFetcherManager(metrics, time, threadNamePrefix, quotaManagers.follower) private[server] val replicaAlterLogDirsManager = createReplicaAlterLogDirsManager(quotaManagers.alterLogDirs, brokerTopicStats) private val highWatermarkCheckPointThreadStarted = new AtomicBoolean(false) @volatile private[server] var highWatermarkCheckpoints: Map[String, OffsetCheckpointFile] = logManager.liveLogDirs.map(dir => @@ -284,7 +336,7 @@ class ReplicaManager(val config: KafkaConfig, } // Visible for testing - private[server] val replicaSelectorPlugin: Option[Plugin[ReplicaSelector]] = createReplicaSelector(metrics) + private[server] val replicaSelectorOpt: Option[ReplicaSelector] = createReplicaSelector() metricsGroup.newGauge(LeaderCountMetricName, () => leaderPartitionsIterator.size) // Visible for testing @@ -344,7 +396,7 @@ class ReplicaManager(val config: KafkaConfig, } private def maybeRemoveTopicMetrics(topic: String): Unit = { - val topicHasNonOfflinePartition = allPartitions.values.asScala.exists { + val topicHasNonOfflinePartition = allPartitions.values.exists { case online: HostedPartition.Online => topic == online.partition.topic case HostedPartition.None | HostedPartition.Offline(_) => false } @@ -392,14 +444,6 @@ class ReplicaManager(val config: KafkaConfig, delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchKeys) } - /** - * Add a timer task to the delayedShareFetchTimer. - * @param timerTask The timer task to be added to the delayedShareFetchTimer - */ - private[server] def addShareFetchTimerRequest(timerTask: TimerTask): Unit = { - delayedShareFetchTimer.add(timerTask) - } - /** * Registers the provided listener to the partition iff the partition is online. */ @@ -483,11 +527,6 @@ class ReplicaManager(val config: KafkaConfig, errorMap } - def topicIdPartition(topicPartition: TopicPartition): TopicIdPartition = { - val topicId = metadataCache.getTopicId(topicPartition.topic()) - new TopicIdPartition(topicId, topicPartition) - } - def getPartition(topicPartition: TopicPartition): HostedPartition = { Option(allPartitions.get(topicPartition)).getOrElse(HostedPartition.None) } @@ -502,13 +541,8 @@ class ReplicaManager(val config: KafkaConfig, // Visible for testing def createPartition(topicPartition: TopicPartition): Partition = { val partition = Partition(topicPartition, time, this) - addOnlinePartition(topicPartition, partition) - partition - } - - // Visible for testing - private[server] def addOnlinePartition(topicPartition: TopicPartition, partition: Partition): Unit = { allPartitions.put(topicPartition, HostedPartition.Online(partition)) + partition } def onlinePartition(topicPartition: TopicPartition): Option[Partition] = { @@ -521,14 +555,14 @@ class ReplicaManager(val config: KafkaConfig, // An iterator over all non offline partitions. This is a weakly consistent iterator; a partition made offline after // the iterator has been constructed could still be returned by this iterator. private def onlinePartitionsIterator: Iterator[Partition] = { - allPartitions.values.asScala.iterator.flatMap { + allPartitions.values.iterator.flatMap { case HostedPartition.Online(partition) => Some(partition) case _ => None } } private def offlinePartitionCount: Int = { - allPartitions.values.asScala.iterator.count(_.getClass == HostedPartition.Offline.getClass) + allPartitions.values.iterator.count(_.getClass == HostedPartition.Offline.getClass) } def getPartitionOrException(topicPartition: TopicPartition): Partition = { @@ -543,27 +577,6 @@ class ReplicaManager(val config: KafkaConfig, } } - def getPartitionOrException(topicIdPartition: TopicIdPartition): Partition = { - getPartitionOrError(topicIdPartition.topicPartition()) match { - case Left(Errors.KAFKA_STORAGE_ERROR) => - throw new KafkaStorageException(s"Partition ${topicIdPartition.topicPartition()} is in an offline log directory") - - case Left(error) => - throw error.exception(s"Error while fetching partition state for ${topicIdPartition.topicPartition()}") - - case Right(partition) => - // Get topic id for an existing partition from disk if topicId is none get it from the metadata cache - val topicId = partition.topicId.getOrElse(metadataCache.getTopicId(topicIdPartition.topic())) - // If topic id is set to zero_uuid fall back to non topic id aware behaviour - val topicIdNotProvided = topicIdPartition.topicId() == Uuid.ZERO_UUID - if (topicIdNotProvided || topicId == topicIdPartition.topicId()) { - partition - } else { - throw new UnknownTopicIdException(s"Partition $topicIdPartition's topic id doesn't match the one on disk $topicId.'") - } - } - } - def getPartitionOrError(topicPartition: TopicPartition): Either[Errors, Partition] = { getPartition(topicPartition) match { case HostedPartition.Online(partition) => @@ -608,50 +621,6 @@ class ReplicaManager(val config: KafkaConfig, def addToActionQueue(action: Runnable): Unit = defaultActionQueue.add(action) - /** - * Append messages to leader replicas of the partition, without waiting on replication. - * - * Noted that all pending delayed check operations are stored in a queue. All callers to ReplicaManager.appendRecordsToLeader() - * are expected to call ActionQueue.tryCompleteActions for all affected partitions, without holding any conflicting - * locks. - * - * @param requiredAcks the required acks -- it is only used to ensure that the append meets the - * required acks. - * @param internalTopicsAllowed boolean indicating whether internal topics can be appended to - * @param origin source of the append request (ie, client, replication, coordinator) - * @param entriesPerPartition the records per topic partition to be appended. - * If topic partition contains Uuid.ZERO_UUID as topicId the method - * will fall back to the old behaviour and rely on topic name. - * @param requestLocal container for the stateful instances scoped to this request -- this must correspond to the - * thread calling this method - * @param actionQueue the action queue to use. ReplicaManager#defaultActionQueue is used by default. - * @param verificationGuards the mapping from topic partition to verification guards if transaction verification is used - */ - def appendRecordsToLeader( - requiredAcks: Short, - internalTopicsAllowed: Boolean, - origin: AppendOrigin, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - requestLocal: RequestLocal = RequestLocal.noCaching, - actionQueue: ActionQueue = this.defaultActionQueue, - verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty - ): Map[TopicIdPartition, LogAppendResult] = { - val startTimeMs = time.milliseconds - val localProduceResultsWithTopicId = appendToLocalLog( - internalTopicsAllowed = internalTopicsAllowed, - origin, - entriesPerPartition, - requiredAcks, - requestLocal, - verificationGuards.toMap - ) - debug("Produce to local log in %d ms".format(time.milliseconds - startTimeMs)) - - addCompletePurgatoryAction(actionQueue, localProduceResultsWithTopicId) - - localProduceResultsWithTopicId - } - /** * Append messages to leader replicas of the partition, and wait for them to be replicated to other replicas; * the callback function will be triggered either when timeout or the required acks are satisfied; @@ -665,47 +634,48 @@ class ReplicaManager(val config: KafkaConfig, * @param requiredAcks number of replicas who must acknowledge the append before sending the response * @param internalTopicsAllowed boolean indicating whether internal topics can be appended to * @param origin source of the append request (ie, client, replication, coordinator) - * @param entriesPerPartition the records per topic partition to be appended. - * If topic partition contains Uuid.ZERO_UUID as topicId the method - * will fall back to the old behaviour and rely on topic name. + * @param entriesPerPartition the records per partition to be appended * @param responseCallback callback for sending the response + * @param delayedProduceLock lock for the delayed actions * @param recordValidationStatsCallback callback for updating stats on record conversions * @param requestLocal container for the stateful instances scoped to this request -- this must correspond to the * thread calling this method + * @param actionQueue the action queue to use. ReplicaManager#defaultActionQueue is used by default. * @param verificationGuards the mapping from topic partition to verification guards if transaction verification is used */ def appendRecords(timeout: Long, requiredAcks: Short, internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, - recordValidationStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicPartition, MemoryRecords], + responseCallback: Map[TopicPartition, PartitionResponse] => Unit, + delayedProduceLock: Option[Lock] = None, + recordValidationStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, + actionQueue: ActionQueue = this.defaultActionQueue, verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty): Unit = { if (!isValidRequiredAcks(requiredAcks)) { sendInvalidRequiredAcksResponse(entriesPerPartition, responseCallback) return } - val localProduceResults = appendRecordsToLeader( - requiredAcks, - internalTopicsAllowed, - origin, - entriesPerPartition, - requestLocal, - defaultActionQueue, - verificationGuards - ) + val sTime = time.milliseconds + val localProduceResultsWithTopicId = appendToLocalLog(internalTopicsAllowed = internalTopicsAllowed, + origin, entriesPerPartition, requiredAcks, requestLocal, verificationGuards.toMap) + debug("Produce to local log in %d ms".format(time.milliseconds - sTime)) + val localProduceResults : Map[TopicPartition, LogAppendResult] = localProduceResultsWithTopicId.map { + case(k, v) => (k.topicPartition, v)} val produceStatus = buildProducePartitionStatus(localProduceResults) + addCompletePurgatoryAction(actionQueue, localProduceResultsWithTopicId) recordValidationStatsCallback(localProduceResults.map { case (k, v) => k -> v.info.recordValidationStats }) maybeAddDelayedProduce( requiredAcks, + delayedProduceLock, timeout, entriesPerPartition, localProduceResults, @@ -726,6 +696,7 @@ class ReplicaManager(val config: KafkaConfig, * @param recordValidationStatsCallback callback for updating stats on record conversions * @param requestLocal container for the stateful instances scoped to this request -- this must correspond to the * thread calling this method + * @param actionQueue the action queue to use. ReplicaManager#defaultActionQueue is used by default. * @param transactionSupportedOperation determines the supported Operation based on the client's Request api version * * The responseCallback is wrapped so that it is scheduled on a request handler thread. There, it should be called with @@ -735,20 +706,20 @@ class ReplicaManager(val config: KafkaConfig, requiredAcks: Short, internalTopicsAllowed: Boolean, transactionalId: String, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, - recordValidationStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicPartition, MemoryRecords], + responseCallback: Map[TopicPartition, PartitionResponse] => Unit, + recordValidationStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, + actionQueue: ActionQueue = this.defaultActionQueue, transactionSupportedOperation: TransactionSupportedOperation): Unit = { val transactionalProducerInfo = mutable.HashSet[(Long, Short)]() val topicPartitionBatchInfo = mutable.Map[TopicPartition, Int]() - val topicIds = entriesPerPartition.keys.map(tp => tp.topic() -> tp.topicId()).toMap - entriesPerPartition.foreachEntry { (topicIdPartition, records) => + entriesPerPartition.foreachEntry { (topicPartition, records) => // Produce requests (only requests that require verification) should only have one batch per partition in "batches" but check all just to be safe. val transactionalBatches = records.batches.asScala.filter(batch => batch.hasProducerId && batch.isTransactional) transactionalBatches.foreach(batch => transactionalProducerInfo.add(batch.producerId, batch.producerEpoch)) - if (transactionalBatches.nonEmpty) topicPartitionBatchInfo.put(topicIdPartition.topicPartition(), records.firstBatch.baseSequence) + if (transactionalBatches.nonEmpty) topicPartitionBatchInfo.put(topicPartition, records.firstBatch.baseSequence) } if (transactionalProducerInfo.size > 1) { throw new InvalidPidMappingException("Transactional records contained more than one producer ID") @@ -757,7 +728,7 @@ class ReplicaManager(val config: KafkaConfig, def postVerificationCallback(newRequestLocal: RequestLocal, results: (Map[TopicPartition, Errors], Map[TopicPartition, VerificationGuard])): Unit = { val (preAppendErrors, verificationGuards) = results - val errorResults: Map[TopicIdPartition, LogAppendResult] = preAppendErrors.map { + val errorResults = preAppendErrors.map { case (topicPartition, error) => // translate transaction coordinator errors to known producer response errors val customException = @@ -783,21 +754,17 @@ class ReplicaManager(val config: KafkaConfig, } case _ => None } - new TopicIdPartition(topicIds.getOrElse(topicPartition.topic(), Uuid.ZERO_UUID), topicPartition) -> LogAppendResult( + topicPartition -> LogAppendResult( LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(customException.getOrElse(error.exception)), hasCustomErrorMessage = customException.isDefined ) } - // In non-transaction paths, errorResults is typically empty, so we can - // directly use entriesPerPartition instead of creating a new filtered collection - val entriesWithoutErrorsPerPartition = - if (errorResults.nonEmpty) entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) } - else entriesPerPartition + val entriesWithoutErrorsPerPartition = entriesPerPartition.filter { case (key, _) => !errorResults.contains(key) } val preAppendPartitionResponses = buildProducePartitionStatus(errorResults).map { case (k, status) => k -> status.responseStatus } - def newResponseCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { + def newResponseCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { responseCallback(preAppendPartitionResponses ++ responses) } @@ -810,6 +777,7 @@ class ReplicaManager(val config: KafkaConfig, responseCallback = newResponseCallback, recordValidationStatsCallback = recordValidationStatsCallback, requestLocal = newRequestLocal, + actionQueue = actionQueue, verificationGuards = verificationGuards ) } @@ -830,8 +798,8 @@ class ReplicaManager(val config: KafkaConfig, requestLocal ) - val retryTimeoutMs = Math.min(addPartitionsToTxnConfig.addPartitionsToTxnRetryBackoffMaxMs(), config.requestTimeoutMs) - val addPartitionsRetryBackoffMs = addPartitionsToTxnConfig.addPartitionsToTxnRetryBackoffMs() + val retryTimeoutMs = Math.min(config.addPartitionsToTxnConfig.addPartitionsToTxnRetryBackoffMaxMs(), config.requestTimeoutMs) + val addPartitionsRetryBackoffMs = config.addPartitionsToTxnConfig.addPartitionsToTxnRetryBackoffMs val startVerificationTimeMs = time.milliseconds def maybeRetryOnConcurrentTransactions(results: (Map[TopicPartition, Errors], Map[TopicPartition, VerificationGuard])): Unit = { if (time.milliseconds() - startVerificationTimeMs >= retryTimeoutMs) { @@ -870,14 +838,15 @@ class ReplicaManager(val config: KafkaConfig, } private def buildProducePartitionStatus( - results: Map[TopicIdPartition, LogAppendResult] - ): Map[TopicIdPartition, ProducePartitionStatus] = { - results.map { case (topicIdPartition, result) => - topicIdPartition -> ProducePartitionStatus( + results: Map[TopicPartition, LogAppendResult] + ): Map[TopicPartition, ProducePartitionStatus] = { + results.map { case (topicPartition, result) => + topicPartition -> ProducePartitionStatus( result.info.lastOffset + 1, // required offset new PartitionResponse( result.error, result.info.firstOffset, + result.info.lastOffset, result.info.logAppendTime, result.info.logStartOffset, result.info.recordErrors, @@ -889,19 +858,19 @@ class ReplicaManager(val config: KafkaConfig, private def addCompletePurgatoryAction( actionQueue: ActionQueue, - appendResults: Map[TopicIdPartition, LogAppendResult] + appendResults: Map[TopicOptionalIdPartition, LogAppendResult] ): Unit = { actionQueue.add { - () => appendResults.foreach { case (topicIdPartition, result) => - val requestKey = new TopicPartitionOperationKey(topicIdPartition.topicPartition) + () => appendResults.foreach { case (topicOptionalIdPartition, result) => + val requestKey = new TopicPartitionOperationKey(topicOptionalIdPartition.topicPartition) result.info.leaderHwChange match { case LeaderHwChange.INCREASED => // some delayed operations may be unblocked after HW changed delayedProducePurgatory.checkAndComplete(requestKey) delayedFetchPurgatory.checkAndComplete(requestKey) delayedDeleteRecordsPurgatory.checkAndComplete(requestKey) - if (topicIdPartition.topicId != Uuid.ZERO_UUID) delayedShareFetchPurgatory.checkAndComplete(new DelayedShareFetchPartitionKey( - topicIdPartition.topicId, topicIdPartition.partition)) + if (topicOptionalIdPartition.topicId.isPresent) delayedShareFetchPurgatory.checkAndComplete(new DelayedShareFetchPartitionKey( + topicOptionalIdPartition.topicId.get, topicOptionalIdPartition.partition)) case LeaderHwChange.SAME => // probably unblock some follower fetch requests since log end offset has been updated delayedFetchPurgatory.checkAndComplete(requestKey) @@ -914,16 +883,17 @@ class ReplicaManager(val config: KafkaConfig, private def maybeAddDelayedProduce( requiredAcks: Short, + delayedProduceLock: Option[Lock], timeoutMs: Long, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - initialAppendResults: Map[TopicIdPartition, LogAppendResult], - initialProduceStatus: Map[TopicIdPartition, ProducePartitionStatus], - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, + entriesPerPartition: Map[TopicPartition, MemoryRecords], + initialAppendResults: Map[TopicPartition, LogAppendResult], + initialProduceStatus: Map[TopicPartition, ProducePartitionStatus], + responseCallback: Map[TopicPartition, PartitionResponse] => Unit, ): Unit = { if (delayedProduceRequestRequired(requiredAcks, entriesPerPartition, initialAppendResults)) { // create delayed produce operation val produceMetadata = ProduceMetadata(requiredAcks, initialProduceStatus) - val delayedProduce = new DelayedProduce(timeoutMs, produceMetadata, this, responseCallback) + val delayedProduce = new DelayedProduce(timeoutMs, produceMetadata, this, responseCallback, delayedProduceLock) // create a list of (topic, partition) pairs to use as keys for this delayed produce operation val producerRequestKeys = entriesPerPartition.keys.map(new TopicPartitionOperationKey(_)).toList @@ -939,13 +909,12 @@ class ReplicaManager(val config: KafkaConfig, } } - private def sendInvalidRequiredAcksResponse( - entries: Map[TopicIdPartition, MemoryRecords], - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit): Unit = { + private def sendInvalidRequiredAcksResponse(entries: Map[TopicPartition, MemoryRecords], + responseCallback: Map[TopicPartition, PartitionResponse] => Unit): Unit = { // If required.acks is outside accepted range, something is wrong with the client // Just return an error and don't handle the request at all - val responseStatus = entries.map { case (topicIdPartition, _) => - topicIdPartition -> new PartitionResponse( + val responseStatus = entries.map { case (topicPartition, _) => + topicPartition -> new PartitionResponse( Errors.INVALID_REQUIRED_ACKS, LogAppendInfo.UNKNOWN_LOG_APPEND_INFO.firstOffset, RecordBatch.NO_TIMESTAMP, @@ -1026,13 +995,10 @@ class ReplicaManager(val config: KafkaConfig, callback: ((Map[TopicPartition, Errors], Map[TopicPartition, VerificationGuard])) => Unit, transactionSupportedOperation: TransactionSupportedOperation ): Unit = { - def transactionPartitionVerificationEnable = { - new TransactionLogConfig(config).transactionPartitionVerificationEnable - } // Skip verification if the request is not transactional or transaction verification is disabled. - if (transactionalId == null + if (transactionalId == null || + (!config.transactionLogConfig.transactionPartitionVerificationEnable && !transactionSupportedOperation.supportsEpochBump) || addPartitionsToTxnManager.isEmpty - || (!transactionSupportedOperation.supportsEpochBump && !transactionPartitionVerificationEnable) ) { callback((Map.empty[TopicPartition, Errors], Map.empty[TopicPartition, VerificationGuard])) return @@ -1063,18 +1029,18 @@ class ReplicaManager(val config: KafkaConfig, } def invokeCallback( - verificationErrors: java.util.Map[TopicPartition, Errors] + verificationErrors: Map[TopicPartition, Errors] ): Unit = { - callback((errors ++ verificationErrors.asScala, verificationGuards.toMap)) + callback((errors ++ verificationErrors, verificationGuards.toMap)) } addPartitionsToTxnManager.foreach(_.addOrVerifyTransaction( - transactionalId, - producerId, - producerEpoch, - verificationGuards.keys.toSeq.asJava, - invokeCallback, - transactionSupportedOperation + transactionalId = transactionalId, + producerId = producerId, + producerEpoch = producerEpoch, + topicPartitions = verificationGuards.keys.toSeq, + callback = invokeCallback, + transactionSupportedOperation = transactionSupportedOperation )) } @@ -1159,15 +1125,7 @@ class ReplicaManager(val config: KafkaConfig, // Stop current replica movement if the destinationDir is different from the existing destination log directory if (partition.futureReplicaDirChanged(destinationDir)) { replicaAlterLogDirsManager.removeFetcherForPartitions(Set(topicPartition)) - // There's a chance that the future replica can be promoted between the check for futureReplicaDirChanged - // and call to removeFetcherForPartitions. We want to avoid resuming cleaning again in that case to avoid - // an IllegalStateException. The presence of a future log after the call to removeFetcherForPartitions - // implies that it has not been promoted as both synchronize on partitionMapLock. - val futureReplicaPromoted = partition.futureLog.isEmpty partition.removeFutureLocalReplica() - if (!futureReplicaPromoted) { - logManager.resumeCleaning(topicPartition) - } } case HostedPartition.Offline(_) => throw new KafkaStorageException(s"Partition $topicPartition is offline") @@ -1196,7 +1154,7 @@ class ReplicaManager(val config: KafkaConfig, val futureLog = futureLocalLogOrException(topicPartition) logManager.abortAndPauseCleaning(topicPartition) - val initialFetchState = InitialFetchState(topicId.toScala, new BrokerEndPoint(config.brokerId, "localhost", -1), + val initialFetchState = InitialFetchState(topicId, new BrokerEndPoint(config.brokerId, "localhost", -1), partition.getLeaderEpoch, futureLog.highWatermark) replicaAlterLogDirsManager.addFetcherForPartitions(Map(topicPartition -> initialFetchState)) } @@ -1229,10 +1187,10 @@ class ReplicaManager(val config: KafkaConfig, * 2) size and lag of current and future logs for each partition in the given log directory. Only logs of the queried partitions * are included. There may be future logs (which will replace the current logs of the partition in the future) on the broker after KIP-113 is implemented. */ - def describeLogDirs(partitions: Set[TopicPartition]): util.List[DescribeLogDirsResponseData.DescribeLogDirsResult] = { + def describeLogDirs(partitions: Set[TopicPartition]): List[DescribeLogDirsResponseData.DescribeLogDirsResult] = { val logsByDir = logManager.allLogs.groupBy(log => log.parentDir) - config.logDirs.stream().distinct().map(logDir => { + config.logDirs.toSet.map { logDir: String => val file = Paths.get(logDir) val absolutePath = file.toAbsolutePath.toString try { @@ -1261,11 +1219,11 @@ class ReplicaManager(val config: KafkaConfig, } val describeLogDirsResult = new DescribeLogDirsResponseData.DescribeLogDirsResult() - .setLogDir(absolutePath) - .setTopics(topicInfos) + .setLogDir(absolutePath).setTopics(topicInfos) .setErrorCode(Errors.NONE.code) - .setTotalBytes(totalBytes) - .setUsableBytes(usableBytes) + .setTotalBytes(totalBytes).setUsableBytes(usableBytes) + if (!topicInfos.isEmpty) + describeLogDirsResult.setTopics(topicInfos) describeLogDirsResult } catch { @@ -1280,7 +1238,7 @@ class ReplicaManager(val config: KafkaConfig, .setLogDir(absolutePath) .setErrorCode(Errors.forException(t).code) } - }).toList() + }.toList } // See: https://bugs.openjdk.java.net/browse/JDK-8162520 @@ -1313,7 +1271,7 @@ class ReplicaManager(val config: KafkaConfig, val deleteRecordsStatus = localDeleteRecordsResults.map { case (topicPartition, result) => topicPartition -> - new DeleteRecordsPartitionStatus( + DeleteRecordsPartitionStatus( result.requestedOffset, // requested offset new DeleteRecordsPartitionResult() .setLowWatermark(result.lowWatermark) @@ -1322,31 +1280,8 @@ class ReplicaManager(val config: KafkaConfig, } if (delayedDeleteRecordsRequired(localDeleteRecordsResults)) { - def onAcks(topicPartition: TopicPartition, status: DeleteRecordsPartitionStatus): Unit = { - val (lowWatermarkReached, error, lw) = getPartition(topicPartition) match { - case HostedPartition.Online(partition) => - partition.leaderLogIfLocal match { - case Some(_) => - val leaderLW = partition.lowWatermarkIfLeader - (leaderLW >= status.requiredOffset, Errors.NONE, leaderLW) - case None => - (false, Errors.NOT_LEADER_OR_FOLLOWER, DeleteRecordsResponse.INVALID_LOW_WATERMARK) - } - - case HostedPartition.Offline(_) => - (false, Errors.KAFKA_STORAGE_ERROR, DeleteRecordsResponse.INVALID_LOW_WATERMARK) - - case HostedPartition.None => - (false, Errors.UNKNOWN_TOPIC_OR_PARTITION, DeleteRecordsResponse.INVALID_LOW_WATERMARK) - } - if (error != Errors.NONE || lowWatermarkReached) { - status.setAcksPending(false) - status.responseStatus.setErrorCode(error.code) - status.responseStatus.setLowWatermark(lw) - } - } // create delayed delete records operation - val delayedDeleteRecords = new DelayedDeleteRecords(timeout, deleteRecordsStatus.asJava, onAcks, response => responseCallback(response.asScala)) + val delayedDeleteRecords = new DelayedDeleteRecords(timeout, deleteRecordsStatus, this, responseCallback) // create a list of (topic, partition) pairs to use as keys for this delayed delete records operation val deleteRecordsRequestKeys = offsetPerPartition.keys.map(new TopicPartitionOperationKey(_)).toList @@ -1368,8 +1303,8 @@ class ReplicaManager(val config: KafkaConfig, // 2. there is data to append // 3. at least one partition append was successful (fewer errors than partitions) private def delayedProduceRequestRequired(requiredAcks: Short, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - localProduceResults: Map[TopicIdPartition, LogAppendResult]): Boolean = { + entriesPerPartition: Map[TopicPartition, MemoryRecords], + localProduceResults: Map[TopicPartition, LogAppendResult]): Boolean = { requiredAcks == -1 && entriesPerPartition.nonEmpty && localProduceResults.values.count(_.exception.isDefined) < entriesPerPartition.size @@ -1384,21 +1319,21 @@ class ReplicaManager(val config: KafkaConfig, */ private def appendToLocalLog(internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], + entriesPerPartition: Map[TopicPartition, MemoryRecords], requiredAcks: Short, requestLocal: RequestLocal, verificationGuards: Map[TopicPartition, VerificationGuard]): - Map[TopicIdPartition, LogAppendResult] = { + Map[TopicOptionalIdPartition, LogAppendResult] = { val traceEnabled = isTraceEnabled - def processFailedRecord(topicIdPartition: TopicIdPartition, t: Throwable) = { - val logStartOffset = onlinePartition(topicIdPartition.topicPartition()).map(_.logStartOffset).getOrElse(-1L) - brokerTopicStats.topicStats(topicIdPartition.topic).failedProduceRequestRate.mark() + def processFailedRecord(topicPartition: TopicPartition, t: Throwable) = { + val logStartOffset = onlinePartition(topicPartition).map(_.logStartOffset).getOrElse(-1L) + brokerTopicStats.topicStats(topicPartition.topic).failedProduceRequestRate.mark() brokerTopicStats.allTopicsStats.failedProduceRequestRate.mark() t match { case _: InvalidProducerEpochException => - info(s"Error processing append operation on partition $topicIdPartition", t) + info(s"Error processing append operation on partition $topicPartition", t) case _ => - error(s"Error processing append operation on partition $topicIdPartition", t) + error(s"Error processing append operation on partition $topicPartition", t) } logStartOffset @@ -1407,35 +1342,37 @@ class ReplicaManager(val config: KafkaConfig, if (traceEnabled) trace(s"Append [$entriesPerPartition] to local log") - entriesPerPartition.map { case (topicIdPartition, records) => - brokerTopicStats.topicStats(topicIdPartition.topic).totalProduceRequestRate.mark() + entriesPerPartition.map { case (topicPartition, records) => + brokerTopicStats.topicStats(topicPartition.topic).totalProduceRequestRate.mark() brokerTopicStats.allTopicsStats.totalProduceRequestRate.mark() // reject appending to internal topics if it is not allowed - if (Topic.isInternal(topicIdPartition.topic) && !internalTopicsAllowed) { - (topicIdPartition, LogAppendResult( + if (Topic.isInternal(topicPartition.topic) && !internalTopicsAllowed) { + (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult( LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, - Some(new InvalidTopicException(s"Cannot append to internal topic ${topicIdPartition.topic}")), + Some(new InvalidTopicException(s"Cannot append to internal topic ${topicPartition.topic}")), hasCustomErrorMessage = false)) } else { try { - val partition = getPartitionOrException(topicIdPartition) + val partition = getPartitionOrException(topicPartition) val info = partition.appendRecordsToLeader(records, origin, requiredAcks, requestLocal, - verificationGuards.getOrElse(topicIdPartition.topicPartition(), VerificationGuard.SENTINEL)) + verificationGuards.getOrElse(topicPartition, VerificationGuard.SENTINEL)) val numAppendedMessages = info.numMessages // update stats for successfully appended bytes and messages as bytesInRate and messageInRate - brokerTopicStats.topicStats(topicIdPartition.topic).bytesInRate.mark(records.sizeInBytes) + brokerTopicStats.topicStats(topicPartition.topic).bytesInRate.mark(records.sizeInBytes) brokerTopicStats.allTopicsStats.bytesInRate.mark(records.sizeInBytes) - brokerTopicStats.topicStats(topicIdPartition.topic).messagesInRate.mark(numAppendedMessages) + brokerTopicStats.topicStats(topicPartition.topic).messagesInRate.mark(numAppendedMessages) brokerTopicStats.allTopicsStats.messagesInRate.mark(numAppendedMessages) if (traceEnabled) - trace(s"${records.sizeInBytes} written to log $topicIdPartition beginning at offset " + + trace(s"${records.sizeInBytes} written to log $topicPartition beginning at offset " + s"${info.firstOffset} and ending at offset ${info.lastOffset}") - (topicIdPartition, LogAppendResult(info, exception = None, hasCustomErrorMessage = false)) + var topicId: Optional[Uuid] = Optional.empty() + if (partition.topicId.isDefined) topicId = Optional.of(partition.topicId.get) + (new TopicOptionalIdPartition(topicId, topicPartition), LogAppendResult(info, exception = None, hasCustomErrorMessage = false)) } catch { // NOTE: Failed produce requests metric is not incremented for known exceptions // it is supposed to indicate un-expected failures of a broker in handling a produce request @@ -1444,17 +1381,16 @@ class ReplicaManager(val config: KafkaConfig, _: RecordTooLargeException | _: RecordBatchTooLargeException | _: CorruptRecordException | - _: KafkaStorageException | - _: UnknownTopicIdException) => - (topicIdPartition, LogAppendResult(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(e), hasCustomErrorMessage = false)) + _: KafkaStorageException) => + (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, Some(e), hasCustomErrorMessage = false)) case rve: RecordValidationException => - val logStartOffset = processFailedRecord(topicIdPartition, rve.invalidException) + val logStartOffset = processFailedRecord(topicPartition, rve.invalidException) val recordErrors = rve.recordErrors - (topicIdPartition, LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithAdditionalInfo(logStartOffset, recordErrors), + (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithAdditionalInfo(logStartOffset, recordErrors), Some(rve.invalidException), hasCustomErrorMessage = true)) case t: Throwable => - val logStartOffset = processFailedRecord(topicIdPartition, t) - (topicIdPartition, LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithLogStartOffset(logStartOffset), + val logStartOffset = processFailedRecord(topicPartition, t) + (new TopicOptionalIdPartition(Optional.empty(), topicPartition), LogAppendResult(LogAppendInfo.unknownLogAppendInfoWithLogStartOffset(logStartOffset), Some(t), hasCustomErrorMessage = false)) } } @@ -1469,7 +1405,7 @@ class ReplicaManager(val config: KafkaConfig, correlationId: Int, version: Short, buildErrorResponse: (Errors, ListOffsetsPartition) => ListOffsetsPartitionResponse, - responseCallback: Consumer[util.Collection[ListOffsetsTopicResponse]], + responseCallback: List[ListOffsetsTopicResponse] => Unit, timeoutMs: Int = 0): Unit = { val statusByPartition = mutable.Map[TopicPartition, ListOffsetsPartitionStatus]() topics.foreach { topic => @@ -1478,11 +1414,9 @@ class ReplicaManager(val config: KafkaConfig, if (duplicatePartitions.contains(topicPartition)) { debug(s"OffsetRequest with correlation id $correlationId from client $clientId on partition $topicPartition " + s"failed because the partition is duplicated in the request.") - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.INVALID_REQUEST, partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.INVALID_REQUEST, partition))) } else if (isListOffsetsTimestampUnsupported(partition.timestamp(), version)) { - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.UNSUPPORTED_VERSION, partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.UNSUPPORTED_VERSION, partition))) } else { try { val fetchOnlyFromLeader = replicaId != ListOffsetsRequest.DEBUGGING_REPLICA_ID @@ -1515,19 +1449,15 @@ class ReplicaManager(val config: KafkaConfig, if (timestampAndOffsetOpt.leaderEpoch.isPresent && version >= 4) partitionResponse.setLeaderEpoch(timestampAndOffsetOpt.leaderEpoch.get) } - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(partitionResponse)).build() + ListOffsetsPartitionStatus(Some(partitionResponse)) } else if (resultHolder.timestampAndOffsetOpt.isEmpty && resultHolder.futureHolderOpt.isEmpty) { // This is an empty offset response scenario resultHolder.maybeOffsetsError.map(e => throw e) - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.NONE, partition))).build() + ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.NONE, partition))) } else if (resultHolder.timestampAndOffsetOpt.isEmpty && resultHolder.futureHolderOpt.isPresent) { // This case is for topic enabled with remote storage and we want to search the timestamp in // remote storage using async fashion. - ListOffsetsPartitionStatus.builder() - .futureHolderOpt(resultHolder.futureHolderOpt()) - .lastFetchableOffset(resultHolder.lastFetchableOffset) - .maybeOffsetsError(resultHolder.maybeOffsetsError) - .build() + ListOffsetsPartitionStatus(None, resultHolder.futureHolderOpt(), resultHolder.lastFetchableOffset.toScala.map(_.longValue()), resultHolder.maybeOffsetsError.toScala) } else { throw new IllegalStateException(s"Unexpected result holder state $resultHolder") } @@ -1544,22 +1474,19 @@ class ReplicaManager(val config: KafkaConfig, _ : UnsupportedForMessageFormatException) => debug(s"Offset request with correlation id $correlationId from client $clientId on " + s"partition $topicPartition failed due to ${e.getMessage}") - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.forException(e), partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.forException(e), partition))) + // Only V5 and newer ListOffset calls should get OFFSET_NOT_AVAILABLE case e: OffsetNotAvailableException => if (version >= 5) { - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.forException(e), partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.forException(e), partition))) } else { - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.LEADER_NOT_AVAILABLE, partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.LEADER_NOT_AVAILABLE, partition))) } case e: Throwable => error("Error while responding to offset request", e) - statusByPartition += topicPartition -> - ListOffsetsPartitionStatus.builder().responseOpt(Optional.of(buildErrorResponse(Errors.forException(e), partition))).build() + statusByPartition += topicPartition -> ListOffsetsPartitionStatus(Some(buildErrorResponse(Errors.forException(e), partition))) } } } @@ -1568,7 +1495,7 @@ class ReplicaManager(val config: KafkaConfig, if (delayedRemoteListOffsetsRequired(statusByPartition)) { val delayMs: Long = if (timeoutMs > 0) timeoutMs else config.remoteLogManagerConfig.remoteListOffsetsRequestTimeoutMs() // create delayed remote list offsets operation - val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version, statusByPartition.asJava, tp => getPartitionOrException(tp), responseCallback) + val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version, statusByPartition, this, responseCallback) // create a list of (topic, partition) pairs to use as keys for this delayed remote list offsets operation val listOffsetsRequestKeys = statusByPartition.keys.map(new TopicPartitionOperationKey(_)).toList // try to complete the request immediately, otherwise put it into the purgatory @@ -1577,9 +1504,9 @@ class ReplicaManager(val config: KafkaConfig, // we can respond immediately val responseTopics = statusByPartition.groupBy(e => e._1.topic()).map { case (topic, status) => - new ListOffsetsTopicResponse().setName(topic).setPartitions(status.values.flatMap(s => Some(s.responseOpt.get())).toList.asJava) + new ListOffsetsTopicResponse().setName(topic).setPartitions(status.values.flatMap(s => s.responseOpt).toList.asJava) }.toList - responseCallback.accept(responseTopics.asJava) + responseCallback(responseTopics) } } @@ -1597,18 +1524,15 @@ class ReplicaManager(val config: KafkaConfig, } /** - * Initiates an asynchronous remote storage fetch operation for the given remote fetch information. - * - * This method schedules a remote fetch task with the remote log manager and sets up the necessary - * completion handling for the operation. The remote fetch result will be used to populate the - * delayed remote fetch purgatory when completed. - * - * @param remoteFetchInfo The remote storage fetch information - * - * @return A tuple containing the remote fetch task and the remote fetch result + * Returns [[LogReadResult]] with error if a task for RemoteStorageFetchInfo could not be scheduled successfully + * else returns [[None]]. */ - private def processRemoteFetch(remoteFetchInfo: RemoteStorageFetchInfo): (Future[Void], CompletableFuture[RemoteLogReadResult]) = { - val key = new TopicPartitionOperationKey(remoteFetchInfo.topicIdPartition) + private def processRemoteFetch(remoteFetchInfo: RemoteStorageFetchInfo, + params: FetchParams, + responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit, + logReadResults: Seq[(TopicIdPartition, LogReadResult)], + fetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)]): Option[LogReadResult] = { + val key = new TopicPartitionOperationKey(remoteFetchInfo.topicPartition.topic(), remoteFetchInfo.topicPartition.partition()) val remoteFetchResult = new CompletableFuture[RemoteLogReadResult] var remoteFetchTask: Future[Void] = null try { @@ -1618,39 +1542,31 @@ class ReplicaManager(val config: KafkaConfig, }) } catch { case e: RejectedExecutionException => - warn(s"Unable to fetch data from remote storage for remoteFetchInfo: $remoteFetchInfo", e) - // Store the error in RemoteLogReadResult if any in scheduling the remote fetch task. - // It will be sent back to the client in DelayedRemoteFetch along with other successful remote fetch results. - remoteFetchResult.complete(new RemoteLogReadResult(Optional.empty, Optional.of(e))) + // Return the error if any in scheduling the remote fetch task + warn("Unable to fetch data from remote storage", e) + return Some(createLogReadResult(e)) } - (remoteFetchTask, remoteFetchResult) - } - - /** - * Process all remote fetches by creating async read tasks and handling them in DelayedRemoteFetch collectively. - */ - private def processRemoteFetches(remoteFetchInfos: util.HashMap[TopicIdPartition, RemoteStorageFetchInfo], - params: FetchParams, - responseCallback: Seq[(TopicIdPartition, FetchPartitionData)] => Unit, - logReadResults: Seq[(TopicIdPartition, LogReadResult)], - remoteFetchPartitionStatus: Seq[(TopicIdPartition, FetchPartitionStatus)]): Unit = { - val remoteFetchTasks = new util.HashMap[TopicIdPartition, Future[Void]] - val remoteFetchResults = new util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]] - - remoteFetchInfos.forEach { (topicIdPartition, remoteFetchInfo) => - val (task, result) = processRemoteFetch(remoteFetchInfo) - remoteFetchTasks.put(topicIdPartition, task) - remoteFetchResults.put(topicIdPartition, result) - } - val remoteFetchMaxWaitMs = config.remoteLogManagerConfig.remoteFetchMaxWaitMs().toLong - val remoteFetch = new DelayedRemoteFetch(remoteFetchTasks, remoteFetchResults, remoteFetchInfos, remoteFetchMaxWaitMs, - remoteFetchPartitionStatus, params, logReadResults, this, responseCallback) + val remoteFetch = new DelayedRemoteFetch(remoteFetchTask, remoteFetchResult, remoteFetchInfo, remoteFetchMaxWaitMs, + fetchPartitionStatus, params, logReadResults, this, responseCallback) + delayedRemoteFetchPurgatory.tryCompleteElseWatch(remoteFetch, util.Collections.singletonList(key)) + None + } + + private def buildPartitionToFetchPartitionData(logReadResults: Seq[(TopicIdPartition, LogReadResult)], + remoteFetchTopicPartition: TopicPartition, + error: LogReadResult): Seq[(TopicIdPartition, FetchPartitionData)] = { + logReadResults.map { case (tp, result) => + val fetchPartitionData = { + if (tp.topicPartition().equals(remoteFetchTopicPartition)) + error + else + result + }.toFetchPartitionData(false) - // create a list of (topic, partition) pairs to use as keys for this delayed fetch operation - val delayedFetchKeys = remoteFetchPartitionStatus.map { case (tp, _) => new TopicPartitionOperationKey(tp) }.toList - delayedRemoteFetchPurgatory.tryCompleteElseWatch(remoteFetch, delayedFetchKeys.asJava) + tp -> fetchPartitionData + } } /** @@ -1668,8 +1584,8 @@ class ReplicaManager(val config: KafkaConfig, var bytesReadable: Long = 0 var errorReadingData = false - // topic-partitions that have to be read from remote storage - val remoteFetchInfos = new util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() + // The 1st topic-partition that has to be read from remote storage + var remoteFetchInfo: Optional[RemoteStorageFetchInfo] = Optional.empty() var hasDivergingEpoch = false var hasPreferredReadReplica = false @@ -1680,12 +1596,12 @@ class ReplicaManager(val config: KafkaConfig, brokerTopicStats.allTopicsStats.totalFetchRequestRate.mark() if (logReadResult.error != Errors.NONE) errorReadingData = true - if (logReadResult.info.delayedRemoteStorageFetch.isPresent) { - remoteFetchInfos.put(topicIdPartition, logReadResult.info.delayedRemoteStorageFetch.get()) + if (!remoteFetchInfo.isPresent && logReadResult.info.delayedRemoteStorageFetch.isPresent) { + remoteFetchInfo = logReadResult.info.delayedRemoteStorageFetch } - if (logReadResult.divergingEpoch.isPresent) + if (logReadResult.divergingEpoch.nonEmpty) hasDivergingEpoch = true - if (logReadResult.preferredReadReplica.isPresent) + if (logReadResult.preferredReadReplica.nonEmpty) hasPreferredReadReplica = true bytesReadable = bytesReadable + logReadResult.info.records.sizeInBytes logReadResultMap.put(topicIdPartition, logReadResult) @@ -1698,7 +1614,7 @@ class ReplicaManager(val config: KafkaConfig, // 4) some error happens while reading data // 5) we found a diverging epoch // 6) has a preferred read replica - if (remoteFetchInfos.isEmpty && (params.maxWaitMs <= 0 || fetchInfos.isEmpty || bytesReadable >= params.minBytes || errorReadingData || + if (!remoteFetchInfo.isPresent && (params.maxWaitMs <= 0 || fetchInfos.isEmpty || bytesReadable >= params.minBytes || errorReadingData || hasDivergingEpoch || hasPreferredReadReplica)) { val fetchPartitionData = logReadResults.map { case (tp, result) => val isReassignmentFetch = params.isFromFollower && isAddingReplica(tp.topicPartition, params.replicaId) @@ -1715,8 +1631,15 @@ class ReplicaManager(val config: KafkaConfig, }) } - if (!remoteFetchInfos.isEmpty) { - processRemoteFetches(remoteFetchInfos, params, responseCallback, logReadResults, fetchPartitionStatus.toSeq) + if (remoteFetchInfo.isPresent) { + val maybeLogReadResultWithError = processRemoteFetch(remoteFetchInfo.get(), params, responseCallback, logReadResults, fetchPartitionStatus) + if (maybeLogReadResultWithError.isDefined) { + // If there is an error in scheduling the remote fetch task, return what we currently have + // (the data read from local log segment for the other topic-partitions) and an error for the topic-partition + // that we couldn't read from remote storage + val partitionToFetchPartitionData = buildPartitionToFetchPartitionData(logReadResults, remoteFetchInfo.get().topicPartition, maybeLogReadResultWithError.get) + responseCallback(partitionToFetchPartitionData) + } } else { // If there is not enough data to respond and there is no remote data, we will let the fetch request // wait for new data. @@ -1753,9 +1676,9 @@ class ReplicaManager(val config: KafkaConfig, if (params.isFromFollower && shouldLeaderThrottle(quota, partition, params.replicaId)) { // If the partition is being throttled, simply return an empty set. new FetchDataInfo(givenFetchedDataInfo.fetchOffsetMetadata, MemoryRecords.EMPTY) - } else if (givenFetchedDataInfo.firstEntryIncomplete) { - // Replace incomplete message sets with an empty one as consumers can make progress in such - // cases and don't need to report a `RecordTooLargeException` + } else if (!params.hardMaxBytesLimit && givenFetchedDataInfo.firstEntryIncomplete) { + // For FetchRequest version 3, we replace incomplete message sets with an empty one as consumers can make + // progress in such cases and don't need to report a `RecordTooLargeException` new FetchDataInfo(givenFetchedDataInfo.fetchOffsetMetadata, MemoryRecords.EMPTY) } else { givenFetchedDataInfo @@ -1789,22 +1712,22 @@ class ReplicaManager(val config: KafkaConfig, metadata => findPreferredReadReplica(partition, metadata, params.replicaId, fetchInfo.fetchOffset, fetchTimeMs)) if (preferredReadReplica.isDefined) { - replicaSelectorPlugin.foreach { selector => - debug(s"Replica selector ${selector.get.getClass.getSimpleName} returned preferred replica " + + replicaSelectorOpt.foreach { selector => + debug(s"Replica selector ${selector.getClass.getSimpleName} returned preferred replica " + s"${preferredReadReplica.get} for ${params.clientMetadata}") } // If a preferred read-replica is set, skip the read val offsetSnapshot = partition.fetchOffsetSnapshot(fetchInfo.currentLeaderEpoch, fetchOnlyFromLeader = false) - new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - Optional.empty(), - offsetSnapshot.highWatermark.messageOffset, - offsetSnapshot.logStartOffset, - offsetSnapshot.logEndOffset.messageOffset, - followerLogStartOffset, - -1L, - OptionalLong.of(offsetSnapshot.lastStableOffset.messageOffset), - if (preferredReadReplica.isDefined) OptionalInt.of(preferredReadReplica.get) else OptionalInt.empty(), - Optional.empty()) + LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, + highWatermark = offsetSnapshot.highWatermark.messageOffset, + leaderLogStartOffset = offsetSnapshot.logStartOffset, + leaderLogEndOffset = offsetSnapshot.logEndOffset.messageOffset, + followerLogStartOffset = followerLogStartOffset, + fetchTimeMs = -1L, + lastStableOffset = Some(offsetSnapshot.lastStableOffset.messageOffset), + preferredReadReplica = preferredReadReplica, + exception = None) } else { log = partition.localLogWithEpochOrThrow(fetchInfo.currentLeaderEpoch, params.fetchOnlyLeader()) @@ -1819,16 +1742,16 @@ class ReplicaManager(val config: KafkaConfig, val fetchDataInfo = checkFetchDataInfo(partition, readInfo.fetchedData) - new LogReadResult(fetchDataInfo, - readInfo.divergingEpoch, - readInfo.highWatermark, - readInfo.logStartOffset, - readInfo.logEndOffset, - followerLogStartOffset, - fetchTimeMs, - OptionalLong.of(readInfo.lastStableOffset), - if (preferredReadReplica.isDefined) OptionalInt.of(preferredReadReplica.get) else OptionalInt.empty(), - Optional.empty() + LogReadResult(info = fetchDataInfo, + divergingEpoch = readInfo.divergingEpoch.toScala, + highWatermark = readInfo.highWatermark, + leaderLogStartOffset = readInfo.logStartOffset, + leaderLogEndOffset = readInfo.logEndOffset, + followerLogStartOffset = followerLogStartOffset, + fetchTimeMs = fetchTimeMs, + lastStableOffset = Some(readInfo.lastStableOffset), + preferredReadReplica = preferredReadReplica, + exception = None ) } } catch { @@ -1852,33 +1775,29 @@ class ReplicaManager(val config: KafkaConfig, error(s"Error processing fetch with max size $adjustedMaxBytes from $fetchSource " + s"on partition $tp: $fetchInfo", e) - new LogReadResult(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - Optional.empty(), - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - UnifiedLog.UNKNOWN_OFFSET, - -1L, - OptionalLong.empty(), - Optional.of(e) + LogReadResult(info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, + highWatermark = UnifiedLog.UnknownOffset, + leaderLogStartOffset = UnifiedLog.UnknownOffset, + leaderLogEndOffset = UnifiedLog.UnknownOffset, + followerLogStartOffset = UnifiedLog.UnknownOffset, + fetchTimeMs = -1L, + lastStableOffset = None, + exception = Some(e) ) } } var limitBytes = params.maxBytes val result = new mutable.ArrayBuffer[(TopicIdPartition, LogReadResult)] - var minOneMessage = true + var minOneMessage = !params.hardMaxBytesLimit readPartitionInfo.foreach { case (tp, fetchInfo) => val readResult = read(tp, fetchInfo, limitBytes, minOneMessage) val recordBatchSize = readResult.info.records.sizeInBytes // Once we read from a non-empty partition, we stop ignoring request and partition level size limits if (recordBatchSize > 0) minOneMessage = false - // Because we don't know how much data will be retrieved in remote fetch yet, and we don't want to block the API call - // to query remoteLogMetadata, assume it will fetch the max bytes size of data to avoid to exceed the "fetch.max.bytes" setting. - val estimatedRecordBatchSize = if (recordBatchSize == 0 && readResult.info.delayedRemoteStorageFetch.isPresent) - readResult.info.delayedRemoteStorageFetch.get.fetchMaxBytes else recordBatchSize - limitBytes = math.max(0, limitBytes - estimatedRecordBatchSize) + limitBytes = math.max(0, limitBytes - recordBatchSize) result += (tp -> readResult) } result @@ -1906,7 +1825,7 @@ class ReplicaManager(val config: KafkaConfig, createLogReadResult(highWatermark, leaderLogStartOffset, leaderLogEndOffset, new OffsetMovedToTieredStorageException("Given offset" + offset + " is moved to tiered storage")) } else { - val throttleTimeMs = remoteLogManager.get.getFetchThrottleTimeMs + val throttleTimeMs = remoteLogManager.get.getFetchThrottleTimeMs() val fetchDataInfo = if (throttleTimeMs > 0) { // Record the throttle time for the remote log fetches remoteLogManager.get.fetchThrottleTimeSensor().record(throttleTimeMs, time.milliseconds()) @@ -1924,21 +1843,21 @@ class ReplicaManager(val config: KafkaConfig, ) } else { // For consume fetch requests, create a dummy FetchDataInfo with the remote storage fetch information. - // For the topic-partitions that need remote data, we will use this information to read the data in another thread. + // For the first topic-partition that needs remote data, we will use this information to read the data in another thread. new FetchDataInfo(new LogOffsetMetadata(offset), MemoryRecords.EMPTY, false, Optional.empty(), - Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp, - fetchInfo, params.isolation))) + Optional.of(new RemoteStorageFetchInfo(adjustedMaxBytes, minOneMessage, tp.topicPartition(), + fetchInfo, params.isolation, params.hardMaxBytesLimit()))) } - new LogReadResult(fetchDataInfo, - Optional.empty(), + LogReadResult(fetchDataInfo, + divergingEpoch = None, highWatermark, leaderLogStartOffset, leaderLogEndOffset, fetchInfo.logStartOffset, fetchTimeMs, - OptionalLong.of(log.lastStableOffset), - Optional.empty[Throwable]()) + Some(log.lastStableOffset), + exception = None) } } else { createLogReadResult(exception) @@ -1960,9 +1879,9 @@ class ReplicaManager(val config: KafkaConfig, if (FetchRequest.isValidBrokerId(replicaId)) None else { - replicaSelectorPlugin.flatMap { replicaSelector => + replicaSelectorOpt.flatMap { replicaSelector => val replicaEndpoints = metadataCache.getPartitionReplicaEndpoints(partition.topicPartition, - new ListenerName(clientMetadata.listenerName)).asScala + new ListenerName(clientMetadata.listenerName)) val replicaInfoSet = mutable.Set[ReplicaView]() partition.remoteReplicas.foreach { replica => @@ -1991,7 +1910,7 @@ class ReplicaManager(val config: KafkaConfig, replicaInfoSet.add(leaderReplica) val partitionInfo = new DefaultPartitionView(replicaInfoSet.asJava, leaderReplica) - replicaSelector.get.select(partition.topicPartition, clientMetadata, partitionInfo).toScala.collect { + replicaSelector.select(partition.topicPartition, clientMetadata, partitionInfo).toScala.collect { // Even though the replica selector can return the leader, we don't want to send it out with the // FetchResponse, so we exclude it here case selected if !selected.endpoint.isEmpty && selected != leaderReplica => selected.endpoint.id @@ -2012,6 +1931,189 @@ class ReplicaManager(val config: KafkaConfig, def getLogConfig(topicPartition: TopicPartition): Option[LogConfig] = localLog(topicPartition).map(_.config) + def becomeLeaderOrFollower(correlationId: Int, + leaderAndIsrRequest: LeaderAndIsrRequest, + onLeadershipChange: (Iterable[Partition], Iterable[Partition]) => Unit): LeaderAndIsrResponse = { + val startMs = time.milliseconds() + replicaStateChangeLock synchronized { + val controllerId = leaderAndIsrRequest.controllerId + val requestPartitionStates = leaderAndIsrRequest.partitionStates.asScala + stateChangeLogger.info(s"Handling LeaderAndIsr request correlationId $correlationId from controller " + + s"$controllerId for ${requestPartitionStates.size} partitions") + if (stateChangeLogger.isTraceEnabled) + requestPartitionStates.foreach { partitionState => + stateChangeLogger.trace(s"Received LeaderAndIsr request $partitionState " + + s"correlation id $correlationId from controller $controllerId " + + s"epoch ${leaderAndIsrRequest.controllerEpoch}") + } + val topicIds = leaderAndIsrRequest.topicIds() + def topicIdFromRequest(topicName: String): Option[Uuid] = { + val topicId = topicIds.get(topicName) + // if invalid topic ID return None + if (topicId == null || topicId == Uuid.ZERO_UUID) + None + else + Some(topicId) + } + + val response = { + if (leaderAndIsrRequest.controllerEpoch < controllerEpoch) { + stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from controller $controllerId with " + + s"correlation id $correlationId since its controller epoch ${leaderAndIsrRequest.controllerEpoch} is old. " + + s"Latest known controller epoch is $controllerEpoch") + leaderAndIsrRequest.getErrorResponse(Errors.STALE_CONTROLLER_EPOCH.exception) + } else { + val responseMap = new mutable.HashMap[TopicPartition, Errors] + controllerEpoch = leaderAndIsrRequest.controllerEpoch + + val partitions = new mutable.HashSet[Partition]() + val partitionsToBeLeader = new mutable.HashMap[Partition, LeaderAndIsrRequest.PartitionState]() + val partitionsToBeFollower = new mutable.HashMap[Partition, LeaderAndIsrRequest.PartitionState]() + val topicIdUpdateFollowerPartitions = new mutable.HashSet[Partition]() + val allTopicPartitionsInRequest = new mutable.HashSet[TopicPartition]() + + // First create the partition if it doesn't exist already + requestPartitionStates.foreach { partitionState => + val topicPartition = new TopicPartition(partitionState.topicName, partitionState.partitionIndex) + allTopicPartitionsInRequest += topicPartition + val partitionOpt = getPartition(topicPartition) match { + case HostedPartition.Offline(_) => + stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from " + + s"controller $controllerId with correlation id $correlationId " + + s"epoch $controllerEpoch for partition $topicPartition as the local replica for the " + + "partition is in an offline log directory") + responseMap.put(topicPartition, Errors.KAFKA_STORAGE_ERROR) + None + + case HostedPartition.Online(partition) => + Some(partition) + + case HostedPartition.None => + val partition = Partition(topicPartition, time, this) + allPartitions.putIfNotExists(topicPartition, HostedPartition.Online(partition)) + Some(partition) + } + + // Next check the topic ID and the partition's leader epoch + partitionOpt.foreach { partition => + val currentLeaderEpoch = partition.getLeaderEpoch + val requestLeaderEpoch = partitionState.leaderEpoch + val requestTopicId = topicIdFromRequest(topicPartition.topic) + val logTopicId = partition.topicId + + if (!hasConsistentTopicId(requestTopicId, logTopicId)) { + stateChangeLogger.error(s"Topic ID in memory: ${logTopicId.get} does not" + + s" match the topic ID for partition $topicPartition received: " + + s"${requestTopicId.get}.") + responseMap.put(topicPartition, Errors.INCONSISTENT_TOPIC_ID) + } else if (requestLeaderEpoch >= currentLeaderEpoch) { + // If the leader epoch is valid record the epoch of the controller that made the leadership decision. + // This is useful while updating the isr to maintain the decision maker controller's epoch in the zookeeper path + if (partitionState.replicas.contains(localBrokerId)) { + partitions += partition + if (partitionState.leader == localBrokerId) { + partitionsToBeLeader.put(partition, partitionState) + } else { + partitionsToBeFollower.put(partition, partitionState) + } + } else { + stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from controller $controllerId with " + + s"correlation id $correlationId epoch $controllerEpoch for partition $topicPartition as itself is not " + + s"in assigned replica list ${partitionState.replicas.asScala.mkString(",")}") + responseMap.put(topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) + } + } else if (requestLeaderEpoch < currentLeaderEpoch) { + stateChangeLogger.warn(s"Ignoring LeaderAndIsr request from " + + s"controller $controllerId with correlation id $correlationId " + + s"epoch $controllerEpoch for partition $topicPartition since its associated " + + s"leader epoch $requestLeaderEpoch is smaller than the current " + + s"leader epoch $currentLeaderEpoch") + responseMap.put(topicPartition, Errors.STALE_CONTROLLER_EPOCH) + } else { + val error = requestTopicId match { + case Some(topicId) if logTopicId.isEmpty => + // The controller may send LeaderAndIsr to upgrade to using topic IDs without bumping the epoch. + // If we have a matching epoch, we expect the log to be defined. + val log = localLogOrException(partition.topicPartition) + log.assignTopicId(topicId) + stateChangeLogger.info(s"Updating log for $topicPartition to assign topic ID " + + s"$topicId from LeaderAndIsr request from controller $controllerId with correlation " + + s"id $correlationId epoch $controllerEpoch") + if (partitionState.leader != localBrokerId) + topicIdUpdateFollowerPartitions.add(partition) + Errors.NONE + case None if logTopicId.isDefined && partitionState.leader != localBrokerId => + // If we have a topic ID in the log but not in the request, we must have previously had topic IDs but + // are now downgrading. If we are a follower, remove the topic ID from the PartitionFetchState. + stateChangeLogger.info(s"Updating PartitionFetchState for $topicPartition to remove log topic ID " + + s"${logTopicId.get} since LeaderAndIsr request from controller $controllerId with correlation " + + s"id $correlationId epoch $controllerEpoch did not contain a topic ID") + topicIdUpdateFollowerPartitions.add(partition) + Errors.NONE + case _ => + stateChangeLogger.info(s"Ignoring LeaderAndIsr request from " + + s"controller $controllerId with correlation id $correlationId " + + s"epoch $controllerEpoch for partition $topicPartition since its associated " + + s"leader epoch $requestLeaderEpoch matches the current leader epoch") + Errors.STALE_CONTROLLER_EPOCH + } + responseMap.put(topicPartition, error) + } + } + } + + val highWatermarkCheckpoints = new LazyOffsetCheckpoints(this.highWatermarkCheckpoints.asJava) + val partitionsBecomeLeader = if (partitionsToBeLeader.nonEmpty) + makeLeaders(controllerId, controllerEpoch, partitionsToBeLeader, correlationId, responseMap, + highWatermarkCheckpoints, topicIdFromRequest) + else + Set.empty[Partition] + val partitionsBecomeFollower = if (partitionsToBeFollower.nonEmpty) + makeFollowers(controllerId, controllerEpoch, partitionsToBeFollower, correlationId, responseMap, + highWatermarkCheckpoints, topicIdFromRequest) + else + Set.empty[Partition] + + val followerTopicSet = partitionsBecomeFollower.map(_.topic).toSet + updateLeaderAndFollowerMetrics(followerTopicSet) + + if (topicIdUpdateFollowerPartitions.nonEmpty) + updateTopicIdForFollowers(controllerId, controllerEpoch, topicIdUpdateFollowerPartitions, correlationId, topicIdFromRequest) + + // We initialize highwatermark thread after the first LeaderAndIsr request. This ensures that all the partitions + // have been completely populated before starting the checkpointing there by avoiding weird race conditions + startHighWatermarkCheckPointThread() + + maybeAddLogDirFetchers(partitions, highWatermarkCheckpoints, topicIdFromRequest) + + replicaFetcherManager.shutdownIdleFetcherThreads() + replicaAlterLogDirsManager.shutdownIdleFetcherThreads() + + remoteLogManager.foreach(rlm => rlm.onLeadershipChange(partitionsBecomeLeader.asJava, partitionsBecomeFollower.asJava, topicIds)) + + onLeadershipChange(partitionsBecomeLeader, partitionsBecomeFollower) + + val topics = new util.LinkedHashMap[Uuid, util.List[LeaderAndIsrResponse.PartitionError]] + responseMap.foreachEntry { (tp, error) => + val topicId = topicIds.get(tp.topic) + var partitionErrors = topics.get(topicId) + if (partitionErrors == null) { + partitionErrors = new util.ArrayList[LeaderAndIsrResponse.PartitionError]() + topics.put(topicId, partitionErrors) + } + partitionErrors.add(new LeaderAndIsrResponse.PartitionError(tp.partition(), error.code)) + } + new LeaderAndIsrResponse(Errors.NONE, topics) + } + } + val endMs = time.milliseconds() + val elapsedMs = endMs - startMs + stateChangeLogger.info(s"Finished LeaderAndIsr request in ${elapsedMs}ms correlationId $correlationId from controller " + + s"$controllerId for ${requestPartitionStates.size} partitions") + response + } + } + /** * Checks if the topic ID provided in the request is consistent with the topic ID in the log. * When using this method to handle a Fetch request, the topic ID may have been provided by an earlier request. @@ -2041,7 +2143,9 @@ class ReplicaManager(val config: KafkaConfig, private def updateLeaderAndFollowerMetrics(newFollowerTopics: Set[String]): Unit = { val leaderTopicSet = leaderPartitionsIterator.map(_.topic).toSet newFollowerTopics.diff(leaderTopicSet).foreach(brokerTopicStats.removeOldLeaderMetrics) - // Currently, there are no follower metrics that need to be updated. + + // remove metrics for brokers which are not followers of a topic + leaderTopicSet.diff(newFollowerTopics).foreach(brokerTopicStats.removeOldFollowerMetrics) } protected[server] def maybeAddLogDirFetchers(partitions: Set[Partition], @@ -2074,13 +2178,248 @@ class ReplicaManager(val config: KafkaConfig, } } + /* + * Make the current broker to become leader for a given set of partitions by: + * + * 1. Stop fetchers for these partitions + * 2. Update the partition metadata in cache + * 3. Add these partitions to the leader partitions set + * + * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where + * the error message will be set on each partition since we do not know which partition caused it. Otherwise, + * return the set of partitions that are made leader due to this method + * + * TODO: the above may need to be fixed later + */ + private def makeLeaders(controllerId: Int, + controllerEpoch: Int, + partitionStates: Map[Partition, LeaderAndIsrRequest.PartitionState], + correlationId: Int, + responseMap: mutable.Map[TopicPartition, Errors], + highWatermarkCheckpoints: OffsetCheckpoints, + topicIds: String => Option[Uuid]): Set[Partition] = { + val traceEnabled = stateChangeLogger.isTraceEnabled + partitionStates.keys.foreach { partition => + if (traceEnabled) + stateChangeLogger.trace(s"Handling LeaderAndIsr request correlationId $correlationId from " + + s"controller $controllerId epoch $controllerEpoch starting the become-leader transition for " + + s"partition ${partition.topicPartition}") + responseMap.put(partition.topicPartition, Errors.NONE) + } + + val partitionsToMakeLeaders = mutable.Set[Partition]() + + try { + // First stop fetchers for all the partitions + replicaFetcherManager.removeFetcherForPartitions(partitionStates.keySet.map(_.topicPartition)) + stateChangeLogger.info(s"Stopped fetchers as part of LeaderAndIsr request correlationId $correlationId from " + + s"controller $controllerId epoch $controllerEpoch as part of the become-leader transition for " + + s"${partitionStates.size} partitions") + // Update the partition information to be the leader + partitionStates.foreachEntry { (partition, partitionState) => + try { + if (partition.makeLeader(partitionState, highWatermarkCheckpoints, topicIds(partitionState.topicName))) { + partitionsToMakeLeaders += partition + } + } catch { + case e: KafkaStorageException => + stateChangeLogger.error(s"Skipped the become-leader state change with " + + s"correlation id $correlationId from controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + + s"(last update controller epoch ${partitionState.controllerEpoch}) since " + + s"the replica for the partition is offline due to storage error $e") + // If there is an offline log directory, a Partition object may have been created and have been added + // to `ReplicaManager.allPartitions` before `createLogIfNotExists()` failed to create local replica due + // to KafkaStorageException. In this case `ReplicaManager.allPartitions` will map this topic-partition + // to an empty Partition object. We need to map this topic-partition to OfflinePartition instead. + markPartitionOffline(partition.topicPartition) + responseMap.put(partition.topicPartition, Errors.KAFKA_STORAGE_ERROR) + } + } + + } catch { + case e: Throwable => + partitionStates.keys.foreach { partition => + stateChangeLogger.error(s"Error while processing LeaderAndIsr request correlationId $correlationId received " + + s"from controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition}", e) + } + // Re-throw the exception for it to be caught in KafkaApis + throw e + } + + if (traceEnabled) + partitionStates.keys.foreach { partition => + stateChangeLogger.trace(s"Completed LeaderAndIsr request correlationId $correlationId from controller $controllerId " + + s"epoch $controllerEpoch for the become-leader transition for partition ${partition.topicPartition}") + } + + partitionsToMakeLeaders + } + + /* + * Make the current broker to become follower for a given set of partitions by: + * + * 1. Remove these partitions from the leader partitions set. + * 2. Mark the replicas as followers so that no more data can be added from the producer clients. + * 3. Stop fetchers for these partitions so that no more data can be added by the replica fetcher threads. + * 4. Truncate the log and checkpoint offsets for these partitions. + * 5. Clear the produce and fetch requests in the purgatory + * 6. If the broker is not shutting down, add the fetcher to the new leaders. + * + * The ordering of doing these steps make sure that the replicas in transition will not + * take any more messages before checkpointing offsets so that all messages before the checkpoint + * are guaranteed to be flushed to disks + * + * If an unexpected error is thrown in this function, it will be propagated to KafkaApis where + * the error message will be set on each partition since we do not know which partition caused it. Otherwise, + * return the set of partitions that are made follower due to this method + */ + private def makeFollowers(controllerId: Int, + controllerEpoch: Int, + partitionStates: Map[Partition, LeaderAndIsrRequest.PartitionState], + correlationId: Int, + responseMap: mutable.Map[TopicPartition, Errors], + highWatermarkCheckpoints: OffsetCheckpoints, + topicIds: String => Option[Uuid]) : Set[Partition] = { + val traceLoggingEnabled = stateChangeLogger.isTraceEnabled + partitionStates.foreachEntry { (partition, partitionState) => + if (traceLoggingEnabled) + stateChangeLogger.trace(s"Handling LeaderAndIsr request correlationId $correlationId from controller $controllerId " + + s"epoch $controllerEpoch starting the become-follower transition for partition ${partition.topicPartition} with leader " + + s"${partitionState.leader}") + responseMap.put(partition.topicPartition, Errors.NONE) + } + + val partitionsToMakeFollower: mutable.Set[Partition] = mutable.Set() + try { + partitionStates.foreachEntry { (partition, partitionState) => + val newLeaderBrokerId = partitionState.leader + try { + if (metadataCache.hasAliveBroker(newLeaderBrokerId)) { + // Only change partition state when the leader is available + if (partition.makeFollower(partitionState, highWatermarkCheckpoints, topicIds(partitionState.topicName))) { + // Skip invoking onBecomingFollower listeners as the listeners are not registered for zk-based features. + partitionsToMakeFollower += partition + } + } else { + // The leader broker should always be present in the metadata cache. + // If not, we should record the error message and abort the transition process for this partition + stateChangeLogger.error(s"Received LeaderAndIsrRequest with correlation id $correlationId from " + + s"controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + + s"(last update controller epoch ${partitionState.controllerEpoch}) " + + s"but cannot become follower since the new leader $newLeaderBrokerId is unavailable.") + // Create the local replica even if the leader is unavailable. This is required to ensure that we include + // the partition's high watermark in the checkpoint file (see KAFKA-1647) + partition.createLogIfNotExists(isNew = partitionState.isNew, isFutureReplica = false, + highWatermarkCheckpoints, topicIds(partitionState.topicName)) + } + } catch { + case e: KafkaStorageException => + stateChangeLogger.error(s"Skipped the become-follower state change with correlation id $correlationId from " + + s"controller $controllerId epoch $controllerEpoch for partition ${partition.topicPartition} " + + s"(last update controller epoch ${partitionState.controllerEpoch}) with leader " + + s"$newLeaderBrokerId since the replica for the partition is offline due to storage error $e") + // If there is an offline log directory, a Partition object may have been created and have been added + // to `ReplicaManager.allPartitions` before `createLogIfNotExists()` failed to create local replica due + // to KafkaStorageException. In this case `ReplicaManager.allPartitions` will map this topic-partition + // to an empty Partition object. We need to map this topic-partition to OfflinePartition instead. + markPartitionOffline(partition.topicPartition) + responseMap.put(partition.topicPartition, Errors.KAFKA_STORAGE_ERROR) + } + } + + // Stopping the fetchers must be done first in order to initialize the fetch + // position correctly. + replicaFetcherManager.removeFetcherForPartitions(partitionsToMakeFollower.map(_.topicPartition)) + stateChangeLogger.info(s"Stopped fetchers as part of become-follower request from controller $controllerId " + + s"epoch $controllerEpoch with correlation id $correlationId for ${partitionsToMakeFollower.size} partitions") + + partitionsToMakeFollower.foreach { partition => + completeDelayedOperationsWhenNotPartitionLeader(partition.topicPartition, partition.topicId) + } + + if (isShuttingDown.get()) { + if (traceLoggingEnabled) { + partitionsToMakeFollower.foreach { partition => + stateChangeLogger.trace(s"Skipped the adding-fetcher step of the become-follower state " + + s"change with correlation id $correlationId from controller $controllerId epoch $controllerEpoch for " + + s"partition ${partition.topicPartition} with leader ${partitionStates(partition).leader} " + + "since it is shutting down") + } + } + } else { + // we do not need to check if the leader exists again since this has been done at the beginning of this process + val partitionsToMakeFollowerWithLeaderAndOffset = partitionsToMakeFollower.map { partition => + val leaderNode = partition.leaderReplicaIdOpt.flatMap(leaderId => metadataCache. + getAliveBrokerNode(leaderId, config.interBrokerListenerName)).getOrElse(Node.noNode()) + val leader = new BrokerEndPoint(leaderNode.id(), leaderNode.host(), leaderNode.port()) + val log = partition.localLogOrException + val fetchOffset = initialFetchOffset(log) + partition.topicPartition -> InitialFetchState(topicIds(partition.topic), leader, partition.getLeaderEpoch, fetchOffset) + }.toMap + + replicaFetcherManager.addFetcherForPartitions(partitionsToMakeFollowerWithLeaderAndOffset) + } + } catch { + case e: Throwable => + stateChangeLogger.error(s"Error while processing LeaderAndIsr request with correlationId $correlationId " + + s"received from controller $controllerId epoch $controllerEpoch", e) + // Re-throw the exception for it to be caught in KafkaApis + throw e + } + + if (traceLoggingEnabled) + partitionStates.keys.foreach { partition => + stateChangeLogger.trace(s"Completed LeaderAndIsr request correlationId $correlationId from controller $controllerId " + + s"epoch $controllerEpoch for the become-follower transition for partition ${partition.topicPartition} with leader " + + s"${partitionStates(partition).leader}") + } + + partitionsToMakeFollower + } + + private def updateTopicIdForFollowers(controllerId: Int, + controllerEpoch: Int, + partitions: Set[Partition], + correlationId: Int, + topicIds: String => Option[Uuid]): Unit = { + val traceLoggingEnabled = stateChangeLogger.isTraceEnabled + + try { + if (isShuttingDown.get()) { + if (traceLoggingEnabled) { + partitions.foreach { partition => + stateChangeLogger.trace(s"Skipped the update topic ID step of the become-follower state " + + s"change with correlation id $correlationId from controller $controllerId epoch $controllerEpoch for " + + s"partition ${partition.topicPartition} since it is shutting down") + } + } + } else { + val partitionsToUpdateFollowerWithLeader = mutable.Map.empty[TopicPartition, Int] + partitions.foreach { partition => + partition.leaderReplicaIdOpt.foreach { leader => + if (metadataCache.hasAliveBroker(leader)) { + partitionsToUpdateFollowerWithLeader += partition.topicPartition -> leader + } + } + } + replicaFetcherManager.maybeUpdateTopicIds(partitionsToUpdateFollowerWithLeader, topicIds) + } + } catch { + case e: Throwable => + stateChangeLogger.error(s"Error while processing LeaderAndIsr request with correlationId $correlationId " + + s"received from controller $controllerId epoch $controllerEpoch when trying to update topic IDs in the fetchers", e) + // Re-throw the exception for it to be caught in KafkaApis + throw e + } + } + /** * From IBP 2.7 onwards, we send latest fetch epoch in the request and truncate if a * diverging epoch is returned in the response, avoiding the need for a separate * OffsetForLeaderEpoch request. */ protected def initialFetchOffset(log: UnifiedLog): Long = { - if (log.latestEpoch.isPresent) + if (log.latestEpoch.nonEmpty) log.logEndOffset else log.highWatermark @@ -2090,7 +2429,7 @@ class ReplicaManager(val config: KafkaConfig, trace("Evaluating ISR list of partitions to see which replicas can be removed from the ISR") // Shrink ISRs for non offline partitions - allPartitions.forEach { (topicPartition, _) => + allPartitions.keys.foreach { topicPartition => onlinePartition(topicPartition).foreach(_.maybeShrinkIsr()) } } @@ -2213,7 +2552,7 @@ class ReplicaManager(val config: KafkaConfig, delayedShareFetchPurgatory.shutdown() if (checkpointHW) checkpointHighWatermarks() - replicaSelectorPlugin.foreach(_.close) + replicaSelectorOpt.foreach(_.close) removeAllTopicMetrics() addPartitionsToTxnManager.foreach(_.shutdown()) info("Shut down completely") @@ -2221,25 +2560,25 @@ class ReplicaManager(val config: KafkaConfig, private def removeAllTopicMetrics(): Unit = { val allTopics = new util.HashSet[String] - allPartitions.forEach((partition, _) => + allPartitions.keys.foreach(partition => if (allTopics.add(partition.topic())) { brokerTopicStats.removeMetrics(partition.topic()) }) } - protected def createReplicaFetcherManager(metrics: Metrics, time: Time, quotaManager: ReplicationQuotaManager) = { - new ReplicaFetcherManager(config, this, metrics, time, quotaManager, () => metadataCache.metadataVersion(), brokerEpochSupplier) + protected def createReplicaFetcherManager(metrics: Metrics, time: Time, threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager) = { + new ReplicaFetcherManager(config, this, metrics, time, threadNamePrefix, quotaManager, () => metadataCache.metadataVersion(), brokerEpochSupplier) } protected def createReplicaAlterLogDirsManager(quotaManager: ReplicationQuotaManager, brokerTopicStats: BrokerTopicStats) = { new ReplicaAlterLogDirsManager(config, this, quotaManager, brokerTopicStats, directoryEventHandler) } - private def createReplicaSelector(metrics: Metrics): Option[Plugin[ReplicaSelector]] = { + private def createReplicaSelector(): Option[ReplicaSelector] = { config.replicaSelectorClassName.map { className => - val tmpReplicaSelector: ReplicaSelector = Utils.newInstance(className, classOf[ReplicaSelector]) + val tmpReplicaSelector: ReplicaSelector = CoreUtils.createObject[ReplicaSelector](className) tmpReplicaSelector.configure(config.originals()) - Plugin.wrapInstance(tmpReplicaSelector, metrics, ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG) + tmpReplicaSelector } } @@ -2391,7 +2730,7 @@ class ReplicaManager(val config: KafkaConfig, replicaFetcherManager.shutdownIdleFetcherThreads() replicaAlterLogDirsManager.shutdownIdleFetcherThreads() - remoteLogManager.foreach(rlm => rlm.onLeadershipChange((leaderChangedPartitions.toSet: Set[TopicPartitionLog]).asJava, (followerChangedPartitions.toSet: Set[TopicPartitionLog]).asJava, localChanges.topicIds())) + remoteLogManager.foreach(rlm => rlm.onLeadershipChange(leaderChangedPartitions.asJava, followerChangedPartitions.asJava, localChanges.topicIds())) } if (metadataVersion.isDirectoryAssignmentSupported) { @@ -2414,8 +2753,9 @@ class ReplicaManager(val config: KafkaConfig, localLeaders.foreachEntry { (tp, info) => getOrCreatePartition(tp, delta, info.topicId).foreach { case (partition, isNew) => try { + val state = info.partition.toLeaderAndIsrPartitionState(tp, isNew) val partitionAssignedDirectoryId = directoryIds.find(_._1.topicPartition() == tp).map(_._2) - partition.makeLeader(info.partition, isNew, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) + partition.makeLeader(state, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) changedPartitions.add(partition) } catch { @@ -2455,8 +2795,9 @@ class ReplicaManager(val config: KafkaConfig, // - This also ensures that the local replica is created even if the leader // is unavailable. This is required to ensure that we include the partition's // high watermark in the checkpoint file (see KAFKA-1647). + val state = info.partition.toLeaderAndIsrPartitionState(tp, isNew) val partitionAssignedDirectoryId = directoryIds.find(_._1.topicPartition() == tp).map(_._2) - val isNewLeaderEpoch = partition.makeFollower(info.partition, isNew, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) + val isNewLeaderEpoch = partition.makeFollower(state, offsetCheckpoints, Some(info.topicId), partitionAssignedDirectoryId) if (isInControlledShutdown && (info.partition.leader == NO_LEADER || !info.partition.isr.contains(config.brokerId))) { @@ -2508,7 +2849,7 @@ class ReplicaManager(val config: KafkaConfig, case Some(node) => val log = partition.localLogOrException partitionAndOffsets.put(topicPartition, InitialFetchState( - log.topicId.toScala, + log.topicId, new BrokerEndPoint(node.id, node.host, node.port), partition.getLeaderEpoch, initialFetchOffset(log) diff --git a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala index 083e0d7cafff8..8229607b5be3d 100644 --- a/core/src/main/scala/kafka/server/RequestHandlerHelper.scala +++ b/core/src/main/scala/kafka/server/RequestHandlerHelper.scala @@ -23,7 +23,7 @@ import org.apache.kafka.common.errors.ClusterAuthorizationException import org.apache.kafka.common.network.Send import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse} import org.apache.kafka.common.utils.Time -import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ThrottleCallback} +import org.apache.kafka.server.quota.ThrottleCallback class RequestHandlerHelper( requestChannel: RequestChannel, @@ -40,7 +40,7 @@ class RequestHandlerHelper( override def startThrottling(): Unit = requestChannel.startThrottling(request) override def endThrottling(): Unit = requestChannel.endThrottling(request) } - quotaManager.throttle(request.header.clientId(), request.session, callback, throttleTimeMs) + quotaManager.throttle(request, callback, throttleTimeMs) } def handleError(request: RequestChannel.Request, e: Throwable): Unit = { diff --git a/core/src/main/scala/kafka/server/SharedServer.scala b/core/src/main/scala/kafka/server/SharedServer.scala index aba9035cb7e94..69d2353fb833a 100644 --- a/core/src/main/scala/kafka/server/SharedServer.scala +++ b/core/src/main/scala/kafka/server/SharedServer.scala @@ -37,7 +37,7 @@ import org.apache.kafka.raft.Endpoints import org.apache.kafka.server.{ProcessRole, ServerSocketFactory} import org.apache.kafka.server.common.ApiMessageAndVersion import org.apache.kafka.server.fault.{FaultHandler, LoggingFaultHandler, ProcessTerminatingFaultHandler} -import org.apache.kafka.server.metrics.{BrokerServerMetrics, KafkaYammerMetrics, NodeMetrics} +import org.apache.kafka.server.metrics.{BrokerServerMetrics, KafkaYammerMetrics} import java.net.InetSocketAddress import java.util.Arrays @@ -116,7 +116,6 @@ class SharedServer( @volatile var raftManager: KafkaRaftManager[ApiMessageAndVersion] = _ @volatile var brokerMetrics: BrokerServerMetrics = _ @volatile var controllerServerMetrics: ControllerMetadataMetrics = _ - @volatile var nodeMetrics: NodeMetrics = _ @volatile var loader: MetadataLoader = _ private val snapshotsDisabledReason = new AtomicReference[String](null) @volatile var snapshotEmitter: SnapshotEmitter = _ @@ -299,7 +298,6 @@ class SharedServer( raftManager = _raftManager _raftManager.startup() - nodeMetrics = new NodeMetrics(metrics, controllerConfig.unstableFeatureVersionsEnabled) metadataLoaderMetrics = if (brokerMetrics != null) { new MetadataLoaderMetrics(Optional.of(KafkaYammerMetrics.defaultRegistry()), elapsedNs => brokerMetrics.updateBatchProcessingTime(elapsedNs), @@ -342,7 +340,7 @@ class SharedServer( throw new RuntimeException("Unable to install metadata publishers.", t) } } - _raftManager.client.register(loader) + _raftManager.register(loader) debug("Completed SharedServer startup.") started = true } catch { @@ -389,8 +387,6 @@ class SharedServer( controllerServerMetrics = null Utils.closeQuietly(brokerMetrics, "broker metrics") brokerMetrics = null - Utils.closeQuietly(nodeMetrics, "node metrics") - nodeMetrics = null Utils.closeQuietly(metrics, "metrics") metrics = null CoreUtils.swallow(AppInfoParser.unregisterAppInfo(MetricsPrefix, sharedServerConfig.nodeId.toString, metrics), this) diff --git a/core/src/main/scala/kafka/server/metadata/AclPublisher.scala b/core/src/main/scala/kafka/server/metadata/AclPublisher.scala new file mode 100644 index 0000000000000..43fb2058df38c --- /dev/null +++ b/core/src/main/scala/kafka/server/metadata/AclPublisher.scala @@ -0,0 +1,102 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import kafka.utils.Logging +import org.apache.kafka.image.loader.{LoaderManifest, LoaderManifestType} +import org.apache.kafka.image.{MetadataDelta, MetadataImage} +import org.apache.kafka.metadata.authorizer.ClusterMetadataAuthorizer +import org.apache.kafka.server.authorizer.Authorizer +import org.apache.kafka.server.fault.FaultHandler + +import scala.concurrent.TimeoutException + + +class AclPublisher( + nodeId: Int, + faultHandler: FaultHandler, + nodeType: String, + authorizer: Option[Authorizer], +) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { + logIdent = s"[${name()}] " + + override def name(): String = s"AclPublisher $nodeType id=$nodeId" + + private var completedInitialLoad = false + + override def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + manifest: LoaderManifest + ): Unit = { + val deltaName = s"MetadataDelta up to ${newImage.offset()}" + + // Apply changes to ACLs. This needs to be handled carefully because while we are + // applying these changes, the Authorizer is continuing to return authorization + // results in other threads. We never want to expose an invalid state. For example, + // if the user created a DENY ALL acl and then created an ALLOW ACL for topic foo, + // we want to apply those changes in that order, not the reverse order! Otherwise + // there could be a window during which incorrect authorization results are returned. + Option(delta.aclsDelta()).foreach { aclsDelta => + authorizer match { + case Some(authorizer: ClusterMetadataAuthorizer) => if (manifest.`type`().equals(LoaderManifestType.SNAPSHOT)) { + try { + // If the delta resulted from a snapshot load, we want to apply the new changes + // all at once using ClusterMetadataAuthorizer#loadSnapshot. If this is the + // first snapshot load, it will also complete the futures returned by + // Authorizer#start (which we wait for before processing RPCs). + info(s"Loading authorizer snapshot at offset ${newImage.offset()}") + authorizer.loadSnapshot(newImage.acls().acls()) + } catch { + case t: Throwable => faultHandler.handleFault("Error loading " + + s"authorizer snapshot in $deltaName", t) + } + } else { + try { + // Because the changes map is a LinkedHashMap, the deltas will be returned in + // the order they were performed. + aclsDelta.changes().forEach((key, value) => + if (value.isPresent) { + authorizer.addAcl(key, value.get()) + } else { + authorizer.removeAcl(key) + }) + } catch { + case t: Throwable => faultHandler.handleFault("Error loading " + + s"authorizer changes in $deltaName", t) + } + } + if (!completedInitialLoad) { + // If we are receiving this onMetadataUpdate call, that means the MetadataLoader has + // loaded up to the local high water mark. So we complete the initial load, enabling + // the authorizer. + completedInitialLoad = true + authorizer.completeInitialLoad() + } + case _ => // No ClusterMetadataAuthorizer is configured. There is nothing to do. + } + } + } + + override def close(): Unit = { + authorizer match { + case Some(authorizer: ClusterMetadataAuthorizer) => authorizer.completeInitialLoad(new TimeoutException) + case _ => + } + } +} diff --git a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala index 8df8a27558008..1985f04348f75 100644 --- a/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/BrokerMetadataPublisher.scala @@ -20,24 +20,18 @@ package kafka.server.metadata import java.util.OptionalInt import kafka.coordinator.transaction.TransactionCoordinator import kafka.log.LogManager -import kafka.server.share.SharePartitionManager import kafka.server.{KafkaConfig, ReplicaManager} import kafka.utils.Logging import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.TimeoutException import org.apache.kafka.common.internals.Topic -import org.apache.kafka.coordinator.common.runtime.{KRaftCoordinatorMetadataDelta, KRaftCoordinatorMetadataImage} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image.loader.LoaderManifest import org.apache.kafka.image.publisher.MetadataPublisher import org.apache.kafka.image.{MetadataDelta, MetadataImage, TopicDelta} -import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} -import org.apache.kafka.server.common.MetadataVersion.MINIMUM_VERSION -import org.apache.kafka.server.common.{FinalizedFeatures, RequestLocal, ShareVersion} +import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.fault.FaultHandler -import org.apache.kafka.storage.internals.log.{LogManager => JLogManager} import java.util.concurrent.CompletableFuture import scala.collection.mutable @@ -74,8 +68,7 @@ class BrokerMetadataPublisher( replicaManager: ReplicaManager, groupCoordinator: GroupCoordinator, txnCoordinator: TransactionCoordinator, - shareCoordinator: ShareCoordinator, - sharePartitionManager: SharePartitionManager, + shareCoordinator: Option[ShareCoordinator], var dynamicConfigPublisher: DynamicConfigPublisher, dynamicClientQuotaPublisher: DynamicClientQuotaPublisher, dynamicTopicClusterQuotaPublisher: DynamicTopicClusterQuotaPublisher, @@ -83,7 +76,7 @@ class BrokerMetadataPublisher( delegationTokenPublisher: DelegationTokenPublisher, aclPublisher: AclPublisher, fatalFaultHandler: FaultHandler, - metadataPublishingFaultHandler: FaultHandler + metadataPublishingFaultHandler: FaultHandler, ) extends MetadataPublisher with Logging { logIdent = s"[BrokerMetadataPublisher id=${config.nodeId}] " @@ -104,11 +97,6 @@ class BrokerMetadataPublisher( */ val firstPublishFuture = new CompletableFuture[Void] - /** - * The share version being used in the broker metadata. - */ - private var finalizedShareVersion: Short = FinalizedFeatures.fromKRaftVersion(MINIMUM_VERSION).finalizedFeatures().getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort) - override def name(): String = "BrokerMetadataPublisher" override def onMetadataUpdate( @@ -175,16 +163,18 @@ class BrokerMetadataPublisher( case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating txn " + s"coordinator with local changes in $deltaName", t) } - try { - updateCoordinator(newImage, - delta, - Topic.SHARE_GROUP_STATE_TOPIC_NAME, - shareCoordinator.onElection, - (partitionIndex, leaderEpochOpt) => shareCoordinator.onResignation(partitionIndex, toOptionalInt(leaderEpochOpt)) - ) - } catch { - case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + - s"coordinator with local changes in $deltaName", t) + if (shareCoordinator.isDefined) { + try { + updateCoordinator(newImage, + delta, + Topic.SHARE_GROUP_STATE_TOPIC_NAME, + shareCoordinator.get.onElection, + (partitionIndex, leaderEpochOpt) => shareCoordinator.get.onResignation(partitionIndex, toOptionalInt(leaderEpochOpt)) + ) + } catch { + case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + + s"coordinator with local changes in $deltaName", t) + } } try { // Notify the group coordinator about deleted topics. @@ -202,16 +192,6 @@ class BrokerMetadataPublisher( case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating group " + s"coordinator with deleted partitions in $deltaName", t) } - try { - // Notify the share coordinator about deleted topics. - val deletedTopicIds = topicsDelta.deletedTopicIds() - if (!deletedTopicIds.isEmpty) { - shareCoordinator.onTopicsDeleted(topicsDelta.deletedTopicIds, RequestLocal.noCaching.bufferSupplier) - } - } catch { - case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + - s"coordinator with deleted partitions in $deltaName", t) - } } // Apply configuration deltas. @@ -224,17 +204,17 @@ class BrokerMetadataPublisher( dynamicTopicClusterQuotaPublisher.onMetadataUpdate(delta, newImage) // Apply SCRAM delta. - scramPublisher.onMetadataUpdate(delta, newImage, manifest) + scramPublisher.onMetadataUpdate(delta, newImage) // Apply DelegationToken delta. - delegationTokenPublisher.onMetadataUpdate(delta, newImage, manifest) + delegationTokenPublisher.onMetadataUpdate(delta, newImage) // Apply ACL delta. aclPublisher.onMetadataUpdate(delta, newImage, manifest) try { // Propagate the new image to the group coordinator. - groupCoordinator.onNewMetadataImage(new KRaftCoordinatorMetadataImage(newImage), new KRaftCoordinatorMetadataDelta(delta)) + groupCoordinator.onNewMetadataImage(newImage, delta) } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating group " + s"coordinator with local changes in $deltaName", t) @@ -242,7 +222,7 @@ class BrokerMetadataPublisher( try { // Propagate the new image to the share coordinator. - shareCoordinator.onNewMetadataImage(new KRaftCoordinatorMetadataImage(newImage), newImage.features(), new KRaftCoordinatorMetadataDelta(delta)) + shareCoordinator.foreach(coordinator => coordinator.onNewMetadataImage(newImage, delta)) } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share " + s"coordinator with local changes in $deltaName", t) @@ -251,24 +231,6 @@ class BrokerMetadataPublisher( if (_firstPublish) { finishInitializingReplicaManager() } - - if (delta.featuresDelta != null) { - try { - val newFinalizedFeatures = new FinalizedFeatures(newImage.features.metadataVersionOrThrow, newImage.features.finalizedVersions, newImage.provenance.lastContainedOffset) - val newFinalizedShareVersion = newFinalizedFeatures.finalizedFeatures().getOrDefault(ShareVersion.FEATURE_NAME, 0.toShort) - // Share version feature has been toggled. - if (newFinalizedShareVersion != finalizedShareVersion) { - finalizedShareVersion = newFinalizedShareVersion - val shareVersion: ShareVersion = ShareVersion.fromFeatureLevel(finalizedShareVersion) - info(s"Feature share.version has been updated to version $finalizedShareVersion") - sharePartitionManager.onShareVersionToggle(shareVersion, config.shareGroupConfig.isShareGroupEnabled) - } - } catch { - case t: Throwable => metadataPublishingFaultHandler.handleFault("Error updating share partition manager " + - s" with share version feature change in $deltaName", t) - } - } - } catch { case t: Throwable => metadataPublishingFaultHandler.handleFault("Uncaught exception while " + s"publishing broker metadata from $deltaName", t) @@ -288,11 +250,6 @@ class BrokerMetadataPublisher( /** * Update the coordinator of local replica changes: election and resignation. * - * When the topic is deleted or a partition of the topic is deleted, {@param resignation} - * callback must be called with {@code None}. The coordinator expects the leader epoch to be - * incremented when the {@param resignation} callback is called but the leader epoch - * is not incremented when a topic is deleted. - * * @param image latest metadata image * @param delta metadata delta from the previous image and the latest image * @param topicName name of the topic associated with the coordinator @@ -313,7 +270,7 @@ class BrokerMetadataPublisher( if (topicsDelta.topicWasDeleted(topicName)) { topicsDelta.image.getTopic(topicName).partitions.entrySet.forEach { entry => if (entry.getValue.leader == brokerId) { - resignation(entry.getKey, None) + resignation(entry.getKey, Some(entry.getValue.leaderEpoch)) } } } @@ -340,8 +297,8 @@ class BrokerMetadataPublisher( // Start log manager, which will perform (potentially lengthy) // recovery-from-unclean-shutdown if required. logManager.startup( - metadataCache.getAllTopics().asScala, - isStray = log => JLogManager.isStrayKraftReplica(brokerId, newImage.topics(), log) + metadataCache.getAllTopics(), + isStray = log => LogManager.isStrayKraftReplica(brokerId, newImage.topics(), log) ) // Rename all future replicas which are in the same directory as the @@ -368,24 +325,25 @@ class BrokerMetadataPublisher( try { // Start the group coordinator. groupCoordinator.startup(() => metadataCache.numPartitions(Topic.GROUP_METADATA_TOPIC_NAME) - .orElse(config.groupCoordinatorConfig.offsetsTopicPartitions)) + .getOrElse(config.groupCoordinatorConfig.offsetsTopicPartitions)) } catch { case t: Throwable => fatalFaultHandler.handleFault("Error starting GroupCoordinator", t) } try { - val transactionLogConfig = new TransactionLogConfig(config) // Start the transaction coordinator. txnCoordinator.startup(() => metadataCache.numPartitions( - Topic.TRANSACTION_STATE_TOPIC_NAME).orElse(transactionLogConfig.transactionTopicPartitions)) + Topic.TRANSACTION_STATE_TOPIC_NAME).getOrElse(config.transactionLogConfig.transactionTopicPartitions)) } catch { case t: Throwable => fatalFaultHandler.handleFault("Error starting TransactionCoordinator", t) } - try { - // Start the share coordinator. - shareCoordinator.startup(() => metadataCache.numPartitions(Topic.SHARE_GROUP_STATE_TOPIC_NAME) - .orElse(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions())) - } catch { - case t: Throwable => fatalFaultHandler.handleFault("Error starting Share coordinator", t) + if (config.shareGroupConfig.isShareGroupEnabled && shareCoordinator.isDefined) { + try { + // Start the share coordinator. + shareCoordinator.get.startup(() => metadataCache.numPartitions( + Topic.SHARE_GROUP_STATE_TOPIC_NAME).getOrElse(config.shareCoordinatorConfig.shareCoordinatorStateTopicNumPartitions())) + } catch { + case t: Throwable => fatalFaultHandler.handleFault("Error starting Share coordinator", t) + } } } diff --git a/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala b/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala index cda7661907dd9..8fae9941b4112 100644 --- a/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala +++ b/core/src/main/scala/kafka/server/metadata/ClientQuotaMetadataManager.scala @@ -18,22 +18,24 @@ package kafka.server.metadata import kafka.network.ConnectionQuotas +import kafka.server.ClientQuotaManager +import kafka.server.ClientQuotaManager.BaseUserEntity import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.ClientQuotaMetadataManager.transferToClientQuotaEntity import kafka.utils.Logging import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.quota.ClientQuotaEntity -import org.apache.kafka.server.quota.ClientQuotaEntity.ConfigEntity import org.apache.kafka.common.utils.Sanitizer +import org.apache.kafka.server.quota.ClientQuotaEntity.{ConfigEntity => ClientQuotaConfigEntity} import java.net.{InetAddress, UnknownHostException} -import java.util.Optional import org.apache.kafka.image.{ClientQuotaDelta, ClientQuotasDelta} import org.apache.kafka.server.config.QuotaConfig -import org.apache.kafka.server.quota.ClientQuotaManager import scala.jdk.OptionConverters.RichOptionalDouble + + // A strict hierarchy of entities that we support sealed trait QuotaEntity case class IpEntity(ip: String) extends QuotaEntity @@ -148,13 +150,13 @@ class ClientQuotaMetadataManager(private[metadata] val quotaManagers: QuotaManag // Convert entity into Options with sanitized values for QuotaManagers val (userEntity, clientEntity) = transferToClientQuotaEntity(quotaEntity) - val quotaValue = newValue.map(v => Optional.of(new Quota(v, true))).getOrElse(Optional.empty[Quota]()) + val quotaValue = newValue.map(new Quota(_, true)) try { manager.updateQuota( - userEntity, - clientEntity, - quotaValue + userEntity = userEntity, + clientEntity = clientEntity, + quota = quotaValue ) } catch { case t: Throwable => error(s"Failed to update user-client quota $quotaEntity", t) @@ -164,24 +166,24 @@ class ClientQuotaMetadataManager(private[metadata] val quotaManagers: QuotaManag object ClientQuotaMetadataManager { - def transferToClientQuotaEntity(quotaEntity: QuotaEntity): (Optional[ConfigEntity], Optional[ConfigEntity]) = { + def transferToClientQuotaEntity(quotaEntity: QuotaEntity): (Option[BaseUserEntity], Option[ClientQuotaConfigEntity]) = { quotaEntity match { case UserEntity(user) => - (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.empty()) + (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), None) case DefaultUserEntity => - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty()) + (Some(ClientQuotaManager.DefaultUserEntity), None) case ClientIdEntity(clientId) => - (Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) + (None, Some(ClientQuotaManager.ClientIdEntity(clientId))) case DefaultClientIdEntity => - (Optional.empty(), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) + (None, Some(ClientQuotaManager.DefaultClientIdEntity)) case ExplicitUserExplicitClientIdEntity(user, clientId) => - (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) + (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Some(ClientQuotaManager.ClientIdEntity(clientId))) case ExplicitUserDefaultClientIdEntity(user) => - (Optional.of(new ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) + (Some(ClientQuotaManager.UserEntity(Sanitizer.sanitize(user))), Some(ClientQuotaManager.DefaultClientIdEntity)) case DefaultUserExplicitClientIdEntity(clientId) => - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(new ClientQuotaManager.ClientIdEntity(clientId))) + (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.ClientIdEntity(clientId))) case DefaultUserDefaultClientIdEntity => - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)) + (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.DefaultClientIdEntity)) case IpEntity(_) | DefaultIpEntity => throw new IllegalStateException("Should not see IP quota entities here") } } diff --git a/core/src/main/scala/kafka/server/metadata/ConfigRepository.scala b/core/src/main/scala/kafka/server/metadata/ConfigRepository.scala new file mode 100644 index 0000000000000..9f59a07ff57eb --- /dev/null +++ b/core/src/main/scala/kafka/server/metadata/ConfigRepository.scala @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import java.util.Properties + +import org.apache.kafka.common.config.ConfigResource +import org.apache.kafka.common.config.ConfigResource.Type + +trait ConfigRepository { + /** + * Return a copy of the topic configuration for the given topic. Future changes will not be reflected. + * + * @param topicName the name of the topic for which the configuration will be returned + * @return a copy of the topic configuration for the given topic + */ + def topicConfig(topicName: String): Properties = { + config(new ConfigResource(Type.TOPIC, topicName)) + } + + /** + * Return a copy of the broker configuration for the given broker. Future changes will not be reflected. + * + * @param brokerId the id of the broker for which configuration will be returned + * @return a copy of the broker configuration for the given broker + */ + def brokerConfig(brokerId: Int): Properties = { + config(new ConfigResource(Type.BROKER, brokerId.toString)) + } + + /** + * Return a copy of the group configuration for the given group. Future changes will not be reflected. + * + * @param groupName the name of the group for which configuration will be returned + * @return a copy of the group configuration for the given group + */ + def groupConfig(groupName: String): Properties = { + config(new ConfigResource(Type.GROUP, groupName)) + } + + /** + * Return a copy of the configuration for the given resource. Future changes will not be reflected. + * @param configResource the resource for which the configuration will be returned + * @return a copy of the configuration for the given resource + */ + def config(configResource: ConfigResource): Properties +} diff --git a/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala b/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala new file mode 100644 index 0000000000000..34e14442b4d6d --- /dev/null +++ b/core/src/main/scala/kafka/server/metadata/DelegationTokenPublisher.scala @@ -0,0 +1,83 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import kafka.server.DelegationTokenManager +import kafka.server.KafkaConfig +import kafka.utils.Logging +import org.apache.kafka.image.loader.LoaderManifest +import org.apache.kafka.image.{MetadataDelta, MetadataImage} +import org.apache.kafka.server.fault.FaultHandler + + +class DelegationTokenPublisher( + conf: KafkaConfig, + faultHandler: FaultHandler, + nodeType: String, + tokenManager: DelegationTokenManager, +) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { + logIdent = s"[${name()}] " + + var _firstPublish = true + + override def name(): String = s"DelegationTokenPublisher $nodeType id=${conf.nodeId}" + + override def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + manifest: LoaderManifest + ): Unit = { + onMetadataUpdate(delta, newImage) + } + + def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + ): Unit = { + val deltaName = if (_firstPublish) { + s"initial MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" + } else { + s"update MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" + } + try { + if (_firstPublish) { + // Initialize the tokenCache with the Image + Option(newImage.delegationTokens()).foreach { delegationTokenImage => + delegationTokenImage.tokens().forEach { (_, delegationTokenData) => + tokenManager.updateToken(tokenManager.getDelegationToken(delegationTokenData.tokenInformation())) + } + } + _firstPublish = false + } + // Apply changes to DelegationTokens. + Option(delta.delegationTokenDelta()).foreach { delegationTokenDelta => + delegationTokenDelta.changes().forEach { + case (tokenId, delegationTokenData) => + if (delegationTokenData.isPresent) { + tokenManager.updateToken(tokenManager.getDelegationToken(delegationTokenData.get().tokenInformation())) + } else { + tokenManager.removeToken(tokenId) + } + } + } + } catch { + case t: Throwable => faultHandler.handleFault("Uncaught exception while " + + s"publishing DelegationToken changes from $deltaName", t) + } + } +} diff --git a/core/src/main/scala/kafka/server/metadata/DynamicConfigPublisher.scala b/core/src/main/scala/kafka/server/metadata/DynamicConfigPublisher.scala index d30d5dd246745..6904921fb0cfa 100644 --- a/core/src/main/scala/kafka/server/metadata/DynamicConfigPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/DynamicConfigPublisher.scala @@ -31,7 +31,7 @@ import org.apache.kafka.server.fault.FaultHandler class DynamicConfigPublisher( conf: KafkaConfig, faultHandler: FaultHandler, - dynamicConfigHandlers: Map[ConfigType, ConfigHandler], + dynamicConfigHandlers: Map[String, ConfigHandler], nodeType: String, ) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { logIdent = s"[${name()}] " diff --git a/core/src/main/scala/kafka/server/metadata/DynamicTopicClusterQuotaPublisher.scala b/core/src/main/scala/kafka/server/metadata/DynamicTopicClusterQuotaPublisher.scala index 7798c18b4d69b..68788ffe3cd18 100644 --- a/core/src/main/scala/kafka/server/metadata/DynamicTopicClusterQuotaPublisher.scala +++ b/core/src/main/scala/kafka/server/metadata/DynamicTopicClusterQuotaPublisher.scala @@ -13,12 +13,11 @@ **/ package kafka.server.metadata -import kafka.server.KafkaConfig +import kafka.server.{KafkaConfig, MetadataCache} import kafka.server.QuotaFactory.QuotaManagers import kafka.utils.Logging import org.apache.kafka.image.{MetadataDelta, MetadataImage} import org.apache.kafka.image.loader.LoaderManifest -import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.fault.FaultHandler /** @@ -50,10 +49,10 @@ class DynamicTopicClusterQuotaPublisher ( newImage: MetadataImage, ): Unit = { try { - quotaManagers.clientQuotaCallbackPlugin().ifPresent(plugin => { + quotaManagers.clientQuotaCallback().ifPresent(clientQuotaCallback => { if (delta.topicsDelta() != null || delta.clusterDelta() != null) { val cluster = MetadataCache.toCluster(clusterId, newImage) - if (plugin.get().updateClusterMetadata(cluster)) { + if (clientQuotaCallback.updateClusterMetadata(cluster)) { quotaManagers.fetch.updateQuotaMetricConfigs() quotaManagers.produce.updateQuotaMetricConfigs() quotaManagers.request.updateQuotaMetricConfigs() diff --git a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala index 88b2cf07012a6..aa993d319668d 100644 --- a/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala +++ b/core/src/main/scala/kafka/server/metadata/KRaftMetadataCache.scala @@ -17,7 +17,9 @@ package kafka.server.metadata +import kafka.server.{CachedControllerId, KRaftCachedControllerId, MetadataCache} import kafka.utils.Logging +import org.apache.kafka.admin.BrokerMetadata import org.apache.kafka.common._ import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.errors.InvalidTopicException @@ -29,15 +31,15 @@ import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.MetadataResponse import org.apache.kafka.image.MetadataImage -import org.apache.kafka.metadata.{BrokerRegistration, LeaderAndIsr, MetadataCache, PartitionRegistration, Replicas} +import org.apache.kafka.metadata.{BrokerRegistration, LeaderAndIsr, PartitionRegistration, Replicas} import org.apache.kafka.server.common.{FinalizedFeatures, KRaftVersion, MetadataVersion} import java.util import java.util.concurrent.ThreadLocalRandom -import java.util.function.{Predicate, Supplier} -import java.util.stream.Collectors -import java.util.Properties +import java.util.function.Supplier +import java.util.{Collections, Properties} import scala.collection.mutable.ListBuffer +import scala.collection.{Map, Seq, Set, mutable} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOptional import scala.util.control.Breaks._ @@ -46,7 +48,7 @@ import scala.util.control.Breaks._ class KRaftMetadataCache( val brokerId: Int, val kraftVersionSupplier: Supplier[KRaftVersion] -) extends MetadataCache with Logging { +) extends MetadataCache with Logging with ConfigRepository { this.logIdent = s"[MetadataCache brokerId=$brokerId] " // This is the cache state. Every MetadataImage instance is immutable, and updates @@ -148,7 +150,9 @@ class KRaftMetadataCache( * @param topicName The name of the topic. * @param listenerName The listener name. * @param startIndex The smallest index of the partitions to be included in the result. - * + * @param upperIndex The upper limit of the index of the partitions to be included in the result. + * Note that, the upper index can be larger than the largest partition index in + * this topic. * @return A collection of topic partition metadata and next partition index (-1 means * no next partition). */ @@ -237,29 +241,43 @@ class KRaftMetadataCache( } // errorUnavailableEndpoints exists to support v0 MetadataResponses - override def getTopicMetadata(topics: util.Set[String], + override def getTopicMetadata(topics: Set[String], listenerName: ListenerName, errorUnavailableEndpoints: Boolean = false, - errorUnavailableListeners: Boolean = false): util.List[MetadataResponseTopic] = { + errorUnavailableListeners: Boolean = false): Seq[MetadataResponseTopic] = { val image = _currentImage - topics.stream().flatMap(topic => - getPartitionMetadata(image, topic, listenerName, errorUnavailableEndpoints, errorUnavailableListeners) match { - case Some(partitionMetadata) => - util.stream.Stream.of(new MetadataResponseTopic() - .setErrorCode(Errors.NONE.code) - .setName(topic) - .setTopicId(Option(image.topics().getTopic(topic).id()).getOrElse(Uuid.ZERO_UUID)) - .setIsInternal(Topic.isInternal(topic)) - .setPartitions(partitionMetadata.toBuffer.asJava)) - case None => util.stream.Stream.empty() + topics.toSeq.flatMap { topic => + getPartitionMetadata(image, topic, listenerName, errorUnavailableEndpoints, errorUnavailableListeners).map { partitionMetadata => + new MetadataResponseTopic() + .setErrorCode(Errors.NONE.code) + .setName(topic) + .setTopicId(Option(image.topics().getTopic(topic).id()).getOrElse(Uuid.ZERO_UUID)) + .setIsInternal(Topic.isInternal(topic)) + .setPartitions(partitionMetadata.toBuffer.asJava) } - ).collect(Collectors.toList()) + } } - override def describeTopicResponse( - topics: util.Iterator[String], + /** + * Get the topic metadata for the given topics. + * + * The quota is used to limit the number of partitions to return. The NextTopicPartition field points to the first + * partition can't be returned due the limit. + * If a topic can't return any partition due to quota limit reached, this topic will not be included in the response. + * + * Note, the topics should be sorted in alphabetical order. The topics in the DescribeTopicPartitionsResponseData + * will also be sorted in alphabetical order. + * + * @param topics The iterator of topics and their corresponding first partition id to fetch. + * @param listenerName The listener name. + * @param firstTopicPartitionStartIndex The start partition index for the first topic + * @param maximumNumberOfPartitions The max number of partitions to return. + * @param ignoreTopicsWithExceptions Whether ignore the topics with exception. + */ + def getTopicMetadataForDescribeTopicResponse( + topics: Iterator[String], listenerName: ListenerName, - topicPartitionStartIndex: util.function.Function[String, Integer], + topicPartitionStartIndex: String => Int, maximumNumberOfPartitions: Int, ignoreTopicsWithExceptions: Boolean ): DescribeTopicPartitionsResponseData = { @@ -267,7 +285,7 @@ class KRaftMetadataCache( var remaining = maximumNumberOfPartitions val result = new DescribeTopicPartitionsResponseData() breakable { - topics.forEachRemaining { topicName => + topics.foreach { topicName => if (remaining > 0) { val (partitionResponse, nextPartition) = getPartitionMetadataForDescribeTopicResponse( @@ -320,81 +338,93 @@ class KRaftMetadataCache( result } - override def getAllTopics(): util.Set[String] = _currentImage.topics().topicsByName().keySet() + override def getAllTopics(): Set[String] = _currentImage.topics().topicsByName().keySet().asScala + + override def getTopicPartitions(topicName: String): Set[TopicPartition] = { + Option(_currentImage.topics().getTopic(topicName)) match { + case None => Set.empty + case Some(topic) => topic.partitions().keySet().asScala.map(new TopicPartition(topicName, _)) + } + } - override def getTopicId(topicName: String): Uuid = util.Optional.ofNullable(_currentImage.topics.topicsByName.get(topicName)) - .map(_.id) - .orElse(Uuid.ZERO_UUID) + override def getTopicId(topicName: String): Uuid = _currentImage.topics().topicsByName().asScala.get(topicName).map(_.id()).getOrElse(Uuid.ZERO_UUID) - override def getTopicName(topicId: Uuid): util.Optional[String] = util.Optional.ofNullable(_currentImage.topics().topicsById().get(topicId)).map(t => t.name) + override def getTopicName(topicId: Uuid): Option[String] = _currentImage.topics().topicsById.asScala.get(topicId).map(_.name()) override def hasAliveBroker(brokerId: Int): Boolean = { Option(_currentImage.cluster.broker(brokerId)).count(!_.fenced()) == 1 } - override def isBrokerFenced(brokerId: Int): Boolean = { + def isBrokerFenced(brokerId: Int): Boolean = { Option(_currentImage.cluster.broker(brokerId)).count(_.fenced) == 1 } - override def isBrokerShuttingDown(brokerId: Int): Boolean = { + def isBrokerShuttingDown(brokerId: Int): Boolean = { Option(_currentImage.cluster.broker(brokerId)).count(_.inControlledShutdown) == 1 } - override def getAliveBrokerNode(brokerId: Int, listenerName: ListenerName): util.Optional[Node] = { - util.Optional.ofNullable(_currentImage.cluster().broker(brokerId)) - .filter(Predicate.not(_.fenced)) - .flatMap(broker => broker.node(listenerName.value)) + override def getAliveBrokers(): Iterable[BrokerMetadata] = getAliveBrokers(_currentImage) + + private def getAliveBrokers(image: MetadataImage): Iterable[BrokerMetadata] = { + image.cluster().brokers().values().asScala.filterNot(_.fenced()). + map(b => new BrokerMetadata(b.id, b.rack)) + } + + override def getAliveBrokerNode(brokerId: Int, listenerName: ListenerName): Option[Node] = { + Option(_currentImage.cluster().broker(brokerId)).filterNot(_.fenced()). + flatMap(_.node(listenerName.value()).toScala) } - override def getAliveBrokerNodes(listenerName: ListenerName): util.List[Node] = { - _currentImage.cluster.brokers.values.stream - .filter(Predicate.not(_.fenced)) - .flatMap(broker => broker.node(listenerName.value).stream) - .collect(Collectors.toList()) + override def getAliveBrokerNodes(listenerName: ListenerName): Seq[Node] = { + _currentImage.cluster().brokers().values().asScala.filterNot(_.fenced()). + flatMap(_.node(listenerName.value()).toScala).toSeq } - override def getBrokerNodes(listenerName: ListenerName): util.List[Node] = { - _currentImage.cluster.brokers.values.stream - .flatMap(broker => broker.node(listenerName.value).stream) - .collect(Collectors.toList()) + override def getBrokerNodes(listenerName: ListenerName): Seq[Node] = { + _currentImage.cluster().brokers().values().asScala.flatMap(_.node(listenerName.value()).asScala).toSeq } - override def getLeaderAndIsr(topicName: String, partitionId: Int): util.Optional[LeaderAndIsr] = { - util.Optional.ofNullable(_currentImage.topics().getTopic(topicName)). - flatMap(topic => util.Optional.ofNullable(topic.partitions().get(partitionId))). - flatMap(partition => util.Optional.ofNullable(new LeaderAndIsr(partition.leader, partition.leaderEpoch, + override def getLeaderAndIsr(topicName: String, partitionId: Int): Option[LeaderAndIsr] = { + Option(_currentImage.topics().getTopic(topicName)). + flatMap(topic => Option(topic.partitions().get(partitionId))). + flatMap(partition => Some(new LeaderAndIsr(partition.leader, partition.leaderEpoch, util.Arrays.asList(partition.isr.map(i => i: java.lang.Integer): _*), partition.leaderRecoveryState, partition.partitionEpoch))) } - override def numPartitions(topicName: String): util.Optional[Integer] = { - util.Optional.ofNullable(_currentImage.topics().getTopic(topicName)). + override def numPartitions(topicName: String): Option[Int] = { + Option(_currentImage.topics().getTopic(topicName)). map(topic => topic.partitions().size()) } + override def topicNamesToIds(): util.Map[String, Uuid] = _currentImage.topics.topicNameToIdView() + override def topicIdsToNames(): util.Map[Uuid, String] = _currentImage.topics.topicIdToNameView() - override def topicNamesToIds(): util.Map[String, Uuid] = _currentImage.topics().topicNameToIdView() + override def topicIdInfo(): (util.Map[String, Uuid], util.Map[Uuid, String]) = { + val image = _currentImage + (image.topics.topicNameToIdView(), image.topics.topicIdToNameView()) + } // if the leader is not known, return None; // if the leader is known and corresponding node is available, return Some(node) // if the leader is known but corresponding node with the listener name is not available, return Some(NO_NODE) - override def getPartitionLeaderEndpoint(topicName: String, partitionId: Int, listenerName: ListenerName): util.Optional[Node] = { + override def getPartitionLeaderEndpoint(topicName: String, partitionId: Int, listenerName: ListenerName): Option[Node] = { val image = _currentImage Option(image.topics().getTopic(topicName)) match { - case None => util.Optional.empty() + case None => None case Some(topic) => Option(topic.partitions().get(partitionId)) match { - case None => util.Optional.empty() + case None => None case Some(partition) => Option(image.cluster().broker(partition.leader)) match { - case None => util.Optional.of(Node.noNode) - case Some(broker) => util.Optional.of(broker.node(listenerName.value()).orElse(Node.noNode())) + case None => Some(Node.noNode) + case Some(broker) => Some(broker.node(listenerName.value()).orElse(Node.noNode())) } } } } - override def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): util.Map[Integer, Node] = { + override def getPartitionReplicaEndpoints(tp: TopicPartition, listenerName: ListenerName): Map[Int, Node] = { val image = _currentImage - val result = new util.HashMap[Integer, Node]() + val result = new mutable.HashMap[Int, Node]() Option(image.topics().getTopic(tp.topic())).foreach { topic => Option(topic.partitions().get(tp.partition())).foreach { partition => partition.replicas.foreach { replicaId => @@ -411,25 +441,74 @@ class KRaftMetadataCache( result } - override def getRandomAliveBrokerId: util.Optional[Integer] = { + /** + * Choose a random broker node to report as the controller. We do this because we want + * the client to send requests destined for the controller to a random broker. + * Clients do not have direct access to the controller in the KRaft world, as explained + * in KIP-590. + */ + override def getControllerId: Option[CachedControllerId] = + getRandomAliveBroker(_currentImage).map(KRaftCachedControllerId) + + override def getRandomAliveBrokerId: Option[Int] = { getRandomAliveBroker(_currentImage) } - private def getRandomAliveBroker(image: MetadataImage): util.Optional[Integer] = { - val aliveBrokers = image.cluster().brokers().values().stream() - .filter(Predicate.not(_.fenced)) - .map(_.id()).toList + private def getRandomAliveBroker(image: MetadataImage): Option[Int] = { + val aliveBrokers = getAliveBrokers(image).toList if (aliveBrokers.isEmpty) { - util.Optional.empty() + None } else { - util.Optional.of(aliveBrokers.get(ThreadLocalRandom.current().nextInt(aliveBrokers.size))) + Some(aliveBrokers(ThreadLocalRandom.current().nextInt(aliveBrokers.size)).id) } } - override def getAliveBrokerEpoch(brokerId: Int): util.Optional[java.lang.Long] = { - util.Optional.ofNullable(_currentImage.cluster().broker(brokerId)) - .filter(Predicate.not(_.fenced)) - .map(brokerRegistration => brokerRegistration.epoch()) + def getAliveBrokerEpoch(brokerId: Int): Option[Long] = { + Option(_currentImage.cluster().broker(brokerId)).filterNot(_.fenced()). + map(brokerRegistration => brokerRegistration.epoch()) + } + + override def getClusterMetadata(clusterId: String, listenerName: ListenerName): Cluster = { + val image = _currentImage + val nodes = new util.HashMap[Integer, Node] + image.cluster().brokers().values().forEach { broker => + if (!broker.fenced()) { + broker.node(listenerName.value()).toScala.foreach { node => + nodes.put(broker.id(), node) + } + } + } + + def node(id: Int): Node = { + Option(nodes.get(id)).getOrElse(Node.noNode()) + } + + val partitionInfos = new util.ArrayList[PartitionInfo] + val internalTopics = new util.HashSet[String] + + image.topics().topicsByName().values().forEach { topic => + topic.partitions().forEach { (key, value) => + val partitionId = key + val partition = value + partitionInfos.add(new PartitionInfo(topic.name(), + partitionId, + node(partition.leader), + partition.replicas.map(replica => node(replica)), + partition.isr.map(replica => node(replica)), + getOfflineReplicas(image, partition, listenerName).asScala. + map(replica => node(replica)).toArray)) + if (Topic.isInternal(topic.name())) { + internalTopics.add(topic.name()) + } + } + } + val controllerNode = node(getRandomAliveBroker(image).getOrElse(-1)) + // Note: the constructor of Cluster does not allow us to reference unregistered nodes. + // So, for example, if partition foo-0 has replicas [1, 2] but broker 2 is not + // registered, we pass its replicas as [1, -1]. This doesn't make a lot of sense, but + // we are duplicating the behavior of ZkMetadataCache, for now. + new Cluster(clusterId, nodes.values(), + partitionInfos, Collections.emptySet(), internalTopics, controllerNode) } override def contains(topicName: String): Boolean = @@ -453,11 +532,11 @@ class KRaftMetadataCache( override def config(configResource: ConfigResource): Properties = _currentImage.configs().configProperties(configResource) - override def describeClientQuotas(request: DescribeClientQuotasRequestData): DescribeClientQuotasResponseData = { + def describeClientQuotas(request: DescribeClientQuotasRequestData): DescribeClientQuotasResponseData = { _currentImage.clientQuotas().describe(request) } - override def describeScramCredentials(request: DescribeUserScramCredentialsRequestData): DescribeUserScramCredentialsResponseData = { + def describeScramCredentials(request: DescribeUserScramCredentialsRequestData): DescribeUserScramCredentialsResponseData = { _currentImage.scram().describe(request) } @@ -473,7 +552,8 @@ class KRaftMetadataCache( new FinalizedFeatures( image.features().metadataVersionOrThrow(), finalizedFeatures, - image.highestOffsetAndEpoch().offset) + image.highestOffsetAndEpoch().offset, + true) } } diff --git a/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala b/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala new file mode 100644 index 0000000000000..09789249571f0 --- /dev/null +++ b/core/src/main/scala/kafka/server/metadata/ScramPublisher.scala @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import kafka.server.KafkaConfig +import kafka.utils.Logging +import org.apache.kafka.image.loader.LoaderManifest +import org.apache.kafka.image.{MetadataDelta, MetadataImage} +import org.apache.kafka.security.CredentialProvider +import org.apache.kafka.server.fault.FaultHandler + + +class ScramPublisher( + conf: KafkaConfig, + faultHandler: FaultHandler, + nodeType: String, + credentialProvider: CredentialProvider, +) extends Logging with org.apache.kafka.image.publisher.MetadataPublisher { + logIdent = s"[${name()}] " + + override def name(): String = s"ScramPublisher $nodeType id=${conf.nodeId}" + + override def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + manifest: LoaderManifest + ): Unit = { + onMetadataUpdate(delta, newImage) + } + + def onMetadataUpdate( + delta: MetadataDelta, + newImage: MetadataImage, + ): Unit = { + val deltaName = s"MetadataDelta up to ${newImage.highestOffsetAndEpoch().offset}" + try { + // Apply changes to SCRAM credentials. + Option(delta.scramDelta()).foreach { scramDelta => + scramDelta.changes().forEach { + case (mechanism, userChanges) => + userChanges.forEach { + case (userName, change) => + if (change.isPresent) { + credentialProvider.updateCredential(mechanism, userName, change.get().toCredential(mechanism)) + } else { + credentialProvider.removeCredentials(mechanism, userName) + } + } + } + } + } catch { + case t: Throwable => faultHandler.handleFault("Uncaught exception while " + + s"publishing SCRAM changes from $deltaName", t) + } + } +} diff --git a/core/src/main/scala/kafka/server/metadata/ShareCoordinatorMetadataCacheHelperImpl.java b/core/src/main/scala/kafka/server/metadata/ShareCoordinatorMetadataCacheHelperImpl.java new file mode 100644 index 0000000000000..28148eab7ffc4 --- /dev/null +++ b/core/src/main/scala/kafka/server/metadata/ShareCoordinatorMetadataCacheHelperImpl.java @@ -0,0 +1,103 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata; + +import kafka.server.MetadataCache; + +import org.apache.kafka.common.Node; +import org.apache.kafka.common.message.MetadataResponseData; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.protocol.Errors; +import org.apache.kafka.common.requests.MetadataResponse; +import org.apache.kafka.server.share.SharePartitionKey; +import org.apache.kafka.server.share.persister.ShareCoordinatorMetadataCacheHelper; + +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.function.Function; + +import scala.jdk.javaapi.CollectionConverters; +import scala.jdk.javaapi.OptionConverters; + +public class ShareCoordinatorMetadataCacheHelperImpl implements ShareCoordinatorMetadataCacheHelper { + private final MetadataCache metadataCache; + private final Function keyToPartitionMapper; + private final ListenerName interBrokerListenerName; + + public ShareCoordinatorMetadataCacheHelperImpl( + MetadataCache metadataCache, + Function keyToPartitionMapper, + ListenerName interBrokerListenerName + ) { + Objects.requireNonNull(metadataCache, "metadataCache must not be null"); + Objects.requireNonNull(keyToPartitionMapper, "keyToPartitionMapper must not be null"); + Objects.requireNonNull(interBrokerListenerName, "interBrokerListenerName must not be null"); + + this.metadataCache = metadataCache; + this.keyToPartitionMapper = keyToPartitionMapper; + this.interBrokerListenerName = interBrokerListenerName; + } + + @Override + public boolean containsTopic(String topic) { + return metadataCache.contains(topic); + } + + @Override + public Node getShareCoordinator(SharePartitionKey key, String internalTopicName) { + if (metadataCache.contains(internalTopicName)) { + Set topicSet = new HashSet<>(); + topicSet.add(internalTopicName); + + List topicMetadata = CollectionConverters.asJava( + metadataCache.getTopicMetadata( + CollectionConverters.asScala(topicSet), + interBrokerListenerName, + false, + false + ) + ); + + if (topicMetadata == null || topicMetadata.isEmpty() || topicMetadata.get(0).errorCode() != Errors.NONE.code()) { + return Node.noNode(); + } else { + int partition = keyToPartitionMapper.apply(key); + Optional response = topicMetadata.get(0).partitions().stream() + .filter(responsePart -> responsePart.partitionIndex() == partition + && responsePart.leaderId() != MetadataResponse.NO_LEADER_ID) + .findFirst(); + + if (response.isPresent()) { + return OptionConverters.toJava(metadataCache.getAliveBrokerNode(response.get().leaderId(), interBrokerListenerName)) + .orElse(Node.noNode()); + } else { + return Node.noNode(); + } + } + } + return Node.noNode(); + } + + @Override + public List getClusterNodes() { + return CollectionConverters.asJava(metadataCache.getAliveBrokerNodes(interBrokerListenerName).toSeq()); + } +} diff --git a/core/src/main/scala/kafka/tools/DumpLogSegments.scala b/core/src/main/scala/kafka/tools/DumpLogSegments.scala index 0703c5474f08b..4053c35f2a0be 100755 --- a/core/src/main/scala/kafka/tools/DumpLogSegments.scala +++ b/core/src/main/scala/kafka/tools/DumpLogSegments.scala @@ -21,34 +21,33 @@ import com.fasterxml.jackson.databind.JsonNode import java.io._ import com.fasterxml.jackson.databind.node.{IntNode, JsonNodeFactory, ObjectNode, TextNode} +import kafka.coordinator.transaction.TransactionLog +import kafka.log._ +import kafka.utils.CoreUtils import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.message.ConsumerProtocolAssignment import org.apache.kafka.common.message.ConsumerProtocolAssignmentJsonConverter import org.apache.kafka.common.message.ConsumerProtocolSubscription import org.apache.kafka.common.message.ConsumerProtocolSubscriptionJsonConverter import org.apache.kafka.common.message.KRaftVersionRecordJsonConverter -import org.apache.kafka.common.message.LeaderChangeMessageJsonConverter import org.apache.kafka.common.message.SnapshotFooterRecordJsonConverter import org.apache.kafka.common.message.SnapshotHeaderRecordJsonConverter import org.apache.kafka.common.message.VotersRecordJsonConverter import org.apache.kafka.common.metadata.{MetadataJsonConverters, MetadataRecordType} -import org.apache.kafka.common.protocol.{ApiMessage, ByteBufferAccessor} +import org.apache.kafka.common.protocol.{ByteBufferAccessor, Message} import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils -import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, GroupMetadataValueJsonConverter, CoordinatorRecordJsonConverters => GroupCoordinatorRecordJsonConverters, CoordinatorRecordType => GroupCoordinatorRecordType} -import org.apache.kafka.coordinator.common.runtime.CoordinatorRecordSerde import org.apache.kafka.coordinator.common.runtime.Deserializer.UnknownRecordTypeException +import org.apache.kafka.coordinator.group.generated.{ConsumerGroupCurrentMemberAssignmentKey, ConsumerGroupCurrentMemberAssignmentKeyJsonConverter, ConsumerGroupCurrentMemberAssignmentValue, ConsumerGroupCurrentMemberAssignmentValueJsonConverter, ConsumerGroupMemberMetadataKey, ConsumerGroupMemberMetadataKeyJsonConverter, ConsumerGroupMemberMetadataValue, ConsumerGroupMemberMetadataValueJsonConverter, ConsumerGroupMetadataKey, ConsumerGroupMetadataKeyJsonConverter, ConsumerGroupMetadataValue, ConsumerGroupMetadataValueJsonConverter, ConsumerGroupPartitionMetadataKey, ConsumerGroupPartitionMetadataKeyJsonConverter, ConsumerGroupPartitionMetadataValue, ConsumerGroupPartitionMetadataValueJsonConverter, ConsumerGroupRegularExpressionKey, ConsumerGroupRegularExpressionKeyJsonConverter, ConsumerGroupRegularExpressionValue, ConsumerGroupRegularExpressionValueJsonConverter, ConsumerGroupTargetAssignmentMemberKey, ConsumerGroupTargetAssignmentMemberKeyJsonConverter, ConsumerGroupTargetAssignmentMemberValue, ConsumerGroupTargetAssignmentMemberValueJsonConverter, ConsumerGroupTargetAssignmentMetadataKey, ConsumerGroupTargetAssignmentMetadataKeyJsonConverter, ConsumerGroupTargetAssignmentMetadataValue, ConsumerGroupTargetAssignmentMetadataValueJsonConverter, GroupMetadataKey, GroupMetadataKeyJsonConverter, GroupMetadataValue, GroupMetadataValueJsonConverter, OffsetCommitKey, OffsetCommitKeyJsonConverter, OffsetCommitValue, OffsetCommitValueJsonConverter, ShareGroupCurrentMemberAssignmentKey, ShareGroupCurrentMemberAssignmentKeyJsonConverter, ShareGroupCurrentMemberAssignmentValue, ShareGroupCurrentMemberAssignmentValueJsonConverter, ShareGroupMemberMetadataKey, ShareGroupMemberMetadataKeyJsonConverter, ShareGroupMemberMetadataValue, ShareGroupMemberMetadataValueJsonConverter, ShareGroupMetadataKey, ShareGroupMetadataKeyJsonConverter, ShareGroupMetadataValue, ShareGroupMetadataValueJsonConverter, ShareGroupPartitionMetadataKey, ShareGroupPartitionMetadataKeyJsonConverter, ShareGroupPartitionMetadataValue, ShareGroupPartitionMetadataValueJsonConverter, ShareGroupStatePartitionMetadataKey, ShareGroupStatePartitionMetadataKeyJsonConverter, ShareGroupStatePartitionMetadataValue, ShareGroupStatePartitionMetadataValueJsonConverter, ShareGroupTargetAssignmentMemberKey, ShareGroupTargetAssignmentMemberKeyJsonConverter, ShareGroupTargetAssignmentMemberValue, ShareGroupTargetAssignmentMemberValueJsonConverter, ShareGroupTargetAssignmentMetadataKey, ShareGroupTargetAssignmentMetadataKeyJsonConverter, ShareGroupTargetAssignmentMetadataValue, ShareGroupTargetAssignmentMetadataValueJsonConverter} import org.apache.kafka.coordinator.group.GroupCoordinatorRecordSerde import org.apache.kafka.coordinator.share.ShareCoordinatorRecordSerde -import org.apache.kafka.coordinator.share.generated.{CoordinatorRecordJsonConverters => ShareCoordinatorRecordJsonConverters} -import org.apache.kafka.coordinator.transaction.TransactionCoordinatorRecordSerde -import org.apache.kafka.coordinator.transaction.generated.{CoordinatorRecordJsonConverters => TransactionCoordinatorRecordJsonConverters} +import org.apache.kafka.coordinator.share.generated.{ShareSnapshotKey, ShareSnapshotKeyJsonConverter, ShareSnapshotValue, ShareSnapshotValueJsonConverter, ShareUpdateKey, ShareUpdateKeyJsonConverter, ShareUpdateValue, ShareUpdateValueJsonConverter} import org.apache.kafka.metadata.MetadataRecordSerde import org.apache.kafka.metadata.bootstrap.BootstrapDirectory import org.apache.kafka.snapshot.Snapshots import org.apache.kafka.server.log.remote.metadata.storage.serialization.RemoteLogMetadataSerde import org.apache.kafka.server.util.{CommandDefaultOptions, CommandLineUtils} -import org.apache.kafka.storage.internals.log.{CorruptSnapshotException, LogFileUtils, OffsetIndex, ProducerStateManager, TimeIndex, TransactionIndex, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CorruptSnapshotException, LogFileUtils, OffsetIndex, ProducerStateManager, TimeIndex, TransactionIndex} import org.apache.kafka.tools.api.{Decoder, StringDecoder} import java.nio.ByteBuffer @@ -76,16 +75,16 @@ object DumpLogSegments { val filename = file.getName val suffix = filename.substring(filename.lastIndexOf(".")) suffix match { - case UnifiedLog.LOG_FILE_SUFFIX | Snapshots.SUFFIX => + case UnifiedLog.LogFileSuffix | Snapshots.SUFFIX => dumpLog(file, opts.shouldPrintDataLog, nonConsecutivePairsForLogFilesMap, opts.isDeepIteration, opts.messageParser, opts.skipRecordMetadata, opts.maxBytes) - case UnifiedLog.INDEX_FILE_SUFFIX => + case UnifiedLog.IndexFileSuffix => dumpIndex(file, opts.indexSanityOnly, opts.verifyOnly, misMatchesForIndexFilesMap, opts.maxMessageSize) - case UnifiedLog.TIME_INDEX_FILE_SUFFIX => + case UnifiedLog.TimeIndexFileSuffix => dumpTimeIndex(file, opts.indexSanityOnly, opts.verifyOnly, timeIndexDumpErrors) case LogFileUtils.PRODUCER_SNAPSHOT_FILE_SUFFIX => dumpProducerIdSnapshot(file) - case UnifiedLog.TXN_INDEX_FILE_SUFFIX => + case UnifiedLog.TxnIndexFileSuffix => dumpTxnIndex(file) case _ => System.err.println(s"Ignoring unknown file $file") @@ -143,7 +142,7 @@ object DumpLogSegments { misMatchesForIndexFilesMap: mutable.Map[String, List[(Long, Long)]], maxMessageSize: Int): Unit = { val startOffset = file.getName.split("\\.")(0).toLong - val logFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.LOG_FILE_SUFFIX) + val logFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.LogFileSuffix) val fileRecords = FileRecords.open(logFile, false) val index = new OffsetIndex(file, startOffset, -1, false) @@ -184,9 +183,9 @@ object DumpLogSegments { verifyOnly: Boolean, timeIndexDumpErrors: TimeIndexDumpErrors): Unit = { val startOffset = file.getName.split("\\.")(0).toLong - val logFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.LOG_FILE_SUFFIX) + val logFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.LogFileSuffix) val fileRecords = FileRecords.open(logFile, false) - val indexFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.INDEX_FILE_SUFFIX) + val indexFile = new File(file.getAbsoluteFile.getParent, file.getName.split("\\.")(0) + UnifiedLog.IndexFileSuffix) val index = new OffsetIndex(indexFile, startOffset, -1, false) val timeIndex = new TimeIndex(file, startOffset, -1, false) @@ -267,7 +266,7 @@ object DumpLogSegments { parser: MessageParser[_, _], skipRecordMetadata: Boolean, maxBytes: Int): Unit = { - if (file.getName.endsWith(UnifiedLog.LOG_FILE_SUFFIX)) { + if (file.getName.endsWith(UnifiedLog.LogFileSuffix)) { val startOffset = file.getName.split("\\.")(0).toLong println(s"Log starting offset: $startOffset") } else if (file.getName.endsWith(Snapshots.SUFFIX)) { @@ -311,7 +310,26 @@ object DumpLogSegments { } if (batch.isControlBatch) { - printControlRecord(record) + val controlTypeId = ControlRecordType.parseTypeId(record.key) + ControlRecordType.fromTypeId(controlTypeId) match { + case ControlRecordType.ABORT | ControlRecordType.COMMIT => + val endTxnMarker = EndTransactionMarker.deserialize(record) + print(s" endTxnMarker: ${endTxnMarker.controlType} coordinatorEpoch: ${endTxnMarker.coordinatorEpoch}") + case ControlRecordType.SNAPSHOT_HEADER => + val header = ControlRecordUtils.deserializeSnapshotHeaderRecord(record) + print(s" SnapshotHeader ${SnapshotHeaderRecordJsonConverter.write(header, header.version())}") + case ControlRecordType.SNAPSHOT_FOOTER => + val footer = ControlRecordUtils.deserializeSnapshotFooterRecord(record) + print(s" SnapshotFooter ${SnapshotFooterRecordJsonConverter.write(footer, footer.version())}") + case ControlRecordType.KRAFT_VERSION => + val kraftVersion = ControlRecordUtils.deserializeKRaftVersionRecord(record) + print(s" KRaftVersion ${KRaftVersionRecordJsonConverter.write(kraftVersion, kraftVersion.version())}") + case ControlRecordType.KRAFT_VOTERS=> + val voters = ControlRecordUtils.deserializeVotersRecord(record) + print(s" KRaftVoters ${VotersRecordJsonConverter.write(voters, voters.version())}") + case controlType => + print(s" controlType: $controlType($controlTypeId)") + } } } if (printContents && !batch.isControlBatch) { @@ -333,32 +351,6 @@ object DumpLogSegments { } finally fileRecords.closeHandlers() } - private def printControlRecord(record: Record): Unit = { - val controlTypeId = ControlRecordType.parseTypeId(record.key) - ControlRecordType.fromTypeId(controlTypeId) match { - case ControlRecordType.ABORT | ControlRecordType.COMMIT => - val endTxnMarker = EndTransactionMarker.deserialize(record) - print(s" endTxnMarker: ${endTxnMarker.controlType} coordinatorEpoch: ${endTxnMarker.coordinatorEpoch}") - case ControlRecordType.LEADER_CHANGE => - val leaderChangeMessage = ControlRecordUtils.deserializeLeaderChangeMessage(record) - print(s" LeaderChange: ${LeaderChangeMessageJsonConverter.write(leaderChangeMessage, leaderChangeMessage.version())}") - case ControlRecordType.SNAPSHOT_HEADER => - val header = ControlRecordUtils.deserializeSnapshotHeaderRecord(record) - print(s" SnapshotHeader ${SnapshotHeaderRecordJsonConverter.write(header, header.version())}") - case ControlRecordType.SNAPSHOT_FOOTER => - val footer = ControlRecordUtils.deserializeSnapshotFooterRecord(record) - print(s" SnapshotFooter ${SnapshotFooterRecordJsonConverter.write(footer, footer.version())}") - case ControlRecordType.KRAFT_VERSION => - val kraftVersion = ControlRecordUtils.deserializeKRaftVersionRecord(record) - print(s" KRaftVersion ${KRaftVersionRecordJsonConverter.write(kraftVersion, kraftVersion.version())}") - case ControlRecordType.KRAFT_VOTERS=> - val voters = ControlRecordUtils.deserializeVotersRecord(record) - print(s" KRaftVoters ${VotersRecordJsonConverter.write(voters, voters.version())}") - case controlType => - print(s" controlType: $controlType($controlTypeId)") - } - } - private def printBatchLevel(batch: FileLogInputStream.FileChannelRecordBatch, accumulativeBytes: Long): Unit = { if (batch.magic >= RecordBatch.MAGIC_VALUE_V2) print("baseOffset: " + batch.baseOffset + " lastOffset: " + batch.lastOffset + " count: " + batch.countOrNull + @@ -426,65 +418,53 @@ object DumpLogSegments { } } - abstract class CoordinatorRecordMessageParser(serde: CoordinatorRecordSerde) extends MessageParser[String, String] { - override def parse(record: Record): (Option[String], Option[String]) = { - if (!record.hasKey) - throw new RuntimeException(s"Failed to decode message at offset ${record.offset} using the " + - "specified decoder (message had a missing key)") - - try { - val r = serde.deserialize(record.key, record.value) - ( - Some(prepareKey(r.key)), - Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) - ) - } catch { - case e: UnknownRecordTypeException => - ( - Some(s"Unknown record type ${e.unknownType} at offset ${record.offset}, skipping."), - None - ) - - case e: Throwable => - ( - Some(s"Error at offset ${record.offset}, skipping. ${e.getMessage}"), - None - ) + // Package private for testing. + class OffsetsMessageParser extends MessageParser[String, String] { + private val serde = new GroupCoordinatorRecordSerde() + + private def prepareKey(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: OffsetCommitKey => + OffsetCommitKeyJsonConverter.write(m, version) + case m: GroupMetadataKey => + GroupMetadataKeyJsonConverter.write(m, version) + case m: ConsumerGroupMetadataKey => + ConsumerGroupMetadataKeyJsonConverter.write(m, version) + case m: ConsumerGroupPartitionMetadataKey => + ConsumerGroupPartitionMetadataKeyJsonConverter.write(m, version) + case m: ConsumerGroupMemberMetadataKey => + ConsumerGroupMemberMetadataKeyJsonConverter.write(m, version) + case m: ConsumerGroupTargetAssignmentMetadataKey => + ConsumerGroupTargetAssignmentMetadataKeyJsonConverter.write(m, version) + case m: ConsumerGroupTargetAssignmentMemberKey => + ConsumerGroupTargetAssignmentMemberKeyJsonConverter.write(m, version) + case m: ConsumerGroupCurrentMemberAssignmentKey => + ConsumerGroupCurrentMemberAssignmentKeyJsonConverter.write(m, version) + case m: ConsumerGroupRegularExpressionKey => + ConsumerGroupRegularExpressionKeyJsonConverter.write(m, version) + case m: ShareGroupMetadataKey => + ShareGroupMetadataKeyJsonConverter.write(m, version) + case m: ShareGroupPartitionMetadataKey => + ShareGroupPartitionMetadataKeyJsonConverter.write(m, version) + case m: ShareGroupMemberMetadataKey => + ShareGroupMemberMetadataKeyJsonConverter.write(m, version) + case m: ShareGroupTargetAssignmentMetadataKey => + ShareGroupTargetAssignmentMetadataKeyJsonConverter.write(m, version) + case m: ShareGroupTargetAssignmentMemberKey => + ShareGroupTargetAssignmentMemberKeyJsonConverter.write(m, version) + case m: ShareGroupCurrentMemberAssignmentKey => + ShareGroupCurrentMemberAssignmentKeyJsonConverter.write(m, version) + case m: ShareGroupStatePartitionMetadataKey => + ShareGroupStatePartitionMetadataKeyJsonConverter.write(m, version) + case _ => throw new UnknownRecordTypeException(version) } - } - - private def prepareKey(message: ApiMessage): String = { - val json = new ObjectNode(JsonNodeFactory.instance) - json.set("type", new TextNode(message.apiKey.toString)) - json.set("data", keyAsJson(message)) - json.toString - } - private def prepareValue(message: ApiMessage, version: Short): String = { val json = new ObjectNode(JsonNodeFactory.instance) - json.set("version", new TextNode(version.toString)) - json.set("data", valueAsJson(message, version)) + json.set("type", new TextNode(version.toString)) + json.set("data", messageAsJson) json.toString } - protected def keyAsJson(message: ApiMessage): JsonNode - protected def valueAsJson(message: ApiMessage, version: Short): JsonNode - } - - // Package private for testing. - class OffsetsMessageParser extends CoordinatorRecordMessageParser(new GroupCoordinatorRecordSerde()) { - protected def keyAsJson(message: ApiMessage): JsonNode = { - GroupCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message) - } - - protected def valueAsJson(message: ApiMessage, version: Short): JsonNode = { - if (message.apiKey == GroupCoordinatorRecordType.GROUP_METADATA.id) { - prepareGroupMetadataValue(message.asInstanceOf[GroupMetadataValue], version) - } else { - GroupCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version) - } - } - private def prepareGroupMetadataValue(message: GroupMetadataValue, version: Short): JsonNode = { val json = GroupMetadataValueJsonConverter.write(message, version) @@ -537,16 +517,80 @@ object DumpLogSegments { json } - } - // Package private for testing. - class TransactionLogMessageParser extends CoordinatorRecordMessageParser(new TransactionCoordinatorRecordSerde()) { - override protected def keyAsJson(message: ApiMessage): JsonNode = { - TransactionCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message) + private def prepareValue(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: OffsetCommitValue => + OffsetCommitValueJsonConverter.write(m, version) + case m: GroupMetadataValue => + prepareGroupMetadataValue(m, version) + case m: ConsumerGroupMetadataValue => + ConsumerGroupMetadataValueJsonConverter.write(m, version) + case m: ConsumerGroupPartitionMetadataValue => + ConsumerGroupPartitionMetadataValueJsonConverter.write(m, version) + case m: ConsumerGroupMemberMetadataValue => + ConsumerGroupMemberMetadataValueJsonConverter.write(m, version) + case m: ConsumerGroupTargetAssignmentMetadataValue => + ConsumerGroupTargetAssignmentMetadataValueJsonConverter.write(m, version) + case m: ConsumerGroupTargetAssignmentMemberValue => + ConsumerGroupTargetAssignmentMemberValueJsonConverter.write(m, version) + case m: ConsumerGroupCurrentMemberAssignmentValue => + ConsumerGroupCurrentMemberAssignmentValueJsonConverter.write(m, version) + case m: ConsumerGroupRegularExpressionValue => + ConsumerGroupRegularExpressionValueJsonConverter.write(m, version) + case m: ShareGroupMetadataValue => + ShareGroupMetadataValueJsonConverter.write(m, version) + case m: ShareGroupPartitionMetadataValue => + ShareGroupPartitionMetadataValueJsonConverter.write(m, version) + case m: ShareGroupMemberMetadataValue => + ShareGroupMemberMetadataValueJsonConverter.write(m, version) + case m: ShareGroupTargetAssignmentMetadataValue => + ShareGroupTargetAssignmentMetadataValueJsonConverter.write(m, version) + case m: ShareGroupTargetAssignmentMemberValue => + ShareGroupTargetAssignmentMemberValueJsonConverter.write(m, version) + case m: ShareGroupCurrentMemberAssignmentValue => + ShareGroupCurrentMemberAssignmentValueJsonConverter.write(m, version) + case m: ShareGroupStatePartitionMetadataValue => + ShareGroupStatePartitionMetadataValueJsonConverter.write(m, version) + case _ => throw new IllegalStateException(s"Message value ${message.getClass.getSimpleName} is not supported.") + } + + val json = new ObjectNode(JsonNodeFactory.instance) + json.set("version", new TextNode(version.toString)) + json.set("data", messageAsJson) + json.toString } - override protected def valueAsJson(message: ApiMessage, version: Short): JsonNode = { - TransactionCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version) + override def parse(record: Record): (Option[String], Option[String]) = { + if (!record.hasKey) + throw new RuntimeException(s"Failed to decode message at offset ${record.offset} using offset " + + "topic decoder (message had a missing key)") + + try { + val r = serde.deserialize(record.key, record.value) + ( + Some(prepareKey(r.key.message, r.key.version)), + Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) + ) + } catch { + case e: UnknownRecordTypeException => + ( + Some(s"Unknown record type ${e.unknownType} at offset ${record.offset}, skipping."), + None + ) + + case e: Throwable => + ( + Some(s"Error at offset ${record.offset}, skipping. ${e.getMessage}"), + None + ) + } + } + } + + private class TransactionLogMessageParser extends MessageParser[String, String] { + override def parse(record: Record): (Option[String], Option[String]) = { + TransactionLog.formatRecordKeyAndValue(record) } } @@ -575,7 +619,7 @@ object DumpLogSegments { private class RemoteMetadataLogMessageParser extends MessageParser[String, String] { private val metadataRecordSerde = new RemoteLogMetadataSerde - + override def parse(record: Record): (Option[String], Option[String]) = { val output = try { val data = new Array[Byte](record.value.remaining) @@ -591,13 +635,64 @@ object DumpLogSegments { } // for test visibility - class ShareGroupStateMessageParser extends CoordinatorRecordMessageParser(new ShareCoordinatorRecordSerde()) { - override protected def keyAsJson(message: ApiMessage): JsonNode = { - ShareCoordinatorRecordJsonConverters.writeRecordKeyAsJson(message) + class ShareGroupStateMessageParser extends MessageParser[String, String] { + private val serde = new ShareCoordinatorRecordSerde() + + private def prepareKey(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: ShareSnapshotKey => + ShareSnapshotKeyJsonConverter.write(m, version) + case m: ShareUpdateKey => + ShareUpdateKeyJsonConverter.write(m, version) + case _ => throw new UnknownRecordTypeException(version) + } + + jsonString(messageAsJson, version) + } + + private def prepareValue(message: Message, version: Short): String = { + val messageAsJson = message match { + case m: ShareSnapshotValue => + ShareSnapshotValueJsonConverter.write(m, version) + case m: ShareUpdateValue => + ShareUpdateValueJsonConverter.write(m, version) + case _ => throw new IllegalStateException(s"Message value ${message.getClass.getSimpleName} is not supported.") + } + + jsonString(messageAsJson, version) } - override protected def valueAsJson(message: ApiMessage, version: Short): JsonNode = { - ShareCoordinatorRecordJsonConverters.writeRecordValueAsJson(message, version) + private def jsonString(jsonNode: JsonNode, version: Short): String = { + val json = new ObjectNode(JsonNodeFactory.instance) + json.set("type", new TextNode(version.toString)) + json.set("data", jsonNode) + json.toString + } + + override def parse(record: Record): (Option[String], Option[String]) = { + if (!record.hasKey) + throw new RuntimeException(s"Failed to decode message at offset ${record.offset} using share group state " + + "topic decoder (message had a missing key)") + + try { + val r = serde.deserialize(record.key, record.value) + ( + Some(prepareKey(r.key.message, r.key.version)), + Option(r.value).map(v => prepareValue(v.message, v.version)).orElse(Some("")) + ) + } catch { + case e: UnknownRecordTypeException => + ( + Some(s"Unknown record type ${e.unknownType} at offset ${record.offset}, skipping."), + None + ) + + case e: Throwable => + ( + Some(s"Error at offset ${record.offset}, skipping. ${e.getMessage}"), + None + ) + } } } @@ -634,11 +729,11 @@ object DumpLogSegments { private val transactionLogOpt = parser.accepts("transaction-log-decoder", "If set, log data will be parsed as " + "transaction metadata from the __transaction_state topic.") private val clusterMetadataOpt = parser.accepts("cluster-metadata-decoder", "If set, log data will be parsed as cluster metadata records.") - private val remoteMetadataOpt = parser.accepts("remote-log-metadata-decoder", "If set, log data will be parsed as TopicBasedRemoteLogMetadataManager (RLMM) metadata records." + + private val remoteMetadataOpt = parser.accepts("remote-log-metadata-decoder", "If set, log data will be parsed as TopicBasedRemoteLogMetadataManager (RLMM) metadata records." + " Instead, the value-decoder-class option can be used if a custom RLMM implementation is configured.") private val shareStateOpt = parser.accepts("share-group-state-decoder", "If set, log data will be parsed as share group state data from the " + "__share_group_state topic.") - private val skipRecordMetadataOpt = parser.accepts("skip-record-metadata", "Skip metadata when printing records. This flag also skips control records.") + private val skipRecordMetadataOpt = parser.accepts("skip-record-metadata", "Whether to skip printing metadata for each record.") options = parser.parse(args : _*) def messageParser: MessageParser[_, _] = @@ -653,8 +748,8 @@ object DumpLogSegments { } else if (options.has(shareStateOpt)) { new ShareGroupStateMessageParser } else { - val valueDecoder = Utils.newInstance(options.valueOf(valueDecoderOpt), classOf[Decoder[_]]) - val keyDecoder = Utils.newInstance(options.valueOf(keyDecoderOpt), classOf[Decoder[_]]) + val valueDecoder = CoreUtils.createObject[org.apache.kafka.tools.api.Decoder[_]](options.valueOf(valueDecoderOpt)) + val keyDecoder = CoreUtils.createObject[org.apache.kafka.tools.api.Decoder[_]](options.valueOf(keyDecoderOpt)) new DecoderMessageParser(keyDecoder, valueDecoder) } diff --git a/core/src/main/scala/kafka/tools/StorageTool.scala b/core/src/main/scala/kafka/tools/StorageTool.scala index d8048d4d0aa2c..40892bca38c92 100644 --- a/core/src/main/scala/kafka/tools/StorageTool.scala +++ b/core/src/main/scala/kafka/tools/StorageTool.scala @@ -126,46 +126,30 @@ object StorageTool extends Logging { setClusterId(namespace.getString("cluster_id")). setUnstableFeatureVersionsEnabled(config.unstableFeatureVersionsEnabled). setIgnoreFormatted(namespace.getBoolean("ignore_formatted")). - setControllerListenerName(config.controllerListenerNames.get(0)). + setControllerListenerName(config.controllerListenerNames.head). setMetadataLogDirectory(config.metadataLogDir) - - Option(namespace.getString("release_version")).foreach(releaseVersion => { - try { - formatter.setReleaseVersion(MetadataVersion.fromVersionString(releaseVersion, config.unstableFeatureVersionsEnabled)) - } catch { - case e: Throwable => - throw new TerseFailure(e.getMessage) - } - }) - + Option(namespace.getString("release_version")).foreach( + releaseVersion => formatter. + setReleaseVersion(MetadataVersion.fromVersionString(releaseVersion))) Option(namespace.getList[String]("feature")).foreach( featureNamesAndLevels(_).foreachEntry { (k, v) => formatter.setFeatureLevel(k, v) }) - val initialControllers = namespace.getString("initial_controllers") - val isStandalone = namespace.getBoolean("standalone") - val staticVotersEmpty = config.quorumConfig.voters().isEmpty - formatter.setHasDynamicQuorum(staticVotersEmpty) - if (!staticVotersEmpty && (Option(initialControllers).isDefined || isStandalone)) { - throw new TerseFailure("You cannot specify " + - QuorumConfig.QUORUM_VOTERS_CONFIG + " and format the node " + - "with --initial-controllers or --standalone. " + - "If you want to use dynamic quorum, please remove " + - QuorumConfig.QUORUM_VOTERS_CONFIG + " and specify " + - QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG + " instead.") - } - Option(initialControllers). + Option(namespace.getString("initial_controllers")). foreach(v => formatter.setInitialControllers(DynamicVoters.parse(v))) - if (isStandalone) { + if (namespace.getBoolean("standalone")) { formatter.setInitialControllers(createStandaloneDynamicVoters(config)) } - if (!namespace.getBoolean("no_initial_controllers") && - config.processRoles.contains(ProcessRole.ControllerRole) && - staticVotersEmpty && - formatter.initialVoters().isEmpty) { + if (namespace.getBoolean("no_initial_controllers")) { + formatter.setNoInitialControllersFlag(true) + } else { + if (config.processRoles.contains(ProcessRole.ControllerRole)) { + if (config.quorumConfig.voters().isEmpty && formatter.initialVoters().isEmpty) { throw new TerseFailure("Because " + QuorumConfig.QUORUM_VOTERS_CONFIG + " is not set on this controller, you must specify one of the following: " + "--standalone, --initial-controllers, or --no-initial-controllers."); + } + } } Option(namespace.getList("add_scram")). foreach(scramArgs => formatter.setScramArguments(scramArgs.asInstanceOf[util.List[String]])) @@ -177,9 +161,9 @@ object StorageTool extends Logging { * Maps the given release version to the corresponding metadata version * and prints the corresponding features. * - * @param namespace Arguments containing the release version. - * @param printStream The print stream to output the version mapping. - * @param validFeatures List of features to be considered in the output. + * @param namespace Arguments containing the release version. + * @param printStream The print stream to output the version mapping. + * @param validFeatures List of features to be considered in the output */ def runVersionMappingCommand( namespace: Namespace, @@ -188,7 +172,7 @@ object StorageTool extends Logging { ): Unit = { val releaseVersion = Option(namespace.getString("release_version")).getOrElse(MetadataVersion.LATEST_PRODUCTION.toString) try { - val metadataVersion = MetadataVersion.fromVersionString(releaseVersion, true) + val metadataVersion = MetadataVersion.fromVersionString(releaseVersion) val metadataVersionLevel = metadataVersion.featureLevel() printStream.print(f"metadata.version=$metadataVersionLevel%d ($releaseVersion%s)%n") @@ -199,7 +183,8 @@ object StorageTool extends Logging { } } catch { case e: IllegalArgumentException => - throw new TerseFailure(e.getMessage) + throw new TerseFailure(s"Unknown release version '$releaseVersion'. Supported versions are: " + + s"${MetadataVersion.MINIMUM_VERSION.version} to ${MetadataVersion.LATEST_PRODUCTION.version}") } } @@ -334,21 +319,18 @@ object StorageTool extends Logging { val reconfigurableQuorumOptions = formatParser.addMutuallyExclusiveGroup() reconfigurableQuorumOptions.addArgument("--standalone", "-s") - .help("Used to initialize a controller as a single-node dynamic quorum. When setting this flag, " + - "the controller.quorum.voters config must not be set, and controller.quorum.bootstrap.servers is set instead.") + .help("Used to initialize a controller as a single-node dynamic quorum.") .action(storeTrue()) reconfigurableQuorumOptions.addArgument("--no-initial-controllers", "-N") - .help("Used to initialize a server without specifying a dynamic quorum. When setting this flag, " + - "the controller.quorum.voters config should not be set, and controller.quorum.bootstrap.servers is set instead.") + .help("Used to initialize a server without a dynamic quorum topology.") .action(storeTrue()) reconfigurableQuorumOptions.addArgument("--initial-controllers", "-I") - .help("Used to initialize a server with the specified dynamic quorum. The argument " + + .help("Used to initialize a server with a specific dynamic quorum topology. The argument " + "is a comma-separated list of id@hostname:port:directory. The same values must be used to " + "format all nodes. For example:\n0@example.com:8082:JEXY6aqzQY-32P5TStzaFg,1@example.com:8083:" + - "MvDxzVmcRsaTz33bUuRU6A,2@example.com:8084:07R5amHmR32VDA6jHkGbTA\n. When setting this flag, " + - "the controller.quorum.voters config must not be set, and controller.quorum.bootstrap.servers is set instead.") + "MvDxzVmcRsaTz33bUuRU6A,2@example.com:8084:07R5amHmR32VDA6jHkGbTA\n") .action(store()) } @@ -394,7 +376,7 @@ object StorageTool extends Logging { def configToLogDirectories(config: KafkaConfig): Seq[String] = { val directories = new mutable.TreeSet[String] - directories ++= config.logDirs.asScala + directories ++= config.logDirs Option(config.metadataLogDir).foreach(directories.add) directories.toSeq } diff --git a/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala b/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala index 081fbec3c95d7..95df38c4e1497 100644 --- a/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala +++ b/core/src/main/scala/kafka/tools/TestRaftRequestHandler.scala @@ -18,15 +18,14 @@ package kafka.tools import kafka.network.RequestChannel -import kafka.server.ApiRequestHandler +import kafka.raft.RaftManager +import kafka.server.{ApiRequestHandler, ApiVersionManager} import kafka.utils.Logging import org.apache.kafka.common.internals.FatalExitError import org.apache.kafka.common.message.{BeginQuorumEpochResponseData, EndQuorumEpochResponseData, FetchResponseData, FetchSnapshotResponseData, VoteResponseData} import org.apache.kafka.common.protocol.{ApiKeys, ApiMessage} import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, BeginQuorumEpochResponse, EndQuorumEpochResponse, FetchResponse, FetchSnapshotResponse, VoteResponse} import org.apache.kafka.common.utils.Time -import org.apache.kafka.raft.RaftManager -import org.apache.kafka.server.ApiVersionManager import org.apache.kafka.server.common.RequestLocal /** @@ -66,7 +65,7 @@ class TestRaftRequestHandler( } private def handleApiVersions(request: RequestChannel.Request): Unit = { - requestChannel.sendResponse(request, apiVersionManager.apiVersionResponse(0, request.header.apiVersion() < 4), None) + requestChannel.sendResponse(request, apiVersionManager.apiVersionResponse(throttleTimeMs = 0, request.header.apiVersion() < 4), None) } private def handleVote(request: RequestChannel.Request): Unit = { @@ -82,7 +81,7 @@ class TestRaftRequestHandler( } private def handleFetch(request: RequestChannel.Request): Unit = { - handle(request, response => FetchResponse.of(response.asInstanceOf[FetchResponseData])) + handle(request, response => new FetchResponse(response.asInstanceOf[FetchResponseData])) } private def handleFetchSnapshot(request: RequestChannel.Request): Unit = { diff --git a/core/src/main/scala/kafka/tools/TestRaftServer.scala b/core/src/main/scala/kafka/tools/TestRaftServer.scala index 48e101443a1f5..418a276bbd17d 100644 --- a/core/src/main/scala/kafka/tools/TestRaftServer.scala +++ b/core/src/main/scala/kafka/tools/TestRaftServer.scala @@ -21,24 +21,23 @@ import java.net.InetSocketAddress import java.util.concurrent.atomic.{AtomicInteger, AtomicLong} import java.util.concurrent.{CompletableFuture, CountDownLatch, LinkedBlockingDeque, TimeUnit} import joptsimple.{OptionException, OptionSpec} -import kafka.network.SocketServer -import kafka.raft.{DefaultExternalKRaftMetrics, KafkaRaftManager} -import kafka.server.{KafkaConfig, KafkaRequestHandlerPool} +import kafka.network.{DataPlaneAcceptor, SocketServer} +import kafka.raft.{DefaultExternalKRaftMetrics, KafkaRaftManager, RaftManager} +import kafka.server.{KafkaConfig, KafkaRequestHandlerPool, SimpleApiVersionManager} import kafka.utils.{CoreUtils, Logging} +import org.apache.kafka.common.errors.InvalidConfigurationException import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.metrics.stats.Percentiles.BucketSizing import org.apache.kafka.common.metrics.stats.{Meter, Percentile, Percentiles} -import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ObjectSerializationCache, Writable} import org.apache.kafka.common.security.scram.internals.ScramMechanism import org.apache.kafka.common.security.token.delegation.internals.DelegationTokenCache import org.apache.kafka.common.utils.{Exit, Time, Utils} import org.apache.kafka.common.{TopicPartition, Uuid, protocol} import org.apache.kafka.raft.errors.NotLeaderException -import org.apache.kafka.raft.{Batch, BatchReader, Endpoints, LeaderAndEpoch, QuorumConfig, RaftClient, RaftManager} +import org.apache.kafka.raft.{Batch, BatchReader, Endpoints, LeaderAndEpoch, QuorumConfig, RaftClient} import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.SimpleApiVersionManager import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} import org.apache.kafka.server.common.serialization.RecordSerde import org.apache.kafka.server.config.KRaftConfigs @@ -88,7 +87,7 @@ class TestRaftServer( val endpoints = Endpoints.fromInetSocketAddresses( config.effectiveAdvertisedControllerListeners .map { endpoint => - (ListenerName.normalised(endpoint.listener), InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) + (endpoint.listenerName, InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) } .toMap .asJava @@ -114,8 +113,8 @@ class TestRaftServer( workloadGenerator = new RaftWorkloadGenerator( raftManager, time, - recordsPerSec = throughput, - recordSize = recordSize + recordsPerSec = 20000, + recordSize = 256 ) val requestHandler = new TestRaftRequestHandler( @@ -131,7 +130,8 @@ class TestRaftServer( requestHandler, time, config.numIoThreads, - "RequestHandlerAvgIdlePercent" + s"${DataPlaneAcceptor.MetricPrefix}RequestHandlerAvgIdlePercent", + DataPlaneAcceptor.ThreadPrefix ) workloadGenerator.start() @@ -180,7 +180,7 @@ class TestRaftServer( private var claimedEpoch: Option[Int] = None - raftManager.client.register(this) + raftManager.register(this) override def handleLeaderChange(newLeaderAndEpoch: LeaderAndEpoch): Unit = { if (newLeaderAndEpoch.isLeader(config.nodeId)) { @@ -427,7 +427,7 @@ object TestRaftServer extends Logging { } private class TestRaftServerOptions(args: Array[String]) extends CommandDefaultOptions(args) { - val configOpt: OptionSpec[String] = parser.accepts("config", "REQUIRED: The configured file") + val configOpt: OptionSpec[String] = parser.accepts("config", "Required configured file") .withRequiredArg .describedAs("filename") .ofType(classOf[String]) @@ -445,14 +445,12 @@ object TestRaftServer extends Logging { .ofType(classOf[Int]) .defaultsTo(256) - val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "REQUIRED: The directory id of the replica") + val directoryId: OptionSpec[String] = parser.accepts("replica-directory-id", "The directory id of the replica") .withRequiredArg .describedAs("directory id") .ofType(classOf[String]) options = parser.parse(args : _*) - - def checkArgs(): Unit = CommandLineUtils.checkRequiredArgs(parser, options, configOpt, directoryId) } def main(args: Array[String]): Unit = { @@ -460,11 +458,16 @@ object TestRaftServer extends Logging { try { CommandLineUtils.maybePrintHelpOrVersion(opts, "Standalone raft server for performance testing") - opts.checkArgs() val configFile = opts.options.valueOf(opts.configOpt) - val directoryIdAsString = opts.options.valueOf(opts.directoryId) + if (configFile == null) { + throw new InvalidConfigurationException("Missing configuration file. Should specify with '--config'") + } + val directoryIdAsString = opts.options.valueOf(opts.directoryId) + if (directoryIdAsString == null) { + throw new InvalidConfigurationException("Missing replica directory id. Should specify with --replica-directory-id") + } val serverProps = Utils.loadProps(configFile) // KafkaConfig requires either `process.roles` or `zookeeper.connect`. Neither are diff --git a/core/src/main/scala/kafka/utils/CoreUtils.scala b/core/src/main/scala/kafka/utils/CoreUtils.scala index 66f9bd4865731..1355643d91d3b 100755 --- a/core/src/main/scala/kafka/utils/CoreUtils.scala +++ b/core/src/main/scala/kafka/utils/CoreUtils.scala @@ -17,21 +17,25 @@ package kafka.utils -import java.io.File +import java.io._ +import java.nio._ import java.util.concurrent.locks.{Lock, ReadWriteLock} -import java.lang.management.ManagementFactory +import java.lang.management._ +import java.util.{Base64, Properties, UUID} import com.typesafe.scalalogging.Logger -import javax.management.ObjectName +import javax.management._ +import scala.collection._ import scala.collection.Seq +import kafka.cluster.EndPoint import org.apache.commons.validator.routines.InetAddressValidator -import org.apache.kafka.common.Endpoint import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.Utils import org.apache.kafka.network.SocketServerConfigs import org.slf4j.event.Level +import java.util import scala.jdk.CollectionConverters._ /** @@ -74,9 +78,9 @@ object CoreUtils { /** * Recursively delete the list of files/directories and any subfiles (if any exist) - * @param files list of files to be deleted + * @param files sequence of files to be deleted */ - def delete(files: java.util.List[String]): Unit = files.forEach(f => Utils.delete(new File(f))) + def delete(files: Seq[String]): Unit = files.foreach(f => Utils.delete(new File(f))) /** * Register the given mbean with the platform mbean server, @@ -105,6 +109,15 @@ object CoreUtils { } } + /** + * Create an instance of the class with the given class name + */ + def createObject[T <: AnyRef](className: String, args: AnyRef*): T = { + val klass = Utils.loadClass(className, classOf[Object]).asInstanceOf[Class[T]] + val constructor = klass.getConstructor(args.map(_.getClass): _*) + constructor.newInstance(args: _*) + } + /** * Execute the given function inside the lock */ @@ -121,22 +134,32 @@ object CoreUtils { def inWriteLock[T](lock: ReadWriteLock)(fun: => T): T = inLock[T](lock.writeLock)(fun) - def listenerListToEndPoints(listeners: java.util.List[String], securityProtocolMap: java.util.Map[ListenerName, SecurityProtocol]): Seq[Endpoint] = { + /** + * Returns a list of duplicated items + */ + def duplicates[T](s: Iterable[T]): Iterable[T] = { + s.groupBy(identity) + .map { case (k, l) => (k, l.size)} + .filter { case (_, l) => l > 1 } + .keys + } + + def listenerListToEndPoints(listeners: String, securityProtocolMap: Map[ListenerName, SecurityProtocol]): Seq[EndPoint] = { listenerListToEndPoints(listeners, securityProtocolMap, requireDistinctPorts = true) } - private def checkDuplicateListenerPorts(endpoints: Seq[Endpoint], listeners: java.util.List[String]): Unit = { + private def checkDuplicateListenerPorts(endpoints: Seq[EndPoint], listeners: String): Unit = { val distinctPorts = endpoints.map(_.port).distinct require(distinctPorts.size == endpoints.map(_.port).size, s"Each listener must have a different port, listeners: $listeners") } - def listenerListToEndPoints(listeners: java.util.List[String], securityProtocolMap: java.util.Map[ListenerName, SecurityProtocol], requireDistinctPorts: Boolean): Seq[Endpoint] = { + def listenerListToEndPoints(listeners: String, securityProtocolMap: Map[ListenerName, SecurityProtocol], requireDistinctPorts: Boolean): Seq[EndPoint] = { def validateOneIsIpv4AndOtherIpv6(first: String, second: String): Boolean = (inetAddressValidator.isValidInet4Address(first) && inetAddressValidator.isValidInet6Address(second)) || (inetAddressValidator.isValidInet6Address(first) && inetAddressValidator.isValidInet4Address(second)) - def validate(endPoints: Seq[Endpoint]): Unit = { - val distinctListenerNames = endPoints.map(_.listener).distinct + def validate(endPoints: Seq[EndPoint]): Unit = { + val distinctListenerNames = endPoints.map(_.listenerName).distinct require(distinctListenerNames.size == endPoints.size, s"Each listener must have a different name, listeners: $listeners") val (duplicatePorts, _) = endPoints.filter { @@ -185,7 +208,8 @@ object CoreUtils { } val endPoints = try { - SocketServerConfigs.listenerListToEndPoints(listeners, securityProtocolMap).asScala + SocketServerConfigs.listenerListToEndPoints(listeners, securityProtocolMap.asJava). + asScala.map(EndPoint.fromJava(_)) } catch { case e: Exception => throw new IllegalArgumentException(s"Error creating broker listeners from '$listeners': ${e.getMessage}", e) @@ -193,4 +217,31 @@ object CoreUtils { validate(endPoints) endPoints } + + def generateUuidAsBase64(): String = { + val uuid = UUID.randomUUID() + Base64.getUrlEncoder.withoutPadding.encodeToString(getBytesFromUuid(uuid)) + } + + def getBytesFromUuid(uuid: UUID): Array[Byte] = { + // Extract bytes for uuid which is 128 bits (or 16 bytes) long. + val uuidBytes = ByteBuffer.wrap(new Array[Byte](16)) + uuidBytes.putLong(uuid.getMostSignificantBits) + uuidBytes.putLong(uuid.getLeastSignificantBits) + uuidBytes.array + } + + def propsWith(key: String, value: String): Properties = { + propsWith((key, value)) + } + + def propsWith(props: (String, String)*): Properties = { + val properties = new Properties() + props.foreach { case (k, v) => properties.put(k, v) } + properties + } + + def replicaToBrokerAssignmentAsScala(map: util.Map[Integer, util.List[Integer]]): Map[Int, Seq[Int]] = { + map.asScala.map(e => (e._1.asInstanceOf[Int], e._2.asScala.map(_.asInstanceOf[Int]))) + } } diff --git a/core/src/main/scala/kafka/utils/Log4jController.scala b/core/src/main/scala/kafka/utils/Log4jController.scala new file mode 100644 index 0000000000000..61573b878ccd6 --- /dev/null +++ b/core/src/main/scala/kafka/utils/Log4jController.scala @@ -0,0 +1,135 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils + +import org.apache.kafka.common.utils.Utils +import org.apache.logging.log4j.core.LoggerContext +import org.apache.logging.log4j.core.config.Configurator +import org.apache.logging.log4j.{Level, LogManager} + +import java.util +import java.util.Locale +import scala.jdk.CollectionConverters._ + + +object Log4jController { + + /** + * Note: In log4j, the root logger's name was "root" and Kafka also followed that name for dynamic logging control feature. + * + * The root logger's name is changed in log4j2 to empty string (see: [[LogManager.ROOT_LOGGER_NAME]]) but for backward- + * compatibility. Kafka keeps its original root logger name. It is why here is a dedicated definition for the root logger name. + */ + val ROOT_LOGGER = "root" + + /** + * Returns a map of the log4j loggers and their assigned log level. + * If a logger does not have a log level assigned, we return the log level of the first ancestor with a level configured. + */ + def loggers: Map[String, String] = { + val logContext = LogManager.getContext(false).asInstanceOf[LoggerContext] + val rootLoggerLevel = logContext.getRootLogger.getLevel.toString + + // Loggers defined in the configuration + val configured = logContext.getConfiguration.getLoggers.asScala + .values + .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + // Loggers actually running + val actual = logContext.getLoggers.asScala + .filterNot(_.getName.equals(LogManager.ROOT_LOGGER_NAME)) + .map { logger => + logger.getName -> logger.getLevel.toString + }.toMap + + (configured ++ actual) + (ROOT_LOGGER -> rootLoggerLevel) + } + + /** + * Sets the log level of a particular logger. If the given logLevel is not an available log4j level + * (i.e., one of OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE, ALL) it falls back to DEBUG. + * + * @see [[Level.toLevel]] + */ + def logLevel(loggerName: String, logLevel: String): Boolean = { + if (Utils.isBlank(loggerName) || Utils.isBlank(logLevel)) + return false + + val level = Level.toLevel(logLevel.toUpperCase(Locale.ROOT)) + + if (loggerName == ROOT_LOGGER) { + Configurator.setLevel(LogManager.ROOT_LOGGER_NAME, level) + true + } else { + if (loggerExists(loggerName) && level != null) { + Configurator.setLevel(loggerName, level) + true + } + else false + } + } + + def unsetLogLevel(loggerName: String): Boolean = { + val nullLevel: Level = null + if (loggerName == ROOT_LOGGER) { + Configurator.setLevel(LogManager.ROOT_LOGGER_NAME, nullLevel) + true + } else { + if (loggerExists(loggerName)) { + Configurator.setLevel(loggerName, nullLevel) + true + } + else false + } + } + + def loggerExists(loggerName: String): Boolean = loggers.contains(loggerName) +} + +/** + * An MBean that allows the user to dynamically alter log4j levels at runtime. + * The companion object contains the singleton instance of this class and + * registers the MBean. The [[kafka.utils.Logging]] trait forces initialization + * of the companion object. + */ +class Log4jController extends Log4jControllerMBean { + + def getLoggers: util.List[String] = { + // we replace scala collection by java collection so mbean client is able to deserialize it without scala library. + new util.ArrayList[String](Log4jController.loggers.map { + case (logger, level) => s"$logger=$level" + }.toSeq.asJava) + } + + + def getLogLevel(loggerName: String): String = { + Log4jController.loggers.getOrElse(loggerName, "No such logger.") + } + + def setLogLevel(loggerName: String, level: String): Boolean = Log4jController.logLevel(loggerName, level) +} + + +trait Log4jControllerMBean { + def getLoggers: java.util.List[String] + def getLogLevel(logger: String): String + def setLogLevel(logger: String, level: String): Boolean +} diff --git a/core/src/main/scala/kafka/utils/Logging.scala b/core/src/main/scala/kafka/utils/Logging.scala index e08a6873fc1ef..dd83e90336099 100755 --- a/core/src/main/scala/kafka/utils/Logging.scala +++ b/core/src/main/scala/kafka/utils/Logging.scala @@ -18,22 +18,19 @@ package kafka.utils import com.typesafe.scalalogging.Logger -import org.apache.kafka.server.logger.LoggingController import org.slf4j.{LoggerFactory, Marker, MarkerFactory} + object Log4jControllerRegistration { private val logger = Logger(this.getClass.getName) - private val loggingMBean = new LoggingController - registerMBean(loggingMBean, "kafka.Log4jController") - - private def registerMBean(mbean: LoggingController, typeAttr: String): Unit = { - try { - CoreUtils.registerMBean(mbean, s"kafka:type=$typeAttr") - logger.info("Registered `kafka:type={}` MBean", typeAttr) - } catch { - case e: Exception => logger.warn("Couldn't register `kafka:type={}` MBean", typeAttr, e) - } + try { + val log4jController = Class.forName("kafka.utils.Log4jController").asInstanceOf[Class[Object]] + val instance = log4jController.getDeclaredConstructor().newInstance() + CoreUtils.registerMBean(instance, "kafka:type=kafka.Log4jController") + logger.info("Registered kafka:type=kafka.Log4jController MBean") + } catch { + case _: Exception => logger.info("Couldn't register kafka:type=kafka.Log4jController MBean") } } diff --git a/core/src/main/scala/kafka/utils/Mx4jLoader.scala b/core/src/main/scala/kafka/utils/Mx4jLoader.scala index 7e8b1dba53e6a..5fbbebed47572 100644 --- a/core/src/main/scala/kafka/utils/Mx4jLoader.scala +++ b/core/src/main/scala/kafka/utils/Mx4jLoader.scala @@ -30,14 +30,12 @@ import javax.management.ObjectName * * This is a Scala port of org.apache.cassandra.utils.Mx4jTool written by Ran Tavory for CASSANDRA-1068 * */ -@deprecated object Mx4jLoader extends Logging { def maybeLoad(): Boolean = { val props = new VerifiableProperties(System.getProperties) if (!props.getBoolean("kafka_mx4jenable", default = false)) return false - warn("MX4j is deprecated and will be removed in Kafka 5.0") val address = props.getString("mx4jaddress", "0.0.0.0") val port = props.getInt("mx4jport", 8082) try { diff --git a/core/src/main/scala/kafka/utils/Pool.scala b/core/src/main/scala/kafka/utils/Pool.scala new file mode 100644 index 0000000000000..1a59b41bad51c --- /dev/null +++ b/core/src/main/scala/kafka/utils/Pool.scala @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils + +import java.util.concurrent._ + +import org.apache.kafka.common.KafkaException + +import collection.Set +import scala.jdk.CollectionConverters._ + +class Pool[K,V](valueFactory: Option[K => V] = None) extends Iterable[(K, V)] { + + private val pool: ConcurrentMap[K, V] = new ConcurrentHashMap[K, V] + + def put(k: K, v: V): V = pool.put(k, v) + + def putAll(map: java.util.Map[K, V]): Unit = pool.putAll(map) + + def putIfNotExists(k: K, v: V): V = pool.putIfAbsent(k, v) + + /** + * Gets the value associated with the given key. If there is no associated + * value, then create the value using the pool's value factory and return the + * value associated with the key. The user should declare the factory method + * as lazy if its side-effects need to be avoided. + * + * @param key The key to lookup. + * @return The final value associated with the key. + */ + def getAndMaybePut(key: K): V = { + if (valueFactory.isEmpty) + throw new KafkaException("Empty value factory in pool.") + getAndMaybePut(key, valueFactory.get(key)) + } + + /** + * Gets the value associated with the given key. If there is no associated + * value, then create the value using the provided by `createValue` and return the + * value associated with the key. + * + * @param key The key to lookup. + * @param createValue Factory function. + * @return The final value associated with the key. + */ + def getAndMaybePut(key: K, createValue: => V): V = + pool.computeIfAbsent(key, _ => createValue) + + def contains(id: K): Boolean = pool.containsKey(id) + + def get(key: K): V = pool.get(key) + + def remove(key: K): V = pool.remove(key) + + def remove(key: K, value: V): Boolean = pool.remove(key, value) + + def removeAll(keys: Iterable[K]): Unit = pool.keySet.removeAll(keys.asJavaCollection) + + def keys: Set[K] = pool.keySet.asScala + + def values: Iterable[V] = pool.values.asScala + + def clear(): Unit = { pool.clear() } + + def foreachEntry(f: (K, V) => Unit): Unit = { + pool.forEach((k, v) => f(k, v)) + } + + override def size: Int = pool.size + + override def iterator: Iterator[(K, V)] = new Iterator[(K,V)]() { + + private val iter = pool.entrySet.iterator + + def hasNext: Boolean = iter.hasNext + + def next(): (K, V) = { + val n = iter.next + (n.getKey, n.getValue) + } + + } + +} diff --git a/core/src/main/scala/kafka/utils/json/DecodeJson.scala b/core/src/main/scala/kafka/utils/json/DecodeJson.scala new file mode 100644 index 0000000000000..9c7bec0bdd11a --- /dev/null +++ b/core/src/main/scala/kafka/utils/json/DecodeJson.scala @@ -0,0 +1,109 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils.json + +import scala.collection.{Factory, Map, Seq} +import scala.jdk.CollectionConverters._ +import com.fasterxml.jackson.databind.{JsonMappingException, JsonNode} + +/** + * A type class for parsing JSON. This should typically be used via `JsonValue.apply`. + */ +trait DecodeJson[T] { + + /** + * Decode the JSON node provided into an instance of `Right[T]`, if possible. Otherwise, return an error message + * wrapped by an instance of `Left`. + */ + def decodeEither(node: JsonNode): Either[String, T] + + /** + * Decode the JSON node provided into an instance of `T`. + * + * @throws JsonMappingException if `node` cannot be decoded into `T`. + */ + def decode(node: JsonNode): T = + decodeEither(node) match { + case Right(x) => x + case Left(x) => throw new JsonMappingException(null, x) + } + +} + +/** + * Contains `DecodeJson` type class instances. That is, we need one instance for each type that we want to be able to + * to parse into. It is a compiler error to try to parse into a type for which there is no instance. + */ +object DecodeJson { + + implicit object DecodeBoolean extends DecodeJson[Boolean] { + def decodeEither(node: JsonNode): Either[String, Boolean] = + if (node.isBoolean) Right(node.booleanValue) else Left(s"Expected `Boolean` value, received $node") + } + + implicit object DecodeDouble extends DecodeJson[Double] { + def decodeEither(node: JsonNode): Either[String, Double] = + if (node.isDouble || node.isLong || node.isInt) + Right(node.doubleValue) + else Left(s"Expected `Double` value, received $node") + } + + implicit object DecodeInt extends DecodeJson[Int] { + def decodeEither(node: JsonNode): Either[String, Int] = + if (node.isInt) Right(node.intValue) else Left(s"Expected `Int` value, received $node") + } + + implicit object DecodeLong extends DecodeJson[Long] { + def decodeEither(node: JsonNode): Either[String, Long] = + if (node.isLong || node.isInt) Right(node.longValue) else Left(s"Expected `Long` value, received $node") + } + + implicit object DecodeString extends DecodeJson[String] { + def decodeEither(node: JsonNode): Either[String, String] = + if (node.isTextual) Right(node.textValue) else Left(s"Expected `String` value, received $node") + } + + implicit def decodeOption[E](implicit decodeJson: DecodeJson[E]): DecodeJson[Option[E]] = (node: JsonNode) => { + if (node.isNull) Right(None) + else decodeJson.decodeEither(node).map(Some(_)) + } + + implicit def decodeSeq[E, S[+T] <: Seq[E]](implicit decodeJson: DecodeJson[E], factory: Factory[E, S[E]]): DecodeJson[S[E]] = (node: JsonNode) => { + if (node.isArray) + decodeIterator(node.elements.asScala)(decodeJson.decodeEither) + else Left(s"Expected JSON array, received $node") + } + + implicit def decodeMap[V, M[K, +V] <: Map[K, V]](implicit decodeJson: DecodeJson[V], factory: Factory[(String, V), M[String, V]]): DecodeJson[M[String, V]] = (node: JsonNode) => { + if (node.isObject) + decodeIterator(node.fields.asScala)(e => decodeJson.decodeEither(e.getValue).map(v => (e.getKey, v))) + else Left(s"Expected JSON object, received $node") + } + + private def decodeIterator[S, T, C](it: Iterator[S])(f: S => Either[String, T])(implicit factory: Factory[T, C]): Either[String, C] = { + val result = factory.newBuilder + while (it.hasNext) { + f(it.next()) match { + case Right(x) => result += x + case Left(x) => return Left(x) + } + } + Right(result.result()) + } + +} diff --git a/core/src/main/scala/kafka/utils/json/JsonArray.scala b/core/src/main/scala/kafka/utils/json/JsonArray.scala new file mode 100644 index 0000000000000..c22eda8651d75 --- /dev/null +++ b/core/src/main/scala/kafka/utils/json/JsonArray.scala @@ -0,0 +1,27 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils.json + +import scala.collection.Iterator +import scala.jdk.CollectionConverters._ + +import com.fasterxml.jackson.databind.node.ArrayNode + +class JsonArray private[json] (protected val node: ArrayNode) extends JsonValue { + def iterator: Iterator[JsonValue] = node.elements.asScala.map(JsonValue(_)) +} diff --git a/core/src/main/scala/kafka/utils/json/JsonObject.scala b/core/src/main/scala/kafka/utils/json/JsonObject.scala new file mode 100644 index 0000000000000..9bf91ae1a6b0a --- /dev/null +++ b/core/src/main/scala/kafka/utils/json/JsonObject.scala @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils.json + +import com.fasterxml.jackson.databind.JsonMappingException + +import scala.jdk.CollectionConverters._ + +import com.fasterxml.jackson.databind.node.ObjectNode + +import scala.collection.Iterator + +/** + * A thin wrapper over Jackson's `ObjectNode` for a more idiomatic API. See `JsonValue` for more details. + */ +class JsonObject private[json] (protected val node: ObjectNode) extends JsonValue { + + def apply(name: String): JsonValue = + get(name).getOrElse(throw new JsonMappingException(null, s"No such field exists: `$name`")) + + def get(name: String): Option[JsonValue] = Option(node.get(name)).map(JsonValue(_)) + + def iterator: Iterator[(String, JsonValue)] = node.fields.asScala.map { entry => + (entry.getKey, JsonValue(entry.getValue)) + } + +} diff --git a/core/src/main/scala/kafka/utils/json/JsonValue.scala b/core/src/main/scala/kafka/utils/json/JsonValue.scala new file mode 100644 index 0000000000000..ff62c6c12d138 --- /dev/null +++ b/core/src/main/scala/kafka/utils/json/JsonValue.scala @@ -0,0 +1,116 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils.json + +import com.fasterxml.jackson.databind.{JsonMappingException, JsonNode} +import com.fasterxml.jackson.databind.node.{ArrayNode, ObjectNode} + +/** + * A simple wrapper over Jackson's JsonNode that enables type safe parsing via the `DecodeJson` type + * class. + * + * Typical usage would be something like: + * + * {{{ + * val jsonNode: JsonNode = ??? + * val jsonObject = JsonValue(jsonNode).asJsonObject + * val intValue = jsonObject("int_field").to[Int] + * val optionLongValue = jsonObject("option_long_field").to[Option[Long]] + * val mapStringIntField = jsonObject("map_string_int_field").to[Map[String, Int]] + * val seqStringField = jsonObject("seq_string_field").to[Seq[String] + * }}} + * + * The `to` method throws an exception if the value cannot be converted to the requested type. An alternative is the + * `toEither` method that returns an `Either` instead. + */ +trait JsonValue { + + protected def node: JsonNode + + /** + * Decode this JSON value into an instance of `T`. + * + * @throws JsonMappingException if this value cannot be decoded into `T`. + */ + def to[T](implicit decodeJson: DecodeJson[T]): T = decodeJson.decode(node) + + /** + * Decode this JSON value into an instance of `Right[T]`, if possible. Otherwise, return an error message + * wrapped by an instance of `Left`. + */ + def toEither[T](implicit decodeJson: DecodeJson[T]): Either[String, T] = decodeJson.decodeEither(node) + + /** + * If this is a JSON object, return an instance of JsonObject. Otherwise, throw a JsonMappingException. + */ + def asJsonObject: JsonObject = + asJsonObjectOption.getOrElse(throw new JsonMappingException(null, s"Expected JSON object, received $node")) + + /** + * If this is a JSON object, return a JsonObject wrapped by a `Some`. Otherwise, return None. + */ + def asJsonObjectOption: Option[JsonObject] = this match { + case j: JsonObject => Some(j) + case _ => node match { + case n: ObjectNode => Some(new JsonObject(n)) + case _ => None + } + } + + /** + * If this is a JSON array, return an instance of JsonArray. Otherwise, throw a JsonMappingException. + */ + def asJsonArray: JsonArray = + asJsonArrayOption.getOrElse(throw new JsonMappingException(null, s"Expected JSON array, received $node")) + + /** + * If this is a JSON array, return a JsonArray wrapped by a `Some`. Otherwise, return None. + */ + def asJsonArrayOption: Option[JsonArray] = this match { + case j: JsonArray => Some(j) + case _ => node match { + case n: ArrayNode => Some(new JsonArray(n)) + case _ => None + } + } + + override def hashCode: Int = node.hashCode + + override def equals(a: Any): Boolean = a match { + case a: JsonValue => node == a.node + case _ => false + } + + override def toString: String = node.toString + +} + +object JsonValue { + + /** + * Create an instance of `JsonValue` from Jackson's `JsonNode`. + */ + def apply(node: JsonNode): JsonValue = node match { + case n: ObjectNode => new JsonObject(n) + case n: ArrayNode => new JsonArray(n) + case _ => new BasicJsonValue(node) + } + + private class BasicJsonValue private[json] (protected val node: JsonNode) extends JsonValue + +} diff --git a/core/src/test/java/kafka/admin/AdminFenceProducersTest.java b/core/src/test/java/kafka/admin/AdminFenceProducersTest.java new file mode 100644 index 0000000000000..eab7cd18e75af --- /dev/null +++ b/core/src/test/java/kafka/admin/AdminFenceProducersTest.java @@ -0,0 +1,148 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.admin; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.FenceProducersOptions; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.errors.ApiException; +import org.apache.kafka.common.errors.InvalidProducerEpochException; +import org.apache.kafka.common.errors.ProducerFencedException; +import org.apache.kafka.common.errors.TimeoutException; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.coordinator.transaction.TransactionLogConfig; +import org.apache.kafka.coordinator.transaction.TransactionStateManagerConfig; +import org.apache.kafka.server.config.ServerLogConfigs; + +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@ClusterTestDefaults(serverProperties = { + @ClusterConfigProperty(key = ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, value = "false"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, value = "1"), + @ClusterConfigProperty(key = TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, value = "2000") +}) +public class AdminFenceProducersTest { + private static final String TOPIC_NAME = "mytopic"; + private static final String TXN_ID = "mytxnid"; + private static final String INCORRECT_BROKER_PORT = "225"; + private static final ProducerRecord RECORD = new ProducerRecord<>(TOPIC_NAME, null, new byte[1]); + private final ClusterInstance clusterInstance; + + AdminFenceProducersTest(ClusterInstance clusterInstance) { + this.clusterInstance = clusterInstance; + } + + private KafkaProducer createProducer() { + Properties config = new Properties(); + config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + config.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, TXN_ID); + config.put(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "2000"); + config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); + config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName()); + + return new KafkaProducer<>(config); + } + + @ClusterTest + void testFenceAfterProducerCommit() throws Exception { + clusterInstance.createTopic(TOPIC_NAME, 1, (short) 1); + + try (KafkaProducer producer = createProducer(); + Admin adminClient = clusterInstance.admin()) { + producer.initTransactions(); + producer.beginTransaction(); + producer.send(RECORD).get(); + producer.commitTransaction(); + + adminClient.fenceProducers(Collections.singletonList(TXN_ID)).all().get(); + + producer.beginTransaction(); + ExecutionException exceptionDuringSend = assertThrows( + ExecutionException.class, + () -> producer.send(RECORD).get(), "expected InvalidProducerEpochException" + ); + + // In Transaction V2, the ProducerFencedException will be converted to InvalidProducerEpochException when + // coordinator handles AddPartitionRequest. + assertInstanceOf(InvalidProducerEpochException.class, exceptionDuringSend.getCause()); + + // InvalidProducerEpochException is treated as fatal error. The commitTransaction will return this last + // fatal error. + assertThrows(InvalidProducerEpochException.class, producer::commitTransaction); + } + } + + @ClusterTest + void testFenceProducerTimeoutMs() { + Map config = new HashMap<>(); + config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:" + INCORRECT_BROKER_PORT); + + try (Admin adminClient = clusterInstance.admin(config)) { + ExecutionException exception = assertThrows( + ExecutionException.class, () -> + adminClient.fenceProducers(Collections.singletonList(TXN_ID), new FenceProducersOptions().timeoutMs(0)).all().get()); + assertInstanceOf(TimeoutException.class, exception.getCause()); + } + } + + @ClusterTest + void testFenceBeforeProducerCommit() throws Exception { + clusterInstance.createTopic(TOPIC_NAME, 1, (short) 1); + + try (KafkaProducer producer = createProducer(); + Admin adminClient = clusterInstance.admin()) { + + producer.initTransactions(); + producer.beginTransaction(); + producer.send(RECORD).get(); + + adminClient.fenceProducers(Collections.singletonList(TXN_ID)).all().get(); + + ExecutionException exceptionDuringSend = assertThrows( + ExecutionException.class, () -> + producer.send(RECORD).get(), "expected ProducerFencedException" + ); + assertTrue(exceptionDuringSend.getCause() instanceof ProducerFencedException || + exceptionDuringSend.getCause() instanceof InvalidProducerEpochException); + + ApiException exceptionDuringCommit = assertThrows( + ApiException.class, + producer::commitTransaction, "Expected Exception" + ); + assertTrue(exceptionDuringCommit instanceof ProducerFencedException || + exceptionDuringCommit instanceof InvalidProducerEpochException); + } + } +} \ No newline at end of file diff --git a/core/src/test/java/kafka/admin/ClientTelemetryTest.java b/core/src/test/java/kafka/admin/ClientTelemetryTest.java new file mode 100644 index 0000000000000..e43f52f7271a7 --- /dev/null +++ b/core/src/test/java/kafka/admin/ClientTelemetryTest.java @@ -0,0 +1,195 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.admin; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.MetricName; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.MetricsReporter; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.server.telemetry.ClientTelemetry; +import org.apache.kafka.server.telemetry.ClientTelemetryReceiver; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.UUID; +import java.util.concurrent.ExecutionException; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static org.apache.kafka.clients.admin.AdminClientConfig.METRIC_REPORTER_CLASSES_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class ClientTelemetryTest { + + @ClusterTest( + types = Type.KRAFT, + brokers = 3, + serverProperties = { + @ClusterConfigProperty(key = METRIC_REPORTER_CLASSES_CONFIG, value = "kafka.admin.ClientTelemetryTest$GetIdClientTelemetry"), + }) + public void testClientInstanceId(ClusterInstance clusterInstance) throws InterruptedException, ExecutionException { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + configs.put(AdminClientConfig.ENABLE_METRICS_PUSH_CONFIG, true); + try (Admin admin = Admin.create(configs)) { + String testTopicName = "test_topic"; + admin.createTopics(Collections.singletonList(new NewTopic(testTopicName, 1, (short) 1))); + clusterInstance.waitForTopic(testTopicName, 1); + + Map producerConfigs = new HashMap<>(); + producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + producerConfigs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfigs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + + try (Producer producer = new KafkaProducer<>(producerConfigs)) { + producer.send(new ProducerRecord<>(testTopicName, 0, null, "bar")).get(); + producer.flush(); + Uuid producerClientId = producer.clientInstanceId(Duration.ofSeconds(3)); + assertNotNull(producerClientId); + assertEquals(producerClientId, producer.clientInstanceId(Duration.ofSeconds(3))); + } + + Map consumerConfigs = new HashMap<>(); + consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + consumerConfigs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerConfigs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + + try (Consumer consumer = new KafkaConsumer<>(consumerConfigs)) { + consumer.assign(Collections.singletonList(new TopicPartition(testTopicName, 0))); + consumer.seekToBeginning(Collections.singletonList(new TopicPartition(testTopicName, 0))); + Uuid consumerClientId = consumer.clientInstanceId(Duration.ofSeconds(5)); + // before poll, the clientInstanceId will return null + assertNull(consumerClientId); + List values = new ArrayList<>(); + ConsumerRecords records = consumer.poll(Duration.ofSeconds(1)); + for (ConsumerRecord record : records) { + values.add(record.value()); + } + assertEquals(1, values.size()); + assertEquals("bar", values.get(0)); + consumerClientId = consumer.clientInstanceId(Duration.ofSeconds(3)); + assertNotNull(consumerClientId); + assertEquals(consumerClientId, consumer.clientInstanceId(Duration.ofSeconds(3))); + } + Uuid uuid = admin.clientInstanceId(Duration.ofSeconds(3)); + assertNotNull(uuid); + assertEquals(uuid, admin.clientInstanceId(Duration.ofSeconds(3))); + } + } + + @ClusterTest(types = {Type.CO_KRAFT, Type.KRAFT}) + public void testIntervalMsParser(ClusterInstance clusterInstance) { + List alterOpts = asList("--bootstrap-server", clusterInstance.bootstrapServers(), + "--alter", "--entity-type", "client-metrics", "--entity-name", "test", "--add-config", "interval.ms=bbb"); + try (Admin client = clusterInstance.admin()) { + ConfigCommand.ConfigCommandOptions addOpts = new ConfigCommand.ConfigCommandOptions(toArray(alterOpts)); + + Throwable e = assertThrows(ExecutionException.class, () -> ConfigCommand.alterConfig(client, addOpts)); + assertTrue(e.getMessage().contains(InvalidConfigurationException.class.getSimpleName())); + } + } + + @ClusterTest(types = Type.KRAFT) + public void testMetrics(ClusterInstance clusterInstance) { + Map configs = new HashMap<>(); + configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + List expectedMetricsName = Arrays.asList("request-size-max", "io-wait-ratio", "response-total", + "version", "io-time-ns-avg", "network-io-rate"); + try (Admin admin = Admin.create(configs)) { + Set actualMetricsName = admin.metrics().keySet().stream() + .map(MetricName::name) + .collect(Collectors.toSet()); + expectedMetricsName.forEach(expectedName -> assertTrue(actualMetricsName.contains(expectedName), + String.format("actual metrics name: %s dont contains expected: %s", actualMetricsName, + expectedName))); + assertTrue(actualMetricsName.containsAll(expectedMetricsName)); + } + } + + private static String[] toArray(List... lists) { + return Stream.of(lists).flatMap(List::stream).toArray(String[]::new); + } + + /** + * We should add a ClientTelemetry into plugins to test the clientInstanceId method Otherwise the + * {@link org.apache.kafka.common.protocol.ApiKeys.GET_TELEMETRY_SUBSCRIPTIONS } command will not be supported + * by the server + **/ + public static class GetIdClientTelemetry implements ClientTelemetry, MetricsReporter { + + + @Override + public void init(List metrics) { + } + + @Override + public void metricChange(KafkaMetric metric) { + } + + @Override + public void metricRemoval(KafkaMetric metric) { + } + + @Override + public void close() { + } + + @Override + public void configure(Map configs) { + } + + @Override + public ClientTelemetryReceiver clientReceiver() { + return (context, payload) -> { + }; + } + } + +} diff --git a/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java new file mode 100644 index 0000000000000..294407caedaf5 --- /dev/null +++ b/core/src/test/java/kafka/admin/ConfigCommandIntegrationTest.java @@ -0,0 +1,631 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientTestUtils; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.server.common.MetadataVersion; +import org.apache.kafka.test.TestUtils; + +import org.mockito.Mockito; + +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.atomic.AtomicReference; +import java.util.function.Consumer; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static java.util.Arrays.asList; +import static java.util.Collections.singleton; +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_LOCATION_CONFIG; +import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG; +import static org.apache.kafka.common.config.SslConfigs.SSL_TRUSTSTORE_TYPE_CONFIG; +import static org.apache.kafka.coordinator.group.GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG; +import static org.apache.kafka.coordinator.group.GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG; +import static org.apache.kafka.server.config.ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG; +import static org.apache.kafka.server.config.ServerConfigs.MESSAGE_MAX_BYTES_CONFIG; +import static org.apache.kafka.server.config.ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertInstanceOf; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; + +public class ConfigCommandIntegrationTest { + private final String defaultBrokerId = "0"; + private final String defaultGroupName = "group"; + private final String defaultClientMetricsName = "cm"; + private final ClusterInstance cluster; + + private static Runnable run(Stream command) { + return () -> { + try { + ConfigCommand.main(command.toArray(String[]::new)); + } catch (RuntimeException e) { + // do nothing. + } finally { + Exit.resetExitProcedure(); + } + }; + } + + public ConfigCommandIntegrationTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @ClusterTest + public void testExitWithNonZeroStatusOnUpdatingUnallowedConfig() { + assertNonZeroStatusExit(Stream.concat(quorumArgs(), Stream.of( + "--entity-name", "0", + "--entity-type", "brokers", + "--alter", + "--add-config", "security.inter.broker.protocol=PLAINTEXT")), + errOut -> assertTrue(errOut.contains("Cannot update these configs dynamically: Set(security.inter.broker.protocol)"), errOut)); + } + + @ClusterTest + public void testNullStatusOnKraftCommandAlterUserQuota() { + Stream command = Stream.concat(quorumArgs(), Stream.of( + "--entity-type", "users", + "--entity-name", "admin", + "--alter", "--add-config", "consumer_byte_rate=20000")); + String message = captureStandardStream(false, run(command)); + assertEquals("Completed updating config for user admin.", message); + } + + @ClusterTest + public void testNullStatusOnKraftCommandAlterGroup() { + Stream command = Stream.concat(quorumArgs(), Stream.of( + "--entity-type", "groups", + "--entity-name", "group", + "--alter", "--add-config", "consumer.session.timeout.ms=50000")); + String message = captureStandardStream(false, run(command)); + assertEquals("Completed updating config for group group.", message); + + // Test for the --group alias + command = Stream.concat(quorumArgs(), Stream.of( + "--group", "group", + "--alter", "--add-config", "consumer.session.timeout.ms=50000")); + message = captureStandardStream(false, run(command)); + assertEquals("Completed updating config for group group.", message); + } + + @ClusterTest + public void testNullStatusOnKraftCommandAlterClientMetrics() { + Stream command = Stream.concat(quorumArgs(), Stream.of( + "--entity-type", "client-metrics", + "--entity-name", "cm", + "--alter", "--add-config", "metrics=org.apache")); + String message = captureStandardStream(false, run(command)); + assertEquals("Completed updating config for client-metric cm.", message); + + // Test for the --client-metrics alias + command = Stream.concat(quorumArgs(), Stream.of( + "--client-metrics", "cm", + "--alter", "--add-config", "metrics=org.apache")); + message = captureStandardStream(false, run(command)); + assertEquals("Completed updating config for client-metric cm.", message); + } + + @ClusterTest + public void testDynamicBrokerConfigUpdateUsingKraft() throws Exception { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + + try (Admin client = cluster.admin()) { + // Add config + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "110000"), alterOpts); + alterAndVerifyConfig(client, Optional.empty(), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "120000"), alterOpts); + + // Change config + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "130000"), alterOpts); + alterAndVerifyConfig(client, Optional.empty(), singletonMap(MESSAGE_MAX_BYTES_CONFIG, "140000"), alterOpts); + + // Delete config + deleteAndVerifyConfigValue(client, defaultBrokerId, singleton(MESSAGE_MAX_BYTES_CONFIG), true, alterOpts); + + // Listener configs: should work only with listener name + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts); + // Per-broker config configured at default cluster-level should fail + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.empty(), + singletonMap("listener.name.internal.ssl.keystore.location", "/tmp/test.jks"), alterOpts)); + deleteAndVerifyConfigValue(client, defaultBrokerId, + singleton("listener.name.internal.ssl.keystore.location"), false, alterOpts); + alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap("listener.name.external.ssl.keystore.password", "secret"), alterOpts); + + // Password config update with encoder secret should succeed and encoded password must be stored in ZK + Map configs = new HashMap<>(); + configs.put("listener.name.external.ssl.keystore.password", "secret"); + configs.put("log.cleaner.threads", "2"); + // Password encoder configs + + // Password config update at default cluster-level should fail + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), configs, alterOpts)); + } + } + + @ClusterTest + public void testGroupConfigUpdateUsingKraft() throws Exception { + List alterOpts = Stream.concat(entityOp(Optional.of(defaultGroupName)).stream(), + Stream.of("--entity-type", "groups", "--alter")) + .collect(Collectors.toList()); + verifyGroupConfigUpdate(alterOpts); + + // Test for the --group alias + verifyGroupConfigUpdate(asList("--group", defaultGroupName, "--alter")); + } + + private void verifyGroupConfigUpdate(List alterOpts) throws Exception { + try (Admin client = cluster.admin()) { + // Add config + Map configs = new HashMap<>(); + configs.put(CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"); + configs.put(CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "6000"); + alterAndVerifyGroupConfig(client, defaultGroupName, configs, alterOpts); + + // Delete config + configs.put(CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "45000"); + configs.put(CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, "5000"); + deleteAndVerifyGroupConfigValue(client, defaultGroupName, configs, alterOpts); + + // Unknown config configured should fail + assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts)); + } + } + + + @ClusterTest(types = {Type.KRAFT}) + public void testClientMetricsConfigUpdate() throws Exception { + List alterOpts = Stream.concat(entityOp(Optional.of(defaultClientMetricsName)).stream(), + Stream.of("--entity-type", "client-metrics", "--alter")) + .collect(Collectors.toList()); + verifyClientMetricsConfigUpdate(alterOpts); + + // Test for the --client-metrics alias + verifyClientMetricsConfigUpdate(asList("--client-metrics", defaultClientMetricsName, "--alter")); + } + + private void verifyClientMetricsConfigUpdate(List alterOpts) throws Exception { + try (Admin client = cluster.admin()) { + // Add config + Map configs = new HashMap<>(); + configs.put("metrics", ""); + configs.put("interval.ms", "6000"); + alterAndVerifyClientMetricsConfig(client, defaultClientMetricsName, configs, alterOpts); + + // Delete config + deleteAndVerifyClientMetricsConfigValue(client, defaultClientMetricsName, configs.keySet(), alterOpts); + + // Unknown config configured should fail + assertThrows(ExecutionException.class, () -> alterConfigWithAdmin(client, singletonMap("unknown.config", "20000"), alterOpts)); + } + } + + @ClusterTest + public void testAlterReadOnlyConfigInKRaftThenShouldFail() { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + + try (Admin client = cluster.admin()) { + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false"), alterOpts)); + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(AUTO_LEADER_REBALANCE_ENABLE_CONFIG, "false"), alterOpts)); + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap("broker.id", "1"), alterOpts)); + } + } + + @ClusterTest + public void testUpdateClusterWideConfigInKRaftThenShouldSuccessful() throws Exception { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + + try (Admin client = cluster.admin()) { + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap("log.flush.interval.messages", "100"), alterOpts); + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap("log.retention.bytes", "20"), alterOpts); + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap("log.retention.ms", "2"), alterOpts); + } + } + + @ClusterTest + public void testUpdatePerBrokerConfigWithListenerNameInKRaftThenShouldSuccessful() throws Exception { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + String listenerName = "listener.name.internal."; + + try (Admin client = cluster.admin()) { + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap(listenerName + "ssl.truststore.type", "PKCS12"), alterOpts); + alterAndVerifyConfig(client, Optional.of(defaultBrokerId), + singletonMap(listenerName + "ssl.truststore.location", "/temp/test.jks"), alterOpts); + alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(listenerName + "ssl.truststore.password", "password"), alterOpts); + verifyConfigSecretValue(client, Optional.of(defaultBrokerId), + singleton(listenerName + "ssl.truststore.password")); + } + } + + @ClusterTest + public void testUpdatePerBrokerConfigInKRaftThenShouldFail() { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + + try (Admin client = cluster.admin()) { + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(SSL_TRUSTSTORE_TYPE_CONFIG, "PKCS12"), alterOpts)); + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(SSL_TRUSTSTORE_LOCATION_CONFIG, "/temp/test.jks"), alterOpts)); + assertThrows(ExecutionException.class, + () -> alterConfigWithAdmin(client, Optional.of(defaultBrokerId), + singletonMap(SSL_TRUSTSTORE_PASSWORD_CONFIG, "password"), alterOpts)); + } + } + + @ClusterTest + public void testUpdateInvalidBrokerConfigs() { + updateAndCheckInvalidBrokerConfig(Optional.empty()); + updateAndCheckInvalidBrokerConfig(Optional.of(cluster.anyBrokerSocketServer().config().brokerId() + "")); + } + + private void updateAndCheckInvalidBrokerConfig(Optional brokerIdOrDefault) { + List alterOpts = generateDefaultAlterOpts(cluster.bootstrapServers()); + try (Admin client = cluster.admin()) { + alterConfigWithAdmin(client, brokerIdOrDefault, Collections.singletonMap("invalid", "2"), alterOpts); + + Stream describeCommand = Stream.concat( + Stream.concat( + Stream.of("--bootstrap-server", cluster.bootstrapServers()), + Stream.of(entityOp(brokerIdOrDefault).toArray(new String[0]))), + Stream.of("--entity-type", "brokers", "--describe")); + String describeResult = captureStandardStream(false, run(describeCommand)); + + // We will treat unknown config as sensitive + assertTrue(describeResult.contains("sensitive=true"), describeResult); + // Sensitive config will not return + assertTrue(describeResult.contains("invalid=null"), describeResult); + } + } + + @ClusterTest + public void testUpdateInvalidTopicConfigs() throws ExecutionException, InterruptedException { + List alterOpts = asList("--bootstrap-server", cluster.bootstrapServers(), "--entity-type", "topics", "--alter"); + try (Admin client = cluster.admin()) { + client.createTopics(Collections.singletonList(new NewTopic("test-config-topic", 1, (short) 1))).all().get(); + assertInstanceOf( + InvalidConfigurationException.class, + assertThrows( + ExecutionException.class, + () -> ConfigCommand.alterConfig( + client, + new ConfigCommand.ConfigCommandOptions( + toArray(alterOpts, + asList("--add-config", "invalid=2", "--entity-type", "topics", "--entity-name", "test-config-topic")))) + ).getCause() + ); + } + } + + // Test case from KAFKA-13788 + @ClusterTest(serverProperties = { + // Must be at greater than 1MB per cleaner thread, set to 2M+2 so that we can set 2 cleaner threads. + @ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154"), + }) + public void testUpdateBrokerConfigNotAffectedByInvalidConfig() { + try (Admin client = cluster.admin()) { + ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions( + toArray(asList("--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threadzz=2", + "--entity-type", "brokers", + "--entity-default")))); + + ConfigCommand.alterConfig(client, new ConfigCommand.ConfigCommandOptions( + toArray(asList("--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threads=2", + "--entity-type", "brokers", + "--entity-default")))); + kafka.utils.TestUtils.waitUntilTrue( + () -> cluster.brokerSocketServers().stream().allMatch(broker -> broker.config().getInt("log.cleaner.threads") == 2), + () -> "Timeout waiting for topic config propagating to broker", + org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS, + 100L); + } + } + + @ClusterTest( + // Must be at greater than 1MB per cleaner thread, set to 2M+2 so that we can set 2 cleaner threads. + serverProperties = {@ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "2097154")}, + metadataVersion = MetadataVersion.IBP_3_9_IV0 + ) + public void testUnsupportedVersionException() { + try (Admin client = cluster.admin()) { + Admin spyAdmin = Mockito.spy(client); + + AlterConfigsResult mockResult = AdminClientTestUtils.alterConfigsResult( + new ConfigResource(ConfigResource.Type.BROKER, ""), new UnsupportedVersionException("simulated error")); + Mockito.doReturn(mockResult).when(spyAdmin) + .incrementalAlterConfigs(any(java.util.Map.class), any(AlterConfigsOptions.class)); + assertEquals( + "The INCREMENTAL_ALTER_CONFIGS API is not supported by the cluster. The API is supported starting from version 2.3.0. You may want to use an older version of this tool to interact with your cluster, or upgrade your brokers to version 2.3.0 or newer to avoid this error.", + assertThrows(UnsupportedVersionException.class, () -> { + ConfigCommand.alterConfig(spyAdmin, new ConfigCommand.ConfigCommandOptions( + toArray(asList( + "--bootstrap-server", cluster.bootstrapServers(), + "--alter", + "--add-config", "log.cleaner.threads=2", + "--entity-type", "brokers", + "--entity-default")))); + }).getMessage() + ); + Mockito.verify(spyAdmin).incrementalAlterConfigs(any(java.util.Map.class), any(AlterConfigsOptions.class)); + } + } + + private void assertNonZeroStatusExit(Stream args, Consumer checkErrOut) { + AtomicReference exitStatus = new AtomicReference<>(); + Exit.setExitProcedure((status, __) -> { + exitStatus.set(status); + throw new RuntimeException(); + }); + + String errOut = captureStandardStream(true, run(args)); + + checkErrOut.accept(errOut); + assertNotNull(exitStatus.get()); + assertEquals(1, exitStatus.get()); + } + + private Stream quorumArgs() { + return Stream.of("--bootstrap-server", cluster.bootstrapServers()); + } + + private List entityOp(Optional entityId) { + return entityId.map(id -> asList("--entity-name", id)) + .orElse(singletonList("--entity-default")); + } + + private List generateDefaultAlterOpts(String bootstrapServers) { + return asList("--bootstrap-server", bootstrapServers, + "--entity-type", "brokers", "--alter"); + } + + private void alterAndVerifyConfig(Admin client, + Optional brokerId, + Map config, + List alterOpts) throws Exception { + alterConfigWithAdmin(client, brokerId, config, alterOpts); + verifyConfig(client, brokerId, config); + } + + private void alterAndVerifyGroupConfig(Admin client, + String groupName, + Map config, + List alterOpts) throws Exception { + alterConfigWithAdmin(client, config, alterOpts); + verifyGroupConfig(client, groupName, config); + } + + private void alterAndVerifyClientMetricsConfig(Admin client, + String clientMetricsName, + Map config, + List alterOpts) throws Exception { + alterConfigWithAdmin(client, config, alterOpts); + verifyClientMetricsConfig(client, clientMetricsName, config); + } + + private void alterConfigWithAdmin(Admin client, Optional resourceName, Map config, List alterOpts) { + String configStr = transferConfigMapToString(config); + List bootstrapOpts = quorumArgs().collect(Collectors.toList()); + ConfigCommand.ConfigCommandOptions addOpts = + new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts, + entityOp(resourceName), + alterOpts, + asList("--add-config", configStr))); + addOpts.checkArgs(); + ConfigCommand.alterConfig(client, addOpts); + } + + private void alterConfigWithAdmin(Admin client, Map config, List alterOpts) { + String configStr = transferConfigMapToString(config); + List bootstrapOpts = quorumArgs().collect(Collectors.toList()); + ConfigCommand.ConfigCommandOptions addOpts = + new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts, + alterOpts, + asList("--add-config", configStr))); + addOpts.checkArgs(); + ConfigCommand.alterConfig(client, addOpts); + } + + private void verifyConfig(Admin client, Optional brokerId, Map config) throws Exception { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId.orElse("")); + TestUtils.waitForCondition(() -> { + Map current = getConfigEntryStream(client, configResource) + .filter(configEntry -> Objects.nonNull(configEntry.value())) + .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)); + return config.entrySet().stream().allMatch(e -> e.getValue().equals(current.get(e.getKey()))); + }, 10000, config + " are not updated"); + } + + private void verifyGroupConfig(Admin client, String groupName, Map config) throws Exception { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.GROUP, groupName); + TestUtils.waitForCondition(() -> { + Map current = getConfigEntryStream(client, configResource) + .filter(configEntry -> Objects.nonNull(configEntry.value())) + .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)); + return config.entrySet().stream().allMatch(e -> e.getValue().equals(current.get(e.getKey()))); + }, 10000, config + " are not updated"); + } + + private void verifyClientMetricsConfig(Admin client, String clientMetricsName, Map config) throws Exception { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, clientMetricsName); + TestUtils.waitForCondition(() -> { + Map current = getConfigEntryStream(client, configResource) + .filter(configEntry -> Objects.nonNull(configEntry.value())) + .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)); + if (config.isEmpty()) + return current.isEmpty(); + return config.entrySet().stream().allMatch(e -> e.getValue().equals(current.get(e.getKey()))); + }, 10000, config + " are not updated"); + } + + private Stream getConfigEntryStream(Admin client, + ConfigResource configResource) throws InterruptedException, ExecutionException { + return client.describeConfigs(singletonList(configResource)) + .all() + .get() + .values() + .stream() + .flatMap(e -> e.entries().stream()); + } + + private void deleteAndVerifyConfigValue(Admin client, + String brokerId, + Set config, + boolean hasDefaultValue, + List alterOpts) throws Exception { + ConfigCommand.ConfigCommandOptions deleteOpts = + new ConfigCommand.ConfigCommandOptions(toArray(alterOpts, asList("--entity-name", brokerId), + asList("--delete-config", String.join(",", config)))); + deleteOpts.checkArgs(); + ConfigCommand.alterConfig(client, deleteOpts); + verifyPerBrokerConfigValue(client, brokerId, config, hasDefaultValue); + } + + private void deleteAndVerifyGroupConfigValue(Admin client, + String groupName, + Map defaultConfigs, + List alterOpts) throws Exception { + List bootstrapOpts = quorumArgs().collect(Collectors.toList()); + ConfigCommand.ConfigCommandOptions deleteOpts = + new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts, + alterOpts, + asList("--delete-config", String.join(",", defaultConfigs.keySet())))); + deleteOpts.checkArgs(); + ConfigCommand.alterConfig(client, deleteOpts); + verifyGroupConfig(client, groupName, defaultConfigs); + } + + private void deleteAndVerifyClientMetricsConfigValue(Admin client, + String clientMetricsName, + Set defaultConfigs, + List alterOpts) throws Exception { + List bootstrapOpts = quorumArgs().collect(Collectors.toList()); + ConfigCommand.ConfigCommandOptions deleteOpts = + new ConfigCommand.ConfigCommandOptions(toArray(bootstrapOpts, + alterOpts, + asList("--delete-config", String.join(",", defaultConfigs)))); + deleteOpts.checkArgs(); + ConfigCommand.alterConfig(client, deleteOpts); + // There are no default configs returned for client metrics + verifyClientMetricsConfig(client, clientMetricsName, Collections.emptyMap()); + } + + private void verifyPerBrokerConfigValue(Admin client, + String brokerId, + Set config, + boolean hasDefaultValue) throws Exception { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId); + TestUtils.waitForCondition(() -> { + if (hasDefaultValue) { + Map current = getConfigEntryStream(client, configResource) + .filter(configEntry -> Objects.nonNull(configEntry.value())) + .collect(Collectors.toMap(ConfigEntry::name, ConfigEntry::value)); + return config.stream().allMatch(current::containsKey); + } else { + return getConfigEntryStream(client, configResource) + .noneMatch(configEntry -> config.contains(configEntry.name())); + } + }, 5000, config + " are not updated"); + } + + private void verifyConfigSecretValue(Admin client, Optional brokerId, Set config) throws Exception { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.BROKER, brokerId.orElse("")); + TestUtils.waitForCondition(() -> { + Map current = getConfigEntryStream(client, configResource) + .filter(ConfigEntry::isSensitive) + .collect(HashMap::new, (map, entry) -> map.put(entry.name(), entry.value()), HashMap::putAll); + return config.stream().allMatch(current::containsKey); + }, 5000, config + " are not updated"); + } + + @SafeVarargs + private static String[] toArray(List... lists) { + return Stream.of(lists).flatMap(List::stream).toArray(String[]::new); + } + + private String transferConfigMapToString(Map configs) { + return configs.entrySet() + .stream() + .map(e -> e.getKey() + "=" + e.getValue()) + .collect(Collectors.joining(",")); + } + + // Copied from ToolsTestUtils.java, can be removed after we move ConfigCommand to tools module + static String captureStandardStream(boolean isErr, Runnable runnable) { + ByteArrayOutputStream outputStream = new ByteArrayOutputStream(); + PrintStream currentStream = isErr ? System.err : System.out; + PrintStream tempStream = new PrintStream(outputStream); + if (isErr) + System.setErr(tempStream); + else + System.setOut(tempStream); + try { + runnable.run(); + return outputStream.toString().trim(); + } finally { + if (isErr) + System.setErr(currentStream); + else + System.setOut(currentStream); + + tempStream.close(); + } + } +} diff --git a/core/src/test/java/kafka/admin/ConfigCommandTest.java b/core/src/test/java/kafka/admin/ConfigCommandTest.java new file mode 100644 index 0000000000000..10c24111e4757 --- /dev/null +++ b/core/src/test/java/kafka/admin/ConfigCommandTest.java @@ -0,0 +1,1476 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterClientQuotasOptions; +import org.apache.kafka.clients.admin.AlterClientQuotasResult; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.AlterConfigsResult; +import org.apache.kafka.clients.admin.Config; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.ConfigTest; +import org.apache.kafka.clients.admin.DescribeClientQuotasOptions; +import org.apache.kafka.clients.admin.DescribeClientQuotasResult; +import org.apache.kafka.clients.admin.DescribeConfigsOptions; +import org.apache.kafka.clients.admin.DescribeConfigsResult; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsOptions; +import org.apache.kafka.clients.admin.DescribeUserScramCredentialsResult; +import org.apache.kafka.clients.admin.MockAdminClient; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.InvalidConfigurationException; +import org.apache.kafka.common.internals.KafkaFutureImpl; +import org.apache.kafka.common.quota.ClientQuotaAlteration; +import org.apache.kafka.common.quota.ClientQuotaEntity; +import org.apache.kafka.common.quota.ClientQuotaFilter; +import org.apache.kafka.common.quota.ClientQuotaFilterComponent; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.server.config.ConfigType; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.io.IOException; +import java.util.AbstractMap.SimpleImmutableEntry; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Objects; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicReference; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import scala.collection.Seq; +import scala.jdk.javaapi.CollectionConverters; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class ConfigCommandTest { + private static final List BROKER_BOOTSTRAP = Arrays.asList("--bootstrap-server", "localhost:9092"); + private static final List CONTROLLER_BOOTSTRAP = Arrays.asList("--bootstrap-controller", "localhost:9093"); + + @Test + public void shouldExitWithNonZeroStatusOnArgError() { + assertNonZeroStatusExit("--blah"); + } + + @Test + public void shouldExitWithNonZeroStatusAlterUserQuotaWithoutEntityName() { + assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, Arrays.asList( + "--entity-type", "users", + "--alter", "--add-config", "consumer_byte_rate=20000"))); + } + + @Test + public void shouldExitWithNonZeroStatusOnBrokerCommandError() { + assertNonZeroStatusExit("--bootstrap-server", "invalid host", + "--entity-type", "brokers", + "--entity-name", "1", + "--describe"); + } + + @Test + public void shouldExitWithNonZeroStatusIfBothBootstrapServerAndBootstrapControllerGiven() { + assertNonZeroStatusExit(toArray(BROKER_BOOTSTRAP, CONTROLLER_BOOTSTRAP, Arrays.asList( + "--describe", "--broker-defaults"))); + } + + public static void assertNonZeroStatusExit(String... args) { + AtomicReference exitStatus = new AtomicReference<>(); + Exit.setExitProcedure((status, __) -> { + exitStatus.set(status); + throw new RuntimeException(); + }); + + try { + ConfigCommand.main(args); + } catch (RuntimeException e) { + // do nothing. + } finally { + Exit.resetExitProcedure(); + } + + assertNotNull(exitStatus.get()); + assertEquals(1, exitStatus.get()); + } + + @Test + public void shouldParseArgumentsForClientsEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "clients"); + } + + @Test + public void shouldParseArgumentsForClientsEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "clients"); + } + + @Test + public void shouldParseArgumentsForUsersEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "users"); + } + + @Test + public void shouldParseArgumentsForUsersEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "users"); + } + + @Test + public void shouldParseArgumentsForTopicsEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "topics"); + } + + @Test + public void shouldParseArgumentsForTopicsEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "topics"); + } + + @Test + public void shouldParseArgumentsForBrokersEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "brokers"); + } + + @Test + public void shouldParseArgumentsForBrokersEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "brokers"); + } + + @Test + public void shouldParseArgumentsForBrokerLoggersEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "broker-loggers"); + } + + @Test + public void shouldParseArgumentsForBrokerLoggersEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "broker-loggers"); + } + + @Test + public void shouldParseArgumentsForIpEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "ips"); + } + + @Test + public void shouldParseArgumentsForIpEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "ips"); + } + + @Test + public void shouldParseArgumentsForGroupEntityTypeWithBrokerBootstrap() { + testArgumentParse(BROKER_BOOTSTRAP, "groups"); + } + + @Test + public void shouldParseArgumentsForGroupEntityTypeWithControllerBootstrap() { + testArgumentParse(CONTROLLER_BOOTSTRAP, "groups"); + } + + public void testArgumentParse(List bootstrapArguments, String entityType) { + String shortFlag = "--" + entityType.substring(0, entityType.length() - 1); + String connectOpts1 = bootstrapArguments.get(0); + String connectOpts2 = bootstrapArguments.get(1); + + // Should parse correctly + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--describe")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--describe")); + createOpts.checkArgs(); + + // For --alter and added config + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--alter", + "--add-config", "a=b,c=d")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--alter", + "--add-config-file", "/tmp/new.properties")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config", "a=b,c=d")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config-file", "/tmp/new.properties")); + createOpts.checkArgs(); + + // For alter and deleted config + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--alter", + "--delete-config", "a,b,c")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--delete-config", "a,b,c")); + createOpts.checkArgs(); + + // For alter and both added, deleted config + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--alter", + "--add-config", "a=b,c=d", + "--delete-config", "a")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config", "a=b,c=d", + "--delete-config", "a")); + createOpts.checkArgs(); + + Properties addedProps = ConfigCommand.parseConfigsToBeAdded(createOpts); + assertEquals(2, addedProps.size()); + assertEquals("b", addedProps.getProperty("a")); + assertEquals("d", addedProps.getProperty("c")); + + Seq deletedProps = ConfigCommand.parseConfigsToBeDeleted(createOpts); + assertEquals(1, deletedProps.size()); + assertEquals("a", deletedProps.apply(0)); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + "--entity-name", "1", + "--entity-type", entityType, + "--alter", + "--add-config", "a=b,c=,d=e,f=")); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config", "a._-c=b,c=,d=e,f=")); + createOpts.checkArgs(); + + Properties addedProps2 = ConfigCommand.parseConfigsToBeAdded(createOpts); + assertEquals(4, addedProps2.size()); + assertEquals("b", addedProps2.getProperty("a._-c")); + assertEquals("e", addedProps2.getProperty("d")); + assertTrue(addedProps2.getProperty("c").isEmpty()); + assertTrue(addedProps2.getProperty("f").isEmpty()); + + ConfigCommand.ConfigCommandOptions inValidCreateOpts = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config", "a;c=b")); + + assertThrows(IllegalArgumentException.class, + () -> ConfigCommand.parseConfigsToBeAdded(inValidCreateOpts)); + + ConfigCommand.ConfigCommandOptions inValidCreateOpts2 = new ConfigCommand.ConfigCommandOptions(toArray(connectOpts1, connectOpts2, + shortFlag, "1", + "--alter", + "--add-config", "a,=b")); + + assertThrows(IllegalArgumentException.class, + () -> ConfigCommand.parseConfigsToBeAdded(inValidCreateOpts2)); + } + + @Test + public void shouldFailIfAddAndAddFile() { + // Should not parse correctly + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", "1", + "--entity-type", "brokers", + "--alter", + "--add-config", "a=b,c=d", + "--add-config-file", "/tmp/new.properties" + )); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + } + + @Test + public void testEntityDefaultForType() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "topics", + "--describe" + )); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "clients", + "--describe" + )); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "users", + "--describe" + )); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "brokers", + "--describe" + )); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "broker-loggers", + "--describe" + )); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "ips", + "--describe" + )); + createOpts.checkArgs(); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "client-metrics", + "--describe" + )); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + + createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-default", + "--entity-type", "groups", + "--describe" + )); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + } + + @Test + public void testParseConfigsToBeAddedForAddConfigFile() throws IOException { + String fileContents = + "a=b\n" + + "c = d\n" + + "json = {\"key\": \"val\"}\n" + + "nested = [[1, 2], [3, 4]]"; + + File file = TestUtils.tempFile(fileContents); + + List addConfigFileArgs = Arrays.asList("--add-config-file", file.getPath()); + + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", + "--entity-name", "1", + "--entity-type", "brokers", + "--alter"), + addConfigFileArgs)); + createOpts.checkArgs(); + + Properties addedProps = ConfigCommand.parseConfigsToBeAdded(createOpts); + assertEquals(4, addedProps.size()); + assertEquals("b", addedProps.getProperty("a")); + assertEquals("d", addedProps.getProperty("c")); + assertEquals("{\"key\": \"val\"}", addedProps.getProperty("json")); + assertEquals("[[1, 2], [3, 4]]", addedProps.getProperty("nested")); + } + + public void testExpectedEntityTypeNames(List expectedTypes, List expectedNames, List connectOpts, String... args) { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList(connectOpts.get(0), connectOpts.get(1), "--describe"), Arrays.asList(args))); + createOpts.checkArgs(); + assertEquals(createOpts.entityTypes().toSeq(), seq(expectedTypes)); + assertEquals(createOpts.entityNames().toSeq(), seq(expectedNames)); + } + + @Test + public void testOptionEntityTypeNames() { + List connectOpts = Arrays.asList("--bootstrap-server", "localhost:9092"); + + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--entity-type", "topics", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--entity-name", "1.2.3.4", "--entity-type", "ips"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.singletonList("A"), connectOpts, "--entity-type", "client-metrics", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--entity-type", "groups", "--entity-name", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("A", ""), connectOpts, + "--entity-type", "users", "--entity-type", "clients", "--entity-name", "A", "--entity-default"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.USER, ConfigType.CLIENT), Arrays.asList("", "B"), connectOpts, + "--entity-default", "--entity-name", "B", "--entity-type", "users", "--entity-type", "clients"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.singletonList("A"), connectOpts, "--topic", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.singletonList("1.2.3.4"), connectOpts, "--ip", "1.2.3.4"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.singletonList("A"), connectOpts, "--group", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", "A"), connectOpts, "--client", "B", "--user", "A"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Arrays.asList("B", ""), connectOpts, "--client", "B", "--user-defaults"); + testExpectedEntityTypeNames(Arrays.asList(ConfigType.CLIENT, ConfigType.USER), Collections.singletonList("A"), connectOpts, + "--entity-type", "clients", "--entity-type", "users", "--entity-name", "A"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.TOPIC), Collections.emptyList(), connectOpts, "--entity-type", "topics"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.IP), Collections.emptyList(), connectOpts, "--entity-type", "ips"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.GROUP), Collections.emptyList(), connectOpts, "--entity-type", "groups"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.CLIENT_METRICS), Collections.emptyList(), connectOpts, "--entity-type", "client-metrics"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.singletonList("0"), connectOpts, "--entity-name", "0", "--entity-type", "brokers"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.singletonList("0"), connectOpts, "--broker", "0"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.USER), Collections.emptyList(), connectOpts, "--entity-type", "users"); + testExpectedEntityTypeNames(Collections.singletonList(ConfigType.BROKER), Collections.emptyList(), connectOpts, "--entity-type", "brokers"); + } + + @Test + public void shouldFailIfUnrecognisedEntityType() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--entity-name", "client", "--entity-type", "not-recognised", "--alter", "--add-config", "a=b,c=d"}); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldFailIfBrokerEntityTypeIsNotAnInteger() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--entity-name", "A", "--entity-type", "brokers", "--alter", "--add-config", "a=b,c=d"}); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldFailIfShortBrokerEntityTypeIsNotAnInteger() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--broker", "A", "--alter", "--add-config", "a=b,c=d"}); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldFailIfMixedEntityTypeFlags() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--entity-name", "A", "--entity-type", "users", "--client", "B", "--describe"}); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + } + + @Test + public void shouldFailIfInvalidHost() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--entity-name", "A,B", "--entity-type", "ips", "--describe"}); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + } + + @Test + public void shouldFailIfUnresolvableHost() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(new String[]{"--bootstrap-server", "localhost:9092", + "--entity-name", "RFC2606.invalid", "--entity-type", "ips", "--describe"}); + assertThrows(IllegalArgumentException.class, createOpts::checkArgs); + } + + private Entry, Map> argsAndExpectedEntity(Optional entityName, String entityType) { + String command; + switch (entityType) { + case ClientQuotaEntity.USER: + command = "users"; + break; + case ClientQuotaEntity.CLIENT_ID: + command = "clients"; + break; + case ClientQuotaEntity.IP: + command = "ips"; + break; + default: + throw new IllegalArgumentException("Unknown command: " + entityType); + } + + return entityName.map(name -> { + if (name.isEmpty()) + return new SimpleImmutableEntry<>(Arrays.asList("--entity-type", command, "--entity-default"), Collections.singletonMap(entityType, (String) null)); + return new SimpleImmutableEntry<>(Arrays.asList("--entity-type", command, "--entity-name", name), Collections.singletonMap(entityType, name)); + }).orElse(new SimpleImmutableEntry<>(Collections.emptyList(), Collections.emptyMap())); + } + + private void verifyAlterCommandFails(String expectedErrorMessage, List alterOpts) { + Admin mockAdminClient = mock(Admin.class); + ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", + "--alter"), alterOpts)); + IllegalArgumentException e = assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(mockAdminClient, opts)); + assertTrue(e.getMessage().contains(expectedErrorMessage), "Unexpected exception: " + e); + } + + @Test + public void shouldNotAlterNonQuotaIpConfigsUsingBootstrapServer() { + // when using --bootstrap-server, it should be illegal to alter anything that is not a connection quota + // for ip entities + List ipEntityOpts = Arrays.asList("--entity-type", "ips", "--entity-name", "127.0.0.1"); + String invalidProp = "some_config"; + verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--add-config", "connection_creation_rate=10000,some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--add-config", "some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--delete-config", "connection_creation_rate=10000,some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(ipEntityOpts, Arrays.asList("--delete-config", "some_config=10"))); + } + + private void verifyDescribeQuotas(List describeArgs, ClientQuotaFilter expectedFilter) { + ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", + "--describe"), describeArgs)); + KafkaFutureImpl>> describeFuture = new KafkaFutureImpl<>(); + describeFuture.complete(Collections.emptyMap()); + DescribeClientQuotasResult describeResult = mock(DescribeClientQuotasResult.class); + when(describeResult.entities()).thenReturn(describeFuture); + + AtomicBoolean describedConfigs = new AtomicBoolean(); + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) { + assertTrue(filter.strict()); + assertEquals(new HashSet<>(expectedFilter.components()), new HashSet<>(filter.components())); + describedConfigs.set(true); + return describeResult; + } + }; + ConfigCommand.describeConfig(mockAdminClient, describeOpts); + assertTrue(describedConfigs.get()); + } + + @Test + public void testDescribeIpConfigs() { + String entityType = ClientQuotaEntity.IP; + String knownHost = "1.2.3.4"; + ClientQuotaFilter defaultIpFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofDefaultEntity(entityType))); + ClientQuotaFilter singleIpFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofEntity(entityType, knownHost))); + ClientQuotaFilter allIpsFilter = ClientQuotaFilter.containsOnly(Collections.singletonList(ClientQuotaFilterComponent.ofEntityType(entityType))); + verifyDescribeQuotas(Arrays.asList("--entity-default", "--entity-type", "ips"), defaultIpFilter); + verifyDescribeQuotas(Collections.singletonList("--ip-defaults"), defaultIpFilter); + verifyDescribeQuotas(Arrays.asList("--entity-type", "ips", "--entity-name", knownHost), singleIpFilter); + verifyDescribeQuotas(Arrays.asList("--ip", knownHost), singleIpFilter); + verifyDescribeQuotas(Arrays.asList("--entity-type", "ips"), allIpsFilter); + } + + public void verifyAlterQuotas(List alterOpts, ClientQuotaEntity expectedAlterEntity, + Map expectedProps, Set expectedAlterOps) { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", + "--alter"), alterOpts)); + + AtomicBoolean describedConfigs = new AtomicBoolean(); + KafkaFutureImpl>> describeFuture = new KafkaFutureImpl<>(); + describeFuture.complete(Collections.singletonMap(expectedAlterEntity, expectedProps)); + DescribeClientQuotasResult describeResult = mock(DescribeClientQuotasResult.class); + when(describeResult.entities()).thenReturn(describeFuture); + + Set expectedFilterComponents = expectedAlterEntity.entries().entrySet().stream().map(e -> { + String entityType = e.getKey(); + String entityName = e.getValue(); + return entityName == null + ? ClientQuotaFilterComponent.ofDefaultEntity(e.getKey()) + : ClientQuotaFilterComponent.ofEntity(entityType, entityName); + }).collect(Collectors.toSet()); + + AtomicBoolean alteredConfigs = new AtomicBoolean(); + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterClientQuotasResult alterResult = mock(AlterClientQuotasResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) { + assertTrue(filter.strict()); + assertEquals(expectedFilterComponents, new HashSet<>(filter.components())); + describedConfigs.set(true); + return describeResult; + } + + @Override + public AlterClientQuotasResult alterClientQuotas(Collection entries, AlterClientQuotasOptions options) { + assertFalse(options.validateOnly()); + assertEquals(1, entries.size()); + ClientQuotaAlteration alteration = entries.iterator().next(); + assertEquals(expectedAlterEntity, alteration.entity()); + Collection ops = alteration.ops(); + assertEquals(expectedAlterOps, new HashSet<>(ops)); + alteredConfigs.set(true); + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, createOpts); + assertTrue(describedConfigs.get()); + assertTrue(alteredConfigs.get()); + } + + @Test + public void testAlterIpConfig() { + Entry, Map> singleIpArgsAndEntity = argsAndExpectedEntity(Optional.of("1.2.3.4"), ClientQuotaEntity.IP); + Entry, Map> defaultIpArgsAndEntity = argsAndExpectedEntity(Optional.of(""), ClientQuotaEntity.IP); + + + List deleteArgs = Arrays.asList("--delete-config", "connection_creation_rate"); + Set deleteAlterationOps = new HashSet<>(Collections.singletonList(new ClientQuotaAlteration.Op("connection_creation_rate", null))); + Map propsToDelete = Collections.singletonMap("connection_creation_rate", 50.0); + + List addArgs = Arrays.asList("--add-config", "connection_creation_rate=100"); + Set addAlterationOps = new HashSet<>(Collections.singletonList(new ClientQuotaAlteration.Op("connection_creation_rate", 100.0))); + + verifyAlterQuotas( + concat(singleIpArgsAndEntity.getKey(), deleteArgs), + new ClientQuotaEntity(singleIpArgsAndEntity.getValue()), + propsToDelete, + deleteAlterationOps); + verifyAlterQuotas( + concat(singleIpArgsAndEntity.getKey(), addArgs), + new ClientQuotaEntity(singleIpArgsAndEntity.getValue()), + Collections.emptyMap(), + addAlterationOps); + verifyAlterQuotas( + concat(defaultIpArgsAndEntity.getKey(), deleteArgs), + new ClientQuotaEntity(defaultIpArgsAndEntity.getValue()), + propsToDelete, + deleteAlterationOps); + verifyAlterQuotas( + concat(defaultIpArgsAndEntity.getKey(), addArgs), + new ClientQuotaEntity(defaultIpArgsAndEntity.getValue()), + Collections.emptyMap(), + addAlterationOps); + } + + private void verifyAlterUserClientQuotas(String user, String client) { + List alterArgs = Arrays.asList("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000", + "--delete-config", "request_percentage"); + Map propsToDelete = Collections.singletonMap("request_percentage", 50.0); + + Set alterationOps = new HashSet<>(Arrays.asList( + new ClientQuotaAlteration.Op("consumer_byte_rate", 20000d), + new ClientQuotaAlteration.Op("producer_byte_rate", 10000d), + new ClientQuotaAlteration.Op("request_percentage", null) + )); + + Entry, Map> userArgsAndEntity = argsAndExpectedEntity(Optional.ofNullable(user), ClientQuotaEntity.USER); + Entry, Map> clientArgsAndEntry = argsAndExpectedEntity(Optional.ofNullable(client), ClientQuotaEntity.CLIENT_ID); + + verifyAlterQuotas( + concat(alterArgs, userArgsAndEntity.getKey(), clientArgsAndEntry.getKey()), + new ClientQuotaEntity(concat(userArgsAndEntity.getValue(), clientArgsAndEntry.getValue())), + propsToDelete, + alterationOps); + } + + @Test + public void shouldAddClientConfig() { + verifyAlterUserClientQuotas("test-user-1", "test-client-1"); + verifyAlterUserClientQuotas("test-user-2", ""); + verifyAlterUserClientQuotas("test-user-3", null); + verifyAlterUserClientQuotas("", "test-client-2"); + verifyAlterUserClientQuotas("", ""); + verifyAlterUserClientQuotas("", null); + verifyAlterUserClientQuotas(null, "test-client-3"); + verifyAlterUserClientQuotas(null, ""); + } + + private final List userEntityOpts = Arrays.asList("--entity-type", "users", "--entity-name", "admin"); + private final List clientEntityOpts = Arrays.asList("--entity-type", "clients", "--entity-name", "admin"); + private final List addScramOpts = Arrays.asList("--add-config", "SCRAM-SHA-256=[iterations=8192,password=foo-secret]"); + private final List deleteScramOpts = Arrays.asList("--delete-config", "SCRAM-SHA-256"); + + @Test + public void shouldNotAlterNonQuotaNonScramUserOrClientConfigUsingBootstrapServer() { + // when using --bootstrap-server, it should be illegal to alter anything that is not a quota and not a SCRAM credential + // for both user and client entities + String invalidProp = "some_config"; + verifyAlterCommandFails(invalidProp, concat(userEntityOpts, + Arrays.asList("-add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(userEntityOpts, + Arrays.asList("--add-config", "consumer_byte_rate=20000,producer_byte_rate=10000,some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, Arrays.asList("--add-config", "some_config=10"))); + verifyAlterCommandFails(invalidProp, concat(userEntityOpts, Arrays.asList("--delete-config", "consumer_byte_rate,some_config"))); + verifyAlterCommandFails(invalidProp, concat(userEntityOpts, Arrays.asList("--delete-config", "SCRAM-SHA-256,some_config"))); + verifyAlterCommandFails(invalidProp, concat(clientEntityOpts, Arrays.asList("--delete-config", "some_config"))); + } + + @Test + public void shouldNotAlterScramClientConfigUsingBootstrapServer() { + // when using --bootstrap-server, it should be illegal to alter SCRAM credentials for client entities + verifyAlterCommandFails("SCRAM-SHA-256", concat(clientEntityOpts, addScramOpts)); + verifyAlterCommandFails("SCRAM-SHA-256", concat(clientEntityOpts, deleteScramOpts)); + } + + @Test + public void shouldNotCreateUserScramCredentialConfigWithUnderMinimumIterationsUsingBootstrapServer() { + // when using --bootstrap-server, it should be illegal to create a SCRAM credential for a user + // with an iterations value less than the minimum + verifyAlterCommandFails("SCRAM-SHA-256", concat(userEntityOpts, Arrays.asList("--add-config", "SCRAM-SHA-256=[iterations=100,password=foo-secret]"))); + } + + @Test + public void shouldNotAlterUserScramCredentialAndClientQuotaConfigsSimultaneouslyUsingBootstrapServer() { + // when using --bootstrap-server, it should be illegal to alter both SCRAM credentials and quotas for user entities + String expectedErrorMessage = "SCRAM-SHA-256"; + List secondUserEntityOpts = Arrays.asList("--entity-type", "users", "--entity-name", "admin1"); + List addQuotaOpts = Arrays.asList("--add-config", "consumer_byte_rate=20000"); + List deleteQuotaOpts = Arrays.asList("--delete-config", "consumer_byte_rate"); + + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, addScramOpts, userEntityOpts, deleteQuotaOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, addScramOpts, secondUserEntityOpts, deleteQuotaOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, deleteScramOpts, userEntityOpts, addQuotaOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, deleteScramOpts, secondUserEntityOpts, addQuotaOpts)); + + // change order of quota/SCRAM commands, verify alter still fails + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, deleteQuotaOpts, userEntityOpts, addScramOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(secondUserEntityOpts, deleteQuotaOpts, userEntityOpts, addScramOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(userEntityOpts, addQuotaOpts, userEntityOpts, deleteScramOpts)); + verifyAlterCommandFails(expectedErrorMessage, concat(secondUserEntityOpts, addQuotaOpts, userEntityOpts, deleteScramOpts)); + } + + public void verifyUserScramCredentialsNotDescribed(List requestOpts) { + // User SCRAM credentials should not be described when specifying + // --describe --entity-type users --entity-default (or --user-defaults) with --bootstrap-server + KafkaFutureImpl>> describeFuture = new KafkaFutureImpl<>(); + describeFuture.complete(Collections.singletonMap(new ClientQuotaEntity(Collections.singletonMap("", "")), Collections.singletonMap("request_percentage", 50.0))); + DescribeClientQuotasResult describeClientQuotasResult = mock(DescribeClientQuotasResult.class); + when(describeClientQuotasResult.entities()).thenReturn(describeFuture); + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) { + return describeClientQuotasResult; + } + + @Override + public DescribeUserScramCredentialsResult describeUserScramCredentials(List users, DescribeUserScramCredentialsOptions options) { + throw new IllegalStateException("Incorrectly described SCRAM credentials when specifying --entity-default with --bootstrap-server"); + } + }; + ConfigCommand.ConfigCommandOptions opts = new ConfigCommand.ConfigCommandOptions(toArray(Arrays.asList("--bootstrap-server", "localhost:9092", "--describe"), requestOpts)); + ConfigCommand.describeConfig(mockAdminClient, opts); // fails if describeUserScramCredentials() is invoked + } + + @Test + public void shouldNotDescribeUserScramCredentialsWithEntityDefaultUsingBootstrapServer() { + String expectedMsg = "The use of --entity-default or --user-defaults is not allowed with User SCRAM Credentials using --bootstrap-server."; + List defaultUserOpt = Collections.singletonList("--user-defaults"); + List verboseDefaultUserOpts = Arrays.asList("--entity-type", "users", "--entity-default"); + verifyAlterCommandFails(expectedMsg, concat(verboseDefaultUserOpts, addScramOpts)); + verifyAlterCommandFails(expectedMsg, concat(verboseDefaultUserOpts, deleteScramOpts)); + verifyUserScramCredentialsNotDescribed(verboseDefaultUserOpts); + verifyAlterCommandFails(expectedMsg, concat(defaultUserOpt, addScramOpts)); + verifyAlterCommandFails(expectedMsg, concat(defaultUserOpt, deleteScramOpts)); + verifyUserScramCredentialsNotDescribed(defaultUserOpt); + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void shouldAlterTopicConfig(boolean file) { + String filePath = ""; + Map addedConfigs = new HashMap<>(); + addedConfigs.put("delete.retention.ms", "1000000"); + addedConfigs.put("min.insync.replicas", "2"); + if (file) { + File f = kafka.utils.TestUtils.tempPropertiesFile(CollectionConverters.asScala(addedConfigs)); + filePath = f.getPath(); + } + + String resourceName = "my-topic"; + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", resourceName, + "--entity-type", "topics", + "--alter", + file ? "--add-config-file" : "--add-config", + file ? filePath : addedConfigs.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining(",")), + "--delete-config", "unclean.leader.election.enable")); + AtomicBoolean alteredConfigs = new AtomicBoolean(); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName); + List configEntries = Arrays.asList(newConfigEntry("min.insync.replicas", "1"), newConfigEntry("unclean.leader.election.enable", "1")); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(configEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterConfigsResult alterResult = mock(AlterConfigsResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily"); + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(res.type(), ConfigResource.Type.TOPIC); + assertEquals(res.name(), resourceName); + return describeResult; + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + assertEquals(1, configs.size()); + Map.Entry> entry = configs.entrySet().iterator().next(); + Collection alterConfigOps = entry.getValue(); + assertEquals(ConfigResource.Type.TOPIC, entry.getKey().type()); + assertEquals(3, alterConfigOps.size()); + + Set expectedConfigOps = new HashSet<>(Arrays.asList( + new AlterConfigOp(newConfigEntry("delete.retention.ms", "1000000"), AlterConfigOp.OpType.SET), + new AlterConfigOp(newConfigEntry("min.insync.replicas", "2"), AlterConfigOp.OpType.SET), + new AlterConfigOp(newConfigEntry("unclean.leader.election.enable", ""), AlterConfigOp.OpType.DELETE) + )); + assertEquals(expectedConfigOps.size(), alterConfigOps.size()); + expectedConfigOps.forEach(expectedOp -> { + Optional actual = alterConfigOps.stream() + .filter(op -> Objects.equals(op.configEntry().name(), expectedOp.configEntry().name())) + .findFirst(); + assertTrue(actual.isPresent()); + assertEquals(expectedOp.opType(), actual.get().opType()); + assertEquals(expectedOp.configEntry().name(), actual.get().configEntry().name()); + assertEquals(expectedOp.configEntry().value(), actual.get().configEntry().value()); + }); + alteredConfigs.set(true); + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, alterOpts); + assertTrue(alteredConfigs.get()); + verify(describeResult).all(); + } + + public ConfigEntry newConfigEntry(String name, String value) { + return ConfigTest.newConfigEntry(name, value, ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, Collections.emptyList()); + } + + @Test + public void shouldDescribeConfigSynonyms() { + String resourceName = "my-topic"; + ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", resourceName, + "--entity-type", "topics", + "--describe", + "--all")); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(Collections.emptyList()))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertTrue(options.includeSynonyms(), "Synonyms not requested"); + assertEquals(Collections.singleton(resource), new HashSet<>(resources)); + return describeResult; + } + }; + ConfigCommand.describeConfig(mockAdminClient, describeOpts); + verify(describeResult).all(); + } + + @Test + public void shouldAddBrokerLoggerConfig() { + Node node = new Node(1, "localhost", 9092); + verifyAlterBrokerLoggerConfig(node, "1", "1", Arrays.asList( + new ConfigEntry("kafka.log.LogCleaner", "INFO"), + new ConfigEntry("kafka.server.ReplicaManager", "INFO"), + new ConfigEntry("kafka.server.KafkaApi", "INFO") + )); + } + + @Test + public void testNoSpecifiedEntityOptionWithDescribeBrokersInBootstrapServerIsAllowed() { + String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", + "--entity-type", ConfigType.BROKER, + "--describe" + }; + + new ConfigCommand.ConfigCommandOptions(optsList).checkArgs(); + } + + @Test + public void testDescribeAllBrokerConfig() { + String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", + "--entity-type", ConfigType.BROKER, + "--entity-name", "1", + "--describe", + "--all"}; + + new ConfigCommand.ConfigCommandOptions(optsList).checkArgs(); + } + + @Test + public void testDescribeAllTopicConfig() { + String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", + "--entity-type", ConfigType.TOPIC, + "--entity-name", "foo", + "--describe", + "--all"}; + + new ConfigCommand.ConfigCommandOptions(optsList).checkArgs(); + } + + @Test + public void testEntityDefaultOptionWithDescribeBrokerLoggerIsNotAllowed() { + String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", + "--entity-type", ConfigCommand.BrokerLoggerConfigType(), + "--entity-default", + "--describe" + }; + + assertThrows(IllegalArgumentException.class, () -> new ConfigCommand.ConfigCommandOptions(optsList).checkArgs()); + } + + @Test + public void testEntityDefaultOptionWithAlterBrokerLoggerIsNotAllowed() { + String[] optsList = new String[]{"--bootstrap-server", "localhost:9092", + "--entity-type", ConfigCommand.BrokerLoggerConfigType(), + "--entity-default", + "--alter", + "--add-config", "kafka.log.LogCleaner=DEBUG" + }; + + assertThrows(IllegalArgumentException.class, () -> new ConfigCommand.ConfigCommandOptions(optsList).checkArgs()); + } + + @Test + public void shouldRaiseInvalidConfigurationExceptionWhenAddingInvalidBrokerLoggerConfig() { + Node node = new Node(1, "localhost", 9092); + // verifyAlterBrokerLoggerConfig tries to alter kafka.log.LogCleaner, kafka.server.ReplicaManager and kafka.server.KafkaApi + // yet, we make it so DescribeConfigs returns only one logger, implying that kafka.server.ReplicaManager and kafka.log.LogCleaner are invalid + assertThrows(InvalidConfigurationException.class, () -> verifyAlterBrokerLoggerConfig(node, "1", "1", Collections.singletonList( + new ConfigEntry("kafka.server.KafkaApi", "INFO") + ))); + } + + @Test + public void shouldAddDefaultBrokerDynamicConfig() { + Node node = new Node(1, "localhost", 9092); + verifyAlterBrokerConfig(node, "", Collections.singletonList("--entity-default")); + } + + @Test + public void shouldAddBrokerDynamicConfig() { + Node node = new Node(1, "localhost", 9092); + verifyAlterBrokerConfig(node, "1", Arrays.asList("--entity-name", "1")); + } + + public void verifyAlterBrokerConfig(Node node, String resourceName, List resourceOpts) { + String[] optsList = toArray(Arrays.asList("--bootstrap-server", "localhost:9092", + "--entity-type", "brokers", + "--alter", + "--add-config", "message.max.bytes=10,leader.replication.throttled.rate=10"), resourceOpts); + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(optsList); + Map brokerConfigs = new HashMap<>(); + brokerConfigs.put("num.io.threads", "5"); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER, resourceName); + List configEntries = Collections.singletonList(new ConfigEntry("num.io.threads", "5")); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(configEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterConfigsResult alterResult = mock(AlterConfigsResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily"); + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(ConfigResource.Type.BROKER, res.type()); + assertEquals(resourceName, res.name()); + return describeResult; + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + assertEquals(1, configs.size()); + Map.Entry> entry = configs.entrySet().iterator().next(); + ConfigResource res = entry.getKey(); + Collection config = entry.getValue(); + assertEquals(ConfigResource.Type.BROKER, res.type()); + config.forEach(e -> brokerConfigs.put(e.configEntry().name(), e.configEntry().value())); + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, alterOpts); + Map expected = new HashMap<>(); + expected.put("message.max.bytes", "10"); + expected.put("num.io.threads", "5"); + expected.put("leader.replication.throttled.rate", "10"); + assertEquals(expected, brokerConfigs); + verify(describeResult).all(); + } + + @Test + public void shouldDescribeConfigBrokerWithoutEntityName() { + ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-type", "brokers", + "--describe")); + + String brokerDefaultEntityName = ""; + ConfigResource resourceCustom = new ConfigResource(ConfigResource.Type.BROKER, "1"); + ConfigResource resourceDefault = new ConfigResource(ConfigResource.Type.BROKER, brokerDefaultEntityName); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + Config emptyConfig = new Config(Collections.emptyList()); + Map resultMap = new HashMap<>(); + resultMap.put(resourceCustom, emptyConfig); + resultMap.put(resourceDefault, emptyConfig); + future.complete(resultMap); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + // make sure it will be called 2 times: (1) for broker "1" (2) for default broker "" + when(describeResult.all()).thenReturn(future); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertTrue(options.includeSynonyms(), "Synonyms not requested"); + ConfigResource resource = resources.iterator().next(); + assertEquals(ConfigResource.Type.BROKER, resource.type()); + assertTrue(Objects.equals(resourceCustom.name(), resource.name()) || Objects.equals(resourceDefault.name(), resource.name())); + assertEquals(1, resources.size()); + return describeResult; + } + }; + ConfigCommand.describeConfig(mockAdminClient, describeOpts); + verify(describeResult, times(2)).all(); + } + + private void verifyAlterBrokerLoggerConfig(Node node, String resourceName, String entityName, + List describeConfigEntries) { + String[] optsList = toArray("--bootstrap-server", "localhost:9092", + "--entity-type", ConfigCommand.BrokerLoggerConfigType(), + "--alter", + "--entity-name", entityName, + "--add-config", "kafka.log.LogCleaner=DEBUG", + "--delete-config", "kafka.server.ReplicaManager,kafka.server.KafkaApi"); + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(optsList); + AtomicBoolean alteredConfigs = new AtomicBoolean(); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.BROKER_LOGGER, resourceName); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(describeConfigEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterConfigsResult alterResult = mock(AlterConfigsResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(ConfigResource.Type.BROKER_LOGGER, res.type()); + assertEquals(resourceName, res.name()); + return describeResult; + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + assertEquals(1, configs.size()); + Map.Entry> entry = configs.entrySet().iterator().next(); + ConfigResource res = entry.getKey(); + Collection alterConfigOps = entry.getValue(); + assertEquals(ConfigResource.Type.BROKER_LOGGER, res.type()); + assertEquals(3, alterConfigOps.size()); + + List expectedConfigOps = Arrays.asList( + new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("kafka.server.KafkaApi", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", "DEBUG"), AlterConfigOp.OpType.SET) + ); + assertEquals(expectedConfigOps.size(), alterConfigOps.size()); + Iterator alterConfigOpsIter = alterConfigOps.iterator(); + for (AlterConfigOp expectedConfigOp : expectedConfigOps) { + assertEquals(expectedConfigOp, alterConfigOpsIter.next()); + } + alteredConfigs.set(true); + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, alterOpts); + assertTrue(alteredConfigs.get()); + verify(describeResult).all(); + } + + @Test + public void shouldNotUpdateBrokerConfigIfMalformedEntityName() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", "1,2,3", //Don't support multiple brokers currently + "--entity-type", "brokers", + "--alter", + "--add-config", "leader.replication.throttled.rate=10")); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldNotUpdateBrokerConfigIfMalformedConfig() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", "1", + "--entity-type", "brokers", + "--alter", + "--add-config", "a==")); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldNotUpdateBrokerConfigIfMalformedBracketConfig() { + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", "1", + "--entity-type", "brokers", + "--alter", + "--add-config", "a=[b,c,d=e")); + assertThrows(IllegalArgumentException.class, () -> ConfigCommand.alterConfig(new DummyAdminClient(new Node(1, "localhost", 9092)), createOpts)); + } + + @Test + public void shouldNotUpdateConfigIfNonExistingConfigIsDeleted() { + String resourceName = "my-topic"; + ConfigCommand.ConfigCommandOptions createOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-name", resourceName, + "--entity-type", "topics", + "--alter", + "--delete-config", "missing_config1, missing_config2")); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.TOPIC, resourceName); + List configEntries = Collections.emptyList(); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(configEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(res.type(), ConfigResource.Type.TOPIC); + assertEquals(res.name(), resourceName); + return describeResult; + } + }; + + assertThrows(InvalidConfigurationException.class, () -> ConfigCommand.alterConfig(mockAdminClient, createOpts)); + verify(describeResult).all(); + } + + @Test + public void shouldAlterClientMetricsConfig() { + Node node = new Node(1, "localhost", 9092); + verifyAlterClientMetricsConfig(node, "1", Arrays.asList("--entity-type", "client-metrics", "--entity-name", "1")); + + // Test for the --client-metrics alias + node = new Node(1, "localhost", 9092); + verifyAlterClientMetricsConfig(node, "1", Arrays.asList("--client-metrics", "1")); + } + + private void verifyAlterClientMetricsConfig(Node node, String resourceName, List resourceOpts) { + List optsList = concat(Arrays.asList("--bootstrap-server", "localhost:9092", + "--alter", + "--delete-config", "interval.ms", + "--add-config", "metrics=org.apache.kafka.consumer.," + + "match=[client_software_name=kafka.python,client_software_version=1\\.2\\..*]"), resourceOpts); + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray(optsList)); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, resourceName); + List configEntries = Collections.singletonList(new ConfigEntry("interval.ms", "1000", + ConfigEntry.ConfigSource.DYNAMIC_CLIENT_METRICS_CONFIG, false, false, Collections.emptyList(), + ConfigEntry.ConfigType.UNKNOWN, null)); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(configEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterConfigsResult alterResult = mock(AlterConfigsResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily"); + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(ConfigResource.Type.CLIENT_METRICS, res.type()); + assertEquals(resourceName, res.name()); + return describeResult; + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + assertEquals(1, configs.size()); + Map.Entry> entry = configs.entrySet().iterator().next(); + ConfigResource res = entry.getKey(); + Collection alterConfigOps = entry.getValue(); + assertEquals(ConfigResource.Type.CLIENT_METRICS, res.type()); + assertEquals(3, alterConfigOps.size()); + + List expectedConfigOps = Arrays.asList( + new AlterConfigOp(new ConfigEntry("interval.ms", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("match", "client_software_name=kafka.python,client_software_version=1\\.2\\..*"), AlterConfigOp.OpType.SET), + new AlterConfigOp(new ConfigEntry("metrics", "org.apache.kafka.consumer."), AlterConfigOp.OpType.SET) + ); + assertEquals(expectedConfigOps.size(), alterConfigOps.size()); + Iterator alterConfigOpsIter = alterConfigOps.iterator(); + for (AlterConfigOp expectedConfigOp : expectedConfigOps) { + assertEquals(expectedConfigOp, alterConfigOpsIter.next()); + } + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, alterOpts); + verify(describeResult).all(); + verify(alterResult).all(); + } + + @Test + public void shouldDescribeClientMetricsConfigWithoutEntityName() { + ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-type", "client-metrics", + "--describe")); + + ConfigResource resourceCustom = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "1"); + ConfigEntry configEntry = new ConfigEntry("metrics", "*"); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertTrue(options.includeSynonyms()); + assertEquals(1, resources.size()); + ConfigResource resource = resources.iterator().next(); + assertEquals(ConfigResource.Type.CLIENT_METRICS, resource.type()); + assertEquals(resourceCustom.name(), resource.name()); + future.complete(Collections.singletonMap(resourceCustom, new Config(Collections.singletonList(configEntry)))); + return describeResult; + } + }; + mockAdminClient.incrementalAlterConfigs(Collections.singletonMap(resourceCustom, + Collections.singletonList(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions()); + ConfigCommand.describeConfig(mockAdminClient, describeOpts); + verify(describeResult).all(); + } + + @Test + public void shouldNotAlterClientMetricsConfigWithoutEntityName() { + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-type", "client-metrics", + "--alter", + "--add-config", "interval.ms=1000")); + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); + assertEquals("An entity name must be specified with --alter of client-metrics", exception.getMessage()); + } + + @Test + public void shouldAlterGroupConfig() { + Node node = new Node(1, "localhost", 9092); + verifyAlterGroupConfig(node, "group", Arrays.asList("--entity-type", "groups", "--entity-name", "group")); + + // Test for the --group alias + verifyAlterGroupConfig(node, "groupUsingAlias", Arrays.asList("--group", "groupUsingAlias")); + } + + private void verifyAlterGroupConfig(Node node, String resourceName, List resourceOpts) { + List optsList = concat(Arrays.asList("--bootstrap-server", "localhost:9092", + "--alter", + "--delete-config", "consumer.session.timeout.ms", + "--add-config", "consumer.heartbeat.interval.ms=6000"), resourceOpts); + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray(optsList)); + + ConfigResource resource = new ConfigResource(ConfigResource.Type.GROUP, resourceName); + List configEntries = Collections.singletonList(new ConfigEntry("consumer.session.timeout.ms", "45000", + ConfigEntry.ConfigSource.DYNAMIC_GROUP_CONFIG, false, false, Collections.emptyList(), + ConfigEntry.ConfigType.UNKNOWN, null)); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + future.complete(Collections.singletonMap(resource, new Config(configEntries))); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + KafkaFutureImpl alterFuture = new KafkaFutureImpl<>(); + alterFuture.complete(null); + AlterConfigsResult alterResult = mock(AlterConfigsResult.class); + when(alterResult.all()).thenReturn(alterFuture); + + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertFalse(options.includeSynonyms(), "Config synonyms requested unnecessarily"); + assertEquals(1, resources.size()); + ConfigResource res = resources.iterator().next(); + assertEquals(ConfigResource.Type.GROUP, res.type()); + assertEquals(resourceName, res.name()); + return describeResult; + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + assertEquals(1, configs.size()); + Map.Entry> entry = configs.entrySet().iterator().next(); + ConfigResource res = entry.getKey(); + Collection alterConfigOps = entry.getValue(); + assertEquals(ConfigResource.Type.GROUP, res.type()); + assertEquals(2, alterConfigOps.size()); + + List expectedConfigOps = Arrays.asList( + new AlterConfigOp(new ConfigEntry("consumer.session.timeout.ms", ""), AlterConfigOp.OpType.DELETE), + new AlterConfigOp(new ConfigEntry("consumer.heartbeat.interval.ms", "6000"), AlterConfigOp.OpType.SET) + ); + assertEquals(expectedConfigOps.size(), alterConfigOps.size()); + Iterator alterConfigOpsIter = alterConfigOps.iterator(); + for (AlterConfigOp expectedConfigOp : expectedConfigOps) { + assertEquals(expectedConfigOp, alterConfigOpsIter.next()); + } + return alterResult; + } + }; + ConfigCommand.alterConfig(mockAdminClient, alterOpts); + verify(describeResult).all(); + verify(alterResult).all(); + } + + @Test + public void shouldDescribeGroupConfigWithoutEntityName() { + ConfigCommand.ConfigCommandOptions describeOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-type", "groups", + "--describe")); + + verifyDescribeGroupConfig(describeOpts, "group"); + } + + private void verifyDescribeGroupConfig(ConfigCommand.ConfigCommandOptions describeOpts, String resourceName) { + ConfigResource resourceCustom = new ConfigResource(ConfigResource.Type.GROUP, resourceName); + ConfigEntry configEntry = new ConfigEntry("consumer.heartbeat.interval.ms", "6000"); + KafkaFutureImpl> future = new KafkaFutureImpl<>(); + DescribeConfigsResult describeResult = mock(DescribeConfigsResult.class); + when(describeResult.all()).thenReturn(future); + + Node node = new Node(1, "localhost", 9092); + MockAdminClient mockAdminClient = new MockAdminClient(Collections.singletonList(node), node) { + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + assertTrue(options.includeSynonyms()); + assertEquals(1, resources.size()); + ConfigResource resource = resources.iterator().next(); + assertEquals(ConfigResource.Type.GROUP, resource.type()); + assertEquals(resourceCustom.name(), resource.name()); + future.complete(Collections.singletonMap(resourceCustom, new Config(Collections.singletonList(configEntry)))); + return describeResult; + } + }; + mockAdminClient.incrementalAlterConfigs(Collections.singletonMap(resourceCustom, + Collections.singletonList(new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET))), new AlterConfigsOptions()); + ConfigCommand.describeConfig(mockAdminClient, describeOpts); + verify(describeResult).all(); + } + + @Test + public void shouldNotAlterGroupConfigWithoutEntityName() { + ConfigCommand.ConfigCommandOptions alterOpts = new ConfigCommand.ConfigCommandOptions(toArray("--bootstrap-server", "localhost:9092", + "--entity-type", "groups", + "--alter", + "--add-config", "consumer.heartbeat.interval.ms=6000")); + + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, alterOpts::checkArgs); + assertEquals("An entity name must be specified with --alter of groups", exception.getMessage()); + } + + public static String[] toArray(String... first) { + return first; + } + + @SafeVarargs + public static String[] toArray(List... lists) { + return Stream.of(lists).flatMap(List::stream).toArray(String[]::new); + } + + @SafeVarargs + public static List concat(List... lists) { + return Stream.of(lists).flatMap(List::stream).collect(Collectors.toList()); + } + + @SafeVarargs + public static Map concat(Map... maps) { + Map res = new HashMap<>(); + Stream.of(maps) + .map(Map::entrySet) + .flatMap(Collection::stream) + .forEach(e -> res.put(e.getKey(), e.getValue())); + return res; + } + + + static class DummyAdminClient extends MockAdminClient { + public DummyAdminClient(Node node) { + super(Collections.singletonList(node), node); + } + + @Override + public synchronized DescribeConfigsResult describeConfigs(Collection resources, DescribeConfigsOptions options) { + return mock(DescribeConfigsResult.class); + } + + @Override + public synchronized AlterConfigsResult incrementalAlterConfigs(Map> configs, AlterConfigsOptions options) { + return mock(AlterConfigsResult.class); + } + + @Override + public DescribeClientQuotasResult describeClientQuotas(ClientQuotaFilter filter, DescribeClientQuotasOptions options) { + return mock(DescribeClientQuotasResult.class); + } + + @Override + public AlterClientQuotasResult alterClientQuotas(Collection entries, AlterClientQuotasOptions options) { + return mock(AlterClientQuotasResult.class); + } + } + + private Seq seq(Collection seq) { + return CollectionConverters.asScala(seq).toSeq(); + } +} diff --git a/core/src/test/java/kafka/admin/DeleteTopicTest.java b/core/src/test/java/kafka/admin/DeleteTopicTest.java new file mode 100644 index 0000000000000..522462f4b3be5 --- /dev/null +++ b/core/src/test/java/kafka/admin/DeleteTopicTest.java @@ -0,0 +1,383 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin; + +import kafka.log.UnifiedLog; +import kafka.server.KafkaBroker; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AdminClientConfig; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewPartitions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.TopicDeletionDisabledException; +import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.TestUtils; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.metadata.BrokerState; +import org.apache.kafka.server.common.RequestLocal; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.storage.internals.log.AppendOrigin; +import org.apache.kafka.storage.internals.log.VerificationGuard; + +import java.util.ArrayList; +import java.util.Collection; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.ExecutionException; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import scala.Option; +import scala.jdk.javaapi.OptionConverters; + +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.fail; + + +@ClusterTestDefaults(types = {Type.KRAFT}, + brokers = 3, + serverProperties = { + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + @ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + @ClusterConfigProperty(key = "log.initial.task.delay.ms", value = "100"), + @ClusterConfigProperty(key = "log.segment.delete.delay.ms", value = "1000") + }) +public class DeleteTopicTest { + private static final String DEFAULT_TOPIC = "topic"; + private final Map> expectedReplicaAssignment = Map.of(0, List.of(0, 1, 2)); + + @ClusterTest + public void testDeleteTopicWithAllAliveReplicas(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest + public void testResumeDeleteTopicWithRecoveredFollower(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + int leaderId = waitUtilLeaderIsKnown(cluster.brokers(), topicPartition); + KafkaBroker follower = findFollower(cluster.brokers().values(), leaderId); + + // shutdown one follower replica + follower.shutdown(); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + + TestUtils.waitForCondition(() -> cluster.brokers().values() + .stream() + .filter(broker -> broker.config().brokerId() != follower.config().brokerId()) + .allMatch(b -> b.logManager().getLog(topicPartition, false).isEmpty()), + "Online replicas have not deleted log."); + + follower.startup(); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest(brokers = 4) + public void testPartitionReassignmentDuringDeleteTopic(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + Map servers = findPartitionHostingBrokers(cluster.brokers()); + int leaderId = waitUtilLeaderIsKnown(cluster.brokers(), topicPartition); + KafkaBroker follower = findFollower(servers.values(), leaderId); + follower.shutdown(); + + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + Properties properties = new Properties(); + properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + + try (Admin otherAdmin = Admin.create(properties)) { + waitUtilTopicGone(otherAdmin); + assertThrows(ExecutionException.class, () -> otherAdmin.alterPartitionReassignments( + Map.of(topicPartition, Optional.of(new NewPartitionReassignment(List.of(1, 2, 3)))) + ).all().get()); + } + + follower.startup(); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest(brokers = 4) + public void testIncreasePartitionCountDuringDeleteTopic(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + Map partitionHostingBrokers = findPartitionHostingBrokers(cluster.brokers()); + waitForReplicaCreated(partitionHostingBrokers, topicPartition, "Replicas for topic test not created."); + int leaderId = waitUtilLeaderIsKnown(partitionHostingBrokers, topicPartition); + KafkaBroker follower = findFollower(partitionHostingBrokers.values(), leaderId); + // shutdown a broker to make sure the following topic deletion will be suspended + follower.shutdown(); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + + // increase the partition count for topic + Properties properties = new Properties(); + properties.setProperty(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + + try (Admin otherAdmin = Admin.create(properties)) { + otherAdmin.createPartitions(Map.of(DEFAULT_TOPIC, NewPartitions.increaseTo(2))).all().get(); + } catch (ExecutionException ignored) { + // do nothing + } + + follower.startup(); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest + public void testDeleteTopicDuringAddPartition(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + int leaderId = waitUtilLeaderIsKnown(cluster.brokers(), new TopicPartition(DEFAULT_TOPIC, 0)); + TopicPartition newTopicPartition = new TopicPartition(DEFAULT_TOPIC, 1); + KafkaBroker follower = findFollower(cluster.brokers().values(), leaderId); + follower.shutdown(); + + // wait until the broker is in shutting down state + int followerBrokerId = follower.config().brokerId(); + TestUtils.waitForCondition(() -> follower.brokerState().equals(BrokerState.SHUTTING_DOWN), + "Follower " + followerBrokerId + " was not shutdown"); + Map newPartitionSet = Map.of(DEFAULT_TOPIC, NewPartitions.increaseTo(3)); + admin.createPartitions(newPartitionSet); + cluster.waitForTopic(DEFAULT_TOPIC, 3); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + + follower.startup(); + // test if topic deletion is resumed + cluster.waitForTopic(DEFAULT_TOPIC, 0); + waitForReplicaDeleted(cluster.brokers(), newTopicPartition, "Replica logs not for new partition [" + DEFAULT_TOPIC + ",1] not deleted after delete topic is complete."); + } + } + + @ClusterTest + public void testAddPartitionDuringDeleteTopic(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + // partitions to be added to the topic later + TopicPartition newTopicPartition = new TopicPartition(DEFAULT_TOPIC, 1); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + Map newPartitionSet = Map.of(DEFAULT_TOPIC, NewPartitions.increaseTo(3)); + admin.createPartitions(newPartitionSet); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + waitForReplicaDeleted(cluster.brokers(), newTopicPartition, "Replica logs not deleted after delete topic is complete"); + } + } + + @ClusterTest + public void testRecreateTopicAfterDeletion(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + cluster.waitForTopic(DEFAULT_TOPIC, 0); + // re-create topic on same replicas + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + waitForReplicaCreated(cluster.brokers(), topicPartition, "Replicas for topic " + DEFAULT_TOPIC + " not created."); + } + } + @ClusterTest + public void testDeleteNonExistingTopic(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + String topic = "test2"; + TestUtils.waitForCondition(() -> { + try { + admin.deleteTopics(List.of(topic)).all().get(); + return false; + } catch (Exception exception) { + return exception.getCause() instanceof UnknownTopicOrPartitionException; + } + }, "Topic test2 should not exist."); + + cluster.waitForTopic(topic, 0); + + waitForReplicaCreated(cluster.brokers(), topicPartition, "Replicas for topic test not created."); + TestUtils.waitUntilLeaderIsElectedOrChangedWithAdmin(admin, DEFAULT_TOPIC, 0, 1000); + } + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "log.cleaner.enable", value = "true"), + @ClusterConfigProperty(key = "log.cleanup.policy", value = "compact"), + @ClusterConfigProperty(key = "log.cleaner.dedupe.buffer.size", value = "1048577") + }) + public void testDeleteTopicWithCleaner(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + // for simplicity, we are validating cleaner offsets on a single broker + KafkaBroker server = cluster.brokers().values().stream().findFirst().orElseThrow(); + TestUtils.waitForCondition(() -> server.logManager().getLog(topicPartition, false).isDefined(), + "Replicas for topic test not created."); + UnifiedLog log = server.logManager().getLog(topicPartition, false).get(); + writeDups(100, 3, log); + // force roll the segment so that cleaner can work on it + server.logManager().getLog(topicPartition, false).get().roll(Option.empty()); + // wait for cleaner to clean + server.logManager().cleaner().awaitCleaned(topicPartition, 0, 60000); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest + public void testDeleteTopicAlreadyMarkedAsDeleted(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + + TestUtils.waitForCondition(() -> { + try { + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + return false; + } catch (Exception exception) { + return exception.getCause() instanceof UnknownTopicOrPartitionException; + } + }, "Topic " + DEFAULT_TOPIC + " should be marked for deletion or already deleted."); + + cluster.waitForTopic(DEFAULT_TOPIC, 0); + } + } + + @ClusterTest(controllers = 1, + serverProperties = {@ClusterConfigProperty(key = ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, value = "false")}) + public void testDisableDeleteTopic(ClusterInstance cluster) throws Exception { + try (Admin admin = cluster.admin()) { + admin.createTopics(List.of(new NewTopic(DEFAULT_TOPIC, expectedReplicaAssignment))).all().get(); + TopicPartition topicPartition = new TopicPartition(DEFAULT_TOPIC, 0); + TestUtils.waitForCondition(() -> { + try { + admin.deleteTopics(List.of(DEFAULT_TOPIC)).all().get(); + return false; + } catch (Exception exception) { + return exception.getCause() instanceof TopicDeletionDisabledException; + } + }, "TopicDeletionDisabledException should be returned when deleting " + DEFAULT_TOPIC); + + waitForReplicaCreated(cluster.brokers(), topicPartition, "TopicDeletionDisabledException should be returned when deleting " + DEFAULT_TOPIC); + assertDoesNotThrow(() -> admin.describeTopics(List.of(DEFAULT_TOPIC)).allTopicNames().get()); + assertDoesNotThrow(() -> waitUtilLeaderIsKnown(cluster.brokers(), topicPartition)); + } + } + + private int waitUtilLeaderIsKnown(Map idToBroker, + TopicPartition topicPartition) throws InterruptedException { + TestUtils.waitForCondition(() -> isLeaderKnown(idToBroker, topicPartition).get().isPresent(), 15000, + "Partition " + topicPartition + " not made yet" + " after 15 seconds"); + return isLeaderKnown(idToBroker, topicPartition).get().get(); + } + + private void waitForReplicaCreated(Map clusters, + TopicPartition topicPartition, + String failMessage) throws InterruptedException { + TestUtils.waitForCondition(() -> clusters.values().stream().allMatch(broker -> + broker.logManager().getLog(topicPartition, false).isDefined()), + failMessage); + } + + private void waitForReplicaDeleted(Map clusters, + TopicPartition newTopicPartition, + String failMessage) throws InterruptedException { + TestUtils.waitForCondition(() -> clusters.values().stream().allMatch(broker -> + broker.logManager().getLog(newTopicPartition, false).isEmpty()), + failMessage); + } + + private Supplier> isLeaderKnown(Map idToBroker, TopicPartition topicPartition) { + return () -> idToBroker.values() + .stream() + .filter(broker -> OptionConverters.toJava(broker.replicaManager().onlinePartition(topicPartition)) + .stream().anyMatch(tp -> tp.leaderIdIfLocal().isDefined())) + .map(broker -> broker.config().brokerId()) + .findFirst(); + } + + private KafkaBroker findFollower(Collection idToBroker, int leaderId) { + return idToBroker.stream() + .filter(broker -> broker.config().brokerId() != leaderId) + .findFirst() + .orElseGet(() -> fail("Can't find any follower")); + } + + private void waitUtilTopicGone(Admin admin) throws Exception { + TestUtils.waitForCondition(() -> { + try { + admin.describeTopics(List.of(DEFAULT_TOPIC)).allTopicNames().get(); + return false; + } catch (Exception exception) { + return exception.getCause() instanceof UnknownTopicOrPartitionException; + } + }, "Topic" + DEFAULT_TOPIC + " should be deleted"); + } + + private Map findPartitionHostingBrokers(Map brokers) { + return brokers.entrySet() + .stream() + .filter(broker -> expectedReplicaAssignment.get(0).contains(broker.getValue().config().brokerId())) + .collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue)); + } + + private List writeDups(int numKeys, int numDups, UnifiedLog log) { + int counter = 0; + List result = new ArrayList<>(); + + for (int i = 0; i < numDups; i++) { + for (int key = 0; key < numKeys; key++) { + int count = counter; + log.appendAsLeader( + MemoryRecords.withRecords( + Compression.NONE, + new SimpleRecord( + String.valueOf(key).getBytes(), + String.valueOf(counter).getBytes() + ) + ), + 0, + AppendOrigin.CLIENT, + RequestLocal.noCaching(), + VerificationGuard.SENTINEL + ); + counter++; + result.add(new int[] {key, count}); + } + } + return result; + } +} diff --git a/core/src/test/java/kafka/admin/DescribeAuthorizedOperationsTest.java b/core/src/test/java/kafka/admin/DescribeAuthorizedOperationsTest.java new file mode 100644 index 0000000000000..d58f1d9ec6e3a --- /dev/null +++ b/core/src/test/java/kafka/admin/DescribeAuthorizedOperationsTest.java @@ -0,0 +1,252 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.admin; + +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.ConsumerGroupDescription; +import org.apache.kafka.clients.admin.DescribeClusterOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsOptions; +import org.apache.kafka.clients.admin.DescribeConsumerGroupsResult; +import org.apache.kafka.clients.admin.DescribeTopicsOptions; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.consumer.OffsetAndMetadata; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.acl.AccessControlEntry; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.config.SaslConfigs; +import org.apache.kafka.common.resource.PatternType; +import org.apache.kafka.common.resource.Resource; +import org.apache.kafka.common.resource.ResourcePattern; +import org.apache.kafka.common.resource.ResourceType; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.JaasUtils; +import org.apache.kafka.common.test.api.ClusterConfig; +import org.apache.kafka.common.test.api.ClusterTemplate; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.common.test.junit.ClusterTestExtensions; +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig; +import org.apache.kafka.security.authorizer.AclEntry; + +import org.junit.jupiter.api.extension.ExtendWith; + +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.ExecutionException; + +import static org.apache.kafka.common.acl.AccessControlEntryFilter.ANY; +import static org.apache.kafka.common.acl.AclOperation.ALL; +import static org.apache.kafka.common.acl.AclOperation.ALTER; +import static org.apache.kafka.common.acl.AclOperation.DELETE; +import static org.apache.kafka.common.acl.AclOperation.DESCRIBE; +import static org.apache.kafka.common.acl.AclPermissionType.ALLOW; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; + +@ExtendWith(ClusterTestExtensions.class) +public class DescribeAuthorizedOperationsTest { + private static final String GROUP1 = "group1"; + private static final String GROUP2 = "group2"; + private static final String GROUP3 = "group3"; + private static final ResourcePattern GROUP1_PATTERN = new ResourcePattern(ResourceType.GROUP, GROUP1, PatternType.LITERAL); + private static final ResourcePattern GROUP2_PATTERN = new ResourcePattern(ResourceType.GROUP, GROUP2, PatternType.LITERAL); + private static final ResourcePattern GROUP3_PATTERN = new ResourcePattern(ResourceType.GROUP, GROUP3, PatternType.LITERAL); + private static final ResourcePattern CLUSTER_PATTERN = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL); + private static final AccessControlEntry ALTER_ENTRY = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, ALTER); + private static final AccessControlEntry DESCRIBE_ENTRY = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, DESCRIBE); + + static List generator() { + return List.of( + ClusterConfig.defaultBuilder() + .setTypes(Set.of(Type.KRAFT)) + .setServerProperties(Map.of(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1")) + .setServerProperties(Map.of(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1")) + .setBrokerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT) + .setControllerSecurityProtocol(SecurityProtocol.SASL_PLAINTEXT) + .build() + ); + } + + private static AccessControlEntry createAccessControlEntry(String username, AclOperation operation) { + return new AccessControlEntry( + new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username).toString(), + AclEntry.WILDCARD_HOST, + operation, + ALLOW + ); + } + + private Map createAdminConfig(String username, String password) { + Map configs = new HashMap<>(); + configs.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name); + configs.put(SaslConfigs.SASL_MECHANISM, "PLAIN"); + configs.put(SaslConfigs.SASL_JAAS_CONFIG, + String.format("org.apache.kafka.common.security.plain.PlainLoginModule required username=\"%s\" password=\"%s\";", username, password)); + return configs; + } + + private void setupSecurity(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD))) { + ResourcePattern topicResource = new ResourcePattern(ResourceType.TOPIC, AclEntry.WILDCARD_RESOURCE, PatternType.LITERAL); + + admin.createAcls(List.of( + new AclBinding(CLUSTER_PATTERN, ALTER_ENTRY), + new AclBinding(topicResource, DESCRIBE_ENTRY) + )).all().get(); + + clusterInstance.waitAcls(new AclBindingFilter(CLUSTER_PATTERN.toFilter(), ANY), Set.of(ALTER_ENTRY)); + clusterInstance.waitAcls(new AclBindingFilter(topicResource.toFilter(), ANY), Set.of(DESCRIBE_ENTRY)); + } + } + + @ClusterTemplate("generator") + public void testConsumerGroupAuthorizedOperations(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + setupSecurity(clusterInstance); + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD)); + Admin user1 = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD)); + ) { + admin.createTopics(List.of(new NewTopic("topic1", 1, (short) 1))); + clusterInstance.waitForTopic("topic1", 1); + + // create consumers to avoid group not found error + TopicPartition tp = new TopicPartition("topic1", 0); + OffsetAndMetadata offsetAndMetadata = new OffsetAndMetadata(0); + admin.alterConsumerGroupOffsets(GROUP1, Map.of(tp, offsetAndMetadata)).all().get(); + admin.alterConsumerGroupOffsets(GROUP2, Map.of(tp, offsetAndMetadata)).all().get(); + admin.alterConsumerGroupOffsets(GROUP3, Map.of(tp, offsetAndMetadata)).all().get(); + + AccessControlEntry allOperationsEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, ALL); + AccessControlEntry describeEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, DESCRIBE); + AccessControlEntry deleteEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, DELETE); + user1.createAcls(List.of( + new AclBinding(GROUP1_PATTERN, allOperationsEntry), + new AclBinding(GROUP2_PATTERN, describeEntry), + new AclBinding(GROUP3_PATTERN, deleteEntry) + )).all(); + clusterInstance.waitAcls(new AclBindingFilter(GROUP1_PATTERN.toFilter(), ANY), Set.of(allOperationsEntry)); + clusterInstance.waitAcls(new AclBindingFilter(GROUP2_PATTERN.toFilter(), ANY), Set.of(describeEntry)); + clusterInstance.waitAcls(new AclBindingFilter(GROUP3_PATTERN.toFilter(), ANY), Set.of(deleteEntry)); + + DescribeConsumerGroupsResult describeConsumerGroupsResult = user1.describeConsumerGroups( + List.of(GROUP1, GROUP2, GROUP3), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)); + assertEquals(3, describeConsumerGroupsResult.describedGroups().size()); + + ConsumerGroupDescription group1Description = describeConsumerGroupsResult.describedGroups().get(GROUP1).get(); + assertEquals(AclEntry.supportedOperations(ResourceType.GROUP), group1Description.authorizedOperations()); + + ConsumerGroupDescription group2Description = describeConsumerGroupsResult.describedGroups().get(GROUP2).get(); + assertEquals(Set.of(DESCRIBE), group2Description.authorizedOperations()); + + ConsumerGroupDescription group3Description = describeConsumerGroupsResult.describedGroups().get(GROUP3).get(); + assertEquals(Set.of(DESCRIBE, DELETE), group3Description.authorizedOperations()); + } + } + + @ClusterTemplate("generator") + public void testClusterAuthorizedOperations(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + setupSecurity(clusterInstance); + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD))) { + // test without includeAuthorizedOperations flag + Set authorizedOperations = admin.describeCluster().authorizedOperations().get(); + assertNull(authorizedOperations); + + // test with includeAuthorizedOperations flag + authorizedOperations = admin.describeCluster(new DescribeClusterOptions().includeAuthorizedOperations(true)).authorizedOperations().get(); + assertEquals(Set.of(DESCRIBE, ALTER), authorizedOperations); + } + + // enable all operations for cluster resource + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD))) { + AccessControlEntry allOperationEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, ALL); + admin.createAcls(List.of(new AclBinding(CLUSTER_PATTERN, allOperationEntry))).all().get(); + clusterInstance.waitAcls( + new AclBindingFilter(CLUSTER_PATTERN.toFilter(), ANY), + Set.of(allOperationEntry, ALTER_ENTRY) + ); + } + + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD))) { + Set authorizedOperations = admin.describeCluster(new DescribeClusterOptions().includeAuthorizedOperations(true)).authorizedOperations().get(); + assertEquals(AclEntry.supportedOperations(ResourceType.CLUSTER), authorizedOperations); + } + } + + @ClusterTemplate("generator") + public void testTopicAuthorizedOperations(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + String topic1 = "topic1"; + String topic2 = "topic2"; + setupSecurity(clusterInstance); + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD))) { + admin.createTopics(List.of( + new NewTopic(topic1, 1, (short) 1), + new NewTopic(topic2, 1, (short) 1) + )); + clusterInstance.waitForTopic(topic1, 1); + clusterInstance.waitForTopic(topic2, 1); + } + + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD))) { + // test without includeAuthorizedOperations flag + Map topicDescriptions = admin.describeTopics(List.of(topic1, topic2)).allTopicNames().get(); + assertNull(topicDescriptions.get(topic1).authorizedOperations()); + assertNull(topicDescriptions.get(topic2).authorizedOperations()); + + // test with includeAuthorizedOperations flag + topicDescriptions = admin.describeTopics( + List.of(topic1, topic2), + new DescribeTopicsOptions().includeAuthorizedOperations(true)).allTopicNames().get(); + assertEquals(Set.of(DESCRIBE), topicDescriptions.get(topic1).authorizedOperations()); + assertEquals(Set.of(DESCRIBE), topicDescriptions.get(topic2).authorizedOperations()); + } + + // add few permissions + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_ADMIN, JaasUtils.KAFKA_PLAIN_ADMIN_PASSWORD))) { + ResourcePattern topic1Resource = new ResourcePattern(ResourceType.TOPIC, topic1, PatternType.LITERAL); + ResourcePattern topic2Resource = new ResourcePattern(ResourceType.TOPIC, topic2, PatternType.LITERAL); + AccessControlEntry allOperationEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, ALL); + AccessControlEntry deleteEntry = createAccessControlEntry(JaasUtils.KAFKA_PLAIN_USER1, DELETE); + admin.createAcls(List.of( + new AclBinding(topic1Resource, allOperationEntry), + new AclBinding(topic2Resource, deleteEntry) + )).all().get(); + clusterInstance.waitAcls( + new AclBindingFilter(topic1Resource.toFilter(), ANY), + Set.of(allOperationEntry) + ); + clusterInstance.waitAcls( + new AclBindingFilter(topic2Resource.toFilter(), ANY), + Set.of(deleteEntry) + ); + } + + try (Admin admin = clusterInstance.admin(createAdminConfig(JaasUtils.KAFKA_PLAIN_USER1, JaasUtils.KAFKA_PLAIN_USER1_PASSWORD))) { + Map topicDescriptions = admin.describeTopics( + List.of(topic1, topic2), + new DescribeTopicsOptions().includeAuthorizedOperations(true)).allTopicNames().get(); + assertEquals(AclEntry.supportedOperations(ResourceType.TOPIC), topicDescriptions.get(topic1).authorizedOperations()); + assertEquals(Set.of(DESCRIBE, DELETE), topicDescriptions.get(topic2).authorizedOperations()); + } + } +} \ No newline at end of file diff --git a/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java b/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java new file mode 100644 index 0000000000000..8ca269f4b6866 --- /dev/null +++ b/core/src/test/java/kafka/admin/UserScramCredentialsCommandTest.java @@ -0,0 +1,183 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin; + +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.utils.Exit; +import org.apache.kafka.test.NoRetryException; +import org.apache.kafka.test.TestUtils; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; +import java.util.OptionalInt; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@SuppressWarnings("dontUseSystemExit") +public class UserScramCredentialsCommandTest { + private static final String USER1 = "user1"; + private static final String USER2 = "user2"; + + private final ClusterInstance cluster; + + public UserScramCredentialsCommandTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + static class ConfigCommandResult { + public final String stdout; + public final OptionalInt exitStatus; + + public ConfigCommandResult(String stdout) { + this(stdout, OptionalInt.empty()); + } + + public ConfigCommandResult(String stdout, OptionalInt exitStatus) { + this.stdout = stdout; + this.exitStatus = exitStatus; + } + } + + private ConfigCommandResult runConfigCommandViaBroker(String... args) { + AtomicReference exitStatus = new AtomicReference<>(OptionalInt.empty()); + Exit.setExitProcedure((status, __) -> { + exitStatus.set(OptionalInt.of(status)); + throw new RuntimeException(); + }); + + List commandArgs = new ArrayList<>(Arrays.asList("--bootstrap-server", cluster.bootstrapServers())); + commandArgs.addAll(Arrays.asList(args)); + try { + String output = ConfigCommandIntegrationTest.captureStandardStream(false, () -> { + ConfigCommand.main(commandArgs.toArray(new String[0])); + }); + return new ConfigCommandResult(output); + } catch (Exception e) { + return new ConfigCommandResult("", exitStatus.get()); + } finally { + Exit.resetExitProcedure(); + } + } + + @ClusterTest + public void testUserScramCredentialsRequests() throws Exception { + createAndAlterUser(USER1); + // now do the same thing for user2 + createAndAlterUser(USER2); + + // describe both + // we don't know the order that quota or scram users come out, so we have 2 possibilities for each, 4 total + String quotaPossibilityAOut = quotaMessage(USER1) + "\n" + quotaMessage(USER2); + String quotaPossibilityBOut = quotaMessage(USER2) + "\n" + quotaMessage(USER1); + String scramPossibilityAOut = describeUserMessage(USER1) + "\n" + describeUserMessage(USER2); + String scramPossibilityBOut = describeUserMessage(USER2) + "\n" + describeUserMessage(USER1); + describeUsers( + quotaPossibilityAOut + "\n" + scramPossibilityAOut, + quotaPossibilityAOut + "\n" + scramPossibilityBOut, + quotaPossibilityBOut + "\n" + scramPossibilityAOut, + quotaPossibilityBOut + "\n" + scramPossibilityBOut); + + // now delete configs, in opposite order, for user1 and user2, and describe + deleteConfig(USER1, "consumer_byte_rate"); + deleteConfig(USER2, "SCRAM-SHA-256"); + describeUsers(quotaMessage(USER2) + "\n" + describeUserMessage(USER1)); + + // now delete the rest of the configs, for user1 and user2, and describe + deleteConfig(USER1, "SCRAM-SHA-256"); + deleteConfig(USER2, "consumer_byte_rate"); + describeUsers(""); + } + + @ClusterTest + public void testAlterWithEmptyPassword() { + String user1 = "user1"; + ConfigCommandResult result = runConfigCommandViaBroker("--user", user1, "--alter", "--add-config", "SCRAM-SHA-256=[iterations=4096,password=]"); + assertTrue(result.exitStatus.isPresent(), "Expected System.exit() to be called with an empty password"); + assertEquals(1, result.exitStatus.getAsInt(), "Expected empty password to cause failure with exit status=1"); + } + + @ClusterTest + public void testDescribeUnknownUser() { + String unknownUser = "unknownUser"; + ConfigCommandResult result = runConfigCommandViaBroker("--user", unknownUser, "--describe"); + assertFalse(result.exitStatus.isPresent(), "Expected System.exit() to not be called with an unknown user"); + assertEquals("", result.stdout); + } + + private void createAndAlterUser(String user) throws InterruptedException { + // create and describe a credential + ConfigCommandResult result = runConfigCommandViaBroker("--user", user, "--alter", "--add-config", "SCRAM-SHA-256=[iterations=4096,password=foo-secret]"); + assertEquals(updateUserMessage(user), result.stdout); + TestUtils.waitForCondition( + () -> { + try { + return Objects.equals(runConfigCommandViaBroker("--user", user, "--describe").stdout, describeUserMessage(user)); + } catch (Exception e) { + throw new NoRetryException(e); + } + }, + () -> "Failed to describe SCRAM credential change '" + user + "'"); + // create a user quota and describe the user again + result = runConfigCommandViaBroker("--user", user, "--alter", "--add-config", "consumer_byte_rate=20000"); + assertEquals(updateUserMessage(user), result.stdout); + TestUtils.waitForCondition( + () -> { + try { + return Objects.equals(runConfigCommandViaBroker("--user", user, "--describe").stdout, quotaMessage(user) + "\n" + describeUserMessage(user)); + } catch (Exception e) { + throw new NoRetryException(e); + } + }, + () -> "Failed to describe Quota change for '" + user + "'"); + } + + private void deleteConfig(String user, String config) { + ConfigCommandResult result = runConfigCommandViaBroker("--user", user, "--alter", "--delete-config", config); + assertEquals(updateUserMessage(user), result.stdout); + } + + private void describeUsers(String... msgs) throws InterruptedException { + TestUtils.waitForCondition( + () -> { + try { + String output = runConfigCommandViaBroker("--entity-type", "users", "--describe").stdout; + return Arrays.asList(msgs).contains(output); + } catch (Exception e) { + throw new NoRetryException(e); + } + }, + () -> "Failed to describe config"); + } + + private static String describeUserMessage(String user) { + return "SCRAM credential configs for user-principal '" + user + "' are SCRAM-SHA-256=iterations=4096"; + } + + private static String updateUserMessage(String user) { + return "Completed updating config for user " + user + "."; + } + + private static String quotaMessage(String user) { + return "Quota configs for user-principal '" + user + "' are consumer_byte_rate=20000.0"; + } +} diff --git a/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java b/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java new file mode 100644 index 0000000000000..4007f55ee5a44 --- /dev/null +++ b/core/src/test/java/kafka/clients/consumer/ConsumerIntegrationTest.java @@ -0,0 +1,184 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.clients.consumer; + +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRebalanceListener; +import org.apache.kafka.clients.consumer.GroupProtocol; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.consumer.internals.AbstractHeartbeatRequestManager; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.TestUtils; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTests; + +import java.time.Duration; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class ConsumerIntegrationTest { + + @ClusterTests({ + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "group.coordinator.new.enable", value = "false") + }), + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + @ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic") + }) + }) + public void testAsyncConsumerWithOldGroupCoordinator(ClusterInstance clusterInstance) throws Exception { + String topic = "test-topic"; + clusterInstance.createTopic(topic, 1, (short) 1); + try (KafkaConsumer consumer = new KafkaConsumer<>(Map.of( + ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers(), + ConsumerConfig.GROUP_ID_CONFIG, "test-group", + ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), + ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName(), + ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name()))) { + consumer.subscribe(Collections.singletonList(topic)); + TestUtils.waitForCondition(() -> { + try { + consumer.poll(Duration.ofMillis(1000)); + return false; + } catch (UnsupportedVersionException e) { + return e.getMessage().equals(AbstractHeartbeatRequestManager.CONSUMER_PROTOCOL_NOT_SUPPORTED_MSG); + } + }, "Should get UnsupportedVersionException and how to revert to classic protocol"); + } + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsAfterFailedListenerWithGroupProtocolClassic(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsAfterFailedListener(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsAfterFailedListenerWithGroupProtocolConsumer(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsAfterFailedListener(clusterInstance, GroupProtocol.CONSUMER); + } + + private static void testFetchPartitionsAfterFailedListener(ClusterInstance clusterInstance, GroupProtocol groupProtocol) + throws InterruptedException { + var topic = "topic"; + try (var producer = clusterInstance.producer(Map.of( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class))) { + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + } + + try (var consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()))) { + consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() { + private int count = 0; + @Override + public void onPartitionsRevoked(Collection partitions) { + } + + @Override + public void onPartitionsAssigned(Collection partitions) { + count++; + if (count == 1) throw new IllegalArgumentException("temporary error"); + } + }); + + TestUtils.waitForCondition(() -> consumer.poll(Duration.ofSeconds(1)).count() == 1, + 5000, + "failed to poll data"); + } + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsWithAlwaysFailedListenerWithGroupProtocolClassic(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsWithAlwaysFailedListener(clusterInstance, GroupProtocol.CLASSIC); + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + @ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + }) + public void testFetchPartitionsWithAlwaysFailedListenerWithGroupProtocolConsumer(ClusterInstance clusterInstance) + throws InterruptedException { + testFetchPartitionsWithAlwaysFailedListener(clusterInstance, GroupProtocol.CONSUMER); + } + + private static void testFetchPartitionsWithAlwaysFailedListener(ClusterInstance clusterInstance, GroupProtocol groupProtocol) + throws InterruptedException { + var topic = "topic"; + try (var producer = clusterInstance.producer(Map.of( + ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class, + ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class))) { + producer.send(new ProducerRecord<>(topic, "key".getBytes(), "value".getBytes())); + } + + try (var consumer = clusterInstance.consumer(Map.of( + ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol.name()))) { + consumer.subscribe(List.of(topic), new ConsumerRebalanceListener() { + @Override + public void onPartitionsRevoked(Collection partitions) { + } + + @Override + public void onPartitionsAssigned(Collection partitions) { + throw new IllegalArgumentException("always failed"); + } + }); + + long startTimeMillis = System.currentTimeMillis(); + long currentTimeMillis = System.currentTimeMillis(); + while (currentTimeMillis < startTimeMillis + 3000) { + currentTimeMillis = System.currentTimeMillis(); + try { + // In the async consumer, there is a possibility that the ConsumerRebalanceListenerCallbackCompletedEvent + // has not yet reached the application thread. And a poll operation might still succeed, but it + // should not return any records since none of the assigned topic partitions are marked as fetchable. + assertEquals(0, consumer.poll(Duration.ofSeconds(1)).count()); + } catch (KafkaException ex) { + assertEquals("User rebalance callback throws an error", ex.getMessage()); + } + Thread.sleep(300); + } + } + } +} diff --git a/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java new file mode 100644 index 0000000000000..7f536c9872f91 --- /dev/null +++ b/core/src/test/java/kafka/log/remote/RemoteLogManagerTest.java @@ -0,0 +1,3760 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import kafka.cluster.Partition; +import kafka.log.UnifiedLog; +import kafka.server.KafkaConfig; + +import org.apache.kafka.common.Endpoint; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.Uuid; +import org.apache.kafka.common.compress.Compression; +import org.apache.kafka.common.errors.ReplicaNotAvailableException; +import org.apache.kafka.common.metrics.KafkaMetric; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.record.FileRecords; +import org.apache.kafka.common.record.MemoryRecords; +import org.apache.kafka.common.record.RecordBatch; +import org.apache.kafka.common.record.RemoteLogInputStream; +import org.apache.kafka.common.record.SimpleRecord; +import org.apache.kafka.common.requests.FetchRequest; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.test.api.Flaky; +import org.apache.kafka.common.utils.MockTime; +import org.apache.kafka.common.utils.Time; +import org.apache.kafka.server.common.OffsetAndEpoch; +import org.apache.kafka.server.common.StopPartition; +import org.apache.kafka.server.config.ServerConfigs; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManager; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManagerConfig; +import org.apache.kafka.server.log.remote.storage.ClassLoaderAwareRemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.LogSegmentData; +import org.apache.kafka.server.log.remote.storage.NoOpRemoteLogMetadataManager; +import org.apache.kafka.server.log.remote.storage.NoOpRemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig; +import org.apache.kafka.server.log.remote.storage.RemoteLogMetadataManager; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentId; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadata.CustomMetadata; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentMetadataUpdate; +import org.apache.kafka.server.log.remote.storage.RemoteLogSegmentState; +import org.apache.kafka.server.log.remote.storage.RemoteStorageException; +import org.apache.kafka.server.log.remote.storage.RemoteStorageManager; +import org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType; +import org.apache.kafka.server.metrics.KafkaMetricsGroup; +import org.apache.kafka.server.metrics.KafkaYammerMetrics; +import org.apache.kafka.server.storage.log.FetchIsolation; +import org.apache.kafka.server.util.MockScheduler; +import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.EpochEntry; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.LazyIndex; +import org.apache.kafka.storage.internals.log.LogConfig; +import org.apache.kafka.storage.internals.log.LogDirFailureChannel; +import org.apache.kafka.storage.internals.log.LogFileUtils; +import org.apache.kafka.storage.internals.log.LogSegment; +import org.apache.kafka.storage.internals.log.OffsetIndex; +import org.apache.kafka.storage.internals.log.ProducerStateManager; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; +import org.apache.kafka.storage.internals.log.TimeIndex; +import org.apache.kafka.storage.internals.log.TransactionIndex; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; +import org.apache.kafka.test.TestUtils; + +import com.yammer.metrics.core.Gauge; +import com.yammer.metrics.core.MetricName; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.CsvSource; +import org.junit.jupiter.params.provider.ValueSource; +import org.mockito.ArgumentCaptor; +import org.mockito.InOrder; +import org.mockito.MockedConstruction; +import org.mockito.Mockito; +import org.opentest4j.AssertionFailedError; + +import java.io.BufferedReader; +import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.io.UncheckedIOException; +import java.nio.ByteBuffer; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableMap; +import java.util.NoSuchElementException; +import java.util.Optional; +import java.util.OptionalInt; +import java.util.Properties; +import java.util.Set; +import java.util.TreeMap; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.BiConsumer; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import scala.Option; +import scala.jdk.javaapi.CollectionConverters; + +import static kafka.log.remote.RemoteLogManager.isRemoteSegmentWithinLeaderEpochs; +import static org.apache.kafka.common.record.TimestampType.CREATE_TIME; +import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX; +import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_CONSUMER_PREFIX; +import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_PRODUCER_PREFIX; +import static org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig.REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX; +import static org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig.DEFAULT_REMOTE_STORAGE_MANAGER_CONFIG_PREFIX; +import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC; +import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC; +import static org.apache.kafka.server.log.remote.storage.RemoteStorageMetrics.REMOTE_STORAGE_THREAD_POOL_METRICS; +import static org.apache.kafka.test.TestUtils.tempFile; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyMap; +import static org.mockito.ArgumentMatchers.anySet; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.clearInvocations; +import static org.mockito.Mockito.doAnswer; +import static org.mockito.Mockito.doNothing; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.inOrder; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.mockConstruction; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.verifyNoMoreInteractions; +import static org.mockito.Mockito.when; + +public class RemoteLogManagerTest { + private final Time time = new MockTime(); + private final int brokerId = 0; + private final String logDir = TestUtils.tempDirectory("kafka-").toString(); + private final String clusterId = "dummyId"; + private final String remoteLogStorageTestProp = "remote.log.storage.test"; + private final String remoteLogStorageTestVal = "storage.test"; + private final String remoteLogMetadataTestProp = "remote.log.metadata.test"; + private final String remoteLogMetadataTestVal = "metadata.test"; + private final String remoteLogMetadataCommonClientTestProp = REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "common.client.test"; + private final String remoteLogMetadataCommonClientTestVal = "common.test"; + private final String remoteLogMetadataProducerTestProp = REMOTE_LOG_METADATA_PRODUCER_PREFIX + "producer.test"; + private final String remoteLogMetadataProducerTestVal = "producer.test"; + private final String remoteLogMetadataConsumerTestProp = REMOTE_LOG_METADATA_CONSUMER_PREFIX + "consumer.test"; + private final String remoteLogMetadataConsumerTestVal = "consumer.test"; + private final String remoteLogMetadataTopicPartitionsNum = "1"; + private final long quotaExceededThrottleTime = 1000L; + private final long quotaAvailableThrottleTime = 0L; + + private final RemoteStorageManager remoteStorageManager = mock(RemoteStorageManager.class); + private final RemoteLogMetadataManager remoteLogMetadataManager = mock(RemoteLogMetadataManager.class); + private final RLMQuotaManager rlmCopyQuotaManager = mock(RLMQuotaManager.class); + private KafkaConfig config; + + private BrokerTopicStats brokerTopicStats = null; + private final Metrics metrics = new Metrics(time); + private RemoteLogManager remoteLogManager = null; + + private final TopicIdPartition leaderTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("Leader", 0)); + private final String leaderTopic = "Leader"; + private final TopicIdPartition followerTopicIdPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("Follower", 0)); + private final Map topicIds = new HashMap<>(); + private final TopicPartition tp = new TopicPartition("TestTopic", 5); + private final EpochEntry epochEntry0 = new EpochEntry(0, 0); + private final EpochEntry epochEntry1 = new EpochEntry(1, 100); + private final EpochEntry epochEntry2 = new EpochEntry(2, 200); + private final List totalEpochEntries = Arrays.asList(epochEntry0, epochEntry1, epochEntry2); + private LeaderEpochCheckpointFile checkpoint; + private final AtomicLong currentLogStartOffset = new AtomicLong(0L); + + private UnifiedLog mockLog = mock(UnifiedLog.class); + + private final MockScheduler scheduler = new MockScheduler(time); + private final Properties brokerConfig = kafka.utils.TestUtils.createDummyBrokerConfig(); + + @BeforeEach + void setUp() throws Exception { + checkpoint = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)); + topicIds.put(leaderTopicIdPartition.topicPartition().topic(), leaderTopicIdPartition.topicId()); + topicIds.put(followerTopicIdPartition.topicPartition().topic(), followerTopicIdPartition.topicId()); + Properties props = brokerConfig; + props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); + props.setProperty(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP, "100"); + appendRLMConfig(props); + config = KafkaConfig.fromProps(props); + brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig().isRemoteStorageSystemEnabled()); + + remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> currentLogStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + public RLMQuotaManager createRLMCopyQuotaManager() { + return rlmCopyQuotaManager; + } + public Duration quotaTimeout() { + return Duration.ofMillis(100); + } + @Override + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { + return 0L; + } + }; + doReturn(true).when(remoteLogMetadataManager).isReady(any(TopicIdPartition.class)); + } + + @AfterEach + void tearDown() { + if (remoteLogManager != null) { + remoteLogManager.close(); + remoteLogManager = null; + } + kafka.utils.TestUtils.clearYammerMetrics(); + } + + @Test + void testGetLeaderEpochCheckpoint() { + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + assertEquals(totalEpochEntries, remoteLogManager.getLeaderEpochEntries(mockLog, 0, 300)); + + List epochEntries = remoteLogManager.getLeaderEpochEntries(mockLog, 100, 200); + assertEquals(1, epochEntries.size()); + assertEquals(epochEntry1, epochEntries.get(0)); + } + + @Test + void testFindHighestRemoteOffsetOnEmptyRemoteStorage() throws RemoteStorageException { + List totalEpochEntries = Arrays.asList( + new EpochEntry(0, 0), + new EpochEntry(1, 500) + ); + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); + OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog); + assertEquals(new OffsetAndEpoch(-1L, -1), offsetAndEpoch); + } + + @Test + void testFindHighestRemoteOffset() throws RemoteStorageException { + List totalEpochEntries = Arrays.asList( + new EpochEntry(0, 0), + new EpochEntry(1, 500) + ); + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); + when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { + Integer epoch = ans.getArgument(1, Integer.class); + if (epoch == 0) { + return Optional.of(200L); + } else { + return Optional.empty(); + } + }); + OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog); + assertEquals(new OffsetAndEpoch(200L, 0), offsetAndEpoch); + } + + @Test + void testFindHighestRemoteOffsetWithUncleanLeaderElection() throws RemoteStorageException { + List totalEpochEntries = Arrays.asList( + new EpochEntry(0, 0), + new EpochEntry(1, 150), + new EpochEntry(2, 300) + ); + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); + when(remoteLogMetadataManager.highestOffsetForEpoch(eq(tpId), anyInt())).thenAnswer(ans -> { + Integer epoch = ans.getArgument(1, Integer.class); + if (epoch == 0) { + return Optional.of(200L); + } else { + return Optional.empty(); + } + }); + OffsetAndEpoch offsetAndEpoch = remoteLogManager.findHighestRemoteOffset(tpId, mockLog); + assertEquals(new OffsetAndEpoch(149L, 0), offsetAndEpoch); + } + + @Test + void testRemoteLogMetadataManagerWithUserDefinedConfigs() { + String key = "key"; + String configPrefix = "config.prefix"; + Properties props = new Properties(); + props.putAll(brokerConfig); + props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX_PROP, configPrefix); + props.put(configPrefix + key, "world"); + props.put("remote.log.metadata.y", "z"); + appendRLMConfig(props); + KafkaConfig config = KafkaConfig.fromProps(props); + + Map metadataMangerConfig = config.remoteLogManagerConfig().remoteLogMetadataManagerProps(); + assertEquals(props.get(configPrefix + key), metadataMangerConfig.get(key)); + assertFalse(metadataMangerConfig.containsKey("remote.log.metadata.y")); + } + + @Test + void testRemoteStorageManagerWithUserDefinedConfigs() { + String key = "key"; + String configPrefix = "config.prefix"; + Properties props = new Properties(); + props.putAll(brokerConfig); + props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CONFIG_PREFIX_PROP, configPrefix); + props.put(configPrefix + key, "world"); + props.put("remote.storage.manager.y", "z"); + appendRLMConfig(props); + KafkaConfig config = KafkaConfig.fromProps(props); + + Map remoteStorageManagerConfig = config.remoteLogManagerConfig().remoteStorageManagerProps(); + assertEquals(props.get(configPrefix + key), remoteStorageManagerConfig.get(key)); + assertFalse(remoteStorageManagerConfig.containsKey("remote.storage.manager.y")); + } + + @Test + void testRemoteLogMetadataManagerWithEndpointConfig() { + String host = "localhost"; + int port = 1234; + String securityProtocol = "PLAINTEXT"; + Endpoint endPoint = new Endpoint(securityProtocol, SecurityProtocol.PLAINTEXT, host, port); + remoteLogManager.onEndPointCreated(endPoint); + remoteLogManager.startup(); + + ArgumentCaptor> capture = ArgumentCaptor.forClass(Map.class); + verify(remoteLogMetadataManager, times(1)).configure(capture.capture()); + assertEquals(host + ":" + port, capture.getValue().get(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "bootstrap.servers")); + assertEquals(securityProtocol, capture.getValue().get(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "security.protocol")); + assertEquals(clusterId, capture.getValue().get("cluster.id")); + assertEquals(brokerId, capture.getValue().get(ServerConfigs.BROKER_ID_CONFIG)); + } + + @Test + void testRemoteLogMetadataManagerWithEndpointConfigOverridden() throws IOException { + Properties props = new Properties(); + props.putAll(brokerConfig); + // override common security.protocol by adding "RLMM prefix" and "remote log metadata common client prefix" + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "security.protocol", "SSL"); + appendRLMConfig(props); + KafkaConfig config = KafkaConfig.fromProps(props); + try (RemoteLogManager remoteLogManager = new RemoteLogManager( + config.remoteLogManagerConfig(), + brokerId, + logDir, + clusterId, + time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats, + metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + + String host = "localhost"; + int port = 1234; + String securityProtocol = "PLAINTEXT"; + Endpoint endpoint = new Endpoint(securityProtocol, SecurityProtocol.PLAINTEXT, host, port); + remoteLogManager.onEndPointCreated(endpoint); + remoteLogManager.startup(); + + ArgumentCaptor> capture = ArgumentCaptor.forClass(Map.class); + verify(remoteLogMetadataManager, times(1)).configure(capture.capture()); + assertEquals(host + ":" + port, capture.getValue().get(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "bootstrap.servers")); + // should be overridden as SSL + assertEquals("SSL", capture.getValue().get(REMOTE_LOG_METADATA_COMMON_CLIENT_PREFIX + "security.protocol")); + assertEquals(clusterId, capture.getValue().get("cluster.id")); + assertEquals(brokerId, capture.getValue().get(ServerConfigs.BROKER_ID_CONFIG)); + } + } + + @Test + void testStartup() { + remoteLogManager.startup(); + ArgumentCaptor> capture = ArgumentCaptor.forClass(Map.class); + verify(remoteStorageManager, times(1)).configure(capture.capture()); + assertEquals(brokerId, capture.getValue().get("broker.id")); + assertEquals(remoteLogStorageTestVal, capture.getValue().get(remoteLogStorageTestProp)); + + verify(remoteLogMetadataManager, times(1)).configure(capture.capture()); + assertEquals(brokerId, capture.getValue().get("broker.id")); + assertEquals(logDir, capture.getValue().get("log.dir")); + + // verify the configs starting with "remote.log.metadata", "remote.log.metadata.common.client." + // "remote.log.metadata.producer.", and "remote.log.metadata.consumer." are correctly passed in + assertEquals(remoteLogMetadataTopicPartitionsNum, capture.getValue().get(REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP)); + assertEquals(remoteLogMetadataTestVal, capture.getValue().get(remoteLogMetadataTestProp)); + assertEquals(remoteLogMetadataConsumerTestVal, capture.getValue().get(remoteLogMetadataConsumerTestProp)); + assertEquals(remoteLogMetadataProducerTestVal, capture.getValue().get(remoteLogMetadataProducerTestProp)); + assertEquals(remoteLogMetadataCommonClientTestVal, capture.getValue().get(remoteLogMetadataCommonClientTestProp)); + } + + // This test creates 2 log segments, 1st one has start offset of 0, 2nd one (and active one) has start offset of 150. + // The leader epochs are [0->0, 1->100, 2->200]. We are verifying: + // 1. There's only 1 segment copied to remote storage + // 2. The segment got copied to remote storage is the old segment, not the active one + // 3. The log segment metadata stored into remoteLogMetadataManager is what we expected, both before and after copying the log segments + // 4. The log segment got copied to remote storage has the expected metadata + // 5. The highest remote offset is updated to the expected value + @Test + void testCopyLogSegmentsToRemoteShouldCopyExpectedLogSegment() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + long lso = 250L; + long leo = 300L; + assertCopyExpectedLogSegmentsToRemote(oldSegmentStartOffset, nextSegmentStartOffset, lso, leo); + } + + /** + * The following values will be equal when the active segment gets rotated to passive and there are no new messages: + * last-stable-offset = high-water-mark = log-end-offset = base-offset-of-active-segment. + * This test asserts that the active log segment that was rotated after log.roll.ms are copied to remote storage. + */ + @Test + void testCopyLogSegmentToRemoteForStaleTopic() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + long lso = 150L; + long leo = 150L; + assertCopyExpectedLogSegmentsToRemote(oldSegmentStartOffset, nextSegmentStartOffset, lso, leo); + } + + private void assertCopyExpectedLogSegmentsToRemote(long oldSegmentStartOffset, + long nextSegmentStartOffset, + long lastStableOffset, + long logEndOffset) throws Exception { + long oldSegmentEndOffset = nextSegmentStartOffset - 1; + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + verify(oldSegment, times(0)).readNextOffset(); + verify(activeSegment, times(0)).readNextOffset(); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(lastStableOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + when(remoteStorageManager.copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class))) + .thenReturn(Optional.empty()); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + // Verify the metrics for remote writes and for failures is zero before attempt to copy log segment + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyBytesRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyBytesRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + task.copyLogSegmentsToRemote(mockLog); + + // verify remoteLogMetadataManager did add the expected RemoteLogSegmentMetadata + ArgumentCaptor remoteLogSegmentMetadataArg = ArgumentCaptor.forClass(RemoteLogSegmentMetadata.class); + verify(remoteLogMetadataManager).addRemoteLogSegmentMetadata(remoteLogSegmentMetadataArg.capture()); + // The old segment should only contain leader epoch [0->0, 1->100] since its offset range is [0, 149] + Map expectedLeaderEpochs = new TreeMap<>(); + expectedLeaderEpochs.put(epochEntry0.epoch, epochEntry0.startOffset); + expectedLeaderEpochs.put(epochEntry1.epoch, epochEntry1.startOffset); + verifyRemoteLogSegmentMetadata(remoteLogSegmentMetadataArg.getValue(), oldSegmentStartOffset, oldSegmentEndOffset, expectedLeaderEpochs); + + // verify copyLogSegmentData is passing the RemoteLogSegmentMetadata we created above + // and verify the logSegmentData passed is expected + ArgumentCaptor remoteLogSegmentMetadataArg2 = ArgumentCaptor.forClass(RemoteLogSegmentMetadata.class); + ArgumentCaptor logSegmentDataArg = ArgumentCaptor.forClass(LogSegmentData.class); + verify(remoteStorageManager, times(1)).copyLogSegmentData(remoteLogSegmentMetadataArg2.capture(), logSegmentDataArg.capture()); + assertEquals(remoteLogSegmentMetadataArg.getValue(), remoteLogSegmentMetadataArg2.getValue()); + // The old segment should only contain leader epoch [0->0, 1->100] since its offset range is [0, 149] + verifyLogSegmentData(logSegmentDataArg.getValue(), idx, timeIdx, txnIndex, tempFile, mockProducerSnapshotIndex, + Arrays.asList(epochEntry0, epochEntry1)); + + // verify remoteLogMetadataManager did add the expected RemoteLogSegmentMetadataUpdate + ArgumentCaptor remoteLogSegmentMetadataUpdateArg = ArgumentCaptor.forClass(RemoteLogSegmentMetadataUpdate.class); + verify(remoteLogMetadataManager, times(1)).updateRemoteLogSegmentMetadata(remoteLogSegmentMetadataUpdateArg.capture()); + verifyRemoteLogSegmentMetadataUpdate(remoteLogSegmentMetadataUpdateArg.getValue()); + + // verify the highest remote offset is updated to the expected value + ArgumentCaptor argument = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(2)).updateHighestOffsetInRemoteStorage(argument.capture()); + assertEquals(oldSegmentEndOffset, argument.getValue()); + + // Verify the metric for remote writes is updated correctly + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(10, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyBytesRate().count()); + // Verify we did not report any failure for remote writes + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(10, brokerTopicStats.allTopicsStats().remoteCopyBytesRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + } + + // We are verifying that if the size of a piece of custom metadata is bigger than the configured limit, + // the copy task should be cancelled and there should be an attempt to delete the just copied segment. + @Test + void testCustomMetadataSizeExceedsLimit() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + long lastStableOffset = 150L; + long logEndOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + verify(oldSegment, times(0)).readNextOffset(); + verify(activeSegment, times(0)).readNextOffset(); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(lastStableOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + int customMetadataSizeLimit = 128; + CustomMetadata customMetadata = new CustomMetadata(new byte[customMetadataSizeLimit * 2]); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + when(remoteStorageManager.copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class))) + .thenReturn(Optional.of(customMetadata)); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, customMetadataSizeLimit); + task.copyLogSegmentsToRemote(mockLog); + + ArgumentCaptor remoteLogSegmentMetadataArg = ArgumentCaptor.forClass(RemoteLogSegmentMetadata.class); + verify(remoteLogMetadataManager).addRemoteLogSegmentMetadata(remoteLogSegmentMetadataArg.capture()); + + // Check we attempt to delete the segment data providing the custom metadata back. + RemoteLogSegmentMetadataUpdate expectedMetadataUpdate = new RemoteLogSegmentMetadataUpdate( + remoteLogSegmentMetadataArg.getValue().remoteLogSegmentId(), time.milliseconds(), + Optional.of(customMetadata), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, brokerId); + RemoteLogSegmentMetadata expectedDeleteMetadata = remoteLogSegmentMetadataArg.getValue().createWithUpdates(expectedMetadataUpdate); + verify(remoteStorageManager, times(1)).deleteLogSegmentData(eq(expectedDeleteMetadata)); + + // Check the task is cancelled in the end. + assertTrue(task.isCancelled()); + + // The metadata update should be posted. + verify(remoteLogMetadataManager, times(2)).updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class)); + + // Verify the metrics + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyBytesRate().count()); + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyBytesRate().count()); + assertEquals(1, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + } + + @Test + void testFailedCopyShouldDeleteTheDanglingSegment() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + long lastStableOffset = 150L; + long logEndOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(-1L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + verify(oldSegment, times(0)).readNextOffset(); + verify(activeSegment, times(0)).readNextOffset(); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(lastStableOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + + // throw exception when copyLogSegmentData + when(remoteStorageManager.copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class))) + .thenThrow(new RemoteStorageException("test")); + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + task.copyLogSegmentsToRemote(mockLog); + + ArgumentCaptor remoteLogSegmentMetadataArg = ArgumentCaptor.forClass(RemoteLogSegmentMetadata.class); + verify(remoteLogMetadataManager).addRemoteLogSegmentMetadata(remoteLogSegmentMetadataArg.capture()); + // verify the segment is deleted + verify(remoteStorageManager, times(1)).deleteLogSegmentData(eq(remoteLogSegmentMetadataArg.getValue())); + + // verify deletion state update + verify(remoteLogMetadataManager, times(2)).updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class)); + + // Verify the metrics + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyBytesRate().count()); + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyBytesRate().count()); + assertEquals(1, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + } + + @Test + void testLeadershipChangesWithoutRemoteLogManagerConfiguring() { + assertThrows(KafkaException.class, () -> { + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + }, "RemoteLogManager is not configured when remote storage system is enabled"); + } + + @Test + void testRemoteLogManagerTasksAvgIdlePercentAndMetadataCountMetrics() throws Exception { + remoteLogManager.startup(); + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + int segmentCount = 3; + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.parentDir()).thenReturn("dir1"); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(250L); + when(mockLog.logEndOffset()).thenReturn(500L); + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", 100L); + logProps.put("retention.ms", -1L); + LogConfig logConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(logConfig); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + + CountDownLatch copyLogSegmentLatch = new CountDownLatch(1); + doAnswer(ans -> { + // waiting for verification + copyLogSegmentLatch.await(5000, TimeUnit.MILLISECONDS); + return Optional.empty(); + }).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + + CountDownLatch remoteLogMetadataCountLatch = new CountDownLatch(1); + doAnswer(ans -> { + remoteLogMetadataCountLatch.await(5000, TimeUnit.MILLISECONDS); + return null; + }).when(remoteStorageManager).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + + Partition mockLeaderPartition = mockPartition(leaderTopicIdPartition); + Partition mockFollowerPartition = mockPartition(followerTopicIdPartition); + List list = listRemoteLogSegmentMetadata(leaderTopicIdPartition, segmentCount, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + // return the metadataList 3 times, then return empty list to simulate all segments are deleted + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)).thenReturn(list.iterator()).thenReturn(Collections.emptyIterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)).thenReturn(list.iterator()).thenReturn(list.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 1)).thenReturn(list.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 2)).thenReturn(list.iterator()); + + // before running tasks, the remote log manager tasks should be all idle and the remote log metadata count should be 0 + assertEquals(1.0, (double) yammerMetricValue("RemoteLogManagerTasksAvgIdlePercent")); + assertEquals(0, safeLongYammerMetricValue("RemoteLogMetadataCount,topic=" + leaderTopic)); + assertEquals(0, safeLongYammerMetricValue("RemoteLogSizeBytes,topic=" + leaderTopic)); + assertEquals(0, safeLongYammerMetricValue("RemoteLogMetadataCount")); + assertEquals(0, safeLongYammerMetricValue("RemoteLogSizeBytes")); + remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.singleton(mockFollowerPartition), topicIds); + assertTrue((double) yammerMetricValue("RemoteLogManagerTasksAvgIdlePercent") < 1.0); + + copyLogSegmentLatch.countDown(); + + // Now, the `RemoteLogMetadataCount` should set to the expected value + TestUtils.waitForCondition(() -> safeLongYammerMetricValue("RemoteLogMetadataCount,topic=" + leaderTopic) == segmentCount && + safeLongYammerMetricValue("RemoteLogMetadataCount") == segmentCount, + "Didn't show the expected RemoteLogMetadataCount metric value."); + + TestUtils.waitForCondition( + () -> 3072 == safeLongYammerMetricValue("RemoteLogSizeBytes,topic=" + leaderTopic) && + 3072 == safeLongYammerMetricValue("RemoteLogSizeBytes"), + String.format("Expected to find 3072 for RemoteLogSizeBytes metric value, but found %d for 'Leader' topic and %d for all topic", + safeLongYammerMetricValue("RemoteLogSizeBytes,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteLogSizeBytes"))); + + remoteLogMetadataCountLatch.countDown(); + + TestUtils.waitForCondition(() -> safeLongYammerMetricValue("RemoteLogMetadataCount,topic=" + leaderTopic) == 0 && + safeLongYammerMetricValue("RemoteLogMetadataCount") == 0, + "Didn't reset to 0 for RemoteLogMetadataCount metric value when no remote log metadata."); + + TestUtils.waitForCondition( + () -> 0 == safeLongYammerMetricValue("RemoteLogSizeBytes,topic=" + leaderTopic) && + 0 == safeLongYammerMetricValue("RemoteLogSizeBytes"), + String.format("Didn't reset to 0 for RemoteLogSizeBytes metric value when no remote log metadata - found %d for 'Leader' topic and %d for all topic.", + safeLongYammerMetricValue("RemoteLogSizeBytes,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteLogSizeBytes"))); + } + + @Test + void testRemoteLogTaskUpdateRemoteLogSegmentMetadataAfterLogDirChanged() throws Exception { + remoteLogManager.startup(); + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + int segmentCount = 3; + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.parentDir()).thenReturn("dir1"); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) + .thenReturn(Optional.of(0L)) + .thenReturn(Optional.of(nextSegmentStartOffset - 1)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(250L); + when(mockLog.logEndOffset()).thenReturn(500L); + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", 100L); + logProps.put("retention.ms", -1L); + LogConfig logConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(logConfig); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + + CountDownLatch copyLogSegmentLatch = new CountDownLatch(1); + doAnswer(ans -> { + // waiting for verification + copyLogSegmentLatch.await(5000, TimeUnit.MILLISECONDS); + return Optional.empty(); + }).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + Partition mockLeaderPartition = mockPartition(leaderTopicIdPartition); + List metadataList = listRemoteLogSegmentMetadata(leaderTopicIdPartition, segmentCount, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)).thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)).thenReturn(metadataList.iterator()).thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 1)).thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 2)).thenReturn(metadataList.iterator()); + + // leadership change to log in dir1 + remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.emptySet(), topicIds); + + TestUtils.waitForCondition(() -> { + ArgumentCaptor argument = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(1)).updateHighestOffsetInRemoteStorage(argument.capture()); + return 0L == argument.getValue(); + }, "Timed out waiting for updateHighestOffsetInRemoteStorage(0) get invoked for dir1 log"); + + UnifiedLog oldMockLog = mockLog; + Mockito.clearInvocations(oldMockLog); + // simulate altering log dir completes, and the new partition leader changes to the same broker in different log dir (dir2) + mockLog = mock(UnifiedLog.class); + when(mockLog.parentDir()).thenReturn("dir2"); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.config()).thenReturn(logConfig); + when(mockLog.logEndOffset()).thenReturn(500L); + + remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.emptySet(), topicIds); + + // after copyLogSegment completes for log (in dir1), updateHighestOffsetInRemoteStorage will be triggered with new offset + // even though the leader replica has changed to log in dir2 + copyLogSegmentLatch.countDown(); + TestUtils.waitForCondition(() -> { + ArgumentCaptor argument = ArgumentCaptor.forClass(Long.class); + verify(oldMockLog, times(1)).updateHighestOffsetInRemoteStorage(argument.capture()); + return nextSegmentStartOffset - 1 == argument.getValue(); + }, "Timed out waiting for updateHighestOffsetInRemoteStorage(149) get invoked for dir1 log"); + + // On the next run of RLMTask, the log in dir2 will be picked and start by updateHighestOffsetInRemoteStorage to the expected offset + TestUtils.waitForCondition(() -> { + ArgumentCaptor argument = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(1)).updateHighestOffsetInRemoteStorage(argument.capture()); + return nextSegmentStartOffset - 1 == argument.getValue(); + }, "Timed out waiting for updateHighestOffsetInRemoteStorage(149) get invoked for dir2 log"); + + } + + @Test + void testRemoteLogManagerRemoteMetrics() throws Exception { + remoteLogManager.startup(); + long oldestSegmentStartOffset = 0L; + long olderSegmentStartOffset = 75L; + long nextSegmentStartOffset = 150L; + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.parentDir()).thenReturn("dir1"); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 3 log segments, with 0, 75 and 150 as log start offset + LogSegment oldestSegment = mock(LogSegment.class); + LogSegment olderSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldestSegment.baseOffset()).thenReturn(oldestSegmentStartOffset); + when(olderSegment.baseOffset()).thenReturn(olderSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + FileRecords oldestFileRecords = mock(FileRecords.class); + when(oldestSegment.log()).thenReturn(oldestFileRecords); + when(oldestFileRecords.file()).thenReturn(tempFile); + when(oldestFileRecords.sizeInBytes()).thenReturn(10); + when(oldestSegment.readNextOffset()).thenReturn(olderSegmentStartOffset); + + FileRecords olderFileRecords = mock(FileRecords.class); + when(olderSegment.log()).thenReturn(olderFileRecords); + when(olderFileRecords.file()).thenReturn(tempFile); + when(olderFileRecords.sizeInBytes()).thenReturn(10); + when(olderSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldestSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldestSegment, olderSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(250L); + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", 1000000L); + logProps.put("retention.ms", -1L); + LogConfig logConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(logConfig); + + OffsetIndex oldestIdx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1000).get(); + TimeIndex oldestTimeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1500).get(); + File oldestTxnFile = UnifiedLog.transactionIndexFile(tempDir, oldestSegmentStartOffset, ""); + oldestTxnFile.createNewFile(); + TransactionIndex oldestTxnIndex = new TransactionIndex(oldestSegmentStartOffset, oldestTxnFile); + when(oldestSegment.timeIndex()).thenReturn(oldestTimeIdx); + when(oldestSegment.offsetIndex()).thenReturn(oldestIdx); + when(oldestSegment.txnIndex()).thenReturn(oldestTxnIndex); + + OffsetIndex olderIdx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, olderSegmentStartOffset, ""), olderSegmentStartOffset, 1000).get(); + TimeIndex olderTimeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, olderSegmentStartOffset, ""), olderSegmentStartOffset, 1500).get(); + File olderTxnFile = UnifiedLog.transactionIndexFile(tempDir, olderSegmentStartOffset, ""); + oldestTxnFile.createNewFile(); + TransactionIndex olderTxnIndex = new TransactionIndex(olderSegmentStartOffset, olderTxnFile); + when(olderSegment.timeIndex()).thenReturn(olderTimeIdx); + when(olderSegment.offsetIndex()).thenReturn(olderIdx); + when(olderSegment.txnIndex()).thenReturn(olderTxnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + Iterator iterator = listRemoteLogSegmentMetadata(leaderTopicIdPartition, 5, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED).iterator(); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)).thenReturn(iterator); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 2)).thenReturn(iterator); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 1)).thenReturn(iterator); + + CountDownLatch remoteLogSizeComputationTimeLatch = new CountDownLatch(1); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)).thenAnswer(ans -> { + // advance the mock timer 1000ms to add value for RemoteLogSizeComputationTime metric + time.sleep(1000); + return iterator; + }).thenAnswer(ans -> { + // wait for verifying RemoteLogSizeComputationTime metric value. + remoteLogSizeComputationTimeLatch.await(5000, TimeUnit.MILLISECONDS); + return Collections.emptyIterator(); + }); + + CountDownLatch latch = new CountDownLatch(1); + doAnswer(ans -> Optional.empty()).doAnswer(ans -> { + // waiting for verification + latch.await(5000, TimeUnit.MILLISECONDS); + return Optional.empty(); + }).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + Partition mockLeaderPartition = mockPartition(leaderTopicIdPartition); + + // This method is called by both Copy and Expiration task. On the first call, both tasks should see 175 bytes as + // the local log segments size + when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(175L, 175L, 100L); + when(activeSegment.size()).thenReturn(100); + when(mockLog.onlyLocalLogSegmentsCount()).thenReturn(2L).thenReturn(1L); + + // before running tasks, the metric should not be registered + assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic)); + assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic)); + assertThrows(NoSuchElementException.class, () -> yammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic)); + // all topic metrics should be 0 + assertEquals(0L, yammerMetricValue("RemoteCopyLagBytes")); + assertEquals(0L, yammerMetricValue("RemoteCopyLagSegments")); + assertEquals(0L, yammerMetricValue("RemoteLogSizeComputationTime")); + + remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.emptySet(), topicIds); + TestUtils.waitForCondition( + () -> 75 == safeLongYammerMetricValue("RemoteCopyLagBytes") && 75 == safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), + String.format("Expected to find 75 for RemoteCopyLagBytes metric value, but found %d for topic 'Leader' and %d for all topics.", + safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteCopyLagBytes"))); + TestUtils.waitForCondition( + () -> 1 == safeLongYammerMetricValue("RemoteCopyLagSegments") && 1 == safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), + String.format("Expected to find 1 for RemoteCopyLagSegments metric value, but found %d for topic 'Leader' and %d for all topics.", + safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteCopyLagSegments"))); + // unlock copyLogSegmentData + latch.countDown(); + + TestUtils.waitForCondition( + () -> safeLongYammerMetricValue("RemoteLogSizeComputationTime") >= 1000 && safeLongYammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic) >= 1000, + String.format("Expected to find 1000 for RemoteLogSizeComputationTime metric value, but found %d for topic 'Leader' and %d for all topics.", + safeLongYammerMetricValue("RemoteLogSizeComputationTime,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteLogSizeComputationTime"))); + remoteLogSizeComputationTimeLatch.countDown(); + + TestUtils.waitForCondition( + () -> 0 == safeLongYammerMetricValue("RemoteCopyLagBytes") && 0 == safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), + String.format("Expected to find 0 for RemoteCopyLagBytes metric value, but found %d for topic 'Leader' and %d for all topics.", + safeLongYammerMetricValue("RemoteCopyLagBytes,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteCopyLagBytes"))); + TestUtils.waitForCondition( + () -> 0 == safeLongYammerMetricValue("RemoteCopyLagSegments") && 0 == safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), + String.format("Expected to find 0 for RemoteCopyLagSegments metric value, but found %d for topic 'Leader' and %d for all topics.", + safeLongYammerMetricValue("RemoteCopyLagSegments,topic=" + leaderTopic), + safeLongYammerMetricValue("RemoteCopyLagSegments"))); + } + + private Object yammerMetricValue(String name) { + Gauge gauge = (Gauge) KafkaYammerMetrics.defaultRegistry().allMetrics().entrySet().stream() + .filter(e -> e.getKey().getMBeanName().endsWith(name)) + .findFirst() + .get() + .getValue(); + return gauge.value(); + } + + private long safeLongYammerMetricValue(String name) { + try { + return (long) yammerMetricValue(name); + } catch (NoSuchElementException ex) { + return 0L; + } + } + + @Test + void testMetricsUpdateOnCopyLogSegmentsFailure() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + File tempFile = TestUtils.tempFile(); + File mockProducerSnapshotIndex = TestUtils.tempFile(); + File tempDir = TestUtils.tempDirectory(); + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + FileRecords fileRecords = mock(FileRecords.class); + when(oldSegment.log()).thenReturn(fileRecords); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(250L); + + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + doThrow(new RuntimeException()).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + // Verify the metrics for remote write requests/failures is zero before attempt to copy log segment + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + task.copyLogSegmentsToRemote(mockLog); + + // Verify we attempted to copy log segment metadata to remote storage + verify(remoteStorageManager, times(1)).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + + // Verify we should not have updated the highest offset because of write failure + verify(mockLog).updateHighestOffsetInRemoteStorage(anyLong()); + // Verify the metric for remote write requests/failures was updated. + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(1, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + } + + @Test + void testRLMTaskDoesNotUploadSegmentsWhenRemoteLogMetadataManagerIsNotInitialized() throws Exception { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + // Throw a retryable exception so indicate that the remote log metadata manager is not initialized yet + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) + .thenThrow(new ReplicaNotAvailableException("Remote log metadata cache is not initialized for partition: " + leaderTopicIdPartition)); + + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + when(mockLog.lastStableOffset()).thenReturn(250L); + + // Ensure the metrics for remote write requests/failures is zero before attempt to copy log segment + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Ensure aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + task.run(); + + // verify the remoteLogMetadataManager never add any metadata and remoteStorageManager never copy log segments + verify(remoteLogMetadataManager, never()).addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class)); + verify(remoteStorageManager, never()).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + verify(remoteLogMetadataManager, never()).updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class)); + verify(mockLog, never()).updateHighestOffsetInRemoteStorage(anyLong()); + + // Verify the metric for remote write requests/failures was not updated. + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteCopyRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteCopyRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteCopyRequestRate().count()); + } + + private void verifyRemoteLogSegmentMetadata(RemoteLogSegmentMetadata remoteLogSegmentMetadata, + long oldSegmentStartOffset, + long oldSegmentEndOffset, + Map expectedLeaderEpochs) { + assertEquals(leaderTopicIdPartition, remoteLogSegmentMetadata.remoteLogSegmentId().topicIdPartition()); + assertEquals(oldSegmentStartOffset, remoteLogSegmentMetadata.startOffset()); + assertEquals(oldSegmentEndOffset, remoteLogSegmentMetadata.endOffset()); + + NavigableMap leaderEpochs = remoteLogSegmentMetadata.segmentLeaderEpochs(); + assertEquals(expectedLeaderEpochs.size(), leaderEpochs.size()); + Iterator> leaderEpochEntries = expectedLeaderEpochs.entrySet().iterator(); + assertEquals(leaderEpochEntries.next(), leaderEpochs.firstEntry()); + assertEquals(leaderEpochEntries.next(), leaderEpochs.lastEntry()); + + assertEquals(brokerId, remoteLogSegmentMetadata.brokerId()); + assertEquals(RemoteLogSegmentState.COPY_SEGMENT_STARTED, remoteLogSegmentMetadata.state()); + } + + private void verifyRemoteLogSegmentMetadataUpdate(RemoteLogSegmentMetadataUpdate remoteLogSegmentMetadataUpdate) { + assertEquals(leaderTopicIdPartition, remoteLogSegmentMetadataUpdate.remoteLogSegmentId().topicIdPartition()); + assertEquals(brokerId, remoteLogSegmentMetadataUpdate.brokerId()); + + assertEquals(RemoteLogSegmentState.COPY_SEGMENT_FINISHED, remoteLogSegmentMetadataUpdate.state()); + } + + private void verifyLogSegmentData(LogSegmentData logSegmentData, + OffsetIndex idx, + TimeIndex timeIdx, + TransactionIndex txnIndex, + File tempFile, + File mockProducerSnapshotIndex, + List expectedLeaderEpoch) throws IOException { + assertEquals(idx.file().getAbsolutePath(), logSegmentData.offsetIndex().toAbsolutePath().toString()); + assertEquals(timeIdx.file().getAbsolutePath(), logSegmentData.timeIndex().toAbsolutePath().toString()); + assertEquals(txnIndex.file().getPath(), logSegmentData.transactionIndex().get().toAbsolutePath().toString()); + assertEquals(tempFile.getAbsolutePath(), logSegmentData.logSegment().toAbsolutePath().toString()); + assertEquals(mockProducerSnapshotIndex.getAbsolutePath(), logSegmentData.producerSnapshotIndex().toAbsolutePath().toString()); + + assertEquals(RemoteLogManager.epochEntriesAsByteBuffer(expectedLeaderEpoch), logSegmentData.leaderEpochIndex()); + } + + @Test + void testGetClassLoaderAwareRemoteStorageManager() throws Exception { + ClassLoaderAwareRemoteStorageManager rsmManager = mock(ClassLoaderAwareRemoteStorageManager.class); + try (RemoteLogManager remoteLogManager = + new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + t -> Optional.empty(), + (topicPartition, offset) -> { }, + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return rsmManager; + } + } + ) { + assertEquals(rsmManager, remoteLogManager.storageManager()); + } + } + + private void verifyInCache(TopicIdPartition... topicIdPartitions) { + Arrays.stream(topicIdPartitions).forEach(topicIdPartition -> + assertDoesNotThrow(() -> remoteLogManager.fetchRemoteLogSegmentMetadata(topicIdPartition.topicPartition(), 0, 0L)) + ); + } + + private void verifyNotInCache(TopicIdPartition... topicIdPartitions) { + Arrays.stream(topicIdPartitions).forEach(topicIdPartition -> + assertThrows(KafkaException.class, () -> + remoteLogManager.fetchRemoteLogSegmentMetadata(topicIdPartition.topicPartition(), 0, 0L)) + ); + } + + @Test + void testTopicIdCacheUpdates() throws RemoteStorageException { + remoteLogManager.startup(); + Partition mockLeaderPartition = mockPartition(leaderTopicIdPartition); + Partition mockFollowerPartition = mockPartition(followerTopicIdPartition); + + when(remoteLogMetadataManager.remoteLogSegmentMetadata(any(TopicIdPartition.class), anyInt(), anyLong())) + .thenReturn(Optional.empty()); + verifyNotInCache(followerTopicIdPartition, leaderTopicIdPartition); + // Load topicId cache + remoteLogManager.onLeadershipChange(Collections.singleton(mockLeaderPartition), Collections.singleton(mockFollowerPartition), topicIds); + verify(remoteLogMetadataManager, times(1)) + .onPartitionLeadershipChanges(Collections.singleton(leaderTopicIdPartition), Collections.singleton(followerTopicIdPartition)); + verifyInCache(followerTopicIdPartition, leaderTopicIdPartition); + + // Evicts from topicId cache + remoteLogManager.stopPartitions(Collections.singleton(new StopPartition(leaderTopicIdPartition.topicPartition(), true, true, true)), (tp, ex) -> { }); + verifyNotInCache(leaderTopicIdPartition); + verifyInCache(followerTopicIdPartition); + + // Evicts from topicId cache + remoteLogManager.stopPartitions(Collections.singleton(new StopPartition(followerTopicIdPartition.topicPartition(), true, true, true)), (tp, ex) -> { }); + verifyNotInCache(leaderTopicIdPartition, followerTopicIdPartition); + } + + @Test + void testFetchRemoteLogSegmentMetadata() throws RemoteStorageException { + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + remoteLogManager.fetchRemoteLogSegmentMetadata(leaderTopicIdPartition.topicPartition(), 10, 100L); + remoteLogManager.fetchRemoteLogSegmentMetadata(followerTopicIdPartition.topicPartition(), 20, 200L); + + verify(remoteLogMetadataManager) + .remoteLogSegmentMetadata(eq(leaderTopicIdPartition), anyInt(), anyLong()); + verify(remoteLogMetadataManager) + .remoteLogSegmentMetadata(eq(followerTopicIdPartition), anyInt(), anyLong()); + } + + @Test + public void testFetchNextSegmentWithTxnIndex() throws RemoteStorageException { + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + remoteLogManager.fetchNextSegmentWithTxnIndex(leaderTopicIdPartition.topicPartition(), 10, 100L); + remoteLogManager.fetchNextSegmentWithTxnIndex(followerTopicIdPartition.topicPartition(), 20, 200L); + + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(leaderTopicIdPartition), anyInt(), anyLong()); + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(followerTopicIdPartition), anyInt(), anyLong()); + } + + @Test + public void testFindNextSegmentWithTxnIndex() throws RemoteStorageException { + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) + .thenReturn(Optional.of(0L)); + when(remoteLogMetadataManager.nextSegmentWithTxnIndex(any(TopicIdPartition.class), anyInt(), anyLong())) + .thenAnswer(ans -> { + TopicIdPartition topicIdPartition = ans.getArgument(0); + int leaderEpoch = ans.getArgument(1); + long offset = ans.getArgument(2); + RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()); + Map leaderEpochs = new TreeMap<>(); + leaderEpochs.put(leaderEpoch, offset); + RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata(segmentId, + offset, offset + 100, time.milliseconds(), 0, time.milliseconds(), 1024, leaderEpochs, true); + return Optional.of(metadata); + }); + + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + + // For offset-10, epoch is 0. + remoteLogManager.findNextSegmentWithTxnIndex(leaderTopicIdPartition.topicPartition(), 10, cache); + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(leaderTopicIdPartition), eq(0), eq(10L)); + } + + @Test + public void testFindNextSegmentWithTxnIndexTraversesNextEpoch() throws RemoteStorageException { + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())) + .thenReturn(Optional.of(0L)); + when(remoteLogMetadataManager.nextSegmentWithTxnIndex(any(TopicIdPartition.class), anyInt(), anyLong())) + .thenAnswer(ans -> { + TopicIdPartition topicIdPartition = ans.getArgument(0); + int leaderEpoch = ans.getArgument(1); + long offset = ans.getArgument(2); + Optional metadataOpt = Optional.empty(); + if (leaderEpoch == 2) { + RemoteLogSegmentId segmentId = new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()); + Map leaderEpochs = new TreeMap<>(); + leaderEpochs.put(leaderEpoch, offset); + RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata(segmentId, + offset, offset + 100, time.milliseconds(), 0, time.milliseconds(), 1024, leaderEpochs, true); + metadataOpt = Optional.of(metadata); + } + return metadataOpt; + }); + + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + + // For offset-10, epoch is 0. + // 1. For epoch 0 and 1, it returns empty and + // 2. For epoch 2, it returns the segment metadata. + remoteLogManager.findNextSegmentWithTxnIndex(leaderTopicIdPartition.topicPartition(), 10, cache); + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(leaderTopicIdPartition), eq(0), eq(10L)); + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(leaderTopicIdPartition), eq(1), eq(100L)); + verify(remoteLogMetadataManager) + .nextSegmentWithTxnIndex(eq(leaderTopicIdPartition), eq(2), eq(200L)); + } + + @Test + void testOnLeadershipChangeWillInvokeHandleLeaderOrFollowerPartitions() { + remoteLogManager.startup(); + RemoteLogManager spyRemoteLogManager = spy(remoteLogManager); + spyRemoteLogManager.onLeadershipChange( + Collections.emptySet(), Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + verify(spyRemoteLogManager).doHandleFollowerPartition(eq(followerTopicIdPartition)); + + Mockito.reset(spyRemoteLogManager); + + spyRemoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); + verify(spyRemoteLogManager).doHandleLeaderPartition(eq(leaderTopicIdPartition), eq(false)); + } + + private MemoryRecords records(long timestamp, + long initialOffset, + int partitionLeaderEpoch) { + return MemoryRecords.withRecords(initialOffset, Compression.NONE, partitionLeaderEpoch, + new SimpleRecord(timestamp - 1, "first message".getBytes()), + new SimpleRecord(timestamp + 1, "second message".getBytes()), + new SimpleRecord(timestamp + 2, "third message".getBytes()) + ); + } + + @Test + void testFindOffsetByTimestamp() throws IOException, RemoteStorageException { + remoteLogManager.startup(); + TopicPartition tp = leaderTopicIdPartition.topicPartition(); + + long ts = time.milliseconds(); + long startOffset = 120; + int targetLeaderEpoch = 10; + + TreeMap validSegmentEpochs = new TreeMap<>(); + validSegmentEpochs.put(targetLeaderEpoch, startOffset); + + LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + leaderEpochFileCache.assign(4, 99L); + leaderEpochFileCache.assign(5, 99L); + leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); + leaderEpochFileCache.assign(12, 500L); + + doTestFindOffsetByTimestamp(ts, startOffset, targetLeaderEpoch, validSegmentEpochs, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + + // Fetching message for timestamp `ts` will return the message with startOffset+1, and `ts+1` as there are no + // messages starting with the startOffset and with `ts`. + Optional maybeTimestampAndOffset1 = remoteLogManager.findOffsetByTimestamp(tp, ts, startOffset, leaderEpochFileCache); + assertEquals(Optional.of(new FileRecords.TimestampAndOffset(ts + 1, startOffset + 1, Optional.of(targetLeaderEpoch))), maybeTimestampAndOffset1); + + // Fetching message for `ts+2` will return the message with startOffset+2 and its timestamp value is `ts+2`. + Optional maybeTimestampAndOffset2 = remoteLogManager.findOffsetByTimestamp(tp, ts + 2, startOffset, leaderEpochFileCache); + assertEquals(Optional.of(new FileRecords.TimestampAndOffset(ts + 2, startOffset + 2, Optional.of(targetLeaderEpoch))), maybeTimestampAndOffset2); + + // Fetching message for `ts+3` will return None as there are no records with timestamp >= ts+3. + Optional maybeTimestampAndOffset3 = remoteLogManager.findOffsetByTimestamp(tp, ts + 3, startOffset, leaderEpochFileCache); + assertEquals(Optional.empty(), maybeTimestampAndOffset3); + } + + @Test + void testFindOffsetByTimestampWithInvalidEpochSegments() throws IOException, RemoteStorageException { + remoteLogManager.startup(); + TopicPartition tp = leaderTopicIdPartition.topicPartition(); + + long ts = time.milliseconds(); + long startOffset = 120; + int targetLeaderEpoch = 10; + + TreeMap validSegmentEpochs = new TreeMap<>(); + validSegmentEpochs.put(targetLeaderEpoch - 1, startOffset - 1); // invalid epochs not aligning with leader epoch cache + validSegmentEpochs.put(targetLeaderEpoch, startOffset); + + LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + leaderEpochFileCache.assign(4, 99L); + leaderEpochFileCache.assign(5, 99L); + leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); + leaderEpochFileCache.assign(12, 500L); + + doTestFindOffsetByTimestamp(ts, startOffset, targetLeaderEpoch, validSegmentEpochs, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + + // Fetch offsets for this segment returns empty as the segment epochs are not with in the leader epoch cache. + Optional maybeTimestampAndOffset1 = remoteLogManager.findOffsetByTimestamp(tp, ts, startOffset, leaderEpochFileCache); + assertEquals(Optional.empty(), maybeTimestampAndOffset1); + + Optional maybeTimestampAndOffset2 = remoteLogManager.findOffsetByTimestamp(tp, ts + 2, startOffset, leaderEpochFileCache); + assertEquals(Optional.empty(), maybeTimestampAndOffset2); + + Optional maybeTimestampAndOffset3 = remoteLogManager.findOffsetByTimestamp(tp, ts + 3, startOffset, leaderEpochFileCache); + assertEquals(Optional.empty(), maybeTimestampAndOffset3); + } + + @Test + void testFindOffsetByTimestampWithSegmentNotReady() throws IOException, RemoteStorageException { + remoteLogManager.startup(); + TopicPartition tp = leaderTopicIdPartition.topicPartition(); + + long ts = time.milliseconds(); + long startOffset = 120; + int targetLeaderEpoch = 10; + + TreeMap validSegmentEpochs = new TreeMap<>(); + validSegmentEpochs.put(targetLeaderEpoch, startOffset); + + LeaderEpochFileCache leaderEpochFileCache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + leaderEpochFileCache.assign(4, 99L); + leaderEpochFileCache.assign(5, 99L); + leaderEpochFileCache.assign(targetLeaderEpoch, startOffset); + leaderEpochFileCache.assign(12, 500L); + + doTestFindOffsetByTimestamp(ts, startOffset, targetLeaderEpoch, validSegmentEpochs, RemoteLogSegmentState.COPY_SEGMENT_STARTED); + + Optional maybeTimestampAndOffset = remoteLogManager.findOffsetByTimestamp(tp, ts, startOffset, leaderEpochFileCache); + assertEquals(Optional.empty(), maybeTimestampAndOffset); + } + + private void doTestFindOffsetByTimestamp(long ts, long startOffset, int targetLeaderEpoch, + TreeMap validSegmentEpochs, + RemoteLogSegmentState state) throws IOException, RemoteStorageException { + TopicPartition tp = leaderTopicIdPartition.topicPartition(); + RemoteLogSegmentId remoteLogSegmentId = new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()); + + RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); + when(segmentMetadata.remoteLogSegmentId()).thenReturn(remoteLogSegmentId); + when(segmentMetadata.maxTimestampMs()).thenReturn(ts + 2); + when(segmentMetadata.startOffset()).thenReturn(startOffset); + when(segmentMetadata.endOffset()).thenReturn(startOffset + 2); + when(segmentMetadata.segmentLeaderEpochs()).thenReturn(validSegmentEpochs); + when(segmentMetadata.state()).thenReturn(state); + + File tpDir = new File(logDir, tp.toString()); + Files.createDirectory(tpDir.toPath()); + File txnIdxFile = new File(tpDir, "txn-index" + UnifiedLog.TxnIndexFileSuffix()); + txnIdxFile.createNewFile(); + when(remoteStorageManager.fetchIndex(any(RemoteLogSegmentMetadata.class), any(IndexType.class))) + .thenAnswer(ans -> { + RemoteLogSegmentMetadata metadata = ans.getArgument(0); + IndexType indexType = ans.getArgument(1); + int maxEntries = (int) (metadata.endOffset() - metadata.startOffset()); + OffsetIndex offsetIdx = new OffsetIndex(new File(tpDir, metadata.startOffset() + UnifiedLog.IndexFileSuffix()), + metadata.startOffset(), maxEntries * 8); + TimeIndex timeIdx = new TimeIndex(new File(tpDir, metadata.startOffset() + UnifiedLog.TimeIndexFileSuffix()), + metadata.startOffset(), maxEntries * 12); + switch (indexType) { + case OFFSET: + return Files.newInputStream(offsetIdx.file().toPath()); + case TIMESTAMP: + return Files.newInputStream(timeIdx.file().toPath()); + case TRANSACTION: + return Files.newInputStream(txnIdxFile.toPath()); + } + return null; + }); + + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(ans -> { + int leaderEpoch = ans.getArgument(1); + if (leaderEpoch == targetLeaderEpoch) + return Collections.singleton(segmentMetadata).iterator(); + else + return Collections.emptyIterator(); + }); + + // 3 messages are added with offset, and timestamp as below + // startOffset , ts-1 + // startOffset+1 , ts+1 + // startOffset+2 , ts+2 + when(remoteStorageManager.fetchLogSegment(segmentMetadata, 0)) + .thenAnswer(a -> new ByteArrayInputStream(records(ts, startOffset, targetLeaderEpoch).buffer().array())); + + when(mockLog.logEndOffset()).thenReturn(600L); + + remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); + } + + @Flaky("KAFKA-17779") + @Test + void testFetchOffsetByTimestampWithTieredStorageDoesNotFetchIndexWhenExistsLocally() throws Exception { + TopicPartition tp = new TopicPartition("sample", 0); + TopicIdPartition tpId = new TopicIdPartition(Uuid.randomUuid(), tp); + Map topicIds = Collections.singletonMap(tp.topic(), tpId.topicId()); + + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(0, 0L)); + epochEntries.add(new EpochEntry(1, 20L)); + epochEntries.add(new EpochEntry(3, 50L)); + epochEntries.add(new EpochEntry(4, 100L)); + epochEntries.add(new EpochEntry(5, 200L)); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + long timestamp = time.milliseconds(); + RemoteLogSegmentMetadata metadata0 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(tpId, Uuid.randomUuid()), + 0, 99, timestamp, brokerId, timestamp, 1024, Optional.empty(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, truncateAndGetLeaderEpochs(epochEntries, 0L, 99L)); + RemoteLogSegmentMetadata metadata1 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(tpId, Uuid.randomUuid()), + 100, 199, timestamp + 1, brokerId, timestamp + 1, 1024, Optional.empty(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, truncateAndGetLeaderEpochs(epochEntries, 100L, 199L)); + // Note that the metadata2 is in COPY_SEGMENT_STARTED state + RemoteLogSegmentMetadata metadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(tpId, Uuid.randomUuid()), + 100, 299, timestamp + 2, brokerId, timestamp + 2, 1024, Optional.empty(), RemoteLogSegmentState.COPY_SEGMENT_STARTED, truncateAndGetLeaderEpochs(epochEntries, 200L, 299L)); + + when(remoteLogMetadataManager.listRemoteLogSegments(eq(tpId), anyInt())) + .thenAnswer(ans -> { + int epoch = ans.getArgument(1); + if (epoch < 4) { + return Collections.singletonList(metadata0).iterator(); + } else if (epoch == 4) { + return Arrays.asList(metadata1, metadata2).iterator(); + } else { + throw new IllegalArgumentException("Unexpected call!"); + } + }); + // Different (timestamp, offset) is chosen for remote and local read result to assert the behaviour + // 9999 -> refers to read from local, 999 -> refers to read from remote + FileRecords.TimestampAndOffset expectedLocalResult = new FileRecords.TimestampAndOffset(timestamp + 9999, 9999, Optional.of(Integer.MAX_VALUE)); + FileRecords.TimestampAndOffset expectedRemoteResult = new FileRecords.TimestampAndOffset(timestamp + 999, 999, Optional.of(Integer.MAX_VALUE)); + Partition mockFollowerPartition = mockPartition(tpId); + + LogSegment logSegmentBaseOffset50 = mockLogSegment(50L, timestamp, null); + LogSegment logSegmentBaseOffset100 = mockLogSegment(100L, timestamp + 1, expectedLocalResult); + LogSegment logSegmentBaseOffset101 = mockLogSegment(101L, timestamp + 1, expectedLocalResult); + + // Constants representing the states of local log segments + final int twoSegmentsBaseOffsets50and100 = 0; + final int oneSegmentBaseOffset100 = 1; + final int oneSegmentBaseOffset101 = 2; + + AtomicInteger localLogOffsetState = new AtomicInteger(twoSegmentsBaseOffsets50and100); + + when(mockLog.logSegments()).thenAnswer(invocation -> { + if (localLogOffsetState.get() == twoSegmentsBaseOffsets50and100) { + return Arrays.asList(logSegmentBaseOffset50, logSegmentBaseOffset100); + } else if (localLogOffsetState.get() == oneSegmentBaseOffset100) { + return Collections.singletonList(logSegmentBaseOffset100); + } else if (localLogOffsetState.get() == oneSegmentBaseOffset101) { + return Collections.singletonList(logSegmentBaseOffset101); + } else { + throw new IllegalStateException("Unexpected localLogOffsetState"); + } + }); + + when(mockLog.logEndOffset()).thenReturn(300L); + remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + partition -> Optional.of(mockLog), + (topicPartition, offset) -> currentLogStartOffset.set(offset), + brokerTopicStats, metrics) { + @Override + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + @Override + Optional lookupTimestamp(RemoteLogSegmentMetadata rlsMetadata, long timestamp, long startingOffset) { + return Optional.of(expectedRemoteResult); + } + }; + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange(Collections.emptySet(), Collections.singleton(mockFollowerPartition), topicIds); + + // Read the offset from the remote storage, since the local-log starts from offset 50L and the message with `timestamp` does not exist in the local log + assertEquals(Optional.of(expectedRemoteResult), remoteLogManager.findOffsetByTimestamp(tp, timestamp, 0L, cache)); + // Short-circuits the read from the remote storage since the local-log starts from offset 50L and + // the message with (timestamp + 1) exists in the segment with base_offset: 100 which is available locally. + assertEquals(Optional.of(expectedLocalResult), remoteLogManager.findOffsetByTimestamp(tp, timestamp + 1, 0L, cache)); + + // Move the local-log start offset to 100L, still the read from the remote storage should be short-circuited + // as the message with (timestamp + 1) exists in the local log + localLogOffsetState.set(oneSegmentBaseOffset100); + assertEquals(Optional.of(expectedLocalResult), remoteLogManager.findOffsetByTimestamp(tp, timestamp + 1, 0L, cache)); + + // Move the local log start offset to 101L, now message with (timestamp + 1) does not exist in the local log and + // the indexes needs to be fetched from the remote storage + localLogOffsetState.set(oneSegmentBaseOffset101); + assertEquals(Optional.of(expectedRemoteResult), remoteLogManager.findOffsetByTimestamp(tp, timestamp + 1, 0L, cache)); + } + + private LogSegment mockLogSegment(long baseOffset, + long largestTimestamp, + FileRecords.TimestampAndOffset timestampAndOffset) throws IOException { + LogSegment logSegment = mock(LogSegment.class); + when(logSegment.baseOffset()).thenReturn(baseOffset); + when(logSegment.largestTimestamp()).thenReturn(largestTimestamp); + if (timestampAndOffset != null) { + when(logSegment.findOffsetByTimestamp(anyLong(), anyLong())) + .thenReturn(Optional.of(timestampAndOffset)); + } + return logSegment; + } + + @Test + void testIdempotentClose() throws IOException { + remoteLogManager.close(); + remoteLogManager.close(); + InOrder inorder = inOrder(remoteStorageManager, remoteLogMetadataManager); + inorder.verify(remoteStorageManager, times(1)).close(); + inorder.verify(remoteLogMetadataManager, times(1)).close(); + } + + @Test + public void testRemoveMetricsOnClose() throws IOException { + try (MockedConstruction mockMetricsGroupCtor = mockConstruction(KafkaMetricsGroup.class)) { + RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, + time, tp -> Optional.of(mockLog), (topicPartition, offset) -> { + }, brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }; + // Close RemoteLogManager so that metrics are removed + remoteLogManager.close(); + + KafkaMetricsGroup mockRlmMetricsGroup = mockMetricsGroupCtor.constructed().get(0); + KafkaMetricsGroup mockThreadPoolMetricsGroup = mockMetricsGroupCtor.constructed().get(1); + + List remoteLogManagerMetricNames = Arrays.asList( + REMOTE_LOG_MANAGER_TASKS_AVG_IDLE_PERCENT_METRIC, + REMOTE_LOG_READER_FETCH_RATE_AND_TIME_METRIC); + Set remoteStorageThreadPoolMetricNames = REMOTE_STORAGE_THREAD_POOL_METRICS; + + verify(mockRlmMetricsGroup, times(1)).newGauge(any(MetricName.class), any()); + verify(mockRlmMetricsGroup, times(1)).newTimer(any(MetricName.class), any(), any()); + // Verify that the RemoteLogManager metrics are removed + remoteLogManagerMetricNames.forEach(metricName -> verify(mockRlmMetricsGroup).removeMetric(metricName)); + + verify(mockThreadPoolMetricsGroup, times(remoteStorageThreadPoolMetricNames.size())).newGauge(anyString(), any()); + // Verify that the RemoteStorageThreadPool metrics are removed + remoteStorageThreadPoolMetricNames.forEach(metricName -> verify(mockThreadPoolMetricsGroup).removeMetric(metricName)); + + verifyNoMoreInteractions(mockRlmMetricsGroup); + verifyNoMoreInteractions(mockThreadPoolMetricsGroup); + } + } + + private static RemoteLogSegmentMetadata createRemoteLogSegmentMetadata(long startOffset, long endOffset, Map segmentEpochs) { + return new RemoteLogSegmentMetadata( + new RemoteLogSegmentId(new TopicIdPartition(Uuid.randomUuid(), + new TopicPartition("topic", 0)), Uuid.randomUuid()), + startOffset, endOffset, + 100000L, + 1, + 100000L, + 1000, + Optional.empty(), + RemoteLogSegmentState.COPY_SEGMENT_FINISHED, segmentEpochs); + } + + @Test + public void testBuildFilteredLeaderEpochMap() { + TreeMap leaderEpochToStartOffset = new TreeMap<>(); + leaderEpochToStartOffset.put(0, 0L); + leaderEpochToStartOffset.put(1, 0L); + leaderEpochToStartOffset.put(2, 0L); + leaderEpochToStartOffset.put(3, 30L); + leaderEpochToStartOffset.put(4, 40L); + leaderEpochToStartOffset.put(5, 60L); + leaderEpochToStartOffset.put(6, 60L); + leaderEpochToStartOffset.put(7, 70L); + leaderEpochToStartOffset.put(8, 70L); + + TreeMap expectedLeaderEpochs = new TreeMap<>(); + expectedLeaderEpochs.put(2, 0L); + expectedLeaderEpochs.put(3, 30L); + expectedLeaderEpochs.put(4, 40L); + expectedLeaderEpochs.put(6, 60L); + expectedLeaderEpochs.put(8, 70L); + + NavigableMap refinedLeaderEpochMap = RemoteLogManager.buildFilteredLeaderEpochMap(leaderEpochToStartOffset); + assertEquals(expectedLeaderEpochs, refinedLeaderEpochMap); + } + + @Test + public void testRemoteSegmentWithinLeaderEpochs() { + // Test whether a remote segment is within the leader epochs + final long logEndOffset = 90L; + + TreeMap leaderEpochToStartOffset = new TreeMap<>(); + leaderEpochToStartOffset.put(0, 0L); + leaderEpochToStartOffset.put(1, 10L); + leaderEpochToStartOffset.put(2, 20L); + leaderEpochToStartOffset.put(3, 30L); + leaderEpochToStartOffset.put(4, 40L); + leaderEpochToStartOffset.put(5, 50L); + leaderEpochToStartOffset.put(7, 70L); + + // Test whether a remote segment's epochs/offsets(multiple) are within the range of leader epochs + TreeMap segmentEpochs1 = new TreeMap<>(); + segmentEpochs1.put(1, 15L); + segmentEpochs1.put(2, 20L); + segmentEpochs1.put(3, 30L); + + assertTrue(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 35, + segmentEpochs1), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a remote segment's epochs/offsets(single) are within the range of leader epochs + TreeMap segmentEpochs2 = new TreeMap<>(); + segmentEpochs2.put(1, 15L); + assertTrue(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 19, + segmentEpochs2), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a remote segment's start offset is same as the offset of the respective leader epoch entry. + TreeMap segmentEpochs3 = new TreeMap<>(); + segmentEpochs3.put(0, 0L); // same as leader epoch's start offset + assertTrue(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 0, + 5, + segmentEpochs3), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a remote segment's start offset is same as the offset of the respective leader epoch entry. + TreeMap segmentEpochs4 = new TreeMap<>(); + segmentEpochs4.put(7, 70L); // same as leader epoch's start offset + assertTrue(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 70, + 75, + segmentEpochs4), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a remote segment's end offset is same as the end offset of the respective leader epoch entry. + TreeMap segmentEpochs5 = new TreeMap<>(); + segmentEpochs5.put(1, 15L); + segmentEpochs5.put(2, 20L); + + assertTrue(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 29, // same as end offset for epoch 2 in leaderEpochToStartOffset + segmentEpochs5), logEndOffset, leaderEpochToStartOffset)); + + // Test whether any of the epoch's is not with in the leader epoch chain. + TreeMap segmentEpochs6 = new TreeMap<>(); + segmentEpochs6.put(5, 55L); + segmentEpochs6.put(6, 60L); // epoch 6 exists here but it is missing in leaderEpochToStartOffset + segmentEpochs6.put(7, 70L); + + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 55, + 85, + segmentEpochs6), logEndOffset, leaderEpochToStartOffset)); + + // Test whether an epoch existing in remote segment does not exist in leader epoch chain. + TreeMap segmentEpochs7 = new TreeMap<>(); + segmentEpochs7.put(1, 15L); + segmentEpochs7.put(2, 20L); // epoch 3 is missing here which exists in leaderEpochToStartOffset + segmentEpochs7.put(4, 40L); + + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 45, + segmentEpochs7), logEndOffset, leaderEpochToStartOffset)); + + // Test a remote segment having larger end offset than the log end offset + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 95, // larger than log end offset + leaderEpochToStartOffset), logEndOffset, leaderEpochToStartOffset)); + + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 90, // equal to the log end offset + leaderEpochToStartOffset), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a segment's first offset is earlier to the respective epoch's start offset + TreeMap segmentEpochs9 = new TreeMap<>(); + segmentEpochs9.put(1, 5L); + segmentEpochs9.put(2, 20L); + + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 5, // earlier to epoch 1's start offset + 25, + segmentEpochs9), logEndOffset, leaderEpochToStartOffset)); + + // Test whether a segment's last offset is more than the respective epoch's end offset + TreeMap segmentEpochs10 = new TreeMap<>(); + segmentEpochs10.put(1, 15L); + segmentEpochs10.put(2, 20L); + assertFalse(isRemoteSegmentWithinLeaderEpochs(createRemoteLogSegmentMetadata( + 15, + 35, // more than epoch 2's end offset + segmentEpochs10), logEndOffset, leaderEpochToStartOffset)); + } + + @Test + public void testRemoteSegmentWithinLeaderEpochsForOverlappingSegments() { + NavigableMap leaderEpochCache = new TreeMap<>(); + leaderEpochCache.put(7, 51L); + leaderEpochCache.put(9, 100L); + + TreeMap segment1Epochs = new TreeMap<>(); + segment1Epochs.put(5, 14L); + segment1Epochs.put(7, 15L); + segment1Epochs.put(9, 100L); + RemoteLogSegmentMetadata segment1 = createRemoteLogSegmentMetadata(14, 150, segment1Epochs); + assertTrue(isRemoteSegmentWithinLeaderEpochs(segment1, 210, leaderEpochCache)); + + // segment2Epochs are not within the leaderEpochCache + TreeMap segment2Epochs = new TreeMap<>(); + segment2Epochs.put(2, 5L); + segment2Epochs.put(3, 6L); + RemoteLogSegmentMetadata segment2 = createRemoteLogSegmentMetadata(2, 7, segment2Epochs); + assertFalse(isRemoteSegmentWithinLeaderEpochs(segment2, 210, leaderEpochCache)); + + // segment3Epochs are not within the leaderEpochCache + TreeMap segment3Epochs = new TreeMap<>(); + segment3Epochs.put(7, 15L); + segment3Epochs.put(9, 100L); + segment3Epochs.put(10, 200L); + RemoteLogSegmentMetadata segment3 = createRemoteLogSegmentMetadata(15, 250, segment3Epochs); + assertFalse(isRemoteSegmentWithinLeaderEpochs(segment3, 210, leaderEpochCache)); + + // segment4Epochs are not within the leaderEpochCache + TreeMap segment4Epochs = new TreeMap<>(); + segment4Epochs.put(8, 75L); + RemoteLogSegmentMetadata segment4 = createRemoteLogSegmentMetadata(75, 100, segment4Epochs); + assertFalse(isRemoteSegmentWithinLeaderEpochs(segment4, 210, leaderEpochCache)); + + // segment5Epochs does not match with the leaderEpochCache + TreeMap segment5Epochs = new TreeMap<>(); + segment5Epochs.put(7, 15L); + segment5Epochs.put(9, 101L); + RemoteLogSegmentMetadata segment5 = createRemoteLogSegmentMetadata(15, 150, segment5Epochs); + assertFalse(isRemoteSegmentWithinLeaderEpochs(segment5, 210, leaderEpochCache)); + + // segment6Epochs does not match with the leaderEpochCache + TreeMap segment6Epochs = new TreeMap<>(); + segment6Epochs.put(9, 99L); + RemoteLogSegmentMetadata segment6 = createRemoteLogSegmentMetadata(99, 150, segment6Epochs); + assertFalse(isRemoteSegmentWithinLeaderEpochs(segment6, 210, leaderEpochCache)); + } + + @Test + public void testCandidateLogSegmentsSkipsActiveSegment() { + UnifiedLog log = mock(UnifiedLog.class); + LogSegment segment1 = mock(LogSegment.class); + LogSegment segment2 = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(segment1.baseOffset()).thenReturn(5L); + when(segment2.baseOffset()).thenReturn(10L); + when(activeSegment.baseOffset()).thenReturn(15L); + + when(log.logSegments(5L, Long.MAX_VALUE)) + .thenReturn(CollectionConverters.asScala(Arrays.asList(segment1, segment2, activeSegment))); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + List expected = + Arrays.asList( + new RemoteLogManager.EnrichedLogSegment(segment1, 10L), + new RemoteLogManager.EnrichedLogSegment(segment2, 15L) + ); + List actual = task.candidateLogSegments(log, 5L, 20L); + assertEquals(expected, actual); + } + + @Test + public void testCandidateLogSegmentsSkipsSegmentsAfterLastStableOffset() { + UnifiedLog log = mock(UnifiedLog.class); + LogSegment segment1 = mock(LogSegment.class); + LogSegment segment2 = mock(LogSegment.class); + LogSegment segment3 = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + when(segment1.baseOffset()).thenReturn(5L); + when(segment2.baseOffset()).thenReturn(10L); + when(segment3.baseOffset()).thenReturn(15L); + when(activeSegment.baseOffset()).thenReturn(20L); + + when(log.logSegments(5L, Long.MAX_VALUE)) + .thenReturn(CollectionConverters.asScala(Arrays.asList(segment1, segment2, segment3, activeSegment))); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + List expected = + Arrays.asList( + new RemoteLogManager.EnrichedLogSegment(segment1, 10L), + new RemoteLogManager.EnrichedLogSegment(segment2, 15L) + ); + List actual = task.candidateLogSegments(log, 5L, 15L); + assertEquals(expected, actual); + } + + @Test + public void testRemoteSizeData() { + Supplier[] invalidRetentionSizeData = + new Supplier[]{ + () -> new RemoteLogManager.RetentionSizeData(10, 0), + () -> new RemoteLogManager.RetentionSizeData(10, -1), + () -> new RemoteLogManager.RetentionSizeData(-1, 10), + () -> new RemoteLogManager.RetentionSizeData(-1, -1), + () -> new RemoteLogManager.RetentionSizeData(-1, 0) + }; + + for (Supplier invalidRetentionSizeDataEntry : invalidRetentionSizeData) { + assertThrows(IllegalArgumentException.class, invalidRetentionSizeDataEntry::get); + } + } + + @Test + public void testRemoteSizeTime() { + Supplier[] invalidRetentionTimeData = + new Supplier[] { + () -> new RemoteLogManager.RetentionTimeData(-1, 10), + () -> new RemoteLogManager.RetentionTimeData(10, -1), + }; + + for (Supplier invalidRetentionTimeDataEntry : invalidRetentionTimeData) { + assertThrows(IllegalArgumentException.class, invalidRetentionTimeDataEntry::get); + } + } + + @Test + public void testStopPartitionsWithoutDeletion() throws RemoteStorageException { + remoteLogManager.startup(); + BiConsumer errorHandler = (topicPartition, throwable) -> fail("shouldn't be called"); + Set partitions = new HashSet<>(); + partitions.add(new StopPartition(leaderTopicIdPartition.topicPartition(), true, false, false)); + partitions.add(new StopPartition(followerTopicIdPartition.topicPartition(), true, false, false)); + remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), + Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + assertNotNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); + + remoteLogManager.stopPartitions(partitions, errorHandler); + assertNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); + assertNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNull(remoteLogManager.followerTask(followerTopicIdPartition)); + verify(remoteLogMetadataManager, times(1)).onStopPartitions(any()); + verify(remoteStorageManager, times(0)).deleteLogSegmentData(any()); + verify(remoteLogMetadataManager, times(0)).updateRemoteLogSegmentMetadata(any()); + } + + @Test + public void testStopPartitionsWithDeletion() throws RemoteStorageException { + remoteLogManager.startup(); + BiConsumer errorHandler = + (topicPartition, ex) -> fail("shouldn't be called: " + ex); + Set partitions = new HashSet<>(); + partitions.add(new StopPartition(leaderTopicIdPartition.topicPartition(), true, true, true)); + partitions.add(new StopPartition(followerTopicIdPartition.topicPartition(), true, true, true)); + + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition))) + .thenAnswer(invocation -> listRemoteLogSegmentMetadata(leaderTopicIdPartition, 5, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(followerTopicIdPartition))) + .thenAnswer(invocation -> listRemoteLogSegmentMetadata(followerTopicIdPartition, 3, 100, 1024, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED).iterator()); + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any())) + .thenReturn(dummyFuture); + + remoteLogManager.onLeadershipChange(Collections.singleton(mockPartition(leaderTopicIdPartition)), + Collections.singleton(mockPartition(followerTopicIdPartition)), topicIds); + assertNotNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); + + remoteLogManager.stopPartitions(partitions, errorHandler); + assertNull(remoteLogManager.leaderCopyTask(leaderTopicIdPartition)); + assertNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNull(remoteLogManager.followerTask(followerTopicIdPartition)); + verify(remoteLogMetadataManager, times(1)).onStopPartitions(any()); + verify(remoteStorageManager, times(8)).deleteLogSegmentData(any()); + verify(remoteLogMetadataManager, times(16)).updateRemoteLogSegmentMetadata(any()); + } + + /** + * This test asserts that the newly elected leader for a partition is able to find the log-start-offset. + * Note that the case tested here is that the previous leader deleted the log segments up-to offset 500. And, the + * log-start-offset didn't propagate to the replicas before the leader-election. + */ + @Test + public void testFindLogStartOffset() throws RemoteStorageException, IOException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(0, 0L)); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + long timestamp = time.milliseconds(); + int segmentSize = 1024; + List segmentMetadataList = Arrays.asList( + new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 500, 539, timestamp, brokerId, timestamp, segmentSize, truncateAndGetLeaderEpochs(epochEntries, 500L, 539L)), + new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 540, 700, timestamp, brokerId, timestamp, segmentSize, truncateAndGetLeaderEpochs(epochEntries, 540L, 700L)) + ); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(invocation -> { + int epoch = invocation.getArgument(1); + if (epoch == 1) + return segmentMetadataList.iterator(); + else + return Collections.emptyIterator(); + }); + try (RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats, metrics) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + assertEquals(500L, remoteLogManager.findLogStartOffset(leaderTopicIdPartition, mockLog)); + } + } + + @Test + public void testFindLogStartOffsetFallbackToLocalLogStartOffsetWhenRemoteIsEmpty() throws RemoteStorageException, IOException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.localLogStartOffset()).thenReturn(250L); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenReturn(Collections.emptyIterator()); + + try (RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats, metrics) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + assertEquals(250L, remoteLogManager.findLogStartOffset(leaderTopicIdPartition, mockLog)); + } + } + + @Test + public void testLogStartOffsetUpdatedOnStartup() throws RemoteStorageException, IOException, InterruptedException { + List epochEntries = new ArrayList<>(); + epochEntries.add(new EpochEntry(1, 250L)); + epochEntries.add(new EpochEntry(2, 550L)); + checkpoint.write(epochEntries); + + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + RemoteLogSegmentMetadata metadata = mock(RemoteLogSegmentMetadata.class); + when(metadata.startOffset()).thenReturn(600L); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(invocation -> { + int epoch = invocation.getArgument(1); + if (epoch == 2) + return Collections.singletonList(metadata).iterator(); + else + return Collections.emptyIterator(); + }); + + AtomicLong logStartOffset = new AtomicLong(0); + try (RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> logStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + task.copyLogSegmentsToRemote(mockLog); + assertEquals(600L, logStartOffset.get()); + } + } + + @Test + public void testDeletionSkippedForSegmentsBeingCopied() throws RemoteStorageException, IOException, InterruptedException, ExecutionException { + RemoteLogMetadataManager remoteLogMetadataManager = new NoOpRemoteLogMetadataManager() { + List metadataList = new ArrayList<>(); + + @Override + public synchronized CompletableFuture addRemoteLogSegmentMetadata(RemoteLogSegmentMetadata remoteLogSegmentMetadata) { + metadataList.add(remoteLogSegmentMetadata); + return CompletableFuture.runAsync(() -> { }); + } + + @Override + public synchronized CompletableFuture updateRemoteLogSegmentMetadata(RemoteLogSegmentMetadataUpdate remoteLogSegmentMetadataUpdate) { + metadataList = metadataList.stream() + .map(m -> { + if (m.remoteLogSegmentId().equals(remoteLogSegmentMetadataUpdate.remoteLogSegmentId())) { + return m.createWithUpdates(remoteLogSegmentMetadataUpdate); + } + return m; + }) + .collect(Collectors.toList()); + return CompletableFuture.runAsync(() -> { }); + } + + @Override + public Optional highestOffsetForEpoch(TopicIdPartition topicIdPartition, int leaderEpoch) { + return Optional.of(-1L); + } + + @Override + public synchronized Iterator listRemoteLogSegments(TopicIdPartition topicIdPartition) { + return metadataList.iterator(); + } + + @Override + public synchronized Iterator listRemoteLogSegments(TopicIdPartition topicIdPartition, int leaderEpoch) { + return metadataList.iterator(); + } + }; + + remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> currentLogStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + public RLMQuotaManager createRLMCopyQuotaManager() { + return rlmCopyQuotaManager; + } + public Duration quotaTimeout() { + return Duration.ofMillis(100); + } + @Override + long findLogStartOffset(TopicIdPartition topicIdPartition, UnifiedLog log) { + return 0L; + } + }; + + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + long lastStableOffset = 150L; + long logEndOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(Collections.singletonList(epochEntry0)); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + File tempFile = TestUtils.tempFile(); + FileRecords fileRecords = mock(FileRecords.class); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + + when(oldSegment.log()).thenReturn(fileRecords); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + File mockProducerSnapshotIndex = TestUtils.tempFile(); + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + when(mockLog.lastStableOffset()).thenReturn(lastStableOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + + File tempDir = TestUtils.tempDirectory(); + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CountDownLatch copyLogSegmentLatch = new CountDownLatch(1); + CountDownLatch copySegmentDataLatch = new CountDownLatch(1); + doAnswer(ans -> { + // unblock the expiration thread + copySegmentDataLatch.countDown(); + // introduce a delay in copying segment data + copyLogSegmentLatch.await(5000, TimeUnit.MILLISECONDS); + return Optional.empty(); + }).when(remoteStorageManager).copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class)); + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime); + + // Set up expiration behaviour + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", 0L); + logProps.put("retention.ms", -1L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + RemoteLogManager.RLMCopyTask copyTask = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + Thread copyThread = new Thread(() -> { + try { + copyTask.copyLogSegmentsToRemote(mockLog); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + }); + + RemoteLogManager.RLMExpirationTask expirationTask = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + Thread expirationThread = new Thread(() -> { + try { + // wait until copy thread has started copying segment data + copySegmentDataLatch.await(); + expirationTask.cleanupExpiredRemoteLogSegments(); + copyLogSegmentLatch.countDown(); + } catch (RemoteStorageException | ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } + }); + + copyThread.start(); + expirationThread.start(); + + copyThread.join(10_000); + expirationThread.join(1_000); + + // Verify no segments were deleted + verify(remoteStorageManager, times(0)).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + + // Run expiration task again and verify the copied segment was deleted + RemoteLogSegmentMetadata remoteLogSegmentMetadata = remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition).next(); + expirationTask.cleanupExpiredRemoteLogSegments(); + verify(remoteStorageManager, times(1)).deleteLogSegmentData(remoteLogSegmentMetadata); + } + + @ParameterizedTest(name = "testDeletionOnRetentionBreachedSegments retentionSize={0} retentionMs={1}") + @CsvSource(value = {"0, -1", "-1, 0"}) + public void testDeletionOnRetentionBreachedSegments(long retentionSize, + long retentionMs) + throws RemoteStorageException, ExecutionException, InterruptedException { + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", retentionMs); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + List epochEntries = Collections.singletonList(epochEntry0); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenReturn(CompletableFuture.runAsync(() -> { })); + + // Verify the metrics for remote deletes and for failures is zero before attempt to delete segments + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + + + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + task.cleanupExpiredRemoteLogSegments(); + + assertEquals(200L, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(1)); + + // Verify the metric for remote delete is updated correctly + assertEquals(2, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + // Verify we did not report any failure for remote deletes + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(2, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + } + + @ParameterizedTest(name = "testDeletionOnOverlappingRetentionBreachedSegments retentionSize={0} retentionMs={1}") + @CsvSource(value = {"0, -1", "-1, 0"}) + public void testDeletionOnOverlappingRetentionBreachedSegments(long retentionSize, + long retentionMs) + throws RemoteStorageException, ExecutionException, InterruptedException { + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", retentionMs); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + List epochEntries = Collections.singletonList(epochEntry0); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + RemoteLogSegmentMetadata metadata1 = listRemoteLogSegmentMetadata(leaderTopicIdPartition, 1, 100, 1024, + epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED) + .get(0); + // overlapping segment + RemoteLogSegmentMetadata metadata2 = new RemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + metadata1.startOffset(), metadata1.endOffset() + 5, metadata1.maxTimestampMs(), + metadata1.brokerId() + 1, metadata1.eventTimestampMs(), metadata1.segmentSizeInBytes() + 128, + metadata1.customMetadata(), metadata1.state(), metadata1.segmentLeaderEpochs()); + + // When there are overlapping/duplicate segments, the RemoteLogMetadataManager#listRemoteLogSegments + // returns the segments in order of (valid ++ unreferenced) segments: + // (eg) B0 uploaded segment S0 with offsets 0-100 and B1 uploaded segment S1 with offsets 0-200. + // We will mark the segment S0 as duplicate and add it to unreferencedSegmentIds. + // The order of segments returned by listRemoteLogSegments will be S1, S0. + // While computing the next-log-start-offset, taking the max of deleted segment's end-offset + 1. + List metadataList = new ArrayList<>(); + metadataList.add(metadata2); + metadataList.add(metadata1); + + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenReturn(CompletableFuture.runAsync(() -> { })); + + // Verify the metrics for remote deletes and for failures is zero before attempt to delete segments + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + task.cleanupExpiredRemoteLogSegments(); + + assertEquals(metadata2.endOffset() + 1, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(1)); + + // Verify the metric for remote delete is updated correctly + assertEquals(2, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + // Verify we did not report any failure for remote deletes + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(2, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + } + + @ParameterizedTest(name = "testRemoteDeleteLagsOnRetentionBreachedSegments retentionSize={0} retentionMs={1}") + @CsvSource(value = {"0, -1", "-1, 0"}) + public void testRemoteDeleteLagsOnRetentionBreachedSegments(long retentionSize, + long retentionMs) + throws RemoteStorageException, ExecutionException, InterruptedException { + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", retentionMs); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + List epochEntries = Collections.singletonList(epochEntry0); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenReturn(CompletableFuture.runAsync(() -> { })); + + doAnswer(ans -> { + verifyRemoteDeleteMetrics(2048L, 2L); + return Optional.empty(); + }).doAnswer(ans -> { + verifyRemoteDeleteMetrics(1024L, 1L); + return Optional.empty(); + }).when(remoteStorageManager).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + + verifyRemoteDeleteMetrics(0L, 0L); + + task.cleanupExpiredRemoteLogSegments(); + + assertEquals(200L, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(1)); + } + + @Test + public void testRemoteLogSizeRetentionShouldFilterOutCopySegmentStartState() + throws RemoteStorageException, ExecutionException, InterruptedException { + int segmentSize = 1024; + Map logProps = new HashMap<>(); + // set the retention.bytes to 10 segment size + logProps.put("retention.bytes", segmentSize * 10L); + logProps.put("retention.ms", -1L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + List epochEntries = Collections.singletonList(epochEntry0); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(2000L); + + // creating remote log metadata list: + // s1. One segment with "COPY_SEGMENT_STARTED" state to simulate the segment was failing on copying to remote storage (dangling). + // it should be ignored for both remote log size calculation, but get deleted in the 1st run. + // s2. One segment with "DELETE_SEGMENT_FINISHED" state to simulate the remoteLogMetadataManager doesn't filter it out and returned. + // We should filter it out when calculating remote storage log size and deletion + // s3. One segment with "DELETE_SEGMENT_STARTED" state to simulate the segment was failing on deleting remote log (dangling). + // We should NOT count it when calculating remote storage log size and we should retry deletion. + // s4. Another segment with "COPY_SEGMENT_STARTED" state to simulate the segment is copying to remote storage. + // The segment state will change to "COPY_SEGMENT_FINISHED" state before checking deletion. + // In the 1st run, this segment should be skipped when calculating remote storage size. + // In the 2nd run, we should count it in when calculating remote storage size. + // s5. 11 segments with "COPY_SEGMENT_FINISHED" state. These are expected to be counted in when calculating remote storage log size + // + // Expected results (retention.size is 10240 (10 segments)): + // In the 1st run, the total remote storage size should be 1024 * 11 (s5) and 2 segments (s1, s3) will be deleted because they are dangling segments. + // Note: segments being copied are filtered out by the expiration logic, so s1 may be the result of an old failed copy cleanup where we weren't updating the state. + // In the 2nd run, the total remote storage size should be 1024 * 12 (s4, s5) and 2 segments (s4, s5[0]) will be deleted because of retention size breach. + RemoteLogSegmentMetadata s1 = createRemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 0, 99, segmentSize, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_STARTED); + RemoteLogSegmentMetadata s2 = createRemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 0, 99, segmentSize, epochEntries, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED); + RemoteLogSegmentMetadata s3 = createRemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 0, 99, segmentSize, epochEntries, RemoteLogSegmentState.DELETE_SEGMENT_STARTED); + RemoteLogSegmentMetadata s4CopyStarted = createRemoteLogSegmentMetadata(new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()), + 200, 299, segmentSize, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_STARTED); + RemoteLogSegmentMetadata s4CopyFinished = createRemoteLogSegmentMetadata(s4CopyStarted.remoteLogSegmentId(), + s4CopyStarted.startOffset(), s4CopyStarted.endOffset(), segmentSize, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + List s5 = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 11, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + + List metadataList = new LinkedList<>(); + metadataList.addAll(Arrays.asList(s1, s2, s3, s4CopyStarted)); + metadataList.addAll(s5); + + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenReturn(metadataList.iterator()).thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenReturn(CompletableFuture.runAsync(() -> { })); + doNothing().when(remoteStorageManager).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + + // RUN 1 + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + task.cleanupExpiredRemoteLogSegments(); + verify(remoteStorageManager, times(2)).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + verify(remoteStorageManager).deleteLogSegmentData(s1); + // make sure the s2 segment with "DELETE_SEGMENT_FINISHED" state is not invoking "deleteLogSegmentData" + verify(remoteStorageManager, never()).deleteLogSegmentData(s2); + verify(remoteStorageManager).deleteLogSegmentData(s3); + + clearInvocations(remoteStorageManager); + + // RUN 2 + // update the metadata list to remove deleted s1, s3, and set the state in s4 to COPY_SEGMENT_FINISHED + List updatedMetadataList = new LinkedList<>(); + updatedMetadataList.addAll(Arrays.asList(s2, s4CopyFinished)); + updatedMetadataList.addAll(s5); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(updatedMetadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> updatedMetadataList.iterator()); + + doNothing().when(remoteStorageManager).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + task.cleanupExpiredRemoteLogSegments(); + + // make sure 2 segments got deleted + verify(remoteStorageManager, times(2)).deleteLogSegmentData(any(RemoteLogSegmentMetadata.class)); + verify(remoteStorageManager).deleteLogSegmentData(s4CopyFinished); + verify(remoteStorageManager).deleteLogSegmentData(s5.get(0)); + } + + @Test + public void testDeleteRetentionMsBeingCancelledBeforeSecondDelete() throws RemoteStorageException, ExecutionException, InterruptedException { + RemoteLogManager.RLMExpirationTask leaderTask = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List epochEntries = Collections.singletonList(epochEntry0); + + List metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", -1L); + logProps.put("retention.ms", 0L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenAnswer(answer -> { + // cancel the task so that we don't delete the second segment + leaderTask.cancel(); + return CompletableFuture.runAsync(() -> { + }); + }); + + leaderTask.cleanupExpiredRemoteLogSegments(); + + assertEquals(200L, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + verify(remoteStorageManager, never()).deleteLogSegmentData(metadataList.get(1)); + + // test that the 2nd log segment will be deleted by the new leader + RemoteLogManager.RLMExpirationTask newLeaderTask = remoteLogManager.new RLMExpirationTask(followerTopicIdPartition); + + Iterator firstIterator = metadataList.iterator(); + firstIterator.next(); + Iterator secondIterator = metadataList.iterator(); + secondIterator.next(); + Iterator thirdIterator = metadataList.iterator(); + thirdIterator.next(); + + when(remoteLogMetadataManager.listRemoteLogSegments(followerTopicIdPartition)) + .thenReturn(firstIterator); + when(remoteLogMetadataManager.listRemoteLogSegments(followerTopicIdPartition, 0)) + .thenReturn(secondIterator) + .thenReturn(thirdIterator); + + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenAnswer(answer -> CompletableFuture.runAsync(() -> { })); + + newLeaderTask.cleanupExpiredRemoteLogSegments(); + + assertEquals(200L, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(1)); + } + + @Test + public void testDeleteRetentionMsBiggerThanTimeMs() throws RemoteStorageException, ExecutionException, InterruptedException { + // add 1 month to the current time to avoid flaky test + LogConfig mockLogConfig = new LogConfig(Map.of("retention.ms", time.milliseconds() + 24 * 30 * 60 * 60 * 1000L)); + when(mockLog.config()).thenReturn(mockLogConfig); + + RemoteLogManager.RLMExpirationTask leaderTask = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List epochEntries = Collections.singletonList(epochEntry0); + + List metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + assertDoesNotThrow(leaderTask::cleanupExpiredRemoteLogSegments); + + verify(remoteStorageManager, never()).deleteLogSegmentData(any()); + } + + @ParameterizedTest(name = "testFailedDeleteExpiredSegments retentionSize={0} retentionMs={1}") + @CsvSource(value = {"0, -1", "-1, 0"}) + public void testFailedDeleteExpiredSegments(long retentionSize, + long retentionMs) throws RemoteStorageException, ExecutionException, InterruptedException { + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", retentionMs); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + List epochEntries = Collections.singletonList(epochEntry0); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List metadataList = + listRemoteLogSegmentMetadata(leaderTopicIdPartition, 1, 100, 1024, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(metadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenAnswer(ans -> metadataList.iterator()); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenReturn(CompletableFuture.runAsync(() -> { })); + + // Verify the metrics for remote deletes and for failures is zero before attempt to delete segments + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(0, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + doThrow(new RemoteStorageException("Failed to delete segment")).when(remoteStorageManager).deleteLogSegmentData(any()); + assertThrows(RemoteStorageException.class, task::cleanupExpiredRemoteLogSegments); + + assertEquals(100L, currentLogStartOffset.get()); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + + // Verify the metric for remote delete is updated correctly + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteDeleteRequestRate().count()); + // Verify we reported 1 failure for remote deletes + assertEquals(1, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).failedRemoteDeleteRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteDeleteRequestRate().count()); + assertEquals(1, brokerTopicStats.allTopicsStats().failedRemoteDeleteRequestRate().count()); + + // make sure we'll retry the deletion in next run + doNothing().when(remoteStorageManager).deleteLogSegmentData(any()); + task.cleanupExpiredRemoteLogSegments(); + verify(remoteStorageManager).deleteLogSegmentData(metadataList.get(0)); + } + + @ParameterizedTest(name = "testDeleteLogSegmentDueToRetentionSizeBreach segmentCount={0} deletableSegmentCount={1}") + @CsvSource(value = {"50, 0", "50, 1", "50, 23", "50, 50"}) + public void testDeleteLogSegmentDueToRetentionSizeBreach(int segmentCount, + int deletableSegmentCount) + throws RemoteStorageException, ExecutionException, InterruptedException { + int recordsPerSegment = 100; + int segmentSize = 1024; + List epochEntries = Arrays.asList( + new EpochEntry(0, 0L), + new EpochEntry(1, 20L), + new EpochEntry(3, 50L), + new EpochEntry(4, 100L) + ); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + int currentLeaderEpoch = epochEntries.get(epochEntries.size() - 1).epoch; + + long localLogSegmentsSize = 512L; + long retentionSize = ((long) segmentCount - deletableSegmentCount) * segmentSize + localLogSegmentsSize; + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", -1L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + when(mockLog.topicPartition()).thenReturn(tp); + + long localLogStartOffset = (long) segmentCount * recordsPerSegment; + long logEndOffset = ((long) segmentCount * recordsPerSegment) + 1; + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.localLogStartOffset()).thenReturn(localLogStartOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(localLogSegmentsSize); + + List segmentMetadataList = listRemoteLogSegmentMetadata( + leaderTopicIdPartition, segmentCount, recordsPerSegment, segmentSize, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + verifyDeleteLogSegment(segmentMetadataList, deletableSegmentCount, currentLeaderEpoch); + } + + @ParameterizedTest(name = "testDeleteLogSegmentDueToRetentionTimeBreach segmentCount={0} deletableSegmentCount={1}") + @CsvSource(value = {"50, 0", "50, 1", "50, 23", "50, 50"}) + public void testDeleteLogSegmentDueToRetentionTimeBreach(int segmentCount, + int deletableSegmentCount) + throws RemoteStorageException, ExecutionException, InterruptedException { + int recordsPerSegment = 100; + int segmentSize = 1024; + List epochEntries = Arrays.asList( + new EpochEntry(0, 0L), + new EpochEntry(1, 20L), + new EpochEntry(3, 50L), + new EpochEntry(4, 100L) + ); + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + int currentLeaderEpoch = epochEntries.get(epochEntries.size() - 1).epoch; + + long localLogSegmentsSize = 512L; + long retentionSize = -1L; + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", retentionSize); + logProps.put("retention.ms", 1L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + when(mockLog.topicPartition()).thenReturn(tp); + + long localLogStartOffset = (long) segmentCount * recordsPerSegment; + long logEndOffset = ((long) segmentCount * recordsPerSegment) + 1; + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.localLogStartOffset()).thenReturn(localLogStartOffset); + when(mockLog.logEndOffset()).thenReturn(logEndOffset); + when(mockLog.onlyLocalLogSegmentsSize()).thenReturn(localLogSegmentsSize); + + List segmentMetadataList = listRemoteLogSegmentMetadataByTime( + leaderTopicIdPartition, segmentCount, deletableSegmentCount, recordsPerSegment, segmentSize, epochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + verifyDeleteLogSegment(segmentMetadataList, deletableSegmentCount, currentLeaderEpoch); + } + + private void verifyRemoteDeleteMetrics(long remoteDeleteLagBytes, long remoteDeleteLagSegments) { + assertEquals(remoteDeleteLagBytes, safeLongYammerMetricValue("RemoteDeleteLagBytes"), + String.format("Expected to find %d for RemoteDeleteLagBytes metric value, but found %d", + remoteDeleteLagBytes, safeLongYammerMetricValue("RemoteDeleteLagBytes"))); + assertEquals(remoteDeleteLagSegments, safeLongYammerMetricValue("RemoteDeleteLagSegments"), + String.format("Expected to find %d for RemoteDeleteLagSegments metric value, but found %d", + remoteDeleteLagSegments, safeLongYammerMetricValue("RemoteDeleteLagSegments"))); + assertEquals(remoteDeleteLagBytes, safeLongYammerMetricValue("RemoteDeleteLagBytes,topic=" + leaderTopic), + String.format("Expected to find %d for RemoteDeleteLagBytes for 'Leader' topic metric value, but found %d", + remoteDeleteLagBytes, safeLongYammerMetricValue("RemoteDeleteLagBytes,topic=" + leaderTopic))); + assertEquals(remoteDeleteLagSegments, safeLongYammerMetricValue("RemoteDeleteLagSegments,topic=" + leaderTopic), + String.format("Expected to find %d for RemoteDeleteLagSegments for 'Leader' topic metric value, but found %d", + remoteDeleteLagSegments, safeLongYammerMetricValue("RemoteDeleteLagSegments,topic=" + leaderTopic))); + } + + private void verifyDeleteLogSegment(List segmentMetadataList, + int deletableSegmentCount, + int currentLeaderEpoch) + throws RemoteStorageException, ExecutionException, InterruptedException { + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(segmentMetadataList.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(eq(leaderTopicIdPartition), anyInt())) + .thenAnswer(invocation -> { + int leaderEpoch = invocation.getArgument(1); + return segmentMetadataList.stream() + .filter(segmentMetadata -> segmentMetadata.segmentLeaderEpochs().containsKey(leaderEpoch)) + .iterator(); + }); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenAnswer(answer -> CompletableFuture.runAsync(() -> { })); + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + task.cleanupExpiredRemoteLogSegments(); + + ArgumentCaptor deletedMetadataCapture = ArgumentCaptor.forClass(RemoteLogSegmentMetadata.class); + verify(remoteStorageManager, times(deletableSegmentCount)).deleteLogSegmentData(deletedMetadataCapture.capture()); + if (deletableSegmentCount > 0) { + List deletedMetadataList = deletedMetadataCapture.getAllValues(); + RemoteLogSegmentMetadata expectedEndMetadata = segmentMetadataList.get(deletableSegmentCount - 1); + assertEquals(segmentMetadataList.get(0), deletedMetadataList.get(0)); + assertEquals(expectedEndMetadata, deletedMetadataList.get(deletedMetadataList.size() - 1)); + assertEquals(currentLogStartOffset.get(), expectedEndMetadata.endOffset() + 1); + } + } + + + @Test + public void testDeleteRetentionMsOnExpiredSegment() throws RemoteStorageException, IOException { + AtomicLong logStartOffset = new AtomicLong(0); + try (RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> logStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + }) { + RemoteLogManager.RLMExpirationTask task = remoteLogManager.new RLMExpirationTask(leaderTopicIdPartition); + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + when(mockLog.logEndOffset()).thenReturn(200L); + + List epochEntries = Collections.singletonList(epochEntry0); + + List remoteLogSegmentMetadatas = listRemoteLogSegmentMetadata( + leaderTopicIdPartition, 2, 100, 1024, epochEntries, RemoteLogSegmentState.DELETE_SEGMENT_FINISHED); + + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition)) + .thenReturn(remoteLogSegmentMetadatas.iterator()); + when(remoteLogMetadataManager.listRemoteLogSegments(leaderTopicIdPartition, 0)) + .thenReturn(remoteLogSegmentMetadatas.iterator()) + .thenReturn(remoteLogSegmentMetadatas.iterator()); + + checkpoint.write(epochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + Map logProps = new HashMap<>(); + logProps.put("retention.bytes", -1L); + logProps.put("retention.ms", 0L); + LogConfig mockLogConfig = new LogConfig(logProps); + when(mockLog.config()).thenReturn(mockLogConfig); + + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))) + .thenAnswer(answer -> CompletableFuture.runAsync(() -> { })); + + task.cleanupExpiredRemoteLogSegments(); + + verifyNoMoreInteractions(remoteStorageManager); + assertEquals(0L, logStartOffset.get()); + } catch (ExecutionException | InterruptedException e) { + throw new RuntimeException(e); + } + } + + private List listRemoteLogSegmentMetadata(TopicIdPartition topicIdPartition, + int segmentCount, + int recordsPerSegment, + int segmentSize, + RemoteLogSegmentState state) { + return listRemoteLogSegmentMetadata(topicIdPartition, segmentCount, recordsPerSegment, segmentSize, Collections.emptyList(), state); + } + + private List listRemoteLogSegmentMetadata(TopicIdPartition topicIdPartition, + int segmentCount, + int recordsPerSegment, + int segmentSize, + List epochEntries, + RemoteLogSegmentState state) { + return listRemoteLogSegmentMetadataByTime( + topicIdPartition, segmentCount, 0, recordsPerSegment, segmentSize, epochEntries, state); + } + + private List listRemoteLogSegmentMetadataByTime(TopicIdPartition topicIdPartition, + int segmentCount, + int deletableSegmentCount, + int recordsPerSegment, + int segmentSize, + List epochEntries, + RemoteLogSegmentState state) { + List segmentMetadataList = new ArrayList<>(); + for (int idx = 0; idx < segmentCount; idx++) { + long timestamp = time.milliseconds(); + if (idx < deletableSegmentCount) { + timestamp = time.milliseconds() - 1; + } + long startOffset = (long) idx * recordsPerSegment; + long endOffset = startOffset + recordsPerSegment - 1; + List localTotalEpochEntries = epochEntries.isEmpty() ? totalEpochEntries : epochEntries; + Map segmentLeaderEpochs = truncateAndGetLeaderEpochs(localTotalEpochEntries, startOffset, endOffset); + RemoteLogSegmentMetadata metadata = new RemoteLogSegmentMetadata( + new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()), + startOffset, + endOffset, + timestamp, + brokerId, + timestamp, + segmentSize, + Optional.empty(), + state, + segmentLeaderEpochs + ); + segmentMetadataList.add(metadata); + } + return segmentMetadataList; + } + + private RemoteLogSegmentMetadata createRemoteLogSegmentMetadata(RemoteLogSegmentId segmentID, + long startOffset, + long endOffset, + int segmentSize, + List epochEntries, + RemoteLogSegmentState state) { + return new RemoteLogSegmentMetadata( + segmentID, + startOffset, + endOffset, + time.milliseconds(), + brokerId, + time.milliseconds(), + segmentSize, + Optional.empty(), + state, + truncateAndGetLeaderEpochs(epochEntries, startOffset, endOffset)); + } + + private Map truncateAndGetLeaderEpochs(List entries, + Long startOffset, + Long endOffset) { + LeaderEpochCheckpointFile myCheckpoint; + try { + myCheckpoint = new LeaderEpochCheckpointFile( + TestUtils.tempFile(), new LogDirFailureChannel(1)); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + myCheckpoint.write(entries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(null, myCheckpoint, scheduler); + cache.truncateFromStartAsyncFlush(startOffset); + cache.truncateFromEndAsyncFlush(endOffset); + return myCheckpoint.read().stream().collect(Collectors.toMap(e -> e.epoch, e -> e.startOffset)); + } + + @Test + public void testReadForMissingFirstBatchInRemote() throws RemoteStorageException, IOException { + FileInputStream fileInputStream = mock(FileInputStream.class); + ClassLoaderAwareRemoteStorageManager rsmManager = mock(ClassLoaderAwareRemoteStorageManager.class); + RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); + LeaderEpochFileCache cache = mock(LeaderEpochFileCache.class); + when(cache.epochForOffset(anyLong())).thenReturn(OptionalInt.of(1)); + + when(remoteStorageManager.fetchLogSegment(any(RemoteLogSegmentMetadata.class), anyInt())) + .thenAnswer(a -> fileInputStream); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + int fetchOffset = 0; + int fetchMaxBytes = 10; + + FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData( + Uuid.randomUuid(), fetchOffset, 0, fetchMaxBytes, Optional.empty() + ); + + RemoteStorageFetchInfo fetchInfo = new RemoteStorageFetchInfo( + 0, false, tp, partitionData, FetchIsolation.TXN_COMMITTED, false + ); + + try (RemoteLogManager remoteLogManager = new RemoteLogManager( + config.remoteLogManagerConfig(), + brokerId, + logDir, + clusterId, + time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats, + metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return rsmManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + + @Override + public Optional fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, + int epochForOffset, long offset) { + return Optional.of(segmentMetadata); + } + + @Override + public Optional findNextSegmentMetadata(RemoteLogSegmentMetadata segmentMetadata, + LeaderEpochFileCache leaderEpochFileCacheOption) { + return Optional.empty(); + } + + @Override + int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { + return 1; + } + + // This is the key scenario that we are testing here + @Override + EnrichedRecordBatch findFirstBatch(RemoteLogInputStream remoteLogInputStream, long offset) { + return new EnrichedRecordBatch(null, 0); + } + }) { + FetchDataInfo fetchDataInfo = remoteLogManager.read(fetchInfo); + assertEquals(fetchOffset, fetchDataInfo.fetchOffsetMetadata.messageOffset); + assertFalse(fetchDataInfo.firstEntryIncomplete); + assertEquals(MemoryRecords.EMPTY, fetchDataInfo.records); + // FetchIsolation is TXN_COMMITTED + assertTrue(fetchDataInfo.abortedTransactions.isPresent()); + assertTrue(fetchDataInfo.abortedTransactions.get().isEmpty()); + } + } + + @ParameterizedTest + @ValueSource(booleans = {true, false}) + public void testReadForFirstBatchMoreThanMaxFetchBytes(boolean minOneMessage) throws RemoteStorageException, IOException { + FileInputStream fileInputStream = mock(FileInputStream.class); + ClassLoaderAwareRemoteStorageManager rsmManager = mock(ClassLoaderAwareRemoteStorageManager.class); + RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); + LeaderEpochFileCache cache = mock(LeaderEpochFileCache.class); + when(cache.epochForOffset(anyLong())).thenReturn(OptionalInt.of(1)); + + when(remoteStorageManager.fetchLogSegment(any(RemoteLogSegmentMetadata.class), anyInt())) + .thenAnswer(a -> fileInputStream); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + int fetchOffset = 0; + int fetchMaxBytes = 10; + int recordBatchSizeInBytes = fetchMaxBytes + 1; + RecordBatch firstBatch = mock(RecordBatch.class); + ArgumentCaptor capture = ArgumentCaptor.forClass(ByteBuffer.class); + + FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData( + Uuid.randomUuid(), fetchOffset, 0, fetchMaxBytes, Optional.empty() + ); + + RemoteStorageFetchInfo fetchInfo = new RemoteStorageFetchInfo( + 0, minOneMessage, tp, partitionData, FetchIsolation.HIGH_WATERMARK, false + ); + + try (RemoteLogManager remoteLogManager = new RemoteLogManager( + config.remoteLogManagerConfig(), + brokerId, + logDir, + clusterId, + time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { }, + brokerTopicStats, + metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return rsmManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + + public Optional fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, + int epochForOffset, long offset) { + return Optional.of(segmentMetadata); + } + + int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { + return 1; + } + + EnrichedRecordBatch findFirstBatch(RemoteLogInputStream remoteLogInputStream, long offset) { + when(firstBatch.sizeInBytes()).thenReturn(recordBatchSizeInBytes); + doNothing().when(firstBatch).writeTo(capture.capture()); + return new EnrichedRecordBatch(firstBatch, 0); + } + }) { + FetchDataInfo fetchDataInfo = remoteLogManager.read(fetchInfo); + // Common assertions + assertEquals(fetchOffset, fetchDataInfo.fetchOffsetMetadata.messageOffset); + assertFalse(fetchDataInfo.firstEntryIncomplete); + // FetchIsolation is HIGH_WATERMARK + assertEquals(Optional.empty(), fetchDataInfo.abortedTransactions); + + + if (minOneMessage) { + // Verify that the byte buffer has capacity equal to the size of the first batch + assertEquals(recordBatchSizeInBytes, capture.getValue().capacity()); + } else { + // Verify that the first batch is never written to the buffer + verify(firstBatch, never()).writeTo(any(ByteBuffer.class)); + assertEquals(MemoryRecords.EMPTY, fetchDataInfo.records); + } + } + } + + @Test + public void testReadForFirstBatchInLogCompaction() throws RemoteStorageException, IOException { + FileInputStream fileInputStream = mock(FileInputStream.class); + RemoteLogInputStream remoteLogInputStream = mock(RemoteLogInputStream.class); + ClassLoaderAwareRemoteStorageManager rsmManager = mock(ClassLoaderAwareRemoteStorageManager.class); + RemoteLogSegmentMetadata segmentMetadata = mock(RemoteLogSegmentMetadata.class); + LeaderEpochFileCache cache = mock(LeaderEpochFileCache.class); + when(cache.epochForOffset(anyLong())).thenReturn(OptionalInt.of(1)); + when(mockLog.leaderEpochCache()).thenReturn(cache); + + int fetchOffset = 0; + int fetchMaxBytes = 10; + int recordBatchSizeInBytes = fetchMaxBytes + 1; + RecordBatch firstBatch = mock(RecordBatch.class); + ArgumentCaptor capture = ArgumentCaptor.forClass(ByteBuffer.class); + + FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData( + Uuid.randomUuid(), fetchOffset, 0, fetchMaxBytes, Optional.empty() + ); + + when(rsmManager.fetchLogSegment(any(), anyInt())).thenReturn(fileInputStream); + when(segmentMetadata.topicIdPartition()).thenReturn(new TopicIdPartition(Uuid.randomUuid(), tp)); + // Fetching first time FirstBatch return null because of log compaction. + // Fetching second time FirstBatch return data. + when(remoteLogInputStream.nextBatch()).thenReturn(null, firstBatch); + // Return last offset greater than the requested offset. + when(firstBatch.lastOffset()).thenReturn(2L); + when(firstBatch.sizeInBytes()).thenReturn(recordBatchSizeInBytes); + doNothing().when(firstBatch).writeTo(capture.capture()); + RemoteStorageFetchInfo fetchInfo = new RemoteStorageFetchInfo( + 0, true, tp, partitionData, FetchIsolation.HIGH_WATERMARK, false + ); + + + try (RemoteLogManager remoteLogManager = new RemoteLogManager( + config.remoteLogManagerConfig(), + brokerId, + logDir, + clusterId, + time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> { + }, + brokerTopicStats, + metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return rsmManager; + } + + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + + public Optional fetchRemoteLogSegmentMetadata(TopicPartition topicPartition, + int epochForOffset, long offset) { + return Optional.of(segmentMetadata); + } + public RemoteLogInputStream getRemoteLogInputStream(InputStream in) { + return remoteLogInputStream; + } + + int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { + return 1; + } + }) { + FetchDataInfo fetchDataInfo = remoteLogManager.read(fetchInfo); + // Common assertions + assertEquals(fetchOffset, fetchDataInfo.fetchOffsetMetadata.messageOffset); + assertFalse(fetchDataInfo.firstEntryIncomplete); + // FetchIsolation is HIGH_WATERMARK + assertEquals(Optional.empty(), fetchDataInfo.abortedTransactions); + // Verify that the byte buffer has capacity equal to the size of the first batch + assertEquals(recordBatchSizeInBytes, capture.getValue().capacity()); + + } + } + + @Test + public void testCopyQuotaManagerConfig() { + Properties defaultProps = new Properties(); + defaultProps.putAll(brokerConfig); + appendRLMConfig(defaultProps); + KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps); + RLMQuotaManagerConfig defaultConfig = RemoteLogManager.copyQuotaManagerConfig(defaultRlmConfig.remoteLogManagerConfig()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND, defaultConfig.quotaBytesPerSecond()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM, defaultConfig.numQuotaSamples()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds()); + + Properties customProps = new Properties(); + customProps.putAll(brokerConfig); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_MAX_BYTES_PER_SECOND_PROP, 100); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_NUM_PROP, 31); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPY_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1); + appendRLMConfig(customProps); + KafkaConfig config = KafkaConfig.fromProps(customProps); + + RLMQuotaManagerConfig rlmCopyQuotaManagerConfig = RemoteLogManager.copyQuotaManagerConfig(config.remoteLogManagerConfig()); + assertEquals(100L, rlmCopyQuotaManagerConfig.quotaBytesPerSecond()); + assertEquals(31, rlmCopyQuotaManagerConfig.numQuotaSamples()); + assertEquals(1, rlmCopyQuotaManagerConfig.quotaWindowSizeSeconds()); + } + + @Test + public void testFetchQuotaManagerConfig() { + Properties defaultProps = new Properties(); + defaultProps.putAll(brokerConfig); + appendRLMConfig(defaultProps); + KafkaConfig defaultRlmConfig = KafkaConfig.fromProps(defaultProps); + + RLMQuotaManagerConfig defaultConfig = RemoteLogManager.fetchQuotaManagerConfig(defaultRlmConfig.remoteLogManagerConfig()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND, defaultConfig.quotaBytesPerSecond()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM, defaultConfig.numQuotaSamples()); + assertEquals(DEFAULT_REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS, defaultConfig.quotaWindowSizeSeconds()); + + Properties customProps = new Properties(); + customProps.putAll(brokerConfig); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_MAX_BYTES_PER_SECOND_PROP, 100); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_NUM_PROP, 31); + customProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FETCH_QUOTA_WINDOW_SIZE_SECONDS_PROP, 1); + appendRLMConfig(customProps); + KafkaConfig rlmConfig = KafkaConfig.fromProps(customProps); + RLMQuotaManagerConfig rlmFetchQuotaManagerConfig = RemoteLogManager.fetchQuotaManagerConfig(rlmConfig.remoteLogManagerConfig()); + assertEquals(100L, rlmFetchQuotaManagerConfig.quotaBytesPerSecond()); + assertEquals(31, rlmFetchQuotaManagerConfig.numQuotaSamples()); + assertEquals(1, rlmFetchQuotaManagerConfig.quotaWindowSizeSeconds()); + } + + @Test + public void testEpochEntriesAsByteBuffer() throws Exception { + int expectedEpoch = 0; + long expectedStartOffset = 1L; + int expectedVersion = 0; + List epochs = Arrays.asList(new EpochEntry(expectedEpoch, expectedStartOffset)); + ByteBuffer buffer = RemoteLogManager.epochEntriesAsByteBuffer(epochs); + BufferedReader bufferedReader = new BufferedReader(new InputStreamReader(new ByteArrayInputStream(buffer.array()), StandardCharsets.UTF_8)); + + assertEquals(String.valueOf(expectedVersion), bufferedReader.readLine()); + assertEquals(String.valueOf(epochs.size()), bufferedReader.readLine()); + assertEquals(expectedEpoch + " " + expectedStartOffset, bufferedReader.readLine()); + } + + + @ParameterizedTest + @ValueSource(booleans = {false, true}) + public void testCopyQuota(boolean quotaExceeded) throws Exception { + RemoteLogManager.RLMCopyTask task = setupRLMTask(quotaExceeded); + + if (quotaExceeded) { + // Verify that the copy operation times out, since no segments can be copied due to quota being exceeded + assertThrows(AssertionFailedError.class, () -> assertTimeoutPreemptively(Duration.ofMillis(200), () -> task.copyLogSegmentsToRemote(mockLog))); + + Map allMetrics = metrics.metrics(); + KafkaMetric avgMetric = allMetrics.get(metrics.metricName("remote-copy-throttle-time-avg", "RemoteLogManager")); + KafkaMetric maxMetric = allMetrics.get(metrics.metricName("remote-copy-throttle-time-max", "RemoteLogManager")); + assertEquals(quotaExceededThrottleTime, ((Double) avgMetric.metricValue()).longValue()); + assertEquals(quotaExceededThrottleTime, ((Double) maxMetric.metricValue()).longValue()); + + // Verify the highest offset in remote storage is updated only once + ArgumentCaptor capture = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(1)).updateHighestOffsetInRemoteStorage(capture.capture()); + // Verify the highest offset in remote storage was -1L before the copy started + assertEquals(-1L, capture.getValue()); + } else { + // Verify the copy operation completes within the timeout, since it does not need to wait for quota availability + assertTimeoutPreemptively(Duration.ofMillis(100), () -> task.copyLogSegmentsToRemote(mockLog)); + + // Verify quota check was performed + verify(rlmCopyQuotaManager, times(1)).getThrottleTimeMs(); + // Verify bytes to copy was recorded with the quota manager + verify(rlmCopyQuotaManager, times(1)).record(10); + + Map allMetrics = metrics.metrics(); + KafkaMetric avgMetric = allMetrics.get(metrics.metricName("remote-copy-throttle-time-avg", "RemoteLogManager")); + KafkaMetric maxMetric = allMetrics.get(metrics.metricName("remote-copy-throttle-time-max", "RemoteLogManager")); + assertEquals(Double.NaN, avgMetric.metricValue()); + assertEquals(Double.NaN, maxMetric.metricValue()); + + // Verify the highest offset in remote storage is updated + ArgumentCaptor capture = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(2)).updateHighestOffsetInRemoteStorage(capture.capture()); + List capturedValues = capture.getAllValues(); + // Verify the highest offset in remote storage was -1L before the copy + assertEquals(-1L, capturedValues.get(0).longValue()); + // Verify it was updated to 149L after the copy + assertEquals(149L, capturedValues.get(1).longValue()); + } + } + + @Test + public void testRLMShutdownDuringQuotaExceededScenario() throws Exception { + remoteLogManager.startup(); + setupRLMTask(true); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); + // Ensure the copy operation is waiting for quota to be available + TestUtils.waitForCondition(() -> { + verify(rlmCopyQuotaManager, atLeast(1)).getThrottleTimeMs(); + return true; + }, "Quota exceeded check did not happen"); + // Verify RLM is able to shut down + assertTimeoutPreemptively(Duration.ofMillis(100), () -> remoteLogManager.close()); + } + + // helper method to set up a RemoteLogManager.RLMTask for testing copy quota behaviour + private RemoteLogManager.RLMCopyTask setupRLMTask(boolean quotaExceeded) throws RemoteStorageException, IOException { + long oldSegmentStartOffset = 0L; + long nextSegmentStartOffset = 150L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(mockLog.parentDir()).thenReturn("dir1"); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + // create 2 log segments, with 0 and 150 as log start offset + LogSegment oldSegment = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + File tempFile = TestUtils.tempFile(); + FileRecords fileRecords = mock(FileRecords.class); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + + // Set up the segment that is eligible for copy + when(oldSegment.log()).thenReturn(fileRecords); + when(oldSegment.baseOffset()).thenReturn(oldSegmentStartOffset); + when(oldSegment.readNextOffset()).thenReturn(nextSegmentStartOffset); + + // set up the active segment + when(activeSegment.baseOffset()).thenReturn(nextSegmentStartOffset); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(oldSegment, activeSegment))); + + File mockProducerSnapshotIndex = TestUtils.tempFile(); + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockLog.lastStableOffset()).thenReturn(250L); + + File tempDir = TestUtils.tempDirectory(); + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldSegmentStartOffset, ""), oldSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldSegmentStartOffset, txnFile); + when(oldSegment.timeIndex()).thenReturn(timeIdx); + when(oldSegment.offsetIndex()).thenReturn(idx); + when(oldSegment.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + when(remoteStorageManager.copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class))).thenReturn(Optional.empty()); + + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(quotaExceeded ? 1000L : 0L); + doNothing().when(rlmCopyQuotaManager).record(anyInt()); + + return remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + } + + @Test + public void testCopyThrottling() throws Exception { + long oldestSegmentStartOffset = 0L; + + when(mockLog.topicPartition()).thenReturn(leaderTopicIdPartition.topicPartition()); + + // leader epoch preparation + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(leaderTopicIdPartition.topicPartition(), checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.highestOffsetForEpoch(any(TopicIdPartition.class), anyInt())).thenReturn(Optional.of(0L)); + + // create 3 log segments + LogSegment segmentToCopy = mock(LogSegment.class); + LogSegment segmentToThrottle = mock(LogSegment.class); + LogSegment activeSegment = mock(LogSegment.class); + + File tempFile = TestUtils.tempFile(); + FileRecords fileRecords = mock(FileRecords.class); + when(fileRecords.file()).thenReturn(tempFile); + when(fileRecords.sizeInBytes()).thenReturn(10); + + // set up the segment that will be copied + when(segmentToCopy.log()).thenReturn(fileRecords); + when(segmentToCopy.baseOffset()).thenReturn(oldestSegmentStartOffset); + when(segmentToCopy.readNextOffset()).thenReturn(100L); + + // set up the segment that will not be copied because of hitting quota + when(segmentToThrottle.log()).thenReturn(fileRecords); + when(segmentToThrottle.baseOffset()).thenReturn(100L); + when(segmentToThrottle.readNextOffset()).thenReturn(150L); + + // set up the active segment + when(activeSegment.log()).thenReturn(fileRecords); + when(activeSegment.baseOffset()).thenReturn(150L); + + when(mockLog.activeSegment()).thenReturn(activeSegment); + when(mockLog.logStartOffset()).thenReturn(oldestSegmentStartOffset); + when(mockLog.logSegments(anyLong(), anyLong())).thenReturn(CollectionConverters.asScala(Arrays.asList(segmentToCopy, segmentToThrottle, activeSegment))); + + File mockProducerSnapshotIndex = TestUtils.tempFile(); + ProducerStateManager mockStateManager = mock(ProducerStateManager.class); + when(mockStateManager.fetchSnapshot(anyLong())).thenReturn(Optional.of(mockProducerSnapshotIndex)); + + when(mockLog.producerStateManager()).thenReturn(mockStateManager); + when(mockLog.lastStableOffset()).thenReturn(250L); + + File tempDir = TestUtils.tempDirectory(); + OffsetIndex idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1000).get(); + TimeIndex timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(tempDir, oldestSegmentStartOffset, ""), oldestSegmentStartOffset, 1500).get(); + File txnFile = UnifiedLog.transactionIndexFile(tempDir, oldestSegmentStartOffset, ""); + txnFile.createNewFile(); + TransactionIndex txnIndex = new TransactionIndex(oldestSegmentStartOffset, txnFile); + when(segmentToCopy.timeIndex()).thenReturn(timeIdx); + when(segmentToCopy.offsetIndex()).thenReturn(idx); + when(segmentToCopy.txnIndex()).thenReturn(txnIndex); + + CompletableFuture dummyFuture = new CompletableFuture<>(); + dummyFuture.complete(null); + when(remoteLogMetadataManager.addRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadata.class))).thenReturn(dummyFuture); + when(remoteLogMetadataManager.updateRemoteLogSegmentMetadata(any(RemoteLogSegmentMetadataUpdate.class))).thenReturn(dummyFuture); + when(remoteStorageManager.copyLogSegmentData(any(RemoteLogSegmentMetadata.class), any(LogSegmentData.class))).thenReturn(Optional.empty()); + + // After the first call, getThrottleTimeMs should return non-zero throttle time + when(rlmCopyQuotaManager.getThrottleTimeMs()).thenReturn(0L, 1000L); + doNothing().when(rlmCopyQuotaManager).record(anyInt()); + + RemoteLogManager.RLMCopyTask task = remoteLogManager.new RLMCopyTask(leaderTopicIdPartition, 128); + + // Verify that the copy operation times out, since the second segment cannot be copied due to quota being exceeded + assertThrows(AssertionFailedError.class, () -> assertTimeoutPreemptively(Duration.ofMillis(200), () -> task.copyLogSegmentsToRemote(mockLog))); + + // Verify the highest offset in remote storage is updated corresponding to the only segment that was copied + ArgumentCaptor capture = ArgumentCaptor.forClass(Long.class); + verify(mockLog, times(2)).updateHighestOffsetInRemoteStorage(capture.capture()); + List capturedValues = capture.getAllValues(); + // Verify the highest offset in remote storage was -1L before the copy + assertEquals(-1L, capturedValues.get(0).longValue()); + // Verify it was updated to 99L after the copy + assertEquals(99L, capturedValues.get(1).longValue()); + } + + @Test + public void testTierLagResetsToZeroOnBecomingFollower() { + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); + RemoteLogManager.RLMCopyTask rlmTask = (RemoteLogManager.RLMCopyTask) remoteLogManager.rlmCopyTask(leaderTopicIdPartition); + assertNotNull(rlmTask); + rlmTask.recordLagStats(1024, 2); + assertEquals(1024, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagBytes()); + assertEquals(2, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagSegments()); + // The same node becomes follower now which was the previous leader + remoteLogManager.onLeadershipChange(Collections.emptySet(), + Collections.singleton(mockPartition(leaderTopicIdPartition)), topicIds); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagBytes()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagSegments()); + + // If the old task emits the tier-lag stats, then it should be discarded + rlmTask.recordLagStats(2048, 4); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagBytes()); + assertEquals(0, brokerTopicStats.topicStats(leaderTopicIdPartition.topic()).remoteCopyLagSegments()); + } + + @Test + public void testRemoteReadFetchDataInfo() throws RemoteStorageException, IOException { + checkpoint.write(totalEpochEntries); + LeaderEpochFileCache cache = new LeaderEpochFileCache(tp, checkpoint, scheduler); + when(mockLog.leaderEpochCache()).thenReturn(cache); + when(remoteLogMetadataManager.remoteLogSegmentMetadata(eq(leaderTopicIdPartition), anyInt(), anyLong())) + .thenAnswer(ans -> { + long offset = ans.getArgument(2); + RemoteLogSegmentId segmentId = new RemoteLogSegmentId(leaderTopicIdPartition, Uuid.randomUuid()); + RemoteLogSegmentMetadata segmentMetadata = createRemoteLogSegmentMetadata(segmentId, + offset - 10, offset + 99, 1024, totalEpochEntries, RemoteLogSegmentState.COPY_SEGMENT_FINISHED); + return Optional.of(segmentMetadata); + }); + + File segmentFile = tempFile(); + appendRecordsToFile(segmentFile, 100, 3); + FileInputStream fileInputStream = new FileInputStream(segmentFile); + when(remoteStorageManager.fetchLogSegment(any(RemoteLogSegmentMetadata.class), anyInt())) + .thenReturn(fileInputStream); + + RemoteLogManager remoteLogManager = new RemoteLogManager(config.remoteLogManagerConfig(), brokerId, logDir, clusterId, time, + tp -> Optional.of(mockLog), + (topicPartition, offset) -> currentLogStartOffset.set(offset), + brokerTopicStats, metrics) { + public RemoteStorageManager createRemoteStorageManager() { + return remoteStorageManager; + } + public RemoteLogMetadataManager createRemoteLogMetadataManager() { + return remoteLogMetadataManager; + } + int lookupPositionForOffset(RemoteLogSegmentMetadata remoteLogSegmentMetadata, long offset) { + return 0; + } + }; + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), Collections.emptySet(), topicIds); + + long fetchOffset = 10; + FetchRequest.PartitionData partitionData = new FetchRequest.PartitionData( + Uuid.randomUuid(), fetchOffset, 0, 100, Optional.empty()); + RemoteStorageFetchInfo remoteStorageFetchInfo = new RemoteStorageFetchInfo( + 1048576, true, leaderTopicIdPartition.topicPartition(), + partitionData, FetchIsolation.HIGH_WATERMARK, false); + FetchDataInfo fetchDataInfo = remoteLogManager.read(remoteStorageFetchInfo); + // firstBatch baseOffset may not be equal to the fetchOffset + assertEquals(9, fetchDataInfo.fetchOffsetMetadata.messageOffset); + assertEquals(273, fetchDataInfo.fetchOffsetMetadata.relativePositionInSegment); + } + + @Test + public void testRLMOpsWhenMetadataIsNotReady() throws InterruptedException { + CountDownLatch latch = new CountDownLatch(2); + when(remoteLogMetadataManager.isReady(any(TopicIdPartition.class))) + .thenAnswer(ans -> { + latch.countDown(); + return false; + }); + remoteLogManager.startup(); + remoteLogManager.onLeadershipChange( + Collections.singleton(mockPartition(leaderTopicIdPartition)), + Collections.singleton(mockPartition(followerTopicIdPartition)), + topicIds + ); + assertNotNull(remoteLogManager.rlmCopyTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.leaderExpirationTask(leaderTopicIdPartition)); + assertNotNull(remoteLogManager.followerTask(followerTopicIdPartition)); + + // Once the partitions are assigned to the broker either as leader (or) follower in RLM#onLeadershipChange, + // then it should have called the `isReady` method for each of the partitions. Otherwise, the test will fail. + latch.await(5, TimeUnit.SECONDS); + verify(remoteLogMetadataManager).configure(anyMap()); + verify(remoteLogMetadataManager).onPartitionLeadershipChanges(anySet(), anySet()); + verify(remoteLogMetadataManager, atLeastOnce()).isReady(eq(leaderTopicIdPartition)); + verify(remoteLogMetadataManager, atLeastOnce()).isReady(eq(followerTopicIdPartition)); + verifyNoMoreInteractions(remoteLogMetadataManager); + verify(remoteStorageManager).configure(anyMap()); + verifyNoMoreInteractions(remoteStorageManager); + } + + private void appendRecordsToFile(File file, int nRecords, int nRecordsPerBatch) throws IOException { + byte magic = RecordBatch.CURRENT_MAGIC_VALUE; + Compression compression = Compression.NONE; + long offset = 0; + List records = new ArrayList<>(); + try (FileRecords fileRecords = FileRecords.open(file)) { + for (long counter = 1; counter < nRecords + 1; counter++) { + records.add(new SimpleRecord("foo".getBytes())); + if (counter % nRecordsPerBatch == 0) { + fileRecords.append(MemoryRecords.withRecords(magic, offset, compression, CREATE_TIME, + records.toArray(new SimpleRecord[0]))); + offset += records.size(); + records.clear(); + } + } + fileRecords.flush(); + } + } + + private Partition mockPartition(TopicIdPartition topicIdPartition) { + TopicPartition tp = topicIdPartition.topicPartition(); + Partition partition = mock(Partition.class); + UnifiedLog log = mock(UnifiedLog.class); + when(partition.topicPartition()).thenReturn(tp); + when(partition.topic()).thenReturn(tp.topic()); + when(log.remoteLogEnabled()).thenReturn(true); + when(partition.log()).thenReturn(Option.apply(log)); + when(log.config()).thenReturn(new LogConfig(new Properties())); + return partition; + } + + private void appendRLMConfig(Properties props) { + props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, true); + props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, NoOpRemoteStorageManager.class.getName()); + props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, NoOpRemoteLogMetadataManager.class.getName()); + props.put(DEFAULT_REMOTE_STORAGE_MANAGER_CONFIG_PREFIX + remoteLogStorageTestProp, remoteLogStorageTestVal); + // adding configs with "remote log metadata manager config prefix" + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + REMOTE_LOG_METADATA_TOPIC_PARTITIONS_PROP, remoteLogMetadataTopicPartitionsNum); + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataTestProp, remoteLogMetadataTestVal); + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataCommonClientTestProp, remoteLogMetadataCommonClientTestVal); + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataConsumerTestProp, remoteLogMetadataConsumerTestVal); + props.put(DEFAULT_REMOTE_LOG_METADATA_MANAGER_CONFIG_PREFIX + remoteLogMetadataProducerTestProp, remoteLogMetadataProducerTestVal); + } + +} diff --git a/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java b/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java new file mode 100644 index 0000000000000..9737ed72a9bc3 --- /dev/null +++ b/core/src/test/java/kafka/log/remote/RemoteLogOffsetReaderTest.java @@ -0,0 +1,198 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import kafka.utils.TestUtils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.AbstractConfig; +import org.apache.kafka.common.metrics.Metrics; +import org.apache.kafka.common.utils.Utils; +import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig; +import org.apache.kafka.server.log.remote.storage.RemoteStorageException; +import org.apache.kafka.server.util.MockTime; +import org.apache.kafka.storage.internals.checkpoint.LeaderEpochCheckpointFile; +import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache; +import org.apache.kafka.storage.internals.log.AsyncOffsetReadFutureHolder; +import org.apache.kafka.storage.internals.log.LogDirFailureChannel; +import org.apache.kafka.storage.internals.log.OffsetResultHolder; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.List; +import java.util.Optional; +import java.util.Properties; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.ReadWriteLock; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +import scala.Option; + +import static org.apache.kafka.common.record.FileRecords.TimestampAndOffset; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class RemoteLogOffsetReaderTest { + + private final MockTime time = new MockTime(); + private final TopicPartition topicPartition = new TopicPartition("test", 0); + private Path logDir; + private LeaderEpochFileCache cache; + private MockRemoteLogManager rlm; + + @BeforeEach + void setUp() throws IOException { + logDir = Files.createTempDirectory("kafka-test"); + LeaderEpochCheckpointFile checkpoint = new LeaderEpochCheckpointFile(TestUtils.tempFile(), new LogDirFailureChannel(1)); + cache = new LeaderEpochFileCache(topicPartition, checkpoint, time.scheduler); + rlm = new MockRemoteLogManager(2, 1, logDir.toString()); + } + + @AfterEach + void tearDown() throws IOException { + rlm.close(); + Utils.delete(logDir.toFile()); + } + + @Test + public void testReadRemoteLog() throws Exception { + AsyncOffsetReadFutureHolder asyncOffsetReadFutureHolder = + rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty); + asyncOffsetReadFutureHolder.taskFuture().get(1, TimeUnit.SECONDS); + assertTrue(asyncOffsetReadFutureHolder.taskFuture().isDone()); + + OffsetResultHolder.FileRecordsOrError result = asyncOffsetReadFutureHolder.taskFuture().get(); + assertFalse(result.hasException()); + assertTrue(result.hasTimestampAndOffset()); + assertEquals(new TimestampAndOffset(100L, 90L, Optional.of(3)), + result.timestampAndOffset().get()); + } + + @Test + public void testTaskQueueFullAndCancelTask() throws Exception { + rlm.pause(); + + List> holderList = new ArrayList<>(); + // Task queue size is 1 and number of threads is 2, so it can accept at-most 3 items + for (int i = 0; i < 3; i++) { + holderList.add(rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty)); + } + assertThrows(TimeoutException.class, () -> holderList.get(0).taskFuture().get(10, TimeUnit.MILLISECONDS)); + assertEquals(0, holderList.stream().filter(h -> h.taskFuture().isDone()).count()); + + assertThrows(RejectedExecutionException.class, () -> + holderList.add(rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty))); + + holderList.get(2).jobFuture().cancel(false); + + rlm.resume(); + for (AsyncOffsetReadFutureHolder holder : holderList) { + if (!holder.jobFuture().isCancelled()) { + holder.taskFuture().get(1, TimeUnit.SECONDS); + } + } + assertEquals(3, holderList.size()); + assertEquals(2, holderList.stream().filter(h -> h.taskFuture().isDone()).count()); + assertEquals(1, holderList.stream().filter(h -> !h.taskFuture().isDone()).count()); + } + + @Test + public void testThrowErrorOnFindOffsetByTimestamp() throws Exception { + RemoteStorageException exception = new RemoteStorageException("Error"); + try (RemoteLogManager rlm = new MockRemoteLogManager(2, 1, logDir.toString()) { + @Override + public Optional findOffsetByTimestamp(TopicPartition tp, + long timestamp, + long startingOffset, + LeaderEpochFileCache leaderEpochCache) throws RemoteStorageException { + throw exception; + } + }) { + AsyncOffsetReadFutureHolder futureHolder + = rlm.asyncOffsetRead(topicPartition, time.milliseconds(), 0L, cache, Option::empty); + futureHolder.taskFuture().get(1, TimeUnit.SECONDS); + + assertTrue(futureHolder.taskFuture().isDone()); + assertTrue(futureHolder.taskFuture().get().hasException()); + assertEquals(exception, futureHolder.taskFuture().get().exception().get()); + } + } + + private static class MockRemoteLogManager extends RemoteLogManager { + private final ReadWriteLock lock = new ReentrantReadWriteLock(); + + public MockRemoteLogManager(int threads, + int taskQueueSize, + String logDir) throws IOException { + super(rlmConfig(threads, taskQueueSize), + 1, + logDir, + "mock-cluster-id", + new MockTime(), + tp -> Optional.empty(), + (tp, logStartOffset) -> { }, + new BrokerTopicStats(true), + new Metrics() + ); + } + + @Override + public Optional findOffsetByTimestamp(TopicPartition tp, + long timestamp, + long startingOffset, + LeaderEpochFileCache leaderEpochCache) throws RemoteStorageException { + lock.readLock().lock(); + try { + return Optional.of(new TimestampAndOffset(100, 90, Optional.of(3))); + } finally { + lock.readLock().unlock(); + } + } + + void pause() { + lock.writeLock().lock(); + } + + void resume() { + lock.writeLock().unlock(); + } + } + + private static RemoteLogManagerConfig rlmConfig(int threads, int taskQueueSize) { + Properties props = new Properties(); + props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true"); + props.put(RemoteLogManagerConfig.REMOTE_STORAGE_MANAGER_CLASS_NAME_PROP, + "org.apache.kafka.server.log.remote.storage.NoOpRemoteStorageManager"); + props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, + "org.apache.kafka.server.log.remote.storage.NoOpRemoteLogMetadataManager"); + props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, threads); + props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_MAX_PENDING_TASKS_PROP, taskQueueSize); + AbstractConfig config = new AbstractConfig(RemoteLogManagerConfig.configDef(), props, false); + return new RemoteLogManagerConfig(config); + } +} \ No newline at end of file diff --git a/core/src/test/java/kafka/log/remote/RemoteLogReaderTest.java b/core/src/test/java/kafka/log/remote/RemoteLogReaderTest.java new file mode 100644 index 0000000000000..400cf3c2dff2c --- /dev/null +++ b/core/src/test/java/kafka/log/remote/RemoteLogReaderTest.java @@ -0,0 +1,132 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote; + +import kafka.utils.TestUtils; + +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.record.Records; +import org.apache.kafka.server.log.remote.quota.RLMQuotaManager; +import org.apache.kafka.server.log.remote.storage.RemoteStorageException; +import org.apache.kafka.storage.internals.log.FetchDataInfo; +import org.apache.kafka.storage.internals.log.LogOffsetMetadata; +import org.apache.kafka.storage.internals.log.RemoteLogReadResult; +import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; +import org.apache.kafka.storage.log.metrics.BrokerTopicStats; + +import com.yammer.metrics.core.Timer; + +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.mockito.ArgumentCaptor; + +import java.io.IOException; +import java.util.concurrent.Callable; +import java.util.function.Consumer; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class RemoteLogReaderTest { + public static final String TOPIC = "test"; + RemoteLogManager mockRLM = mock(RemoteLogManager.class); + BrokerTopicStats brokerTopicStats = null; + RLMQuotaManager mockQuotaManager = mock(RLMQuotaManager.class); + LogOffsetMetadata logOffsetMetadata = new LogOffsetMetadata(100); + Records records = mock(Records.class); + Timer timer = mock(Timer.class); + + @BeforeEach + public void setUp() throws Exception { + TestUtils.clearYammerMetrics(); + brokerTopicStats = new BrokerTopicStats(true); + when(timer.time(any(Callable.class))).thenAnswer(ans -> ans.getArgument(0, Callable.class).call()); + } + + @Test + public void testRemoteLogReaderWithoutError() throws RemoteStorageException, IOException { + FetchDataInfo fetchDataInfo = new FetchDataInfo(logOffsetMetadata, records); + when(records.sizeInBytes()).thenReturn(100); + when(mockRLM.read(any(RemoteStorageFetchInfo.class))).thenReturn(fetchDataInfo); + + Consumer callback = mock(Consumer.class); + RemoteStorageFetchInfo remoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, new TopicPartition(TOPIC, 0), null, null, false); + RemoteLogReader remoteLogReader = + new RemoteLogReader(remoteStorageFetchInfo, mockRLM, callback, brokerTopicStats, mockQuotaManager, timer); + remoteLogReader.call(); + + // verify the callback did get invoked with the expected remoteLogReadResult + ArgumentCaptor remoteLogReadResultArg = ArgumentCaptor.forClass(RemoteLogReadResult.class); + verify(callback, times(1)).accept(remoteLogReadResultArg.capture()); + RemoteLogReadResult actualRemoteLogReadResult = remoteLogReadResultArg.getValue(); + assertFalse(actualRemoteLogReadResult.error.isPresent()); + assertTrue(actualRemoteLogReadResult.fetchDataInfo.isPresent()); + assertEquals(fetchDataInfo, actualRemoteLogReadResult.fetchDataInfo.get()); + + // verify the record method on quota manager was called with the expected value + ArgumentCaptor recordedArg = ArgumentCaptor.forClass(Double.class); + verify(mockQuotaManager, times(1)).record(recordedArg.capture()); + assertEquals(100, recordedArg.getValue()); + + // Verify metrics for remote reads are updated correctly + assertEquals(1, brokerTopicStats.topicStats(TOPIC).remoteFetchRequestRate().count()); + assertEquals(100, brokerTopicStats.topicStats(TOPIC).remoteFetchBytesRate().count()); + assertEquals(0, brokerTopicStats.topicStats(TOPIC).failedRemoteFetchRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteFetchRequestRate().count()); + assertEquals(100, brokerTopicStats.allTopicsStats().remoteFetchBytesRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().failedRemoteFetchRequestRate().count()); + } + + @Test + public void testRemoteLogReaderWithError() throws RemoteStorageException, IOException { + when(mockRLM.read(any(RemoteStorageFetchInfo.class))).thenThrow(new RuntimeException("error")); + + Consumer callback = mock(Consumer.class); + RemoteStorageFetchInfo remoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, new TopicPartition(TOPIC, 0), null, null, false); + RemoteLogReader remoteLogReader = + new RemoteLogReader(remoteStorageFetchInfo, mockRLM, callback, brokerTopicStats, mockQuotaManager, timer); + remoteLogReader.call(); + + // verify the callback did get invoked with the expected remoteLogReadResult + ArgumentCaptor remoteLogReadResultArg = ArgumentCaptor.forClass(RemoteLogReadResult.class); + verify(callback, times(1)).accept(remoteLogReadResultArg.capture()); + RemoteLogReadResult actualRemoteLogReadResult = remoteLogReadResultArg.getValue(); + assertTrue(actualRemoteLogReadResult.error.isPresent()); + assertFalse(actualRemoteLogReadResult.fetchDataInfo.isPresent()); + + // verify the record method on quota manager was called with the expected value + ArgumentCaptor recordedArg = ArgumentCaptor.forClass(Double.class); + verify(mockQuotaManager, times(1)).record(recordedArg.capture()); + assertEquals(0, recordedArg.getValue()); + + // Verify metrics for remote reads are updated correctly + assertEquals(1, brokerTopicStats.topicStats(TOPIC).remoteFetchRequestRate().count()); + assertEquals(0, brokerTopicStats.topicStats(TOPIC).remoteFetchBytesRate().count()); + assertEquals(1, brokerTopicStats.topicStats(TOPIC).failedRemoteFetchRequestRate().count()); + // Verify aggregate metrics + assertEquals(1, brokerTopicStats.allTopicsStats().remoteFetchRequestRate().count()); + assertEquals(0, brokerTopicStats.allTopicsStats().remoteFetchBytesRate().count()); + assertEquals(1, brokerTopicStats.allTopicsStats().failedRemoteFetchRequestRate().count()); + } +} diff --git a/core/src/test/java/kafka/security/JaasModule.java b/core/src/test/java/kafka/security/JaasModule.java index 2930affd27b23..b4901cc7933f5 100644 --- a/core/src/test/java/kafka/security/JaasModule.java +++ b/core/src/test/java/kafka/security/JaasModule.java @@ -16,6 +16,7 @@ */ package kafka.security; +import java.util.Collections; import java.util.HashMap; import java.util.Map; import java.util.Optional; @@ -60,6 +61,10 @@ public static JaasModule oAuthBearerLoginModule(String username, boolean debug) ); } + public static JaasModule plainLoginModule(String username, String password) { + return plainLoginModule(username, password, false, Collections.emptyMap()); + } + public static JaasModule plainLoginModule(String username, String password, boolean debug, Map validUsers) { String name = "org.apache.kafka.common.security.plain.PlainLoginModule"; @@ -76,7 +81,7 @@ public static JaasModule plainLoginModule(String username, String password, bool } public static JaasModule scramLoginModule(String username, String password) { - return scramLoginModule(username, password, false, Map.of()); + return scramLoginModule(username, password, false, Collections.emptyMap()); } public static JaasModule scramLoginModule(String username, String password, boolean debug, Map tokenProps) { diff --git a/core/src/test/java/kafka/security/JaasTestUtils.java b/core/src/test/java/kafka/security/JaasTestUtils.java index 2e81dbe3502df..201a43313d6c5 100644 --- a/core/src/test/java/kafka/security/JaasTestUtils.java +++ b/core/src/test/java/kafka/security/JaasTestUtils.java @@ -31,6 +31,7 @@ import java.io.FileWriter; import java.io.IOException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -245,7 +246,7 @@ public static JaasSection kafkaClientSection(Optional mechanism, Optiona KAFKA_SCRAM_PASSWORD_2, KAFKA_OAUTH_BEARER_USER_2, SERVICE_NAME) - ).map(List::of).orElse(List.of())); + ).map(Collections::singletonList).orElse(Collections.emptyList())); } private static void writeToFile(File file, List jaasSections) throws IOException { @@ -255,17 +256,23 @@ private static void writeToFile(File file, List jaasSections) throw } public static boolean usesSslTransportLayer(SecurityProtocol securityProtocol) { - return switch (securityProtocol) { - case SSL, SASL_SSL -> true; - default -> false; - }; + switch (securityProtocol) { + case SSL: + case SASL_SSL: + return true; + default: + return false; + } } public static boolean usesSaslAuthentication(SecurityProtocol securityProtocol) { - return switch (securityProtocol) { - case SASL_PLAINTEXT, SASL_SSL -> true; - default -> false; - }; + switch (securityProtocol) { + case SASL_PLAINTEXT: + case SASL_SSL: + return true; + default: + return false; + } } public static Properties sslConfigs(ConnectionMode mode, diff --git a/core/src/test/java/kafka/security/minikdc/MiniKdc.java b/core/src/test/java/kafka/security/minikdc/MiniKdc.java index b612543771d81..8c2e90f11a6ae 100644 --- a/core/src/test/java/kafka/security/minikdc/MiniKdc.java +++ b/core/src/test/java/kafka/security/minikdc/MiniKdc.java @@ -64,6 +64,7 @@ import java.nio.charset.StandardCharsets; import java.nio.file.Files; import java.text.MessageFormat; +import java.util.Arrays; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -73,6 +74,7 @@ import java.util.Properties; import java.util.Set; import java.util.UUID; +import java.util.stream.Collectors; /** * Mini KDC based on Apache Directory Server that can be embedded in tests or used from command line as a standalone @@ -136,8 +138,8 @@ public class MiniKdc { * MiniKdc. */ public MiniKdc(Properties config, File workDir) { - Set requiredProperties = Set.of(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT, - INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME); + Set requiredProperties = new HashSet<>(Arrays.asList(ORG_NAME, ORG_DOMAIN, KDC_BIND_ADDRESS, KDC_PORT, + INSTANCE, TRANSPORT, MAX_TICKET_LIFETIME, MAX_RENEWABLE_LIFETIME)); if (!config.keySet().containsAll(requiredProperties)) { throw new IllegalArgumentException("Missing required properties: " + requiredProperties); } @@ -179,7 +181,7 @@ public static void main(String[] args) throws Exception { config.putAll(userConfig); File keytabFile = new File(keytabPath).getAbsoluteFile(); - start(workDir, config, keytabFile, List.of(principals)); + start(workDir, config, keytabFile, Arrays.asList(principals)); } /** @@ -302,7 +304,7 @@ public void createPrincipal(File keytabFile, List principals) throws IOE byte keyVersion = (byte) encryptionKey.getKeyVersion(); return new KeytabEntry(principalWithRealm, 1, timestamp, keyVersion, encryptionKey); }); - }).toList(); + }).collect(Collectors.toList()); keytab.setEntries(entries); keytab.write(keytabFile); } @@ -403,11 +405,16 @@ private void initKdcServer() throws IOException, LdapInvalidDnException { // transport AbstractTransport absTransport; String transport = config.getProperty(TRANSPORT).trim(); - absTransport = switch (transport) { - case "TCP" -> new TcpTransport(bindAddress, port, 3, 50); - case "UDP" -> new UdpTransport(port); - default -> throw new IllegalArgumentException("Invalid transport: " + transport); - }; + switch (transport) { + case "TCP": + absTransport = new TcpTransport(bindAddress, port, 3, 50); + break; + case "UDP": + absTransport = new UdpTransport(port); + break; + default: + throw new IllegalArgumentException("Invalid transport: " + transport); + } kdc.addTransports(absTransport); kdc.setServiceName(config.getProperty(INSTANCE)); kdc.start(); @@ -436,7 +443,7 @@ private void writeKrb5Conf() throws IOException { reader.lines().forEach(line -> stringBuilder.append(line).append("{3}")); } String output = MessageFormat.format(stringBuilder.toString(), realm, host, String.valueOf(port), System.lineSeparator()); - Files.writeString(krb5conf.toPath(), output); + Files.write(krb5conf.toPath(), output.getBytes(StandardCharsets.UTF_8)); } private void refreshJvmKerberosConfig() throws ClassNotFoundException, NoSuchMethodException, InvocationTargetException, IllegalAccessException { diff --git a/core/src/test/java/kafka/security/minikdc/MiniKdcTest.java b/core/src/test/java/kafka/security/minikdc/MiniKdcTest.java index a696c6051199c..181d81980b187 100644 --- a/core/src/test/java/kafka/security/minikdc/MiniKdcTest.java +++ b/core/src/test/java/kafka/security/minikdc/MiniKdcTest.java @@ -21,7 +21,7 @@ import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; -import java.util.List; +import java.util.Collections; import java.util.Properties; public class MiniKdcTest { @@ -38,7 +38,7 @@ public void shouldNotStopImmediatelyWhenStarted() throws Exception { config.setProperty(MiniKdc.MAX_RENEWABLE_LIFETIME, "604800000"); config.setProperty(MiniKdc.INSTANCE, "DefaultKrbServer"); - MiniKdc minikdc = MiniKdc.start(TestUtils.tempDir(), config, TestUtils.tempFile(), List.of("foo")); + MiniKdc minikdc = MiniKdc.start(TestUtils.tempDir(), config, TestUtils.tempFile(), Collections.singletonList("foo")); boolean running = System.getProperty(MiniKdc.JAVA_SECURITY_KRB5_CONF) != null; try { Assertions.assertTrue(running, "MiniKdc stopped immediately; it should not have"); diff --git a/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java b/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java new file mode 100644 index 0000000000000..dcee16b8ec62c --- /dev/null +++ b/core/src/test/java/kafka/server/BootstrapControllersIntegrationTest.java @@ -0,0 +1,345 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.Config; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.CreateTopicsResult; +import org.apache.kafka.clients.admin.DescribeClusterResult; +import org.apache.kafka.clients.admin.DescribeFeaturesResult; +import org.apache.kafka.clients.admin.DescribeMetadataQuorumResult; +import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.FinalizedVersionRange; +import org.apache.kafka.clients.admin.ListOffsetsResult; +import org.apache.kafka.clients.admin.NewPartitionReassignment; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.OffsetSpec; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.admin.UpdateFeaturesOptions; +import org.apache.kafka.clients.admin.UpdateFeaturesResult; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.acl.AccessControlEntry; +import org.apache.kafka.common.acl.AccessControlEntryFilter; +import org.apache.kafka.common.acl.AclBinding; +import org.apache.kafka.common.acl.AclBindingFilter; +import org.apache.kafka.common.acl.AclOperation; +import org.apache.kafka.common.acl.AclPermissionType; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.InvalidUpdateVersionException; +import org.apache.kafka.common.errors.MismatchedEndpointTypeException; +import org.apache.kafka.common.errors.UnsupportedEndpointTypeException; +import org.apache.kafka.common.errors.UnsupportedVersionException; +import org.apache.kafka.common.resource.PatternType; +import org.apache.kafka.common.resource.ResourcePattern; +import org.apache.kafka.common.resource.ResourceType; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.metadata.authorizer.StandardAuthorizer; +import org.apache.kafka.server.common.MetadataVersion; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.Timeout; + +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.stream.Collectors; + +import static org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_CONTROLLERS_CONFIG; +import static org.apache.kafka.clients.admin.AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG; +import static org.apache.kafka.clients.admin.ConfigEntry.ConfigSource.DYNAMIC_BROKER_CONFIG; +import static org.apache.kafka.common.config.ConfigResource.Type.BROKER; +import static org.apache.kafka.server.config.ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@Timeout(120) +@ClusterTestDefaults(types = {Type.KRAFT}) +public class BootstrapControllersIntegrationTest { + private Map adminConfig(ClusterInstance clusterInstance, boolean usingBootstrapControllers) { + return usingBootstrapControllers ? + Collections.singletonMap(BOOTSTRAP_CONTROLLERS_CONFIG, clusterInstance.bootstrapControllers()) : + Collections.singletonMap(BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapServers()); + } + + @ClusterTest + public void testPutBrokersInBootstrapControllersConfig(ClusterInstance clusterInstance) { + Map config = Collections.singletonMap(BOOTSTRAP_CONTROLLERS_CONFIG, clusterInstance.bootstrapServers()); + try (Admin admin = Admin.create(config)) { + ExecutionException exception = assertThrows(ExecutionException.class, + () -> admin.describeCluster().clusterId().get(1, TimeUnit.MINUTES)); + assertNotNull(exception.getCause()); + assertEquals(MismatchedEndpointTypeException.class, exception.getCause().getClass()); + assertEquals("The request was sent to an endpoint of type BROKER, but we wanted " + + "an endpoint of type CONTROLLER", exception.getCause().getMessage()); + } + } + + @ClusterTest + public void testPutControllersInBootstrapBrokersConfig(ClusterInstance clusterInstance) { + Map config = Collections.singletonMap(BOOTSTRAP_SERVERS_CONFIG, clusterInstance.bootstrapControllers()); + try (Admin admin = Admin.create(config)) { + ExecutionException exception = assertThrows(ExecutionException.class, + () -> admin.describeCluster().clusterId().get(1, TimeUnit.MINUTES)); + assertNotNull(exception.getCause()); + assertEquals(UnsupportedVersionException.class, exception.getCause().getClass()); + assertEquals("The node does not support METADATA", exception.getCause().getMessage()); + } + } + + @ClusterTest + public void testDescribeClusterByControllers(ClusterInstance clusterInstance) throws Exception { + testDescribeCluster(clusterInstance, true); + } + + @ClusterTest + public void testDescribeCluster(ClusterInstance clusterInstance) throws Exception { + testDescribeCluster(clusterInstance, false); + } + + private void testDescribeCluster(ClusterInstance clusterInstance, boolean usingBootstrapControllers) throws Exception { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + DescribeClusterResult result = admin.describeCluster(); + assertEquals(clusterInstance.clusterId(), result.clusterId().get(1, TimeUnit.MINUTES)); + if (usingBootstrapControllers) { + assertTrue(clusterInstance.controllerIds().contains(result.controller().get().id())); + } + } + } + + @ClusterTest + public void testDescribeFeaturesByControllers(ClusterInstance clusterInstance) throws Exception { + testDescribeFeatures(clusterInstance, true); + } + + @ClusterTest + public void testDescribeFeatures(ClusterInstance clusterInstance) throws Exception { + testDescribeFeatures(clusterInstance, false); + } + + private void testDescribeFeatures(ClusterInstance clusterInstance, boolean usingBootstrapControllers) throws Exception { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + DescribeFeaturesResult result = admin.describeFeatures(); + short metadataVersion = clusterInstance.config().metadataVersion().featureLevel(); + assertEquals(new FinalizedVersionRange(metadataVersion, metadataVersion), + result.featureMetadata().get(1, TimeUnit.MINUTES).finalizedFeatures(). + get(MetadataVersion.FEATURE_NAME)); + } + } + + @ClusterTest + public void testUpdateFeaturesByControllers(ClusterInstance clusterInstance) { + testUpdateFeatures(clusterInstance, true); + } + + @ClusterTest + public void testUpdateFeatures(ClusterInstance clusterInstance) { + testUpdateFeatures(clusterInstance, false); + } + + private void testUpdateFeatures(ClusterInstance clusterInstance, boolean usingBootstrapControllers) { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + UpdateFeaturesResult result = admin.updateFeatures(Collections.singletonMap("foo.bar.feature", + new FeatureUpdate((short) 1, FeatureUpdate.UpgradeType.UPGRADE)), + new UpdateFeaturesOptions()); + ExecutionException exception = + assertThrows(ExecutionException.class, + () -> result.all().get(1, TimeUnit.MINUTES)); + assertNotNull(exception.getCause()); + assertEquals(InvalidUpdateVersionException.class, exception.getCause().getClass()); + assertTrue(exception.getCause().getMessage().endsWith("does not support this feature."), + "expected message to end with 'does not support this feature', but it was: " + + exception.getCause().getMessage()); + } + } + + @ClusterTest + public void testDescribeMetadataQuorumByControllers(ClusterInstance clusterInstance) throws Exception { + testDescribeMetadataQuorum(clusterInstance, true); + } + + @ClusterTest + public void testDescribeMetadataQuorum(ClusterInstance clusterInstance) throws Exception { + testDescribeMetadataQuorum(clusterInstance, false); + } + + private void testDescribeMetadataQuorum(ClusterInstance clusterInstance, boolean usingBootstrapControllers) throws Exception { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + DescribeMetadataQuorumResult result = admin.describeMetadataQuorum(); + assertTrue(clusterInstance.controllerIds().contains( + result.quorumInfo().get(1, TimeUnit.MINUTES).leaderId())); + } + } + + @ClusterTest + public void testUsingBootstrapControllersOnUnsupportedAdminApi(ClusterInstance clusterInstance) { + try (Admin admin = Admin.create(adminConfig(clusterInstance, true))) { + ListOffsetsResult result = admin.listOffsets(Collections.singletonMap( + new TopicPartition("foo", 0), OffsetSpec.earliest())); + ExecutionException exception = + assertThrows(ExecutionException.class, + () -> result.all().get(1, TimeUnit.MINUTES)); + assertNotNull(exception.getCause()); + assertEquals(UnsupportedEndpointTypeException.class, exception.getCause().getClass()); + assertEquals("This Admin API is not yet supported when communicating directly with " + + "the controller quorum.", exception.getCause().getMessage()); + } + } + + @ClusterTest + public void testIncrementalAlterConfigsByControllers(ClusterInstance clusterInstance) throws Exception { + testIncrementalAlterConfigs(clusterInstance, true); + } + + @ClusterTest + public void testIncrementalAlterConfigs(ClusterInstance clusterInstance) throws Exception { + testIncrementalAlterConfigs(clusterInstance, false); + } + + private void testIncrementalAlterConfigs(ClusterInstance clusterInstance, boolean usingBootstrapControllers) throws Exception { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + int nodeId = usingBootstrapControllers ? + clusterInstance.controllers().values().iterator().next().config().nodeId() : + clusterInstance.brokers().values().iterator().next().config().nodeId(); + ConfigResource nodeResource = new ConfigResource(BROKER, "" + nodeId); + ConfigResource defaultResource = new ConfigResource(BROKER, ""); + Map> alterations = new HashMap<>(); + alterations.put(nodeResource, Collections.singletonList( + new AlterConfigOp(new ConfigEntry("my.custom.config", "foo"), + AlterConfigOp.OpType.SET))); + alterations.put(defaultResource, Collections.singletonList( + new AlterConfigOp(new ConfigEntry("my.custom.config", "bar"), + AlterConfigOp.OpType.SET))); + admin.incrementalAlterConfigs(alterations).all().get(1, TimeUnit.MINUTES); + TestUtils.retryOnExceptionWithTimeout(30_000, () -> { + Config config = admin.describeConfigs(Collections.singletonList(nodeResource)). + all().get(1, TimeUnit.MINUTES).get(nodeResource); + ConfigEntry entry = config.entries().stream(). + filter(e -> e.name().equals("my.custom.config")). + findFirst().get(); + assertEquals(DYNAMIC_BROKER_CONFIG, entry.source(), + "Expected entry for my.custom.config to come from DYNAMIC_BROKER_CONFIG. " + + "Instead, the entry was: " + entry); + }); + } + } + + @ClusterTest(brokers = 3) + public void testAlterReassignmentsWithBootstrapControllers(ClusterInstance clusterInstance) throws ExecutionException, InterruptedException { + String topicName = "foo"; + try (Admin admin = Admin.create(adminConfig(clusterInstance, false))) { + Map> assignments = new HashMap<>(); + assignments.put(0, Arrays.asList(0, 1, 2)); + assignments.put(1, Arrays.asList(1, 2, 0)); + assignments.put(2, Arrays.asList(2, 1, 0)); + CreateTopicsResult createTopicResult = admin.createTopics(Collections.singletonList(new NewTopic(topicName, assignments))); + createTopicResult.all().get(); + waitForTopics(admin, Collections.singleton(topicName)); + + List part0Reassignment = Arrays.asList(2, 1, 0); + List part1Reassignment = Arrays.asList(0, 1, 2); + List part2Reassignment = Arrays.asList(1, 2); + Map> reassignments = new HashMap<>(); + reassignments.put(new TopicPartition(topicName, 0), Optional.of(new NewPartitionReassignment(part0Reassignment))); + reassignments.put(new TopicPartition(topicName, 1), Optional.of(new NewPartitionReassignment(part1Reassignment))); + reassignments.put(new TopicPartition(topicName, 2), Optional.of(new NewPartitionReassignment(part2Reassignment))); + + try (Admin adminWithBootstrapControllers = Admin.create(adminConfig(clusterInstance, true))) { + adminWithBootstrapControllers.alterPartitionReassignments(reassignments).all().get(); + TestUtils.waitForCondition( + () -> adminWithBootstrapControllers.listPartitionReassignments().reassignments().get().isEmpty(), + "The reassignment never completed."); + } + + List> expectedMapping = Arrays.asList(part0Reassignment, part1Reassignment, part2Reassignment); + TestUtils.waitForCondition(() -> { + Map topicInfoMap = admin.describeTopics(Collections.singleton(topicName)).allTopicNames().get(); + if (topicInfoMap.containsKey(topicName)) { + List> currentMapping = translatePartitionInfoToNodeIdList(topicInfoMap.get(topicName).partitions()); + return expectedMapping.equals(currentMapping); + } else { + return false; + } + }, "Timed out waiting for replica assignments for topic " + topicName); + } + } + + private static void waitForTopics(Admin admin, Set expectedTopics) throws InterruptedException { + TestUtils.waitForCondition(() -> admin.listTopics().names().get().containsAll(expectedTopics), + "timed out waiting for topics"); + } + + private static List> translatePartitionInfoToNodeIdList(List partitions) { + return partitions.stream() + .map(partition -> partition.replicas().stream().map(Node::id).collect(Collectors.toList())) + .collect(Collectors.toList()); + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = StandardAuthorizer.SUPER_USERS_CONFIG, value = "User:ANONYMOUS"), + @ClusterConfigProperty(key = AUTHORIZER_CLASS_NAME_CONFIG, value = "org.apache.kafka.metadata.authorizer.StandardAuthorizer") + }) + public void testAclsByControllers(ClusterInstance clusterInstance) throws Exception { + testAcls(clusterInstance, true); + } + + @ClusterTest(serverProperties = { + @ClusterConfigProperty(key = StandardAuthorizer.SUPER_USERS_CONFIG, value = "User:ANONYMOUS"), + @ClusterConfigProperty(key = AUTHORIZER_CLASS_NAME_CONFIG, value = "org.apache.kafka.metadata.authorizer.StandardAuthorizer") + }) + public void testAcls(ClusterInstance clusterInstance) throws Exception { + testAcls(clusterInstance, false); + } + + private void testAcls(ClusterInstance clusterInstance, boolean usingBootstrapControllers) throws Exception { + try (Admin admin = Admin.create(adminConfig(clusterInstance, usingBootstrapControllers))) { + ResourcePattern resourcePattern = new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL); + AccessControlEntry accessControlEntry = new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW); + AclBinding aclBinding = new AclBinding(resourcePattern, accessControlEntry); + assertDoesNotThrow(() -> admin.createAcls(Collections.singleton(aclBinding)).all().get(1, TimeUnit.MINUTES)); + + clusterInstance.waitAcls(new AclBindingFilter(resourcePattern.toFilter(), AccessControlEntryFilter.ANY), + Collections.singleton(accessControlEntry)); + + Collection aclBindings = admin.describeAcls(AclBindingFilter.ANY).values().get(1, TimeUnit.MINUTES); + assertEquals(1, aclBindings.size()); + assertEquals(aclBinding, aclBindings.iterator().next()); + + Collection deletedAclBindings = admin.deleteAcls(Collections.singleton(AclBindingFilter.ANY)).all().get(1, TimeUnit.MINUTES); + assertEquals(1, deletedAclBindings.size()); + assertEquals(aclBinding, deletedAclBindings.iterator().next()); + } + } +} diff --git a/core/src/test/java/kafka/server/LogManagerIntegrationTest.java b/core/src/test/java/kafka/server/LogManagerIntegrationTest.java new file mode 100644 index 0000000000000..3d386283943f5 --- /dev/null +++ b/core/src/test/java/kafka/server/LogManagerIntegrationTest.java @@ -0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.storage.internals.checkpoint.PartitionMetadataFile; +import org.apache.kafka.test.TestUtils; + +import java.io.IOException; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.UUID; +import java.util.concurrent.ExecutionException; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class LogManagerIntegrationTest { + private final ClusterInstance cluster; + + public LogManagerIntegrationTest(ClusterInstance cluster) { + this.cluster = cluster; + } + + @ClusterTest(types = {Type.KRAFT, Type.CO_KRAFT}, brokers = 3) + public void testRestartBrokerNoErrorIfMissingPartitionMetadata() throws IOException, ExecutionException, InterruptedException { + + try (Admin admin = cluster.admin()) { + admin.createTopics(Collections.singletonList(new NewTopic("foo", 1, (short) 3))).all().get(); + } + cluster.waitForTopic("foo", 1); + + Optional partitionMetadataFile = Optional.ofNullable( + cluster.brokers().get(0).logManager() + .getLog(new TopicPartition("foo", 0), false).get() + .partitionMetadataFile().getOrElse(null)); + assertTrue(partitionMetadataFile.isPresent()); + + cluster.brokers().get(0).shutdown(); + try (Admin admin = cluster.admin()) { + TestUtils.waitForCondition(() -> { + List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) + .topicNameValues().get("foo").get().partitions(); + return partitionInfos.get(0).isr().size() == 2; + }, "isr size is not shrink to 2"); + } + + // delete partition.metadata file here to simulate the scenario that partition.metadata not flush to disk yet + partitionMetadataFile.get().delete(); + assertFalse(partitionMetadataFile.get().exists()); + cluster.brokers().get(0).startup(); + // make sure there is no error during load logs + assertTrue(cluster.firstFatalException().isEmpty()); + try (Admin admin = cluster.admin()) { + TestUtils.waitForCondition(() -> { + List partitionInfos = admin.describeTopics(Collections.singletonList("foo")) + .topicNameValues().get("foo").get().partitions(); + return partitionInfos.get(0).isr().size() == 3; + }, "isr size is not expand to 3"); + } + + // make sure topic still work fine + Map producerConfigs = new HashMap<>(); + producerConfigs.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + producerConfigs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerConfigs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + + try (Producer producer = new KafkaProducer<>(producerConfigs)) { + producer.send(new ProducerRecord<>("foo", 0, null, "bar")).get(); + producer.flush(); + } + + Map consumerConfigs = new HashMap<>(); + consumerConfigs.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, cluster.bootstrapServers()); + consumerConfigs.put(ConsumerConfig.GROUP_ID_CONFIG, UUID.randomUUID().toString()); + consumerConfigs.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerConfigs.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + + try (Consumer consumer = new KafkaConsumer<>(consumerConfigs)) { + consumer.assign(Collections.singletonList(new TopicPartition("foo", 0))); + consumer.seekToBeginning(Collections.singletonList(new TopicPartition("foo", 0))); + List values = new ArrayList<>(); + ConsumerRecords records = consumer.poll(Duration.ofMinutes(1)); + for (ConsumerRecord record : records) { + values.add(record.value()); + } + assertEquals(1, values.size()); + assertEquals("bar", values.get(0)); + } + } +} diff --git a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java index c67e941dd7a82..981217ce28770 100644 --- a/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java +++ b/core/src/test/java/kafka/server/ReconfigurableQuorumIntegrationTest.java @@ -24,16 +24,15 @@ import org.apache.kafka.common.Uuid; import org.apache.kafka.common.test.KafkaClusterTestKit; import org.apache.kafka.common.test.TestKitNodes; -import org.apache.kafka.common.test.api.TestKitDefaults; -import org.apache.kafka.raft.QuorumConfig; import org.apache.kafka.server.common.KRaftVersion; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; -import java.util.HashMap; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; import java.util.Map; -import java.util.Set; import java.util.TreeMap; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -84,8 +83,9 @@ public void testCreateAndDestroyReconfigurableCluster() throws Exception { new TestKitNodes.Builder(). setNumBrokerNodes(1). setNumControllerNodes(1). + setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). build() - ).setStandalone(true).build()) { + ).build()) { cluster.format(); cluster.startup(); try (Admin admin = Admin.create(cluster.clientProperties())) { @@ -107,29 +107,19 @@ static Map findVoterDirs(Admin admin) throws Exception { @Test public void testRemoveController() throws Exception { - final var nodes = new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(3). - build(); - - final Map initialVoters = new HashMap<>(); - for (final var controllerNode : nodes.controllerNodes().values()) { - initialVoters.put( - controllerNode.id(), - controllerNode.metadataDirectoryId() - ); - } - - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder(nodes). - setInitialVoterSet(initialVoters). - build() - ) { + try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setNumControllerNodes(3). + setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). + build() + ).build()) { cluster.format(); cluster.startup(); try (Admin admin = Admin.create(cluster.clientProperties())) { TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { Map voters = findVoterDirs(admin); - assertEquals(Set.of(3000, 3001, 3002), voters.keySet()); + assertEquals(new HashSet<>(Arrays.asList(3000, 3001, 3002)), voters.keySet()); for (int replicaId : new int[] {3000, 3001, 3002}) { assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); } @@ -142,29 +132,19 @@ public void testRemoveController() throws Exception { @Test public void testRemoveAndAddSameController() throws Exception { - final var nodes = new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(4). - build(); - - final Map initialVoters = new HashMap<>(); - for (final var controllerNode : nodes.controllerNodes().values()) { - initialVoters.put( - controllerNode.id(), - controllerNode.metadataDirectoryId() - ); - } - - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder(nodes). - setInitialVoterSet(initialVoters). - build() + try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder(). + setNumBrokerNodes(1). + setNumControllerNodes(4). + setFeature(KRaftVersion.FEATURE_NAME, KRaftVersion.KRAFT_VERSION_1.featureLevel()). + build()).build() ) { cluster.format(); cluster.startup(); try (Admin admin = Admin.create(cluster.clientProperties())) { TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { Map voters = findVoterDirs(admin); - assertEquals(Set.of(3000, 3001, 3002, 3003), voters.keySet()); + assertEquals(new HashSet<>(Arrays.asList(3000, 3001, 3002, 3003)), voters.keySet()); for (int replicaId : new int[] {3000, 3001, 3002, 3003}) { assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); } @@ -173,7 +153,7 @@ public void testRemoveAndAddSameController() throws Exception { admin.removeRaftVoter(3000, dirId).all().get(); TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { Map voters = findVoterDirs(admin); - assertEquals(Set.of(3001, 3002, 3003), voters.keySet()); + assertEquals(new HashSet<>(Arrays.asList(3001, 3002, 3003)), voters.keySet()); for (int replicaId : new int[] {3001, 3002, 3003}) { assertNotEquals(Uuid.ZERO_UUID, voters.get(replicaId)); } @@ -181,72 +161,9 @@ public void testRemoveAndAddSameController() throws Exception { admin.addRaftVoter( 3000, dirId, - Set.of(new RaftVoterEndpoint("CONTROLLER", "example.com", 8080)) + Collections.singleton(new RaftVoterEndpoint("CONTROLLER", "example.com", 8080)) ).all().get(); } } } - - @Test - public void testControllersAutoJoinStandaloneVoter() throws Exception { - final var nodes = new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(3). - build(); - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder(nodes). - setConfigProp(QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG, true). - setStandalone(true). - build() - ) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { - Map voters = findVoterDirs(admin); - assertEquals(Set.of(3000, 3001, 3002), voters.keySet()); - for (int replicaId : new int[] {3000, 3001, 3002}) { - assertEquals(nodes.controllerNodes().get(replicaId).metadataDirectoryId(), voters.get(replicaId)); - } - }); - } - } - } - - @Test - public void testNewVoterAutoRemovesAndAdds() throws Exception { - final var nodes = new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(3). - build(); - - // Configure the initial voters with one voter having a different directory ID. - // This simulates the case where the controller failed and is brought back up with a different directory ID. - final Map initialVoters = new HashMap<>(); - final var oldDirectoryId = Uuid.randomUuid(); - for (final var controllerNode : nodes.controllerNodes().values()) { - initialVoters.put( - controllerNode.id(), - controllerNode.id() == TestKitDefaults.CONTROLLER_ID_OFFSET ? - oldDirectoryId : controllerNode.metadataDirectoryId() - ); - } - - try (KafkaClusterTestKit cluster = new KafkaClusterTestKit.Builder(nodes). - setConfigProp(QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG, true). - setInitialVoterSet(initialVoters). - build() - ) { - cluster.format(); - cluster.startup(); - try (Admin admin = Admin.create(cluster.clientProperties())) { - TestUtils.retryOnExceptionWithTimeout(30_000, 10, () -> { - Map voters = findVoterDirs(admin); - assertEquals(Set.of(3000, 3001, 3002), voters.keySet()); - for (int replicaId : new int[] {3000, 3001, 3002}) { - assertEquals(nodes.controllerNodes().get(replicaId).metadataDirectoryId(), voters.get(replicaId)); - } - }); - } - } - } } diff --git a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java index bd7d35507defe..65288fec3325a 100644 --- a/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java +++ b/core/src/test/java/kafka/server/handlers/DescribeTopicPartitionsRequestHandlerTest.java @@ -27,7 +27,6 @@ import org.apache.kafka.common.acl.AclOperation; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.errors.SerializationException; -import org.apache.kafka.common.internals.Plugin; import org.apache.kafka.common.memory.MemoryPool; import org.apache.kafka.common.message.DescribeTopicPartitionsRequestData; import org.apache.kafka.common.message.DescribeTopicPartitionsResponseData; @@ -72,11 +71,13 @@ import java.net.InetAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.Properties; +import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertInstanceOf; @@ -114,7 +115,6 @@ public KafkaPrincipal deserialize(byte[] bytes) throws SerializationException { void testDescribeTopicPartitionsRequest() { // 1. Set up authorizer Authorizer authorizer = mock(Authorizer.class); - Plugin authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name"); String unauthorizedTopic = "unauthorized-topic"; String authorizedTopic = "authorized-topic"; String authorizedNonExistTopic = "authorized-non-exist"; @@ -132,7 +132,7 @@ void testDescribeTopicPartitionsRequest() { return AuthorizationResult.ALLOWED; else return AuthorizationResult.DENIED; - }).toList(); + }).collect(Collectors.toList()); }); // 2. Set up MetadataCache @@ -145,7 +145,7 @@ void testDescribeTopicPartitionsRequest() { BrokerEndpointCollection collection = new BrokerEndpointCollection(); collection.add(brokerEndpoint); - List records = List.of( + List records = Arrays.asList( new RegisterBrokerRecord() .setBrokerId(brokerId) .setBrokerEpoch(0) @@ -158,33 +158,33 @@ void testDescribeTopicPartitionsRequest() { new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(1) - .setReplicas(List.of(0, 1, 2)) + .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(2)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(0) - .setReplicas(List.of(0, 1, 2)) + .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(2)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(unauthorizedTopicId) .setPartitionId(0) - .setReplicas(List.of(0, 1, 3)) + .setReplicas(Arrays.asList(0, 1, 3)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(3)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(3)) .setLeaderEpoch(0) .setPartitionEpoch(2) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()) @@ -192,12 +192,12 @@ void testDescribeTopicPartitionsRequest() { KRaftMetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1); updateKraftMetadataCache(metadataCache, records); DescribeTopicPartitionsRequestHandler handler = - new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizerPlugin)), createKafkaDefaultConfig()); + new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizer)), createKafkaDefaultConfig()); // 3.1 Basic test DescribeTopicPartitionsRequest describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest( new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic) )) @@ -225,7 +225,7 @@ void testDescribeTopicPartitionsRequest() { // 3.2 With cursor describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(unauthorizedTopic) )) @@ -314,7 +314,6 @@ void testDescribeTopicPartitionsRequest() { void testDescribeTopicPartitionsRequestWithEdgeCases() { // 1. Set up authorizer Authorizer authorizer = mock(Authorizer.class); - Plugin authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name"); String authorizedTopic = "authorized-topic1"; String authorizedTopic2 = "authorized-topic2"; @@ -330,7 +329,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { return AuthorizationResult.ALLOWED; else return AuthorizationResult.DENIED; - }).toList(); + }).collect(Collectors.toList()); }); // 2. Set up MetadataCache @@ -343,7 +342,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { BrokerEndpointCollection collection = new BrokerEndpointCollection(); collection.add(brokerEndpoint); - List records = List.of( + List records = Arrays.asList( new RegisterBrokerRecord() .setBrokerId(brokerId) .setBrokerEpoch(0) @@ -356,33 +355,33 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(0) - .setReplicas(List.of(0, 1, 2)) + .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(2)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(authorizedTopicId) .setPartitionId(1) - .setReplicas(List.of(0, 1, 2)) + .setReplicas(Arrays.asList(0, 1, 2)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(2)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(2)) .setLeaderEpoch(0) .setPartitionEpoch(1) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()), new PartitionRecord() .setTopicId(authorizedTopicId2) .setPartitionId(0) - .setReplicas(List.of(0, 1, 3)) + .setReplicas(Arrays.asList(0, 1, 3)) .setLeader(0) - .setIsr(List.of(0)) - .setEligibleLeaderReplicas(List.of(1)) - .setLastKnownElr(List.of(3)) + .setIsr(Arrays.asList(0)) + .setEligibleLeaderReplicas(Arrays.asList(1)) + .setLastKnownElr(Arrays.asList(3)) .setLeaderEpoch(0) .setPartitionEpoch(2) .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED.value()) @@ -390,11 +389,11 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { KRaftMetadataCache metadataCache = new KRaftMetadataCache(0, () -> KRaftVersion.KRAFT_VERSION_1); updateKraftMetadataCache(metadataCache, records); DescribeTopicPartitionsRequestHandler handler = - new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizerPlugin)), createKafkaDefaultConfig()); + new DescribeTopicPartitionsRequestHandler(metadataCache, new AuthHelper(scala.Option.apply(authorizer)), createKafkaDefaultConfig()); // 3.1 With cursor point to the first one DescribeTopicPartitionsRequest describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2) )) @@ -425,7 +424,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { // 3.2 With cursor point to the second one. The first topic should be ignored. describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2) )) @@ -449,7 +448,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { // 3.3 With cursor point to a non existing topic. Exception should be thrown if not querying all the topics. describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2) )) @@ -464,7 +463,7 @@ void testDescribeTopicPartitionsRequestWithEdgeCases() { // 3.4 With cursor point to a negative partition id. Exception should be thrown if not querying all the topics. describeTopicPartitionsRequest = new DescribeTopicPartitionsRequest(new DescribeTopicPartitionsRequestData() - .setTopics(List.of( + .setTopics(Arrays.asList( new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic), new DescribeTopicPartitionsRequestData.TopicRequest().setName(authorizedTopic2) )) @@ -493,7 +492,7 @@ void updateKraftMetadataCache(KRaftMetadataCache kRaftMetadataCache, List delta.replay(record)); kRaftMetadataCache.setImage(delta.apply(new MetadataProvenance(100L, 10, 1000L, true))); } diff --git a/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java b/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java new file mode 100644 index 0000000000000..8db4bd4d898b0 --- /dev/null +++ b/core/src/test/java/kafka/server/integration/EligibleLeaderReplicasIntegrationTest.java @@ -0,0 +1,463 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server.integration; +import kafka.integration.KafkaServerTestHarness; +import kafka.server.KafkaBroker; +import kafka.server.KafkaConfig; +import kafka.utils.Logging; +import kafka.utils.TestUtils; + +import org.apache.kafka.clients.CommonClientConfigs; +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.FeatureUpdate; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.TopicDescription; +import org.apache.kafka.clients.admin.UpdateFeaturesOptions; +import org.apache.kafka.clients.consumer.Consumer; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.Producer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.common.Node; +import org.apache.kafka.common.TopicPartitionInfo; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.config.TopicConfig; +import org.apache.kafka.common.security.auth.SecurityProtocol; +import org.apache.kafka.common.serialization.StringDeserializer; +import org.apache.kafka.common.serialization.StringSerializer; +import org.apache.kafka.server.common.EligibleLeaderReplicasVersion; +import org.apache.kafka.server.common.MetadataVersion; +import org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.io.File; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.ExecutionException; +import java.util.function.BiFunction; +import java.util.stream.Collectors; + +import scala.collection.JavaConverters; +import scala.collection.Seq; +import scala.collection.mutable.HashMap; + +import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class EligibleLeaderReplicasIntegrationTest extends KafkaServerTestHarness implements Logging { + private String bootstrapServer; + private String testTopicName; + private Admin adminClient; + + @Override + public MetadataVersion metadataVersion() { + return MetadataVersion.IBP_4_0_IV1; + } + + @Override + public Seq generateConfigs() { + List brokerConfigs = new ArrayList<>(); + brokerConfigs.addAll(scala.collection.JavaConverters.seqAsJavaList(TestUtils.createBrokerConfigs( + 5, // The tests require 4 brokers to host the partition. However, we need the 5th broker to handle the admin client requests. + true, + true, + scala.Option.empty(), + scala.Option.empty(), + scala.Option.empty(), + true, + false, + false, + false, + new HashMap<>(), + 1, + false, + 1, + (short) 4, + 0, + false + ))); + List configs = new ArrayList<>(); + for (Properties props : brokerConfigs) { + configs.add(KafkaConfig.fromProps(props)); + } + return JavaConverters.asScalaBuffer(configs).toSeq(); + } + + @BeforeEach + @Override + public void setUp(TestInfo info) { + super.setUp(info); + // create adminClient + Properties props = new Properties(); + bootstrapServer = bootstrapServers(listenerName()); + props.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); + adminClient = Admin.create(props); + adminClient.updateFeatures( + Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, + new FeatureUpdate(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)), + new UpdateFeaturesOptions() + ); + testTopicName = String.format("%s-%s", info.getTestMethod().get().getName(), "ELR-test"); + } + + @AfterEach + public void close() throws Exception { + if (adminClient != null) adminClient.close(); + } + + @ParameterizedTest + @ValueSource(strings = {"kraft"}) + public void testHighWatermarkShouldNotAdvanceIfUnderMinIsr(String quorum) throws ExecutionException, InterruptedException { + adminClient.createTopics( + Collections.singletonList(new NewTopic(testTopicName, 1, (short) 4))).all().get(); + TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); + + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); + Collection ops = new ArrayList<>(); + ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); + Map> configOps = Collections.singletonMap(configResource, ops); + // alter configs on target cluster + adminClient.incrementalAlterConfigs(configOps).all().get(); + Producer producer = null; + Consumer consumer = null; + try { + TopicDescription testTopicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName); + TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); + List initialReplicas = topicPartitionInfo.replicas(); + assertEquals(4, topicPartitionInfo.isr().size()); + assertEquals(0, topicPartitionInfo.elr().size()); + assertEquals(0, topicPartitionInfo.lastKnownElr().size()); + + Properties producerProps = new Properties(); + producerProps.putIfAbsent(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerProps.putIfAbsent(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); + // Use Ack=1 for the producer. + producerProps.put(ProducerConfig.ACKS_CONFIG, "1"); + producer = new KafkaProducer(producerProps); + + Properties consumerProps = new Properties(); + consumerProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServer); + consumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, "test"); + consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "10"); + consumerProps.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); + consumerProps.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumerProps.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); + consumer = new KafkaConsumer<>(consumerProps); + consumer.subscribe(Collections.singleton(testTopicName)); + + producer.send(new ProducerRecord<>(testTopicName, "0", "0")).get(); + waitUntilOneMessageIsConsumed(consumer); + + killBroker(initialReplicas.get(0).id()); + killBroker(initialReplicas.get(1).id()); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 2 && elrSize == 1; + }); + + // Now the partition is under min ISR. HWM should not advance. + producer.send(new ProducerRecord<>(testTopicName, "1", "1")).get(); + Thread.sleep(100); + assertEquals(0, consumer.poll(Duration.ofSeconds(1L)).count()); + + // Restore the min ISR and the previous log should be visible. + startBroker(initialReplicas.get(1).id()); + startBroker(initialReplicas.get(0).id()); + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 4 && elrSize == 0; + }); + + waitUntilOneMessageIsConsumed(consumer); + } finally { + restartDeadBrokers(false); + if (consumer != null) consumer.close(); + if (producer != null) producer.close(); + } + } + + void waitUntilOneMessageIsConsumed(Consumer consumer) { + TestUtils.waitUntilTrue( + () -> { + try { + ConsumerRecords record = consumer.poll(Duration.ofMillis(100L)); + return record.count() >= 1; + } catch (Exception e) { + return false; + } + }, + () -> "fail to consume messages", + DEFAULT_MAX_WAIT_MS, 100L + ); + } + + @ParameterizedTest + @ValueSource(strings = {"kraft"}) + public void testElrMemberCanBeElected(String quorum) throws ExecutionException, InterruptedException { + adminClient.createTopics( + Collections.singletonList(new NewTopic(testTopicName, 1, (short) 4))).all().get(); + TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); + + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); + Collection ops = new ArrayList<>(); + ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); + Map> configOps = Collections.singletonMap(configResource, ops); + // alter configs on target cluster + adminClient.incrementalAlterConfigs(configOps).all().get(); + + try { + TopicDescription testTopicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName); + TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); + List initialReplicas = topicPartitionInfo.replicas(); + assertEquals(4, topicPartitionInfo.isr().size()); + assertEquals(0, topicPartitionInfo.elr().size()); + assertEquals(0, topicPartitionInfo.lastKnownElr().size()); + + killBroker(initialReplicas.get(0).id()); + killBroker(initialReplicas.get(1).id()); + killBroker(initialReplicas.get(2).id()); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 1 && elrSize == 2; + }); + + killBroker(initialReplicas.get(3).id()); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 0 && elrSize == 3; + }); + + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + assertEquals(1, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); + int expectLastKnownLeader = initialReplicas.get(3).id(); + assertEquals(expectLastKnownLeader, topicPartitionInfo.lastKnownElr().get(0).id(), topicPartitionInfo.toString()); + + // At this point, all the replicas are failed and the last know leader is No.3 and 3 members in the ELR. + // Restart one broker of the ELR and it should be the leader. + + int expectLeader = topicPartitionInfo.elr().stream() + .filter(node -> node.id() != expectLastKnownLeader).collect(Collectors.toList()).get(0).id(); + + startBroker(expectLeader); + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 1 && elrSize == 2; + }); + + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); + assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString()); + + // Start another 2 brokers and the ELR fields should be cleaned. + topicPartitionInfo.replicas().stream().filter(node -> node.id() != expectLeader).limit(2) + .forEach(node -> startBroker(node.id())); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 3 && elrSize == 0; + }); + + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + assertEquals(0, topicPartitionInfo.lastKnownElr().size(), topicPartitionInfo.toString()); + assertEquals(expectLeader, topicPartitionInfo.leader().id(), topicPartitionInfo.toString()); + } finally { + restartDeadBrokers(false); + } + } + + @ParameterizedTest + @ValueSource(strings = {"kraft"}) + public void testElrMemberShouldBeKickOutWhenUncleanShutdown(String quorum) throws ExecutionException, InterruptedException { + adminClient.createTopics( + Collections.singletonList(new NewTopic(testTopicName, 1, (short) 4))).all().get(); + TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); + + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); + Collection ops = new ArrayList<>(); + ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); + Map> configOps = Collections.singletonMap(configResource, ops); + // alter configs on target cluster + adminClient.incrementalAlterConfigs(configOps).all().get(); + + try { + TopicDescription testTopicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName); + TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); + List initialReplicas = topicPartitionInfo.replicas(); + assertEquals(4, topicPartitionInfo.isr().size()); + assertEquals(0, topicPartitionInfo.elr().size()); + assertEquals(0, topicPartitionInfo.lastKnownElr().size()); + + killBroker(initialReplicas.get(0).id()); + killBroker(initialReplicas.get(1).id()); + killBroker(initialReplicas.get(2).id()); + killBroker(initialReplicas.get(3).id()); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 0 && elrSize == 3; + }); + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + + int brokerToBeUncleanShutdown = topicPartitionInfo.elr().get(0).id(); + KafkaBroker broker = brokers().find(b -> { + return b.config().brokerId() == brokerToBeUncleanShutdown; + }).get(); + Seq dirs = broker.logManager().liveLogDirs(); + assertEquals(1, dirs.size()); + CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString()); + assertTrue(handler.exists()); + assertDoesNotThrow(() -> handler.delete()); + + // After remove the clean shutdown file, the broker should report unclean shutdown during restart. + startBroker(brokerToBeUncleanShutdown); + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 0 && elrSize == 2; + }); + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + assertTrue(topicPartitionInfo.leader() == null); + assertEquals(1, topicPartitionInfo.lastKnownElr().size()); + } finally { + restartDeadBrokers(false); + } + } + + /* + This test is only valid for KIP-966 part 1. When the unclean recovery is implemented, it should be removed. + */ + @ParameterizedTest + @ValueSource(strings = {"kraft"}) + public void testLastKnownLeaderShouldBeElectedIfEmptyElr(String quorum) throws ExecutionException, InterruptedException { + adminClient.createTopics( + Collections.singletonList(new NewTopic(testTopicName, 1, (short) 4))).all().get(); + TestUtils.waitForPartitionMetadata(brokers(), testTopicName, 0, 1000); + + ConfigResource configResource = new ConfigResource(ConfigResource.Type.TOPIC, testTopicName); + Collection ops = new ArrayList<>(); + ops.add(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3"), AlterConfigOp.OpType.SET)); + Map> configOps = Collections.singletonMap(configResource, ops); + // alter configs on target cluster + adminClient.incrementalAlterConfigs(configOps).all().get(); + + try { + TopicDescription testTopicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName); + TopicPartitionInfo topicPartitionInfo = testTopicDescription.partitions().get(0); + List initialReplicas = topicPartitionInfo.replicas(); + assertEquals(4, topicPartitionInfo.isr().size()); + assertEquals(0, topicPartitionInfo.elr().size()); + assertEquals(0, topicPartitionInfo.lastKnownElr().size()); + + killBroker(initialReplicas.get(0).id()); + killBroker(initialReplicas.get(1).id()); + killBroker(initialReplicas.get(2).id()); + killBroker(initialReplicas.get(3).id()); + + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 0 && elrSize == 3; + }); + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + int lastKnownLeader = topicPartitionInfo.lastKnownElr().get(0).id(); + + Set initialReplicaSet = initialReplicas.stream().map(node -> node.id()).collect(Collectors.toSet()); + brokers().foreach(broker -> { + if (initialReplicaSet.contains(broker.config().brokerId())) { + Seq dirs = broker.logManager().liveLogDirs(); + assertEquals(1, dirs.size()); + CleanShutdownFileHandler handler = new CleanShutdownFileHandler(dirs.apply(0).toString()); + assertDoesNotThrow(() -> handler.delete()); + } + return true; + }); + + + // After remove the clean shutdown file, the broker should report unclean shutdown during restart. + topicPartitionInfo.replicas().stream().forEach(replica -> { + if (replica.id() != lastKnownLeader) startBroker(replica.id()); + }); + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize == 0 && elrSize == 1; + }); + topicPartitionInfo = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + assertTrue(topicPartitionInfo.leader() == null); + assertEquals(1, topicPartitionInfo.lastKnownElr().size()); + + // Now if the last known leader goes through unclean shutdown, it will still be elected. + startBroker(lastKnownLeader); + waitForIsrAndElr((isrSize, elrSize) -> { + return isrSize > 0 && elrSize == 0; + }); + + TestUtils.waitUntilTrue( + () -> { + try { + TopicPartitionInfo partition = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName).partitions().get(0); + if (partition.leader() == null) return false; + return partition.lastKnownElr().isEmpty() && partition.elr().isEmpty() && partition.leader().id() == lastKnownLeader; + } catch (Exception e) { + return false; + } + }, + () -> String.format("Partition metadata for %s is not correct", testTopicName), + DEFAULT_MAX_WAIT_MS, 100L + ); + } finally { + restartDeadBrokers(false); + } + } + + void waitForIsrAndElr(BiFunction isIsrAndElrSizeSatisfied) { + TestUtils.waitUntilTrue( + () -> { + try { + TopicDescription topicDescription = adminClient.describeTopics(Collections.singletonList(testTopicName)) + .allTopicNames().get().get(testTopicName); + TopicPartitionInfo partition = topicDescription.partitions().get(0); + return isIsrAndElrSizeSatisfied.apply(partition.isr().size(), partition.elr().size()); + } catch (Exception e) { + return false; + } + }, + () -> String.format("Partition metadata for %s is not propagated", testTopicName), + DEFAULT_MAX_WAIT_MS, 100L); + } +} diff --git a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java index b5c8740639c2f..e2a05fc65f6ac 100644 --- a/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java +++ b/core/src/test/java/kafka/server/logger/RuntimeLoggerManagerTest.java @@ -16,12 +16,13 @@ */ package kafka.server.logger; +import kafka.utils.Log4jController; + import org.apache.kafka.clients.admin.AlterConfigOp; import org.apache.kafka.clients.admin.AlterConfigOp.OpType; import org.apache.kafka.common.errors.InvalidConfigurationException; import org.apache.kafka.common.errors.InvalidRequestException; import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.AlterableConfig; -import org.apache.kafka.server.logger.LoggingController; import org.junit.jupiter.api.Assertions; import org.junit.jupiter.api.Test; @@ -30,7 +31,7 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.util.List; +import java.util.Arrays; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -41,7 +42,7 @@ public class RuntimeLoggerManagerTest { @Test public void testValidateSetLogLevelConfig() { - MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). + MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). setName(LOG.getName()). setConfigOperation(OpType.SET.id()). setValue("TRACE"))); @@ -49,7 +50,7 @@ public void testValidateSetLogLevelConfig() { @Test public void testValidateDeleteLogLevelConfig() { - MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). + MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). setName(LOG.getName()). setConfigOperation(OpType.DELETE.id()). setValue(""))); @@ -61,7 +62,7 @@ public void testOperationNotAllowed(byte id) { OpType opType = AlterConfigOp.OpType.forId(id); assertEquals(opType + " operation is not allowed for the BROKER_LOGGER resource", Assertions.assertThrows(InvalidRequestException.class, - () -> MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). + () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). setName(LOG.getName()). setConfigOperation(id). setValue("TRACE")))).getMessage()); @@ -72,7 +73,7 @@ public void testValidateBogusLogLevelNameNotAllowed() { assertEquals("Cannot set the log level of " + LOG.getName() + " to BOGUS as it is not " + "a supported log level. Valid log levels are DEBUG, ERROR, FATAL, INFO, TRACE, WARN", Assertions.assertThrows(InvalidConfigurationException.class, - () -> MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). + () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). setName(LOG.getName()). setConfigOperation(OpType.SET.id()). setValue("BOGUS")))).getMessage()); @@ -80,19 +81,19 @@ public void testValidateBogusLogLevelNameNotAllowed() { @Test public void testValidateSetRootLogLevelConfig() { - MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). - setName(LoggingController.ROOT_LOGGER). + MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). + setName(Log4jController.ROOT_LOGGER()). setConfigOperation(OpType.SET.id()). setValue("TRACE"))); } @Test public void testValidateRemoveRootLogLevelConfigNotAllowed() { - assertEquals("Removing the log level of the " + LoggingController.ROOT_LOGGER + + assertEquals("Removing the log level of the " + Log4jController.ROOT_LOGGER() + " logger is not allowed", Assertions.assertThrows(InvalidRequestException.class, - () -> MANAGER.validateLogLevelConfigs(List.of(new AlterableConfig(). - setName(LoggingController.ROOT_LOGGER). + () -> MANAGER.validateLogLevelConfigs(Arrays.asList(new AlterableConfig(). + setName(Log4jController.ROOT_LOGGER()). setConfigOperation(OpType.DELETE.id()). setValue("")))).getMessage()); } diff --git a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java index ffa9f8b11456a..11d3e26eaf39a 100644 --- a/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java +++ b/core/src/test/java/kafka/server/share/DelayedShareFetchTest.java @@ -17,82 +17,52 @@ package kafka.server.share; import kafka.cluster.Partition; -import kafka.server.QuotaFactory; +import kafka.server.LogReadResult; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.errors.KafkaStorageException; -import org.apache.kafka.common.errors.TimeoutException; import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.Records; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.requests.FetchRequest; -import org.apache.kafka.common.utils.Time; -import org.apache.kafka.server.LogReadResult; -import org.apache.kafka.server.log.remote.storage.RemoteLogManager; import org.apache.kafka.server.purgatory.DelayedOperationKey; import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; -import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; +import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.metrics.ShareGroupMetrics; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.server.storage.log.FetchPartitionData; -import org.apache.kafka.server.util.MockTime; import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.server.util.timer.TimerTask; -import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.LogOffsetSnapshot; -import org.apache.kafka.storage.internals.log.RemoteLogReadResult; -import org.apache.kafka.storage.internals.log.RemoteStorageFetchInfo; -import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; -import org.mockito.MockedStatic; import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; -import java.util.LinkedHashSet; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.OptionalInt; -import java.util.OptionalLong; -import java.util.Set; import java.util.concurrent.CompletableFuture; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; import java.util.function.BiConsumer; -import java.util.function.Consumer; -import java.util.stream.Collectors; -import scala.Option; -import scala.Tuple2; -import scala.collection.Seq; -import scala.jdk.javaapi.CollectionConverters; - -import static kafka.server.share.PendingRemoteFetches.RemoteFetch; import static kafka.server.share.SharePartitionManagerTest.DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL; -import static kafka.server.share.SharePartitionManagerTest.REMOTE_FETCH_MAX_WAIT_MS; +import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; import static kafka.server.share.SharePartitionManagerTest.buildLogReadResult; import static kafka.server.share.SharePartitionManagerTest.mockReplicaManagerDelayedShareFetch; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; -import static org.junit.jupiter.api.Assertions.assertNotNull; -import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -101,7 +71,6 @@ import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; -import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; @@ -109,20 +78,15 @@ public class DelayedShareFetchTest { private static final int MAX_WAIT_MS = 5000; - private static final int BATCH_SIZE = 500; private static final int MAX_FETCH_RECORDS = 100; - private static final FetchParams FETCH_PARAMS = new FetchParams( + private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); - private static final FetchDataInfo REMOTE_FETCH_INFO = new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), - MemoryRecords.EMPTY, false, Optional.empty(), Optional.of(mock(RemoteStorageFetchInfo.class))); - private static final BrokerTopicStats BROKER_TOPIC_STATS = new BrokerTopicStats(); private Timer mockTimer; @BeforeEach public void setUp() { - kafka.utils.TestUtils.clearYammerMetrics(); mockTimer = new SystemTimerReaper("DelayedShareFetchTestReaper", new SystemTimer("DelayedShareFetchTestTimer")); } @@ -138,54 +102,35 @@ public void testDelayedShareFetchTryCompleteReturnsFalseDueToNonAcquirablePartit Uuid topicId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - ReplicaManager replicaManager = mock(ReplicaManager.class); - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(new MockTime()); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) - .withShareGroupMetrics(shareGroupMetrics) - .withFetchId(fetchId) - .withReplicaManager(replicaManager) .build()); - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - // Since there is no partition that can be acquired, tryComplete should return false. assertFalse(delayedShareFetch.tryComplete()); assertFalse(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - // Metrics shall not be recorded as no partition is acquired. - assertNull(shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId)); - assertNull(shareGroupMetrics.topicPartitionsFetchRatio(groupId)); - assertEquals(0, delayedShareFetch.expiredRequestMeter().count()); - delayedShareFetch.lock().unlock(); } @@ -196,24 +141,29 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp0.acquire(any(), anyInt(), any())).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // We are testing the case when the share partition is getting fetched for the first time, so for the first time // the fetchOffsetMetadata will return empty. Post the readFromLog call, the fetchOffsetMetadata will be @@ -224,38 +174,15 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, 1); mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); BiConsumer exceptionHandler = mockExceptionHandler(); - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(100L).thenReturn(110L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withExceptionHandler(exceptionHandler) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withShareGroupMetrics(shareGroupMetrics) - .withTime(time) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); // Since sp1 cannot be acquired, tryComplete should return false. @@ -263,12 +190,6 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnFirstFetch() { assertFalse(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - // Though the request is not completed but sp0 was acquired and hence the metric should be recorded. - assertEquals(1, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(10, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).sum()); - // Since the request is not completed, the fetch ratio should be null. - assertNull(shareGroupMetrics.topicPartitionsFetchRatio(groupId)); - delayedShareFetch.lock().unlock(); Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); } @@ -280,24 +201,29 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp0.acquire(any(), anyInt(), any())).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // We are testing the case when the share partition has been fetched before, hence we are mocking positionDiff // functionality to give the file position difference as 1 byte, so it doesn't satisfy the minBytes(2). @@ -307,27 +233,12 @@ public void testTryCompleteWhenMinBytesNotSatisfiedOnSubsequentFetch() { mockTopicIdPartitionFetchBytes(replicaManager, tp0, hwmOffsetMetadata); BiConsumer exceptionHandler = mockExceptionHandler(); - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withExceptionHandler(exceptionHandler) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); // Since sp1 cannot be acquired, tryComplete should return false. @@ -346,46 +257,36 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); - - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); - - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(120L).thenReturn(140L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withShareGroupMetrics(shareGroupMetrics) - .withTime(time) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); // Since sp1 can be acquired, tryComplete should return true. @@ -393,11 +294,6 @@ public void testDelayedShareFetchTryCompleteReturnsTrue() { assertTrue(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - assertEquals(1, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(20, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).sum()); - assertEquals(1, shareGroupMetrics.topicPartitionsFetchRatio(groupId).count()); - assertEquals(50, shareGroupMetrics.topicPartitionsFetchRatio(groupId).sum()); - delayedShareFetch.lock().unlock(); } @@ -408,37 +304,31 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(false); when(sp1.canAcquireRecords()).thenReturn(false); - - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(90L).thenReturn(140L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) - .withShareGroupMetrics(shareGroupMetrics) - .withTime(time) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); @@ -449,12 +339,6 @@ public void testEmptyFutureReturnedByDelayedShareFetchOnComplete() { assertTrue(delayedShareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - // As the request is completed by onComplete then both metrics shall be recorded. - assertEquals(1, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(50, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).sum()); - assertEquals(1, shareGroupMetrics.topicPartitionsFetchRatio(groupId).count()); - assertEquals(0, shareGroupMetrics.topicPartitionsFetchRatio(groupId).sum()); - delayedShareFetch.lock().unlock(); } @@ -465,43 +349,33 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); - - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(10L).thenReturn(140L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - Uuid fetchId = Uuid.randomUuid(); + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withShareGroupMetrics(shareGroupMetrics) - .withTime(time) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); delayedShareFetch.forceComplete(); @@ -514,11 +388,6 @@ public void testReplicaManagerFetchShouldHappenOnComplete() { assertTrue(shareFetch.isCompleted()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - assertEquals(1, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(130, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).sum()); - assertEquals(1, shareGroupMetrics.topicPartitionsFetchRatio(groupId).count()); - assertEquals(50, shareGroupMetrics.topicPartitionsFetchRatio(groupId).sum()); - delayedShareFetch.lock().unlock(); } @@ -528,6 +397,8 @@ public void testToCompleteAnAlreadyCompletedFuture() { Uuid topicId = Uuid.randomUuid(); ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); @@ -536,29 +407,23 @@ public void testToCompleteAnAlreadyCompletedFuture() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); + future, partitionMaxBytes, MAX_FETCH_RECORDS); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(new MockTime()); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) - .withShareGroupMetrics(shareGroupMetrics) - .withFetchId(fetchId) .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.isCompleted()); // Force completing the share fetch request for the first time should complete the future with an empty map. delayedShareFetch.forceComplete(); assertTrue(delayedShareFetch.isCompleted()); // Verifying that the first forceComplete calls acquirablePartitions method in DelayedShareFetch. - Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(sharePartitions); + Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); assertEquals(0, future.join().size()); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); @@ -568,13 +433,9 @@ public void testToCompleteAnAlreadyCompletedFuture() { delayedShareFetch.forceComplete(); assertTrue(delayedShareFetch.isCompleted()); // Verifying that the second forceComplete does not call acquirablePartitions method in DelayedShareFetch. - Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(sharePartitions); + Mockito.verify(delayedShareFetch, times(1)).acquirablePartitions(); Mockito.verify(delayedShareFetch, times(0)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - // Assert both metrics shall be recorded only once. - assertEquals(1, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(1, shareGroupMetrics.topicPartitionsFetchRatio(groupId).count()); - delayedShareFetch.lock().unlock(); } @@ -586,11 +447,17 @@ public void testForceCompleteTriggersDelayedActionsQueue() { TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(topicId, new TopicPartition("foo", 1)); TopicIdPartition tp2 = new TopicIdPartition(topicId, new TopicPartition("foo", 2)); - List topicIdPartitions1 = List.of(tp0, tp1); + Map partitionMaxBytes1 = new HashMap<>(); + partitionMaxBytes1.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes1.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); + // No share partition is available for acquiring initially. + when(sp0.maybeAcquireFetchLock()).thenReturn(false); + when(sp1.maybeAcquireFetchLock()).thenReturn(false); + when(sp2.maybeAcquireFetchLock()).thenReturn(false); LinkedHashMap sharePartitions1 = new LinkedHashMap<>(); sharePartitions1.put(tp0, sp0); @@ -598,43 +465,22 @@ public void testForceCompleteTriggersDelayedActionsQueue() { sharePartitions1.put(tp2, sp2); ShareFetch shareFetch1 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), topicIdPartitions1, BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes1, MAX_FETCH_RECORDS); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, replicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(replicaManager, delayedShareFetchPurgatory); List delayedShareFetchWatchKeys = new ArrayList<>(); - topicIdPartitions1.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - Partition p2 = mock(Partition.class); - when(p2.isLeader()).thenReturn(true); + partitionMaxBytes1.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); - - Uuid fetchId1 = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch1 = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch1) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions1) - .withFetchId(fetchId1) .build(); - // No share partition is available for acquiring initially. - when(sp0.maybeAcquireFetchLock(fetchId1)).thenReturn(false); - when(sp1.maybeAcquireFetchLock(fetchId1)).thenReturn(false); - when(sp2.maybeAcquireFetchLock(fetchId1)).thenReturn(false); - // We add a delayed share fetch entry to the purgatory which will be waiting for completion since neither of the // partitions in the share fetch request can be acquired. delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch1, delayedShareFetchWatchKeys); @@ -644,33 +490,30 @@ public void testForceCompleteTriggersDelayedActionsQueue() { assertTrue(delayedShareFetch1.lock().tryLock()); delayedShareFetch1.lock().unlock(); + Map partitionMaxBytes2 = new HashMap<>(); + partitionMaxBytes2.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes2.put(tp2, PARTITION_MAX_BYTES); ShareFetch shareFetch2 = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + new CompletableFuture<>(), partitionMaxBytes2, MAX_FETCH_RECORDS); - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp1)); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); LinkedHashMap sharePartitions2 = new LinkedHashMap<>(); sharePartitions2.put(tp0, sp0); sharePartitions2.put(tp1, sp1); sharePartitions2.put(tp2, sp2); - Uuid fetchId2 = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch2 = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch2) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions2) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withFetchId(fetchId2) .build()); // sp1 can be acquired now - when(sp1.maybeAcquireFetchLock(fetchId2)).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); // when forceComplete is called for delayedShareFetch2, since tp1 is common in between delayed share fetch // requests, it should add a "check and complete" action for request key tp1 on the purgatory. @@ -693,6 +536,9 @@ public void testCombineLogReadResponse() { ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -703,33 +549,25 @@ public void testCombineLogReadResponse() { CompletableFuture> future = new CompletableFuture<>(); ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); - - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp1)); + future, partitionMaxBytes, MAX_FETCH_RECORDS); DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withReplicaManager(replicaManager) .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) .build(); - LinkedHashMap topicPartitionData = new LinkedHashMap<>(); - topicPartitionData.put(tp0, 0L); - topicPartitionData.put(tp1, 0L); + LinkedHashMap topicPartitionData = new LinkedHashMap<>(); + topicPartitionData.put(tp0, mock(FetchRequest.PartitionData.class)); + topicPartitionData.put(tp1, mock(FetchRequest.PartitionData.class)); // Case 1 - logReadResponse contains tp0. LinkedHashMap logReadResponse = new LinkedHashMap<>(); - LogReadResult logReadResult = mock(LogReadResult.class); - Records records = mock(Records.class); - when(records.sizeInBytes()).thenReturn(2); - FetchDataInfo fetchDataInfo = new FetchDataInfo(mock(LogOffsetMetadata.class), records); - when(logReadResult.info()).thenReturn(fetchDataInfo); - logReadResponse.put(tp0, logReadResult); - - doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + logReadResponse.put(tp0, mock(LogReadResult.class)); + + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); assertEquals(combinedLogReadResponse.get(tp0), logReadResponse.get(tp0)); @@ -750,54 +588,39 @@ public void testExceptionInMinBytesCalculation() { Uuid topicId = Uuid.randomUuid(); ReplicaManager replicaManager = mock(ReplicaManager.class); TopicIdPartition tp0 = new TopicIdPartition(topicId, new TopicPartition("foo", 0)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); + LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, MAX_FETCH_RECORDS); when(sp0.canAcquireRecords()).thenReturn(true); - when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + when(sp0.acquire(any(), anyInt(), any())).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); // Mocking partition object to throw an exception during min bytes calculation while calling fetchOffsetSnapshot Partition partition = mock(Partition.class); when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(partition); when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenThrow(new RuntimeException("Exception thrown")); - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); - BiConsumer exceptionHandler = mockExceptionHandler(); - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(100L).thenReturn(110L).thenReturn(170L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - Uuid fetchId = Uuid.randomUuid(); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) .withReplicaManager(replicaManager) .withExceptionHandler(exceptionHandler) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withShareGroupMetrics(shareGroupMetrics) - .withTime(time) - .withFetchId(fetchId) .build()); - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - // Try complete should return false as the share partition has errored out. assertFalse(delayedShareFetch.tryComplete()); // Fetch should remain pending and should be completed on request timeout. @@ -809,10 +632,9 @@ public void testExceptionInMinBytesCalculation() { Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); - Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); // Force complete the request as it's still pending. Return false from the share partition lock acquire. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(false); + when(sp0.maybeAcquireFetchLock()).thenReturn(false); assertTrue(delayedShareFetch.forceComplete()); assertTrue(delayedShareFetch.isCompleted()); @@ -821,94 +643,40 @@ public void testExceptionInMinBytesCalculation() { any(), any(), any(ReplicaQuota.class), anyBoolean()); Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); assertTrue(delayedShareFetch.lock().tryLock()); - assertEquals(2, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).count()); - assertEquals(70, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).sum()); - assertEquals(10, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).min()); - assertEquals(60, shareGroupMetrics.topicPartitionsAcquireTimeMs(groupId).max()); - assertEquals(1, shareGroupMetrics.topicPartitionsFetchRatio(groupId).count()); - assertEquals(0, shareGroupMetrics.topicPartitionsFetchRatio(groupId).sum()); - delayedShareFetch.lock().unlock(); Mockito.verify(exceptionHandler, times(1)).accept(any(), any()); } - @Test - public void testTryCompleteLocksReleasedOnCompleteException() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - - SharePartition sp0 = mock(SharePartition.class); - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); - - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withFetchId(fetchId) - .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - // Throw exception for onComplete. - doThrow(new RuntimeException()).when(delayedShareFetch).onComplete(); - // Try to complete the request. - assertFalse(delayedShareFetch.tryComplete()); - - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(any()); - Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); - } - @Test public void testLocksReleasedForCompletedFetch() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); LinkedHashMap sharePartitions1 = new LinkedHashMap<>(); sharePartitions1.put(tp0, sp0); ReplicaManager replicaManager = mock(ReplicaManager.class); - doAnswer(invocation -> buildLogReadResult(List.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mockPartitionMaxBytes(Set.of(tp0)); + new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), MAX_FETCH_RECORDS); - Uuid fetchId = Uuid.randomUuid(); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions1) .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withFetchId(fetchId) .build(); DelayedShareFetch spy = spy(delayedShareFetch); - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); doReturn(false).when(spy).forceComplete(); assertFalse(spy.tryComplete()); - Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); + Mockito.verify(sp0, times(1)).releaseFetchLock(); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); } @@ -919,1345 +687,76 @@ public void testLocksReleasedAcquireException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp0 = mock(SharePartition.class); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenThrow(new RuntimeException("Acquire exception")); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - Uuid fetchId = Uuid.randomUuid(); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - ReplicaManager replicaManager = mock(ReplicaManager.class); - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); + new CompletableFuture<>(), Map.of(tp0, PARTITION_MAX_BYTES), MAX_FETCH_RECORDS); DelayedShareFetch delayedShareFetch = DelayedShareFetchTest.DelayedShareFetchBuilder.builder() .withShareFetchData(shareFetch) .withSharePartitions(sharePartitions) - .withFetchId(fetchId) - .withReplicaManager(replicaManager) .build(); - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - assertFalse(delayedShareFetch.tryComplete()); - Mockito.verify(sp0, times(1)).releaseFetchLock(fetchId); + Mockito.verify(sp0, times(1)).releaseFetchLock(); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); } - @Test - public void testTryCompleteWhenPartitionMaxBytesStrategyThrowsException() { - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - SharePartition sp0 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - CompletableFuture> future = new CompletableFuture<>(); - - ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, - 2, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, BROKER_TOPIC_STATS); - - // partitionMaxBytesStrategy.maxBytes() function throws an exception - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); - when(partitionMaxBytesStrategy.maxBytes(anyInt(), any(), anyInt())).thenThrow(new IllegalArgumentException("Exception thrown")); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withExceptionHandler(mockExceptionHandler()) - .withPartitionMaxBytesStrategy(partitionMaxBytesStrategy) - .withFetchId(fetchId) - .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - assertTrue(delayedShareFetch.isCompleted()); - // releasePartitionLocks is called twice - first time from tryComplete and second time from onComplete - Mockito.verify(delayedShareFetch, times(2)).releasePartitionLocks(any()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - - assertTrue(future.isDone()); - assertFalse(future.isCompletedExceptionally()); - Map partitionDataMap = future.join(); - assertEquals(1, partitionDataMap.size()); - assertTrue(partitionDataMap.containsKey(tp0)); - assertEquals("Exception thrown", partitionDataMap.get(tp0).errorMessage()); + static void mockTopicIdPartitionToReturnDataEqualToMinBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, int minBytes) { + LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, minBytes); + LogOffsetSnapshot endOffsetSnapshot = new LogOffsetSnapshot(1, mock(LogOffsetMetadata.class), + hwmOffsetMetadata, mock(LogOffsetMetadata.class)); + Partition partition = mock(Partition.class); + when(partition.isLeader()).thenReturn(true); + when(partition.getLeaderEpoch()).thenReturn(1); + when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenReturn(endOffsetSnapshot); + when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition())).thenReturn(partition); } - @Test - public void testPartitionMaxBytesFromUniformStrategyWhenAllPartitionsAreAcquirable() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 4)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - SharePartition sp3 = mock(SharePartition.class); - SharePartition sp4 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - when(sp3.canAcquireRecords()).thenReturn(true); - when(sp4.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - sharePartitions.put(tp3, sp3); - sharePartitions.put(tp4, sp4); - - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp2.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp3.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp4.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - - // All 5 partitions are acquirable. - doAnswer(invocation -> buildLogReadResult(sharePartitions.keySet().stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - when(sp3.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - when(sp4.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp2, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp3, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp4, 1); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) - .withFetchId(fetchId) - .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp3.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp4.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertTrue(delayedShareFetch.tryComplete()); - assertTrue(delayedShareFetch.isCompleted()); - - // Since all partitions are acquirable, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 5) - int expectedPartitionMaxBytes = 1024 * 1024 / 5; - LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); - sharePartitions.keySet().forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, - new FetchRequest.PartitionData( - topicIdPartition.topicId(), - 0, - 0, - expectedPartitionMaxBytes, - Optional.empty() - ))); - - Mockito.verify(replicaManager, times(1)).readFromLog( - shareFetch.fetchParams(), - CollectionConverters.asScala( - sharePartitions.keySet().stream().map(topicIdPartition -> - new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) - ), - QuotaFactory.UNBOUNDED_QUOTA, - true); + private void mockTopicIdPartitionFetchBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, LogOffsetMetadata hwmOffsetMetadata) { + LogOffsetSnapshot endOffsetSnapshot = new LogOffsetSnapshot(1, mock(LogOffsetMetadata.class), + hwmOffsetMetadata, mock(LogOffsetMetadata.class)); + Partition partition = mock(Partition.class); + when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenReturn(endOffsetSnapshot); + when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition())).thenReturn(partition); } - @Test - public void testPartitionMaxBytesFromUniformStrategyWhenFewPartitionsAreAcquirable() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 4)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - SharePartition sp3 = mock(SharePartition.class); - SharePartition sp4 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(false); - when(sp3.canAcquireRecords()).thenReturn(false); - when(sp4.canAcquireRecords()).thenReturn(false); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - sharePartitions.put(tp3, sp3); - sharePartitions.put(tp4, sp4); - - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1, tp2, tp3, tp4), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - - // Only 2 out of 5 partitions are acquirable. - Set acquirableTopicPartitions = new LinkedHashSet<>(); - acquirableTopicPartitions.add(tp0); - acquirableTopicPartitions.add(tp1); - doAnswer(invocation -> buildLogReadResult(acquirableTopicPartitions.stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); - - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp0, 1); - mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) - .withFetchId(fetchId) - .build()); - - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(false); - when(sp3.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp4.maybeAcquireFetchLock(fetchId)).thenReturn(false); - - assertTrue(delayedShareFetch.tryComplete()); - assertTrue(delayedShareFetch.isCompleted()); - - // Since only 2 partitions are acquirable, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 2) - int expectedPartitionMaxBytes = 1024 * 1024 / 2; - LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); - acquirableTopicPartitions.forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, - new FetchRequest.PartitionData( - topicIdPartition.topicId(), - 0, - 0, - expectedPartitionMaxBytes, - Optional.empty() - ))); - - Mockito.verify(replicaManager, times(1)).readFromLog( - shareFetch.fetchParams(), - CollectionConverters.asScala( - acquirableTopicPartitions.stream().map(topicIdPartition -> - new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) - ), - QuotaFactory.UNBOUNDED_QUOTA, - true); + @SuppressWarnings("unchecked") + private static BiConsumer mockExceptionHandler() { + return mock(BiConsumer.class); } - @Test - public void testPartitionMaxBytesFromUniformStrategyInCombineLogReadResponse() { - String groupId = "grp"; - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - - ShareFetch shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, MAX_WAIT_MS, - 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withReplicaManager(replicaManager) - .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) - .build(); - - LinkedHashMap topicPartitionData = new LinkedHashMap<>(); - topicPartitionData.put(tp0, 0L); - topicPartitionData.put(tp1, 0L); - topicPartitionData.put(tp2, 0L); + static class DelayedShareFetchBuilder { + ShareFetch shareFetch = mock(ShareFetch.class); + private ReplicaManager replicaManager = mock(ReplicaManager.class); + private BiConsumer exceptionHandler = mockExceptionHandler(); + private LinkedHashMap sharePartitions = mock(LinkedHashMap.class); - // Existing fetched data already contains tp0. - LinkedHashMap logReadResponse = new LinkedHashMap<>(); - LogReadResult logReadResult = mock(LogReadResult.class); - Records records = mock(Records.class); - when(records.sizeInBytes()).thenReturn(2); - FetchDataInfo fetchDataInfo = new FetchDataInfo(mock(LogOffsetMetadata.class), records); - when(logReadResult.info()).thenReturn(fetchDataInfo); - logReadResponse.put(tp0, logReadResult); - - Set fetchableTopicPartitions = new LinkedHashSet<>(); - fetchableTopicPartitions.add(tp1); - fetchableTopicPartitions.add(tp2); - // We will be doing replica manager fetch only for tp1 and tp2. - doAnswer(invocation -> buildLogReadResult(fetchableTopicPartitions.stream().toList())).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - LinkedHashMap combinedLogReadResponse = delayedShareFetch.combineLogReadResponse(topicPartitionData, logReadResponse); + DelayedShareFetchBuilder withShareFetchData(ShareFetch shareFetch) { + this.shareFetch = shareFetch; + return this; + } - assertEquals(topicPartitionData.keySet(), combinedLogReadResponse.keySet()); - // Since only 2 partitions are fetchable but the third one has already been fetched, maxbytes per partition = requestMaxBytes(i.e. 1024*1024) / acquiredTopicPartitions(i.e. 3) - int expectedPartitionMaxBytes = 1024 * 1024 / 3; - LinkedHashMap expectedReadPartitionInfo = new LinkedHashMap<>(); - fetchableTopicPartitions.forEach(topicIdPartition -> expectedReadPartitionInfo.put(topicIdPartition, - new FetchRequest.PartitionData( - topicIdPartition.topicId(), - 0, - 0, - expectedPartitionMaxBytes, - Optional.empty() - ))); + DelayedShareFetchBuilder withReplicaManager(ReplicaManager replicaManager) { + this.replicaManager = replicaManager; + return this; + } - Mockito.verify(replicaManager, times(1)).readFromLog( - shareFetch.fetchParams(), - CollectionConverters.asScala( - fetchableTopicPartitions.stream().map(topicIdPartition -> - new Tuple2<>(topicIdPartition, expectedReadPartitionInfo.get(topicIdPartition))).collect(Collectors.toList()) - ), - QuotaFactory.UNBOUNDED_QUOTA, - true); - } - - @Test - public void testOnCompleteExecutionOnTimeout() { - ShareFetch shareFetch = new ShareFetch( - FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - DelayedShareFetch delayedShareFetch = DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .build(); - assertFalse(delayedShareFetch.isCompleted()); - assertFalse(shareFetch.isCompleted()); - // Call run to execute onComplete and onExpiration. - delayedShareFetch.run(); - assertTrue(shareFetch.isCompleted()); - assertEquals(1, delayedShareFetch.expiredRequestMeter().count()); - } - - @SuppressWarnings("unchecked") - @Test - public void testRemoteStorageFetchTryCompleteReturnsFalse() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - when(sp1.nextFetchOffset()).thenReturn(20L); - when(sp2.nextFetchOffset()).thenReturn(30L); - - // Fetch offset matches with the cached entry for sp0 but not for sp1 and sp2. Hence, a replica manager fetch will happen for sp1 and sp2. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(10, 1, 0))); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - // Mocking local log read result for tp1 and remote storage read result for tp2. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp1), Set.of(tp2))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock. - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - when(remoteLogManager.asyncRead(any(), any())).thenReturn(mock(Future.class)); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - Partition p2 = mock(Partition.class); - when(p2.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) - .withFetchId(fetchId) - .build()); - - // All the topic partitions are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - assertFalse(delayedShareFetch.tryComplete()); - assertFalse(delayedShareFetch.isCompleted()); - // Remote fetch object gets created for delayed share fetch object. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // Verify the locks are released for local log read topic partitions tp0 and tp1. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1)); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - - @SuppressWarnings("unchecked") - @Test - public void testRemoteStorageFetchPartitionLeaderChanged() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - - SharePartition sp0 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - - // Fetch offset does not match with the cached entry for sp0, hence, a replica manager fetch will happen for sp0. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - // Mocking remote storage read result for tp0. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock. - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - when(remoteLogManager.asyncRead(any(), any())).thenReturn(mock(Future.class)); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(false); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0))) - .withFetchId(fetchId) - .build()); - - // All the topic partitions are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. - doAnswer(invocationOnMock -> { - TimerTask timerTask = invocationOnMock.getArgument(0); - timerTask.run(); - return null; - }).when(replicaManager).addShareFetchTimerRequest(any()); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - assertTrue(delayedShareFetch.isCompleted()); - // Remote fetch object gets created for delayed share fetch object. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // Verify the locks are released for local log read topic partitions tp0. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - - @SuppressWarnings("unchecked") - @Test - public void testRemoteStorageFetchTryCompleteThrowsException() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - when(sp1.nextFetchOffset()).thenReturn(20L); - when(sp2.nextFetchOffset()).thenReturn(25L); - - // Fetch offset does not match with the cached entry for sp0, sp1 and sp2. Hence, a replica manager fetch will happen for all. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - // Mocking local log read result for tp0 and remote storage read result for tp1 and tp2. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0), Set.of(tp1, tp2))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Exception will be thrown during the creation of remoteFetch object for tp2. - // remoteFetchTask gets created for tp1 successfully. - Future remoteFetchTask = mock(Future.class); - doAnswer(invocation -> { - when(remoteFetchTask.isCancelled()).thenReturn(true); - return false; - }).when(remoteFetchTask).cancel(false); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - when(remoteLogManager.asyncRead(any(), any())) - .thenReturn(remoteFetchTask) // for tp1 - .thenThrow(new RejectedExecutionException("Exception thrown")); // for tp2 - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - BiConsumer exceptionHandler = mockExceptionHandler(); - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withExceptionHandler(exceptionHandler) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) - .withFetchId(fetchId) - .build()); - - // All the topic partitions are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - // tryComplete returns true and goes to forceComplete once the exception occurs. - assertTrue(delayedShareFetch.tryComplete()); - assertTrue(delayedShareFetch.isCompleted()); - // The future of shareFetch completes. - assertTrue(shareFetch.isCompleted()); - // The remoteFetchTask created for tp1 is cancelled successfully. - assertTrue(remoteFetchTask.isCancelled()); - assertFalse(future.isCompletedExceptionally()); - assertEquals(Set.of(tp1, tp2), future.join().keySet()); - // Exception occurred and was handled. - Mockito.verify(exceptionHandler, times(2)).accept(any(), any()); - // Verify the locks are released for all local and remote read topic partitions tp0, tp1 and tp2 because of exception occurrence. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp1, tp2)); - Mockito.verify(delayedShareFetch, times(1)).onComplete(); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - - @SuppressWarnings("unchecked") - @Test - public void testRemoteStorageFetchTryCompletionDueToBrokerBecomingOffline() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - when(sp1.nextFetchOffset()).thenReturn(20L); - when(sp2.nextFetchOffset()).thenReturn(30L); - - // Fetch offset matches with the cached entry for sp0 but not for sp1 and sp2. Hence, a replica manager fetch will happen for sp1 and sp2 during tryComplete. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(10, 1, 0))); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - // Mocking local log read result for tp1 and remote storage read result for tp2 on first replicaManager readFromLog call(from tryComplete). - // Mocking local log read result for tp0 and tp1 on second replicaManager readFromLog call(from onComplete). - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp1), Set.of(tp2)) - ).doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of()) - ).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object does not complete within tryComplete in this mock but the broker becomes unavailable. - Future remoteFetchTask = mock(Future.class); - doAnswer(invocation -> { - when(remoteFetchTask.isCancelled()).thenReturn(true); - return false; - }).when(remoteFetchTask).cancel(false); - - when(remoteFetchTask.cancel(false)).thenReturn(true); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - when(remoteLogManager.asyncRead(any(), any())).thenReturn(remoteFetchTask); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenThrow(mock(KafkaStorageException.class)); - - // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. - doAnswer(invocationOnMock -> { - TimerTask timerTask = invocationOnMock.getArgument(0); - timerTask.run(); - return null; - }).when(replicaManager).addShareFetchTimerRequest(any()); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) - .withFetchId(fetchId) - .build()); - - // All the topic partitions are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - - assertTrue(delayedShareFetch.isCompleted()); - // Pending remote fetch object gets created for delayed share fetch. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - List remoteFetches = delayedShareFetch.pendingRemoteFetches().remoteFetches(); - assertEquals(1, remoteFetches.size()); - assertTrue(remoteFetches.get(0).remoteFetchTask().isCancelled()); - // Partition locks should be released for all 3 topic partitions - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1, tp2)); - assertTrue(shareFetch.isCompleted()); - // Share fetch response contained tp0 and tp1 (local fetch) but not tp2, since it errored out. - assertEquals(Set.of(tp0, tp1), future.join().keySet()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - @Test - public void testRemoteStorageFetchRequestCompletionOnFutureCompletionFailure() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(false); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - // Fetch offset does not match with the cached entry for sp0. Hence, a replica manager fetch will happen for sp0. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - // Mocking remote storage read result for tp0. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. - RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( - Optional.empty(), - Optional.of(new TimeoutException("Error occurred while creating remote fetch result")) // Remote fetch result is returned with an error. - ); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - doAnswer(invocationOnMock -> { - // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. - Consumer callback = invocationOnMock.getArgument(1); - callback.accept(remoteFetchResult); - return CompletableFuture.completedFuture(remoteFetchResult); - }).when(remoteLogManager).asyncRead(any(), any()); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Uuid fetchId = Uuid.randomUuid(); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1))) - .withFetchId(fetchId) - .build()); - - // sp0 is acquirable, sp1 is not acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(false); - - when(sp0.acquire(any(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords().setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - - assertTrue(delayedShareFetch.isCompleted()); - // Pending remote fetch object gets created for delayed share fetch. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // Verify the locks are released for tp0. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - assertTrue(shareFetch.isCompleted()); - assertEquals(Set.of(tp0), future.join().keySet()); - assertEquals(Errors.REQUEST_TIMED_OUT.code(), future.join().get(tp0).errorCode()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - - @Test - public void testRemoteStorageFetchRequestCompletionOnFutureCompletionSuccessfully() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - - SharePartition sp0 = mock(SharePartition.class); - - - when(sp0.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - // Fetch offset does not match with the cached entry for sp0. Hence, a replica manager fetch will happen for sp0. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - // Mocking remote storage read result for tp0. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), Set.of(tp0))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. - RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( - Optional.of(REMOTE_FETCH_INFO), - Optional.empty() // Remote fetch result is returned successfully without error. - ); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - doAnswer(invocationOnMock -> { - // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. - Consumer callback = invocationOnMock.getArgument(1); - callback.accept(remoteFetchResult); - return CompletableFuture.completedFuture(remoteFetchResult); - }).when(remoteLogManager).asyncRead(any(), any()); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Uuid fetchId = Uuid.randomUuid(); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0))) - .withFetchId(fetchId) - .build()); - - // sp0 is acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - - assertTrue(delayedShareFetch.isCompleted()); - // Pending remote fetch object gets created for delayed share fetch. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // Verify the locks are released for tp0. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - assertTrue(shareFetch.isCompleted()); - assertEquals(Set.of(tp0), future.join().keySet()); - assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - @Test - public void testRemoteStorageFetchRequestCompletionAlongWithLocalLogRead() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - when(sp2.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - sharePartitions.put(tp2, sp2); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0, tp1, tp2), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - when(sp1.nextFetchOffset()).thenReturn(20L); - when(sp2.nextFetchOffset()).thenReturn(30L); - - // Fetch offset does not match with the cached entry for sp0, sp1 and sp2. Hence, a replica manager fetch will happen for all of them in tryComplete. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp2.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); - partitionDataMap.put(tp2, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - // Mocking local log read result for tp0, tp1 and remote storage read result for tp2 on first replicaManager readFromLog call(from tryComplete). - // Mocking local log read result for tp0 and tp1 on second replicaManager readFromLog call(from onComplete). - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of(tp2)) - ).doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(tp0, tp1), Set.of()) - ).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. - RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( - Optional.of(REMOTE_FETCH_INFO), - Optional.empty() // Remote fetch result is returned successfully without error. - ); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - doAnswer(invocationOnMock -> { - // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. - Consumer callback = invocationOnMock.getArgument(1); - callback.accept(remoteFetchResult); - return CompletableFuture.completedFuture(remoteFetchResult); - }).when(remoteLogManager).asyncRead(any(), any()); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - Partition p2 = mock(Partition.class); - when(p2.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - when(replicaManager.getPartitionOrException(tp2.topicPartition())).thenReturn(p2); - - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withReplicaManager(replicaManager) - .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1, tp2))) - .withFetchId(fetchId) - .build()); - - // All the topic partitions are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp2.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - - assertTrue(delayedShareFetch.isCompleted()); - // Pending remote fetch object gets created for delayed share fetch. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // the future of shareFetch completes. - assertTrue(shareFetch.isCompleted()); - assertEquals(Set.of(tp0, tp1, tp2), future.join().keySet()); - // Verify the locks are released for both local log and remote storage read topic partitions tp0, tp1 and tp2. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1, tp2)); - assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); - assertEquals(Errors.NONE.code(), future.join().get(tp1).errorCode()); - assertEquals(Errors.NONE.code(), future.join().get(tp2).errorCode()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - @Test - public void testRemoteStorageFetchHappensForAllTopicPartitions() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp1.canAcquireRecords()).thenReturn(true); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - sharePartitions.put(tp1, sp1); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0, tp1), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - when(sp0.nextFetchOffset()).thenReturn(10L); - when(sp1.nextFetchOffset()).thenReturn(10L); - // Fetch offset does not match with the cached entry for sp0 and sp1. Hence, a replica manager fetch will happen for both. - when(sp0.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.empty()); - - LinkedHashSet remoteStorageFetchPartitions = new LinkedHashSet<>(); - remoteStorageFetchPartitions.add(tp0); - remoteStorageFetchPartitions.add(tp1); - - // Mocking remote storage read result for tp0 and tp1. - doAnswer(invocation -> buildLocalAndRemoteFetchResult(Set.of(), remoteStorageFetchPartitions)).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - - // Remote fetch related mocks. Remote fetch object completes within tryComplete in this mock, hence request will move on to forceComplete. - RemoteLogReadResult remoteFetchResult = new RemoteLogReadResult( - Optional.of(REMOTE_FETCH_INFO), - Optional.empty() // Remote fetch result is returned successfully without error. - ); - RemoteLogManager remoteLogManager = mock(RemoteLogManager.class); - doAnswer(invocationOnMock -> { - // Make sure that the callback is called to populate remoteFetchResult for the mock behaviour. - Consumer callback = invocationOnMock.getArgument(1); - callback.accept(remoteFetchResult); - return CompletableFuture.completedFuture(remoteFetchResult); - }).when(remoteLogManager).asyncRead(any(), any()); - when(replicaManager.remoteLogManager()).thenReturn(Option.apply(remoteLogManager)); - - Uuid fetchId = Uuid.randomUuid(); - - Partition p0 = mock(Partition.class); - when(p0.isLeader()).thenReturn(true); - - Partition p1 = mock(Partition.class); - when(p1.isLeader()).thenReturn(true); - - when(replicaManager.getPartitionOrException(tp0.topicPartition())).thenReturn(p0); - when(replicaManager.getPartitionOrException(tp1.topicPartition())).thenReturn(p1); - - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withSharePartitions(sharePartitions) - .withReplicaManager(replicaManager) - .withPartitionMaxBytesStrategy(mockPartitionMaxBytes(Set.of(tp0, tp1))) - .withFetchId(fetchId) - .build()); - - // sp0 and sp1 are acquirable. - when(sp0.maybeAcquireFetchLock(fetchId)).thenReturn(true); - when(sp1.maybeAcquireFetchLock(fetchId)).thenReturn(true); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - partitionDataMap.put(tp1, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - assertFalse(delayedShareFetch.isCompleted()); - assertTrue(delayedShareFetch.tryComplete()); - - assertTrue(delayedShareFetch.isCompleted()); - // Pending remote fetch object gets created for delayed share fetch. - assertNotNull(delayedShareFetch.pendingRemoteFetches()); - // Verify the locks are released for both tp0 and tp1. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0, tp1)); - assertTrue(shareFetch.isCompleted()); - // Share fetch response contains both remote storage fetch topic partitions. - assertEquals(Set.of(tp0, tp1), future.join().keySet()); - assertEquals(Errors.NONE.code(), future.join().get(tp0).errorCode()); - assertEquals(Errors.NONE.code(), future.join().get(tp1).errorCode()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - @Test - public void testRemoteStorageFetchCompletionPostRegisteringCallbackByPendingFetchesCompletion() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - SharePartition sp0 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp0.nextFetchOffset()).thenReturn(10L); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - PendingRemoteFetches pendingRemoteFetches = mock(PendingRemoteFetches.class); - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withReplicaManager(replicaManager) - .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) - .withPendingRemoteFetches(pendingRemoteFetches) - .withFetchId(fetchId) - .build()); - - LinkedHashMap partitionsAcquired = new LinkedHashMap<>(); - partitionsAcquired.put(tp0, 10L); - - // Manually update acquired partitions. - delayedShareFetch.updatePartitionsAcquired(partitionsAcquired); - - // Mock remote fetch result. - RemoteFetch remoteFetch = mock(RemoteFetch.class); - when(remoteFetch.topicIdPartition()).thenReturn(tp0); - when(remoteFetch.remoteFetchResult()).thenReturn(CompletableFuture.completedFuture( - new RemoteLogReadResult(Optional.of(REMOTE_FETCH_INFO), Optional.empty())) - ); - when(remoteFetch.logReadResult()).thenReturn(new LogReadResult( - REMOTE_FETCH_INFO, - Optional.empty(), - -1L, - -1L, - -1L, - -1L, - -1L, - OptionalLong.empty(), - OptionalInt.empty(), - Optional.empty() - )); - when(pendingRemoteFetches.remoteFetches()).thenReturn(List.of(remoteFetch)); - when(pendingRemoteFetches.isDone()).thenReturn(false); - - // Make sure that the callback is called to complete remote storage share fetch result. - doAnswer(invocationOnMock -> { - BiConsumer callback = invocationOnMock.getArgument(0); - callback.accept(mock(Void.class), null); - return null; - }).when(pendingRemoteFetches).invokeCallbackOnCompletion(any()); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - assertFalse(delayedShareFetch.isCompleted()); - delayedShareFetch.forceComplete(); - assertTrue(delayedShareFetch.isCompleted()); - // the future of shareFetch completes. - assertTrue(shareFetch.isCompleted()); - assertEquals(Set.of(tp0), future.join().keySet()); - // Verify the locks are released for tp0. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - assertTrue(delayedShareFetch.outsidePurgatoryCallbackLock()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - @Test - public void testRemoteStorageFetchCompletionPostRegisteringCallbackByTimerTaskCompletion() { - ReplicaManager replicaManager = mock(ReplicaManager.class); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - SharePartition sp0 = mock(SharePartition.class); - - when(sp0.canAcquireRecords()).thenReturn(true); - when(sp0.nextFetchOffset()).thenReturn(10L); - - LinkedHashMap sharePartitions = new LinkedHashMap<>(); - sharePartitions.put(tp0, sp0); - - CompletableFuture> future = new CompletableFuture<>(); - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, "grp", Uuid.randomUuid().toString(), - future, List.of(tp0), BATCH_SIZE, MAX_FETCH_RECORDS, - BROKER_TOPIC_STATS); - - PendingRemoteFetches pendingRemoteFetches = mock(PendingRemoteFetches.class); - Uuid fetchId = Uuid.randomUuid(); - DelayedShareFetch delayedShareFetch = spy(DelayedShareFetchBuilder.builder() - .withShareFetchData(shareFetch) - .withReplicaManager(replicaManager) - .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) - .withPendingRemoteFetches(pendingRemoteFetches) - .withFetchId(fetchId) - .build()); - - LinkedHashMap partitionsAcquired = new LinkedHashMap<>(); - partitionsAcquired.put(tp0, 10L); - - // Manually update acquired partitions. - delayedShareFetch.updatePartitionsAcquired(partitionsAcquired); - - // Mock remote fetch result. - RemoteFetch remoteFetch = mock(RemoteFetch.class); - when(remoteFetch.topicIdPartition()).thenReturn(tp0); - when(remoteFetch.remoteFetchResult()).thenReturn(CompletableFuture.completedFuture( - new RemoteLogReadResult(Optional.of(REMOTE_FETCH_INFO), Optional.empty())) - ); - when(remoteFetch.logReadResult()).thenReturn(new LogReadResult( - REMOTE_FETCH_INFO, - Optional.empty(), - -1L, - -1L, - -1L, - -1L, - -1L, - OptionalLong.empty(), - OptionalInt.empty(), - Optional.empty() - )); - when(pendingRemoteFetches.remoteFetches()).thenReturn(List.of(remoteFetch)); - when(pendingRemoteFetches.isDone()).thenReturn(false); - - // Make sure that the callback to complete remote storage share fetch result is not called. - doAnswer(invocationOnMock -> null).when(pendingRemoteFetches).invokeCallbackOnCompletion(any()); - - // Mock the behaviour of replica manager such that remote storage fetch completion timer task completes on adding it to the watch queue. - doAnswer(invocationOnMock -> { - TimerTask timerTask = invocationOnMock.getArgument(0); - timerTask.run(); - return null; - }).when(replicaManager).addShareFetchTimerRequest(any()); - - try (MockedStatic mockedShareFetchUtils = Mockito.mockStatic(ShareFetchUtils.class)) { - Map partitionDataMap = new LinkedHashMap<>(); - partitionDataMap.put(tp0, mock(ShareFetchResponseData.PartitionData.class)); - mockedShareFetchUtils.when(() -> ShareFetchUtils.processFetchResponse(any(), any(), any(), any(), any())).thenReturn(partitionDataMap); - - assertFalse(delayedShareFetch.isCompleted()); - delayedShareFetch.forceComplete(); - assertTrue(delayedShareFetch.isCompleted()); - // the future of shareFetch completes. - assertTrue(shareFetch.isCompleted()); - assertEquals(Set.of(tp0), future.join().keySet()); - // Verify the locks are released for tp0. - Mockito.verify(delayedShareFetch, times(1)).releasePartitionLocks(Set.of(tp0)); - assertTrue(delayedShareFetch.outsidePurgatoryCallbackLock()); - assertTrue(delayedShareFetch.lock().tryLock()); - delayedShareFetch.lock().unlock(); - } - } - - static void mockTopicIdPartitionToReturnDataEqualToMinBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, int minBytes) { - LogOffsetMetadata hwmOffsetMetadata = new LogOffsetMetadata(1, 1, minBytes); - LogOffsetSnapshot endOffsetSnapshot = new LogOffsetSnapshot(1, mock(LogOffsetMetadata.class), - hwmOffsetMetadata, mock(LogOffsetMetadata.class)); - Partition partition = mock(Partition.class); - when(partition.isLeader()).thenReturn(true); - when(partition.getLeaderEpoch()).thenReturn(1); - when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenReturn(endOffsetSnapshot); - when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition())).thenReturn(partition); - } - - private void mockTopicIdPartitionFetchBytes(ReplicaManager replicaManager, TopicIdPartition topicIdPartition, LogOffsetMetadata hwmOffsetMetadata) { - LogOffsetSnapshot endOffsetSnapshot = new LogOffsetSnapshot(1, mock(LogOffsetMetadata.class), - hwmOffsetMetadata, mock(LogOffsetMetadata.class)); - Partition partition = mock(Partition.class); - when(partition.fetchOffsetSnapshot(any(), anyBoolean())).thenReturn(endOffsetSnapshot); - when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition())).thenReturn(partition); - } - - private PartitionMaxBytesStrategy mockPartitionMaxBytes(Set partitions) { - PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); - LinkedHashMap maxBytes = new LinkedHashMap<>(); - partitions.forEach(partition -> maxBytes.put(partition, 1)); - when(partitionMaxBytesStrategy.maxBytes(anyInt(), any(), anyInt())).thenReturn(maxBytes); - return partitionMaxBytesStrategy; - } - - private Seq> buildLocalAndRemoteFetchResult( - Set localLogReadTopicIdPartitions, - Set remoteReadTopicIdPartitions) { - List> logReadResults = new ArrayList<>(); - localLogReadTopicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( - new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.EMPTY), - Optional.empty(), - -1L, - -1L, - -1L, - -1L, - -1L, - OptionalLong.empty(), - OptionalInt.empty(), - Optional.empty() - )))); - remoteReadTopicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( - REMOTE_FETCH_INFO, - Optional.empty(), - -1L, - -1L, - -1L, - -1L, - -1L, - OptionalLong.empty(), - OptionalInt.empty(), - Optional.empty() - )))); - return CollectionConverters.asScala(logReadResults).toSeq(); - } - - @SuppressWarnings("unchecked") - private static BiConsumer mockExceptionHandler() { - return mock(BiConsumer.class); - } - - @SuppressWarnings("unchecked") - static class DelayedShareFetchBuilder { - private ShareFetch shareFetch = mock(ShareFetch.class); - private ReplicaManager replicaManager = mock(ReplicaManager.class); - private BiConsumer exceptionHandler = mockExceptionHandler(); - private LinkedHashMap sharePartitions = mock(LinkedHashMap.class); - private PartitionMaxBytesStrategy partitionMaxBytesStrategy = mock(PartitionMaxBytesStrategy.class); - private Time time = new MockTime(); - private Optional pendingRemoteFetches = Optional.empty(); - private ShareGroupMetrics shareGroupMetrics = mock(ShareGroupMetrics.class); - private Uuid fetchId = Uuid.randomUuid(); - - DelayedShareFetchBuilder withShareFetchData(ShareFetch shareFetch) { - this.shareFetch = shareFetch; - return this; - } - - DelayedShareFetchBuilder withReplicaManager(ReplicaManager replicaManager) { - this.replicaManager = replicaManager; - return this; - } - - DelayedShareFetchBuilder withExceptionHandler(BiConsumer exceptionHandler) { - this.exceptionHandler = exceptionHandler; - return this; - } + DelayedShareFetchBuilder withExceptionHandler(BiConsumer exceptionHandler) { + this.exceptionHandler = exceptionHandler; + return this; + } DelayedShareFetchBuilder withSharePartitions(LinkedHashMap sharePartitions) { this.sharePartitions = sharePartitions; return this; } - DelayedShareFetchBuilder withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy partitionMaxBytesStrategy) { - this.partitionMaxBytesStrategy = partitionMaxBytesStrategy; - return this; - } - - private DelayedShareFetchBuilder withShareGroupMetrics(ShareGroupMetrics shareGroupMetrics) { - this.shareGroupMetrics = shareGroupMetrics; - return this; - } - - private DelayedShareFetchBuilder withTime(Time time) { - this.time = time; - return this; - } - - private DelayedShareFetchBuilder withPendingRemoteFetches(PendingRemoteFetches pendingRemoteFetches) { - this.pendingRemoteFetches = Optional.of(pendingRemoteFetches); - return this; - } - - private DelayedShareFetchBuilder withFetchId(Uuid fetchId) { - this.fetchId = fetchId; - return this; - } - public static DelayedShareFetchBuilder builder() { return new DelayedShareFetchBuilder(); } @@ -2267,13 +766,7 @@ public DelayedShareFetch build() { shareFetch, replicaManager, exceptionHandler, - sharePartitions, - partitionMaxBytesStrategy, - shareGroupMetrics, - time, - pendingRemoteFetches, - fetchId, - REMOTE_FETCH_MAX_WAIT_MS); + sharePartitions); } } } diff --git a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java index e3a77158dafc4..647650f7a2fbc 100644 --- a/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java +++ b/core/src/test/java/kafka/server/share/ShareFetchUtilsTest.java @@ -24,50 +24,36 @@ import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.FencedLeaderEpochException; import org.apache.kafka.common.message.ShareFetchResponseData; -import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; +import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.MemoryRecordsBuilder; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.Records; import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.FetchRequest; import org.apache.kafka.server.share.SharePartitionKey; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.fetch.ShareFetchPartitionData; import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.storage.internals.log.OffsetResultHolder; -import org.apache.kafka.storage.log.metrics.BrokerTopicStats; -import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtensionContext; -import org.junit.jupiter.params.ParameterizedTest; -import org.junit.jupiter.params.provider.Arguments; -import org.junit.jupiter.params.provider.ArgumentsProvider; -import org.junit.jupiter.params.provider.ArgumentsSource; import org.mockito.Mockito; -import java.io.IOException; -import java.nio.ByteBuffer; +import java.util.Collections; +import java.util.HashMap; import java.util.LinkedHashMap; -import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.OptionalLong; import java.util.concurrent.CompletableFuture; import java.util.function.BiConsumer; -import java.util.stream.Stream; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createFileRecords; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.createShareAcquiredRecords; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; +import static kafka.server.share.SharePartitionManagerTest.PARTITION_MAX_BYTES; import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyBoolean; @@ -82,14 +68,12 @@ public class ShareFetchUtilsTest { - private static final FetchParams FETCH_PARAMS = new FetchParams( + private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); - private static final int BATCH_SIZE = 500; private static final BiConsumer EXCEPTION_HANDLER = (key, exception) -> { // No-op }; - private static final BrokerTopicStats BROKER_TOPIC_STATS = new BrokerTopicStats(); @Test public void testProcessFetchResponse() { @@ -97,6 +81,9 @@ public void testProcessFetchResponse() { String memberId = Uuid.randomUuid().toString(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -104,11 +91,11 @@ public void testProcessFetchResponse() { when(sp0.nextFetchOffset()).thenReturn((long) 3); when(sp1.nextFetchOffset()).thenReturn((long) 3); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1))); LinkedHashMap sharePartitions = new LinkedHashMap<>(); @@ -116,7 +103,7 @@ public void testProcessFetchResponse() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, 100); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -130,14 +117,13 @@ public void testProcessFetchResponse() { new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes())); - List responseData = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, + Map responseData = new HashMap<>(); + responseData.put(tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)), - new ShareFetchPartitionData(tp1, 0, new FetchPartitionData(Errors.NONE, 0L, 100L, + OptionalInt.empty(), false)); + responseData.put(tp1, new FetchPartitionData(Errors.NONE, 0L, 100L, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)) - ); + OptionalInt.empty(), false)); Map resultData = ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, mock(ReplicaManager.class), EXCEPTION_HANDLER); @@ -148,10 +134,10 @@ public void testProcessFetchResponse() { assertEquals(1, resultData.get(tp1).partitionIndex()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); assertEquals(Errors.NONE.code(), resultData.get(tp1).errorCode()); - assertEquals(List.of(new ShareFetchResponseData.AcquiredRecords() + assertEquals(Collections.singletonList(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1)), resultData.get(tp0).acquiredRecords()); - assertEquals(List.of(new ShareFetchResponseData.AcquiredRecords() + assertEquals(Collections.singletonList(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1)), resultData.get(tp1).acquiredRecords()); } @@ -162,6 +148,9 @@ public void testProcessFetchResponseWithEmptyRecords() { String memberId = Uuid.randomUuid().toString(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); @@ -169,24 +158,23 @@ public void testProcessFetchResponseWithEmptyRecords() { when(sp0.nextFetchOffset()).thenReturn((long) 3); when(sp1.nextFetchOffset()).thenReturn((long) 3); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn(ShareAcquiredRecords.empty()); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn(ShareAcquiredRecords.empty()); + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); + when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId, - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); - - List responseData = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, - MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)), - new ShareFetchPartitionData(tp1, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, - MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)) - ); + new CompletableFuture<>(), partitionMaxBytes, 100); + + Map responseData = new HashMap<>(); + responseData.put(tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, + MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + responseData.put(tp1, new FetchPartitionData(Errors.NONE, 0L, 0L, + MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); Map resultData = ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, mock(ReplicaManager.class), EXCEPTION_HANDLER); @@ -197,8 +185,8 @@ public void testProcessFetchResponseWithEmptyRecords() { assertEquals(1, resultData.get(tp1).partitionIndex()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); assertEquals(Errors.NONE.code(), resultData.get(tp1).errorCode()); - assertEquals(List.of(), resultData.get(tp0).acquiredRecords()); - assertEquals(List.of(), resultData.get(tp1).acquiredRecords()); + assertEquals(Collections.emptyList(), resultData.get(tp0).acquiredRecords()); + assertEquals(Collections.emptyList(), resultData.get(tp1).acquiredRecords()); } @Test @@ -208,6 +196,10 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + SharePartition sp0 = Mockito.mock(SharePartition.class); SharePartition sp1 = Mockito.mock(SharePartition.class); @@ -216,7 +208,7 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { sharePartitions.put(tp1, sp1); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, 100); ReplicaManager replicaManager = mock(ReplicaManager.class); @@ -227,12 +219,12 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { when(sp0.nextFetchOffset()).thenReturn((long) 0, (long) 5); when(sp1.nextFetchOffset()).thenReturn((long) 4, (long) 4); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( ShareAcquiredRecords.empty(), - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(3).setDeliveryCount((short) 1))); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + when(sp1.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1)), ShareAcquiredRecords.empty()); @@ -242,14 +234,13 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes())); - List responseData1 = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, - MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)), - new ShareFetchPartitionData(tp1, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, - records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)) - ); + Map responseData1 = new HashMap<>(); + responseData1.put(tp0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, + MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + responseData1.put(tp1, new FetchPartitionData(Errors.NONE, 0L, 0L, + records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); Map resultData1 = ShareFetchUtils.processFetchResponse(shareFetch, responseData1, sharePartitions, replicaManager, EXCEPTION_HANDLER); @@ -272,14 +263,13 @@ public void testProcessFetchResponseWithLsoMovementForTopicPartition() { new SimpleRecord("2".getBytes(), "v".getBytes()), new SimpleRecord(null, "value".getBytes())); - List responseData2 = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, - records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)), - new ShareFetchPartitionData(tp1, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, - MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false)) - ); + Map responseData2 = new HashMap<>(); + responseData2.put(tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, + records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + responseData2.put(tp1, new FetchPartitionData(Errors.NONE, 0L, 0L, + MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); Map resultData2 = ShareFetchUtils.processFetchResponse(shareFetch, responseData2, sharePartitions, replicaManager, EXCEPTION_HANDLER); @@ -302,20 +292,21 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = Mockito.mock(SharePartition.class); LinkedHashMap sharePartitions = new LinkedHashMap<>(); sharePartitions.put(tp0, sp0); ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, Uuid.randomUuid().toString(), - new CompletableFuture<>(), List.of(tp0), BATCH_SIZE, 100, BROKER_TOPIC_STATS); + new CompletableFuture<>(), partitionMaxBytes, 100); ReplicaManager replicaManager = mock(ReplicaManager.class); // Mock the replicaManager.fetchOffsetForTimestamp method to return a timestamp and offset for the topic partition. FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(100L, 1L, Optional.empty()); doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn(ShareAcquiredRecords.empty()); + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); MemoryRecords records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("0".getBytes(), "v".getBytes()), @@ -324,10 +315,10 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { new SimpleRecord(null, "value".getBytes())); // When no records are acquired from share partition. - List responseData = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.NONE, 0L, 0L, + Map responseData = Collections.singletonMap( + tp0, new FetchPartitionData(Errors.NONE, 0L, 0L, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false))); + OptionalInt.empty(), false)); Map resultData = ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, replicaManager, EXCEPTION_HANDLER); @@ -335,22 +326,22 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); assertEquals(0, resultData.get(tp0).partitionIndex()); - assertEquals(MemoryRecords.EMPTY, resultData.get(tp0).records()); + assertNull(resultData.get(tp0).records()); assertTrue(resultData.get(tp0).acquiredRecords().isEmpty()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); // When fetch partition data has OFFSET_OUT_OF_RANGE error. - responseData = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, + responseData = Collections.singletonMap( + tp0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, records, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false))); + OptionalInt.empty(), false)); resultData = ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, replicaManager, EXCEPTION_HANDLER); assertEquals(1, resultData.size()); assertTrue(resultData.containsKey(tp0)); assertEquals(0, resultData.get(tp0).partitionIndex()); - assertEquals(MemoryRecords.EMPTY, resultData.get(tp0).records()); + assertNull(resultData.get(tp0).records()); assertTrue(resultData.get(tp0).acquiredRecords().isEmpty()); assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); @@ -358,11 +349,15 @@ public void testProcessFetchResponseWhenNoRecordsAreAcquired() { } @Test - public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { + public void testProcessFetchResponseWithMaxFetchRecords() { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + SharePartition sp0 = Mockito.mock(SharePartition.class); SharePartition sp1 = Mockito.mock(SharePartition.class); @@ -375,62 +370,54 @@ public void testProcessFetchResponseWithMaxFetchRecords() throws IOException { Uuid memberId = Uuid.randomUuid(); // Set max fetch records to 10 - ShareFetch shareFetch = new ShareFetch(FETCH_PARAMS, groupId, memberId.toString(), - new CompletableFuture<>(), List.of(tp0, tp1), BATCH_SIZE, 10, BROKER_TOPIC_STATS); - - LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); - recordsPerOffset.put(0L, 1); - recordsPerOffset.put(1L, 1); - recordsPerOffset.put(2L, 1); - recordsPerOffset.put(3L, 1); - Records records1 = createFileRecords(recordsPerOffset); + ShareFetch shareFetch = new ShareFetch( + new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, 0, + 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty()), + groupId, memberId.toString(), new CompletableFuture<>(), partitionMaxBytes, 10); - recordsPerOffset.clear(); - recordsPerOffset.put(100L, 4); - Records records2 = createFileRecords(recordsPerOffset); + MemoryRecords records1 = MemoryRecords.withRecords(Compression.NONE, + new SimpleRecord("0".getBytes(), "v".getBytes()), + new SimpleRecord("1".getBytes(), "v".getBytes()), + new SimpleRecord("2".getBytes(), "v".getBytes()), + new SimpleRecord(null, "value".getBytes())); FetchPartitionData fetchPartitionData1 = new FetchPartitionData(Errors.NONE, 0L, 0L, records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false); FetchPartitionData fetchPartitionData2 = new FetchPartitionData(Errors.NONE, 0L, 0L, - records2, Optional.empty(), OptionalLong.empty(), Optional.empty(), + records1, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false); - when(sp0.acquire(memberId.toString(), BATCH_SIZE, 10, 0, fetchPartitionData1, FetchIsolation.HIGH_WATERMARK)).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + when(sp0.acquire(memberId.toString(), 10, fetchPartitionData1)).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0).setLastOffset(1).setDeliveryCount((short) 1))); - when(sp1.acquire(memberId.toString(), BATCH_SIZE, 8, 0, fetchPartitionData2, FetchIsolation.HIGH_WATERMARK)).thenReturn( - createShareAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() + when(sp1.acquire(memberId.toString(), 8, fetchPartitionData2)).thenReturn( + ShareAcquiredRecords.fromAcquiredRecords(new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(100).setLastOffset(103).setDeliveryCount((short) 1))); // Send the topic partitions in order so can validate if correct mock is called, accounting // the offset count for the acquired records from the previous share partition acquire. - List responseData = List.of( - new ShareFetchPartitionData(tp0, 0, fetchPartitionData1), - new ShareFetchPartitionData(tp1, 0, fetchPartitionData2) - ); + Map responseData1 = new LinkedHashMap<>(); + responseData1.put(tp0, fetchPartitionData1); + responseData1.put(tp1, fetchPartitionData2); - Map resultData = - ShareFetchUtils.processFetchResponse(shareFetch, responseData, sharePartitions, + Map resultData1 = + ShareFetchUtils.processFetchResponse(shareFetch, responseData1, sharePartitions, mock(ReplicaManager.class), EXCEPTION_HANDLER); - assertEquals(2, resultData.size()); - assertTrue(resultData.containsKey(tp0)); - assertTrue(resultData.containsKey(tp1)); - assertEquals(0, resultData.get(tp0).partitionIndex()); - assertEquals(1, resultData.get(tp1).partitionIndex()); - assertEquals(Errors.NONE.code(), resultData.get(tp0).errorCode()); - assertEquals(Errors.NONE.code(), resultData.get(tp1).errorCode()); - assertEquals(1, resultData.get(tp0).acquiredRecords().size()); - assertEquals(0, resultData.get(tp0).acquiredRecords().get(0).firstOffset()); - assertEquals(1, resultData.get(tp0).acquiredRecords().get(0).lastOffset()); - assertEquals(1, resultData.get(tp1).acquiredRecords().size()); - assertEquals(100, resultData.get(tp1).acquiredRecords().get(0).firstOffset()); - assertEquals(103, resultData.get(tp1).acquiredRecords().get(0).lastOffset()); - - // Validate the slicing for fetched data happened for tp0 records, not for tp1 records. - assertTrue(records1.sizeInBytes() > resultData.get(tp0).records().sizeInBytes()); - assertEquals(records2.sizeInBytes(), resultData.get(tp1).records().sizeInBytes()); + assertEquals(2, resultData1.size()); + assertTrue(resultData1.containsKey(tp0)); + assertTrue(resultData1.containsKey(tp1)); + assertEquals(0, resultData1.get(tp0).partitionIndex()); + assertEquals(1, resultData1.get(tp1).partitionIndex()); + assertEquals(Errors.NONE.code(), resultData1.get(tp0).errorCode()); + assertEquals(Errors.NONE.code(), resultData1.get(tp1).errorCode()); + assertEquals(1, resultData1.get(tp0).acquiredRecords().size()); + assertEquals(0, resultData1.get(tp0).acquiredRecords().get(0).firstOffset()); + assertEquals(1, resultData1.get(tp0).acquiredRecords().get(0).lastOffset()); + assertEquals(1, resultData1.get(tp1).acquiredRecords().size()); + assertEquals(100, resultData1.get(tp1).acquiredRecords().get(0).firstOffset()); + assertEquals(103, resultData1.get(tp1).acquiredRecords().get(0).lastOffset()); } @Test @@ -450,13 +437,13 @@ public void testProcessFetchResponseWithOffsetFetchException() { // Mock the replicaManager.fetchOffsetForTimestamp method to throw exception. Throwable exception = new FencedLeaderEpochException("Fenced exception"); doThrow(exception).when(replicaManager).fetchOffsetForTimestamp(any(TopicPartition.class), anyLong(), any(), any(), anyBoolean()); - when(sp0.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(FetchPartitionData.class), any())).thenReturn(ShareAcquiredRecords.empty()); + when(sp0.acquire(anyString(), anyInt(), any(FetchPartitionData.class))).thenReturn(ShareAcquiredRecords.empty()); // When no records are acquired from share partition. - List responseData = List.of( - new ShareFetchPartitionData(tp0, 0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, + Map responseData = Collections.singletonMap( + tp0, new FetchPartitionData(Errors.OFFSET_OUT_OF_RANGE, 0L, 0L, MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), - OptionalInt.empty(), false))); + OptionalInt.empty(), false)); BiConsumer exceptionHandler = mock(BiConsumer.class); Map resultData = @@ -468,199 +455,4 @@ public void testProcessFetchResponseWithOffsetFetchException() { Mockito.verify(exceptionHandler, times(1)).accept(new SharePartitionKey("grp", tp0), exception); Mockito.verify(sp0, times(0)).updateCacheAndOffsets(any(Long.class)); } - - @ParameterizedTest(name = "{0}") - @ArgumentsSource(RecordsArgumentsProvider.class) - public void testMaybeSliceFetchRecordsSingleBatch(String name, Records records) { - // Acquire all offsets, should return same records. - List acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(5).setLastOffset(14).setDeliveryCount((short) 1)); - Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 10)); - assertEquals(records, slicedRecords); - - // Acquire offsets out of first offset bound should return the records for the matching batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(2).setLastOffset(14).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 10)); - assertEquals(records, slicedRecords); - - // Acquire offsets out of last offset bound should return the records for the matching batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(5).setLastOffset(20).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 5)); - assertEquals(records, slicedRecords); - - // Acquire only subset of batch offsets, starting from the first offset. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(5).setLastOffset(8).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertEquals(records, slicedRecords); - - // Acquire only subset of batch offsets, ending at the last offset. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(8).setLastOffset(14).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertEquals(records, slicedRecords); - - // Acquire only subset of batch offsets, within the batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(8).setLastOffset(10).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertEquals(records, slicedRecords); - } - - @ParameterizedTest(name = "{0}") - @ArgumentsSource(MultipleBatchesRecordsArgumentsProvider.class) - public void testMaybeSliceFetchRecordsMultipleBatches(String name, Records records) { - // Acquire all offsets, should return same records. - List acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(0).setLastOffset(10).setDeliveryCount((short) 1)); - Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 11)); - assertEquals(records, slicedRecords); - - // Acquire offsets from all batches, but only first record from last batch. Should return - // all batches. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(0).setLastOffset(7).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 5)); - assertEquals(records, slicedRecords); - - // Acquire only first batch offsets, should return only first batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(0).setLastOffset(2).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 5)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - List recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(0, recordBatches.get(0).baseOffset()); - assertEquals(2, recordBatches.get(0).lastOffset()); - - // Acquire only second batch offsets, should return only second batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(3).setLastOffset(4).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 5)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(3, recordBatches.get(0).baseOffset()); - assertEquals(4, recordBatches.get(0).lastOffset()); - - // Acquire only last batch offsets, should return only last batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(7).setLastOffset(10).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(7, recordBatches.get(0).baseOffset()); - assertEquals(10, recordBatches.get(0).lastOffset()); - - // Acquire only subset of first batch offsets, should return only first batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(1).setLastOffset(1).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(0, recordBatches.get(0).baseOffset()); - assertEquals(2, recordBatches.get(0).lastOffset()); - - // Acquire only subset of second batch offsets, should return only second batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(4).setLastOffset(4).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(3, recordBatches.get(0).baseOffset()); - assertEquals(4, recordBatches.get(0).lastOffset()); - - // Acquire only subset of last batch offsets, should return only last batch. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(8).setLastOffset(8).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(7, recordBatches.get(0).baseOffset()); - assertEquals(10, recordBatches.get(0).lastOffset()); - - // Acquire including gaps between batches, should return 2 batches. - acquiredRecords = List.of(new AcquiredRecords().setFirstOffset(4).setLastOffset(8).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(2, recordBatches.size()); - assertEquals(3, recordBatches.get(0).baseOffset()); - assertEquals(4, recordBatches.get(0).lastOffset()); - assertEquals(7, recordBatches.get(1).baseOffset()); - assertEquals(10, recordBatches.get(1).lastOffset()); - - // Acquire with multiple acquired records, should return matching batches. - acquiredRecords = List.of( - new AcquiredRecords().setFirstOffset(0).setLastOffset(2).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(3).setLastOffset(4).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(2, recordBatches.size()); - assertEquals(0, recordBatches.get(0).baseOffset()); - assertEquals(2, recordBatches.get(0).lastOffset()); - assertEquals(3, recordBatches.get(1).baseOffset()); - assertEquals(4, recordBatches.get(1).lastOffset()); - - // Acquire with multiple acquired records of individual offsets from single batch, should return - // matching batch. - acquiredRecords = List.of( - new AcquiredRecords().setFirstOffset(8).setLastOffset(8).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(9).setLastOffset(9).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertTrue(records.sizeInBytes() > slicedRecords.sizeInBytes()); - recordBatches = TestUtils.toList(slicedRecords.batches()); - assertEquals(1, recordBatches.size()); - assertEquals(7, recordBatches.get(0).baseOffset()); - assertEquals(10, recordBatches.get(0).lastOffset()); - - // Acquire with multiple acquired records of individual offsets from multiple batch, should return - // multiple matching batches. - acquiredRecords = List.of( - new AcquiredRecords().setFirstOffset(1).setLastOffset(1).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(9).setLastOffset(9).setDeliveryCount((short) 1)); - slicedRecords = ShareFetchUtils.maybeSliceFetchRecords(records, new ShareAcquiredRecords(acquiredRecords, 1)); - assertEquals(records.sizeInBytes(), slicedRecords.sizeInBytes()); - } - - @ParameterizedTest(name = "{0}") - @ArgumentsSource(MultipleBatchesRecordsArgumentsProvider.class) - public void testMaybeSliceFetchRecordsException(String name, Records records) { - // Send empty acquired records which should trigger an exception and same file records should - // be returned. The method doesn't expect empty acquired records. - Records slicedRecords = ShareFetchUtils.maybeSliceFetchRecords( - records, new ShareAcquiredRecords(List.of(), 3)); - assertEquals(records, slicedRecords); - } - - private static class RecordsArgumentsProvider implements ArgumentsProvider { - @Override - public Stream provideArguments(ExtensionContext context) throws Exception { - return Stream.of( - Arguments.of("FileRecords", createFileRecords(Map.of(5L, 10))), - Arguments.of("MemoryRecords", createMemoryRecords(5L, 10)) - ); - } - - private MemoryRecords createMemoryRecords(long baseOffset, int numRecords) { - try (MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(numRecords, baseOffset)) { - return recordsBuilder.build(); - } - } - } - - private static class MultipleBatchesRecordsArgumentsProvider implements ArgumentsProvider { - @Override - public Stream provideArguments(ExtensionContext context) throws Exception { - LinkedHashMap recordsPerOffset = new LinkedHashMap<>(); - recordsPerOffset.put(0L, 3); - recordsPerOffset.put(3L, 2); - recordsPerOffset.put(7L, 4); // Gap of 2 offsets between batches. - return Stream.of( - Arguments.of("FileRecords", createFileRecords(recordsPerOffset)), - Arguments.of("MemoryRecords", createMemoryRecords(recordsPerOffset)) - ); - } - - private MemoryRecords createMemoryRecords(Map recordsPerOffset) { - ByteBuffer buffer = ByteBuffer.allocate(1024); - recordsPerOffset.forEach((offset, numOfRecords) -> memoryRecordsBuilder(buffer, numOfRecords, offset).close()); - buffer.flip(); - - return MemoryRecords.readableRecords(buffer); - } - } } diff --git a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java index 24a84bab64a9b..a8b5941c16ef0 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionManagerTest.java @@ -17,15 +17,16 @@ package kafka.server.share; import kafka.cluster.Partition; +import kafka.server.LogReadResult; import kafka.server.ReplicaManager; import kafka.server.ReplicaQuota; import kafka.server.share.SharePartitionManager.SharePartitionListener; import org.apache.kafka.clients.consumer.AcknowledgeType; +import org.apache.kafka.common.MetricName; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; -import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.InvalidRecordStateException; @@ -34,25 +35,24 @@ import org.apache.kafka.common.errors.KafkaStorageException; import org.apache.kafka.common.errors.LeaderNotAvailableException; import org.apache.kafka.common.errors.NotLeaderOrFollowerException; -import org.apache.kafka.common.errors.ShareSessionLimitReachedException; import org.apache.kafka.common.errors.ShareSessionNotFoundException; import org.apache.kafka.common.message.ShareAcknowledgeResponseData; import org.apache.kafka.common.message.ShareFetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.PartitionData; +import org.apache.kafka.common.metrics.Metrics; import org.apache.kafka.common.protocol.ApiKeys; import org.apache.kafka.common.protocol.Errors; import org.apache.kafka.common.protocol.ObjectSerializationCache; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; -import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.requests.FetchRequest; +import org.apache.kafka.common.requests.ShareFetchRequest; import org.apache.kafka.common.requests.ShareFetchResponse; import org.apache.kafka.common.requests.ShareRequestMetadata; import org.apache.kafka.common.utils.ImplicitLinkedHashCollection; +import org.apache.kafka.common.utils.MockTime; import org.apache.kafka.common.utils.Time; import org.apache.kafka.coordinator.group.GroupConfigManager; -import org.apache.kafka.server.LogReadResult; -import org.apache.kafka.server.common.ShareVersion; import org.apache.kafka.server.purgatory.DelayedOperationKey; import org.apache.kafka.server.purgatory.DelayedOperationPurgatory; import org.apache.kafka.server.share.CachedSharePartition; @@ -64,11 +64,9 @@ import org.apache.kafka.server.share.context.ShareSessionContext; import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; import org.apache.kafka.server.share.fetch.DelayedShareFetchKey; -import org.apache.kafka.server.share.fetch.PartitionMaxBytesStrategy; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; import org.apache.kafka.server.share.fetch.ShareFetch; -import org.apache.kafka.server.share.metrics.ShareGroupMetrics; -import org.apache.kafka.server.share.persister.NoOpStatePersister; +import org.apache.kafka.server.share.persister.NoOpShareStatePersister; import org.apache.kafka.server.share.persister.Persister; import org.apache.kafka.server.share.session.ShareSession; import org.apache.kafka.server.share.session.ShareSessionCache; @@ -76,53 +74,51 @@ import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchParams; import org.apache.kafka.server.util.FutureUtils; -import org.apache.kafka.server.util.MockTime; import org.apache.kafka.server.util.timer.MockTimer; import org.apache.kafka.server.util.timer.SystemTimer; import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.FetchDataInfo; import org.apache.kafka.storage.internals.log.LogOffsetMetadata; import org.apache.kafka.storage.internals.log.OffsetResultHolder; -import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics; -import org.apache.kafka.storage.log.metrics.BrokerTopicStats; import org.apache.kafka.test.TestUtils; import org.junit.jupiter.api.AfterEach; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.Timeout; -import org.mockito.ArgumentCaptor; import org.mockito.ArgumentMatchers; import org.mockito.Mockito; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.Optional; -import java.util.OptionalInt; -import java.util.OptionalLong; import java.util.Set; import java.util.concurrent.CompletableFuture; import java.util.concurrent.ExecutionException; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.TimeUnit; import java.util.function.Consumer; +import scala.Option; import scala.Tuple2; import scala.collection.Seq; import scala.jdk.javaapi.CollectionConverters; import static kafka.server.share.DelayedShareFetchTest.mockTopicIdPartitionToReturnDataEqualToMinBytes; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.validateRotatedListEquals; -import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertFalse; import static org.junit.jupiter.api.Assertions.assertInstanceOf; import static org.junit.jupiter.api.Assertions.assertNotEquals; import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.any; @@ -130,6 +126,8 @@ import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.Mockito.atLeast; +import static org.mockito.Mockito.atMost; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; @@ -148,50 +146,38 @@ public class SharePartitionManagerTest { private static final short MAX_FETCH_RECORDS = 500; private static final int DELAYED_SHARE_FETCH_MAX_WAIT_MS = 2000; private static final int DELAYED_SHARE_FETCH_TIMEOUT_MS = 3000; - private static final int BATCH_SIZE = 500; - private static final FetchParams FETCH_PARAMS = new FetchParams( + private static final FetchParams FETCH_PARAMS = new FetchParams(ApiKeys.SHARE_FETCH.latestVersion(), FetchRequest.ORDINARY_CONSUMER_ID, -1, DELAYED_SHARE_FETCH_MAX_WAIT_MS, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty(), true); - private static final String TIMER_NAME_PREFIX = "share-partition-manager"; - private static final String CONNECTION_ID = "id-1"; + static final int PARTITION_MAX_BYTES = 40000; static final int DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL = 1000; - static final long REMOTE_FETCH_MAX_WAIT_MS = 6000L; - private MockTime time; + private Timer mockTimer; private ReplicaManager mockReplicaManager; - private BrokerTopicStats brokerTopicStats; - private SharePartitionManager sharePartitionManager; - private static final List EMPTY_PART_LIST = List.of(); - private static final List EMPTY_ACQUIRED_RECORDS = List.of(); + private static final List EMPTY_PART_LIST = Collections.unmodifiableList(new ArrayList<>()); @BeforeEach public void setUp() { - time = new MockTime(); - kafka.utils.TestUtils.clearYammerMetrics(); - brokerTopicStats = new BrokerTopicStats(); + mockTimer = new SystemTimerReaper("sharePartitionManagerTestReaper", + new SystemTimer("sharePartitionManagerTestTimer")); mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); - when(mockReplicaManager.getPartitionOrException((TopicPartition) any())).thenReturn(partition); + when(mockReplicaManager.getPartitionOrException(Mockito.any())).thenReturn(partition); } @AfterEach public void tearDown() throws Exception { - if (sharePartitionManager != null) { - sharePartitionManager.close(); - } - brokerTopicStats.close(); - assertNoReaperThreadsPendingClose(); + mockTimer.close(); } @Test public void testNewContextReturnsFinalContextWithoutRequestData() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); @@ -200,26 +186,28 @@ public void testNewContextReturnsFinalContextWithoutRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = List.of(tp0, tp1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); + reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @Test public void testNewContextReturnsFinalContextWithRequestData() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Uuid tpId0 = Uuid.randomUuid(); + Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); @@ -227,28 +215,31 @@ public void testNewContextReturnsFinalContextWithRequestData() { Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = List.of(tp0, tp1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); + reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // Sending a Request with FINAL_EPOCH. This should return a FinalContext. - List reqData2 = List.of(tp0, tp1); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID); + // shareFetch is not empty, but the maxBytes of topic partition is 0, which means this is added only for acknowledgements. + // New context should be created successfully + Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), + new ShareFetchRequest.SharePartitionData(tpId1, 0)); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true); assertEquals(FinalContext.class, context2.getClass()); } @Test - public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequestData() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + public void testNewContextReturnsFinalContextError() { + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); @@ -258,113 +249,31 @@ public void testNewContextReturnsFinalContextWhenTopicPartitionsArePresentInRequ Uuid memberId = Uuid.randomUuid(); // Create a new share session with an initial share fetch request - List reqData1 = List.of(tp0, tp1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), PARTITION_MAX_BYTES)); + reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), PARTITION_MAX_BYTES)); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH); - // shareFetch is not empty, and it contains tpId1, which should return FinalContext instance since it is FINAL_EPOCH - List reqData2 = List.of(new TopicIdPartition(tpId1, new TopicPartition("foo", 0))); - assertInstanceOf(FinalContext.class, - sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, true, CONNECTION_ID)); - } - - @Test - public void testNewContextThrowsErrorWhenShareSessionNotFoundOnFinalEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext("grp", EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.FINAL_EPOCH), false, CONNECTION_ID)); - } - - @Test - public void testNewContextThrowsErrorWhenAcknowledgeDataPresentOnInitialEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - - assertThrows(InvalidRequestException.class, () -> sharePartitionManager.newContext("grp", List.of(tp0, tp1), EMPTY_PART_LIST, - new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH), true, CONNECTION_ID)); - } - - @Test - public void testNewContextThrowsErrorWhenShareSessionCacheIsFullOnInitialEpoch() { - // Define a cache with max size 1 - ShareSessionCache cache = new ShareSessionCache(1); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - - String groupId = "grp"; - Uuid memberId1 = Uuid.randomUuid(); - Uuid memberId2 = Uuid.randomUuid(); - - // Create a new share session with an initial share fetch request - List reqData = List.of(tp0, tp1); - - ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertFalse(((ShareSessionContext) context1).isSubsequent()); - - // Trying to create a new share session, but since cache is already full, it should throw an exception - ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); - assertThrows(ShareSessionLimitReachedException.class, () -> sharePartitionManager.newContext("grp", reqData, EMPTY_PART_LIST, - reqMetadata2, false, "id-2")); - } - - @Test - public void testNewContextExistingSessionNewRequestWithInitialEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); - - String groupId = "grp"; - Uuid memberId = Uuid.randomUuid(); - List reqData = List.of(tp0, tp1); - - ShareRequestMetadata reqMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH); - - // Create a new share session with an initial share fetch request - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertFalse(((ShareSessionContext) context1).isSubsequent()); - assertEquals(1, cache.size()); - - // Sending another request with INITIAL_EPOCH and same share session key. This should return a new ShareSessionContext - // and delete the older one. - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData, EMPTY_PART_LIST, reqMetadata, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); - assertFalse(((ShareSessionContext) context1).isSubsequent()); - assertEquals(1, cache.size()); + // shareFetch is not empty and the maxBytes of topic partition is not 0, which means this is trying to fetch on a Final request. + // New context should throw an error + Map reqData3 = Collections.singletonMap(new TopicIdPartition(tpId1, new TopicPartition("foo", 0)), + new ShareFetchRequest.SharePartitionData(tpId1, PARTITION_MAX_BYTES)); + assertThrows(InvalidRequestException.class, + () -> sharePartitionManager.newContext(groupId, reqData3, Collections.emptyList(), reqMetadata2, true)); } @Test public void testNewContext() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Map topicNames = new HashMap<>(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); @@ -378,14 +287,20 @@ public void testNewContext() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = List.of(tp0, tp1); + Map reqData2 = new LinkedHashMap<>(); + reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); + reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); + ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + assertEquals(ShareSessionContext.class, context2.getClass()); assertFalse(((ShareSessionContext) context2).isSubsequent()); - ((ShareSessionContext) context2).shareFetchData().forEach(topicIdPartition -> assertTrue(reqData2.contains(topicIdPartition))); + ((ShareSessionContext) context2).shareFetchData().forEach((topicIdPartition, sharePartitionData) -> { + assertTrue(reqData2.containsKey(topicIdPartition)); + assertEquals(reqData2.get(topicIdPartition), sharePartitionData); + }); LinkedHashMap respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); @@ -396,29 +311,31 @@ public void testNewContext() { assertEquals(respData2, resp2.responseData(topicNames)); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, - reqMetadata2.memberId()); + reqMetadata2.memberId()); // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, "id-2")); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test trying to create a new session with a non-existent session key Uuid memberId4 = Uuid.randomUuid(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(memberId4, 1), true, "id-3")); + new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context5); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context5.getClass()); assertTrue(((ShareSessionContext) context5).isSubsequent()); ShareSessionContext shareSessionContext5 = (ShareSessionContext) context5; synchronized (shareSessionContext5.session()) { shareSessionContext5.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new - TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); - assertTrue(reqData2.contains(topicIdPartition)); + TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); + ShareFetchRequest.SharePartitionData data = cachedSharePartition.reqData(); + assertTrue(reqData2.containsKey(topicIdPartition)); + assertEquals(reqData2.get(topicIdPartition), data); }); } ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); @@ -427,18 +344,18 @@ public void testNewContext() { // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -458,112 +375,106 @@ public void testNewContext() { } @Test - public void testAcknowledgeSessionUpdateThrowsOnInitialEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + public void testShareSessionExpiration() { + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(2, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); + Map topicNames = new HashMap<>(); + Uuid fooId = Uuid.randomUuid(); + topicNames.put(fooId, "foo"); + TopicIdPartition foo0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); + TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); - assertThrows(InvalidShareSessionEpochException.class, - () -> sharePartitionManager.acknowledgeSessionUpdate("grp", - new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH))); - } + // Create a new share session, session 1 + Map session1req = new LinkedHashMap<>(); + session1req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); + session1req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); - @Test - public void testAcknowledgeSessionUpdateThrowsWhenShareSessionNotFound() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + String groupId = "grp"; + ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - // The share session corresponding to this memberId has not been created yet. This should throw an exception. - assertThrows(ShareSessionNotFoundException.class, - () -> sharePartitionManager.acknowledgeSessionUpdate("grp", - new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)))); - } + ShareFetchContext session1context = sharePartitionManager.newContext(groupId, session1req, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(session1context.getClass(), ShareSessionContext.class); - @Test - public void testAcknowledgeSessionUpdateThrowsInvalidShareSessionEpochException() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + LinkedHashMap respData1 = new LinkedHashMap<>(); + respData1.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); + respData1.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + ShareFetchResponse session1resp = session1context.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1); + assertEquals(Errors.NONE, session1resp.error()); + assertEquals(2, session1resp.responseData(topicNames).size()); - String groupId = "grp"; - Uuid memberId = Uuid.randomUuid(); + ShareSessionKey session1Key = new ShareSessionKey(groupId, reqMetadata1.memberId()); + // check share session entered into cache + assertNotNull(cache.get(session1Key)); - // Create a new share session with an initial share fetch request - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, - new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertFalse(((ShareSessionContext) context1).isSubsequent()); + time.sleep(500); - // The expected epoch from the share session should be 1, but we are passing 2. This should throw an exception. - assertThrows(InvalidShareSessionEpochException.class, - () -> sharePartitionManager.acknowledgeSessionUpdate("grp", - new ShareRequestMetadata(memberId, - ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH))))); - } + // Create a second new share session + Map session2req = new LinkedHashMap<>(); + session2req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); + session2req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); - @Test - public void testAcknowledgeSessionUpdateSuccessOnSubsequentEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + ShareFetchContext session2context = sharePartitionManager.newContext(groupId, session2req, EMPTY_PART_LIST, reqMetadata2, false); + assertEquals(session2context.getClass(), ShareSessionContext.class); - String groupId = "grp"; - Uuid memberId = Uuid.randomUuid(); + LinkedHashMap respData2 = new LinkedHashMap<>(); + respData2.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); + respData2.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); - // Create a new share session with an initial share fetch request - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, - new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertFalse(((ShareSessionContext) context1).isSubsequent()); + ShareFetchResponse session2resp = session2context.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); + assertEquals(Errors.NONE, session2resp.error()); + assertEquals(2, session2resp.responseData(topicNames).size()); - // The expected epoch from the share session should be 1, and we are passing the same. So, execution should be successful. - assertDoesNotThrow( - () -> sharePartitionManager.acknowledgeSessionUpdate("grp", - new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)))); - } + ShareSessionKey session2Key = new ShareSessionKey(groupId, reqMetadata2.memberId()); - @Test - public void testAcknowledgeSessionUpdateSuccessOnFinalEpoch() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + // both newly created entries are present in cache + assertNotNull(cache.get(session1Key)); + assertNotNull(cache.get(session2Key)); - Uuid tpId0 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); + time.sleep(500); - String groupId = "grp"; - Uuid memberId = Uuid.randomUuid(); + // Create a subsequent share fetch context for session 1 + ShareFetchContext session1context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + assertEquals(session1context2.getClass(), ShareSessionContext.class); - // Create a new share session with an initial share fetch request - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, List.of(tp0, tp1), EMPTY_PART_LIST, - new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH), false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertFalse(((ShareSessionContext) context1).isSubsequent()); + // total sleep time will now be large enough that share session 1 will be evicted if not correctly touched + time.sleep(501); - // The expected epoch from the share session should be 1, but we are passing the Final Epoch (-1). This should throw an exception. - assertDoesNotThrow( - () -> sharePartitionManager.acknowledgeSessionUpdate("grp", - new ShareRequestMetadata(memberId, ShareRequestMetadata.FINAL_EPOCH))); + // create one final share session to test that the least recently used entry is evicted + // the second share session should be evicted because the first share session was incrementally fetched + // more recently than the second session was created + Map session3req = new LinkedHashMap<>(); + session3req.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); + session3req.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); + + ShareRequestMetadata reqMetadata3 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); + + ShareFetchContext session3context = sharePartitionManager.newContext(groupId, session3req, EMPTY_PART_LIST, reqMetadata3, false); + + LinkedHashMap respData3 = new LinkedHashMap<>(); + respData3.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); + respData3.put(foo1, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo1.partition())); + + ShareFetchResponse session3resp = session3context.updateAndGenerateResponseData(groupId, reqMetadata3.memberId(), respData3); + assertEquals(Errors.NONE, session3resp.error()); + assertEquals(2, session3resp.responseData(topicNames).size()); + + ShareSessionKey session3Key = new ShareSessionKey(groupId, reqMetadata3.memberId()); + + assertNotNull(cache.get(session1Key)); + assertNull(cache.get(session2Key), "share session 2 should have been evicted by latest share session, " + + "as share session 1 was used more recently"); + assertNotNull(cache.get(session3Key)); } @Test public void testSubsequentShareSession() { - sharePartitionManager = SharePartitionManagerBuilder.builder().build(); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder().build(); Map topicNames = new HashMap<>(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); @@ -574,13 +485,15 @@ public void testSubsequentShareSession() { TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); // Create a new share session with foo-0 and foo-1 - List reqData1 = List.of(tp0, tp1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); + reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); LinkedHashMap respData1 = new LinkedHashMap<>(); respData1.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(tp0.partition())); @@ -591,12 +504,13 @@ public void testSubsequentShareSession() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent fetch request that removes foo-0 and adds bar-0 - List reqData2 = List.of(tp2); + Map reqData2 = Collections.singletonMap( + tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); List removed2 = new ArrayList<>(); removed2.add(tp0); ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, removed2, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context2.getClass()); Set expectedTopicIdPartitions2 = new HashSet<>(); expectedTopicIdPartitions2.add(tp1); @@ -617,19 +531,17 @@ public void testSubsequentShareSession() { ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData2); assertEquals(Errors.NONE, resp2.error()); assertEquals(1, resp2.data().responses().size()); - assertEquals(barId, resp2.data().responses().stream().findFirst().get().topicId()); - assertEquals(1, resp2.data().responses().stream().findFirst().get().partitions().size()); - assertEquals(0, resp2.data().responses().stream().findFirst().get().partitions().get(0).partitionIndex()); + assertEquals(barId, resp2.data().responses().get(0).topicId()); + assertEquals(1, resp2.data().responses().get(0).partitions().size()); + assertEquals(0, resp2.data().responses().get(0).partitions().get(0).partitionIndex()); assertEquals(1, resp2.responseData(topicNames).size()); } @Test public void testZeroSizeShareSession() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).build(); Map topicNames = new HashMap<>(); Uuid fooId = Uuid.randomUuid(); topicNames.put(fooId, "foo"); @@ -637,13 +549,15 @@ public void testZeroSizeShareSession() { TopicIdPartition foo1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); // Create a new share session with foo-0 and foo-1 - List reqData1 = List.of(foo0, foo1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(foo0, new ShareFetchRequest.SharePartitionData(foo0.topicId(), 100)); + reqData1.put(foo1, new ShareFetchRequest.SharePartitionData(foo1.topicId(), 100)); String groupId = "grp"; ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); LinkedHashMap respData1 = new LinkedHashMap<>(); respData1.put(foo0, new ShareFetchResponseData.PartitionData().setPartitionIndex(foo0.partition())); @@ -658,9 +572,9 @@ public void testZeroSizeShareSession() { List removed2 = new ArrayList<>(); removed2.add(foo0); removed2.add(foo1); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, removed2, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), removed2, + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context2.getClass()); LinkedHashMap respData2 = new LinkedHashMap<>(); ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData2); @@ -671,11 +585,9 @@ public void testZeroSizeShareSession() { @Test public void testToForgetPartitions() { String groupId = "grp"; - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).build(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); TopicIdPartition foo = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); @@ -683,25 +595,28 @@ public void testToForgetPartitions() { ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - List reqData1 = List.of(foo, bar); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(foo, new ShareFetchRequest.SharePartitionData(foo.topicId(), 100)); + reqData1.put(bar, new ShareFetchRequest.SharePartitionData(bar.topicId(), 100)); + - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); - assertPartitionsPresent((ShareSessionContext) context1, List.of(foo, bar)); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); + assertPartitionsPresent((ShareSessionContext) context1, Arrays.asList(foo, bar)); mockUpdateAndGenerateResponseData(context1, groupId, reqMetadata1.memberId()); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(foo), - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(foo), + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); // So foo is removed but not the others. - assertPartitionsPresent((ShareSessionContext) context2, List.of(bar)); + assertPartitionsPresent((ShareSessionContext) context2, Collections.singletonList(bar)); mockUpdateAndGenerateResponseData(context2, groupId, reqMetadata1.memberId()); - ShareFetchContext context3 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(bar), - new ShareRequestMetadata(reqMetadata1.memberId(), 2), true, CONNECTION_ID); - assertPartitionsPresent((ShareSessionContext) context3, EMPTY_PART_LIST); + ShareFetchContext context3 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(bar), + new ShareRequestMetadata(reqMetadata1.memberId(), 2), true); + assertPartitionsPresent((ShareSessionContext) context3, Collections.emptyList()); } // This test simulates a share session where the topic ID changes broker side (the one handling the request) in both the metadata cache and the log @@ -709,11 +624,9 @@ public void testToForgetPartitions() { @Test public void testShareSessionUpdateTopicIdsBrokerSide() { String groupId = "grp"; - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).build(); Uuid fooId = Uuid.randomUuid(); Uuid barId = Uuid.randomUuid(); TopicIdPartition foo = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); @@ -724,12 +637,14 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { topicNames.put(barId, "bar"); // Create a new share session with foo-0 and bar-1 - List reqData1 = List.of(foo, bar); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(foo, new ShareFetchRequest.SharePartitionData(foo.topicId(), 100)); + reqData1.put(bar, new ShareFetchRequest.SharePartitionData(bar.topicId(), 100)); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); - assertInstanceOf(ShareSessionContext.class, context1); + assertEquals(ShareSessionContext.class, context1.getClass()); assertFalse(((ShareSessionContext) context1).isSubsequent()); LinkedHashMap respData1 = new LinkedHashMap<>(); @@ -742,10 +657,10 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { assertEquals(2, resp1.responseData(topicNames).size()); // Create a subsequent share fetch request as though no topics changed. - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata1.memberId(), 1), true, CONNECTION_ID); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata1.memberId(), 1), true); - assertInstanceOf(ShareSessionContext.class, context2); + assertEquals(ShareSessionContext.class, context2.getClass()); assertTrue(((ShareSessionContext) context2).isSubsequent()); LinkedHashMap respData2 = new LinkedHashMap<>(); @@ -760,11 +675,10 @@ public void testShareSessionUpdateTopicIdsBrokerSide() { @Test public void testGetErroneousAndValidTopicIdPartitions() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Uuid tpId0 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(tpId0, new TopicPartition("foo", 1)); @@ -774,13 +688,17 @@ public void testGetErroneousAndValidTopicIdPartitions() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = List.of(tp0, tp1, tpNull1); + Map reqData2 = new LinkedHashMap<>(); + reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); + reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); + reqData2.put(tpNull1, new ShareFetchRequest.SharePartitionData(tpNull1.topicId(), 100)); + ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + assertEquals(ShareSessionContext.class, context2.getClass()); assertFalse(((ShareSessionContext) context2).isSubsequent()); - assertErroneousAndValidTopicIdPartitions(context2.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1), List.of(tp0, tp1)); + assertErroneousAndValidTopicIdPartitions(context2.getErroneousAndValidTopicIdPartitions(), Collections.singletonList(tpNull1), Arrays.asList(tp0, tp1)); LinkedHashMap respData2 = new LinkedHashMap<>(); respData2.put(tp0, new ShareFetchResponseData.PartitionData().setPartitionIndex(0)); @@ -799,45 +717,46 @@ public void testGetErroneousAndValidTopicIdPartitions() { // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test trying to create a new session with a non-existent session key assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(Uuid.randomUuid(), 1), true, CONNECTION_ID)); + new ShareRequestMetadata(Uuid.randomUuid(), 1), true)); // Continue the first share session we created. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context5); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context5.getClass()); assertTrue(((ShareSessionContext) context5).isSubsequent()); - assertErroneousAndValidTopicIdPartitions(context5.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1), List.of(tp0, tp1)); + assertErroneousAndValidTopicIdPartitions(context5.getErroneousAndValidTopicIdPartitions(), Collections.singletonList(tpNull1), Arrays.asList(tp0, tp1)); ShareFetchResponse resp5 = context5.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); assertEquals(Errors.NONE, resp5.error()); // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - List reqData7 = List.of(tpNull2); + Map reqData7 = Collections.singletonMap( + tpNull2, new ShareFetchRequest.SharePartitionData(tpNull2.topicId(), 100)); ShareFetchContext context7 = sharePartitionManager.newContext(groupId, reqData7, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); // Check for throttled response ShareFetchResponse resp7 = context7.throttleResponse(100); assertEquals(Errors.NONE, resp7.error()); assertEquals(100, resp7.throttleTimeMs()); - assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), List.of(tpNull1, tpNull2), List.of(tp0, tp1)); + assertErroneousAndValidTopicIdPartitions(context7.getErroneousAndValidTopicIdPartitions(), Arrays.asList(tpNull1, tpNull2), Arrays.asList(tp0, tp1)); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); - assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), EMPTY_PART_LIST, EMPTY_PART_LIST); + assertErroneousAndValidTopicIdPartitions(context8.getErroneousAndValidTopicIdPartitions(), Collections.emptyList(), Collections.emptyList()); // Check for throttled response ShareFetchResponse resp8 = context8.throttleResponse(100); assertEquals(Errors.NONE, resp8.error()); @@ -853,11 +772,10 @@ public void testGetErroneousAndValidTopicIdPartitions() { @Test public void testShareFetchContextResponseSize() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); - + Time time = new MockTime(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).withTime(time).build(); Map topicNames = new HashMap<>(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); @@ -871,15 +789,17 @@ public void testShareFetchContextResponseSize() { String groupId = "grp"; // Create a new share session with an initial share fetch request - List reqData2 = List.of(tp0, tp1); + Map reqData2 = new LinkedHashMap<>(); + reqData2.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); + reqData2.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); // For response size expected value calculation ObjectSerializationCache objectSerializationCache = new ObjectSerializationCache(); short version = ApiKeys.SHARE_FETCH.latestVersion(); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + assertEquals(ShareSessionContext.class, context2.getClass()); assertFalse(((ShareSessionContext) context2).isSubsequent()); LinkedHashMap respData2 = new LinkedHashMap<>(); @@ -898,18 +818,19 @@ public void testShareFetchContextResponseSize() { // Test trying to create a new session with an invalid epoch assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test trying to create a new session with a non-existent session key Uuid memberId4 = Uuid.randomUuid(); assertThrows(ShareSessionNotFoundException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(memberId4, 1), true, CONNECTION_ID)); + new ShareRequestMetadata(memberId4, 1), true)); // Continue the first share session we created. - List reqData5 = List.of(tp2); + Map reqData5 = Collections.singletonMap( + tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); ShareFetchContext context5 = sharePartitionManager.newContext(groupId, reqData5, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context5); + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context5.getClass()); assertTrue(((ShareSessionContext) context5).isSubsequent()); LinkedHashMap respData5 = new LinkedHashMap<>(); @@ -923,11 +844,11 @@ public void testShareFetchContextResponseSize() { // Test setting an invalid share session epoch. assertThrows(InvalidShareSessionEpochException.class, () -> sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true, CONNECTION_ID)); + new ShareRequestMetadata(shareSessionKey2.memberId(), 5), true)); // Test generating a throttled response for a subsequent share session - ShareFetchContext context7 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); + ShareFetchContext context7 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); int respSize7 = context7.responseSize(respData2, version); ShareFetchResponse resp7 = context7.throttleResponse(100); @@ -937,8 +858,8 @@ public void testShareFetchContextResponseSize() { assertEquals(4 + new ShareFetchResponseData().size(objectSerializationCache, version), respSize7); // Get the final share session. - ShareFetchContext context8 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); + ShareFetchContext context8 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata2.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context8.getClass()); assertEquals(1, cache.size()); @@ -954,10 +875,9 @@ public void testShareFetchContextResponseSize() { @Test public void testCachedTopicPartitionsWithNoTopicPartitions() { - ShareSessionCache cache = new ShareSessionCache(10); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .build(); + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).build(); List result = sharePartitionManager.cachedTopicIdPartitionsInShareSession("grp", Uuid.randomUuid()); assertTrue(result.isEmpty()); @@ -965,8 +885,9 @@ public void testCachedTopicPartitionsWithNoTopicPartitions() { @Test public void testCachedTopicPartitionsForValidShareSessions() { - ShareSessionCache cache = new ShareSessionCache(10); - + ShareSessionCache cache = new ShareSessionCache(10, 1000); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withCache(cache).build(); Uuid tpId0 = Uuid.randomUuid(); Uuid tpId1 = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(tpId0, new TopicPartition("foo", 0)); @@ -976,31 +897,15 @@ public void testCachedTopicPartitionsForValidShareSessions() { String groupId = "grp"; Uuid memberId1 = Uuid.randomUuid(); Uuid memberId2 = Uuid.randomUuid(); - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - when(sp0.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); - when(sp2.releaseAcquiredRecords(ArgumentMatchers.eq(String.valueOf(memberId1)))).thenReturn(CompletableFuture.completedFuture(null)); - - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .withPartitionCache(partitionCache) - .build(); - // Create a new share session with an initial share fetch request. - List reqData1 = List.of(tp0, tp1); + Map reqData1 = new LinkedHashMap<>(); + reqData1.put(tp0, new ShareFetchRequest.SharePartitionData(tp0.topicId(), 100)); + reqData1.put(tp1, new ShareFetchRequest.SharePartitionData(tp1.topicId(), 100)); ShareRequestMetadata reqMetadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context1); + ShareFetchContext context1 = sharePartitionManager.newContext(groupId, reqData1, EMPTY_PART_LIST, reqMetadata1, false); + assertEquals(ShareSessionContext.class, context1.getClass()); assertFalse(((ShareSessionContext) context1).isSubsequent()); ShareSessionKey shareSessionKey1 = new ShareSessionKey(groupId, @@ -1013,15 +918,16 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp1 = context1.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData1); assertEquals(Errors.NONE, resp1.error()); - assertEquals(Set.of(tp0, tp1), + assertEquals(new HashSet<>(Arrays.asList(tp0, tp1)), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Create a new share session with an initial share fetch request. - List reqData2 = List.of(tp2); + Map reqData2 = Collections.singletonMap( + tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); ShareRequestMetadata reqMetadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH); - ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context2); + ShareFetchContext context2 = sharePartitionManager.newContext(groupId, reqData2, EMPTY_PART_LIST, reqMetadata2, false); + assertEquals(ShareSessionContext.class, context2.getClass()); assertFalse(((ShareSessionContext) context2).isSubsequent()); ShareSessionKey shareSessionKey2 = new ShareSessionKey(groupId, @@ -1033,13 +939,14 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp2 = context2.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData2); assertEquals(Errors.NONE, resp2.error()); - assertEquals(List.of(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); + assertEquals(Collections.singletonList(tp2), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Continue the first share session we created. - List reqData3 = List.of(tp2); + Map reqData3 = Collections.singletonMap( + tp2, new ShareFetchRequest.SharePartitionData(tp2.topicId(), 100)); ShareFetchContext context3 = sharePartitionManager.newContext(groupId, reqData3, EMPTY_PART_LIST, - new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context3); + new ShareRequestMetadata(shareSessionKey1.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context3.getClass()); assertTrue(((ShareSessionContext) context3).isSubsequent()); LinkedHashMap respData3 = new LinkedHashMap<>(); @@ -1047,14 +954,15 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp3 = context3.updateAndGenerateResponseData(groupId, reqMetadata1.memberId(), respData3); assertEquals(Errors.NONE, resp3.error()); - assertEquals(Set.of(tp0, tp1, tp2), + assertEquals(new HashSet<>(Arrays.asList(tp0, tp1, tp2)), new HashSet<>(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1))); // Continue the second session we created. - List reqData4 = List.of(tp3); - ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, List.of(tp2), - new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context4); + Map reqData4 = Collections.singletonMap( + tp3, new ShareFetchRequest.SharePartitionData(tp3.topicId(), 100)); + ShareFetchContext context4 = sharePartitionManager.newContext(groupId, reqData4, Collections.singletonList(tp2), + new ShareRequestMetadata(shareSessionKey2.memberId(), 1), true); + assertEquals(ShareSessionContext.class, context4.getClass()); assertTrue(((ShareSessionContext) context4).isSubsequent()); LinkedHashMap respData4 = new LinkedHashMap<>(); @@ -1062,11 +970,11 @@ public void testCachedTopicPartitionsForValidShareSessions() { ShareFetchResponse resp4 = context4.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData4); assertEquals(Errors.NONE, resp4.error()); - assertEquals(List.of(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); + assertEquals(Collections.singletonList(tp3), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); // Get the final share session. - ShareFetchContext context5 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, EMPTY_PART_LIST, - new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true, CONNECTION_ID); + ShareFetchContext context5 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), EMPTY_PART_LIST, + new ShareRequestMetadata(reqMetadata1.memberId(), ShareRequestMetadata.FINAL_EPOCH), true); assertEquals(FinalContext.class, context5.getClass()); LinkedHashMap respData5 = new LinkedHashMap<>(); @@ -1080,16 +988,16 @@ public void testCachedTopicPartitionsForValidShareSessions() { assertTrue(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId1).isEmpty()); // Continue the second share session . - ShareFetchContext context6 = sharePartitionManager.newContext(groupId, EMPTY_PART_LIST, List.of(tp3), - new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true, CONNECTION_ID); - assertInstanceOf(ShareSessionContext.class, context6); + ShareFetchContext context6 = sharePartitionManager.newContext(groupId, Collections.emptyMap(), Collections.singletonList(tp3), + new ShareRequestMetadata(shareSessionKey2.memberId(), 2), true); + assertEquals(ShareSessionContext.class, context6.getClass()); assertTrue(((ShareSessionContext) context6).isSubsequent()); LinkedHashMap respData6 = new LinkedHashMap<>(); ShareFetchResponse resp6 = context6.updateAndGenerateResponseData(groupId, reqMetadata2.memberId(), respData6); assertEquals(Errors.NONE, resp6.error()); - assertEquals(EMPTY_PART_LIST, sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); + assertEquals(Collections.emptyList(), sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, memberId2)); } @Test @@ -1112,7 +1020,7 @@ public void testSharePartitionKey() { assertNotEquals(sharePartitionKey1, sharePartitionKey3); assertNotEquals(sharePartitionKey1, sharePartitionKey4); assertNotEquals(sharePartitionKey1, sharePartitionKey5); - assertNotNull(sharePartitionKey1); + assertNotEquals(sharePartitionKey1, null); } @Test @@ -1128,14 +1036,23 @@ public void testMultipleSequentialShareFetches() { TopicIdPartition tp4 = new TopicIdPartition(fooId, new TopicPartition("foo", 2)); TopicIdPartition tp5 = new TopicIdPartition(barId, new TopicPartition("bar", 2)); TopicIdPartition tp6 = new TopicIdPartition(fooId, new TopicPartition("foo", 3)); - List topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp3, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp4, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp5, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp6, PARTITION_MAX_BYTES); mockFetchOffsetForTimestamp(mockReplicaManager); - Timer mockTimer = systemTimerReaper(); + Time time = mock(Time.class); + when(time.hiResClockMs()).thenReturn(0L).thenReturn(100L); + Metrics metrics = new Metrics(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); @@ -1145,39 +1062,142 @@ public void testMultipleSequentialShareFetches() { mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp5, 1); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp6, 1); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) + .withTime(time) + .withMetrics(metrics) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); - doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - CompletableFuture> future = sharePartitionManager.fetchMessages( - groupId, memberId1.toString(), FETCH_PARAMS, 1, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - assertTrue(future.isDone()); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 3, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - assertTrue(future.isDone()); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); Mockito.verify(mockReplicaManager, times(2)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 10, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - assertTrue(future.isDone()); + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); Mockito.verify(mockReplicaManager, times(3)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - // Should have 6 total fetches, 3 fetches for topic foo (though 4 partitions but 3 fetches) and 3 - // fetches for topic bar (though 3 partitions but 3 fetches). - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(6, 0, 0, 0), - Map.of("foo", new TopicMetrics(3, 0, 0, 0), "bar", new TopicMetrics(3, 0, 0, 0)) + Map> expectedMetrics = new HashMap<>(); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.PARTITION_LOAD_TIME_AVG, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME), + val -> assertEquals(val.intValue(), (int) 100.0 / 7, SharePartitionManager.ShareGroupMetrics.PARTITION_LOAD_TIME_AVG) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.PARTITION_LOAD_TIME_MAX, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME), + val -> assertEquals(val, 100.0, SharePartitionManager.ShareGroupMetrics.PARTITION_LOAD_TIME_MAX) ); + expectedMetrics.forEach((metric, test) -> { + assertTrue(metrics.metrics().containsKey(metric)); + test.accept((Double) metrics.metrics().get(metric).metricValue()); + }); + } + + @Test + public void testMultipleConcurrentShareFetches() throws InterruptedException { + + String groupId = "grp"; + Uuid memberId1 = Uuid.randomUuid(); + Uuid fooId = Uuid.randomUuid(); + Uuid barId = Uuid.randomUuid(); + TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); + TopicIdPartition tp1 = new TopicIdPartition(fooId, new TopicPartition("foo", 1)); + TopicIdPartition tp2 = new TopicIdPartition(barId, new TopicPartition("bar", 0)); + TopicIdPartition tp3 = new TopicIdPartition(barId, new TopicPartition("bar", 1)); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp3, PARTITION_MAX_BYTES); + + final Time time = new MockTime(0, System.currentTimeMillis(), 0); + + mockFetchOffsetForTimestamp(mockReplicaManager); + + DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( + "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); + mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); + mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); + mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp2, 1); + mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp3, 1); + + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withTime(time) + .withReplicaManager(mockReplicaManager) + .withTimer(mockTimer) + .build(); + + SharePartition sp0 = mock(SharePartition.class); + SharePartition sp1 = mock(SharePartition.class); + SharePartition sp2 = mock(SharePartition.class); + SharePartition sp3 = mock(SharePartition.class); + + when(sp0.nextFetchOffset()).thenReturn((long) 1, (long) 15, (long) 6, (long) 30, (long) 25); + when(sp1.nextFetchOffset()).thenReturn((long) 4, (long) 1, (long) 18, (long) 5); + when(sp2.nextFetchOffset()).thenReturn((long) 10, (long) 25, (long) 26); + when(sp3.nextFetchOffset()).thenReturn((long) 20, (long) 15, (long) 23, (long) 16); + + doAnswer(invocation -> { + assertEquals(1, sp0.nextFetchOffset()); + assertEquals(4, sp1.nextFetchOffset()); + assertEquals(10, sp2.nextFetchOffset()); + assertEquals(20, sp3.nextFetchOffset()); + return buildLogReadResult(partitionMaxBytes.keySet()); + }).doAnswer(invocation -> { + assertEquals(15, sp0.nextFetchOffset()); + assertEquals(1, sp1.nextFetchOffset()); + assertEquals(25, sp2.nextFetchOffset()); + assertEquals(15, sp3.nextFetchOffset()); + return buildLogReadResult(partitionMaxBytes.keySet()); + }).doAnswer(invocation -> { + assertEquals(6, sp0.nextFetchOffset()); + assertEquals(18, sp1.nextFetchOffset()); + assertEquals(26, sp2.nextFetchOffset()); + assertEquals(23, sp3.nextFetchOffset()); + return buildLogReadResult(partitionMaxBytes.keySet()); + }).doAnswer(invocation -> { + assertEquals(30, sp0.nextFetchOffset()); + assertEquals(5, sp1.nextFetchOffset()); + assertEquals(26, sp2.nextFetchOffset()); + assertEquals(16, sp3.nextFetchOffset()); + return buildLogReadResult(partitionMaxBytes.keySet()); + }).doAnswer(invocation -> { + assertEquals(25, sp0.nextFetchOffset()); + assertEquals(5, sp1.nextFetchOffset()); + assertEquals(26, sp2.nextFetchOffset()); + assertEquals(16, sp3.nextFetchOffset()); + return buildLogReadResult(partitionMaxBytes.keySet()); + }).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + int threadCount = 100; + ExecutorService executorService = Executors.newFixedThreadPool(threadCount); + + try { + for (int i = 0; i != threadCount; ++i) { + executorService.submit(() -> { + sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, partitionMaxBytes); + }); + // We are blocking the main thread at an interval of 10 threads so that the currently running executorService threads can complete. + if (i % 10 == 0) + executorService.awaitTermination(50, TimeUnit.MILLISECONDS); + } + } finally { + if (!executorService.awaitTermination(50, TimeUnit.MILLISECONDS)) + executorService.shutdown(); + } + // We are checking the number of replicaManager readFromLog() calls + Mockito.verify(mockReplicaManager, atMost(100)).readFromLog( + any(), any(), any(ReplicaQuota.class), anyBoolean()); + Mockito.verify(mockReplicaManager, atLeast(10)).readFromLog( + any(), any(), any(ReplicaQuota.class), anyBoolean()); } @Test @@ -1186,41 +1206,33 @@ public void testReplicaManagerFetchShouldNotProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(false); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); Map result = future.join(); assertEquals(0, result.size()); - // Should have 1 fetch recorded and no failed as the fetch did complete without error. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(1, 0, 0, 0), - Map.of("foo", new TopicMetrics(1, 0, 0, 0)) - ); } @Test @@ -1229,54 +1241,44 @@ public void testReplicaManagerFetchShouldProceed() { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); mockFetchOffsetForTimestamp(mockReplicaManager); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp0, 1); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); - doAnswer(invocation -> buildLogReadResult(topicIdPartitions)).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); // Since the nextFetchOffset does not point to endOffset + 1, i.e. some of the records in the cachedState are AVAILABLE, // even though the maxInFlightMessages limit is exceeded, replicaManager.readFromLog should be called Mockito.verify(mockReplicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - // Should have 1 fetch recorded. - assertEquals(1, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count()); - assertEquals(1, brokerTopicStats.numTopics()); - assertEquals(1, brokerTopicStats.topicStats(tp0.topic()).totalShareFetchRequestRate().count()); } @Test public void testCloseSharePartitionManager() throws Exception { Timer timer = Mockito.mock(SystemTimerReaper.class); - ShareGroupMetrics shareGroupMetrics = Mockito.mock(ShareGroupMetrics.class); + Persister persister = Mockito.mock(Persister.class); SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() - .withTimer(timer) - .withShareGroupMetrics(shareGroupMetrics) - .build(); + .withTimer(timer).withShareGroupPersister(persister).build(); - // Verify that 0 calls are made to timer.close() and shareGroupMetrics.close(). + // Verify that 0 calls are made to timer.close() and persister.stop(). Mockito.verify(timer, times(0)).close(); - Mockito.verify(shareGroupMetrics, times(0)).close(); + Mockito.verify(persister, times(0)).stop(); // Closing the sharePartitionManager closes timer object in sharePartitionManager. sharePartitionManager.close(); - // Verify that the timer object in sharePartitionManager is closed by checking the calls to timer.close() and shareGroupMetrics.close(). + // Verify that the timer object in sharePartitionManager is closed by checking the calls to timer.close() and persister.stop(). Mockito.verify(timer, times(1)).close(); - Mockito.verify(shareGroupMetrics, times(1)).close(); } @Test @@ -1306,14 +1308,13 @@ public void testReleaseSessionSuccess() { partitionMap.add(new CachedSharePartition(tp3)); when(shareSession.partitionMap()).thenReturn(partitionMap); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) + .withPartitionCacheMap(partitionCacheMap) .build(); CompletableFuture> resultFuture = @@ -1328,16 +1329,10 @@ public void testReleaseSessionSuccess() { assertEquals(2, result.get(tp2).partitionIndex()); assertEquals(Errors.INVALID_RECORD_STATE.code(), result.get(tp2).errorCode()); assertEquals("Unable to release acquired records for the batch", result.get(tp2).errorMessage()); - // tp3 was not a part of partitionCache. + // tp3 was not a part of partitionCacheMap. assertEquals(4, result.get(tp3).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp3).errorCode()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp3).errorMessage()); - // Shouldn't have any metrics for fetch and acknowledge. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 0, 0), - Map.of() - ); } @Test @@ -1355,7 +1350,7 @@ public void testReleaseSessionWithIncorrectGroupId() { partitionMap.add(new CachedSharePartition(tp1)); when(shareSession.partitionMap()).thenReturn(partitionMap); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -1384,7 +1379,7 @@ public void testReleaseSessionWithIncorrectMemberId() { partitionMap.add(new CachedSharePartition(tp1)); when(shareSession.partitionMap()).thenReturn(partitionMap); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -1407,7 +1402,7 @@ public void testReleaseSessionWithEmptyTopicPartitions() { when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(shareSession); when(shareSession.partitionMap()).thenReturn(new ImplicitLinkedHashCollection<>()); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -1429,7 +1424,7 @@ public void testReleaseSessionWithNullShareSession() { // Make the response not null for remove so can further check for the return value from topic partitions. when(cache.remove(new ShareSessionKey(groupId, memberId))).thenReturn(mock(ShareSession.class)); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withCache(cache) .build(); @@ -1449,18 +1444,15 @@ public void testAcknowledgeSinglePartition() { when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp), sp); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) - .build(); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).build(); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); CompletableFuture> resultFuture = sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics); @@ -1469,16 +1461,10 @@ public void testAcknowledgeSinglePartition() { assertTrue(result.containsKey(tp)); assertEquals(0, result.get(tp).partitionIndex()); assertEquals(Errors.NONE.code(), result.get(tp).errorCode()); - - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 1, 0), - Map.of("foo", new TopicMetrics(0, 0, 1, 0)) - ); } @Test - public void testAcknowledgeMultiplePartition() throws Exception { + public void testAcknowledgeMultiplePartition() { String groupId = "grp"; String memberId = Uuid.randomUuid().toString(); @@ -1494,30 +1480,27 @@ public void testAcknowledgeMultiplePartition() throws Exception { when(sp2.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); when(sp3.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withShareGroupMetrics(shareGroupMetrics) - .withBrokerTopicStats(brokerTopicStats) - .build(); + Metrics metrics = new Metrics(); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).withMetrics(metrics).build(); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp1, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp1, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); - acknowledgeTopics.put(tp2, List.of( - new ShareAcknowledgementBatch(15, 26, List.of((byte) 2)), - new ShareAcknowledgementBatch(34, 56, List.of((byte) 2)) + acknowledgeTopics.put(tp2, Arrays.asList( + new ShareAcknowledgementBatch(15, 26, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(34, 56, Collections.singletonList((byte) 2)) )); - acknowledgeTopics.put(tp3, List.of( - new ShareAcknowledgementBatch(4, 15, List.of((byte) 3)), - new ShareAcknowledgementBatch(16, 21, List.of((byte) 3)) + acknowledgeTopics.put(tp3, Arrays.asList( + new ShareAcknowledgementBatch(4, 15, Collections.singletonList((byte) 3)), + new ShareAcknowledgementBatch(16, 21, Collections.singletonList((byte) 3)) )); CompletableFuture> resultFuture = sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics); @@ -1533,71 +1516,49 @@ public void testAcknowledgeMultiplePartition() throws Exception { assertEquals(0, result.get(tp3).partitionIndex()); assertEquals(Errors.NONE.code(), result.get(tp3).errorCode()); - assertEquals(42, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count()); - assertEquals(35, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count()); - assertEquals(18, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count()); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).meanRate() > 0); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).meanRate() > 0); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).meanRate() > 0); - - // Should have 3 successful acknowledgement and 1 successful acknowledgement per topic. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 3, 0), - Map.of(tp1.topic(), new TopicMetrics(0, 0, 1, 0), tp2.topic(), new TopicMetrics(0, 0, 1, 0), tp3.topic(), new TopicMetrics(0, 0, 1, 0)) + Map> expectedMetrics = new HashMap<>(); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.SHARE_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME), + val -> assertEquals(val, 1.0) ); - shareGroupMetrics.close(); - } - - @Test - public void testAcknowledgeIndividualOffsets() throws Exception { - String groupId = "grp"; - String memberId = Uuid.randomUuid().toString(); - - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); - - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - - List ack1 = List.of( - new ShareAcknowledgementBatch(12, 12, List.of((byte) 1))); - List ack2 = List.of( - new ShareAcknowledgementBatch(15, 20, List.of((byte) 2, (byte) 3, (byte) 2, (byte) 2, (byte) 3, (byte) 2))); - when(sp1.acknowledge(memberId, ack1)).thenReturn(CompletableFuture.completedFuture(null)); - when(sp2.acknowledge(memberId, ack2)).thenReturn(CompletableFuture.completedFuture(null)); - - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withShareGroupMetrics(shareGroupMetrics) - .withBrokerTopicStats(brokerTopicStats) - .build(); - - Map> acknowledgeTopics = Map.of(tp1, ack1, tp2, ack2); - CompletableFuture> resultFuture = - sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics); - Map result = resultFuture.join(); - assertEquals(2, result.size()); - assertTrue(result.containsKey(tp1)); - assertTrue(result.containsKey(tp2)); - assertEquals(0, result.get(tp1).partitionIndex()); - assertEquals(Errors.NONE.code(), result.get(tp1).errorCode()); - assertEquals(0, result.get(tp2).partitionIndex()); - assertEquals(Errors.NONE.code(), result.get(tp2).errorCode()); - - assertEquals(1, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count()); - assertEquals(4, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count()); - assertEquals(2, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count()); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).meanRate() > 0); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).meanRate() > 0); - assertTrue(shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).meanRate() > 0); - - shareGroupMetrics.close(); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.SHARE_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME), + val -> assertTrue(val > 0) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.ACCEPT.toString())), + val -> assertEquals(2.0, val) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.RELEASE.toString())), + val -> assertEquals(2.0, val) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_COUNT, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.REJECT.toString())), + val -> assertEquals(2.0, val) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.ACCEPT.toString())), + val -> assertTrue(val > 0) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.RELEASE.toString())), + val -> assertTrue(val > 0) + ); + expectedMetrics.put( + metrics.metricName(SharePartitionManager.ShareGroupMetrics.RECORD_ACK_RATE, SharePartitionManager.ShareGroupMetrics.METRICS_GROUP_NAME, + Collections.singletonMap(SharePartitionManager.ShareGroupMetrics.ACK_TYPE, AcknowledgeType.REJECT.toString())), + val -> assertTrue(val > 0) + ); + expectedMetrics.forEach((metric, test) -> { + assertTrue(metrics.metrics().containsKey(metric)); + test.accept((Double) metrics.metrics().get(metric).metricValue()); + }); } @Test @@ -1609,19 +1570,15 @@ public void testAcknowledgeIncorrectGroupId() { TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); SharePartition sp = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp), sp); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) - .withShareGroupMetrics(shareGroupMetrics) - .build(); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).build(); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); CompletableFuture> resultFuture = sharePartitionManager.acknowledge(memberId, groupId2, acknowledgeTopics); @@ -1631,16 +1588,6 @@ public void testAcknowledgeIncorrectGroupId() { assertEquals(0, result.get(tp).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage()); - // No metric should be recorded as acknowledge failed. - assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.ACCEPT.id).count()); - assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.RELEASE.id).count()); - assertEquals(0, shareGroupMetrics.recordAcknowledgementMeter(AcknowledgeType.REJECT.id).count()); - // Should have 1 acknowledge recorded and 1 failed. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 1, 1), - Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1)) - ); } @Test @@ -1653,17 +1600,15 @@ public void testAcknowledgeIncorrectMemberId() { when(sp.acknowledge(ArgumentMatchers.eq(memberId), any())).thenReturn(FutureUtils.failedFuture( new InvalidRequestException("Member is not the owner of batch record") )); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp), sp); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) - .build(); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp), sp); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).build(); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); CompletableFuture> resultFuture = @@ -1674,12 +1619,6 @@ public void testAcknowledgeIncorrectMemberId() { assertEquals(0, result.get(tp).partitionIndex()); assertEquals(Errors.INVALID_REQUEST.code(), result.get(tp).errorCode()); assertEquals("Member is not the owner of batch record", result.get(tp).errorMessage()); - // Should have 1 acknowledge recorded and 1 failed. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 1, 1), - Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1)) - ); } @Test @@ -1688,14 +1627,12 @@ public void testAcknowledgeEmptyPartitionCacheMap() { String memberId = Uuid.randomUuid().toString(); TopicIdPartition tp = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo4", 3)); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withBrokerTopicStats(brokerTopicStats) - .build(); + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder().build(); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp, List.of( - new ShareAcknowledgementBatch(78, 90, List.of((byte) 2)), - new ShareAcknowledgementBatch(94, 99, List.of((byte) 2)) + acknowledgeTopics.put(tp, Arrays.asList( + new ShareAcknowledgementBatch(78, 90, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(94, 99, Collections.singletonList((byte) 2)) )); CompletableFuture> resultFuture = sharePartitionManager.acknowledge(memberId, groupId, acknowledgeTopics); @@ -1705,12 +1642,6 @@ public void testAcknowledgeEmptyPartitionCacheMap() { assertEquals(3, result.get(tp).partitionIndex()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), result.get(tp).errorCode()); assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), result.get(tp).errorMessage()); - // Should have 1 acknowledge recorded and 1 failed. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(0, 0, 1, 1), - Map.of(tp.topic(), new TopicMetrics(0, 0, 1, 1)) - ); } @Test @@ -1721,10 +1652,13 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0)); TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); - List topicIdPartitions = List.of(tp1, tp2); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); + // mocked share partitions sp1 and sp2 can be acquired once there is an acknowledgement for it. doAnswer(invocation -> { when(sp1.canAcquireRecords()).thenReturn(true); @@ -1735,44 +1669,38 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp2).acknowledge(ArgumentMatchers.eq(memberId), any()); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); ShareFetch shareFetch = new ShareFetch( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - topicIdPartitions, - BATCH_SIZE, - 100, - brokerTopicStats); - - Timer mockTimer = systemTimerReaper(); + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + 100); + DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 2); // Initially you cannot acquire records for both sp1 and sp2. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp2.maybeAcquireFetchLock()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty()); - when(sp2.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(ShareAcquiredRecords.empty()); List delayedShareFetchWatchKeys = new ArrayList<>(); - topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); LinkedHashMap sharePartitions = new LinkedHashMap<>(); @@ -1783,7 +1711,6 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build(); delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys); @@ -1791,12 +1718,12 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys. assertEquals(2, delayedShareFetchPurgatory.watched()); - doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp1, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp1, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); assertEquals(2, delayedShareFetchPurgatory.watched()); @@ -1810,10 +1737,6 @@ public void testAcknowledgeCompletesDelayedShareFetchRequest() { Mockito.verify(sp2, times(0)).nextFetchOffset(); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); - // Should have 1 acknowledge recorded as other topic is acknowledgement request is not sent. - assertEquals(1, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count()); - assertEquals(1, brokerTopicStats.numTopics()); - assertEquals(1, brokerTopicStats.topicStats(tp1.topic()).totalShareAcknowledgementRequestRate().count()); } @Test @@ -1825,7 +1748,9 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - List topicIdPartitions = List.of(tp1, tp2); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -1845,43 +1770,39 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp3).acknowledge(ArgumentMatchers.eq(memberId), any()); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - topicIdPartitions, - BATCH_SIZE, - 100, - brokerTopicStats); - - Timer mockTimer = systemTimerReaper(); + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + 100); + DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Initially you cannot acquire records for both all 3 share partitions. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp2.maybeAcquireFetchLock()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); - when(sp3.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp3.maybeAcquireFetchLock()).thenReturn(true); when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); LinkedHashMap sharePartitions = new LinkedHashMap<>(); @@ -1901,9 +1822,9 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { assertEquals(2, delayedShareFetchPurgatory.watched()); Map> acknowledgeTopics = new HashMap<>(); - acknowledgeTopics.put(tp3, List.of( - new ShareAcknowledgementBatch(12, 20, List.of((byte) 1)), - new ShareAcknowledgementBatch(24, 56, List.of((byte) 1)) + acknowledgeTopics.put(tp3, Arrays.asList( + new ShareAcknowledgementBatch(12, 20, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(24, 56, Collections.singletonList((byte) 1)) )); // Acknowledgement request for sp3. @@ -1916,10 +1837,6 @@ public void testAcknowledgeDoesNotCompleteDelayedShareFetchRequest() { Mockito.verify(sp2, times(0)).nextFetchOffset(); assertTrue(delayedShareFetch.lock().tryLock()); delayedShareFetch.lock().unlock(); - // Should have 1 acknowledge recorded as other 2 topics acknowledgement request is not sent. - assertEquals(1, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count()); - assertEquals(1, brokerTopicStats.numTopics()); - assertEquals(1, brokerTopicStats.topicStats(tp3.topic()).totalShareAcknowledgementRequestRate().count()); } @Test @@ -1931,11 +1848,12 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - List topicIdPartitions = List.of(tp1, tp2); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); - SharePartition sp3 = mock(SharePartition.class); ShareSessionCache cache = mock(ShareSessionCache.class); ShareSession shareSession = mock(ShareSession.class); @@ -1950,42 +1868,37 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { when(sp2.canAcquireRecords()).thenReturn(true); return CompletableFuture.completedFuture(Optional.empty()); }).when(sp2).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); - when(sp3.releaseAcquiredRecords(ArgumentMatchers.eq(memberId))).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); ShareFetch shareFetch = new ShareFetch( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - topicIdPartitions, - BATCH_SIZE, - 100, - brokerTopicStats); - - Timer mockTimer = systemTimerReaper(); + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + 100); + DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(mockReplicaManager, tp1, 1); // Initially you cannot acquire records for both sp1 and sp2. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp2.maybeAcquireFetchLock()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - sharePartitionManager = spy(SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = spy(SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withCache(cache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) @@ -1999,7 +1912,6 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { .withShareFetchData(shareFetch) .withReplicaManager(mockReplicaManager) .withSharePartitions(sharePartitions) - .withPartitionMaxBytesStrategy(PartitionMaxBytesStrategy.type(PartitionMaxBytesStrategy.StrategyType.UNIFORM)) .build(); delayedShareFetchPurgatory.tryCompleteElseWatch(delayedShareFetch, delayedShareFetchWatchKeys); @@ -2007,11 +1919,13 @@ public void testReleaseSessionCompletesDelayedShareFetchRequest() { // Since acquisition lock for sp1 and sp2 cannot be acquired, we should have 2 watched keys. assertEquals(2, delayedShareFetchPurgatory.watched()); + doAnswer(invocation -> buildLogReadResult(partitionMaxBytes.keySet())).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + + assertEquals(2, delayedShareFetchPurgatory.watched()); + // The share session for this share group member returns tp1 and tp3, tp1 is common in both the delayed fetch request and the share session. - when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(List.of(tp1, tp3)); + when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(Arrays.asList(tp1, tp3)); - doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0)); // Release acquired records on session close request for tp1 and tp3. sharePartitionManager.releaseSession(groupId, memberId); @@ -2034,7 +1948,9 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0)); TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0)); - List topicIdPartitions = List.of(tp1, tp2); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp2, PARTITION_MAX_BYTES); SharePartition sp1 = mock(SharePartition.class); SharePartition sp2 = mock(SharePartition.class); @@ -2058,40 +1974,37 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { return CompletableFuture.completedFuture(Optional.empty()); }).when(sp3).releaseAcquiredRecords(ArgumentMatchers.eq(memberId)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); - partitionCache.put(new SharePartitionKey(groupId, tp3), sp3); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCacheMap.put(new SharePartitionKey(groupId, tp3), sp3); ShareFetch shareFetch = new ShareFetch( - FETCH_PARAMS, - groupId, - Uuid.randomUuid().toString(), - new CompletableFuture<>(), - topicIdPartitions, - BATCH_SIZE, - 100, - brokerTopicStats); - - Timer mockTimer = systemTimerReaper(); + FETCH_PARAMS, + groupId, + Uuid.randomUuid().toString(), + new CompletableFuture<>(), + partitionMaxBytes, + 100); + DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Initially you cannot acquire records for both all 3 share partitions. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(false); - when(sp2.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp2.maybeAcquireFetchLock()).thenReturn(true); when(sp2.canAcquireRecords()).thenReturn(false); - when(sp3.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp3.maybeAcquireFetchLock()).thenReturn(true); when(sp3.canAcquireRecords()).thenReturn(false); List delayedShareFetchWatchKeys = new ArrayList<>(); - topicIdPartitions.forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); + partitionMaxBytes.keySet().forEach(topicIdPartition -> delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId(), topicIdPartition.partition()))); - sharePartitionManager = spy(SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = spy(SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withCache(cache) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) @@ -2115,7 +2028,7 @@ public void testReleaseSessionDoesNotCompleteDelayedShareFetchRequest() { // The share session for this share group member returns tp1 and tp3. No topic partition is common in // both the delayed fetch request and the share session. - when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(List.of(tp3)); + when(sharePartitionManager.cachedTopicIdPartitionsInShareSession(groupId, Uuid.fromString(memberId))).thenReturn(Collections.singletonList(tp3)); // Release acquired records on session close for sp3. sharePartitionManager.releaseSession(groupId, memberId); @@ -2136,38 +2049,27 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Keep the initialization future pending, so fetch request is stuck. CompletableFuture pendingInitializationFuture = new CompletableFuture<>(); when(sp0.maybeInitialize()).thenReturn(pendingInitializationFuture); - when(sp0.loadStartTimeMs()).thenReturn(10L); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(100L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .withTime(time) - .withShareGroupMetrics(shareGroupMetrics) - .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).withReplicaManager(mockReplicaManager).withTimer(mockTimer) .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); // Verify that the fetch request is completed. TestUtils.waitForCondition( future::isDone, @@ -2178,102 +2080,21 @@ public void testPendingInitializationShouldCompleteFetchRequest() throws Excepti Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); assertFalse(pendingInitializationFuture.isDone()); - assertEquals(0, shareGroupMetrics.partitionLoadTimeMs().count()); // Complete the pending initialization future. pendingInitializationFuture.complete(null); - // Verify the partition load time metrics. - assertEquals(1, shareGroupMetrics.partitionLoadTimeMs().count()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().min()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().sum()); - // Should have 1 fetch recorded. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(1, 0, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(1, 0, 0, 0)) - ); - shareGroupMetrics.close(); } @Test - public void testPartitionLoadTimeMetricWithMultiplePartitions() throws Exception { - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - List topicIdPartitions = List.of(tp0, tp1); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); - - // Keep the initialization future pending, so fetch request is stuck. - CompletableFuture pendingInitializationFuture1 = new CompletableFuture<>(); - when(sp0.maybeInitialize()).thenReturn(pendingInitializationFuture1); - when(sp0.loadStartTimeMs()).thenReturn(10L); - - CompletableFuture pendingInitializationFuture2 = new CompletableFuture<>(); - when(sp1.maybeInitialize()).thenReturn(pendingInitializationFuture2); - when(sp1.loadStartTimeMs()).thenReturn(40L); - - Timer mockTimer = systemTimerReaper(); - DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( - "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); - mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - - Time time = mock(Time.class); - when(time.hiResClockMs()).thenReturn(100L); - ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .withTime(time) - .withShareGroupMetrics(shareGroupMetrics) - .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) - .build(); - - CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - // Verify that the fetch request is completed. - TestUtils.waitForCondition( - future::isDone, - DELAYED_SHARE_FETCH_TIMEOUT_MS, - () -> "Processing in delayed share fetch queue never ended."); - assertFalse(pendingInitializationFuture1.isDone()); - assertFalse(pendingInitializationFuture2.isDone()); - assertEquals(0, shareGroupMetrics.partitionLoadTimeMs().count()); - // Complete the first pending initialization future. - pendingInitializationFuture1.complete(null); - // Verify the partition load time metrics for first partition. - assertEquals(1, shareGroupMetrics.partitionLoadTimeMs().count()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().min()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().sum()); - // Complete the second pending initialization future. - pendingInitializationFuture2.complete(null); - // Verify the partition load time metrics for both partitions. - assertEquals(2, shareGroupMetrics.partitionLoadTimeMs().count()); - assertEquals(60.0, shareGroupMetrics.partitionLoadTimeMs().min()); - assertEquals(90.0, shareGroupMetrics.partitionLoadTimeMs().max()); - assertEquals(150.0, shareGroupMetrics.partitionLoadTimeMs().sum()); - shareGroupMetrics.close(); - } - - @Test - public void testDelayedInitializationShouldCompleteFetchRequest() { + public void testDelayedInitializationShouldCompleteFetchRequest() throws Exception { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Keep the 2 initialization futures pending and 1 completed with leader not available exception. CompletableFuture pendingInitializationFuture1 = new CompletableFuture<>(); @@ -2283,31 +2104,24 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { .thenReturn(pendingInitializationFuture2) .thenReturn(CompletableFuture.failedFuture(new LeaderNotAvailableException("Leader not available"))); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory shareFetchPurgatorySpy = spy(new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true)); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true)); mockReplicaManagerDelayedShareFetch(mockReplicaManager, shareFetchPurgatorySpy); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).withReplicaManager(mockReplicaManager).withTimer(mockTimer) .build(); // Send 3 requests for share fetch for same share partition. CompletableFuture> future1 = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); CompletableFuture> future2 = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); CompletableFuture> future3 = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); Mockito.verify(sp0, times(3)).maybeInitialize(); Mockito.verify(mockReplicaManager, times(3)).addDelayedShareFetchRequest(any(), any()); @@ -2331,10 +2145,6 @@ public void testDelayedInitializationShouldCompleteFetchRequest() { // Verify that replica manager fetch is not called. Mockito.verify(mockReplicaManager, times(0)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - // Should have 3 fetch recorded. - assertEquals(3, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count()); - assertEquals(1, brokerTopicStats.numTopics()); - assertEquals(3, brokerTopicStats.topicStats(tp0.topic()).totalShareFetchRequestRate().count()); } @Test @@ -2343,30 +2153,25 @@ public void testSharePartitionInitializationExceptions() throws Exception { Uuid memberId = Uuid.randomUuid(); Uuid fooId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(fooId, new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap).withReplicaManager(mockReplicaManager).withTimer(mockTimer) .build(); // Return LeaderNotAvailableException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new LeaderNotAvailableException("Leader not available"))); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, @@ -2377,138 +2182,119 @@ public void testSharePartitionInitializationExceptions() throws Exception { assertTrue(future.join().isEmpty()); Mockito.verify(sp0, times(0)).markFenced(); // Verify that the share partition is still in the cache on LeaderNotAvailableException. - assertEquals(1, partitionCache.size()); + assertEquals(1, partitionCacheMap.size()); // Return IllegalStateException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new IllegalStateException("Illegal state"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Illegal state"); Mockito.verify(sp0, times(1)).markFenced(); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return CoordinatorNotAvailableException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new CoordinatorNotAvailableException("Coordinator not available"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.COORDINATOR_NOT_AVAILABLE, "Coordinator not available"); Mockito.verify(sp0, times(2)).markFenced(); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return InvalidRequestException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new InvalidRequestException("Invalid request"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.INVALID_REQUEST, "Invalid request"); Mockito.verify(sp0, times(3)).markFenced(); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return FencedStateEpochException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new FencedStateEpochException("Fenced state epoch"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced state epoch"); Mockito.verify(sp0, times(4)).markFenced(); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return NotLeaderOrFollowerException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new NotLeaderOrFollowerException("Not leader or follower"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Not leader or follower"); Mockito.verify(sp0, times(5)).markFenced(); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // The last exception removes the share partition from the cache hence re-add the share partition to cache. - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Return RuntimeException to simulate initialization failure. when(sp0.maybeInitialize()).thenReturn(FutureUtils.failedFuture(new RuntimeException("Runtime exception"))); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing in delayed share fetch queue never ended."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Runtime exception"); Mockito.verify(sp0, times(6)).markFenced(); - assertTrue(partitionCache.isEmpty()); - // Should have 7 fetch recorded and 6 failures as 1 fetch was waiting on initialization and - // didn't error out. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(7, 6, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(7, 6, 0, 0)) - ); + assertTrue(partitionCacheMap.isEmpty()); } + @Test + @SuppressWarnings("unchecked") public void testShareFetchProcessingExceptions() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); - SharePartitionCache partitionCache = mock(SharePartitionCache.class); + Map partitionCacheMap = (Map) mock(Map.class); // Throw the exception for first fetch request. Return share partition for next. - when(partitionCache.computeIfAbsent(any(), any())) + when(partitionCacheMap.computeIfAbsent(any(), any())) .thenThrow(new RuntimeException("Error creating instance")); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing for delayed share fetch request not finished."); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Error creating instance"); - // Should have 1 fetch recorded and 1 failure. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(1, 1, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0)) - ); } @Test public void testSharePartitionInitializationFailure() throws Exception { String groupId = "grp"; TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); // Send map to check no share partition is created. - SharePartitionCache partitionCache = new SharePartitionCache(); + Map partitionCacheMap = new HashMap<>(); // Validate when partition is not the leader. Partition partition = mock(Partition.class); when(partition.isLeader()).thenReturn(false); @@ -2516,41 +2302,32 @@ public void testSharePartitionInitializationFailure() throws Exception { ReplicaManager replicaManager = mock(ReplicaManager.class); // First check should throw KafkaStorageException, second check should return partition which // is not leader. - when(replicaManager.getPartitionOrException(any(TopicPartition.class))) + when(replicaManager.getPartitionOrException(any())) .thenThrow(new KafkaStorageException("Exception")) .thenReturn(partition); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) + .withPartitionCacheMap(partitionCacheMap) .build(); // Validate when exception is thrown. CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing for delayed share fetch request not finished."); validateShareFetchFutureException(future, tp0, Errors.KAFKA_STORAGE_ERROR, "Exception"); - assertTrue(partitionCache.isEmpty()); + assertTrue(partitionCacheMap.isEmpty()); // Validate when partition is not leader. - future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); TestUtils.waitForCondition( future::isDone, DELAYED_SHARE_FETCH_TIMEOUT_MS, () -> "Processing for delayed share fetch request not finished."); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER); - assertTrue(partitionCache.isEmpty()); - // Should have 2 fetch recorded and 2 failure. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(2, 2, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0)) - ); + assertTrue(partitionCacheMap.isEmpty()); } @Test @@ -2563,51 +2340,50 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { TopicIdPartition tp1 = new TopicIdPartition(memberId1, new TopicPartition("foo", 1)); // For tp2, share partition initialization will fail. TopicIdPartition tp2 = new TopicIdPartition(memberId1, new TopicPartition("foo", 2)); - List topicIdPartitions = List.of(tp0, tp1, tp2); + Map partitionMaxBytes = Map.of( + tp0, PARTITION_MAX_BYTES, + tp1, PARTITION_MAX_BYTES, + tp2, PARTITION_MAX_BYTES); // Mark partition0 as not the leader. Partition partition0 = mock(Partition.class); when(partition0.isLeader()).thenReturn(false); ReplicaManager replicaManager = mock(ReplicaManager.class); - when(replicaManager.getPartitionOrException(any(TopicPartition.class))) + when(replicaManager.getPartitionOrException(any())) .thenReturn(partition0); // Mock share partition for tp1, so it can succeed. SharePartition sp1 = mock(SharePartition.class); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - when(sp1.acquire(anyString(), anyInt(), anyInt(), anyLong(), any(), any())).thenReturn(new ShareAcquiredRecords(EMPTY_ACQUIRED_RECORDS, 0)); + when(sp1.acquire(anyString(), anyInt(), any())).thenReturn(new ShareAcquiredRecords(Collections.emptyList(), 0)); // Fail initialization for tp2. SharePartition sp2 = mock(SharePartition.class); - partitionCache.put(new SharePartitionKey(groupId, tp2), sp2); + partitionCacheMap.put(new SharePartitionKey(groupId, tp2), sp2); when(sp2.maybeInitialize()).thenReturn(CompletableFuture.failedFuture(new FencedStateEpochException("Fenced state epoch"))); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, replicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(replicaManager, delayedShareFetchPurgatory); when(sp1.fetchOffsetMetadata(anyLong())).thenReturn(Optional.of(new LogOffsetMetadata(0, 1, 0))); mockTopicIdPartitionToReturnDataEqualToMinBytes(replicaManager, tp1, 1); - doAnswer(invocation -> buildLogReadResult(List.of(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); + doAnswer(invocation -> buildLogReadResult(Collections.singleton(tp1))).when(replicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(replicaManager) - .withPartitionCache(partitionCache) - .withBrokerTopicStats(brokerTopicStats) - .withTimer(mockTimer) + .withPartitionCacheMap(partitionCacheMap) .build(); // Validate when exception is thrown. CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, Uuid.randomUuid().toString(), FETCH_PARAMS, partitionMaxBytes); assertTrue(future.isDone()); assertFalse(future.isCompletedExceptionally()); @@ -2621,17 +2397,9 @@ public void testSharePartitionPartialInitializationFailure() throws Exception { assertEquals(Errors.FENCED_STATE_EPOCH.code(), partitionDataMap.get(tp2).errorCode()); assertEquals("Fenced state epoch", partitionDataMap.get(tp2).errorMessage()); - Mockito.verify(replicaManager, times(1)).completeDelayedShareFetchRequest( - new DelayedShareFetchGroupKey(groupId, tp2)); + Mockito.verify(replicaManager, times(0)).completeDelayedShareFetchRequest(any()); Mockito.verify(replicaManager, times(1)).readFromLog( any(), any(), any(ReplicaQuota.class), anyBoolean()); - // Should have 1 fetch recorded and 1 failure as single topic has multiple partition fetch - // and failure. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(1, 1, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0)) - ); } @Test @@ -2639,50 +2407,40 @@ public void testReplicaManagerFetchException() { String groupId = "grp"; Uuid memberId = Uuid.randomUuid(); TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - List topicIdPartitions = List.of(tp0); + Map partitionMaxBytes = Collections.singletonMap(tp0, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); doThrow(new RuntimeException("Exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.UNKNOWN_SERVER_ERROR, "Exception"); // Verify that the share partition is still in the cache on exception. - assertEquals(1, partitionCache.size()); + assertEquals(1, partitionCacheMap.size()); // Throw NotLeaderOrFollowerException from replica manager fetch which should evict instance from the cache. doThrow(new NotLeaderOrFollowerException("Leader exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.NOT_LEADER_OR_FOLLOWER, "Leader exception"); - assertTrue(partitionCache.isEmpty()); - // Should have 2 fetch recorded and 2 failures. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(2, 2, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0)) - ); + assertTrue(partitionCacheMap.isEmpty()); } @Test @@ -2692,67 +2450,57 @@ public void testReplicaManagerFetchMultipleSharePartitionsException() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - List topicIdPartitions = List.of(tp0, tp1); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); SharePartition sp0 = mock(SharePartition.class); - when(sp0.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp0.maybeAcquireFetchLock()).thenReturn(true); when(sp0.canAcquireRecords()).thenReturn(true); when(sp0.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); SharePartition sp1 = mock(SharePartition.class); // Do not make the share partition acquirable hence it shouldn't be removed from the cache, - // as it won't be part of replica manager readFromLog request. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(false); + // as it won't be part of replica manger readFromLog request. + when(sp1.maybeAcquireFetchLock()).thenReturn(false); when(sp1.maybeInitialize()).thenReturn(CompletableFuture.completedFuture(null)); - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); - partitionCache.put(new SharePartitionKey(groupId, tp1), sp1); + Map partitionCacheMap = new HashMap<>(); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp1), sp1); - Timer mockTimer = systemTimerReaper(); DelayedOperationPurgatory delayedShareFetchPurgatory = new DelayedOperationPurgatory<>( "TestShareFetch", mockTimer, mockReplicaManager.localBrokerId(), - DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, false, true); + DELAYED_SHARE_FETCH_PURGATORY_PURGE_INTERVAL, true, true); mockReplicaManagerDelayedShareFetch(mockReplicaManager, delayedShareFetchPurgatory); // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache. doThrow(new FencedStateEpochException("Fenced exception")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() + .withPartitionCacheMap(partitionCacheMap) .withReplicaManager(mockReplicaManager) .withTimer(mockTimer) - .withBrokerTopicStats(brokerTopicStats) .build(); CompletableFuture> future = - sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); validateShareFetchFutureException(future, tp0, Errors.FENCED_STATE_EPOCH, "Fenced exception"); // Verify that tp1 is still in the cache on exception. - assertEquals(1, partitionCache.size()); - assertEquals(sp1, partitionCache.get(new SharePartitionKey(groupId, tp1))); + assertEquals(1, partitionCacheMap.size()); + assertEquals(sp1, partitionCacheMap.get(new SharePartitionKey(groupId, tp1))); // Make sp1 acquirable and add sp0 back in partition cache. Both share partitions should be // removed from the cache. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(true); + when(sp1.maybeAcquireFetchLock()).thenReturn(true); when(sp1.canAcquireRecords()).thenReturn(true); - partitionCache.put(new SharePartitionKey(groupId, tp0), sp0); + partitionCacheMap.put(new SharePartitionKey(groupId, tp0), sp0); // Throw FencedStateEpochException from replica manager fetch which should evict instance from the cache. doThrow(new FencedStateEpochException("Fenced exception again")).when(mockReplicaManager).readFromLog(any(), any(), any(ReplicaQuota.class), anyBoolean()); - future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); + future = sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); validateShareFetchFutureException(future, List.of(tp0, tp1), Errors.FENCED_STATE_EPOCH, "Fenced exception again"); - assertTrue(partitionCache.isEmpty()); - // Should have 4 fetch recorded (2 fetch and 2 topics) and 3 failures as sp1 was not acquired - // in first fetch and shall have empty response. Similarly, tp0 should record 2 failures and - // tp1 should record 1 failure. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(4, 3, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(2, 2, 0, 0), tp1.topic(), new TopicMetrics(2, 1, 0, 0)) - ); + assertTrue(partitionCacheMap.isEmpty()); } @Test @@ -2762,309 +2510,60 @@ public void testListenerRegistration() { TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - List topicIdPartitions = List.of(tp0, tp1); + Map partitionMaxBytes = new HashMap<>(); + partitionMaxBytes.put(tp0, PARTITION_MAX_BYTES); + partitionMaxBytes.put(tp1, PARTITION_MAX_BYTES); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); Partition partition = mockPartition(); - when(mockReplicaManager.getPartitionOrException((TopicPartition) Mockito.any())).thenReturn(partition); + when(mockReplicaManager.getPartitionOrException(Mockito.any())).thenReturn(partition); - sharePartitionManager = SharePartitionManagerBuilder.builder() + SharePartitionManager sharePartitionManager = SharePartitionManagerBuilder.builder() .withReplicaManager(mockReplicaManager) - .withBrokerTopicStats(brokerTopicStats) + .withTimer(mockTimer) .build(); - CompletableFuture> future = sharePartitionManager.fetchMessages( - groupId, memberId.toString(), FETCH_PARAMS, 0, MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - assertTrue(future.isDone()); + sharePartitionManager.fetchMessages(groupId, memberId.toString(), FETCH_PARAMS, partitionMaxBytes); // Validate that the listener is registered. verify(mockReplicaManager, times(2)).maybeAddListener(any(), any()); - // The share partition initialization should error out as further mocks are not provided, the - // metrics should mark fetch as failed. - validateBrokerTopicStatsMetrics( - brokerTopicStats, - new TopicMetrics(2, 2, 0, 0), - Map.of(tp0.topic(), new TopicMetrics(1, 1, 0, 0), tp1.topic(), new TopicMetrics(1, 1, 0, 0)) - ); } @Test public void testSharePartitionListenerOnFailed() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - SharePartitionCache partitionCache = new SharePartitionCache(); + Map partitionCacheMap = new HashMap<>(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache); - testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onFailed); + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onFailed); } @Test public void testSharePartitionListenerOnDeleted() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - SharePartitionCache partitionCache = new SharePartitionCache(); + Map partitionCacheMap = new HashMap<>(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache); - testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onDeleted); + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onDeleted); } @Test public void testSharePartitionListenerOnBecomingFollower() { SharePartitionKey sharePartitionKey = new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))); - SharePartitionCache partitionCache = new SharePartitionCache(); + Map partitionCacheMap = new HashMap<>(); ReplicaManager mockReplicaManager = mock(ReplicaManager.class); - SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCache); - testSharePartitionListener(sharePartitionKey, partitionCache, mockReplicaManager, partitionListener::onBecomingFollower); - } - - @Test - public void testFetchMessagesRotatePartitions() { - String groupId = "grp"; - Uuid memberId1 = Uuid.randomUuid(); - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - TopicIdPartition tp2 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 0)); - TopicIdPartition tp3 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 1)); - TopicIdPartition tp4 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 2)); - TopicIdPartition tp5 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("bar", 2)); - TopicIdPartition tp6 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 3)); - List topicIdPartitions = List.of(tp0, tp1, tp2, tp3, tp4, tp5, tp6); - - sharePartitionManager = Mockito.spy(SharePartitionManagerBuilder.builder().withBrokerTopicStats(brokerTopicStats).build()); - // Capture the arguments passed to processShareFetch. - ArgumentCaptor captor = ArgumentCaptor.forClass(ShareFetch.class); - - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 0, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - verify(sharePartitionManager, times(1)).processShareFetch(captor.capture()); - // Verify the partitions rotation, no rotation. - ShareFetch resultShareFetch = captor.getValue(); - validateRotatedListEquals(resultShareFetch.topicIdPartitions(), topicIdPartitions, 0); - - // Single rotation. - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 1, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - verify(sharePartitionManager, times(2)).processShareFetch(captor.capture()); - // Verify the partitions rotation, rotate by 1. - resultShareFetch = captor.getValue(); - validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); - - // Rotation by 3, less that the number of partitions. - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 3, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - verify(sharePartitionManager, times(3)).processShareFetch(captor.capture()); - // Verify the partitions rotation, rotate by 3. - resultShareFetch = captor.getValue(); - validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 3); - - // Rotation by 12, more than the number of partitions. - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, 12, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - verify(sharePartitionManager, times(4)).processShareFetch(captor.capture()); - // Verify the partitions rotation, rotate by 5 (12 % 7). - resultShareFetch = captor.getValue(); - validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 5); - // Rotation by Integer.MAX_VALUE, boundary test. - sharePartitionManager.fetchMessages(groupId, memberId1.toString(), FETCH_PARAMS, Integer.MAX_VALUE, - MAX_FETCH_RECORDS, BATCH_SIZE, topicIdPartitions); - verify(sharePartitionManager, times(5)).processShareFetch(captor.capture()); - // Verify the partitions rotation, rotate by 1 (2147483647 % 7). - resultShareFetch = captor.getValue(); - validateRotatedListEquals(topicIdPartitions, resultShareFetch.topicIdPartitions(), 1); - } - - @Test - public void testCreateIdleShareFetchTask() throws Exception { - ReplicaManager replicaManager = mock(ReplicaManager.class); - - MockTimer mockTimer = new MockTimer(time); - long maxWaitMs = 1000L; - - // Set up the mock to capture and add the timer task - Mockito.doAnswer(invocation -> { - TimerTask timerTask = invocation.getArgument(0); - mockTimer.add(timerTask); - return null; - }).when(replicaManager).addShareFetchTimerRequest(Mockito.any(TimerTask.class)); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withReplicaManager(replicaManager) - .withTime(time) - .withTimer(mockTimer) - .build(); - - CompletableFuture future = sharePartitionManager.createIdleShareFetchTimerTask(maxWaitMs); - // Future should not be completed immediately - assertFalse(future.isDone()); - - mockTimer.advanceClock(maxWaitMs / 2); - assertFalse(future.isDone()); - - mockTimer.advanceClock((maxWaitMs / 2) + 1); - // Verify the future is completed after the wait time - assertTrue(future.isDone()); - assertFalse(future.isCompletedExceptionally()); - } - - @Test - public void testOnShareVersionToggle() { - String groupId = "grp"; - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - SharePartition sp2 = mock(SharePartition.class); - SharePartition sp3 = mock(SharePartition.class); - - // Mock the share partitions corresponding to the topic partitions. - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put( - new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo1", 0))), sp0 - ); - partitionCache.put( - new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo2", 0))), sp1 - ); - partitionCache.put( - new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo3", 0))), sp2 - ); - partitionCache.put( - new SharePartitionKey(groupId, new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo4", 0))), sp3 - ); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .build(); - assertEquals(4, partitionCache.size()); - sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false); - // Because we are toggling to a share version which does not support share groups, the cache inside share partitions must be cleared. - assertEquals(0, partitionCache.size()); - //Check if all share partitions have been fenced. - Mockito.verify(sp0).markFenced(); - Mockito.verify(sp1).markFenced(); - Mockito.verify(sp2).markFenced(); - Mockito.verify(sp3).markFenced(); - } - - @Test - public void testOnShareVersionToggleWhenEnabledFromConfig() { - SharePartition sp0 = mock(SharePartition.class); - // Mock the share partitions corresponding to the topic partitions. - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.put( - new SharePartitionKey("grp", new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0))), sp0 - ); - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withPartitionCache(partitionCache) - .build(); - assertEquals(1, partitionCache.size()); - sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, true); - // Though share version is toggled to off, but it's enabled from config, hence the cache should not be cleared. - assertEquals(1, partitionCache.size()); - Mockito.verify(sp0, times(0)).markFenced(); - } - - @Test - public void testShareGroupListener() { - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - TopicIdPartition tp1 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 1)); - Uuid memberId1 = Uuid.randomUuid(); - Uuid memberId2 = Uuid.randomUuid(); - - SharePartition sp0 = mock(SharePartition.class); - SharePartition sp1 = mock(SharePartition.class); - - ShareSessionCache cache = new ShareSessionCache(10); - cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID); - cache.maybeCreateSession(groupId, memberId2, new ImplicitLinkedHashCollection<>(), "id-2"); - - SharePartitionCache partitionCache = new SharePartitionCache(); - partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0); - partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp1), k -> sp1); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .build(); - - assertEquals(2, cache.size()); - assertEquals(2, partitionCache.size()); - - // Invoke listeners by simulating connection disconnect for memberId1. - cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID); - // Session cache should remove the memberId1. - assertEquals(1, cache.size()); - // Partition cache should not remove the share partitions as the group is not empty. - assertEquals(2, partitionCache.size()); - assertNotNull(cache.get(new ShareSessionKey(groupId, memberId2))); - - // Invoke listeners by simulating connection disconnect for memberId2. - cache.connectionDisconnectListener().onDisconnect("id-2"); - // Session cache should remove the memberId2. - assertEquals(0, cache.size()); - // Partition cache should remove the share partitions as the group is empty. - assertEquals(0, partitionCache.size()); - - Mockito.verify(sp0, times(1)).markFenced(); - Mockito.verify(sp1, times(1)).markFenced(); - Mockito.verify(mockReplicaManager, times(2)).removeListener(any(), any()); - } - - @Test - public void testShareGroupListenerWithEmptyCache() { - String groupId = "grp"; - TopicIdPartition tp0 = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)); - Uuid memberId1 = Uuid.randomUuid(); - - SharePartition sp0 = mock(SharePartition.class); - - ShareSessionCache cache = new ShareSessionCache(10); - cache.maybeCreateSession(groupId, memberId1, new ImplicitLinkedHashCollection<>(), CONNECTION_ID); - - SharePartitionCache partitionCache = spy(new SharePartitionCache()); - partitionCache.computeIfAbsent(new SharePartitionKey(groupId, tp0), k -> sp0); - - sharePartitionManager = SharePartitionManagerBuilder.builder() - .withCache(cache) - .withPartitionCache(partitionCache) - .withReplicaManager(mockReplicaManager) - .build(); - - assertEquals(1, cache.size()); - assertEquals(1, partitionCache.size()); - - // Clean up share session and partition cache. - sharePartitionManager.onShareVersionToggle(ShareVersion.SV_0, false); - assertEquals(0, cache.size()); - assertEquals(0, partitionCache.size()); - - Mockito.verify(sp0, times(1)).markFenced(); - Mockito.verify(mockReplicaManager, times(1)).removeListener(any(), any()); - Mockito.verify(partitionCache, times(0)).topicIdPartitionsForGroup(groupId); - - // Invoke listeners by simulating connection disconnect for member. As the group is empty, - // hence onGroupEmpty method should be invoked and should complete without any exception. - cache.connectionDisconnectListener().onDisconnect(CONNECTION_ID); - // Verify that the listener is called for the group. - Mockito.verify(partitionCache, times(1)).topicIdPartitionsForGroup(groupId); - } - - private Timer systemTimerReaper() { - return new SystemTimerReaper( - TIMER_NAME_PREFIX + "-test-reaper", - new SystemTimer(TIMER_NAME_PREFIX + "-test-timer")); - } - - private void assertNoReaperThreadsPendingClose() throws InterruptedException { - TestUtils.waitForCondition( - () -> Thread.getAllStackTraces().keySet().stream().noneMatch(t -> t.getName().contains(TIMER_NAME_PREFIX)), - "Found unexpected reaper threads with name containing: " + TIMER_NAME_PREFIX); + SharePartitionListener partitionListener = new SharePartitionListener(sharePartitionKey, mockReplicaManager, partitionCacheMap); + testSharePartitionListener(sharePartitionKey, partitionCacheMap, mockReplicaManager, partitionListener::onBecomingFollower); } private void testSharePartitionListener( SharePartitionKey sharePartitionKey, - SharePartitionCache partitionCache, + Map partitionCacheMap, ReplicaManager mockReplicaManager, Consumer listenerConsumer ) { @@ -3075,22 +2574,22 @@ private void testSharePartitionListener( SharePartition sp0 = mock(SharePartition.class); SharePartition sp1 = mock(SharePartition.class); - partitionCache.put(sharePartitionKey, sp0); - partitionCache.put(spk, sp1); + partitionCacheMap.put(sharePartitionKey, sp0); + partitionCacheMap.put(spk, sp1); // Invoke listener for first share partition. listenerConsumer.accept(sharePartitionKey.topicIdPartition().topicPartition()); // Validate that the share partition is removed from the cache. - assertEquals(1, partitionCache.size()); - assertFalse(partitionCache.containsKey(sharePartitionKey)); + assertEquals(1, partitionCacheMap.size()); + assertFalse(partitionCacheMap.containsKey(sharePartitionKey)); verify(sp0, times(1)).markFenced(); verify(mockReplicaManager, times(1)).removeListener(any(), any()); // Invoke listener for non-matching share partition. listenerConsumer.accept(tp); // The non-matching share partition should not be removed as the listener is attached to a different topic partition. - assertEquals(1, partitionCache.size()); + assertEquals(1, partitionCacheMap.size()); verify(sp1, times(0)).markFenced(); // Verify the remove listener is not called for the second share partition. verify(mockReplicaManager, times(1)).removeListener(any(), any()); @@ -3109,14 +2608,14 @@ private void mockUpdateAndGenerateResponseData(ShareFetchContext context, String if (context.getClass() == ShareSessionContext.class) { ShareSessionContext shareSessionContext = (ShareSessionContext) context; if (!shareSessionContext.isSubsequent()) { - shareSessionContext.shareFetchData().forEach(topicIdPartition -> data.put(topicIdPartition, + shareSessionContext.shareFetchData().forEach((topicIdPartition, sharePartitionData) -> data.put(topicIdPartition, topicIdPartition.topic() == null ? errorShareFetchResponse(Errors.UNKNOWN_TOPIC_ID.code()) : noErrorShareFetchResponse())); } else { synchronized (shareSessionContext.session()) { shareSessionContext.session().partitionMap().forEach(cachedSharePartition -> { - TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), - new TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); + TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new + TopicPartition(cachedSharePartition.topic(), cachedSharePartition.partition())); data.put(topicIdPartition, topicIdPartition.topic() == null ? errorShareFetchResponse(Errors.UNKNOWN_TOPIC_ID.code()) : noErrorShareFetchResponse()); }); @@ -3129,7 +2628,8 @@ private void mockUpdateAndGenerateResponseData(ShareFetchContext context, String private void assertPartitionsPresent(ShareSessionContext context, List partitions) { Set partitionsInContext = new HashSet<>(); if (!context.isSubsequent()) { - partitionsInContext.addAll(context.shareFetchData()); + context.shareFetchData().forEach((topicIdPartition, sharePartitionData) -> + partitionsInContext.add(topicIdPartition)); } else { context.session().partitionMap().forEach(cachedSharePartition -> { TopicIdPartition topicIdPartition = new TopicIdPartition(cachedSharePartition.topicId(), new @@ -3147,9 +2647,11 @@ private void assertErroneousAndValidTopicIdPartitions( Set expectedErroneousSet = new HashSet<>(expectedErroneous); Set expectedValidSet = new HashSet<>(expectedValid); Set actualErroneousPartitions = new HashSet<>(); + Set actualValidPartitions = new HashSet<>(); erroneousAndValidPartitionData.erroneous().forEach((topicIdPartition, partitionData) -> actualErroneousPartitions.add(topicIdPartition)); - Set actualValidPartitions = new HashSet<>(erroneousAndValidPartitionData.validTopicIdPartitions()); + erroneousAndValidPartitionData.validTopicIdPartitions().forEach((topicIdPartition, partitionData) -> + actualValidPartitions.add(topicIdPartition)); assertEquals(expectedErroneousSet, actualErroneousPartitions); assertEquals(expectedValidSet, actualValidPartitions); } @@ -3164,12 +2666,12 @@ private Partition mockPartition() { private void validateShareFetchFutureException(CompletableFuture> future, TopicIdPartition topicIdPartition, Errors error) { - validateShareFetchFutureException(future, List.of(topicIdPartition), error, null); + validateShareFetchFutureException(future, Collections.singletonList(topicIdPartition), error, null); } private void validateShareFetchFutureException(CompletableFuture> future, TopicIdPartition topicIdPartition, Errors error, String message) { - validateShareFetchFutureException(future, List.of(topicIdPartition), error, message); + validateShareFetchFutureException(future, Collections.singletonList(topicIdPartition), error, message); } private void validateShareFetchFutureException(CompletableFuture> future, @@ -3191,47 +2693,23 @@ private void mockFetchOffsetForTimestamp(ReplicaManager replicaManager) { when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); } - private void validateBrokerTopicStatsMetrics( - BrokerTopicStats brokerTopicStats, - TopicMetrics expectedAllTopicMetrics, - Map expectedTopicMetrics - ) { - if (expectedAllTopicMetrics != null) { - assertEquals(expectedAllTopicMetrics.totalShareFetchRequestCount, brokerTopicStats.allTopicsStats().totalShareFetchRequestRate().count()); - assertEquals(expectedAllTopicMetrics.failedShareFetchRequestCount, brokerTopicStats.allTopicsStats().failedShareFetchRequestRate().count()); - assertEquals(expectedAllTopicMetrics.totalShareAcknowledgementRequestCount, brokerTopicStats.allTopicsStats().totalShareAcknowledgementRequestRate().count()); - assertEquals(expectedAllTopicMetrics.failedShareAcknowledgementRequestCount, brokerTopicStats.allTopicsStats().failedShareAcknowledgementRequestRate().count()); - } - // Validate tracked topic metrics. - assertEquals(expectedTopicMetrics.size(), brokerTopicStats.numTopics()); - expectedTopicMetrics.forEach((topic, metrics) -> { - BrokerTopicMetrics topicMetrics = brokerTopicStats.topicStats(topic); - assertEquals(metrics.totalShareFetchRequestCount, topicMetrics.totalShareFetchRequestRate().count()); - assertEquals(metrics.failedShareFetchRequestCount, topicMetrics.failedShareFetchRequestRate().count()); - assertEquals(metrics.totalShareAcknowledgementRequestCount, topicMetrics.totalShareAcknowledgementRequestRate().count()); - assertEquals(metrics.failedShareAcknowledgementRequestCount, topicMetrics.failedShareAcknowledgementRequestRate().count()); - }); - } - - static Seq> buildLogReadResult(List topicIdPartitions) { + static Seq> buildLogReadResult(Set topicIdPartitions) { List> logReadResults = new ArrayList<>(); topicIdPartitions.forEach(topicIdPartition -> logReadResults.add(new Tuple2<>(topicIdPartition, new LogReadResult( - new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.withRecords( - Compression.NONE, new SimpleRecord("test-key".getBytes(), "test-value".getBytes()))), - Optional.empty(), + new FetchDataInfo(new LogOffsetMetadata(0, 0, 0), MemoryRecords.EMPTY), + Option.empty(), -1L, -1L, -1L, -1L, -1L, - OptionalLong.empty(), - OptionalInt.empty(), - Optional.empty() + Option.empty(), + Option.empty(), + Option.empty() )))); return CollectionConverters.asScala(logReadResults).toSeq(); } - @SuppressWarnings("unchecked") static void mockReplicaManagerDelayedShareFetch(ReplicaManager replicaManager, DelayedOperationPurgatory delayedShareFetchPurgatory) { doAnswer(invocationOnMock -> { @@ -3250,22 +2728,14 @@ static void mockReplicaManagerDelayedShareFetch(ReplicaManager replicaManager, }).when(replicaManager).addDelayedShareFetchRequest(any(), any()); } - private record TopicMetrics( - long totalShareFetchRequestCount, - long failedShareFetchRequestCount, - long totalShareAcknowledgementRequestCount, - long failedShareAcknowledgementRequestCount - ) { } - static class SharePartitionManagerBuilder { - private final Persister persister = new NoOpStatePersister(); private ReplicaManager replicaManager = mock(ReplicaManager.class); private Time time = new MockTime(); - private ShareSessionCache cache = new ShareSessionCache(10); - private SharePartitionCache partitionCache = new SharePartitionCache(); + private ShareSessionCache cache = new ShareSessionCache(10, 1000); + private Map partitionCacheMap = new HashMap<>(); + private Persister persister = new NoOpShareStatePersister(); private Timer timer = new MockTimer(); - private ShareGroupMetrics shareGroupMetrics = new ShareGroupMetrics(time); - private BrokerTopicStats brokerTopicStats; + private Metrics metrics = new Metrics(); private SharePartitionManagerBuilder withReplicaManager(ReplicaManager replicaManager) { this.replicaManager = replicaManager; @@ -3282,23 +2752,23 @@ private SharePartitionManagerBuilder withCache(ShareSessionCache cache) { return this; } - SharePartitionManagerBuilder withPartitionCache(SharePartitionCache partitionCache) { - this.partitionCache = partitionCache; + SharePartitionManagerBuilder withPartitionCacheMap(Map partitionCacheMap) { + this.partitionCacheMap = partitionCacheMap; return this; } - private SharePartitionManagerBuilder withTimer(Timer timer) { - this.timer = timer; + private SharePartitionManagerBuilder withShareGroupPersister(Persister persister) { + this.persister = persister; return this; } - private SharePartitionManagerBuilder withShareGroupMetrics(ShareGroupMetrics shareGroupMetrics) { - this.shareGroupMetrics = shareGroupMetrics; + private SharePartitionManagerBuilder withTimer(Timer timer) { + this.timer = timer; return this; } - private SharePartitionManagerBuilder withBrokerTopicStats(BrokerTopicStats brokerTopicStats) { - this.brokerTopicStats = brokerTopicStats; + private SharePartitionManagerBuilder withMetrics(Metrics metrics) { + this.metrics = metrics; return this; } @@ -3308,19 +2778,17 @@ public static SharePartitionManagerBuilder builder() { public SharePartitionManager build() { return new SharePartitionManager(replicaManager, - time, - cache, - partitionCache, - DEFAULT_RECORD_LOCK_DURATION_MS, - timer, - MAX_DELIVERY_COUNT, - MAX_IN_FLIGHT_MESSAGES, - REMOTE_FETCH_MAX_WAIT_MS, - persister, - mock(GroupConfigManager.class), - shareGroupMetrics, - brokerTopicStats - ); + time, + cache, + partitionCacheMap, + DEFAULT_RECORD_LOCK_DURATION_MS, + timer, + MAX_DELIVERY_COUNT, + MAX_IN_FLIGHT_MESSAGES, + MAX_FETCH_RECORDS, + persister, + mock(GroupConfigManager.class), + metrics); } } } diff --git a/core/src/test/java/kafka/server/share/SharePartitionTest.java b/core/src/test/java/kafka/server/share/SharePartitionTest.java index 25432b4ae15e7..de012a3ed08b5 100644 --- a/core/src/test/java/kafka/server/share/SharePartitionTest.java +++ b/core/src/test/java/kafka/server/share/SharePartitionTest.java @@ -17,35 +17,28 @@ package kafka.server.share; import kafka.server.ReplicaManager; -import kafka.server.share.SharePartition.GapWindow; +import kafka.server.share.SharePartition.InFlightState; +import kafka.server.share.SharePartition.RecordState; import kafka.server.share.SharePartition.SharePartitionState; import kafka.server.share.SharePartitionManager.SharePartitionListener; -import org.apache.kafka.clients.consumer.AcknowledgeType; import org.apache.kafka.common.TopicIdPartition; import org.apache.kafka.common.TopicPartition; import org.apache.kafka.common.Uuid; import org.apache.kafka.common.compress.Compression; import org.apache.kafka.common.errors.CoordinatorNotAvailableException; +import org.apache.kafka.common.errors.FencedStateEpochException; import org.apache.kafka.common.errors.GroupIdNotFoundException; import org.apache.kafka.common.errors.InvalidRecordStateException; import org.apache.kafka.common.errors.InvalidRequestException; -import org.apache.kafka.common.errors.LeaderNotAvailableException; import org.apache.kafka.common.errors.NotLeaderOrFollowerException; import org.apache.kafka.common.errors.UnknownServerException; import org.apache.kafka.common.errors.UnknownTopicOrPartitionException; -import org.apache.kafka.common.message.FetchResponseData; import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords; import org.apache.kafka.common.protocol.Errors; -import org.apache.kafka.common.record.ControlRecordType; -import org.apache.kafka.common.record.DefaultRecord; -import org.apache.kafka.common.record.EndTransactionMarker; import org.apache.kafka.common.record.FileRecords; import org.apache.kafka.common.record.MemoryRecords; import org.apache.kafka.common.record.MemoryRecordsBuilder; -import org.apache.kafka.common.record.RecordBatch; -import org.apache.kafka.common.record.Records; -import org.apache.kafka.common.record.SimpleRecord; import org.apache.kafka.common.record.TimestampType; import org.apache.kafka.common.requests.ListOffsetsRequest; import org.apache.kafka.common.utils.MockTime; @@ -54,25 +47,19 @@ import org.apache.kafka.coordinator.group.GroupConfigManager; import org.apache.kafka.coordinator.group.ShareGroupAutoOffsetResetStrategy; import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch; -import org.apache.kafka.server.share.fetch.AcquisitionLockTimerTask; -import org.apache.kafka.server.share.fetch.DelayedShareFetchGroupKey; -import org.apache.kafka.server.share.fetch.InFlightState; -import org.apache.kafka.server.share.fetch.RecordState; import org.apache.kafka.server.share.fetch.ShareAcquiredRecords; -import org.apache.kafka.server.share.metrics.SharePartitionMetrics; -import org.apache.kafka.server.share.persister.NoOpStatePersister; +import org.apache.kafka.server.share.persister.NoOpShareStatePersister; import org.apache.kafka.server.share.persister.PartitionFactory; import org.apache.kafka.server.share.persister.Persister; import org.apache.kafka.server.share.persister.PersisterStateBatch; import org.apache.kafka.server.share.persister.ReadShareGroupStateResult; import org.apache.kafka.server.share.persister.TopicData; import org.apache.kafka.server.share.persister.WriteShareGroupStateResult; -import org.apache.kafka.server.storage.log.FetchIsolation; import org.apache.kafka.server.storage.log.FetchPartitionData; import org.apache.kafka.server.util.FutureUtils; -import org.apache.kafka.server.util.timer.MockTimer; +import org.apache.kafka.server.util.timer.SystemTimer; +import org.apache.kafka.server.util.timer.SystemTimerReaper; import org.apache.kafka.server.util.timer.Timer; -import org.apache.kafka.server.util.timer.TimerTask; import org.apache.kafka.storage.internals.log.OffsetResultHolder; import org.apache.kafka.test.TestUtils; @@ -83,8 +70,9 @@ import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; import java.util.HashMap; -import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Optional; @@ -96,8 +84,6 @@ import java.util.concurrent.TimeUnit; import static kafka.server.share.SharePartition.EMPTY_MEMBER_ID; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.memoryRecordsBuilder; -import static org.apache.kafka.server.share.fetch.ShareFetchTestUtils.yammerMetricValue; import static org.apache.kafka.test.TestUtils.assertFutureThrows; import static org.junit.jupiter.api.Assertions.assertArrayEquals; import static org.junit.jupiter.api.Assertions.assertEquals; @@ -107,10 +93,7 @@ import static org.junit.jupiter.api.Assertions.assertThrows; import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.ArgumentMatchers.anyList; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; -@SuppressWarnings("resource") public class SharePartitionTest { private static final String ACQUISITION_LOCK_NEVER_GOT_RELEASED = "Acquisition lock never got released."; @@ -118,46 +101,75 @@ public class SharePartitionTest { private static final int MAX_DELIVERY_COUNT = 5; private static final TopicIdPartition TOPIC_ID_PARTITION = new TopicIdPartition(Uuid.randomUuid(), 0, "test-topic"); private static final String MEMBER_ID = "member-1"; + private static Timer mockTimer; private static final Time MOCK_TIME = new MockTime(); - private static final short MAX_IN_FLIGHT_RECORDS = 200; + private static final short MAX_IN_FLIGHT_MESSAGES = 200; private static final int ACQUISITION_LOCK_TIMEOUT_MS = 100; - private static final int DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS = 120; - private static final int BATCH_SIZE = 500; - private static final int DEFAULT_FETCH_OFFSET = 0; + private static final int DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS = 300; private static final int MAX_FETCH_RECORDS = Integer.MAX_VALUE; - private static final byte ACKNOWLEDGE_TYPE_GAP_ID = 0; - private static final FetchIsolation FETCH_ISOLATION_HWM = FetchIsolation.HIGH_WATERMARK; - private static Timer mockTimer; - private SharePartitionMetrics sharePartitionMetrics; @BeforeEach public void setUp() { - kafka.utils.TestUtils.clearYammerMetrics(); - mockTimer = new MockTimer(); - sharePartitionMetrics = new SharePartitionMetrics(GROUP_ID, TOPIC_ID_PARTITION.topic(), TOPIC_ID_PARTITION.partition()); + mockTimer = new SystemTimerReaper("share-group-lock-timeout-test-reaper", + new SystemTimer("share-group-lock-test-timeout")); } @AfterEach public void tearDown() throws Exception { mockTimer.close(); - sharePartitionMetrics.close(); } @Test - public void testMaybeInitialize() throws InterruptedException { + public void testRecordStateValidateTransition() { + // Null check. + assertThrows(NullPointerException.class, () -> RecordState.AVAILABLE.validateTransition(null)); + // Same state transition check. + assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.AVAILABLE)); + assertThrows(IllegalStateException.class, () -> RecordState.ACQUIRED.validateTransition(RecordState.ACQUIRED)); + assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACKNOWLEDGED)); + assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); + // Invalid state transition to any other state from Acknowledged state. + assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.AVAILABLE)); + assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ACQUIRED)); + assertThrows(IllegalStateException.class, () -> RecordState.ACKNOWLEDGED.validateTransition(RecordState.ARCHIVED)); + // Invalid state transition to any other state from Archived state. + assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.AVAILABLE)); + assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ACKNOWLEDGED)); + assertThrows(IllegalStateException.class, () -> RecordState.ARCHIVED.validateTransition(RecordState.ARCHIVED)); + // Invalid state transition to any other state from Available state other than Acquired. + assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ACKNOWLEDGED)); + assertThrows(IllegalStateException.class, () -> RecordState.AVAILABLE.validateTransition(RecordState.ARCHIVED)); + + // Successful transition from Available to Acquired. + assertEquals(RecordState.ACQUIRED, RecordState.AVAILABLE.validateTransition(RecordState.ACQUIRED)); + // Successful transition from Acquired to any state. + assertEquals(RecordState.AVAILABLE, RecordState.ACQUIRED.validateTransition(RecordState.AVAILABLE)); + assertEquals(RecordState.ACKNOWLEDGED, RecordState.ACQUIRED.validateTransition(RecordState.ACKNOWLEDGED)); + assertEquals(RecordState.ARCHIVED, RecordState.ACQUIRED.validateTransition(RecordState.ARCHIVED)); + } + + @Test + public void testRecordStateForId() { + assertEquals(RecordState.AVAILABLE, RecordState.forId((byte) 0)); + assertEquals(RecordState.ACQUIRED, RecordState.forId((byte) 1)); + assertEquals(RecordState.ACKNOWLEDGED, RecordState.forId((byte) 2)); + assertEquals(RecordState.ARCHIVED, RecordState.forId((byte) 4)); + // Invalid check. + assertThrows(IllegalArgumentException.class, () -> RecordState.forId((byte) 5)); + } + + @Test + public void testMaybeInitialize() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); @@ -183,29 +195,20 @@ public void testMaybeInitialize() throws InterruptedException { assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState()); assertEquals(3, sharePartition.cachedState().get(11L).batchDeliveryCount()); assertNull(sharePartition.cachedState().get(11L).offsetState()); - - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 2, - "In-flight batch count should be 2."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 11, - "In-flight message count should be 11."); - assertEquals(11, sharePartitionMetrics.inFlightBatchMessageCount().sum()); - assertEquals(2, sharePartitionMetrics.inFlightBatchMessageCount().count()); - assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().min()); - assertEquals(6, sharePartitionMetrics.inFlightBatchMessageCount().max()); } @Test public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsEarliest() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -248,14 +251,14 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsEarliest() { public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsLatest() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -295,18 +298,17 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsLatest() { } @Test - public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration() - throws InterruptedException { + public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -324,8 +326,7 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration() ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); - FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset( - MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1), 15L, Optional.empty()); + FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(MOCK_TIME.milliseconds() - TimeUnit.HOURS.toMillis(1), 15L, Optional.empty()); Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); @@ -333,7 +334,6 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration() .withPersister(persister) .withGroupConfigManager(groupConfigManager) .withReplicaManager(replicaManager) - .withSharePartitionMetrics(sharePartitionMetrics) .build(); CompletableFuture result = sharePartition.maybeInitialize(); @@ -353,25 +353,20 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigReturnsByDuration() assertEquals(15, sharePartition.startOffset()); assertEquals(15, sharePartition.endOffset()); assertEquals(PartitionFactory.DEFAULT_STATE_EPOCH, sharePartition.stateEpoch()); - - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 0, - "In-flight batch count should be 0."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 0, - "In-flight message count should be 0."); } @Test public void testMaybeInitializeDefaultStartEpochGroupConfigNotPresent() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -412,14 +407,14 @@ public void testMaybeInitializeDefaultStartEpochGroupConfigNotPresent() { public void testMaybeInitializeFetchOffsetForLatestTimestampThrowsError() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -456,14 +451,14 @@ public void testMaybeInitializeFetchOffsetForLatestTimestampThrowsError() { public void testMaybeInitializeFetchOffsetForEarliestTimestampThrowsError() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -502,14 +497,14 @@ public void testMaybeInitializeFetchOffsetForEarliestTimestampThrowsError() { public void testMaybeInitializeFetchOffsetForByDurationThrowsError() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData( 0, PartitionFactory.DEFAULT_STATE_EPOCH, PartitionFactory.UNINITIALIZED_START_OFFSET, PartitionFactory.DEFAULT_ERROR_CODE, PartitionFactory.DEFAULT_ERR_MESSAGE, - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); @@ -555,10 +550,10 @@ public void testMaybeInitializeFetchOffsetForByDurationThrowsError() { public void testMaybeInitializeSharePartitionAgain() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); @@ -583,10 +578,10 @@ public void testMaybeInitializeSharePartitionAgain() { public void testMaybeInitializeSharePartitionAgainConcurrentRequests() throws InterruptedException { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); @@ -618,9 +613,9 @@ public void testMaybeInitializeSharePartitionAgainConcurrentRequests() throws In public void testMaybeInitializeWithEmptyStateBatches() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NONE.code(), Errors.NONE.message(), List.of())))) + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NONE.code(), Errors.NONE.message(), Collections.emptyList())))) ); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); @@ -643,129 +638,115 @@ public void testMaybeInitializeWithErrorPartitionResponse() { ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); // Mock NOT_COORDINATOR error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NOT_COORDINATOR.code(), Errors.NOT_COORDINATOR.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + assertFutureThrows(result, CoordinatorNotAvailableException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock COORDINATOR_NOT_AVAILABLE error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.COORDINATOR_NOT_AVAILABLE.code(), Errors.COORDINATOR_NOT_AVAILABLE.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + assertFutureThrows(result, CoordinatorNotAvailableException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock COORDINATOR_LOAD_IN_PROGRESS error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.COORDINATOR_LOAD_IN_PROGRESS.code(), Errors.COORDINATOR_LOAD_IN_PROGRESS.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + assertFutureThrows(result, CoordinatorNotAvailableException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock GROUP_ID_NOT_FOUND error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(GroupIdNotFoundException.class, result); + assertFutureThrows(result, GroupIdNotFoundException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock UNKNOWN_TOPIC_OR_PARTITION error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(UnknownTopicOrPartitionException.class, result); + assertFutureThrows(result, UnknownTopicOrPartitionException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock FENCED_STATE_EPOCH error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.FENCED_STATE_EPOCH.code(), Errors.FENCED_STATE_EPOCH.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(NotLeaderOrFollowerException.class, result); + assertFutureThrows(result, FencedStateEpochException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock FENCED_LEADER_EPOCH error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.FENCED_LEADER_EPOCH.code(), Errors.FENCED_LEADER_EPOCH.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(NotLeaderOrFollowerException.class, result); + assertFutureThrows(result, NotLeaderOrFollowerException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); // Mock UNKNOWN_SERVER_ERROR error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.UNKNOWN_SERVER_ERROR.code(), Errors.UNKNOWN_SERVER_ERROR.message(), - List.of()))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(UnknownServerException.class, result); - assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); - - // Mock NETWORK_EXCEPTION error. - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 5, 10L, Errors.NETWORK_EXCEPTION.code(), Errors.NETWORK_EXCEPTION.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(UnknownServerException.class, result); + assertFutureThrows(result, UnknownServerException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -773,10 +754,10 @@ public void testMaybeInitializeWithErrorPartitionResponse() { public void testMaybeInitializeWithInvalidStartOffsetStateBatches() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 3, 6L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); @@ -785,7 +766,7 @@ public void testMaybeInitializeWithInvalidStartOffsetStateBatches() { CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -793,10 +774,10 @@ public void testMaybeInitializeWithInvalidStartOffsetStateBatches() { public void testMaybeInitializeWithInvalidTopicIdResponse() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(Uuid.randomUuid(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(Uuid.randomUuid(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); @@ -805,7 +786,7 @@ public void testMaybeInitializeWithInvalidTopicIdResponse() { CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -813,10 +794,10 @@ public void testMaybeInitializeWithInvalidTopicIdResponse() { public void testMaybeInitializeWithInvalidPartitionResponse() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(1, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( + Arrays.asList( new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3))))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); @@ -825,12 +806,12 @@ public void testMaybeInitializeWithInvalidPartitionResponse() { CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @Test - public void testMaybeInitializeWithNoOpStatePersister() { + public void testMaybeInitializeWithNoOpShareStatePersister() { ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); @@ -859,7 +840,7 @@ public void testMaybeInitializeWithNullResponse() { CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -874,7 +855,7 @@ public void testMaybeInitializeWithNullTopicsData() { CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -882,14 +863,14 @@ public void testMaybeInitializeWithNullTopicsData() { public void testMaybeInitializeWithEmptyTopicsData() { Persister persister = Mockito.mock(Persister.class); ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of()); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.emptyList()); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); CompletableFuture result = sharePartition.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); + assertFutureThrows(result, IllegalStateException.class); assertEquals(SharePartitionState.FAILED, sharePartition.partitionState()); } @@ -903,7 +884,7 @@ public void testMaybeInitializeWithReadException() { CompletableFuture result = sharePartition1.maybeInitialize(); assertTrue(result.isDone()); assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(RuntimeException.class, result); + assertFutureThrows(result, RuntimeException.class); assertEquals(SharePartitionState.FAILED, sharePartition1.partitionState()); persister = Mockito.mock(Persister.class); @@ -915,1680 +896,1291 @@ public void testMaybeInitializeWithReadException() { } @Test - public void testMaybeInitializeFencedSharePartition() { - SharePartition sharePartition = SharePartitionBuilder.builder().build(); - // Mark the share partition as fenced. - sharePartition.markFenced(); + public void testAcquireSingleRecord() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(1); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(LeaderNotAvailableException.class, result); - assertEquals(SharePartitionState.FENCED, sharePartition.partitionState()); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 3, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 1); + + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(1, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(0, sharePartition.cachedState().get(0L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); } @Test - public void testMaybeInitializeStateBatchesWithGapAtBeginning() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14 - new PersisterStateBatch(21L, 30L, RecordState.ARCHIVED.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(10, sharePartition.nextFetchOffset()); - - assertEquals(2, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(15L)); - assertNotNull(sharePartition.cachedState().get(21L)); - - assertEquals(20, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(2, sharePartition.cachedState().get(15L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(15L).offsetState()); - - assertEquals(30, sharePartition.cachedState().get(21L).lastOffset()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState()); - assertEquals(3, sharePartition.cachedState().get(21L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(21L).offsetState()); + public void testAcquireMultipleRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 10); - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertEquals(10, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(30, persisterReadResultGapWindow.endOffset()); + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); + assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeInitializeStateBatchesWithMultipleGaps() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14 - new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 3))))))); // There is a gap from 21 to 29 - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testAcquireWithMaxFetchRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Less-number of records than max fetch records. + MemoryRecords records = memoryRecords(5); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 10, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(10, sharePartition.nextFetchOffset()); + // More-number of records than max fetch records, but from 0 offset hence previous 10 records + // should be ignored and new full batch till end should be acquired. + records = memoryRecords(25); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 10, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 20); + assertArrayEquals(expectedAcquiredRecord(5, 24, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(25, sharePartition.nextFetchOffset()); assertEquals(2, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(15L)); - assertNotNull(sharePartition.cachedState().get(30L)); - - assertEquals(20, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(2, sharePartition.cachedState().get(15L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(15L).offsetState()); - - assertEquals(40, sharePartition.cachedState().get(30L).lastOffset()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(30L).batchState()); - assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(30L).offsetState()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - assertEquals(10, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); + assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); + assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); + assertEquals(24, sharePartition.cachedState().get(5L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); } @Test - public void testMaybeInitializeStateBatchesWithGapNotAtBeginning() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), - new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 3))))))); // There is a gap from 21 to 29 - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - // The start offset will be moved to 21, since the offsets 15 to 20 are acknowledged, and will be removed - // from cached state in the maybeUpdateCachedStateAndOffsets method - assertEquals(21, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(21, sharePartition.nextFetchOffset()); + public void testAcquireWithMultipleBatchesAndMaxFetchRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(30L)); + // Create 3 batches of records. + ByteBuffer buffer = ByteBuffer.allocate(4096); + memoryRecordsBuilder(buffer, 5, 10).close(); + memoryRecordsBuilder(buffer, 15, 15).close(); + memoryRecordsBuilder(buffer, 15, 30).close(); - assertEquals(40, sharePartition.cachedState().get(30L).lastOffset()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(30L).batchState()); - assertEquals(3, sharePartition.cachedState().get(30L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(30L).offsetState()); + buffer.flip(); - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); + MemoryRecords records = MemoryRecords.readableRecords(buffer); + // Acquire 10 records. + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + 10, + new FetchPartitionData(Errors.NONE, 20, 10, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 20); - assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); + // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch + // should be skipped. + assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(30, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); + assertEquals(29, sharePartition.cachedState().get(10L).lastOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeInitializeStateBatchesWithoutGaps() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 15L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), - new PersisterStateBatch(21L, 30L, RecordState.ARCHIVED.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 0); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertTrue(sharePartition.cachedState().isEmpty()); - assertEquals(31, sharePartition.startOffset()); - assertEquals(31, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(31, sharePartition.nextFetchOffset()); + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(5, sharePartition.nextFetchOffset()); - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); + // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. + records = memoryRecords(10, 0); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - // Since there are no gaps present in the readState response, persisterReadResultGapWindow should be null - assertNull(persisterReadResultGapWindow); + assertArrayEquals(expectedAcquiredRecords(memoryRecords(5, 5), 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); } @Test - public void testMaybeInitializeAndAcquire() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); + public void testAcquireSameBatchAgain() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 10); - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); - - // Create a single batch record that covers the entire range from 10 to 30 of initial read gap. - // The records in the batch are from 10 to 49. - MemoryRecords records = memoryRecords(40, 10); - // Set max fetch records to 1, records will be acquired till the first gap is encountered. List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 1, - 10, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 5); - assertArrayEquals(expectedAcquiredRecord(10, 14, 1).toArray(), acquiredRecordsList.toArray()); + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(15L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - // Send the same batch again to acquire the next set of records. acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 10, - 15, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 13); - - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4)); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 0); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(31, sharePartition.nextFetchOffset()); - assertEquals(6, sharePartition.cachedState().size()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(1, sharePartition.cachedState().get(19L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(19L).offsetState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); - assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertEquals(1, sharePartition.cachedState().get(23L).batchDeliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); - assertEquals(30L, sharePartition.endOffset()); - // As all the gaps are now filled, the persisterReadResultGapWindow should be null. - assertNull(sharePartition.persisterReadResultGapWindow()); + // No records should be returned as the batch is already acquired. + assertEquals(0, acquiredRecordsList.size()); + assertEquals(15, sharePartition.nextFetchOffset()); - // Now initial read gap is filled, so the complete batch can be acquired despite max fetch records being 1. + // Send subset of the same batch again, no records should be returned. + MemoryRecords subsetRecords = memoryRecords(2, 10); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 1, - 31, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 19); - - assertArrayEquals(expectedAcquiredRecord(31, 49, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(50, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.cachedState().size()); - assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); - assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); - assertEquals(1, sharePartition.cachedState().get(31L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(31L).offsetState()); - assertEquals(49L, sharePartition.endOffset()); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 0); + + // No records should be returned as the batch is already acquired. + assertEquals(0, acquiredRecordsList.size()); + assertEquals(15, sharePartition.nextFetchOffset()); + // Cache shouldn't be tracking per offset records + assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeInitializeAndAcquireWithHigherMaxFetchRecords() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); - - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); - - // Create a single batch record that covers the entire range from 10 to 30 of initial read gap. - // The records in the batch are from 10 to 49. - MemoryRecords records = memoryRecords(40, 10); - // Set max fetch records to 500, all records should be acquired. + public void testAcquireWithEmptyFetchRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500, - 10, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 37); - - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(26, 30, 4)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1)); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, MemoryRecords.EMPTY, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 0); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(50, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); - assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); - assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); - assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); - assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); + assertEquals(0, acquiredRecordsList.size()); + assertEquals(0, sharePartition.nextFetchOffset()); + } - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); - assertEquals(49L, sharePartition.endOffset()); - // As all the gaps are now filled, the persisterReadResultGapWindow should be null. - assertNull(sharePartition.persisterReadResultGapWindow()); + @Test + public void testNextFetchOffsetInitialState() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + assertEquals(0, sharePartition.nextFetchOffset()); } @Test - public void testMaybeInitializeAndAcquireWithFetchBatchLastOffsetWithinCachedBatch() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testNextFetchOffsetWithCachedStateAcquired() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + assertEquals(5, sharePartition.nextFetchOffset()); + } - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + @Test + public void testNextFetchOffsetWithFindAndCachedStateEmpty() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.findNextFetchOffset(true); + assertTrue(sharePartition.findNextFetchOffset()); + assertEquals(0, sharePartition.nextFetchOffset()); + assertFalse(sharePartition.findNextFetchOffset()); + } - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); + @Test + public void testNextFetchOffsetWithFindAndCachedState() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.findNextFetchOffset(true); + assertTrue(sharePartition.findNextFetchOffset()); + sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + assertEquals(5, sharePartition.nextFetchOffset()); + assertFalse(sharePartition.findNextFetchOffset()); + } - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); - - // Create a single batch record that ends in between the cached batch and the fetch offset is - // post startOffset. - MemoryRecords records = memoryRecords(16, 12); - // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500, - 10, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 13); - - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(12, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4)); + @Test + public void testCanAcquireRecordsWithEmptyCache() { + SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightMessages(1).build(); + assertTrue(sharePartition.canAcquireRecords()); + } - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(28, sharePartition.nextFetchOffset()); - assertEquals(6, sharePartition.cachedState().size()); - assertEquals(12, sharePartition.cachedState().get(12L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(12L).lastOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); - assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); - assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); - - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(12L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + @Test + public void testCanAcquireRecordsWithCachedDataAndLimitNotReached() { + SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightMessages(6).build(); + sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + // Limit not reached as only 6 in-flight messages is the limit. + assertTrue(sharePartition.canAcquireRecords()); } @Test - public void testMaybeInitializeAndAcquireWithFetchBatchPriorStartOffset() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testCanAcquireRecordsWithCachedDataAndLimitReached() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(1) + .withState(SharePartitionState.ACTIVE) + .build(); + sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + // Limit reached as only one in-flight message is the limit. + assertFalse(sharePartition.canAcquireRecords()); + } - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + @Test + public void testMaybeAcquireAndReleaseFetchLock() { + ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); + FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); + Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). + when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); - - // Create a single batch record where first offset is prior startOffset. - MemoryRecords records = memoryRecords(16, 6); - // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. + SharePartition sharePartition = SharePartitionBuilder.builder().withReplicaManager(replicaManager).build(); + sharePartition.maybeInitialize(); + assertTrue(sharePartition.maybeAcquireFetchLock()); + // Lock cannot be acquired again, as already acquired. + assertFalse(sharePartition.maybeAcquireFetchLock()); + // Release the lock. + sharePartition.releaseFetchLock(); + // Lock can be acquired again. + assertTrue(sharePartition.maybeAcquireFetchLock()); + } + + @Test + public void testAcknowledgeSingleRecordBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + MemoryRecords records1 = memoryRecords(1, 0); + MemoryRecords records2 = memoryRecords(1, 1); + + // Another batch is acquired because if there is only 1 batch, and it is acknowledged, the batch will be removed from cachedState List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500, - 10, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 10); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 10, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 1); - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + assertEquals(1, acquiredRecordsList.size()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(23, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 10, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 1); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(1, acquiredRecordsList.size()); + + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(1, 1, Collections.singletonList((byte) 1)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); + + assertEquals(2, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(1L).batchState()); + assertEquals(1, sharePartition.cachedState().get(1L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(1L).offsetState()); } @Test - public void testMaybeInitializeAndAcquireWithMultipleBatches() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testAcknowledgeMultipleRecordBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(10, 5); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 10); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(5, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 1)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); + + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(0, sharePartition.cachedState().size()); + } + + @Test + public void testAcknowledgeMultipleRecordBatchWithGapOffsets() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(5, 10); + // Gap from 15-17 offsets. + recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - // Create multiple batch records that covers the entire range from 5 to 30 of initial read gap. - // The records in the batch are from 5 to 49. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 2, 5).close(); - memoryRecordsBuilder(buffer, 1, 8).close(); - memoryRecordsBuilder(buffer, 2, 10).close(); - memoryRecordsBuilder(buffer, 6, 13).close(); - memoryRecordsBuilder(buffer, 3, 19).close(); - memoryRecordsBuilder(buffer, 9, 22).close(); - memoryRecordsBuilder(buffer, 19, 31).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Set max fetch records to 1, records will be acquired till the first gap is encountered. List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 1, - 5L, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 2); - assertArrayEquals(expectedAcquiredRecord(5, 6, 1).toArray(), acquiredRecordsList.toArray()); + assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); assertEquals(7, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.cachedState().size()); - assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); - assertEquals(6, sharePartition.cachedState().get(5L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(7L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - - // Remove first batch from the records as the fetch offset has moved forward to 7 offset. - List batch = TestUtils.toList(records.batches()); - records = records.slice(batch.get(0).sizeInBytes(), records.sizeInBytes() - batch.get(0).sizeInBytes()); - // Send the batch again to acquire the next set of records. - acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 3, - 7L, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 4); - - assertArrayEquals(expectedAcquiredRecord(8, 11, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.cachedState().size()); - assertEquals(8, sharePartition.cachedState().get(8L).firstOffset()); - assertEquals(11, sharePartition.cachedState().get(8L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(8L).batchState()); - assertEquals(1, sharePartition.cachedState().get(8L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(8L).offsetState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(12L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - - // Remove the next 2 batches from the records as the fetch offset has moved forward to 12 offset. - int size = batch.get(1).sizeInBytes() + batch.get(2).sizeInBytes(); - records = records.slice(size, records.sizeInBytes() - size); - // Send the records with 8 as max fetch records to acquire new and existing cached batches. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 8, - 12, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 10); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 9); - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(13, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); + assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(19, sharePartition.nextFetchOffset()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(26, sharePartition.nextFetchOffset()); - assertEquals(8, sharePartition.cachedState().size()); - assertEquals(13, sharePartition.cachedState().get(13L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(13L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(13L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); - assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(26L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - - // Remove the next 2 batches from the records as the fetch offset has moved forward to 26 offset. - // Do not remove the 5th batch as it's only partially acquired. - size = batch.get(3).sizeInBytes() + batch.get(4).sizeInBytes(); - records = records.slice(size, records.sizeInBytes() - size); - // Send the records with 10 as max fetch records to acquire the existing and till end of the - // fetched data. - acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 10, - 26, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 24); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Arrays.asList( + new ShareAcknowledgementBatch(5, 6, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 18, Arrays.asList( + (byte) 2, (byte) 2, (byte) 2, + (byte) 2, (byte) 2, (byte) 0, + (byte) 0, (byte) 0, (byte) 1 + )))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(26, 30, 4)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(31, 49, 1)); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(50, sharePartition.nextFetchOffset()); - assertEquals(9, sharePartition.cachedState().size()); - assertEquals(31, sharePartition.cachedState().get(31L).firstOffset()); - assertEquals(49, sharePartition.cachedState().get(31L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(31L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).batchState()); - assertEquals(49L, sharePartition.endOffset()); - // As all the gaps are now filled, the persisterReadResultGapWindow should be null. - assertNull(sharePartition.persisterReadResultGapWindow()); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeInitializeAndAcquireWithMultipleBatchesAndLastOffsetWithinCachedBatch() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 5L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); + // Gap from 12-13 offsets. + recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap for 15 offset. + recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap from 17-19 offsets. + recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 2); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(5, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); + assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(7, sharePartition.nextFetchOffset()); - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(5L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 11); - // Create multiple batch records that ends in between the cached batch and the fetch offset is - // post startOffset. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 2, 7).close(); - memoryRecordsBuilder(buffer, 2, 10).close(); - memoryRecordsBuilder(buffer, 6, 13).close(); - // Though 19 offset is a gap but still be acquired. - memoryRecordsBuilder(buffer, 8, 20).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500, - 5, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 18); - - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(7, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(23, 25, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecords(26, 27, 4)); + assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(21, sharePartition.nextFetchOffset()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(28, sharePartition.nextFetchOffset()); - assertEquals(6, sharePartition.cachedState().size()); - assertEquals(7, sharePartition.cachedState().get(7L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(7L).lastOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); - assertEquals(23, sharePartition.cachedState().get(23L).firstOffset()); - assertEquals(25, sharePartition.cachedState().get(23L).lastOffset()); + // Acknowledging over subset of both batch with subset of gap offsets. + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(6, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 1, + (byte) 1, (byte) 1, (byte) 1, + (byte) 0, (byte) 0, (byte) 1, + (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(26L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(26L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(26L).offsetState().get(27L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(28L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(29L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).offsetState().get(30L).state()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(28L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); + assertEquals(21, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(5L).batchState()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); + + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeInitializeAndAcquireWithMultipleBatchesPriorStartOffset() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 18L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(20L, 22L, RecordState.ARCHIVED.id, (short) 2), - new PersisterStateBatch(26L, 30L, RecordState.AVAILABLE.id, (short) 3))))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testAcknowledgeOutOfRangeCachedData() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Acknowledge a batch when cache is empty. + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(0, 15, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + MemoryRecords records = memoryRecords(5, 5); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); + // Cached data with offset 5-9 should exist. + assertEquals(1, sharePartition.cachedState().size()); + assertNotNull(sharePartition.cachedState().get(5L)); - assertEquals(18, sharePartition.cachedState().get(15L).lastOffset()); - assertEquals(22, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(30, sharePartition.cachedState().get(26L).lastOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(10L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - assertEquals(30L, sharePartition.persisterReadResultGapWindow().endOffset()); + ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(20, 25, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRequestException.class); + } - // Create multiple batch records where multiple batches base offsets are prior startOffset. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 2, 3).close(); - memoryRecordsBuilder(buffer, 1, 6).close(); - memoryRecordsBuilder(buffer, 4, 8).close(); - memoryRecordsBuilder(buffer, 10, 13).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Set max fetch records to 500, records should be acquired till the last offset of the fetched batch. + @Test + public void testAcknowledgeOutOfRangeCachedDataFirstBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + // Create data for the batch with offsets 0-4. + MemoryRecords records = memoryRecords(5, 0); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500, - 10, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 10); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - List expectedAcquiredRecords = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(15, 18, 3)); - expectedAcquiredRecords.addAll(expectedAcquiredRecord(19, 19, 1)); + assertEquals(1, acquiredRecordsList.size()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(23, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(19L).lastOffset()); + // Create data for the batch with offsets 20-24. + records = memoryRecords(5, 20); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(19L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(26L).batchState()); - assertEquals(30L, sharePartition.endOffset()); - assertNotNull(sharePartition.persisterReadResultGapWindow()); - assertEquals(20L, sharePartition.persisterReadResultGapWindow().gapStartOffset()); - } + assertEquals(1, acquiredRecordsList.size()); - @Test - public void testAcquireSingleRecord() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - MemoryRecords records = memoryRecords(1); + // Acknowledge a batch when first batch violates the range. + List acknowledgeBatches = Arrays.asList( + new ShareAcknowledgementBatch(0, 10, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(20, 24, Collections.singletonList((byte) 1))); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, acknowledgeBatches); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRequestException.class); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 1); + // Create data for the batch with offsets 5-10. + records = memoryRecords(6, 5); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 6); - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(1, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(0, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); + assertEquals(1, acquiredRecordsList.size()); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 1, - "In-flight message count should be 1."); - assertEquals(1, sharePartitionMetrics.inFlightBatchMessageCount().sum()); + // Previous failed acknowledge request should succeed now. + ackResult = sharePartition.acknowledge( + MEMBER_ID, acknowledgeBatches); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); } @Test - public void testAcquireMultipleRecords() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - MemoryRecords records = memoryRecords(5, 10); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3L, 5); + public void testAcknowledgeWithAnotherMember() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records = memoryRecords(5, 5); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); + // Cached data with offset 5-9 should exist. assertEquals(1, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(14, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); + assertNotNull(sharePartition.cachedState().get(5L)); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 5, - "In-flight message count should be 5."); - assertEquals(5, sharePartitionMetrics.inFlightBatchMessageCount().sum()); + CompletableFuture ackResult = sharePartition.acknowledge( + "member-2", + Collections.singletonList(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); } @Test - public void testAcquireWithMaxFetchRecords() { + public void testAcknowledgeWhenOffsetNotAcquired() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Less-number of records than max fetch records. - MemoryRecords records = memoryRecords(5); + MemoryRecords records = memoryRecords(5, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 5); - assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); + // Cached data with offset 5-9 should exist. assertEquals(1, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); + assertNotNull(sharePartition.cachedState().get(5L)); - // More-number of records than max fetch records, but from 0 offset hence previous 10 records - // should be ignored and new full batch till end should be acquired. - records = memoryRecords(25); - acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + CompletableFuture ackResult = sharePartition.acknowledge( MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 20); + Collections.singletonList(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - assertArrayEquals(expectedAcquiredRecord(5, 24, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); - assertEquals(24, sharePartition.cachedState().get(5L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - } + // Acknowledge the same batch again but with ACCEPT type. + ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); - @Test - public void testAcquireWithMultipleBatchesAndMaxFetchRecords() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); + // Re-acquire the same batch and then acknowledge subset with ACCEPT type. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - // Create 3 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 15, 15).close(); - memoryRecordsBuilder(buffer, 15, 30).close(); - - buffer.flip(); + assertEquals(1, acquiredRecordsList.size()); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Acquire 10 records. - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + ackResult = sharePartition.acknowledge( MEMBER_ID, - BATCH_SIZE, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records, 10), - FETCH_ISOLATION_HWM), - 20); - - // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch - // should be skipped. - assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(30, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.cachedState().get(10L).firstOffset()); - assertEquals(29, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); - - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 1, - "In-flight batch count should be 1."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 20, - "In-flight message count should be 20."); - assertEquals(20, sharePartitionMetrics.inFlightBatchMessageCount().sum()); - } - - @Test - public void testAcquireMultipleRecordsWithOverlapAndNewBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 0); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); - - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(5, sharePartition.nextFetchOffset()); - - // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. - records = memoryRecords(10, 0); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + Collections.singletonList(new ShareAcknowledgementBatch(6, 8, Collections.singletonList((byte) 3)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - assertArrayEquals(expectedAcquiredRecords(memoryRecords(5, 5), 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(10, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); + // Re-acknowledge the subset batch with REJECT type. + ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(6, 8, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); } @Test - public void testAcquireSameBatchAgain() { + public void testAcknowledgeRollbackWithFullBatchError() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 10); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 5); + MemoryRecords records1 = memoryRecords(5, 5); + MemoryRecords records2 = memoryRecords(5, 10); + MemoryRecords records3 = memoryRecords(5, 15); + List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 3, 0); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - // No records should be returned as the batch is already acquired. - assertEquals(0, acquiredRecordsList.size()); - assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(1, acquiredRecordsList.size()); - // Send subset of the same batch again, no records should be returned. - MemoryRecords subsetRecords = memoryRecords(2, 10); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, subsetRecords, 3, 0); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - // No records should be returned as the batch is already acquired. - assertEquals(0, acquiredRecordsList.size()); - assertEquals(15, sharePartition.nextFetchOffset()); - // Cache shouldn't be tracking per offset records - assertNull(sharePartition.cachedState().get(10L).offsetState()); - } + assertEquals(1, acquiredRecordsList.size()); + // Cached data with offset 5-19 should exist. + assertEquals(3, sharePartition.cachedState().size()); - @Test - public void testAcquireWithEmptyFetchRecords() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - List acquiredRecordsList = fetchAcquiredRecords( - sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - MAX_FETCH_RECORDS, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(MemoryRecords.EMPTY), - FETCH_ISOLATION_HWM), - 0 - ); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Arrays.asList( + new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(15, 19, Collections.singletonList((byte) 1)), + // Add another batch which should fail the request. + new ShareAcknowledgementBatch(15, 19, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); - assertEquals(0, acquiredRecordsList.size()); - assertEquals(0, sharePartition.nextFetchOffset()); + // Check the state of the cache. The state should be acquired itself. + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); } @Test - public void testAcquireWithBatchSizeAndSingleBatch() { + public void testAcknowledgeRollbackWithSubsetError() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Single batch has more records than batch size. Hence, only a single batch exceeding the batch size - // should be acquired. - MemoryRecords records = memoryRecords(5); + MemoryRecords records1 = memoryRecords(5, 5); + MemoryRecords records2 = memoryRecords(5, 10); + MemoryRecords records3 = memoryRecords(5, 15); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, - 2 /* Batch size */, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 5); - assertArrayEquals(expectedAcquiredRecord(0, 4, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); - } - - @Test - public void testAcquireWithBatchSizeAndMultipleBatches() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Create 4 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 2).close(); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 7, 15).close(); - memoryRecordsBuilder(buffer, 6, 22).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + assertEquals(1, acquiredRecordsList.size()); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, - 5 /* Batch size */, - 100, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 26 /* Gap of 3 records will also be added to first batch */); - - // Fetch expected records from 4 batches, but change the first expected record to include gap offsets. - List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); - expectedAcquiredRecords.remove(0); - expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(2, 9, 1)); + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(28, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().containsKey(2L)); - assertTrue(sharePartition.cachedState().containsKey(10L)); - assertTrue(sharePartition.cachedState().containsKey(15L)); - assertTrue(sharePartition.cachedState().containsKey(22L)); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(22L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(2L).batchDeliveryCount()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); - assertEquals(1, sharePartition.cachedState().get(22L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(2L).offsetState()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); - assertNull(sharePartition.cachedState().get(15L).offsetState()); - assertNull(sharePartition.cachedState().get(22L).offsetState()); - } + assertEquals(1, acquiredRecordsList.size()); - @Test - public void testAcquireWithBatchSizeAndMaxFetchRecords() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Create 3 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 0).close(); - memoryRecordsBuilder(buffer, 15, 5).close(); - memoryRecordsBuilder(buffer, 15, 20).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - 2 /* Batch size */, - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 20); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); - // The last batch should be ignored as it exceeds the max fetch records. - expectedAcquiredRecords.remove(2); + assertEquals(1, acquiredRecordsList.size()); + // Cached data with offset 5-19 should exist. + assertEquals(3, sharePartition.cachedState().size()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(20, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(0, sharePartition.cachedState().get(0L).firstOffset()); - assertEquals(4, sharePartition.cachedState().get(0L).lastOffset()); - assertEquals(5, sharePartition.cachedState().get(5L).firstOffset()); - assertEquals(19, sharePartition.cachedState().get(5L).lastOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Arrays.asList( + new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(15, 19, Collections.singletonList((byte) 1)), + // Add another batch which should fail the request. + new ShareAcknowledgementBatch(16, 19, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); + + // Check the state of the cache. The state should be acquired itself. + assertEquals(3, sharePartition.cachedState().size()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + // Though the last batch is subset but the offset state map will not be exploded as the batch is + // not in acquired state due to previous batch acknowledgement. + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); } @Test - public void testAcquireSingleBatchWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset() { + public void testAcquireReleasedRecord() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.updateCacheAndOffsets(8L); + MemoryRecords records = memoryRecords(5, 10); - MemoryRecords records = memoryRecords(10, 5); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - 5 /* Batch size */, - 100, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 7 /* Acquisition of records starts post endOffset */); - - // Fetch expected single batch, but change the first offset as per endOffset. - assertArrayEquals(expectedAcquiredRecord(8, 14, 1).toArray(), acquiredRecordsList.toArray()); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); + + assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().containsKey(8L)); - } - @Test - public void testAcquireWithBatchSizeAndEndOffsetLargerThanBatchFirstOffset() - throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - sharePartition.updateCacheAndOffsets(4L); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(12, 13, Collections.singletonList((byte) 2)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - // Create 2 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 8, 2).close(); - memoryRecordsBuilder(buffer, 7, 10).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - 5 /* Batch size */, - 100, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 13 /* Acquisition of records starts post endOffset */); - - // Fetch expected records from 2 batches, but change the first batch's first offset as per endOffset. - List expectedAcquiredRecords = expectedAcquiredRecords(records, 1); - expectedAcquiredRecords.remove(0); - expectedAcquiredRecords.addAll(0, expectedAcquiredRecord(4, 9, 1)); + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - assertEquals(17, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().containsKey(4L)); - assertTrue(sharePartition.cachedState().containsKey(10L)); + // Send the same fetch request batch again but only 2 offsets should come as acquired. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 2); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_BATCH_COUNT).intValue() == 2, - "In-flight batch count should be 2."); - TestUtils.waitForCondition(() -> yammerMetricValue(SharePartitionMetrics.IN_FLIGHT_MESSAGE_COUNT).longValue() == 13, - "In-flight message count should be 13."); - assertEquals(13, sharePartitionMetrics.inFlightBatchMessageCount().sum()); - assertEquals(2, sharePartitionMetrics.inFlightBatchMessageCount().count()); - assertEquals(6, sharePartitionMetrics.inFlightBatchMessageCount().min()); - assertEquals(7, sharePartitionMetrics.inFlightBatchMessageCount().max()); + assertArrayEquals(expectedAcquiredRecords(12, 13, 2).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); } @Test - public void testAcquireBatchSkipWithBatchSizeAndEndOffsetLargerThanFirstBatch() { + public void testAcquireReleasedRecordMultipleBatches() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.updateCacheAndOffsets(12L); - - // Create 2 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 8, 2).close(); - memoryRecordsBuilder(buffer, 7, 10).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + // First fetch request with 5 records starting from offset 10. + MemoryRecords records1 = memoryRecords(5, 10); + // Second fetch request with 5 records starting from offset 15. + MemoryRecords records2 = memoryRecords(5, 15); + // Third fetch request with 5 records starting from offset 23, gap of 3 offsets. + MemoryRecords records3 = memoryRecords(5, 23); + // Fourth fetch request with 5 records starting from offset 28. + MemoryRecords records4 = memoryRecords(5, 28); List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - 5 /* Batch size */, - 100, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 5 /* Acquisition of records starts post endOffset */); - - // First batch should be skipped and fetch should result a single batch (second batch), but - // change the first offset of acquired batch as per endOffset. - assertArrayEquals(expectedAcquiredRecord(12, 16, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(17, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().containsKey(12L)); - } + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - @Test - public void testAcquireWithMaxInFlightRecordsAndTryAcquireNewBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .withMaxInflightRecords(20) - .build(); + assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); - // Acquire records, all 10 records should be acquired as within maxInflightRecords limit. - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(memoryRecords(10, 0), 0), - FETCH_ISOLATION_HWM), - 10); - // Validate all 10 records will be acquired as the maxInFlightRecords is 20. - assertArrayEquals(expectedAcquiredRecord(0, 9, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(10, sharePartition.nextFetchOffset()); - - // Create 4 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 10, 15).close(); - memoryRecordsBuilder(buffer, 5, 25).close(); - memoryRecordsBuilder(buffer, 2, 30).close(); - - buffer.flip(); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(20, sharePartition.nextFetchOffset()); - // Acquire records, should be acquired till maxInFlightRecords i.e. 20 records. As second batch - // is ending at 24 offset, hence additional 15 records will be acquired. acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records, 0), - FETCH_ISOLATION_HWM), - 15); + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - // Validate 2 batches are fetched one with 5 records and other till end of batch, third batch - // should be skipped. - assertArrayEquals(expectedAcquiredRecord(10, 24, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(25, sharePartition.nextFetchOffset()); + assertArrayEquals(expectedAcquiredRecords(records3, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(28, sharePartition.nextFetchOffset()); - // Should not acquire any records as the share partition is at capacity and fetch offset is beyond - // the end offset. - fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - 25 /* Fetch Offset */, - fetchPartitionData(memoryRecords(10, 25), 10), - FETCH_ISOLATION_HWM), - 0); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 3, records4, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 5); - assertEquals(25, sharePartition.nextFetchOffset()); - } + assertArrayEquals(expectedAcquiredRecords(records4, 1).toArray(), acquiredRecordsList.toArray()); + assertEquals(33, sharePartition.nextFetchOffset()); - @Test - public void testAcquireWithMaxInFlightRecordsAndReleaseLastOffset() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .withMaxInflightRecords(20) - .build(); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(28L).batchState()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); + assertNull(sharePartition.cachedState().get(15L).offsetState()); + assertNull(sharePartition.cachedState().get(23L).offsetState()); + assertNull(sharePartition.cachedState().get(28L).offsetState()); - // Create 4 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 10, 15).close(); - memoryRecordsBuilder(buffer, 5, 25).close(); - memoryRecordsBuilder(buffer, 3, 30).close(); + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(12, 30, Collections.singletonList((byte) 2)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - buffer.flip(); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.cachedState().size()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); + assertNull(sharePartition.cachedState().get(15L).offsetState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(23L).batchState()); + assertNull(sharePartition.cachedState().get(23L).offsetState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(23L).batchMemberId()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(28L).batchState()); + assertNotNull(sharePartition.cachedState().get(28L).offsetState()); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Acquire records, should be acquired till maxInFlightRecords i.e. 20 records till 29 offset. - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records, 10), - FETCH_ISOLATION_HWM), - 20); + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - // Validate 3 batches are fetched and fourth batch should be skipped. Max in-flight records - // limit is reached. - assertArrayEquals(expectedAcquiredRecord(10, 29, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(30, sharePartition.nextFetchOffset()); + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(28L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(29L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(30L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(31L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(32L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(28L).offsetState()); - // Release middle batch. - CompletableFuture ackResult = sharePartition.acknowledge( + // Send next batch from offset 12, only 3 records should be acquired. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, - List.of(new ShareAcknowledgementBatch(15, 19, List.of((byte) 2)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - // Validate the nextFetchOffset is updated to 15. + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 3); + + assertArrayEquals(expectedAcquiredRecords(12, 14, 2).toArray(), acquiredRecordsList.toArray()); assertEquals(15, sharePartition.nextFetchOffset()); - // The complete released batch should be acquired but not the last batch, starting at offset 30, - // as the lastOffset is adjusted according to the endOffset. + // Though record2 batch exists to acquire but send batch record3, it should be acquired but + // next fetch offset should not move. acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - 15 /* Fetch Offset */, - fetchPartitionData(records, 10), - FETCH_ISOLATION_HWM), + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 40, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 5); - // Validate 1 batch is fetched, with 5 records till end of batch, last available batch should - // not be acquired - assertArrayEquals(expectedAcquiredRecords(15, 19, 2).toArray(), acquiredRecordsList.toArray()); - assertEquals(30, sharePartition.nextFetchOffset()); + assertArrayEquals(expectedAcquiredRecords(records3, 2).toArray(), acquiredRecordsList.toArray()); + assertEquals(15, sharePartition.nextFetchOffset()); - // Release last offset of the acquired batch. Only 1 record should be released and later acquired. - ackResult = sharePartition.acknowledge( + // Acquire partial records from batch 2. + MemoryRecords subsetRecords = memoryRecords(2, 17); + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( MEMBER_ID, - List.of(new ShareAcknowledgementBatch(29, 29, List.of((byte) 2)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - // Validate the nextFetchOffset is updated to 29. - assertEquals(29, sharePartition.nextFetchOffset()); + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 2); + + assertArrayEquals(expectedAcquiredRecords(17, 18, 2).toArray(), acquiredRecordsList.toArray()); + // Next fetch offset should not move. + assertEquals(15, sharePartition.nextFetchOffset()); - // Only the last record of the acquired batch should be acquired again. + // Acquire partial records from record 4 to further test if the next fetch offset move + // accordingly once complete record 2 is also acquired. + subsetRecords = memoryRecords(1, 28); acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 500 /* Max fetch records */, - 29 /* Fetch Offset */, - fetchPartitionData(records, 10), - FETCH_ISOLATION_HWM), + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, subsetRecords, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), 1); - // Validate 1 record is acquired. - assertArrayEquals(expectedAcquiredRecord(29, 29, 2).toArray(), acquiredRecordsList.toArray()); - assertEquals(30, sharePartition.nextFetchOffset()); - } - - @Test - public void testNextFetchOffsetInitialState() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - assertEquals(0, sharePartition.nextFetchOffset()); - } - - @Test - public void testNextFetchOffsetWithCachedStateAcquired() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5), 2, 5); - assertEquals(5, sharePartition.nextFetchOffset()); - } - - @Test - public void testNextFetchOffsetWithFindAndCachedStateEmpty() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.updateFindNextFetchOffset(true); - assertTrue(sharePartition.findNextFetchOffset()); - assertEquals(0, sharePartition.nextFetchOffset()); - assertFalse(sharePartition.findNextFetchOffset()); - } - - @Test - public void testNextFetchOffsetWithFindAndCachedState() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.updateFindNextFetchOffset(true); - assertTrue(sharePartition.findNextFetchOffset()); - - fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); + assertArrayEquals(expectedAcquiredRecords(28, 28, 2).toArray(), acquiredRecordsList.toArray()); + // Next fetch offset should not move. + assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); - assertFalse(sharePartition.findNextFetchOffset()); - } + // Try to acquire complete record 2 though it's already partially acquired, the next fetch + // offset should move. + acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( + MEMBER_ID, + MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)), + 3); - @Test - public void testCanAcquireRecordsWithEmptyCache() { - SharePartition sharePartition = SharePartitionBuilder.builder().withMaxInflightRecords(1).build(); - assertTrue(sharePartition.canAcquireRecords()); + // Offset 15,16 and 19 should be acquired. + List expectedAcquiredRecords = expectedAcquiredRecords(15, 16, 2); + expectedAcquiredRecords.addAll(expectedAcquiredRecords(19, 19, 2)); + assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); + // Next fetch offset should not move. + assertEquals(29, sharePartition.nextFetchOffset()); } @Test - public void testCanAcquireRecordsWithCachedDataAndLimitNotReached() { + public void testAcquisitionLockForAcquiringSingleRecord() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(6) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); - // Limit not reached as only 6 in-flight records is the limit. - assertTrue(sharePartition.canAcquireRecords()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(1), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && + sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null && + sharePartition.timer().size() == 0, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testCanAcquireRecordsWithCachedDataAndLimitReached() { + public void testAcquisitionLockForAcquiringMultipleRecords() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(1) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5), 5); - // Limit reached as only one in-flight record is the limit. - assertFalse(sharePartition.canAcquireRecords()); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - @Test - public void testMaybeAcquireAndReleaseFetchLock() { - ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); + assertEquals(1, sharePartition.timer().size()); + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - FileRecords.TimestampAndOffset timestampAndOffset = new FileRecords.TimestampAndOffset(-1L, 0L, Optional.empty()); - Mockito.doReturn(new OffsetResultHolder(Optional.of(timestampAndOffset), Optional.empty())). - when(replicaManager).fetchOffsetForTimestamp(Mockito.any(TopicPartition.class), Mockito.anyLong(), Mockito.any(), Mockito.any(), Mockito.anyBoolean()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 + && sharePartition.nextFetchOffset() == 10 + && sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE + && sharePartition.cachedState().get(10L).batchDeliveryCount() == 1 + && sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + } - Time time = mock(Time.class); - when(time.hiResClockMs()) - .thenReturn(100L) // for tracking loadTimeMs - .thenReturn(110L) // for time when lock is acquired - .thenReturn(120L) // for time when lock is released - .thenReturn(140L) // for subsequent lock acquire - .thenReturn(170L); // for subsequent lock release + @Test + public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withReplicaManager(replicaManager) - .withTime(time) - .withSharePartitionMetrics(sharePartitionMetrics) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) .build(); - Uuid fetchId = Uuid.randomUuid(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.maybeInitialize(); - assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); - // Lock cannot be acquired again, as already acquired. - assertFalse(sharePartition.maybeAcquireFetchLock(fetchId)); - // Release the lock. - sharePartition.releaseFetchLock(fetchId); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - assertEquals(1, sharePartitionMetrics.fetchLockTimeMs().count()); - assertEquals(10, sharePartitionMetrics.fetchLockTimeMs().sum()); - assertEquals(1, sharePartitionMetrics.fetchLockRatio().count()); - // Since first request didn't have any lock idle wait time, the ratio should be 1. - assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean()); + // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Lock can be acquired again. - assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); - // Release lock to update metrics and verify. - sharePartition.releaseFetchLock(fetchId); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(2, sharePartition.timer().size()); - assertEquals(2, sharePartitionMetrics.fetchLockTimeMs().count()); - assertEquals(40, sharePartitionMetrics.fetchLockTimeMs().sum()); - assertEquals(2, sharePartitionMetrics.fetchLockRatio().count()); - // Since the second request had 20ms of idle wait time, the ratio should be 0.6 and mean as 0.8. - assertEquals(80, sharePartitionMetrics.fetchLockRatio().mean()); + // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for all the acquired records. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null && + sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testRecordFetchLockRatioMetric() { - Time time = mock(Time.class); + public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) - .withTime(time) - .withSharePartitionMetrics(sharePartitionMetrics) .build(); - // Acquired time and last lock acquisition time is 0; - sharePartition.recordFetchLockRatioMetric(0); - assertEquals(1, sharePartitionMetrics.fetchLockRatio().count()); - assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - when(time.hiResClockMs()) - .thenReturn(10L) // for time when lock is acquired - .thenReturn(80L) // for time when lock is released - .thenReturn(160L); // to update lock idle duration while acquiring lock again. + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - Uuid fetchId = Uuid.randomUuid(); - assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); - sharePartition.releaseFetchLock(fetchId); - // Acquired time is 70 but last lock acquisition time was still 0, as it's the first request - // when last acquisition time was recorded. The last acquisition time should be updated to 80. - assertEquals(2, sharePartitionMetrics.fetchLockRatio().count()); - assertEquals(100, sharePartitionMetrics.fetchLockRatio().mean()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 10 && + sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - assertTrue(sharePartition.maybeAcquireFetchLock(fetchId)); - // Update metric again with 0 as acquire time and 80 as idle duration ms. - sharePartition.recordFetchLockRatioMetric(0); - assertEquals(3, sharePartitionMetrics.fetchLockRatio().count()); - // Mean should be (100+100+1)/3 = 67, as when idle duration is 80, the ratio should be 1. - assertEquals(67, sharePartitionMetrics.fetchLockRatio().mean()); + // Acquire the same batch again. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Update metric again with 10 as acquire time and 80 as idle duration ms. - sharePartition.recordFetchLockRatioMetric(10); - assertEquals(4, sharePartitionMetrics.fetchLockRatio().count()); - // Mean should be (100+100+1+11)/4 = 53, as when idle time is 80 and acquire time 10, the ratio should be 11. - assertEquals(53, sharePartitionMetrics.fetchLockRatio().mean()); + // Acquisition lock timeout task should be created on re-acquire action. + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); } @Test - public void testAcknowledgeSingleRecordBatch() { - ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withReplicaManager(replicaManager) - .withState(SharePartitionState.ACTIVE) - .build(); + public void testAcquisitionLockOnAcknowledgingSingleRecordBatch() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(1, 0); - MemoryRecords records2 = memoryRecords(1, 1); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Another batch is acquired because if there is only 1 batch, and it is acknowledged, the batch will be removed from cachedState - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 1); - assertEquals(1, acquiredRecordsList.size()); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 1); - assertEquals(1, acquiredRecordsList.size()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(0, 0, Collections.singletonList((byte) 2)))); - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(1, 1, List.of((byte) 1)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); + assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(0, sharePartition.timer().size()); - assertEquals(2, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(1L).batchState()); - assertEquals(1, sharePartition.cachedState().get(1L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(1L).offsetState()); - // Should not invoke completeDelayedShareFetchRequest as the first offset is not acknowledged yet. - Mockito.verify(replicaManager, Mockito.times(0)) - .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION)); + assertEquals(0, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); + + // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged. + // Hence, the acquisition lock timeout task would be cancelled already. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && + sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testAcknowledgeMultipleRecordBatch() { - ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withReplicaManager(replicaManager) - .withState(SharePartitionState.ACTIVE) - .build(); - MemoryRecords records = memoryRecords(10, 5); + public void testAcquisitionLockOnAcknowledgingMultipleRecordBatch() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10); - assertEquals(1, acquiredRecordsList.size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 14, List.of((byte) 1)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(0, sharePartition.cachedState().size()); - // Should invoke completeDelayedShareFetchRequest as the start offset is moved. - Mockito.verify(replicaManager, Mockito.times(1)) - .completeDelayedShareFetchRequest(new DelayedShareFetchGroupKey(GROUP_ID, TOPIC_ID_PARTITION)); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 2)))); + + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(0, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged. + // Hence, the acquisition lock timeout task would be cancelled already. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 5 && + sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(5L).batchDeliveryCount() == 1 && + sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testAcknowledgeMultipleRecordBatchWithGapOffsets() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + public void testAcquisitionLockOnAcknowledgingMultipleRecordBatchWithGapOffsets() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); MemoryRecords records1 = memoryRecords(2, 5); // Untracked gap of 3 offsets from 7-9. MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(5, 10); // Gap from 15-17 offsets. recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); + MemoryRecords records3 = memoryRecords(2, 1); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 2); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(7, sharePartition.nextFetchOffset()); + assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 9); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(19, sharePartition.nextFetchOffset()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(2, sharePartition.timer().size()); - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of( - new ShareAcknowledgementBatch(5, 6, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 18, List.of( - (byte) 2, (byte) 2, (byte) 2, - (byte) 2, (byte) 2, (byte) 0, - (byte) 0, (byte) 0, (byte) 1 - )))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState()); + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(3, sharePartition.timer().size()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + sharePartition.acknowledge(MEMBER_ID, + // Do not send gap offsets to verify that they are ignored and accepted as per client ack. + Collections.singletonList(new ShareAcknowledgementBatch(5, 18, Collections.singletonList((byte) 1)))); + + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for batch with starting offset 1. + // Since, other records have been acknowledged. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 1 && + sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask() == null && + sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null && + sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(1L).batchState()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(10L).batchState()); } @Test - public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(8, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 10 && + sharePartition.cachedState().size() == 1 && + sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + + // Acquire subset of records again. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 3, memoryRecords(3, 12), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + // Acquisition lock timeout task should be created only on offsets which have been acquired again. + assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); + assertEquals(3, sharePartition.timer().size()); + + // Allowing acquisition lock to expire for the acquired subset batch. + TestUtils.waitForCondition( + () -> { + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + + return sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 10 && + expectedOffsetStateMap.equals(sharePartition.cachedState().get(10L).offsetState()); + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); + } + + @Test + public void testAcquisitionLockOnAcknowledgingMultipleSubsetRecordBatchWithGapOffsets() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); MemoryRecords records1 = memoryRecords(2, 5); // Untracked gap of 3 offsets from 7-9. MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); @@ -2600,6186 +2192,3415 @@ public void testAcknowledgeMultipleSubsetRecordBatchWithGapOffsets() { recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); MemoryRecords records2 = recordsBuilder.build(); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 2); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(7, sharePartition.nextFetchOffset()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 11); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(21, sharePartition.nextFetchOffset()); + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(2, sharePartition.timer().size()); // Acknowledging over subset of both batch with subset of gap offsets. - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(6, 18, List.of( - (byte) 1, (byte) 1, (byte) 1, - (byte) 1, (byte) 1, (byte) 1, - (byte) 0, (byte) 0, (byte) 1, - (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - - assertEquals(21, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(5L).batchState()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch( + 6, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 1, + (byte) 1, (byte) 1, (byte) 1, + (byte) 0, (byte) 0, (byte) 1, + (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - } + assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); + assertEquals(3, sharePartition.timer().size()); - @Test - public void testAcknowledgeOutOfRangeCachedData() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Acknowledge a batch when cache is empty. - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(0, 15, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - - MemoryRecords records = memoryRecords(5, 5); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - // Cached data with offset 5-9 should exist. - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(5L)); - - ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(20, 25, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRequestException.class, ackResult); - } - - @Test - public void testAcknowledgeOutOfRangeCachedDataFirstBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - // Create data for the batch with offsets 0-4. - MemoryRecords records = memoryRecords(5, 0); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - - // Create data for the batch with offsets 20-24. - records = memoryRecords(5, 20); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - - // Acknowledge a batch when first batch violates the range. - List acknowledgeBatches = List.of( - new ShareAcknowledgementBatch(0, 10, List.of((byte) 1)), - new ShareAcknowledgementBatch(20, 24, List.of((byte) 1))); - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, acknowledgeBatches); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRequestException.class, ackResult); - - // Create data for the batch with offsets 5-10. - records = memoryRecords(6, 5); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 6); - - assertEquals(1, acquiredRecordsList.size()); - - // Previous failed acknowledge request should succeed now. - ackResult = sharePartition.acknowledge( - MEMBER_ID, acknowledgeBatches); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - } - - @Test - public void testAcknowledgeWithAnotherMember() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 5); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - // Cached data with offset 5-9 should exist. - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(5L)); - - CompletableFuture ackResult = sharePartition.acknowledge( - "member-2", - List.of(new ShareAcknowledgementBatch(5, 9, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - } - - @Test - public void testAcknowledgeWhenOffsetNotAcquired() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 5); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - // Cached data with offset 5-9 should exist. - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(5L)); - - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - - // Acknowledge the same batch again but with ACCEPT type. - ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 9, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - - // Re-acquire the same batch and then acknowledge subset with ACCEPT type. - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertEquals(1, acquiredRecordsList.size()); - - ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(6, 8, List.of((byte) 3)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - - // Re-acknowledge the subset batch with REJECT type. - ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(6, 8, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - } - - @Test - public void testAcknowledgeRollbackWithFullBatchError() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(5, 5); - MemoryRecords records2 = memoryRecords(5, 10); - MemoryRecords records3 = memoryRecords(5, 15); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 5); - - assertEquals(1, acquiredRecordsList.size()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 5); - - assertEquals(1, acquiredRecordsList.size()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5); - - assertEquals(1, acquiredRecordsList.size()); - // Cached data with offset 5-19 should exist. - assertEquals(3, sharePartition.cachedState().size()); - - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of( - new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 14, List.of((byte) 1)), - new ShareAcknowledgementBatch(15, 19, List.of((byte) 1)), - // Add another batch which should fail the request. - new ShareAcknowledgementBatch(15, 19, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - - // Check the state of the cache. The state should be acquired itself. - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - } - - @Test - public void testAcknowledgeRollbackWithSubsetError() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(5, 5); - MemoryRecords records2 = memoryRecords(5, 10); - MemoryRecords records3 = memoryRecords(5, 15); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 5); - - assertEquals(1, acquiredRecordsList.size()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 5); - - assertEquals(1, acquiredRecordsList.size()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5); - - assertEquals(1, acquiredRecordsList.size()); - // Cached data with offset 5-19 should exist. - assertEquals(3, sharePartition.cachedState().size()); - - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of( - new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 14, List.of((byte) 1)), - new ShareAcknowledgementBatch(15, 19, List.of((byte) 1)), - // Add another batch which should fail the request. - new ShareAcknowledgementBatch(16, 19, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - - // Check the state of the cache. The state should be acquired itself. - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - // Though the last batch is subset but the offset state map will not be exploded as the batch is - // not in acquired state due to previous batch acknowledgement. - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - } - - @Test - public void testAcquireReleasedRecord() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records = memoryRecords(5, 10); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertArrayEquals(expectedAcquiredRecords(records, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); - - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(12, 13, List.of((byte) 2)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); - - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - // Send the same fetch request batch again but only 2 offsets should come as acquired. - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 2); - - assertArrayEquals(expectedAcquiredRecords(12, 13, 2).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); - } - - @Test - public void testAcquireReleasedRecordMultipleBatches() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // First fetch request with 5 records starting from offset 10. - MemoryRecords records1 = memoryRecords(5, 10); - // Second fetch request with 5 records starting from offset 15. - MemoryRecords records2 = memoryRecords(5, 15); - // Third fetch request with 5 records starting from offset 23, gap of 3 offsets. - MemoryRecords records3 = memoryRecords(5, 23); - // Fourth fetch request with 5 records starting from offset 28. - MemoryRecords records4 = memoryRecords(5, 28); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 5); - - assertArrayEquals(expectedAcquiredRecords(records1, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 5); - - assertArrayEquals(expectedAcquiredRecords(records2, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(20, sharePartition.nextFetchOffset()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5); - - assertArrayEquals(expectedAcquiredRecords(records3, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(28, sharePartition.nextFetchOffset()); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records4, 5); - - assertArrayEquals(expectedAcquiredRecords(records4, 1).toArray(), acquiredRecordsList.toArray()); - assertEquals(33, sharePartition.nextFetchOffset()); - - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(23L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(28L).batchState()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); - assertNull(sharePartition.cachedState().get(15L).offsetState()); - assertNull(sharePartition.cachedState().get(23L).offsetState()); - assertNull(sharePartition.cachedState().get(28L).offsetState()); - - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(12, 30, List.of((byte) 2)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); - - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.cachedState().size()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(15L).batchState()); - assertNull(sharePartition.cachedState().get(15L).offsetState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(23L).batchState()); - assertNull(sharePartition.cachedState().get(23L).offsetState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(23L).batchMemberId()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(28L).batchState()); - assertNotNull(sharePartition.cachedState().get(28L).offsetState()); - - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(28L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(29L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(30L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(31L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(32L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(28L).offsetState()); - - // Send next batch from offset 12, only 3 records should be acquired. - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records1, 3); - - assertArrayEquals(expectedAcquiredRecords(12, 14, 2).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); - - // Though record2 batch exists to acquire but send batch record3, it should be acquired but - // next fetch offset should not move. - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records3, 5); - - assertArrayEquals(expectedAcquiredRecords(records3, 2).toArray(), acquiredRecordsList.toArray()); - assertEquals(15, sharePartition.nextFetchOffset()); - - // Acquire partial records from batch 2. - MemoryRecords subsetRecords = memoryRecords(2, 17); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, subsetRecords, 2); - - assertArrayEquals(expectedAcquiredRecords(17, 18, 2).toArray(), acquiredRecordsList.toArray()); - // Next fetch offset should not move. - assertEquals(15, sharePartition.nextFetchOffset()); - - // Acquire partial records from record 4 to further test if the next fetch offset move - // accordingly once complete record 2 is also acquired. - subsetRecords = memoryRecords(1, 28); - acquiredRecordsList = fetchAcquiredRecords(sharePartition, subsetRecords, 1); - - assertArrayEquals(expectedAcquiredRecords(28, 28, 2).toArray(), acquiredRecordsList.toArray()); - // Next fetch offset should not move. - assertEquals(15, sharePartition.nextFetchOffset()); - - // Try to acquire complete record 2 though it's already partially acquired, the next fetch - // offset should move. - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records2, 3); - - // Offset 15,16 and 19 should be acquired. - List expectedAcquiredRecords = expectedAcquiredRecords(15, 16, 2); - expectedAcquiredRecords.addAll(expectedAcquiredRecords(19, 19, 2)); - assertArrayEquals(expectedAcquiredRecords.toArray(), acquiredRecordsList.toArray()); - // Next fetch offset should not move. - assertEquals(29, sharePartition.nextFetchOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndRecordsFetchedFromGap() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // All records fetched are part of the gap. The gap is from 11 to 20, fetched offsets are 11 to 15. - MemoryRecords records = memoryRecords(5, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 5); - - assertArrayEquals(expectedAcquiredRecord(11, 15, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(16, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(16, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightBatches() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Fetched offsets overlap the inFlight batches. The gap is from 11 to 20, but fetched records are from 11 to 25. - MemoryRecords records = memoryRecords(15, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10); - - assertArrayEquals(expectedAcquiredRecord(11, 20, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(41, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndFetchedRecordsOverlapInFlightAvailableBatches() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Fetched offsets overlap the inFlight batches. The gap is from 11 to 20, but fetched records are from 11 to 25. - MemoryRecords records = memoryRecords(15, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 15); - - // The gap from 11 to 20 will be acquired. Since the next batch is AVAILABLE, and we records fetched from replica manager - // overlap with the next batch, some records from the next batch will also be acquired - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 21, 3)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(22, 22, 3)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(23, 23, 3)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(24, 24, 3)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(25, 25, 3)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(26, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(26, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireWhenCachedStateContainsGapsAndRecordsFetchedFromNonGapOffset() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-30 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Fetched records are part of inFlightBatch 11-20 with state AVAILABLE. Fetched offsets also overlap the - // inFlight batches. The gap is from 11 to 20, but fetched records are from 11 to 25. - MemoryRecords records = memoryRecords(15, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 15); - - // 2 different batches will be acquired this time (11-20 and 21-25). The first batch will have delivery count 3 - // as previous deliveryCount was 2. The second batch will have delivery count 1 as it is acquired for the first time. - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 3)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 25, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(26, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(26, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndFetchedRecordsOverlapMultipleInFlightBatches() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.AVAILABLE.id, (short) 1), // There is a gap from 31 to 40 - new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1), // There is a gap from 51 to 60 - new PersisterStateBatch(81L, 90L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 71 to 80 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - MemoryRecords records = memoryRecords(75, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 55); - - // Acquired batches will contain the following -> - // 1. 11-20 (gap offsets) - // 2. 31-40 (gap offsets) - // 3. 41-50 (AVAILABLE batch in cachedState) - // 4. 51-60 (gap offsets) - // 5. 71-80 (gap offsets) - // 6. 81-85 (AVAILABLE batch in cachedState). These will be acquired as separate batches because we are breaking a batch in the cachedState - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(71, 80, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(81, 81, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(82, 82, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(83, 83, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(84, 84, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(85, 85, 2)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(90, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(86, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(86, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(90, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndFetchedRecordsEndJustBeforeGap() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.ACKNOWLEDGED.id, (short) 1), // There is a gap from 31 to 40 - new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 51 to 60 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - MemoryRecords records = memoryRecords(20, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 20); - - // Acquired batches will contain the following -> - // 1. 11-20 (gap offsets) - // 2. 21-30 (AVAILABLE batch in cachedState) - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 30, 3)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(70, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(31, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(31, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(70, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireGapAtBeginningAndFetchedRecordsIncludeGapOffsetsAtEnd() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.AVAILABLE.id, (short) 1), // There is a gap from 31 to 40 - new PersisterStateBatch(61L, 70L, RecordState.ARCHIVED.id, (short) 1), // There is a gap from 51 to 60 - new PersisterStateBatch(81L, 90L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 71 to 80 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - MemoryRecords records = memoryRecords(65, 11); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 45); - - // Acquired batches will contain the following -> - // 1. 11-20 (gap offsets) - // 2. 31-40 (gap offsets) - // 3. 41-50 (AVAILABLE batch in cachedState) - // 4. 51-60 (gap offsets) - // 5. 71-75 (gap offsets). The gap is from 71 to 80, but the fetched records end at 75. These gap offsets will be acquired as a single batch - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 2)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(71, 75, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(90, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(76, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - // After records are acquired, the persisterReadResultGapWindow should be updated - assertEquals(76, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(90, persisterReadResultGapWindow.endOffset()); - } - - - @Test - public void testAcquireWhenRecordsFetchedFromGapAndMaxFetchRecordsIsExceeded() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(11L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-30 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 3 batches of records with a total of 8 records - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 3, 21).close(); - memoryRecordsBuilder(buffer, 3, 24).close(); - memoryRecordsBuilder(buffer, 2, 27).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 6, // maxFetchRecords is less than the number of records fetched - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 6); - - // Since max fetch records (6) is less than the number of records fetched (8), only 6 records will be acquired - assertArrayEquals(expectedAcquiredRecord(21, 26, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(21, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(27, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - assertEquals(27, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireMaxFetchRecordsExceededAfterAcquiringGaps() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.AVAILABLE.id, (short) 2), // There is a gap from 11-20 - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 3 batches of records with a total of 8 records - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 10, 11).close(); - memoryRecordsBuilder(buffer, 10, 21).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 8, // maxFetchRecords is less than the number of records fetched - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 10); - - assertArrayEquals(expectedAcquiredRecord(11, 20, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(21, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireMaxFetchRecordsExceededBeforeAcquiringGaps() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(31L, 40L, RecordState.AVAILABLE.id, (short) 1) // There is a gap from 21-30 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 3 batches of records with a total of 8 records - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 10, 11).close(); - memoryRecordsBuilder(buffer, 20, 21).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - 8, // maxFetchRecords is less than the number of records fetched - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records), - FETCH_ISOLATION_HWM), - 10); - - assertArrayEquals(expectedAcquiredRecord(11, 20, 3).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(21, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - } - - @Test - public void testAcquireWhenRecordsFetchedFromGapAndPartitionContainsNaturalGaps() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 10L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(15L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 10 to 14 - new PersisterStateBatch(30L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21-29 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 11, 10).close(); - memoryRecordsBuilder(buffer, 21, 30).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 24); - - // Acquired batches will contain the following -> - // 1. 10-14 (gap offsets) - // 2. 21-29 (gap offsets) - // 3. 41-50 (gap offsets) - // The offsets fetched from partition include a natural gap from 21 to 29. The cached state also contain the - // gap from 21 to 29. But since the broker does not parse the fetched records, the broker is not aware of this - // natural gap. In this case, the gap will be acquired, and it is the client's responsibility to inform the - // broker about this gap. - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(10, 14, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(21, 29, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(41, 50, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(50, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(51, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNull(persisterReadResultGapWindow); - } - - @Test - public void testAcquireCachedStateInitialGapMatchesWithActualPartitionGap() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 2 batches starting from 21, such that there is a natural gap from 11 to 20 - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 15, 21).close(); - memoryRecordsBuilder(buffer, 25, 36).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 20); - - // Acquired batches will contain the following -> - // 1. 31-40 (gap offsets) - // 2. 51-60 (new offsets) - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(31, 40, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(60, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(61, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNull(persisterReadResultGapWindow); - } - - @Test - public void testAcquireCachedStateInitialGapOverlapsWithActualPartitionGap() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 2 batches starting from 16, such that there is a natural gap from 11 to 15 - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 20, 16).close(); - memoryRecordsBuilder(buffer, 25, 36).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 25); - - // Acquired batches will contain the following -> - // 1. 16-20 (gap offsets) - // 1. 31-40 (gap offsets) - // 2. 51-60 (new offsets) - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(16, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(16, sharePartition.startOffset()); - assertEquals(60, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(61, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNull(persisterReadResultGapWindow); - } - - @Test - public void testAcquireCachedStateGapInBetweenOverlapsWithActualPartitionGap() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(41L, 50L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 31-40 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Creating 3 batches starting from 11, such that there is a natural gap from 26 to 30 - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 10, 11).close(); - memoryRecordsBuilder(buffer, 15, 21).close(); - memoryRecordsBuilder(buffer, 20, 41).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 30); - - // Acquired batches will contain the following -> - // 1. 11-20 (gap offsets) - // 1. 31-40 (gap offsets) - // 2. 51-60 (new offsets) - // The entire gap of 31 to 40 will be acquired even when the fetched records only contain offsets 31 to 36 because - // we rely on the client to inform the broker about these natural gaps in the partition log - List expectedAcquiredRecord = new ArrayList<>(expectedAcquiredRecord(11, 20, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(31, 40, 1)); - expectedAcquiredRecord.addAll(expectedAcquiredRecord(51, 60, 1)); - assertArrayEquals(expectedAcquiredRecord.toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(60, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(61, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNull(persisterReadResultGapWindow); - } - - @Test - public void testAcquireWhenRecordsFetchedAfterGapsAreFetched() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(11L, 20L, RecordState.ACKNOWLEDGED.id, (short) 2), - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21 to 30 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - // Fetched records are from 21 to 35 - MemoryRecords records = memoryRecords(15, 21); - List acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 10); - - // Since the gap if only from 21 to 30 and the next batch is ARCHIVED, only 10 gap offsets will be acquired as a single batch - assertArrayEquals(expectedAcquiredRecord(21, 30, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(21, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(41, sharePartition.nextFetchOffset()); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); - - assertEquals(31, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); - - // Fetching from the nextFetchOffset so that endOffset moves ahead - records = memoryRecords(15, 41); - - acquiredRecordsList = fetchAcquiredRecords(sharePartition, records, 15); - - assertArrayEquals(expectedAcquiredRecord(41, 55, 1).toArray(), acquiredRecordsList.toArray()); - - assertEquals(SharePartitionState.ACTIVE, sharePartition.partitionState()); - assertFalse(sharePartition.cachedState().isEmpty()); - assertEquals(21, sharePartition.startOffset()); - assertEquals(55, sharePartition.endOffset()); - assertEquals(3, sharePartition.stateEpoch()); - assertEquals(56, sharePartition.nextFetchOffset()); - - // Since the endOffset is now moved ahead, the persisterReadResultGapWindow should be empty - persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNull(persisterReadResultGapWindow); - } - - @Test - public void testAcquisitionLockForAcquiringSingleRecord() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(1), 1); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && - sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null && - sharePartition.timer().size() == 0, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of()))); - - assertEquals(1, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count()); - assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0); - } - - @Test - public void testAcquisitionLockForAcquiringMultipleRecords() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - - assertEquals(1, sharePartition.timer().size()); - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 - && sharePartition.nextFetchOffset() == 10 - && sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE - && sharePartition.cachedState().get(10L).batchDeliveryCount() == 1 - && sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of()))); - - assertEquals(5, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count()); - assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0); - } - - @Test - public void testAcquisitionLockForAcquiringMultipleRecordsWithOverlapAndNewBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .withSharePartitionMetrics(sharePartitionMetrics) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 0), 5); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Add records from 0-9 offsets, 5-9 should be acquired and 0-4 should be ignored. - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 5); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(2, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for all the acquired records. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null && - sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(), 5L, List.of()))); - - assertEquals(10, sharePartitionMetrics.acquisitionLockTimeoutPerSec().count()); - assertTrue(sharePartitionMetrics.acquisitionLockTimeoutPerSec().meanRate() > 0); - } - - @Test - public void testAcquisitionLockForAcquiringSameBatchAgain() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 10 && - sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of()))); - - // Acquire the same batch again. - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - - // Acquisition lock timeout task should be created on re-acquire action. - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - } - - @Test - public void testAcquisitionLockOnAcknowledgingSingleRecordBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(1, 0), 1); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 0, List.of((byte) 2)))); - - assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(0, sharePartition.timer().size()); - - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); - - // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged. - // Hence, the acquisition lock timeout task would be cancelled already. - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && - sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of()))); - } - - @Test - public void testAcquisitionLockOnAcknowledgingMultipleRecordBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); - - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 14, List.of((byte) 2)))); - - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(0, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. This will not cause any change to cached state map since the batch is already acknowledged. - // Hence, the acquisition lock timeout task would be cancelled already. - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 5 && - sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(5L).batchDeliveryCount() == 1 && - sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of()))); - } - - @Test - public void testAcquisitionLockOnAcknowledgingMultipleRecordBatchWithGapOffsets() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(5, 10); - // Gap from 15-17 offsets. - recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - MemoryRecords records3 = memoryRecords(2, 1); - - fetchAcquiredRecords(sharePartition, records3, 2); - - assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - fetchAcquiredRecords(sharePartition, records1, 2); - - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(2, sharePartition.timer().size()); - - fetchAcquiredRecords(sharePartition, records2, 9); - - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(3, sharePartition.timer().size()); - - sharePartition.acknowledge(MEMBER_ID, - // Do not send gap offsets to verify that they are ignored and accepted as per client ack. - List.of(new ShareAcknowledgementBatch(5, 18, List.of((byte) 1)))); - - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. The acquisition lock timeout will cause release of records for batch with starting offset 1. - // Since, other records have been acknowledged. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 1 && - sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask() == null && - sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null && - sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(1L, List.of(), 5L, List.of(), 10L, List.of()))); - - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(1L).batchState()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(10L).batchState()); - } - - @Test - public void testAcquisitionLockForAcquiringSubsetBatchAgain() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(8, 10), 8); - - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 10 && - sharePartition.cachedState().size() == 1 && - sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of()))); - - // Acquire subset of records again. - fetchAcquiredRecords(sharePartition, memoryRecords(3, 12), 3); - - // Acquisition lock timeout task should be created only on offsets which have been acquired again. - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - assertEquals(3, sharePartition.timer().size()); - - // Allowing acquisition lock to expire for the acquired subset batch. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - return sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 10 && - expectedOffsetStateMap.equals(sharePartition.cachedState().get(10L).offsetState()); - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of(10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L)))); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - } - - @Test - public void testAcquisitionLockOnAcknowledgingMultipleSubsetRecordBatchWithGapOffsets() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); - // Gap from 12-13 offsets. - recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap for 15 offset. - recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap from 17-19 offsets. - recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - - fetchAcquiredRecords(sharePartition, records1, 2); - - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - fetchAcquiredRecords(sharePartition, records2, 11); - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(2, sharePartition.timer().size()); - - // Acknowledging over subset of both batch with subset of gap offsets. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch( - 6, 18, List.of( - (byte) 1, (byte) 1, (byte) 1, - (byte) 1, (byte) 1, (byte) 1, - (byte) 0, (byte) 0, (byte) 1, - (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); - - assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); - assertEquals(3, sharePartition.timer().size()); - - // Allowing acquisition lock to expire for the offsets that have not been acknowledged yet. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap1 = new HashMap<>(); - expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - - Map expectedOffsetStateMap2 = new HashMap<>(); - expectedOffsetStateMap2.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - return sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 5 && - expectedOffsetStateMap1.equals(sharePartition.cachedState().get(5L).offsetState()) && - expectedOffsetStateMap2.equals(sharePartition.cachedState().get(10L).offsetState()); - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L), 10L, List.of(10L, 11L, 12L, 13L, 14L, 15L, 16L, 17L, 18L, 19L, 20L)))); - - assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); - } - - @Test - public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records - .withState(SharePartitionState.ACTIVE) - .build(); - - // Adding memoryRecords(10, 0) in the sharePartition to make sure that SPSO doesn't move forward when delivery count of records2 - // exceed the max delivery count. - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 10); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 10), 10); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(2, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(10L).batchDeliveryCount() == 1 && - sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of()))); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 10), 10); - - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(2, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire to archive the records that reach max delivery count. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - // After the second delivery attempt fails to acknowledge the record correctly, the record should be archived. - sharePartition.cachedState().get(10L).batchState() == RecordState.ARCHIVED && - sharePartition.cachedState().get(10L).batchDeliveryCount() == 2 && - sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(10L, List.of()))); - } - - @Test - public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 10); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && - sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of()))); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 0), 5); - - assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask()); - - // Allowing acquisition lock to expire to archive the records that reach max delivery count. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && - expectedOffsetStateMap.equals(sharePartition.cachedState().get(0L).offsetState()); - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of(0L, 1L, 2L, 3L, 4L, 5L, 6L, 7L, 8L, 9L)))); - - assertNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask()); - - // Since only first 5 records from the batch are archived, the batch remains in the cachedState, but the - // start offset is updated - assertEquals(5, sharePartition.startOffset()); - } - - @Test - public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 10); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 0 && - sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of()))); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 10); - - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire to archive the records that reach max delivery count. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - // After the second failed attempt to acknowledge the record batch successfully, the record batch is archived. - // Since this is the first batch in the share partition, SPSO moves forward and the cachedState is cleared - sharePartition.cachedState().isEmpty() && - sharePartition.nextFetchOffset() == 10, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of())); - } - - @Test - public void testAcknowledgeAfterAcquisitionLockTimeout() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 5 && - sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of()))); - - // Acknowledge with ACCEPT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout. - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 9, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(0, sharePartition.timer().size()); - - // Try acknowledging with REJECT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout. - ackResult = sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 9, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(0, sharePartition.timer().size()); - } - - @Test - public void testAcquisitionLockAfterDifferentAcknowledges() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Acknowledge with REJECT type. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 6, List.of((byte) 2)))); - - assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); - assertEquals(3, sharePartition.timer().size()); - - // Acknowledge with ACCEPT type. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(8, 9, List.of((byte) 1)))); - - assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - - // Allowing acquisition lock to expire will only affect the offsets that have not been acknowledged yet. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); + // Allowing acquisition lock to expire for the offsets that have not been acknowledged yet. TestUtils.waitForCondition( () -> { - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - - return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && - expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState()); - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L)))); - - assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); - } - - @Test - public void testAcquisitionLockOnBatchWithWriteShareGroupStateFailure() throws InterruptedException { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister) - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); - - assertEquals(1, sharePartition.timer().size()); - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - - // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.timer().size() == 0 && - sharePartition.nextFetchOffset() == 5 && - sharePartition.cachedState().size() == 1 && - sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of()))); - } - - @Test - public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws InterruptedException { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister) - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - fetchAcquiredRecords(sharePartition, memoryRecords(6, 5), 6); - - assertEquals(1, sharePartition.timer().size()); - assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(8, 9, List.of((byte) 1)))); - - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + Map expectedOffsetStateMap1 = new HashMap<>(); + expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - return sharePartition.timer().size() == 0 && sharePartition.cachedState().size() == 1 && - expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState()); + Map expectedOffsetStateMap2 = new HashMap<>(); + expectedOffsetStateMap2.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + + return sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 5 && + expectedOffsetStateMap1.equals(sharePartition.cachedState().get(5L).offsetState()) && + expectedOffsetStateMap2.equals(sharePartition.cachedState().get(10L).offsetState()); }, DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L, 10L)))); + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(10L).acquisitionLockTimeoutTask()); - } - - @Test - public void testReleaseSingleRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(1, 0), 1); - - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); - // Release delivery count. - assertEquals(0, sharePartition.cachedState().get(0L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(0L).offsetState()); - } - - @Test - public void testReleaseMultipleRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); - - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(0, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - } - - @Test - public void testReleaseMultipleAcknowledgedRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records0 = memoryRecords(5, 0); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecords records2 = memoryRecords(9, 10); - - fetchAcquiredRecords(sharePartition, records0, 5); - fetchAcquiredRecords(sharePartition, records1, 2); - fetchAcquiredRecords(sharePartition, records2, 9); - - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 18, List.of((byte) 1)))); - - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(10L).batchState()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); - } - - @Test - public void testReleaseAcknowledgedMultipleSubsetRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(2, 5); - - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); - // Gap from 12-13 offsets. - recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap for 15 offset. - recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap from 17-19 offsets. - recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - - fetchAcquiredRecords(sharePartition, records1, 2); - fetchAcquiredRecords(sharePartition, records2, 11); - - // Acknowledging over subset of both batch with subset of gap offsets. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(6, 18, List.of( - (byte) 1, (byte) 1, (byte) 1, - (byte) 1, (byte) 1, (byte) 1, - (byte) 0, (byte) 0, (byte) 1, - (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); - - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(5, sharePartition.nextFetchOffset()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - } - - @Test - public void testReleaseAcquiredRecordsWithAnotherMember() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(1, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); - // Gap from 12-13 offsets. - recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap for 15 offset. - recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap from 17-19 offsets. - recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM); - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(records2), FETCH_ISOLATION_HWM); - - // Acknowledging over subset of second batch with subset of gap offsets. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 18, List.of( - (byte) 1, (byte) 1, (byte) 0, (byte) 0, - (byte) 1, (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); - - // Release acquired records for "member-1". - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(19, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - // Release acquired records for "member-2". - releaseResult = sharePartition.releaseAcquiredRecords("member-2"); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - // Check cached state. - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - } - - @Test - public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); - // Gap from 12-13 offsets. - recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap for 15 offset. - recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap from 17-19 offsets. - recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM); - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(records2), FETCH_ISOLATION_HWM); - - // Acknowledging over subset of second batch with subset of gap offsets. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 18, List.of( - (byte) 1, (byte) 1, (byte) 0, (byte) 0, - (byte) 1, (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); - // Release acquired records for "member-1". - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(19, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - // Ack subset of records by "member-2". - sharePartition.acknowledge("member-2", - List.of(new ShareAcknowledgementBatch(5, 5, List.of((byte) 1)))); - - // Release acquired records for "member-2". - releaseResult = sharePartition.releaseAcquiredRecords("member-2"); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(6, sharePartition.nextFetchOffset()); - // Check cached state. - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - } - - @Test - public void testReleaseAcquiredRecordsForEmptyCachedData() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Release a batch when cache is empty. - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(0, sharePartition.cachedState().size()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); } @Test - public void testReleaseAcquiredRecordsAfterDifferentAcknowledges() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); + public void testAcquisitionLockTimeoutCauseMaxDeliveryCountExceed() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records + .withState(SharePartitionState.ACTIVE) + .build(); - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 6, List.of((byte) 2)))); + // Adding memoryRecords(10, 0) in the sharePartition to make sure that SPSO doesn't move forward when delivery count of records2 + // exceed the max delivery count. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(8, 9, List.of((byte) 1)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(5, sharePartition.nextFetchOffset()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(2, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(10L).batchDeliveryCount() == 1 && + sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(2, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertNotNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire to archive the records that reach max delivery count. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + // After the second delivery attempt fails to acknowledge the record correctly, the record should be archived. + sharePartition.cachedState().get(10L).batchState() == RecordState.ARCHIVED && + sharePartition.cachedState().get(10L).batchDeliveryCount() == 2 && + sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecords() { + public void testAcquisitionLockTimeoutCauseSPSOMoveForward() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 0), 10); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - MemoryRecords records2 = memoryRecords(5, 10); - fetchAcquiredRecords(sharePartition, records2, 5); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && + sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(10, 14, List.of((byte) 2)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(5, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, records2, 5); + assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask()); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); + // Allowing acquisition lock to expire to archive the records that reach max delivery count. + TestUtils.waitForCondition( + () -> { + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); - assertNull(sharePartition.cachedState().get(10L).offsetState()); + return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && + expectedOffsetStateMap.equals(sharePartition.cachedState().get(0L).offsetState()); + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + + assertNull(sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(3L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(4L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(0L).offsetState().get(9L).acquisitionLockTimeoutTask()); + + // Since only first 5 records from the batch are archived, the batch remains in the cachedState, but the + // start offset is updated + assertEquals(5, sharePartition.startOffset()); } @Test - public void testMaxDeliveryCountLimitNotExceededForRecordsSubsetAfterReleaseAcquiredRecordsSubset() { + public void testAcquisitionLockTimeoutCauseSPSOMoveForwardAndClearCachedState() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withMaxDeliveryCount(2) // Only 2 delivery attempts will be made before archiving the records .withState(SharePartitionState.ACTIVE) .build(); - // First fetch request with 5 records starting from offset 10. - MemoryRecords records1 = memoryRecords(5, 10); - // Second fetch request with 5 records starting from offset 15. - MemoryRecords records2 = memoryRecords(5, 15); - // third fetch request with 5 records starting from offset20. - MemoryRecords records3 = memoryRecords(5, 20); - - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, records2, 5); - fetchAcquiredRecords(sharePartition, records3, 5); - - sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( - new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)), - new ShareAcknowledgementBatch(17, 19, List.of((byte) 3)), - new ShareAcknowledgementBatch(20, 24, List.of((byte) 2)) - ))); - // Send next batch from offset 13, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records1, 2); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Send next batch from offset 15, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records2, 2); - fetchAcquiredRecords(sharePartition, records3, 5); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 0 && + sharePartition.cachedState().get(0L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - assertEquals(10, sharePartition.nextFetchOffset()); - assertEquals(3, sharePartition.cachedState().size()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(15L).batchState()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(20L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); - assertNull(sharePartition.cachedState().get(20L).offsetState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 3, 0, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(15L).offsetState()); + // Allowing acquisition lock to expire to archive the records that reach max delivery count. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + // After the second failed attempt to acknowledge the record batch successfully, the record batch is archived. + // Since this is the first batch in the share partition, SPSO moves forward and the cachedState is cleared + sharePartition.cachedState().isEmpty() && + sharePartition.nextFetchOffset() == 10, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { + public void testAcknowledgeAfterAcquisitionLockTimeout() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - // First fetch request with 5 records starting from offset 10. - MemoryRecords records1 = memoryRecords(5, 10); - // Second fetch request with 5 records starting from offset 15. - MemoryRecords records2 = memoryRecords(5, 15); - // Third fetch request with 5 records starting from offset 20. - MemoryRecords records3 = memoryRecords(5, 20); - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, records2, 5); - fetchAcquiredRecords(sharePartition, records3, 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( - new ShareAcknowledgementBatch(10, 12, List.of((byte) 1)), - new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)), - new ShareAcknowledgementBatch(17, 19, List.of((byte) 3)), - new ShareAcknowledgementBatch(20, 24, List.of((byte) 2)) - ))); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - // Send next batch from offset 13, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records1, 2); - // Send next batch from offset 15, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records2, 2); - fetchAcquiredRecords(sharePartition, records3, 5); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 5 && + sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( - new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)), - new ShareAcknowledgementBatch(20, 24, List.of((byte) 2)) - ))); + // Acknowledge with ACCEPT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout. + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(0, sharePartition.timer().size()); - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(0, sharePartition.cachedState().size()); + // Try acknowledging with REJECT type should throw InvalidRecordStateException since they've been released due to acquisition lock timeout. + ackResult = sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(0, sharePartition.timer().size()); } @Test - public void testReleaseAcquiredRecordsSubsetWithAnotherMember() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + public void testAcquisitionLockAfterDifferentAcknowledges() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 7); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 7, List.of((byte) 1)))); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); - // Release acquired records subset with another member. - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords("member-2"); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + // Acknowledge with REJECT type. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(5, 6, Collections.singletonList((byte) 2)))); + + assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); + assertEquals(3, sharePartition.timer().size()); + + // Acknowledge with ACCEPT type. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(8, 9, Collections.singletonList((byte) 1)))); + + assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); + assertEquals(1, sharePartition.timer().size()); + + // Allowing acquisition lock to expire will only affect the offsets that have not been acknowledged yet. + TestUtils.waitForCondition( + () -> { + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + + return sharePartition.timer().size() == 0 && sharePartition.nextFetchOffset() == 5 && + expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState()); + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + + assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); } @Test - public void testReleaseBatchWithWriteShareGroupStateFailure() { + public void testAcquisitionLockOnBatchWithWriteShareGroupStateFailure() throws InterruptedException { Persister persister = Mockito.mock(Persister.class); mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertTrue(releaseResult.isCompletedExceptionally()); - assertFutureThrows(GroupIdNotFoundException.class, releaseResult); + assertEquals(1, sharePartition.timer().size()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - // Due to failure in writeShareGroupState, the cached state should not be updated. - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. + TestUtils.waitForCondition( + () -> sharePartition.timer().size() == 0 && + sharePartition.nextFetchOffset() == 5 && + sharePartition.cachedState().size() == 1 && + sharePartition.cachedState().get(5L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask() == null, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testReleaseOffsetWithWriteShareGroupStateFailure() { + public void testAcquisitionLockOnOffsetWithWriteShareGroupStateFailure() throws InterruptedException { Persister persister = Mockito.mock(Persister.class); mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass. WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - fetchAcquiredRecords(sharePartition, memoryRecords(6, 5), 6); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(8, 9, List.of((byte) 1)))); + assertEquals(1, sharePartition.timer().size()); + assertNotNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(8, 9, Collections.singletonList((byte) 1)))); // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertTrue(releaseResult.isCompletedExceptionally()); - assertFutureThrows(GroupIdNotFoundException.class, releaseResult); - - // Due to failure in writeShareGroupState, the cached state should not be updated. - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(5L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(6L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(7L).state()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(8L).state()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(9L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(10L).state()); + // Allowing acquisition lock to expire. Even if write share group state RPC fails, state transition still happens. + TestUtils.waitForCondition( + () -> { + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + return sharePartition.timer().size() == 0 && sharePartition.cachedState().size() == 1 && + expectedOffsetStateMap.equals(sharePartition.cachedState().get(5L).offsetState()); + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(5L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(6L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(7L).memberId()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(8L).memberId()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(9L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(10L).memberId()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(9L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(10L).acquisitionLockTimeoutTask()); } @Test - public void testAcquisitionLockOnReleasingMultipleRecordBatch() { + public void testReleaseSingleRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(1, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); assertNull(releaseResult.join()); assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(0, sharePartition.nextFetchOffset()); assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(0, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertNull(sharePartition.cachedState().get(5L).offsetState()); - // Acquisition lock timer task would be cancelled by the release acquired records operation. - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); - assertEquals(0, sharePartition.timer().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(0L).offsetState()); } @Test - public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchWithGapOffsets() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); - // Gap from 12-13 offsets. - recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap for 15 offset. - recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - // Gap from 17-19 offsets. - recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); - - fetchAcquiredRecords(sharePartition, records1, 2); - fetchAcquiredRecords(sharePartition, records2, 11); + public void testReleaseMultipleRecordBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Acknowledging over subset of both batch with subset of gap offsets. - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(6, 18, List.of( - (byte) 1, (byte) 1, (byte) 1, - (byte) 1, (byte) 1, (byte) 1, - (byte) 0, (byte) 0, (byte) 1, - (byte) 0, (byte) 1, (byte) 0, - (byte) 1)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); assertNull(releaseResult.join()); assertFalse(releaseResult.isCompletedExceptionally()); assertEquals(5, sharePartition.nextFetchOffset()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - - expectedOffsetStateMap.clear(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - // Acquisition lock timer task would be cancelled by the release acquired records operation. - assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); - - assertEquals(0, sharePartition.timer().size()); - } - - @Test - public void testLsoMovementOnInitializationSharePartition() { - // LSO is at 0. - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - sharePartition.updateCacheAndOffsets(0); - assertEquals(0, sharePartition.nextFetchOffset()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(0, sharePartition.endOffset()); - - // LSO is at 5. - sharePartition.updateCacheAndOffsets(5); - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.startOffset()); - assertEquals(5, sharePartition.endOffset()); - } - - @Test - public void testLsoMovementForArchivingBatches() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 12), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 17), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 22), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 27), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 32), 5); - - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(2, 6, List.of((byte) 1)), - new ShareAcknowledgementBatch(12, 16, List.of((byte) 2)), - new ShareAcknowledgementBatch(22, 26, List.of((byte) 2)), - new ShareAcknowledgementBatch(27, 31, List.of((byte) 3)) - )); - - // LSO is at 20. - sharePartition.updateCacheAndOffsets(20); - - assertEquals(22, sharePartition.nextFetchOffset()); - assertEquals(20, sharePartition.startOffset()); - assertEquals(36, sharePartition.endOffset()); - - // For cached state corresponding to entry 2, the batch state will be ACKNOWLEDGED, hence it will be cleared as part of acknowledgement. - assertEquals(6, sharePartition.cachedState().size()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - assertNotNull(sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask()); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(12L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(12L).batchState()); - assertNull(sharePartition.cachedState().get(12L).batchAcquisitionLockTimeoutTask()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(17L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).batchState()); - assertNotNull(sharePartition.cachedState().get(17L).batchAcquisitionLockTimeoutTask()); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(22L).batchState()); - assertNull(sharePartition.cachedState().get(22L).batchAcquisitionLockTimeoutTask()); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(27L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(27L).batchState()); - assertNull(sharePartition.cachedState().get(27L).batchAcquisitionLockTimeoutTask()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(32L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(32L).batchState()); - assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask()); - } - - @Test - public void testLsoMovementForArchivingAllAvailableBatches() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50. - fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10); - - // After the acknowledgements, the state of share partition will be: - // 1. 11 -> 20: AVAILABLE - // 2. 21 -> 30: ACQUIRED - // 3. 31 -> 40: AVAILABLE - // 4. 41 -> 50: ACQUIRED - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)), - new ShareAcknowledgementBatch(31, 40, List.of((byte) 2)) - )); - - // Move the LSO to 41. When the LSO moves ahead, all batches that are AVAILABLE before the new LSO will be ARCHIVED. - // Thus, the state of the share partition will be: - // 1. 11 -> 20: ARCHIVED - // 2. 21 -> 30: ACQUIRED - // 3. 31 -> 40: ARCHIVED - // 4. 41 -> 50: ACQUIRED - // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal - // state when the corresponding acquisition lock timer task expires. - sharePartition.updateCacheAndOffsets(41); - - assertEquals(51, sharePartition.nextFetchOffset()); - assertEquals(41, sharePartition.startOffset()); - assertEquals(50, sharePartition.endOffset()); - - assertEquals(4, sharePartition.cachedState().size()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState()); - - // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these - // records will remain in the ACQUIRED state. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2)))); - - // The batch is still in ACQUIRED state. - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); - - // Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be - // ARCHIVED. - sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run(); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState()); - } - - @Test - public void testLsoMovementForArchivingAllAvailableOffsets() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - // A client acquires 4 batches, 11 -> 20, 21 -> 30, 31 -> 40, 41 -> 50. - fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 21), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 31), 10); - fetchAcquiredRecords(sharePartition, memoryRecords(10, 41), 10); - - // After the acknowledgements, the share partition state will be: - // 1. 11 -> 20: AVAILABLE - // 2. 21 -> 30: ACQUIRED - // 3. 31 -> 40: AVAILABLE - // 4. 41 -> 50: ACQUIRED - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(11, 20, List.of((byte) 2)), - new ShareAcknowledgementBatch(31, 40, List.of((byte) 2)) - )); - - // Move the LSO to 36. When the LSO moves ahead, all records that are AVAILABLE before the new LSO will be ARCHIVED. - // Thus, the state of the share partition will be: - // 1. 11 -> 20: ARCHIVED - // 2. 21 -> 30: ACQUIRED - // 3. 31 -> 35: ARCHIVED - // 3. 36 -> 40: AVAILABLE - // 4. 41 -> 50: ACQUIRED - // Note, the records that are in ACQUIRED state will remain in ACQUIRED state and will be transitioned to a Terminal - // state when the corresponding acquisition lock timer task expires. - sharePartition.updateCacheAndOffsets(36); - - assertEquals(36, sharePartition.nextFetchOffset()); - assertEquals(36, sharePartition.startOffset()); - assertEquals(50, sharePartition.endOffset()); - - assertEquals(4, sharePartition.cachedState().size()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(11L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(31L).state()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(32L).state()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(33L).state()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(34L).state()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(31L).offsetState().get(35L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(36L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(37L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(38L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(39L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(31L).offsetState().get(40L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(41L).batchState()); - - // The client acknowledges the batch 21 -> 30. Since this batch is before the LSO, nothing will be done and these - // records will remain in the ACQUIRED state. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21L, 30L, List.of((byte) 2)))); - - // The batch is still in ACQUIRED state. - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); - - // Once the acquisition lock timer task for the batch 21 -> 30 is expired, these records will directly be - // ARCHIVED. - sharePartition.cachedState().get(21L).batchAcquisitionLockTimeoutTask().run(); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(21L).batchState()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); } @Test - public void testLsoMovementForArchivingOffsets() { + public void testReleaseMultipleAcknowledgedRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records0 = memoryRecords(5, 0); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecords records2 = memoryRecords(9, 10); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(4, 8, List.of((byte) 1)))); - - // LSO at is 5. - sharePartition.updateCacheAndOffsets(5); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(5, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - - // Check cached offset state map. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records0, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); - assertNull(sharePartition.cachedState().get(7L).offsetState().get(7L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(7L).offsetState().get(8L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(9L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(11L).acquisitionLockTimeoutTask()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Check cached offset state map. - expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(2L).offsetState()); - assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(2L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(2L).offsetState().get(4L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(2L).offsetState().get(5L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(2L).offsetState().get(6L).acquisitionLockTimeoutTask()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(5, 18, Collections.singletonList((byte) 1)))); + + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + assertEquals(0, sharePartition.nextFetchOffset()); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(10L).batchState()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testLsoMovementForArchivingOffsetsWithStartAndEndBatchesNotFullMatches() { + public void testReleaseAcknowledgedMultipleSubsetRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(2, 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // LSO is at 4. - sharePartition.updateCacheAndOffsets(4); - - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); + // Gap from 12-13 offsets. + recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap for 15 offset. + recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap from 17-19 offsets. + recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 8. - sharePartition.updateCacheAndOffsets(8); + // Acknowledging over subset of both batch with subset of gap offsets. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(6, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 1, + (byte) 1, (byte) 1, (byte) 1, + (byte) 0, (byte) 0, (byte) 1, + (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(8, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + assertEquals(5, sharePartition.nextFetchOffset()); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatches() { + public void testReleaseAcquiredRecordsWithAnotherMember() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(1, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); + // Gap from 12-13 offsets. + recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap for 15 offset. + recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap from 17-19 offsets. + recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // LSO is at 4. - sharePartition.updateCacheAndOffsets(4); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + // Acknowledging over subset of second batch with subset of gap offsets. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(10, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 0, (byte) 0, + (byte) 1, (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + // Release acquired records for "member-1". + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - // LSO is at 7. - sharePartition.updateCacheAndOffsets(7); + assertEquals(19, sharePartition.nextFetchOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + // Release acquired records for "member-2". + releaseResult = sharePartition.releaseAcquiredRecords("member-2"); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + // Check cached state. + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostAcceptAcknowledgement() { + public void testReleaseAcquiredRecordsWithAnotherMemberAndSubsetAcknowledged() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); + // Gap from 12-13 offsets. + recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap for 15 offset. + recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap from 17-19 offsets. + recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // LSO is at 4. - sharePartition.updateCacheAndOffsets(4); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + // Acknowledging over subset of second batch with subset of gap offsets. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList(new ShareAcknowledgementBatch(10, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 0, (byte) 0, + (byte) 1, (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + // Release acquired records for "member-1". + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - // Acknowledge with ACCEPT action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(7, 8, List.of((byte) 1)))); + assertEquals(19, sharePartition.nextFetchOffset()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - // LSO is at 7. - sharePartition.updateCacheAndOffsets(7); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + // Ack subset of records by "member-2". + sharePartition.acknowledge("member-2", + Collections.singletonList(new ShareAcknowledgementBatch(5, 5, Collections.singletonList((byte) 1)))); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + // Release acquired records for "member-2". + releaseResult = sharePartition.releaseAcquiredRecords("member-2"); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - // Check cached offset state map. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + assertEquals(6, sharePartition.nextFetchOffset()); + // Check cached state. + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostReleaseAcknowledgement() { + public void testReleaseAcquiredRecordsForEmptyCachedData() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Release a batch when cache is empty. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + assertEquals(0, sharePartition.nextFetchOffset()); + assertEquals(0, sharePartition.cachedState().size()); + } - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // LSO is at 4. - sharePartition.updateCacheAndOffsets(4); - - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - - // Acknowledge with RELEASE action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(7, 8, List.of((byte) 2)))); - - // LSO is at 7. - sharePartition.updateCacheAndOffsets(7); + @Test + public void testReleaseAcquiredRecordsAfterDifferentAcknowledges() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(7, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 6, Collections.singletonList((byte) 2)))); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(8, 9, Collections.singletonList((byte) 1)))); - // Check cached offset state map. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + assertEquals(5, sharePartition.nextFetchOffset()); + // Check cached state. Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); } @Test - public void testLsoMovementToEndOffset() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquiredRecords() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) + .withState(SharePartitionState.ACTIVE) + .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(10, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acknowledge with RELEASE action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(7, 8, List.of((byte) 2)))); + MemoryRecords records2 = memoryRecords(5, 10); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 11. - sharePartition.updateCacheAndOffsets(11); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 2)))); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - // Check cached offset state map. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + assertEquals(0, sharePartition.nextFetchOffset()); + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); + assertNull(sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // Acknowledge with RELEASE action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(7, 8, List.of((byte) 2)), - new ShareAcknowledgementBatch(11, 11, List.of((byte) 2)))); - - // LSO is at 11. - sharePartition.updateCacheAndOffsets(11); + public void testMaxDeliveryCountLimitExceededForRecordsSubsetAfterReleaseAcquiredRecordsSubset() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) + .withState(SharePartitionState.ACTIVE) + .build(); + // First fetch request with 5 records starting from offset 10. + MemoryRecords records1 = memoryRecords(5, 10); + // Second fetch request with 5 records starting from offset 15. + MemoryRecords records2 = memoryRecords(5, 15); + // third fetch request with 5 records starting from offset20. + MemoryRecords records3 = memoryRecords(5, 20); - assertEquals(11, sharePartition.nextFetchOffset()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Check cached offset state map. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - @Test - public void testLsoMovementAheadOfEndOffsetPostAcknowledgement() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(Arrays.asList( + new ShareAcknowledgementBatch(13, 16, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(17, 19, Collections.singletonList((byte) 3)), + new ShareAcknowledgementBatch(20, 24, Collections.singletonList((byte) 2)) + ))); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); + // Send next batch from offset 13, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acknowledge with RELEASE action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(7, 8, List.of((byte) 2)))); + // Send next batch from offset 15, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 12. - sharePartition.updateCacheAndOffsets(12); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(12, sharePartition.startOffset()); - assertEquals(12, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(3, sharePartition.cachedState().size()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(10L).batchState()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState()); + assertThrows(IllegalStateException.class, () -> sharePartition.cachedState().get(15L).batchState()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); + assertNull(sharePartition.cachedState().get(20L).offsetState()); - // Check cached offset state map. Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(15L).offsetState()); } @Test - public void testLsoMovementAheadOfEndOffset() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); + public void testMaxDeliveryCountLimitExceededForRecordsSubsetCacheCleared() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) + .withState(SharePartitionState.ACTIVE) + .build(); + // First fetch request with 5 records starting from offset 10. + MemoryRecords records1 = memoryRecords(5, 10); + // Second fetch request with 5 records starting from offset 15. + MemoryRecords records2 = memoryRecords(5, 15); + // Third fetch request with 5 records starting from offset 20. + MemoryRecords records3 = memoryRecords(5, 20); - // LSO is at 14. - sharePartition.updateCacheAndOffsets(14); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(14, sharePartition.nextFetchOffset()); - assertEquals(14, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 50, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - } + sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(Arrays.asList( + new ShareAcknowledgementBatch(10, 12, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(13, 16, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(17, 19, Collections.singletonList((byte) 3)), + new ShareAcknowledgementBatch(20, 24, Collections.singletonList((byte) 2)) + ))); - @Test - public void testLsoMovementWithGapsInCachedStateMap() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Send next batch from offset 13, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - MemoryRecords records1 = memoryRecords(5, 2); - // Gap of 7-9. - MemoryRecords records2 = memoryRecords(5, 10); - // Gap of 15-19. - MemoryRecords records3 = memoryRecords(5, 20); + // Send next batch from offset 15, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, records2, 5); - fetchAcquiredRecords(sharePartition, records3, 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records3, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 18. - sharePartition.updateCacheAndOffsets(18); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(18, sharePartition.startOffset()); - assertEquals(24, sharePartition.endOffset()); - assertEquals(3, sharePartition.cachedState().size()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); + assertEquals(0, sharePartition.cachedState().size()); } @Test - public void testLsoMovementWithGapsInCachedStateMapAndAcknowledgedBatch() { + public void testReleaseAcquiredRecordsSubsetWithAnotherMember() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(5, 2); - // Gap of 7-9. - MemoryRecords records2 = memoryRecords(5, 10); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, records2, 5); + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 7, Collections.singletonList((byte) 1)))); - // Acknowledge with RELEASE action. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(10, 14, List.of((byte) 2)))); + // Release acquired records subset with another member. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords("member-2"); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + } + + @Test + public void testReleaseBatchWithWriteShareGroupStateFailure() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) + .withState(SharePartitionState.ACTIVE) + .build(); - // LSO is at 10. - sharePartition.updateCacheAndOffsets(10); + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - assertEquals(10, sharePartition.nextFetchOffset()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertTrue(releaseResult.isCompletedExceptionally()); + assertFutureThrows(releaseResult, GroupIdNotFoundException.class); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); + // Due to failure in writeShareGroupState, the cached state should not be updated. + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); } @Test - public void testLsoMovementPostGapsInAcknowledgements() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + public void testReleaseOffsetWithWriteShareGroupStateFailure() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) + .withState(SharePartitionState.ACTIVE) + .build(); - MemoryRecords records1 = memoryRecords(2, 5); - // Untracked gap of 3 offsets from 7-9. - MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(5, 10); - // Gap from 15-17 offsets. - recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); - MemoryRecords records2 = recordsBuilder.build(); + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true for acknowledge to pass. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - fetchAcquiredRecords(sharePartition, records1, 2); - fetchAcquiredRecords(sharePartition, records2, 9); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 6, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 18, List.of( - (byte) 2, (byte) 2, (byte) 2, (byte) 2, (byte) 2, (byte) 0, (byte) 0, (byte) 0, (byte) 2 - )))); + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(8, 9, Collections.singletonList((byte) 1)))); - // LSO is at 18. - sharePartition.updateCacheAndOffsets(18); + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - assertEquals(18, sharePartition.nextFetchOffset()); - assertEquals(18, sharePartition.startOffset()); - assertEquals(18, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertTrue(releaseResult.isCompletedExceptionally()); + assertFutureThrows(releaseResult, GroupIdNotFoundException.class); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + // Due to failure in writeShareGroupState, the cached state should not be updated. + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(5L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(6L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(7L).state()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(8L).state()); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).offsetState().get(9L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(10L).state()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(5L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(6L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(7L).memberId()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(8L).memberId()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(9L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(10L).memberId()); } @Test - public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { + public void testAcquisitionLockOnReleasingMultipleRecordBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(5, 15)), FETCH_ISOLATION_HWM); + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 20), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 25), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 30), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 35), 5); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertNull(sharePartition.cachedState().get(5L).offsetState()); + // Acquisition lock timer task would be cancelled by the release acquired records operation. + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + assertEquals(0, sharePartition.timer().size()); + } - // Acknowledge records. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(6, 7, List.of((byte) 1)), - new ShareAcknowledgementBatch(8, 8, List.of((byte) 2)), - new ShareAcknowledgementBatch(25, 29, List.of((byte) 2)), - new ShareAcknowledgementBatch(35, 37, List.of((byte) 2)) - )); + @Test + public void testAcquisitionLockOnReleasingAcknowledgedMultipleSubsetRecordBatchWithGapOffsets() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(2, 10); + // Gap from 12-13 offsets. + recordsBuilder.appendWithOffset(14, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap for 15 offset. + recordsBuilder.appendWithOffset(16, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + // Gap from 17-19 offsets. + recordsBuilder.appendWithOffset(20, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - // LSO is at 24. - sharePartition.updateCacheAndOffsets(24); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(24, sharePartition.startOffset()); - assertEquals(39, sharePartition.endOffset()); - assertEquals(7, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + // Acknowledging over subset of both batch with subset of gap offsets. + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(6, 18, Arrays.asList( + (byte) 1, (byte) 1, (byte) 1, + (byte) 1, (byte) 1, (byte) 1, + (byte) 0, (byte) 0, (byte) 1, + (byte) 0, (byte) 1, (byte) 0, + (byte) 1)))); - // Release acquired records for MEMBER_ID. CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); assertNull(releaseResult.join()); assertFalse(releaseResult.isCompletedExceptionally()); + assertEquals(5, sharePartition.nextFetchOffset()); // Check cached state. Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); - - assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - - expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(20L).offsetState()); + expectedOffsetStateMap.clear(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(25L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(25L).batchState()); + // Acquisition lock timer task would be cancelled by the release acquired records operation. + assertNull(sharePartition.cachedState().get(5L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(5L).offsetState().get(6L).acquisitionLockTimeoutTask()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(30L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(30L).batchState()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(20L).acquisitionLockTimeoutTask()); - expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(35L).offsetState()); + assertEquals(0, sharePartition.timer().size()); } @Test - public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToStartOfBatch() { + public void testLsoMovementOnInitializationSharePartition() { + // LSO is at 0. SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.updateCacheAndOffsets(0); + assertEquals(0, sharePartition.nextFetchOffset()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(0, sharePartition.endOffset()); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - - // LSO is at 10. - sharePartition.updateCacheAndOffsets(10); - - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - - // Release acquired records. - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); + // LSO is at 5. + sharePartition.updateCacheAndOffsets(5); + assertEquals(5, sharePartition.nextFetchOffset()); + assertEquals(5, sharePartition.startOffset()); + assertEquals(5, sharePartition.endOffset()); } @Test - public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToMiddleOfBatch() { + public void testLsoMovementForArchivingBatches() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 12), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 17), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 22), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 27), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, + new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 32), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(2, 6, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(12, 16, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(22, 26, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(27, 31, Collections.singletonList((byte) 3)) + )); - // LSO is at 11. - sharePartition.updateCacheAndOffsets(11); + // LSO is at 20. + sharePartition.updateCacheAndOffsets(20); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + assertEquals(22, sharePartition.nextFetchOffset()); + assertEquals(20, sharePartition.startOffset()); + assertEquals(36, sharePartition.endOffset()); - // Release acquired records. - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); + // For cached state corresponding to entry 2, the batch state will be ACKNOWLEDGED, hence it will be cleared as part of acknowledgment. + assertEquals(6, sharePartition.cachedState().size()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + assertNotNull(sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask()); - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(12L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(12L).batchState()); + assertNull(sharePartition.cachedState().get(12L).batchAcquisitionLockTimeoutTask()); + + assertEquals(MEMBER_ID, sharePartition.cachedState().get(17L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(17L).batchState()); + assertNotNull(sharePartition.cachedState().get(17L).batchAcquisitionLockTimeoutTask()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(22L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(22L).batchState()); + assertNull(sharePartition.cachedState().get(22L).batchAcquisitionLockTimeoutTask()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(27L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(27L).batchState()); + assertNull(sharePartition.cachedState().get(27L).batchAcquisitionLockTimeoutTask()); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(32L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(32L).batchState()); + assertNotNull(sharePartition.cachedState().get(32L).batchAcquisitionLockTimeoutTask()); } @Test - public void testReleaseAcquiredRecordsDecreaseDeliveryCount() { + public void testLsoMovementForArchivingOffsets() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(12, 13, List.of((byte) 1)))); - - // LSO is at 11. - sharePartition.updateCacheAndOffsets(11); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(4, 8, Collections.singletonList((byte) 1)))); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); + // LSO at is 5. + sharePartition.updateCacheAndOffsets(5); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(5, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); - // Before release, the delivery count was incremented. + // Check cached offset state map. Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - // Release acquired records. - CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertNull(releaseResult.join()); - assertFalse(releaseResult.isCompletedExceptionally()); - // Check delivery count. - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + assertNull(sharePartition.cachedState().get(7L).offsetState().get(7L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(7L).offsetState().get(8L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(9L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(7L).offsetState().get(11L).acquisitionLockTimeoutTask()); - // After release, the delivery count was decremented. + // Check cached offset state map. expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 0, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - } - - @Test - public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovement() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(5, 15)), FETCH_ISOLATION_HWM); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 20), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 25), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 30), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 35), 5); - - // Acknowledge records. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(6, 7, List.of((byte) 1)), - new ShareAcknowledgementBatch(8, 8, List.of((byte) 2)), - new ShareAcknowledgementBatch(25, 29, List.of((byte) 2)), - new ShareAcknowledgementBatch(35, 37, List.of((byte) 2)) - )); - - // LSO is at 24. - sharePartition.updateCacheAndOffsets(24); - - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(24, sharePartition.startOffset()); - assertEquals(39, sharePartition.endOffset()); - assertEquals(7, sharePartition.cachedState().size()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap1 = new HashMap<>(); - expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap1.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap1.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap1.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - - Map expectedOffsetStateMap2 = new HashMap<>(); - expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap2.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - Map expectedOffsetStateMap3 = new HashMap<>(); - expectedOffsetStateMap3.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap3.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap3.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap3.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap3.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - return sharePartition.cachedState().get(5L).offsetState().equals(expectedOffsetStateMap1) && - sharePartition.cachedState().get(20L).offsetState().equals(expectedOffsetStateMap2) && - sharePartition.cachedState().get(25L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(30L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(35L).offsetState().equals(expectedOffsetStateMap3); - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(5L, 6L, 7L, 8L, 9L), 20L, List.of(20L, 21L, 22L, 23L, 24L), 25L, List.of(), 30L, List.of(), 35L, List.of(35L, 36L, 37L, 38L, 39L)))); - - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); + expectedOffsetStateMap.put(2L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(3L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(4L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(2L).offsetState()); + assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(2L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(2L).offsetState().get(4L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(2L).offsetState().get(5L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(2L).offsetState().get(6L).acquisitionLockTimeoutTask()); } @Test - public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToStartOfBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); + public void testLsoMovementForArchivingOffsetsWithStartAndEndBatchesNotFullMatches() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 10. - sharePartition.updateCacheAndOffsets(10); + // LSO is at 4. + sharePartition.updateCacheAndOffsets(4); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) && - sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED && - sharePartition.cachedState().get(10L).batchMemberId().equals(EMPTY_MEMBER_ID) && - sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(), 10L, List.of()))); - } - - @Test - public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToMiddleOfBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - // LSO is at 11. - sharePartition.updateCacheAndOffsets(11); + // LSO is at 8. + sharePartition.updateCacheAndOffsets(8); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(11, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(8, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> { - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - return sharePartition.cachedState().get(10L).offsetState().equals(expectedOffsetStateMap) && - sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) && - sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED; - }, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(5L, List.of(), 10L, List.of(10L, 11L, 12L, 13L, 14L)))); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); } @Test - public void testScheduleAcquisitionLockTimeoutValueFromGroupConfig() { - GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); - GroupConfig groupConfig = Mockito.mock(GroupConfig.class); - int expectedDurationMs = 500; - Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); - Mockito.when(groupConfig.shareRecordLockDurationMs()).thenReturn(expectedDurationMs); + public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatches() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withGroupConfigManager(groupConfigManager).build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - AcquisitionLockTimerTask timerTask = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + // LSO is at 4. + sharePartition.updateCacheAndOffsets(4); - Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); - Mockito.verify(groupConfig).shareRecordLockDurationMs(); - assertEquals(expectedDurationMs, timerTask.delayMs); - } + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - @Test - public void testScheduleAcquisitionLockTimeoutValueUpdatesSuccessfully() { - GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); - GroupConfig groupConfig = Mockito.mock(GroupConfig.class); - int expectedDurationMs1 = 500; - int expectedDurationMs2 = 1000; - Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); - // First invocation of shareRecordLockDurationMs() returns 500, and the second invocation returns 1000 - Mockito.when(groupConfig.shareRecordLockDurationMs()) - .thenReturn(expectedDurationMs1) - .thenReturn(expectedDurationMs2); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withGroupConfigManager(groupConfigManager).build(); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - AcquisitionLockTimerTask timerTask1 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + // LSO is at 7. + sharePartition.updateCacheAndOffsets(7); - Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); - Mockito.verify(groupConfig).shareRecordLockDurationMs(); - assertEquals(expectedDurationMs1, timerTask1.delayMs); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(7, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - AcquisitionLockTimerTask timerTask2 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - Mockito.verify(groupConfigManager, Mockito.times(4)).groupConfig(GROUP_ID); - Mockito.verify(groupConfig, Mockito.times(2)).shareRecordLockDurationMs(); - assertEquals(expectedDurationMs2, timerTask2.delayMs); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); } @Test - public void testAcknowledgeBatchAndOffsetPostLsoMovement() { + public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostAcceptAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // LSO is at 12. - sharePartition.updateCacheAndOffsets(12); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(12, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); + // LSO is at 4. + sharePartition.updateCacheAndOffsets(4); + + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); - // Check cached state map. + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - - // Acknowledge with RELEASE action. - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(2, 6, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 14, List.of((byte) 2)))); + // Acknowledge with ACCEPT action. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(7, 8, Collections.singletonList((byte) 1)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); + // LSO is at 7. + sharePartition.updateCacheAndOffsets(7); assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(12, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); + assertEquals(7, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask()); // Check cached offset state map. - Map expectedOffsetStateMap = new HashMap<>(); + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); } @Test - public void testAcknowledgeBatchPostLsoMovement() { + public void testLsoMovementForArchivingOffsetsWithStartOffsetNotFullMatchesPostReleaseAcknowledgement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 20), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - // LSO is at 14. - sharePartition.updateCacheAndOffsets(14); - assertEquals(25, sharePartition.nextFetchOffset()); - assertEquals(14, sharePartition.startOffset()); - assertEquals(24, sharePartition.endOffset()); - assertEquals(3, sharePartition.cachedState().size()); + // LSO is at 4. + sharePartition.updateCacheAndOffsets(4); + + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(4, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); + + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); + // Acknowledge with RELEASE action. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(7, 8, Collections.singletonList((byte) 2)))); - // Acknowledge with ACCEPT action. - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(2, 14, List.of((byte) 1)))); - assertNull(ackResult.join()); - assertFalse(ackResult.isCompletedExceptionally()); + // LSO is at 7. + sharePartition.updateCacheAndOffsets(7); - assertEquals(25, sharePartition.nextFetchOffset()); - // For cached state corresponding to entry 2, the offset states will be ARCHIVED, ARCHIVED, ARCHIVED, ARCHIVED and ACKNOWLEDGED. - // Hence, it will get removed when calling maybeUpdateCachedStateAndOffsets() internally. - assertEquals(14, sharePartition.startOffset()); - assertEquals(24, sharePartition.endOffset()); - assertEquals(3, sharePartition.cachedState().size()); + assertEquals(7, sharePartition.nextFetchOffset()); + assertEquals(7, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); - - // Check cached state offset map. + // Check cached offset state map. Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); } @Test - public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - - // LSO is at 7. - sharePartition.updateCacheAndOffsets(7); - assertEquals(7, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(7, sharePartition.endOffset()); - assertEquals(1, sharePartition.cachedState().size()); - - // Check cached state map. - assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask()); + public void testLsoMovementToEndOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.nextFetchOffset() == 7 && sharePartition.cachedState().isEmpty() && - sharePartition.startOffset() == 7 && sharePartition.endOffset() == 7, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of())); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); + // Acknowledge with RELEASE action. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(7, 8, Collections.singletonList((byte) 2)))); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(1, sharePartition.cachedState().size()); + // LSO is at 11. + sharePartition.updateCacheAndOffsets(11); - // Acknowledge with RELEASE action. This contains a batch that doesn't exist at all. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(2, 14, List.of((byte) 2)))); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(11, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - assertEquals(10, sharePartition.nextFetchOffset()); - assertEquals(10, sharePartition.startOffset()); - assertEquals(14, sharePartition.endOffset()); - assertEquals(1, sharePartition.cachedState().size()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); - assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); + // Check cached offset state map. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); } @Test - public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOffsetAheadOfStartOffsetBatch() throws InterruptedException { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withState(SharePartitionState.ACTIVE) - .build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(2, 1), 2); - - // LSO is at 3. - sharePartition.updateCacheAndOffsets(3); - assertEquals(3, sharePartition.nextFetchOffset()); - assertEquals(3, sharePartition.startOffset()); - assertEquals(3, sharePartition.endOffset()); - assertEquals(1, sharePartition.cachedState().size()); - - // Check cached state map. - assertEquals(MEMBER_ID, sharePartition.cachedState().get(1L).batchMemberId()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(1L).batchState()); - assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); - - // Allowing acquisition lock to expire. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.nextFetchOffset() == 3 && sharePartition.cachedState().isEmpty() && - sharePartition.startOffset() == 3 && sharePartition.endOffset() == 3, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of())); + public void testLsoMovementToEndOffsetWhereEndOffsetIsAvailable() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(2, 3), 2); - fetchAcquiredRecords(sharePartition, memoryRecords(3, 5), 3); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - assertEquals(8, sharePartition.nextFetchOffset()); - assertEquals(3, sharePartition.startOffset()); - assertEquals(7, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); + // Acknowledge with RELEASE action. + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(7, 8, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(11, 11, Collections.singletonList((byte) 2)))); - // Acknowledge with RELEASE action. This contains a batch that doesn't exist at all. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(1, 7, List.of((byte) 2)))); + // LSO is at 11. + sharePartition.updateCacheAndOffsets(11); - assertEquals(3, sharePartition.nextFetchOffset()); - assertEquals(3, sharePartition.startOffset()); - assertEquals(7, sharePartition.endOffset()); + assertEquals(11, sharePartition.nextFetchOffset()); + assertEquals(11, sharePartition.startOffset()); + assertEquals(11, sharePartition.endOffset()); assertEquals(2, sharePartition.cachedState().size()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(3L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(3L).batchState()); - assertNull(sharePartition.cachedState().get(3L).batchAcquisitionLockTimeoutTask()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); + // Check cached offset state map. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); } @Test - public void testWriteShareGroupStateWithNullResponse() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testLsoMovementAheadOfEndOffsetPostAcknowledgment() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null)); - CompletableFuture result = sharePartition.writeShareGroupState(List.of()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - @Test - public void testWriteShareGroupStateWithNullTopicsData() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + // Acknowledge with RELEASE action. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(7, 8, Collections.singletonList((byte) 2)))); - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(null); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - CompletableFuture result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, result); - } + // LSO is at 12. + sharePartition.updateCacheAndOffsets(12); - @Test - public void testWriteShareGroupStateWithInvalidTopicsData() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(12, sharePartition.startOffset()); + assertEquals(12, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - // TopicsData is empty. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of()); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - CompletableFuture writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - // TopicsData contains more results than expected. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of()), - new TopicData<>(Uuid.randomUuid(), List.of()))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + // Check cached offset state map. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(7L).offsetState()); + } - // TopicsData contains no partition data. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of()))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + @Test + public void testLsoMovementAheadOfEndOffset() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // TopicsData contains wrong topicId. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(Uuid.randomUuid(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 7), Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - // TopicsData contains more partition data than expected. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()), - PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + // LSO is at 14. + sharePartition.updateCacheAndOffsets(14); + + assertEquals(14, sharePartition.nextFetchOffset()); + assertEquals(14, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - // TopicsData contains wrong partition. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - writeResult = sharePartition.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + + assertEquals(MEMBER_ID, sharePartition.cachedState().get(7L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); } @Test - public void testWriteShareGroupStateWithWriteException() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build(); + public void testLsoMovementWithGapsInCachedStateMap() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Write exception"))); - CompletableFuture writeResult = sharePartition1.writeShareGroupState(anyList()); - assertTrue(writeResult.isCompletedExceptionally()); - assertFutureThrows(IllegalStateException.class, writeResult); + MemoryRecords records1 = memoryRecords(5, 2); + // Gap of 7-9. + MemoryRecords records2 = memoryRecords(5, 10); + // Gap of 15-19. + MemoryRecords records3 = memoryRecords(5, 20); - persister = Mockito.mock(Persister.class); - // Throw exception for write state. - mockPersisterReadStateMethod(persister); - SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records3, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - Mockito.when(persister.writeState(Mockito.any())).thenThrow(new RuntimeException("Write exception")); - assertThrows(RuntimeException.class, () -> sharePartition2.writeShareGroupState(anyList())); - } + // LSO is at 18. + sharePartition.updateCacheAndOffsets(18); - @Test - public void testWriteShareGroupState() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + assertEquals(25, sharePartition.nextFetchOffset()); + assertEquals(18, sharePartition.startOffset()); + assertEquals(24, sharePartition.endOffset()); + assertEquals(3, sharePartition.cachedState().size()); - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - CompletableFuture result = sharePartition.writeShareGroupState(anyList()); - assertNull(result.join()); - assertFalse(result.isCompletedExceptionally()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + + assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); } @Test - public void testWriteShareGroupStateFailure() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) - .withState(SharePartitionState.ACTIVE) - .build(); - // Mock Write state RPC to return error response, NOT_COORDINATOR. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NOT_COORDINATOR.code(), Errors.NOT_COORDINATOR.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + public void testLsoMovementWithGapsInCachedStateMapAndAcknowledgedBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - CompletableFuture result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + MemoryRecords records1 = memoryRecords(5, 2); + // Gap of 7-9. + MemoryRecords records2 = memoryRecords(5, 10); - // Mock Write state RPC to return error response, COORDINATOR_NOT_AVAILABLE. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_NOT_AVAILABLE.code(), Errors.COORDINATOR_NOT_AVAILABLE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records2, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + // Acknowledge with RELEASE action. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 2)))); - // Mock Write state RPC to return error response, COORDINATOR_LOAD_IN_PROGRESS. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_LOAD_IN_PROGRESS.code(), Errors.COORDINATOR_LOAD_IN_PROGRESS.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + // LSO is at 10. + sharePartition.updateCacheAndOffsets(10); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(CoordinatorNotAvailableException.class, result); + assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - // Mock Write state RPC to return error response, GROUP_ID_NOT_FOUND. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(GroupIdNotFoundException.class, result); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); + } - // Mock Write state RPC to return error response, UNKNOWN_TOPIC_OR_PARTITION. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + @Test + public void testLsoMovementPostGapsInAcknowledgments() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(UnknownTopicOrPartitionException.class, result); + MemoryRecords records1 = memoryRecords(2, 5); + // Untracked gap of 3 offsets from 7-9. + MemoryRecordsBuilder recordsBuilder = memoryRecordsBuilder(5, 10); + // Gap from 15-17 offsets. + recordsBuilder.appendWithOffset(18, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); + MemoryRecords records2 = recordsBuilder.build(); - // Mock Write state RPC to return error response, FENCED_STATE_EPOCH. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.FENCED_STATE_EPOCH.code(), Errors.FENCED_STATE_EPOCH.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(NotLeaderOrFollowerException.class, result); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records2, Optional.empty(), + OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - // Mock Write state RPC to return error response, FENCED_LEADER_EPOCH. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.FENCED_LEADER_EPOCH.code(), Errors.FENCED_LEADER_EPOCH.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(5, 6, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 18, Arrays.asList( + (byte) 2, (byte) 2, (byte) 2, (byte) 2, (byte) 2, (byte) 0, (byte) 0, (byte) 0, (byte) 2 + )))); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(NotLeaderOrFollowerException.class, result); + // LSO is at 18. + sharePartition.updateCacheAndOffsets(18); - // Mock Write state RPC to return error response, UNKNOWN_SERVER_ERROR. - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_SERVER_ERROR.code(), Errors.UNKNOWN_SERVER_ERROR.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + assertEquals(18, sharePartition.nextFetchOffset()); + assertEquals(18, sharePartition.startOffset()); + assertEquals(18, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - result = sharePartition.writeShareGroupState(anyList()); - assertTrue(result.isCompletedExceptionally()); - assertFutureThrows(UnknownServerException.class, result); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testWriteShareGroupStateWithNoOpStatePersister() { + public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - List stateBatches = List.of( - new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)); - CompletableFuture result = sharePartition.writeShareGroupState(stateBatches); - assertNull(result.join()); - assertFalse(result.isCompletedExceptionally()); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - @Test - public void testMaybeUpdateCachedStateWhenAcknowledgeTypeAccept() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Acknowledge records. + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(6, 7, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(8, 8, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(25, 29, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(35, 37, Collections.singletonList((byte) 2)) + )); - fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); + // LSO is at 24. + sharePartition.updateCacheAndOffsets(24); - assertFalse(sharePartition.canAcquireRecords()); + assertEquals(25, sharePartition.nextFetchOffset()); + assertEquals(24, sharePartition.startOffset()); + assertEquals(39, sharePartition.endOffset()); + assertEquals(7, sharePartition.cachedState().size()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 249, List.of((byte) 1)))); + // Release acquired records for MEMBER_ID. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - assertEquals(250, sharePartition.nextFetchOffset()); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(250, sharePartition.startOffset()); - assertEquals(250, sharePartition.endOffset()); - assertTrue(sharePartition.canAcquireRecords()); - // The records have been accepted, thus they are removed from the cached state. - assertEquals(0, sharePartition.cachedState().size()); - } + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - @Test - public void testMaybeUpdateCachedStateWhenAcknowledgeTypeReject() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); - fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); - assertFalse(sharePartition.canAcquireRecords()); + assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 249, List.of((byte) 3)))); + expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(250, sharePartition.nextFetchOffset()); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(250, sharePartition.startOffset()); - assertEquals(250, sharePartition.endOffset()); - assertTrue(sharePartition.canAcquireRecords()); - // The records have been rejected, thus they are removed from the cached state. - assertEquals(0, sharePartition.cachedState().size()); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(20L).offsetState()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(25L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(25L).batchState()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(30L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(30L).batchState()); + + expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(35L).offsetState()); } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgeTypeRelease() { + public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToStartOfBatch() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(250, 0), 250); - assertFalse(sharePartition.canAcquireRecords()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 249, List.of((byte) 2)))); + // LSO is at 10. + sharePartition.updateCacheAndOffsets(10); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); - assertTrue(sharePartition.canAcquireRecords()); - // The records have been released, thus they are not removed from the cached state. - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); + + // Release acquired records. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchSubset() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(20) - .withState(SharePartitionState.ACTIVE) - .build(); + public void testReleaseAcquiredRecordsBatchesPostStartOffsetMovementToMiddleOfBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 15); - assertTrue(sharePartition.canAcquireRecords()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 15), 15); - assertFalse(sharePartition.canAcquireRecords()); + // LSO is at 11. + sharePartition.updateCacheAndOffsets(11); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 12, List.of((byte) 1)))); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(11, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(12L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(13L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + // Release acquired records. + CompletableFuture releaseResult = sharePartition.releaseAcquiredRecords(MEMBER_ID); + assertNull(releaseResult.join()); + assertFalse(releaseResult.isCompletedExceptionally()); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(13, sharePartition.startOffset()); - assertEquals(29, sharePartition.endOffset()); - assertEquals(30, sharePartition.nextFetchOffset()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(5L).batchState()); + + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntireBatch() { + public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovement() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(20) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 15); - assertTrue(sharePartition.canAcquireRecords()); - - fetchAcquiredRecords(sharePartition, memoryRecords(15, 15), 15); - assertFalse(sharePartition.canAcquireRecords()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 14, List.of((byte) 3)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 25), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 30), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 35), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + // Acknowledge records. + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(6, 7, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(8, 8, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(25, 29, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(35, 37, Collections.singletonList((byte) 2)) + )); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(15, sharePartition.startOffset()); - assertEquals(29, sharePartition.endOffset()); - assertEquals(30, sharePartition.nextFetchOffset()); - } + // LSO is at 24. + sharePartition.updateCacheAndOffsets(24); - @Test - public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(20) - .withState(SharePartitionState.ACTIVE) - .build(); + assertEquals(25, sharePartition.nextFetchOffset()); + assertEquals(24, sharePartition.startOffset()); + assertEquals(39, sharePartition.endOffset()); + assertEquals(7, sharePartition.cachedState().size()); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 15); - assertTrue(sharePartition.canAcquireRecords()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> { + Map expectedOffsetStateMap1 = new HashMap<>(); + expectedOffsetStateMap1.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap1.put(6L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap1.put(7L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap1.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap1.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 15), 15); - assertFalse(sharePartition.canAcquireRecords()); + Map expectedOffsetStateMap2 = new HashMap<>(); + expectedOffsetStateMap2.put(20L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(21L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(22L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(23L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap2.put(24L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(10, 14, List.of((byte) 3)))); + Map expectedOffsetStateMap3 = new HashMap<>(); + expectedOffsetStateMap3.put(35L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap3.put(36L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap3.put(37L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap3.put(38L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap3.put(39L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(9L).state()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(0L).offsetState().get(10L).state()); + return sharePartition.cachedState().get(5L).offsetState().equals(expectedOffsetStateMap1) && + sharePartition.cachedState().get(20L).offsetState().equals(expectedOffsetStateMap2) && + sharePartition.cachedState().get(25L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(30L).batchState() == RecordState.AVAILABLE && + sharePartition.cachedState().get(35L).offsetState().equals(expectedOffsetStateMap3); + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(10L).batchState()); - assertFalse(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(29, sharePartition.endOffset()); - assertEquals(30, sharePartition.nextFetchOffset()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(15L).batchState()); } @Test - public void testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged() { + public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToStartOfBatch() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(20) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 15); - assertTrue(sharePartition.canAcquireRecords()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 15), 15); - assertFalse(sharePartition.canAcquireRecords()); + // LSO is at 10. + sharePartition.updateCacheAndOffsets(10); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 29, List.of((byte) 1)))); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(30, sharePartition.startOffset()); - assertEquals(30, sharePartition.endOffset()); - assertEquals(30, sharePartition.nextFetchOffset()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) && + sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED && + sharePartition.cachedState().get(10L).batchMemberId().equals(EMPTY_MEMBER_ID) && + sharePartition.cachedState().get(10L).batchState() == RecordState.AVAILABLE, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); } @Test - public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() { + public void testAcquisitionLockTimeoutForBatchesPostStartOffsetMovementToMiddleOfBatch() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(100) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(20, 0), 20); - assertTrue(sharePartition.canAcquireRecords()); - - fetchAcquiredRecords(sharePartition, memoryRecords(20, 20), 20); - assertTrue(sharePartition.canAcquireRecords()); - - fetchAcquiredRecords(sharePartition, memoryRecords(20, 40), 20); - assertTrue(sharePartition.canAcquireRecords()); - - // First Acknowledgement for the first batch of records 0-19. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 19, List.of((byte) 1)))); - - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(20, sharePartition.startOffset()); - assertEquals(59, sharePartition.endOffset()); - assertEquals(60, sharePartition.nextFetchOffset()); - - fetchAcquiredRecords(sharePartition, memoryRecords(20, 60), 20); - assertTrue(sharePartition.canAcquireRecords()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(20, 49, List.of((byte) 1)))); + // LSO is at 11. + sharePartition.updateCacheAndOffsets(11); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(40L).offsetState().get(49L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(40L).offsetState().get(50L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(60L).batchState()); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(50, sharePartition.startOffset()); - assertEquals(79, sharePartition.endOffset()); - assertEquals(80, sharePartition.nextFetchOffset()); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(11, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - fetchAcquiredRecords(sharePartition, memoryRecords(100, 80), 100); - assertFalse(sharePartition.canAcquireRecords()); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> { + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + return sharePartition.cachedState().get(10L).offsetState().equals(expectedOffsetStateMap) && + sharePartition.cachedState().get(5L).batchMemberId().equals(EMPTY_MEMBER_ID) && + sharePartition.cachedState().get(5L).batchState() == RecordState.ARCHIVED; + }, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); + } - // Final Acknowledgement, all records are acknowledged here. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(50, 179, List.of((byte) 3)))); + @Test + public void testScheduleAcquisitionLockTimeoutValueFromGroupConfig() { + GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); + GroupConfig groupConfig = Mockito.mock(GroupConfig.class); + int expectedDurationMs = 500; + Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); + Mockito.when(groupConfig.shareRecordLockDurationMs()).thenReturn(expectedDurationMs); - assertEquals(0, sharePartition.cachedState().size()); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(180, sharePartition.startOffset()); - assertEquals(180, sharePartition.endOffset()); - assertEquals(180, sharePartition.nextFetchOffset()); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withGroupConfigManager(groupConfigManager).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(20, 180), 20); + SharePartition.AcquisitionLockTimerTask timerTask = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(180L).batchState()); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(180, sharePartition.startOffset()); - assertEquals(199, sharePartition.endOffset()); - assertEquals(200, sharePartition.nextFetchOffset()); + Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); + Mockito.verify(groupConfig).shareRecordLockDurationMs(); + assertEquals(expectedDurationMs, timerTask.delayMs); } @Test - public void testMaybeUpdateCachedStateGapAfterLastOffsetAcknowledged() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(11L, 20L, RecordState.AVAILABLE.id, (short) 2), - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) // There is a gap from 21 to 30 - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); + public void testScheduleAcquisitionLockTimeoutValueUpdatesSuccessfully() { + GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); + GroupConfig groupConfig = Mockito.mock(GroupConfig.class); + int expectedDurationMs1 = 500; + int expectedDurationMs2 = 1000; + Mockito.when(groupConfigManager.groupConfig(GROUP_ID)).thenReturn(Optional.of(groupConfig)); + // First invocation of shareRecordLockDurationMs() returns 500, and the second invocation returns 1000 + Mockito.when(groupConfig.shareRecordLockDurationMs()) + .thenReturn(expectedDurationMs1) + .thenReturn(expectedDurationMs2); - // Acquiring the first AVAILABLE batch from 11 to 20 - fetchAcquiredRecords(sharePartition, memoryRecords(10, 11), 10); - assertTrue(sharePartition.canAcquireRecords()); + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withGroupConfigManager(groupConfigManager).build(); - // Sending acknowledgement for the first batch from 11 to 20 - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(11, 20, List.of((byte) 1)))); + SharePartition.AcquisitionLockTimerTask timerTask1 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); - assertTrue(sharePartition.canAcquireRecords()); - // After the acknowledgement is done successfully, maybeUpdateCachedStateAndOffsets method is invoked to see - // if the start offset can be moved ahead. The last offset acknowledged is 20. But instead of moving start - // offset to the next batch in the cached state (31 to 40), it is moved to the next offset of the last - // acknowledged offset (21). This is because there is an acquirable gap in the cached state from 21 to 30. - assertEquals(21, sharePartition.startOffset()); - assertEquals(40, sharePartition.endOffset()); - assertEquals(21, sharePartition.nextFetchOffset()); + Mockito.verify(groupConfigManager, Mockito.times(2)).groupConfig(GROUP_ID); + Mockito.verify(groupConfig).shareRecordLockDurationMs(); + assertEquals(expectedDurationMs1, timerTask1.delayMs); - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); + SharePartition.AcquisitionLockTimerTask timerTask2 = sharePartition.scheduleAcquisitionLockTimeout(MEMBER_ID, 100L, 200L); - assertEquals(21, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); + Mockito.verify(groupConfigManager, Mockito.times(4)).groupConfig(GROUP_ID); + Mockito.verify(groupConfig, Mockito.times(2)).shareRecordLockDurationMs(); + assertEquals(expectedDurationMs2, timerTask2.delayMs); } @Test - public void testCanAcquireRecordsReturnsTrue() { + public void testAcknowledgeBatchAndOffsetPostLsoMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - assertEquals(0, sharePartition.startOffset()); - assertEquals(0, sharePartition.endOffset()); - - fetchAcquiredRecords(sharePartition, memoryRecords(150, 0), 150); - - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(149, sharePartition.endOffset()); - } - - @Test - public void testCanAcquireRecordsChangeResponsePostAcknowledgement() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(0, sharePartition.startOffset()); - assertEquals(0, sharePartition.endOffset()); + // LSO is at 12. + sharePartition.updateCacheAndOffsets(12); + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(12, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - fetchAcquiredRecords(sharePartition, memoryRecords(150, 0), 150); - assertTrue(sharePartition.canAcquireRecords()); + // Check cached state map. + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - fetchAcquiredRecords(sharePartition, memoryRecords(100, 150), 100); - assertFalse(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 249, List.of((byte) 1)))); + // Acknowledge with RELEASE action. + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(2, 6, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 2)))); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(250, sharePartition.startOffset()); - assertEquals(250, sharePartition.endOffset()); - } + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - @Test - public void testCanAcquireRecordsAfterReleaseAcknowledgement() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + assertEquals(12, sharePartition.nextFetchOffset()); + assertEquals(12, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - fetchAcquiredRecords(sharePartition, memoryRecords(150, 0), 150); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(149, sharePartition.endOffset()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask()); - fetchAcquiredRecords(sharePartition, memoryRecords(100, 150), 100); - assertFalse(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); + // Check cached offset state map. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 89, List.of((byte) 2)))); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); - // The records have been released, thus they are still available for being acquired. - assertTrue(sharePartition.canAcquireRecords()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); + assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); + assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); } @Test - public void testCanAcquireRecordsAfterArchiveAcknowledgement() { + public void testAcknowledgeBatchPostLsoMovement() { SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - fetchAcquiredRecords(sharePartition, memoryRecords(150, 0), 150); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(149, sharePartition.endOffset()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 20), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - fetchAcquiredRecords(sharePartition, memoryRecords(100, 150), 100); - assertFalse(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); + // LSO is at 14. + sharePartition.updateCacheAndOffsets(14); + assertEquals(25, sharePartition.nextFetchOffset()); + assertEquals(14, sharePartition.startOffset()); + assertEquals(24, sharePartition.endOffset()); + assertEquals(3, sharePartition.cachedState().size()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 89, List.of((byte) 3)))); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(90, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); - assertTrue(sharePartition.canAcquireRecords()); - } + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - @Test - public void testCanAcquireRecordsAfterAcceptAcknowledgement() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); - fetchAcquiredRecords(sharePartition, memoryRecords(150, 0), 150); - assertTrue(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(149, sharePartition.endOffset()); + // Acknowledge with ACCEPT action. + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(2, 14, Collections.singletonList((byte) 1)))); + assertNull(ackResult.join()); + assertFalse(ackResult.isCompletedExceptionally()); - fetchAcquiredRecords(sharePartition, memoryRecords(100, 150), 100); - assertFalse(sharePartition.canAcquireRecords()); - assertEquals(0, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); + assertEquals(25, sharePartition.nextFetchOffset()); + // For cached state corresponding to entry 2, the offset states will be ARCHIVED, ARCHIVED, ARCHIVED, ARCHIVED and ACKNOWLEDGED. + // Hence, it will get removed when calling maybeUpdateCachedStateAndOffsets() internally. + assertEquals(14, sharePartition.startOffset()); + assertEquals(24, sharePartition.endOffset()); + assertEquals(3, sharePartition.cachedState().size()); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 89, List.of((byte) 1)))); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); - // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. - assertEquals(90, sharePartition.startOffset()); - assertEquals(249, sharePartition.endOffset()); - assertTrue(sharePartition.canAcquireRecords()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); + + // Check cached state offset map. + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); } @Test - public void testAcknowledgeBatchWithWriteShareGroupStateFailure() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); + public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledge() throws InterruptedException { SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) .withState(SharePartitionState.ACTIVE) .build(); - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - - fetchAcquiredRecords(sharePartition, memoryRecords(10, 5), 10); - - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 14, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(UnknownTopicOrPartitionException.class, ackResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Due to failure in writeShareGroupState, the cached state should not be updated. + // LSO is at 7. + sharePartition.updateCacheAndOffsets(7); + assertEquals(7, sharePartition.nextFetchOffset()); + assertEquals(7, sharePartition.startOffset()); + assertEquals(7, sharePartition.endOffset()); assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - } - @Test - public void testAcknowledgeOffsetWithWriteShareGroupStateFailure() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) - .withState(SharePartitionState.ACTIVE) - .build(); + // Check cached state map. + assertEquals(MEMBER_ID, sharePartition.cachedState().get(2L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).batchState()); + assertNotNull(sharePartition.cachedState().get(2L).batchAcquisitionLockTimeoutTask()); - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.nextFetchOffset() == 7 && sharePartition.cachedState().isEmpty() && + sharePartition.startOffset() == 7 && sharePartition.endOffset() == 7, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - fetchAcquiredRecords(sharePartition, memoryRecords(6, 5), 6); - CompletableFuture ackResult = sharePartition.acknowledge( - MEMBER_ID, - List.of(new ShareAcknowledgementBatch(8, 10, List.of((byte) 3)))); - assertTrue(ackResult.isCompletedExceptionally()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Due to failure in writeShareGroupState, the cached state should not be updated. + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(5L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(6L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(7L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(8L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(9L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(10L).state()); - - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(5L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(6L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(7L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(8L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(9L).memberId()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(10L).memberId()); - } - @Test - public void testAcknowledgeSubsetWithAnotherMember() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Acknowledge with RELEASE action. This contains a batch that doesn't exist at all. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(2, 14, Collections.singletonList((byte) 2)))); - fetchAcquiredRecords(sharePartition, memoryRecords(7, 5), 7); - sharePartition.acknowledge(MEMBER_ID, - List.of(new ShareAcknowledgementBatch(5, 7, List.of((byte) 1)))); + assertEquals(10, sharePartition.nextFetchOffset()); + assertEquals(10, sharePartition.startOffset()); + assertEquals(14, sharePartition.endOffset()); + assertEquals(1, sharePartition.cachedState().size()); - // Acknowledge subset with another member. - CompletableFuture ackResult = sharePartition.acknowledge("member-2", - List.of(new ShareAcknowledgementBatch(9, 11, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); + assertNull(sharePartition.cachedState().get(10L).batchAcquisitionLockTimeoutTask()); } @Test - public void testAcknowledgeWithAnotherMemberRollbackBatchError() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(5, 10)), FETCH_ISOLATION_HWM); - - fetchAcquiredRecords(sharePartition, memoryRecords(5, 15), 5); + public void testLsoMovementThenAcquisitionLockTimeoutThenAcknowledgeBatchLastOffsetAheadOfStartOffsetBatch() throws InterruptedException { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) + .withState(SharePartitionState.ACTIVE) + .build(); - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)), - // Acknowledging batch with another member will cause failure and rollback. - new ShareAcknowledgementBatch(10, 14, List.of((byte) 1)), - new ShareAcknowledgementBatch(15, 19, List.of((byte) 1)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 1), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); + // LSO is at 3. + sharePartition.updateCacheAndOffsets(3); + assertEquals(3, sharePartition.nextFetchOffset()); + assertEquals(3, sharePartition.startOffset()); + assertEquals(3, sharePartition.endOffset()); + assertEquals(1, sharePartition.cachedState().size()); - // State should be rolled back to the previous state for any changes. - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals("member-2", sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); - } + // Check cached state map. + assertEquals(MEMBER_ID, sharePartition.cachedState().get(1L).batchMemberId()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(1L).batchState()); + assertNotNull(sharePartition.cachedState().get(1L).batchAcquisitionLockTimeoutTask()); - @Test - public void testAcknowledgeWithAnotherMemberRollbackSubsetError() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + // Allowing acquisition lock to expire. + TestUtils.waitForCondition( + () -> sharePartition.nextFetchOffset() == 3 && sharePartition.cachedState().isEmpty() && + sharePartition.startOffset() == 3 && sharePartition.endOffset() == 3, + DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, + () -> ACQUISITION_LOCK_NEVER_GOT_RELEASED); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 10), 5); - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 15, fetchPartitionData(memoryRecords(5, 15)), FETCH_ISOLATION_HWM); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)), - new ShareAcknowledgementBatch(10, 14, List.of((byte) 1)), - // Acknowledging subset with another member will cause failure and rollback. - new ShareAcknowledgementBatch(16, 18, List.of((byte) 1)))); - assertTrue(ackResult.isCompletedExceptionally()); - assertFutureThrows(InvalidRecordStateException.class, ackResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(3, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(3, sharePartition.cachedState().size()); - // Check the state of the cache. State should be rolled back to the previous state for any changes. - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId()); - assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); - } + assertEquals(8, sharePartition.nextFetchOffset()); + assertEquals(3, sharePartition.startOffset()); + assertEquals(7, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - @Test - public void testMaxDeliveryCountLimitExceededForRecordBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) - .withState(SharePartitionState.ACTIVE) - .build(); - MemoryRecords records = memoryRecords(10, 5); + // Acknowledge with RELEASE action. This contains a batch that doesn't exist at all. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(1, 7, Collections.singletonList((byte) 2)))); - fetchAcquiredRecords(sharePartition, records, 10); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 14, List.of((byte) 2)))); + assertEquals(3, sharePartition.nextFetchOffset()); + assertEquals(3, sharePartition.startOffset()); + assertEquals(7, sharePartition.endOffset()); + assertEquals(2, sharePartition.cachedState().size()); - fetchAcquiredRecords(sharePartition, records, 10); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 14, List.of((byte) 2)))); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(3L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(3L).batchState()); + assertNull(sharePartition.cachedState().get(3L).batchAcquisitionLockTimeoutTask()); - // All the records in the batch reached the max delivery count, hence they got archived and the cached state cleared. - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(15, sharePartition.startOffset()); - assertEquals(15, sharePartition.endOffset()); - assertEquals(0, sharePartition.cachedState().size()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertNull(sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask()); } @Test - public void testMaxDeliveryCountLimitExceededForRecordsSubset() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) - .withState(SharePartitionState.ACTIVE) - .build(); - // First fetch request with 5 records starting from offset 10. - MemoryRecords records1 = memoryRecords(5, 10); - // Second fetch request with 5 records starting from offset 15. - MemoryRecords records2 = memoryRecords(5, 15); - - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, records2, 5); - - sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( - new ShareAcknowledgementBatch(10, 12, List.of((byte) 1)), - new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)), - new ShareAcknowledgementBatch(17, 19, List.of((byte) 1))))); + public void testWriteShareGroupStateWithNullResponse() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - // Send next batch from offset 13, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records1, 2); - // Send next batch from offset 15, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, records2, 2); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(null)); + CompletableFuture result = sharePartition.writeShareGroupState(Collections.emptyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalStateException.class); + } - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(13, 16, List.of((byte) 2)))); + @Test + public void testWriteShareGroupStateWithNullTopicsData() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - assertEquals(20, sharePartition.nextFetchOffset()); - // Cached state will be empty because after the second release, the acquired records will now have moved to - // ARCHIVE state, since their max delivery count exceeded. Also, now since all the records are either in ACKNOWLEDGED or ARCHIVED - // state, cached state should be empty. - assertEquals(0, sharePartition.cachedState().size()); + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(null); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + CompletableFuture result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, IllegalStateException.class); } @Test - public void testMaxDeliveryCountLimitExceededForRecordsSubsetAndCachedStateNotCleared() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxDeliveryCount(2) - .withState(SharePartitionState.ACTIVE) - .build(); - // First fetch request with 5 records starting from offset 0. - MemoryRecords records1 = memoryRecords(5, 0); + public void testWriteShareGroupStateWithInvalidTopicsData() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - fetchAcquiredRecords(sharePartition, records1, 5); - sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(List.of( - new ShareAcknowledgementBatch(0, 1, List.of((byte) 2))))); + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + // TopicsData is empty. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.emptyList()); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + CompletableFuture writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - // Send next batch from offset 0, only 2 records should be acquired. - fetchAcquiredRecords(sharePartition, memoryRecords(2, 0), 2); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 4, List.of((byte) 2)))); + // TopicsData contains more results than expected. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Arrays.asList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.emptyList()), + new TopicData<>(Uuid.randomUuid(), Collections.emptyList()))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - assertEquals(2, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); + // TopicsData contains no partition data. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.emptyList()))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(2L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(3L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(4L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(0L).offsetState()); - } + // TopicsData contains wrong topicId. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(Uuid.randomUuid(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - @Test - public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(10, 0); - String memberId1 = "memberId-1"; - String memberId2 = "memberId-2"; + // TopicsData contains more partition data than expected. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Arrays.asList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()), + PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(records1), FETCH_ISOLATION_HWM); + // TopicsData contains wrong partition. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(1, Errors.NONE.code(), Errors.NONE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); + writeResult = sharePartition.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); + } - assertFalse(sharePartition.findNextFetchOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); + @Test + public void testWriteShareGroupStateWithWriteException() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition1 = SharePartitionBuilder.builder().withPersister(persister).build(); - sharePartition.acquire(memberId2, BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(10, 10)), FETCH_ISOLATION_HWM); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(FutureUtils.failedFuture(new RuntimeException("Write exception"))); + CompletableFuture writeResult = sharePartition1.writeShareGroupState(anyList()); + assertTrue(writeResult.isCompletedExceptionally()); + assertFutureThrows(writeResult, IllegalStateException.class); - assertFalse(sharePartition.findNextFetchOffset()); - assertEquals(20, sharePartition.nextFetchOffset()); + persister = Mockito.mock(Persister.class); + // Throw exception for write state. + mockPersisterReadStateMethod(persister); + SharePartition sharePartition2 = SharePartitionBuilder.builder().withPersister(persister).build(); - sharePartition.acknowledge(memberId1, List.of( - new ShareAcknowledgementBatch(5, 9, List.of((byte) 2)))); + Mockito.when(persister.writeState(Mockito.any())).thenThrow(new RuntimeException("Write exception")); + assertThrows(RuntimeException.class, () -> sharePartition2.writeShareGroupState(anyList())); + } - assertTrue(sharePartition.findNextFetchOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); + @Test + public void testWriteShareGroupState() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(records1), FETCH_ISOLATION_HWM); + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - assertTrue(sharePartition.findNextFetchOffset()); - assertEquals(20, sharePartition.nextFetchOffset()); - assertFalse(sharePartition.findNextFetchOffset()); + CompletableFuture result = sharePartition.writeShareGroupState(anyList()); + assertNull(result.join()); + assertFalse(result.isCompletedExceptionally()); } @Test - public void testNextFetchOffsetWithMultipleConsumers() { + public void testWriteShareGroupStateFailure() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); SharePartition sharePartition = SharePartitionBuilder.builder() - .withMaxInflightRecords(100) + .withPersister(persister) .withState(SharePartitionState.ACTIVE) .build(); - MemoryRecords records1 = memoryRecords(3, 0); - String memberId1 = MEMBER_ID; - String memberId2 = "member-2"; + // Mock Write state RPC to return error response, NOT_COORDINATOR. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NOT_COORDINATOR.code(), Errors.NOT_COORDINATOR.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(records1), FETCH_ISOLATION_HWM); - assertEquals(3, sharePartition.nextFetchOffset()); + CompletableFuture result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, CoordinatorNotAvailableException.class); - sharePartition.acknowledge(memberId1, List.of( - new ShareAcknowledgementBatch(0, 2, List.of((byte) 2)))); - assertEquals(0, sharePartition.nextFetchOffset()); + // Mock Write state RPC to return error response, COORDINATOR_NOT_AVAILABLE. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_NOT_AVAILABLE.code(), Errors.COORDINATOR_NOT_AVAILABLE.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acquire(memberId2, BATCH_SIZE, MAX_FETCH_RECORDS, 3, fetchPartitionData(memoryRecords(2, 3)), FETCH_ISOLATION_HWM); - assertEquals(0, sharePartition.nextFetchOffset()); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, CoordinatorNotAvailableException.class); - sharePartition.acquire(memberId1, BATCH_SIZE, MAX_FETCH_RECORDS, DEFAULT_FETCH_OFFSET, fetchPartitionData(records1), FETCH_ISOLATION_HWM); - assertEquals(5, sharePartition.nextFetchOffset()); + // Mock Write state RPC to return error response, COORDINATOR_LOAD_IN_PROGRESS. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.COORDINATOR_LOAD_IN_PROGRESS.code(), Errors.COORDINATOR_LOAD_IN_PROGRESS.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acknowledge(memberId2, List.of( - new ShareAcknowledgementBatch(3, 4, List.of((byte) 2)))); - assertEquals(3, sharePartition.nextFetchOffset()); - } + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, CoordinatorNotAvailableException.class); - @Test - public void testNumberOfWriteCallsOnUpdates() { - SharePartition sharePartition = Mockito.spy(SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build()); + // Mock Write state RPC to return error response, GROUP_ID_NOT_FOUND. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(2, 6, List.of((byte) 1)))); - // Acknowledge records will induce 1 write state RPC call via function isWriteShareGroupStateSuccessful. - Mockito.verify(sharePartition, Mockito.times(1)).writeShareGroupState(anyList()); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, GroupIdNotFoundException.class); - sharePartition.releaseAcquiredRecords(MEMBER_ID); - // Release acquired records will induce 0 write state RPC call via function isWriteShareGroupStateSuccessful - // because the in-flight batch has been acknowledged. Hence, the total calls remain 1. - Mockito.verify(sharePartition, Mockito.times(1)).writeShareGroupState(anyList()); - } + // Mock Write state RPC to return error response, UNKNOWN_TOPIC_OR_PARTITION. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - @Test - public void testReacquireSubsetWithAnotherMember() { - SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - MemoryRecords records1 = memoryRecords(5, 5); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, UnknownTopicOrPartitionException.class); - fetchAcquiredRecords(sharePartition, records1, 5); - fetchAcquiredRecords(sharePartition, memoryRecords(12, 10), 12); + // Mock Write state RPC to return error response, FENCED_STATE_EPOCH. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.FENCED_STATE_EPOCH.code(), Errors.FENCED_STATE_EPOCH.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 11, List.of((byte) 2)), - new ShareAcknowledgementBatch(12, 13, List.of((byte) 0)), - new ShareAcknowledgementBatch(14, 15, List.of((byte) 2)), - new ShareAcknowledgementBatch(17, 20, List.of((byte) 2)))); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, FencedStateEpochException.class); - // Reacquire with another member. - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 5, fetchPartitionData(records1), FETCH_ISOLATION_HWM); - assertEquals(10, sharePartition.nextFetchOffset()); + // Mock Write state RPC to return error response, FENCED_LEADER_EPOCH. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.FENCED_LEADER_EPOCH.code(), Errors.FENCED_LEADER_EPOCH.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - // Reacquire with another member. - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 10, fetchPartitionData(memoryRecords(7, 10)), FETCH_ISOLATION_HWM); - assertEquals(17, sharePartition.nextFetchOffset()); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, NotLeaderOrFollowerException.class); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals("member-2", sharePartition.cachedState().get(5L).batchMemberId()); - assertEquals(2, sharePartition.cachedState().get(5L).batchDeliveryCount()); + // Mock Write state RPC to return error response, UNKNOWN_SERVER_ERROR. + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_SERVER_ERROR.code(), Errors.UNKNOWN_SERVER_ERROR.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - // Records 10-11, 14-15 were reacquired by member-2. - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); - // Records 12-13 were kept as gapOffsets, hence they are not reacquired and are kept in ARCHIVED state. - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); - // Record 16 was not released in the acknowledgements. It was included in the reacquire by member-2, - // still its ownership is with member-1 and delivery count is 1. - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + result = sharePartition.writeShareGroupState(anyList()); + assertTrue(result.isCompletedExceptionally()); + assertFutureThrows(result, UnknownServerException.class); } @Test - public void testMaybeInitializeWhenReadStateRpcReturnsZeroAvailableRecords() { - List stateBatches = new ArrayList<>(); - stateBatches.add(new PersisterStateBatch(233L, 233L, RecordState.ARCHIVED.id, (short) 1)); - for (int i = 0; i < 500; i++) { - stateBatches.add(new PersisterStateBatch(234L + i, 234L + i, RecordState.ACKNOWLEDGED.id, (short) 1)); - } - - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 233L, Errors.NONE.code(), Errors.NONE.message(), - stateBatches))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - CompletableFuture result = sharePartition.maybeInitialize(); - assertTrue(result.isDone()); - assertFalse(result.isCompletedExceptionally()); + public void testWriteShareGroupStateWithNoOpShareStatePersister() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + List stateBatches = Arrays.asList( + new PersisterStateBatch(5L, 10L, RecordState.AVAILABLE.id, (short) 2), + new PersisterStateBatch(11L, 15L, RecordState.ARCHIVED.id, (short) 3)); - assertTrue(sharePartition.cachedState().isEmpty()); - assertEquals(734, sharePartition.nextFetchOffset()); - assertEquals(734, sharePartition.startOffset()); - assertEquals(734, sharePartition.endOffset()); + CompletableFuture result = sharePartition.writeShareGroupState(stateBatches); + assertNull(result.join()); + assertFalse(result.isCompletedExceptionally()); } @Test - public void testAcquireWithWriteShareGroupStateDelay() { - Persister persister = Mockito.mock(Persister.class); - mockPersisterReadStateMethod(persister); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withPersister(persister) - .withState(SharePartitionState.ACTIVE) - .build(); + public void testMaybeUpdateCachedStateWhenAcknowledgementTypeAccept() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true with a delay of 5 sec. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - CompletableFuture future = new CompletableFuture<>(); - // persister.writeState RPC will not complete instantaneously due to which commit won't happen for acknowledged offsets. - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + assertFalse(sharePartition.canAcquireRecords()); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 0), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 249, Collections.singletonList((byte) 1)))); - List acknowledgementBatches = new ArrayList<>(); - acknowledgementBatches.add(new ShareAcknowledgementBatch(2, 3, List.of((byte) 2))); - acknowledgementBatches.add(new ShareAcknowledgementBatch(5, 9, List.of((byte) 2))); - // Acknowledge 2-3, 5-9 offsets with RELEASE acknowledge type. - sharePartition.acknowledge(MEMBER_ID, acknowledgementBatches); + assertEquals(250, sharePartition.nextFetchOffset()); + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(250, sharePartition.startOffset()); + assertEquals(250, sharePartition.endOffset()); + assertTrue(sharePartition.canAcquireRecords()); + // The records have been accepted, thus they are removed from the cached state. + assertEquals(0, sharePartition.cachedState().size()); + } - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + @Test + public void testMaybeUpdateCachedStateWhenAcknowledgementTypeReject() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Even though offsets 2-3, 5-9 are in available state, but they won't be acquired since they are still in transition from ACQUIRED - // to AVAILABLE state as the write state RPC has not completed yet, so the commit hasn't happened yet. - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 5); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertFalse(sharePartition.canAcquireRecords()); - // persister.writeState RPC will complete now. This is going to commit all the acknowledged batches. Hence, their - // rollBack state will become null and they will be available for acquire again. - future.complete(writeShareGroupStateResult); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 0), 7); - assertEquals(3, sharePartition.cachedState().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 249, Collections.singletonList((byte) 3)))); + + assertEquals(250, sharePartition.nextFetchOffset()); + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(250, sharePartition.startOffset()); + assertEquals(250, sharePartition.endOffset()); + assertTrue(sharePartition.canAcquireRecords()); + // The records have been rejected, thus they are removed from the cached state. + assertEquals(0, sharePartition.cachedState().size()); } @Test - public void testFindLastOffsetAcknowledgedWhenGapAtBeginning() { - Persister persister = Mockito.mock(Persister.class); - ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionAllData(0, 3, 11L, Errors.NONE.code(), Errors.NONE.message(), - List.of( - new PersisterStateBatch(21L, 30L, RecordState.ACKNOWLEDGED.id, (short) 2), // There is a gap from 11 to 20 - new PersisterStateBatch(31L, 40L, RecordState.ARCHIVED.id, (short) 1) - )))))); - Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); - - SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); - - sharePartition.maybeInitialize(); - - GapWindow persisterReadResultGapWindow = sharePartition.persisterReadResultGapWindow(); - assertNotNull(persisterReadResultGapWindow); + public void testMaybeUpdateCachedStateWhenAcknowledgementTypeRelease() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - // Since there is a gap in the beginning, the persisterReadResultGapWindow window is same as the cachedState - assertEquals(11, persisterReadResultGapWindow.gapStartOffset()); - assertEquals(40, persisterReadResultGapWindow.endOffset()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(250, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); - long lastOffsetAcknowledged = sharePartition.findLastOffsetAcknowledged(); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 249, Collections.singletonList((byte) 2)))); - // Since the persisterReadResultGapWindow window begins at startOffset, we cannot count any of the offsets as acknowledged. - // Thus, lastOffsetAcknowledged should be -1 - assertEquals(-1, lastOffsetAcknowledged); + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); + assertTrue(sharePartition.canAcquireRecords()); + // The records have been released, thus they are not removed from the cached state. + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); + assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(0L).batchDeliveryCount()); } @Test - public void testCacheUpdateWhenBatchHasOngoingTransition() { - Persister persister = Mockito.mock(Persister.class); - + public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForBatchSubset() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(20) .withState(SharePartitionState.ACTIVE) - .withPersister(persister) .build(); - // Acquire a single batch. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, - fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM - ), 10 - ); - // Validate that there is no ongoing transition. - assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); - // Return a future which will be completed later, so the batch state has ongoing transition. - CompletableFuture future = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.ACCEPT.id)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Assert the start offset has not moved and batch has ongoing transition. - assertEquals(21L, sharePartition.startOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); - // Validate that offset can't be moved because batch has ongoing transition. - assertFalse(sharePartition.canMoveStartOffset()); - assertEquals(-1, sharePartition.findLastOffsetAcknowledged()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 12, Collections.singletonList((byte) 1)))); - // Complete the future so acknowledge API can be completed, which updates the cache. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future.complete(writeShareGroupStateResult); + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(12L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(13L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); - // Validate the cache has been updated. - assertEquals(31L, sharePartition.startOffset()); - assertTrue(sharePartition.cachedState().isEmpty()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(13, sharePartition.startOffset()); + assertEquals(29, sharePartition.endOffset()); + assertEquals(30, sharePartition.nextFetchOffset()); } @Test - public void testCacheUpdateWhenOffsetStateHasOngoingTransition() { - Persister persister = Mockito.mock(Persister.class); - + public void testMaybeUpdateCachedStateWhenAcknowledgementsFromBeginningForEntireBatch() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(20) .withState(SharePartitionState.ACTIVE) - .withPersister(persister) .build(); - // Acquire a single batch. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, - fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM - ), 10 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Validate that there is no ongoing transition. - assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); - assertNull(sharePartition.cachedState().get(21L).offsetState()); - // Return a future which will be completed later, so the batch state has ongoing transition. - CompletableFuture future = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); - // Acknowledge offsets to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 23, List.of(AcknowledgeType.ACCEPT.id)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); - // Assert the start offset has not moved and offset state is now maintained. Offset state should - // have ongoing transition. - assertEquals(21L, sharePartition.startOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(21L).offsetState()); - assertTrue(sharePartition.cachedState().get(21L).offsetState().get(21L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(21L).offsetState().get(22L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(21L).offsetState().get(23L).hasOngoingStateTransition()); - // Only 21, 22 and 23 offsets should have ongoing state transition as the acknowledge request - // contains 21-23 offsets. - assertFalse(sharePartition.cachedState().get(21L).offsetState().get(24L).hasOngoingStateTransition()); - - // Validate that offset can't be moved because batch has ongoing transition. - assertFalse(sharePartition.canMoveStartOffset()); - assertEquals(-1, sharePartition.findLastOffsetAcknowledged()); - - // Complete the future so acknowledge API can be completed, which updates the cache. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future.complete(writeShareGroupStateResult); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 14, Collections.singletonList((byte) 3)))); - // Validate the cache has been updated. - assertEquals(24L, sharePartition.startOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(21L)); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(15, sharePartition.startOffset()); + assertEquals(29, sharePartition.endOffset()); + assertEquals(30, sharePartition.nextFetchOffset()); } - /** - * Test the case where the fetch batch has first record offset greater than the record batch start offset. - * Such batches can exist for compacted topics. - */ @Test - public void testAcquireAndAcknowledgeWithRecordsAheadOfRecordBatchStartOffset() { + public void testMaybeUpdateCachedStateWhenAcknowledgementsInBetween() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(20) .withState(SharePartitionState.ACTIVE) .build(); - ByteBuffer buffer = ByteBuffer.allocate(4096); - // Set the base offset at 5. - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 5, 2)) { - // Append records from offset 10. - memoryRecords(2, 10).records().forEach(builder::append); - // Append records from offset 15. - memoryRecords(2, 15).records().forEach(builder::append); - } - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Complete batch from 5-16 will be acquired, hence 12 records. - fetchAcquiredRecords(sharePartition, records, 12); - // Partially acknowledge the batch from 5-16. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(5, 9, List.of(ACKNOWLEDGE_TYPE_GAP_ID)), - new ShareAcknowledgementBatch(10, 11, List.of(AcknowledgeType.ACCEPT.id)), - new ShareAcknowledgementBatch(12, 14, List.of(AcknowledgeType.REJECT.id)), - new ShareAcknowledgementBatch(15, 16, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - assertEquals(15, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertNotNull(sharePartition.cachedState().get(5L)); - assertNotNull(sharePartition.cachedState().get(5L).offsetState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); - // Check cached state. - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(5L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(6L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(7L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(8L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(9L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACKNOWLEDGED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 3)))); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(5L).offsetState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(9L).state()); + assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(0L).offsetState().get(10L).state()); + + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + + assertFalse(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(29, sharePartition.endOffset()); + assertEquals(30, sharePartition.nextFetchOffset()); } - /** - * Test the case where the available cached batches never appear again in fetch response within the - * previous fetch offset range. Also remove records from the previous fetch batches. - *

          - * Such case can arise with compacted topics where complete batches are removed or records within - * batches are removed. - */ @Test - public void testAcquireWhenBatchesAreRemovedFromBetweenInSubsequentFetchData() { + public void testMaybeUpdateCachedStateWhenAllRecordsInCachedStateAreAcknowledged() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(20) .withState(SharePartitionState.ACTIVE) .build(); - // Create 3 batches of records for a single acquire. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 0).close(); - memoryRecordsBuilder(buffer, 15, 5).close(); - memoryRecordsBuilder(buffer, 15, 20).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Acquire batch (0-34) which shall create single cache entry. - fetchAcquiredRecords(sharePartition, records, 35); - // Acquire another 3 individual batches of records. - fetchAcquiredRecords(sharePartition, memoryRecords(5, 40), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 45), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 50), 15); - // Release all batches in the cache. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - // Validate cache has 4 entries. - assertEquals(4, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Compact all batches and remove some of the batches from the fetch response. - buffer = ByteBuffer.allocate(4096); - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 0, 2)) { - // Append only 2 records for 0 offset batch starting from offset 1. - memoryRecords(2, 1).records().forEach(builder::append); - } - // Do not include batch from offset 5. And compact batch starting at offset 20. - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 20, 2)) { - // Append 2 records for 20 offset batch starting from offset 20. - memoryRecords(2, 20).records().forEach(builder::append); - // And append 2 records matching the end offset of the batch. - memoryRecords(2, 33).records().forEach(builder::append); - } - // Send the full batch at offset 40. - memoryRecordsBuilder(buffer, 5, 40).close(); - // Do not include batch from offset 45. And compact the batch at offset 50. - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 50, 2)) { - // Append 5 records for 50 offset batch starting from offset 51. - memoryRecords(5, 51).records().forEach(builder::append); - // Append 2 records for in middle of the batch. - memoryRecords(2, 58).records().forEach(builder::append); - // And append 1 record prior to the end offset. - memoryRecords(1, 63).records().forEach(builder::append); - } - buffer.flip(); - records = MemoryRecords.readableRecords(buffer); - // Acquire the new compacted batches. The acquire method determines the acquisition range using - // the first and last offsets of the fetched batches and acquires all available cached batches - // within that range. That means the batch from offset 45-49 which is not included in the - // fetch response will also be acquired. Similarly, for the batch from offset 5-19 which is - // anyway in the bigger cached batch of 0-34, will also be acquired. This avoids iterating - // through individual fetched batch boundaries; the client is responsible for reporting any - // data gaps via acknowledgements. This test also covers the edge case where the last fetched - // batch is compacted, and its last offset is before the previously cached version's last offset. - // In this situation, the last batch's offset state tracking is initialized. This is handled - // correctly because the client will send individual offset acknowledgements, which require offset - // state tracking anyway. While this last scenario is unlikely in practice (as a batch's reported - // last offset should remain correct even after compaction), the test verifies its proper handling. - fetchAcquiredRecords(sharePartition, records, 59); - assertEquals(64, sharePartition.nextFetchOffset()); - assertEquals(4, sharePartition.cachedState().size()); - sharePartition.cachedState().forEach((offset, inFlightState) -> { - // All batches other than the last batch should have batch state maintained. - if (offset < 50) { - assertNotNull(inFlightState.batchState()); - assertEquals(RecordState.ACQUIRED, inFlightState.batchState()); - } else { - assertNotNull(inFlightState.offsetState()); - inFlightState.offsetState().forEach((recordOffset, offsetState) -> { - // All offsets other than the last offset should be acquired. - RecordState recordState = recordOffset < 64 ? RecordState.ACQUIRED : RecordState.AVAILABLE; - assertEquals(recordState, offsetState.state(), "Incorrect state for offset: " + recordOffset); - }); - } - }); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); + + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 29, Collections.singletonList((byte) 1)))); + + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(30, sharePartition.startOffset()); + assertEquals(30, sharePartition.endOffset()); + assertEquals(30, sharePartition.nextFetchOffset()); } - /** - * This test verifies that cached batches which are no longer returned in fetch responses (starting - * from the fetchOffset) are correctly archived. Archiving these batches is crucial for the SPSO - * and the next fetch offset to advance. Without archiving, these offsets would be stuck, as the - * cached batches would remain available. - *

          - * This scenario can occur with compacted topics when entire batches, previously held in the cache, - * are removed from the log at the offset where reading occurs. - */ @Test - public void testAcquireWhenBatchesRemovedForFetchOffset() { + public void testMaybeUpdateCachedStateMultipleAcquisitionsAndAcknowledgements() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(100) .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 0), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 15); - // Release the batches in the cache. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - // Validate cache has 3 entries. - assertEquals(3, sharePartition.cachedState().size()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Compact second batch and remove first batch from the fetch response. - ByteBuffer buffer = ByteBuffer.allocate(4096); - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 5, 2)) { - // Append only 4 records for 5th offset batch starting from offset 6. - memoryRecords(4, 6).records().forEach(builder::append); - } - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 20), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Only second batch should be acquired and first batch offsets should be archived. Send - // fetchOffset as 0. - fetchAcquiredRecords(sharePartition, records, 0, 0, 5); - assertEquals(10, sharePartition.nextFetchOffset()); - // The next fetch offset has been updated, but the start offset should remain unchanged since - // the acquire operation only marks offsets as archived. The start offset will be correctly - // updated once any records are acknowledged. - assertEquals(0, sharePartition.startOffset()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 40), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); - // Releasing acquired records updates the cache and moves the start offset. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertEquals(5, sharePartition.startOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); - // Validate first batch has been removed from the cache. - assertEquals(2, sharePartition.cachedState().size()); - sharePartition.cachedState().forEach((offset, inFlightState) -> { - assertNotNull(inFlightState.batchState()); - assertEquals(RecordState.AVAILABLE, inFlightState.batchState()); - }); - } - - /** - * This test verifies that cached batches which are no longer returned in fetch responses are - * correctly archived, when fetchOffset is within an already cached batch. Archiving these batches/offsets - * is crucial for the SPSO and the next fetch offset to advance. - *

          - * This scenario can occur with compacted topics when fetch triggers from an offset which is within - * a cached batch, and respective batch is removed from the log. - */ - @Test - public void testAcquireWhenBatchesRemovedForFetchOffsetWithinBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); + // First Acknowledgement for the first batch of records 0-19. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 19, Collections.singletonList((byte) 1)))); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 5), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 15); - // Acknowledge subset of the first batch offsets. - sharePartition.acknowledge(MEMBER_ID, List.of( - // Accept the 3 offsets of first batch. - new ShareAcknowledgementBatch(5, 7, List.of(AcknowledgeType.ACCEPT.id)))).join(); - // Release the remaining batches/offsets in the cache. - sharePartition.releaseAcquiredRecords(MEMBER_ID).join(); - // Validate cache has 2 entries. - assertEquals(2, sharePartition.cachedState().size()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(20, sharePartition.startOffset()); + assertEquals(59, sharePartition.endOffset()); + assertEquals(60, sharePartition.nextFetchOffset()); - // Mark fetch offset within the first batch to 8, first available offset. - fetchAcquiredRecords(sharePartition, memoryRecords(15, 10), 8, 0, 15); - assertEquals(25, sharePartition.nextFetchOffset()); - // The next fetch offset has been updated, but the start offset should remain unchanged since - // the acquire operation only marks offsets as archived. The start offset will be correctly - // updated once any records are acknowledged. - assertEquals(8, sharePartition.startOffset()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 60), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertTrue(sharePartition.canAcquireRecords()); + + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(20, 49, Collections.singletonList((byte) 1)))); + + assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(40L).offsetState().get(49L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(40L).offsetState().get(50L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(60L).batchState()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(50, sharePartition.startOffset()); + assertEquals(79, sharePartition.endOffset()); + assertEquals(80, sharePartition.nextFetchOffset()); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 80), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertFalse(sharePartition.canAcquireRecords()); + + // Final Acknowledgment, all records are acknowledged here. + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(50, 179, Collections.singletonList((byte) 3)))); + + assertEquals(0, sharePartition.cachedState().size()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(180, sharePartition.startOffset()); + assertEquals(180, sharePartition.endOffset()); + assertEquals(180, sharePartition.nextFetchOffset()); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(20, 180), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Releasing acquired records updates the cache and moves the start offset. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertEquals(10, sharePartition.startOffset()); - assertEquals(10, sharePartition.nextFetchOffset()); - // Validate first batch has been removed from the cache. assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(10L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(180L).batchState()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(180, sharePartition.startOffset()); + assertEquals(199, sharePartition.endOffset()); + assertEquals(200, sharePartition.nextFetchOffset()); + } + + @Test + public void testCanAcquireRecordsReturnsTrue() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + assertEquals(0, sharePartition.startOffset()); + assertEquals(0, sharePartition.endOffset()); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(149, sharePartition.endOffset()); } - /** - * This test verifies that when cached batch consists of multiple fetched batches but batches are - * removed from the log, starting at fetch offset, then cached batch is updated. - *

          - * This scenario can occur with compacted topics when entire batches, previously held in the cache, - * are removed from the log at the offset where reading occurs. - */ - @Test - public void testAcquireWhenBatchesRemovedForFetchOffsetForSameCachedBatch() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); + @Test + public void testCanAcquireRecordsChangeResponsePostAcknowledgement() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + assertEquals(0, sharePartition.startOffset()); + assertEquals(0, sharePartition.endOffset()); - // Create 3 batches of records for a single acquire. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 0).close(); - memoryRecordsBuilder(buffer, 15, 5).close(); - memoryRecordsBuilder(buffer, 15, 20).close(); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acquire batch (0-34) which shall create single cache entry. - fetchAcquiredRecords(sharePartition, records, 35); - // Release the batches in the cache. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - // Validate cache has 1 entry. - assertEquals(1, sharePartition.cachedState().size()); + assertTrue(sharePartition.canAcquireRecords()); - // Compact second batch and remove first batch from the fetch response. - buffer = ByteBuffer.allocate(4096); - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, - TimestampType.CREATE_TIME, 5, 2)) { - // Append only 4 records for 5th offset batch starting from offset 6. - memoryRecords(4, 6).records().forEach(builder::append); - } - buffer.flip(); - records = MemoryRecords.readableRecords(buffer); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Only second batch should be acquired and first batch offsets should be archived. Send - // fetchOffset as 0. - fetchAcquiredRecords(sharePartition, records, 0, 0, 5); - assertEquals(10, sharePartition.nextFetchOffset()); - // The next fetch offset has been updated, but the start offset should remain unchanged since - // the acquire operation only marks offsets as archived. The start offset will be correctly - // updated once any records are acknowledged. + assertFalse(sharePartition.canAcquireRecords()); assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); - // Releasing acquired records updates the cache and moves the start offset. - sharePartition.releaseAcquiredRecords(MEMBER_ID); - assertEquals(5, sharePartition.startOffset()); - assertEquals(5, sharePartition.nextFetchOffset()); - assertEquals(1, sharePartition.cachedState().size()); - sharePartition.cachedState().forEach((offset, inFlightState) -> { - assertNotNull(inFlightState.offsetState()); - inFlightState.offsetState().forEach((recordOffset, offsetState) -> { - RecordState recordState = recordOffset < 5 ? RecordState.ARCHIVED : RecordState.AVAILABLE; - assertEquals(recordState, offsetState.state()); - }); - }); - } - - private String assertionFailedMessage(SharePartition sharePartition, Map> offsets) { - StringBuilder errorMessage = new StringBuilder(ACQUISITION_LOCK_NEVER_GOT_RELEASED + String.format( - " timer size: %d, next fetch offset: %d\n", - sharePartition.timer().size(), - sharePartition.nextFetchOffset())); - for (Map.Entry> entry : offsets.entrySet()) { - if (entry.getValue() != null && !entry.getValue().isEmpty()) { - errorMessage.append(String.format("batch start offset: %d\n", entry.getKey())); - for (Long offset : entry.getValue()) { - errorMessage.append(String.format("\toffset: %d, offset state: %s, offset acquisition lock timeout task present: %b\n", - offset, sharePartition.cachedState().get(entry.getKey()).offsetState().get(offset).state().id(), - sharePartition.cachedState().get(entry.getKey()).offsetState().get(offset).acquisitionLockTimeoutTask() != null)); - } - } else { - errorMessage.append(String.format("batch start offset: %d, batch state: %s, batch acquisition lock timeout task present: %b\n", - entry.getKey(), sharePartition.cachedState().get(entry.getKey()).batchState().id(), - sharePartition.cachedState().get(entry.getKey()).batchAcquisitionLockTimeoutTask() != null)); - } - } - return errorMessage.toString(); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 249, Collections.singletonList((byte) 1)))); + + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(250, sharePartition.startOffset()); + assertEquals(250, sharePartition.endOffset()); } @Test - public void testFilterRecordBatchesFromAcquiredRecords() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); + public void testCanAcquireRecordsAfterReleaseAcknowledgement() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - List acquiredRecords1 = List.of( - new AcquiredRecords().setFirstOffset(1).setLastOffset(5).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(10).setLastOffset(15).setDeliveryCount((short) 2), - new AcquiredRecords().setFirstOffset(20).setLastOffset(25).setDeliveryCount((short) 1) - ); - List recordBatches1 = List.of( - memoryRecordsBuilder(3, 2).build().batches().iterator().next(), - memoryRecordsBuilder(3, 12).build().batches().iterator().next() - ); - assertEquals( - List.of( - new AcquiredRecords().setFirstOffset(1).setLastOffset(1).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(5).setLastOffset(5).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(10).setLastOffset(11).setDeliveryCount((short) 2), - new AcquiredRecords().setFirstOffset(15).setLastOffset(15).setDeliveryCount((short) 2), - new AcquiredRecords().setFirstOffset(20).setLastOffset(25).setDeliveryCount((short) 1)), - sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords1, recordBatches1)); - - List acquiredRecords2 = List.of( - new AcquiredRecords().setFirstOffset(1).setLastOffset(4).setDeliveryCount((short) 3), - new AcquiredRecords().setFirstOffset(5).setLastOffset(8).setDeliveryCount((short) 3), - new AcquiredRecords().setFirstOffset(9).setLastOffset(30).setDeliveryCount((short) 2), - new AcquiredRecords().setFirstOffset(31).setLastOffset(40).setDeliveryCount((short) 3) - ); - List recordBatches2 = List.of( - memoryRecordsBuilder(21, 5).build().batches().iterator().next(), - memoryRecordsBuilder(5, 31).build().batches().iterator().next() - ); - assertEquals( - List.of( - new AcquiredRecords().setFirstOffset(1).setLastOffset(4).setDeliveryCount((short) 3), - new AcquiredRecords().setFirstOffset(26).setLastOffset(30).setDeliveryCount((short) 2), - new AcquiredRecords().setFirstOffset(36).setLastOffset(40).setDeliveryCount((short) 3) + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - ), sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords2, recordBatches2) - ); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(149, sharePartition.endOffset()); - // Record batches is empty. - assertEquals(acquiredRecords2, sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords2, List.of())); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - List acquiredRecords3 = List.of( - new AcquiredRecords().setFirstOffset(0).setLastOffset(19).setDeliveryCount((short) 1) - ); - List recordBatches3 = List.of( - memoryRecordsBuilder(1, 8).build().batches().iterator().next(), - memoryRecordsBuilder(1, 18).build().batches().iterator().next() - ); + assertFalse(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); - assertEquals( - List.of( - new AcquiredRecords().setFirstOffset(0).setLastOffset(7).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(9).setLastOffset(17).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(19).setLastOffset(19).setDeliveryCount((short) 1) + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 89, Collections.singletonList((byte) 2)))); - ), sharePartition.filterRecordBatchesFromAcquiredRecords(acquiredRecords3, recordBatches3) - ); + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); + // The records have been released, thus they are still available for being acquired. + assertTrue(sharePartition.canAcquireRecords()); } @Test - public void testAcquireWithReadCommittedIsolationLevel() { - SharePartition sharePartition = Mockito.spy(SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build()); + public void testCanAcquireRecordsAfterArchiveAcknowledgement() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 10).close(); - memoryRecordsBuilder(buffer, 5, 15).close(); - memoryRecordsBuilder(buffer, 15, 20).close(); - memoryRecordsBuilder(buffer, 8, 50).close(); - memoryRecordsBuilder(buffer, 10, 58).close(); - memoryRecordsBuilder(buffer, 5, 70).close(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - buffer.flip(); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - FetchPartitionData fetchPartitionData = fetchPartitionData(records, newAbortedTransactions()); - - // We are mocking the result of function fetchAbortedTransactionRecordBatches. The records present at these offsets need to be archived. - // We won't be utilizing the aborted transactions passed in fetchPartitionData. - when(sharePartition.fetchAbortedTransactionRecordBatches(fetchPartitionData.records.batches(), fetchPartitionData.abortedTransactions.get())).thenReturn( - List.of( - memoryRecordsBuilder(5, 10).build().batches().iterator().next(), - memoryRecordsBuilder(10, 58).build().batches().iterator().next(), - memoryRecordsBuilder(5, 70).build().batches().iterator().next() - ) - ); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(149, sharePartition.endOffset()); - List acquiredRecordsList = fetchAcquiredRecords( - sharePartition.acquire( - MEMBER_ID, - 10 /* Batch size */, - 100, - DEFAULT_FETCH_OFFSET, - fetchPartitionData, - FetchIsolation.TXN_COMMITTED), - 45 /* Gap of 15 records will be added to second batch, gap of 2 records will also be added to fourth batch */); - - assertEquals(List.of( - new AcquiredRecords().setFirstOffset(15).setLastOffset(19).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(20).setLastOffset(49).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(50).setLastOffset(57).setDeliveryCount((short) 1), - new AcquiredRecords().setFirstOffset(68).setLastOffset(69).setDeliveryCount((short) 1) - ), acquiredRecordsList); - assertEquals(75, sharePartition.nextFetchOffset()); - - // Checking cached state. - assertEquals(4, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().containsKey(10L)); - assertTrue(sharePartition.cachedState().containsKey(20L)); - assertTrue(sharePartition.cachedState().containsKey(50L)); - assertTrue(sharePartition.cachedState().containsKey(70L)); - assertNotNull(sharePartition.cachedState().get(10L).offsetState()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(19L, sharePartition.cachedState().get(10L).lastOffset()); - assertEquals(49L, sharePartition.cachedState().get(20L).lastOffset()); - assertEquals(69L, sharePartition.cachedState().get(50L).lastOffset()); - assertEquals(74L, sharePartition.cachedState().get(70L).lastOffset()); + assertFalse(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(20L).batchState()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(70L).batchState()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 89, Collections.singletonList((byte) 3)))); - assertEquals(MEMBER_ID, sharePartition.cachedState().get(20L).batchMemberId()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(70L).batchMemberId()); + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(90, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); + assertTrue(sharePartition.canAcquireRecords()); + } - assertNotNull(sharePartition.cachedState().get(20L).batchAcquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(70L).batchAcquisitionLockTimeoutTask()); + @Test + public void testCanAcquireRecordsAfterAcceptAcknowledgement() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - Map expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(17L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(18L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(19L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(150, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(10L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(11L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(12L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(13L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(10L).offsetState().get(14L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(15L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(16L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(17L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(18L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(10L).offsetState().get(19L).acquisitionLockTimeoutTask()); + assertTrue(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(149, sharePartition.endOffset()); - expectedOffsetStateMap = new HashMap<>(); - expectedOffsetStateMap.put(50L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(51L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(52L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(53L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(54L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(55L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(56L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(57L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(58L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(59L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(60L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(61L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(62L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(63L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(64L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(65L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(66L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(67L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); - expectedOffsetStateMap.put(68L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - expectedOffsetStateMap.put(69L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); - assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(50L).offsetState()); - - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(50L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(51L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(52L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(53L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(54L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(55L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(56L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(57L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(58L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(59L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(60L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(61L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(62L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(63L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(64L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(65L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(66L).acquisitionLockTimeoutTask()); - assertNull(sharePartition.cachedState().get(50L).offsetState().get(67L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(68L).acquisitionLockTimeoutTask()); - assertNotNull(sharePartition.cachedState().get(50L).offsetState().get(69L).acquisitionLockTimeoutTask()); - } - - @SuppressWarnings({"rawtypes", "unchecked"}) - @Test - public void testContainsAbortMarker() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(100, 150), + Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false)); - // Record batch is not a control batch. - RecordBatch recordBatch = mock(RecordBatch.class); - when(recordBatch.isControlBatch()).thenReturn(false); - assertFalse(sharePartition.containsAbortMarker(recordBatch)); - - // Record batch is a control batch but doesn't contain any records. - recordBatch = mock(RecordBatch.class); - Iterator batchIterator = mock(Iterator.class); - when(batchIterator.hasNext()).thenReturn(false); - when(recordBatch.iterator()).thenReturn(batchIterator); - when(recordBatch.isControlBatch()).thenReturn(true); - assertFalse(sharePartition.containsAbortMarker(recordBatch)); - - // Record batch is a control batch which contains a record of type ControlRecordType.ABORT. - recordBatch = mock(RecordBatch.class); - batchIterator = mock(Iterator.class); - when(batchIterator.hasNext()).thenReturn(true); - DefaultRecord record = mock(DefaultRecord.class); - ByteBuffer buffer = ByteBuffer.allocate(4096); - // Buffer has to be created in a way that ControlRecordType.parse(buffer) returns ControlRecordType.ABORT. - buffer.putShort((short) 5); - buffer.putShort(ControlRecordType.ABORT.type()); - buffer.putInt(23432); // some field added in version 5 - buffer.flip(); - when(record.key()).thenReturn(buffer); - when(batchIterator.next()).thenReturn(record); - when(recordBatch.iterator()).thenReturn(batchIterator); - when(recordBatch.isControlBatch()).thenReturn(true); - assertTrue(sharePartition.containsAbortMarker(recordBatch)); - - // Record batch is a control batch which contains a record of type ControlRecordType.COMMIT. - recordBatch = mock(RecordBatch.class); - batchIterator = mock(Iterator.class); - when(batchIterator.hasNext()).thenReturn(true); - record = mock(DefaultRecord.class); - buffer = ByteBuffer.allocate(4096); - // Buffer has to be created in a way that ControlRecordType.parse(buffer) returns ControlRecordType.COMMIT. - buffer.putShort((short) 5); - buffer.putShort(ControlRecordType.COMMIT.type()); - buffer.putInt(23432); // some field added in version 5 - buffer.flip(); - when(record.key()).thenReturn(buffer); - when(batchIterator.next()).thenReturn(record); - when(recordBatch.iterator()).thenReturn(batchIterator); - when(recordBatch.isControlBatch()).thenReturn(true); - assertFalse(sharePartition.containsAbortMarker(recordBatch)); + assertFalse(sharePartition.canAcquireRecords()); + assertEquals(0, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); + + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 89, Collections.singletonList((byte) 1)))); + + // The SPSO should only move when the initial records in cached state are acknowledged with type ACKNOWLEDGE or ARCHIVED. + assertEquals(90, sharePartition.startOffset()); + assertEquals(249, sharePartition.endOffset()); + assertTrue(sharePartition.canAcquireRecords()); } @Test - public void testFetchAbortedTransactionRecordBatchesForOnlyAbortedTransactions() { + public void testAcknowledgeBatchWithWriteShareGroupStateFailure() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) .withState(SharePartitionState.ACTIVE) .build(); - // Case 1 - Creating 10 transactional records in a single batch followed by a ABORT marker record for producerId 1. - ByteBuffer buffer = ByteBuffer.allocate(1024); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 10, 1, 0); - buffer.flip(); - Records records = MemoryRecords.readableRecords(buffer); + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code(), Errors.UNKNOWN_TOPIC_OR_PARTITION.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - List abortedTransactions = List.of( - new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1) - ); - // records from 0 to 9 should be archived because they are a part of aborted transactions. - List actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions); - assertEquals(1, actual.size()); - assertEquals(0, actual.get(0).baseOffset()); - assertEquals(9, actual.get(0).lastOffset()); - assertEquals(1, actual.get(0).producerId()); - - // Case 2: 3 individual batches each followed by a ABORT marker record for producerId 1. - buffer = ByteBuffer.allocate(1024); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 0); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 2); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 1, 1, 4); - buffer.flip(); - records = MemoryRecords.readableRecords(buffer); - abortedTransactions = List.of( - new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(2).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(4).setProducerId(1) - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions); - assertEquals(3, actual.size()); - assertEquals(0, actual.get(0).baseOffset()); - assertEquals(0, actual.get(0).lastOffset()); - assertEquals(1, actual.get(0).producerId()); - assertEquals(2, actual.get(1).baseOffset()); - assertEquals(2, actual.get(1).lastOffset()); - assertEquals(1, actual.get(1).producerId()); - assertEquals(4, actual.get(2).baseOffset()); - assertEquals(4, actual.get(2).lastOffset()); - assertEquals(1, actual.get(2).producerId()); - - // Case 3: The producer id of records is different, so they should not be archived, - buffer = ByteBuffer.allocate(1024); - // We are creating 10 transactional records followed by a ABORT marker record for producerId 2. - newTransactionalRecords(buffer, ControlRecordType.ABORT, 10, 2, 0); - buffer.flip(); - records = MemoryRecords.readableRecords(buffer); - abortedTransactions = List.of( - new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1) - ); + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, UnknownTopicOrPartitionException.class); - actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions); - assertEquals(0, actual.size()); + // Due to failure in writeShareGroupState, the cached state should not be updated. + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); } @Test - public void testFetchAbortedTransactionRecordBatchesForAbortedAndCommittedTransactions() { + public void testAcknowledgeOffsetWithWriteShareGroupStateFailure() { + Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); SharePartition sharePartition = SharePartitionBuilder.builder() + .withPersister(persister) .withState(SharePartitionState.ACTIVE) .build(); - ByteBuffer buffer = ByteBuffer.allocate(1024); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 0); - newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 2, 3); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 2, 6); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 9); - newTransactionalRecords(buffer, ControlRecordType.COMMIT, 2, 1, 12); - newTransactionalRecords(buffer, ControlRecordType.ABORT, 2, 1, 15); - buffer.flip(); - Records records = MemoryRecords.readableRecords(buffer); - - // Case 1 - Aborted transactions does not contain the record batch from offsets 6-7 with producer id 2. - List abortedTransactions = List.of( - new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(6).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(9).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(15).setProducerId(1) - ); + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns false. + WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); + Mockito.when(persister.writeState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(writeShareGroupStateResult)); - List actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions); - assertEquals(3, actual.size()); - assertEquals(0, actual.get(0).baseOffset()); - assertEquals(1, actual.get(0).lastOffset()); - assertEquals(1, actual.get(0).producerId()); - assertEquals(9, actual.get(1).baseOffset()); - assertEquals(10, actual.get(1).lastOffset()); - assertEquals(1, actual.get(1).producerId()); - assertEquals(15, actual.get(2).baseOffset()); - assertEquals(16, actual.get(2).lastOffset()); - assertEquals(1, actual.get(2).producerId()); - - // Case 2 - Aborted transactions contains the record batch from offsets 6-7 with producer id 2. - abortedTransactions = List.of( - new FetchResponseData.AbortedTransaction().setFirstOffset(0).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(6).setProducerId(2), - new FetchResponseData.AbortedTransaction().setFirstOffset(9).setProducerId(1), - new FetchResponseData.AbortedTransaction().setFirstOffset(15).setProducerId(1) - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(6, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + CompletableFuture ackResult = sharePartition.acknowledge( + MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(8, 10, Collections.singletonList((byte) 3)))); + assertTrue(ackResult.isCompletedExceptionally()); + + // Due to failure in writeShareGroupState, the cached state should not be updated. + assertEquals(1, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(5L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(6L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(7L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(8L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(9L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).offsetState().get(10L).state()); - actual = sharePartition.fetchAbortedTransactionRecordBatches(records.batches(), abortedTransactions); - assertEquals(4, actual.size()); - assertEquals(0, actual.get(0).baseOffset()); - assertEquals(1, actual.get(0).lastOffset()); - assertEquals(1, actual.get(0).producerId()); - assertEquals(6, actual.get(1).baseOffset()); - assertEquals(7, actual.get(1).lastOffset()); - assertEquals(2, actual.get(1).producerId()); - assertEquals(9, actual.get(2).baseOffset()); - assertEquals(10, actual.get(2).lastOffset()); - assertEquals(1, actual.get(2).producerId()); - assertEquals(15, actual.get(3).baseOffset()); - assertEquals(16, actual.get(3).lastOffset()); - assertEquals(1, actual.get(3).producerId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(5L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(6L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(7L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(8L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(9L).memberId()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).offsetState().get(10L).memberId()); } @Test - public void testFetchLockReleasedByDifferentId() { - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .build(); - Uuid fetchId1 = Uuid.randomUuid(); - Uuid fetchId2 = Uuid.randomUuid(); + public void testAcknowledgeSubsetWithAnotherMember() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + sharePartition.acknowledge(MEMBER_ID, + Collections.singletonList(new ShareAcknowledgementBatch(5, 7, Collections.singletonList((byte) 1)))); - // Initially, fetch lock is not acquired. - assertNull(sharePartition.fetchLock()); - // fetchId1 acquires the fetch lock. - assertTrue(sharePartition.maybeAcquireFetchLock(fetchId1)); - // If we release fetch lock by fetchId2, it will work. Currently, we have kept the release of fetch lock as non-strict - // such that even if the caller's id for releasing fetch lock does not match the id that holds the lock, we will - // still release it. This has been done to avoid the scenarios where we hold the fetch lock for a share partition - // forever due to faulty code. In the future, we plan to make the locks handling strict, then this test case needs to be updated. - sharePartition.releaseFetchLock(fetchId2); - assertNull(sharePartition.fetchLock()); // Fetch lock has been released. + // Acknowledge subset with another member. + CompletableFuture ackResult = sharePartition.acknowledge("member-2", + Collections.singletonList(new ShareAcknowledgementBatch(9, 11, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); } @Test - public void testAcquireWhenBatchHasOngoingTransition() { - Persister persister = Mockito.mock(Persister.class); + public void testAcknowledgeWithAnotherMemberRollbackBatchError() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withPersister(persister) - .build(); - // Acquire a single batch with member-1. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 21, - fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM - ), 10 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Validate that there is no ongoing transition. - assertFalse(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); - // Return a future which will be completed later, so the batch state has ongoing transition. - CompletableFuture future = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(21, 30, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Assert the start offset has not moved and batch has ongoing transition. - assertEquals(21L, sharePartition.startOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().get(21L).batchHasOngoingStateTransition()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId()); - - // Acquire the same batch with member-2. This function call will return with 0 records since there is an ongoing - // transition for this batch. - fetchAcquiredRecords( - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 21, - fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM - ), 0 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(21L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(21L).batchMemberId()); + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)), + // Acknowledging batch with another member will cause failure and rollback. + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(15, 19, Collections.singletonList((byte) 1)))); - // Complete the future so acknowledge API can be completed, which updates the cache. Now the records can be acquired. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future.complete(writeShareGroupStateResult); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); - // Acquire the same batch with member-2. 10 records will be acquired. - fetchAcquiredRecords( - sharePartition.acquire("member-2", BATCH_SIZE, MAX_FETCH_RECORDS, 21, - fetchPartitionData(memoryRecords(10, 21)), FETCH_ISOLATION_HWM - ), 10 - ); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(21L).batchState()); - assertEquals("member-2", sharePartition.cachedState().get(21L).batchMemberId()); + // State should be rolled back to the previous state for any changes. + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals("member-2", sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); } @Test - public void testNextFetchOffsetWhenBatchHasOngoingTransition() { - Persister persister = Mockito.mock(Persister.class); + public void testAcknowledgeWithAnotherMemberRollbackSubsetError() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withPersister(persister) - .build(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acquire a single batch 0-9 with member-1. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0, - fetchPartitionData(memoryRecords(10, 0)), FETCH_ISOLATION_HWM - ), 10 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acquire a single batch 10-19 with member-1. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 10, - fetchPartitionData(memoryRecords(10, 10)), FETCH_ISOLATION_HWM - ), 10 - ); + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 15), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Validate that there is no ongoing transition. - assertEquals(2, sharePartition.cachedState().size()); - assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + CompletableFuture ackResult = sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(10, 14, Collections.singletonList((byte) 1)), + // Acknowledging subset with another member will cause failure and rollback. + new ShareAcknowledgementBatch(16, 18, Collections.singletonList((byte) 1)))); + assertTrue(ackResult.isCompletedExceptionally()); + assertFutureThrows(ackResult, InvalidRecordStateException.class); + + assertEquals(3, sharePartition.cachedState().size()); + // Check the state of the cache. State should be rolled back to the previous state for any changes. + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(5L).batchDeliveryCount()); assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); + assertEquals(MEMBER_ID, sharePartition.cachedState().get(10L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(10L).batchDeliveryCount()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(15L).batchState()); + assertEquals("member-2", sharePartition.cachedState().get(15L).batchMemberId()); + assertEquals(1, sharePartition.cachedState().get(15L).batchDeliveryCount()); + } - // Return futures which will be completed later, so the batch state has ongoing transition. - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); + @Test + public void testMaxDeliveryCountLimitExceededForRecordBatch() { + SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) + .withState(SharePartitionState.ACTIVE) + .build(); + MemoryRecords records = memoryRecords(10, 5); - // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for - // offsets 0-9 and 10-19 respectively. - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 9, List.of(AcknowledgeType.RELEASE.id)))); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(10, 19, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 2)))); - // Complete future2 so second acknowledge API can be completed, which updates the cache. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future2.complete(writeShareGroupStateResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Offsets 0-9 will have ongoing state transition since future1 is not complete yet. - // Offsets 10-19 won't have ongoing state transition since future2 has been completed. - assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(10L).batchHasOngoingStateTransition()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(5, 14, Collections.singletonList((byte) 2)))); - // nextFetchOffset should return 10 and not 0 because batch 0-9 is undergoing state transition. - assertEquals(10, sharePartition.nextFetchOffset()); + // All the records in the batch reached the max delivery count, hence they got archived and the cached state cleared. + assertEquals(15, sharePartition.nextFetchOffset()); + assertEquals(15, sharePartition.startOffset()); + assertEquals(15, sharePartition.endOffset()); + assertEquals(0, sharePartition.cachedState().size()); } @Test - public void testNextFetchOffsetWhenOffsetsHaveOngoingTransition() { - Persister persister = Mockito.mock(Persister.class); - + public void testMaxDeliveryCountLimitExceededForRecordsSubset() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) .withState(SharePartitionState.ACTIVE) - .withPersister(persister) .build(); + // First fetch request with 5 records starting from offset 10. + MemoryRecords records1 = memoryRecords(5, 10); + // Second fetch request with 5 records starting from offset 15. + MemoryRecords records2 = memoryRecords(5, 15); - // Acquire a single batch 0-50 with member-1. - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0, - fetchPartitionData(memoryRecords(50, 0)), FETCH_ISOLATION_HWM - ), 50 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Validate that there is no ongoing transition. - assertFalse(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); + sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(Arrays.asList( + new ShareAcknowledgementBatch(10, 12, Collections.singletonList((byte) 1)), + new ShareAcknowledgementBatch(13, 16, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(17, 19, Collections.singletonList((byte) 1))))); - // Return futures which will be completed later, so the batch state has ongoing transition. - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); + // Send next batch from offset 13, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for - // offsets 5-9 and 20-24 respectively. - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + // Send next batch from offset 15, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records2, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(5, 9, List.of(AcknowledgeType.RELEASE.id)))); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(20, 24, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(13, 16, Collections.singletonList((byte) 2)))); - // Complete future2 so second acknowledge API can be completed, which updates the cache. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future2.complete(writeShareGroupStateResult); - - // Offsets 5-9 will have ongoing state transition since future1 is not complete yet. - // Offsets 20-24 won't have ongoing state transition since future2 has been completed. - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(5L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(6L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(7L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(8L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(9L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(20L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(21L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(22L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(23L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(24L).hasOngoingStateTransition()); - - // nextFetchOffset should return 20 and not 5 because offsets 5-9 is undergoing state transition. assertEquals(20, sharePartition.nextFetchOffset()); + // Cached state will be empty because after the second release, the acquired records will now have moved to + // ARCHIVE state, since their max delivery count exceeded. Also, now since all the records are either in ACKNOWLEDGED or ARCHIVED + // state, cached state should be empty. + assertEquals(0, sharePartition.cachedState().size()); } @Test - public void testAcquisitionLockTimeoutWithConcurrentAcknowledgement() throws InterruptedException { - Persister persister = Mockito.mock(Persister.class); + public void testMaxDeliveryCountLimitExceededForRecordsSubsetAndCachedStateNotCleared() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxDeliveryCount(2) .withState(SharePartitionState.ACTIVE) - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withPersister(persister) .build(); + // First fetch request with 5 records starting from offset 0. + MemoryRecords records1 = memoryRecords(5, 0); - // Create 2 batches of records. - ByteBuffer buffer = ByteBuffer.allocate(4096); - memoryRecordsBuilder(buffer, 5, 0).close(); - memoryRecordsBuilder(buffer, 15, 5).close(); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - buffer.flip(); + sharePartition.acknowledge(MEMBER_ID, new ArrayList<>(Collections.singletonList( + new ShareAcknowledgementBatch(0, 1, Collections.singletonList((byte) 2))))); - MemoryRecords records = MemoryRecords.readableRecords(buffer); - // Acquire 10 records. - fetchAcquiredRecords(sharePartition.acquire( - MEMBER_ID, - 5, /* Batch size of 5 so cache can have 2 entries */ - 10, - DEFAULT_FETCH_OFFSET, - fetchPartitionData(records, 0), - FETCH_ISOLATION_HWM), - 20); + // Send next batch from offset 0, only 2 records should be acquired. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 40, 3, memoryRecords(2, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(2, sharePartition.timer().size()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(0, 4, Collections.singletonList((byte) 2)))); + + assertEquals(2, sharePartition.nextFetchOffset()); + assertEquals(1, sharePartition.cachedState().size()); - // Return 2 future which will be completed later. - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + Map expectedOffsetStateMap = new HashMap<>(); + expectedOffsetStateMap.put(0L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(1L, new InFlightState(RecordState.ARCHIVED, (short) 2, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(2L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(3L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(4L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(0L).offsetState()); + } - // Store the corresponding batch timer tasks. - TimerTask timerTask1 = sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask(); - TimerTask timerTask2 = sharePartition.cachedState().get(5L).batchAcquisitionLockTimeoutTask(); + @Test + public void testNextFetchOffsetPostAcquireAndAcknowledgeFunctionality() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(10, 0); + String memberId1 = "memberId-1"; + String memberId2 = "memberId-2"; - // Acknowledge 1 offset in first batch as Accept to create offset tracking, accept complete - // sencond batch. And mark offset 0 as release so cached state do not move ahead. - sharePartition.acknowledge(MEMBER_ID, List.of( - new ShareAcknowledgementBatch(0, 0, List.of(AcknowledgeType.RELEASE.id)), - new ShareAcknowledgementBatch(1, 1, List.of(AcknowledgeType.ACCEPT.id)), - new ShareAcknowledgementBatch(5, 19, List.of(AcknowledgeType.ACCEPT.id)))); + sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Assert the start offset has not moved. - assertEquals(0L, sharePartition.startOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); - // Verify ongoing transition states. - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(0L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(0L).offsetState().get(1L).hasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(0L).offsetState().get(2L).hasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(5L).batchHasOngoingStateTransition()); - - // Validate first timer task is already cancelled. - assertTrue(timerTask1.isCancelled()); - assertFalse(timerTask2.isCancelled()); - - // Fetch offset state timer tasks. - TimerTask timerTaskOffsetState1 = sharePartition.cachedState().get(0L).offsetState().get(0L).acquisitionLockTimeoutTask(); - TimerTask timerTaskOffsetState2 = sharePartition.cachedState().get(0L).offsetState().get(1L).acquisitionLockTimeoutTask(); - TimerTask timerTaskOffsetState3 = sharePartition.cachedState().get(0L).offsetState().get(2L).acquisitionLockTimeoutTask(); - - // Complete futures. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - future1.complete(writeShareGroupStateResult); - future2.complete(writeShareGroupStateResult); + assertFalse(sharePartition.findNextFetchOffset()); + assertEquals(10, sharePartition.nextFetchOffset()); - // Verify timer tasks are now cancelled, except unacknowledged offsets. - assertEquals(2, sharePartition.cachedState().size()); - assertTrue(timerTask2.isCancelled()); - assertTrue(timerTaskOffsetState1.isCancelled()); - assertTrue(timerTaskOffsetState2.isCancelled()); - assertFalse(timerTaskOffsetState3.isCancelled()); + sharePartition.acquire(memberId2, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(10, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Verify the state prior executing the timer tasks. - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + assertFalse(sharePartition.findNextFetchOffset()); + assertEquals(20, sharePartition.nextFetchOffset()); - // Running expired timer tasks should not mark offsets available, except for offset 2. - timerTask1.run(); - // State should remain same. - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + sharePartition.acknowledge(memberId1, Collections.singletonList( + new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2)))); - timerTask2.run(); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(5L).batchState()); + assertTrue(sharePartition.findNextFetchOffset()); + assertEquals(5, sharePartition.nextFetchOffset()); - timerTaskOffsetState2.run(); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Should update the state to available as the timer task is not yet expired. - timerTaskOffsetState3.run(); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertTrue(sharePartition.findNextFetchOffset()); + assertEquals(20, sharePartition.nextFetchOffset()); + assertFalse(sharePartition.findNextFetchOffset()); } @Test - public void testLsoMovementWithWriteStateRPCFailuresInAcknowledgement() { - Persister persister = Mockito.mock(Persister.class); + public void testNextFetchOffsetWithMultipleConsumers() { SharePartition sharePartition = SharePartitionBuilder.builder() + .withMaxInflightMessages(100) .withState(SharePartitionState.ACTIVE) - .withPersister(persister) .build(); + MemoryRecords records1 = memoryRecords(3, 0); + String memberId1 = MEMBER_ID; + String memberId2 = "member-2"; - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // Validate that there is no ongoing transition. - assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition()); - assertFalse(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition()); - - // Return futures which will be completed later, so the batch state has ongoing transition. - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); + sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(3, sharePartition.nextFetchOffset()); - // Mocking the persister write state RPC to return future 1 and future 2 when acknowledgement occurs for - // offsets 2-6 and 7-11 respectively. - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); + sharePartition.acknowledge(memberId1, Collections.singletonList( + new ShareAcknowledgementBatch(0, 2, Collections.singletonList((byte) 2)))); + assertEquals(0, sharePartition.nextFetchOffset()); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(2, 6, List.of(AcknowledgeType.RELEASE.id)))); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.RELEASE.id)))); + sharePartition.acquire(memberId2, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(2, 3), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(0, sharePartition.nextFetchOffset()); - // Validate that there is no ongoing transition. - assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition()); + sharePartition.acquire(memberId1, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(5, sharePartition.nextFetchOffset()); - // Move LSO to 7, so some records/offsets can be marked archived for the first batch. - sharePartition.updateCacheAndOffsets(7L); + sharePartition.acknowledge(memberId2, Collections.singletonList( + new ShareAcknowledgementBatch(3, 4, Collections.singletonList((byte) 2)))); + assertEquals(3, sharePartition.nextFetchOffset()); + } - // Start offset will be moved. - assertEquals(12L, sharePartition.nextFetchOffset()); - assertEquals(7L, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(7L).batchState()); + @Test + public void testNumberOfWriteCallsOnUpdates() { + SharePartition sharePartition = Mockito.spy(SharePartitionBuilder.builder() + .withState(SharePartitionState.ACTIVE) + .build()); - // Complete future1 exceptionally so acknowledgement for 2-6 offsets will be completed. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - future1.complete(writeShareGroupStateResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 10, 0, memoryRecords(5, 2), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // The completion of future1 with exception should not impact the cached state since those records have already - // been archived. - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertFalse(sharePartition.cachedState().get(2L).batchHasOngoingStateTransition()); - assertTrue(sharePartition.cachedState().get(7L).batchHasOngoingStateTransition()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(7L).batchState()); + sharePartition.acknowledge(MEMBER_ID, Collections.singletonList( + new ShareAcknowledgementBatch(2, 6, Collections.singletonList((byte) 1)))); + // Acknowledge records will induce 1 write state RPC call via function isWriteShareGroupStateSuccessful. + Mockito.verify(sharePartition, Mockito.times(1)).writeShareGroupState(anyList()); - future2.complete(writeShareGroupStateResult); - assertEquals(12L, sharePartition.nextFetchOffset()); - assertEquals(7, sharePartition.startOffset()); - assertEquals(11, sharePartition.endOffset()); - assertEquals(2, sharePartition.cachedState().size()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).batchState()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); + sharePartition.releaseAcquiredRecords(MEMBER_ID); + // Release acquired records will induce 0 write state RPC call via function isWriteShareGroupStateSuccessful + // because the in-flight batch has been acknowledged. Hence, the total calls remain 1. + Mockito.verify(sharePartition, Mockito.times(1)).writeShareGroupState(anyList()); } @Test - public void testAcquisitionLockTimeoutWithWriteStateRPCFailure() throws InterruptedException { - Persister persister = Mockito.mock(Persister.class); - SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withPersister(persister) - .build(); + public void testReacquireSubsetWithAnotherMember() { + SharePartition sharePartition = SharePartitionBuilder.builder().withState(SharePartitionState.ACTIVE).build(); + MemoryRecords records1 = memoryRecords(5, 5); - fetchAcquiredRecords( - sharePartition.acquire(MEMBER_ID, BATCH_SIZE, MAX_FETCH_RECORDS, 0, - fetchPartitionData(memoryRecords(2, 0)), FETCH_ISOLATION_HWM - ), 2 - ); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - assertEquals(1, sharePartition.timer().size()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).batchState()); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(12, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - // Return a future which will be completed later, so the batch state has ongoing transition. - CompletableFuture future = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); + sharePartition.acknowledge(MEMBER_ID, Arrays.asList( + new ShareAcknowledgementBatch(5, 11, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(12, 13, Collections.singletonList((byte) 0)), + new ShareAcknowledgementBatch(14, 15, Collections.singletonList((byte) 2)), + new ShareAcknowledgementBatch(17, 20, Collections.singletonList((byte) 2)))); - // Acknowledge batch to create ongoing transition. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(0, 1, List.of(AcknowledgeType.ACCEPT.id)))); - // Assert the start offset has not moved and batch has ongoing transition. - assertEquals(0L, sharePartition.startOffset()); - assertEquals(1, sharePartition.cachedState().size()); - assertTrue(sharePartition.cachedState().get(0L).batchHasOngoingStateTransition()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(0L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - // Timer task has not been expired yet. - assertFalse(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired()); + // Reacquire with another member. + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, records1, + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(10, sharePartition.nextFetchOffset()); - // Allowing acquisition lock to expire. This will not cause any change because the record is not in ACQUIRED state. - // This will remove the entry of the timer task from timer. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.cachedState().get(0L).batchState() == RecordState.ACKNOWLEDGED && - sharePartition.cachedState().get(0L).batchDeliveryCount() == 1 && - sharePartition.timer().size() == 0, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(0L, List.of()))); + // Reacquire with another member. + sharePartition.acquire("member-2", MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 30, 0, memoryRecords(7, 10), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(17, sharePartition.nextFetchOffset()); - // Acquisition lock timeout task has run already and is not null. - assertNotNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); - // Timer task should be expired now. - assertTrue(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask().hasExpired()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals("member-2", sharePartition.cachedState().get(5L).batchMemberId()); + assertEquals(2, sharePartition.cachedState().get(5L).batchDeliveryCount()); - // Complete future exceptionally so acknowledgement for 0-1 offsets will be completed. - WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - future.complete(writeShareGroupStateResult); + // Check cached state. + Map expectedOffsetStateMap = new HashMap<>(); + // Records 10-11, 14-15 were reacquired by member-2. + expectedOffsetStateMap.put(10L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); + expectedOffsetStateMap.put(11L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); + // Records 12-13 were kept as gapOffsets, hence they are not reacquired and are kept in ARCHIVED state. + expectedOffsetStateMap.put(12L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(13L, new InFlightState(RecordState.ARCHIVED, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(14L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); + expectedOffsetStateMap.put(15L, new InFlightState(RecordState.ACQUIRED, (short) 2, "member-2")); + // Record 16 was not released in the acknowledgements. It was included in the reacquire by member-2, + // still its ownership is with member-1 and delivery count is 1. + expectedOffsetStateMap.put(16L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + expectedOffsetStateMap.put(17L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(18L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(19L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(20L, new InFlightState(RecordState.AVAILABLE, (short) 1, EMPTY_MEMBER_ID)); + expectedOffsetStateMap.put(21L, new InFlightState(RecordState.ACQUIRED, (short) 1, MEMBER_ID)); + assertEquals(expectedOffsetStateMap, sharePartition.cachedState().get(10L).offsetState()); + } - // Even though write state RPC has failed and corresponding acquisition lock timeout task has expired, - // the record should not stuck in ACQUIRED state with no acquisition lock timeout task. - assertEquals(1, sharePartition.cachedState().size()); - assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).batchState()); - assertEquals(EMPTY_MEMBER_ID, sharePartition.cachedState().get(0L).batchMemberId()); - assertNull(sharePartition.cachedState().get(0L).batchAcquisitionLockTimeoutTask()); + @Test + public void testMaybeInitializeWhenReadStateRpcReturnsZeroAvailableRecords() { + List stateBatches = new ArrayList<>(); + for (int i = 0; i < 500; i++) { + stateBatches.add(new PersisterStateBatch(234L + i, 234L + i, RecordState.ACKNOWLEDGED.id, (short) 1)); + } + stateBatches.add(new PersisterStateBatch(232L, 232L, RecordState.ARCHIVED.id, (short) 1)); + + Persister persister = Mockito.mock(Persister.class); + ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionAllData(0, 3, 232L, Errors.NONE.code(), Errors.NONE.message(), + stateBatches))))); + Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); + SharePartition sharePartition = SharePartitionBuilder.builder().withPersister(persister).build(); + + CompletableFuture result = sharePartition.maybeInitialize(); + assertTrue(result.isDone()); + assertFalse(result.isCompletedExceptionally()); + + assertTrue(sharePartition.cachedState().isEmpty()); + assertEquals(734, sharePartition.nextFetchOffset()); + assertEquals(734, sharePartition.startOffset()); + assertEquals(734, sharePartition.endOffset()); } @Test - public void testRecordArchivedWithWriteStateRPCFailure() throws InterruptedException { + public void testAcquireWithWriteShareGroupStateDelay() { Persister persister = Mockito.mock(Persister.class); + mockPersisterReadStateMethod(persister); SharePartition sharePartition = SharePartitionBuilder.builder() - .withState(SharePartitionState.ACTIVE) - .withDefaultAcquisitionLockTimeoutMs(ACQUISITION_LOCK_TIMEOUT_MS) - .withMaxDeliveryCount(2) .withPersister(persister) + .withState(SharePartitionState.ACTIVE) .build(); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - // Futures which will be completed later, so the batch state has ongoing transition. - CompletableFuture future1 = new CompletableFuture<>(); - CompletableFuture future2 = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); - - // Acknowledge batches. - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id)))); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id)))); - - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(2L).offsetState().get(3L).state()); - assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount()); - + // Mock persister writeState method so that sharePartition.isWriteShareGroupStateSuccessful() returns true with a delay of 5 sec. WriteShareGroupStateResult writeShareGroupStateResult = Mockito.mock(WriteShareGroupStateResult.class); - Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( - PartitionFactory.newPartitionErrorData(0, Errors.GROUP_ID_NOT_FOUND.code(), Errors.GROUP_ID_NOT_FOUND.message()))))); - - future1.complete(writeShareGroupStateResult); - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state()); - assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount()); - - future2.complete(writeShareGroupStateResult); - assertEquals(12L, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(2L).offsetState().get(3L).state()); - assertEquals(1, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount()); - assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(1, sharePartition.cachedState().get(7L).batchDeliveryCount()); - - // Allowing acquisition lock to expire. This will also ensure that acquisition lock timeout task - // is run successfully post write state RPC failure. - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - TestUtils.waitForCondition( - () -> sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.AVAILABLE && - sharePartition.cachedState().get(7L).batchState() == RecordState.AVAILABLE && - sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount() == 1 && - sharePartition.cachedState().get(7L).batchDeliveryCount() == 1 && - sharePartition.timer().size() == 0, - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of()))); - // Acquisition lock timeout task has run already and next fetch offset is moved to 2. - assertEquals(2, sharePartition.nextFetchOffset()); - // Send the same batches again. - fetchAcquiredRecords(sharePartition, memoryRecords(5, 2), 5); - fetchAcquiredRecords(sharePartition, memoryRecords(5, 7), 5); - - future1 = new CompletableFuture<>(); - future2 = new CompletableFuture<>(); - Mockito.when(persister.writeState(Mockito.any())).thenReturn(future1).thenReturn(future2); - - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(3, 3, List.of(AcknowledgeType.ACCEPT.id)))); - sharePartition.acknowledge(MEMBER_ID, List.of(new ShareAcknowledgementBatch(7, 11, List.of(AcknowledgeType.ACCEPT.id)))); - - mockTimer.advanceClock(DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS); - // Verify the timer tasks have run and the state is archived for the offsets which are not acknowledged, - // but the acquisition lock timeout task should be just expired for acknowledged offsets, though - // the state should not be archived. - TestUtils.waitForCondition( - () -> sharePartition.cachedState().get(2L).offsetState().get(2L).state() == RecordState.ARCHIVED && - sharePartition.cachedState().get(2L).offsetState().get(3L).state() == RecordState.ACKNOWLEDGED && - sharePartition.cachedState().get(2L).offsetState().get(3L).acquisitionLockTimeoutTask().hasExpired() && - sharePartition.cachedState().get(7L).batchState() == RecordState.ACKNOWLEDGED && - sharePartition.cachedState().get(7L).batchAcquisitionLockTimeoutTask().hasExpired(), - DEFAULT_MAX_WAIT_ACQUISITION_LOCK_TIMEOUT_MS, - () -> assertionFailedMessage(sharePartition, Map.of(2L, List.of(3L), 7L, List.of()))); - - future1.complete(writeShareGroupStateResult); - // Now the state should be archived for the offsets despite the write state RPC failure, as the - // delivery count has reached the max delivery count and the acquisition lock timeout task - // has already expired for the offsets which were acknowledged. - assertEquals(12, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(2L).offsetState().get(3L).state()); - assertEquals(2, sharePartition.cachedState().get(2L).offsetState().get(3L).deliveryCount()); - assertEquals(RecordState.ACKNOWLEDGED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount()); - - future2.complete(writeShareGroupStateResult); - assertEquals(12L, sharePartition.nextFetchOffset()); - assertEquals(RecordState.ARCHIVED, sharePartition.cachedState().get(7L).batchState()); - assertEquals(2, sharePartition.cachedState().get(7L).batchDeliveryCount()); - } - - /** - * This function produces transactional data of a given no. of records followed by a transactional marker (COMMIT/ABORT). - */ - private void newTransactionalRecords(ByteBuffer buffer, ControlRecordType controlRecordType, int numRecords, long producerId, long baseOffset) { - try (MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, - RecordBatch.CURRENT_MAGIC_VALUE, - Compression.NONE, - TimestampType.CREATE_TIME, - baseOffset, - MOCK_TIME.milliseconds(), - producerId, - (short) 0, - 0, - true, - RecordBatch.NO_PARTITION_LEADER_EPOCH)) { - for (int i = 0; i < numRecords; i++) - builder.append(new SimpleRecord(MOCK_TIME.milliseconds(), "key".getBytes(), "value".getBytes())); - - builder.build(); - } - writeTransactionMarker(buffer, controlRecordType, (int) baseOffset + numRecords, producerId); - } - - private void writeTransactionMarker(ByteBuffer buffer, ControlRecordType controlRecordType, int offset, long producerId) { - MemoryRecords.writeEndTransactionalMarker(buffer, - offset, - MOCK_TIME.milliseconds(), - 0, - producerId, - (short) 0, - new EndTransactionMarker(controlRecordType, 0)); - } + Mockito.when(writeShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( + PartitionFactory.newPartitionErrorData(0, Errors.NONE.code(), Errors.NONE.message()))))); - private List newAbortedTransactions() { - FetchResponseData.AbortedTransaction abortedTransaction = new FetchResponseData.AbortedTransaction(); - abortedTransaction.setFirstOffset(0); - abortedTransaction.setProducerId(1000L); - return List.of(abortedTransaction); - } + CompletableFuture future = new CompletableFuture<>(); + // persister.writeState RPC will not complete instantaneously due to which commit won't happen for acknowledged offsets. + Mockito.when(persister.writeState(Mockito.any())).thenReturn(future); - private FetchPartitionData fetchPartitionData(Records records) { - return fetchPartitionData(records, 0); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - private FetchPartitionData fetchPartitionData(Records records, List abortedTransactions) { - return fetchPartitionData(records, 0, abortedTransactions); - } + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(5, 5), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - private FetchPartitionData fetchPartitionData(Records records, long logStartOffset) { - return new FetchPartitionData(Errors.NONE, 5, logStartOffset, records, - Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false); - } + List acknowledgementBatches = new ArrayList<>(); + acknowledgementBatches.add(new ShareAcknowledgementBatch(2, 3, Collections.singletonList((byte) 2))); + acknowledgementBatches.add(new ShareAcknowledgementBatch(5, 9, Collections.singletonList((byte) 2))); + // Acknowledge 2-3, 5-9 offsets with RELEASE acknowledge type. + sharePartition.acknowledge(MEMBER_ID, acknowledgementBatches); - private FetchPartitionData fetchPartitionData(Records records, long logStartOffset, List abortedTransactions) { - return new FetchPartitionData(Errors.NONE, 5, logStartOffset, records, - Optional.empty(), OptionalLong.empty(), Optional.of(abortedTransactions), OptionalInt.empty(), false); - } + assertEquals(2, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); - private List fetchAcquiredRecords(SharePartition sharePartition, Records records, long logStartOffset, int expectedOffsetCount) { - return fetchAcquiredRecords(sharePartition, records, records.batches().iterator().next().baseOffset(), logStartOffset, expectedOffsetCount); - } + // Even though offsets 2-3, 5-9 are in available state, but they won't be acquired since they are still in transition from ACQUIRED + // to AVAILABLE state as the write state RPC has not completed yet, so the commit hasn't happened yet. + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); - private List fetchAcquiredRecords(SharePartition sharePartition, Records records, long fetchOffset, long logStartOffset, int expectedOffsetCount) { - ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - MAX_FETCH_RECORDS, - fetchOffset, - fetchPartitionData(records, logStartOffset), - FETCH_ISOLATION_HWM); - return fetchAcquiredRecords(shareAcquiredRecords, expectedOffsetCount); - } + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.AVAILABLE, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); - private List fetchAcquiredRecords(SharePartition sharePartition, Records records, int expectedOffsetCount) { - ShareAcquiredRecords shareAcquiredRecords = sharePartition.acquire( - MEMBER_ID, - BATCH_SIZE, - MAX_FETCH_RECORDS, - records.batches().iterator().next().baseOffset(), - fetchPartitionData(records), - FETCH_ISOLATION_HWM); - return fetchAcquiredRecords(shareAcquiredRecords, expectedOffsetCount); + // persister.writeState RPC will complete now. This is going to commit all the acknowledged batches. Hence, their + // rollBack state will become null and they will be available for acquire again. + future.complete(writeShareGroupStateResult); + sharePartition.acquire(MEMBER_ID, MAX_FETCH_RECORDS, new FetchPartitionData(Errors.NONE, 20, 0, memoryRecords(15, 0), + Optional.empty(), OptionalLong.empty(), Optional.empty(), + OptionalInt.empty(), false)); + assertEquals(3, sharePartition.cachedState().size()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(0L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(1L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(2L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(3L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(0L).offsetState().get(4L).state()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(5L).batchState()); + assertEquals(RecordState.ACQUIRED, sharePartition.cachedState().get(10L).batchState()); } private List fetchAcquiredRecords(ShareAcquiredRecords shareAcquiredRecords, int expectedOffsetCount) { @@ -8793,13 +5614,24 @@ private MemoryRecords memoryRecords(int numOfRecords) { } private MemoryRecords memoryRecords(int numOfRecords, long startOffset) { - try (MemoryRecordsBuilder builder = memoryRecordsBuilder(numOfRecords, startOffset)) { - return builder.build(); + return memoryRecordsBuilder(numOfRecords, startOffset).build(); + } + + private MemoryRecordsBuilder memoryRecordsBuilder(int numOfRecords, long startOffset) { + return memoryRecordsBuilder(ByteBuffer.allocate(1024), numOfRecords, startOffset); + } + + private MemoryRecordsBuilder memoryRecordsBuilder(ByteBuffer buffer, int numOfRecords, long startOffset) { + MemoryRecordsBuilder builder = MemoryRecords.builder(buffer, Compression.NONE, + TimestampType.CREATE_TIME, startOffset, 2); + for (int i = 0; i < numOfRecords; i++) { + builder.appendWithOffset(startOffset + i, 0L, TestUtils.randomString(10).getBytes(), TestUtils.randomString(10).getBytes()); } + return builder; } private List expectedAcquiredRecord(long baseOffset, long lastOffset, int deliveryCount) { - return List.of(new AcquiredRecords() + return Collections.singletonList(new AcquiredRecords() .setFirstOffset(baseOffset) .setLastOffset(lastOffset) .setDeliveryCount((short) deliveryCount)); @@ -8827,10 +5659,10 @@ private List expectedAcquiredRecords(long baseOffset, long last public void mockPersisterReadStateMethod(Persister persister) { ReadShareGroupStateResult readShareGroupStateResult = Mockito.mock(ReadShareGroupStateResult.class); - Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(List.of( - new TopicData<>(TOPIC_ID_PARTITION.topicId(), List.of( + Mockito.when(readShareGroupStateResult.topicsData()).thenReturn(Collections.singletonList( + new TopicData<>(TOPIC_ID_PARTITION.topicId(), Collections.singletonList( PartitionFactory.newPartitionAllData(0, 0, 0L, Errors.NONE.code(), Errors.NONE.message(), - List.of()))))); + Collections.emptyList()))))); Mockito.when(persister.readState(Mockito.any())).thenReturn(CompletableFuture.completedFuture(readShareGroupStateResult)); } @@ -8838,17 +5670,15 @@ private static class SharePartitionBuilder { private int defaultAcquisitionLockTimeoutMs = 30000; private int maxDeliveryCount = MAX_DELIVERY_COUNT; - private int maxInflightRecords = MAX_IN_FLIGHT_RECORDS; + private int maxInflightMessages = MAX_IN_FLIGHT_MESSAGES; - private Persister persister = new NoOpStatePersister(); + private Persister persister = new NoOpShareStatePersister(); private ReplicaManager replicaManager = Mockito.mock(ReplicaManager.class); private GroupConfigManager groupConfigManager = Mockito.mock(GroupConfigManager.class); private SharePartitionState state = SharePartitionState.EMPTY; - private Time time = MOCK_TIME; - private SharePartitionMetrics sharePartitionMetrics = Mockito.mock(SharePartitionMetrics.class); - private SharePartitionBuilder withMaxInflightRecords(int maxInflightRecords) { - this.maxInflightRecords = maxInflightRecords; + private SharePartitionBuilder withMaxInflightMessages(int maxInflightMessages) { + this.maxInflightMessages = maxInflightMessages; return this; } @@ -8882,24 +5712,14 @@ private SharePartitionBuilder withState(SharePartitionState state) { return this; } - private SharePartitionBuilder withTime(Time time) { - this.time = time; - return this; - } - - private SharePartitionBuilder withSharePartitionMetrics(SharePartitionMetrics sharePartitionMetrics) { - this.sharePartitionMetrics = sharePartitionMetrics; - return this; - } - public static SharePartitionBuilder builder() { return new SharePartitionBuilder(); } public SharePartition build() { - return new SharePartition(GROUP_ID, TOPIC_ID_PARTITION, 0, maxInflightRecords, maxDeliveryCount, - defaultAcquisitionLockTimeoutMs, mockTimer, time, persister, replicaManager, groupConfigManager, - state, Mockito.mock(SharePartitionListener.class), sharePartitionMetrics); + return new SharePartition(GROUP_ID, TOPIC_ID_PARTITION, 0, maxInflightMessages, maxDeliveryCount, + defaultAcquisitionLockTimeoutMs, mockTimer, MOCK_TIME, persister, replicaManager, groupConfigManager, + state, Mockito.mock(SharePartitionListener.class)); } } } diff --git a/core/src/test/java/kafka/test/api/CustomQuotaCallbackTest.java b/core/src/test/java/kafka/test/api/CustomQuotaCallbackTest.java new file mode 100644 index 0000000000000..a7da8ce4eb592 --- /dev/null +++ b/core/src/test/java/kafka/test/api/CustomQuotaCallbackTest.java @@ -0,0 +1,131 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.test.api; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.common.Cluster; +import org.apache.kafka.common.security.auth.KafkaPrincipal; +import org.apache.kafka.common.test.ClusterInstance; +import org.apache.kafka.common.test.TestUtils; +import org.apache.kafka.common.test.api.ClusterConfigProperty; +import org.apache.kafka.common.test.api.ClusterTest; +import org.apache.kafka.common.test.api.ClusterTestDefaults; +import org.apache.kafka.common.test.api.Type; +import org.apache.kafka.common.test.junit.ClusterTestExtensions; +import org.apache.kafka.server.config.QuotaConfig; +import org.apache.kafka.server.quota.ClientQuotaCallback; +import org.apache.kafka.server.quota.ClientQuotaEntity; +import org.apache.kafka.server.quota.ClientQuotaType; + +import org.junit.jupiter.api.extension.ExtendWith; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.atomic.AtomicInteger; + +@ClusterTestDefaults(controllers = 3, + types = {Type.KRAFT}, + serverProperties = { + @ClusterConfigProperty(id = 3000, key = QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, value = "kafka.test.api.CustomQuotaCallbackTest$CustomQuotaCallback"), + @ClusterConfigProperty(id = 3001, key = QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, value = "kafka.test.api.CustomQuotaCallbackTest$CustomQuotaCallback"), + @ClusterConfigProperty(id = 3002, key = QuotaConfig.CLIENT_QUOTA_CALLBACK_CLASS_CONFIG, value = "kafka.test.api.CustomQuotaCallbackTest$CustomQuotaCallback"), + } +) +@ExtendWith(ClusterTestExtensions.class) +public class CustomQuotaCallbackTest { + + private final ClusterInstance cluster; + + public CustomQuotaCallbackTest(ClusterInstance clusterInstance) { + this.cluster = clusterInstance; + } + + @ClusterTest + public void testCustomQuotaCallbackWithControllerServer() throws InterruptedException { + + try (Admin admin = cluster.admin(Map.of())) { + admin.createTopics(List.of(new NewTopic("topic", 1, (short) 1))); + TestUtils.waitForCondition( + () -> CustomQuotaCallback.COUNTERS.size() == 3 + && CustomQuotaCallback.COUNTERS.values().stream().allMatch(counter -> counter.get() > 0), + "The CustomQuotaCallback not triggered in all controllers. " + ); + + // Reset the counters, and we expect the callback to be triggered again in all controllers + CustomQuotaCallback.COUNTERS.clear(); + + admin.deleteTopics(List.of("topic")); + TestUtils.waitForCondition( + () -> CustomQuotaCallback.COUNTERS.size() == 3 + && CustomQuotaCallback.COUNTERS.values().stream().allMatch(counter -> counter.get() > 0), + "The CustomQuotaCallback not triggered in all controllers. " + ); + + } + } + + + public static class CustomQuotaCallback implements ClientQuotaCallback { + + public static final Map COUNTERS = new ConcurrentHashMap<>(); + private String nodeId; + + @Override + public Map quotaMetricTags(ClientQuotaType quotaType, KafkaPrincipal principal, String clientId) { + return Map.of(); + } + + @Override + public Double quotaLimit(ClientQuotaType quotaType, Map metricTags) { + return Double.MAX_VALUE; + } + + @Override + public void updateQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity, double newValue) { + + } + + @Override + public void removeQuota(ClientQuotaType quotaType, ClientQuotaEntity quotaEntity) { + + } + + @Override + public boolean quotaResetRequired(ClientQuotaType quotaType) { + return true; + } + + @Override + public boolean updateClusterMetadata(Cluster cluster) { + COUNTERS.computeIfAbsent(nodeId, k -> new AtomicInteger()).incrementAndGet(); + return true; + } + + @Override + public void close() { + + } + + @Override + public void configure(Map configs) { + nodeId = (String) configs.get("node.id"); + } + + } +} diff --git a/core/src/test/java/kafka/test/api/ShareConsumerTest.java b/core/src/test/java/kafka/test/api/ShareConsumerTest.java new file mode 100644 index 0000000000000..7b3f468c05cc0 --- /dev/null +++ b/core/src/test/java/kafka/test/api/ShareConsumerTest.java @@ -0,0 +1,1905 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.test.api; + +import kafka.api.BaseConsumerTest; + +import org.apache.kafka.clients.admin.Admin; +import org.apache.kafka.clients.admin.AlterConfigOp; +import org.apache.kafka.clients.admin.AlterConfigsOptions; +import org.apache.kafka.clients.admin.ConfigEntry; +import org.apache.kafka.clients.admin.NewTopic; +import org.apache.kafka.clients.admin.RecordsToDelete; +import org.apache.kafka.clients.consumer.AcknowledgeType; +import org.apache.kafka.clients.consumer.AcknowledgementCommitCallback; +import org.apache.kafka.clients.consumer.ConsumerConfig; +import org.apache.kafka.clients.consumer.ConsumerRecord; +import org.apache.kafka.clients.consumer.ConsumerRecords; +import org.apache.kafka.clients.consumer.KafkaShareConsumer; +import org.apache.kafka.clients.producer.KafkaProducer; +import org.apache.kafka.clients.producer.ProducerConfig; +import org.apache.kafka.clients.producer.ProducerRecord; +import org.apache.kafka.clients.producer.RecordMetadata; +import org.apache.kafka.common.KafkaException; +import org.apache.kafka.common.TopicIdPartition; +import org.apache.kafka.common.TopicPartition; +import org.apache.kafka.common.config.ConfigResource; +import org.apache.kafka.common.errors.InterruptException; +import org.apache.kafka.common.errors.InvalidRecordStateException; +import org.apache.kafka.common.errors.InvalidTopicException; +import org.apache.kafka.common.errors.WakeupException; +import org.apache.kafka.common.header.Header; +import org.apache.kafka.common.network.ListenerName; +import org.apache.kafka.common.record.TimestampType; +import org.apache.kafka.common.serialization.ByteArrayDeserializer; +import org.apache.kafka.common.serialization.ByteArraySerializer; +import org.apache.kafka.common.serialization.Deserializer; +import org.apache.kafka.common.serialization.Serializer; +import org.apache.kafka.common.test.KafkaClusterTestKit; +import org.apache.kafka.common.test.TestKitNodes; +import org.apache.kafka.common.test.api.Flaky; +import org.apache.kafka.coordinator.group.GroupConfig; +import org.apache.kafka.test.TestUtils; + +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Tag; +import org.junit.jupiter.api.TestInfo; +import org.junit.jupiter.api.Timeout; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.ValueSource; + +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.stream.IntStream; +import java.util.stream.Stream; + +import static org.apache.kafka.test.TestUtils.DEFAULT_MAX_WAIT_MS; +import static org.junit.jupiter.api.Assertions.assertDoesNotThrow; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +@Timeout(1200) +@Tag("integration") +public class ShareConsumerTest { + private KafkaClusterTestKit cluster; + private final TopicPartition tp = new TopicPartition("topic", 0); + private final TopicPartition tp2 = new TopicPartition("topic2", 0); + private final TopicPartition warmupTp = new TopicPartition("warmup", 0); + private static final String DEFAULT_STATE_PERSISTER = "org.apache.kafka.server.share.persister.DefaultStatePersister"; + private static final String NO_OP_PERSISTER = "org.apache.kafka.server.share.persister.NoOpShareStatePersister"; + + private Admin adminClient; + + @BeforeEach + public void createCluster(TestInfo testInfo) throws Exception { + String persisterClassName = NO_OP_PERSISTER; + if (testInfo.getDisplayName().contains(".persister=")) { + persisterClassName = testInfo.getDisplayName().split("=")[1]; + } + cluster = new KafkaClusterTestKit.Builder( + new TestKitNodes.Builder() + .setNumBrokerNodes(1) + .setNumControllerNodes(1) + .build()) + .setConfigProp("auto.create.topics.enable", "false") + .setConfigProp("group.coordinator.rebalance.protocols", "classic,consumer,share") + .setConfigProp("group.share.enable", "true") + .setConfigProp("group.share.partition.max.record.locks", "10000") + .setConfigProp("group.share.persister.class.name", persisterClassName) + .setConfigProp("group.share.record.lock.duration.ms", "15000") + .setConfigProp("offsets.topic.replication.factor", "1") + .setConfigProp("share.coordinator.state.topic.min.isr", "1") + .setConfigProp("share.coordinator.state.topic.num.partitions", "3") + .setConfigProp("share.coordinator.state.topic.replication.factor", "1") + .setConfigProp("transaction.state.log.min.isr", "1") + .setConfigProp("transaction.state.log.replication.factor", "1") + .setConfigProp("unstable.api.versions.enable", "true") + .build(); + cluster.format(); + cluster.startup(); + cluster.waitForActiveController(); + cluster.waitForReadyBrokers(); + createTopic("topic"); + createTopic("topic2"); + adminClient = createAdminClient(); + warmup(); + } + + @AfterEach + public void destroyCluster() throws Exception { + adminClient.close(); + cluster.close(); + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testPollNoSubscribeFails(String persister) { + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribeAndPollNoRecords(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribePollUnsubscribe(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.unsubscribe(); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribePollSubscribe(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribeUnsubscribePollFails(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.unsubscribe(); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribeSubscribeEmptyPollFails(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + Set subscription = Collections.singleton(tp.topic()); + shareConsumer.subscribe(subscription); + assertEquals(subscription, shareConsumer.subscription()); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + shareConsumer.subscribe(Collections.emptySet()); + assertEquals(Collections.emptySet(), shareConsumer.subscription()); + // "Consumer is not subscribed to any topics." + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(500))); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscriptionAndPoll(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscriptionAndPollMultiple(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementSentOnSubscriptionChange(String persister) throws ExecutionException, InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + ProducerRecord record2 = new ProducerRecord<>(tp2.topic(), tp2.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record2).get(); + producer.flush(); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); + + shareConsumer.subscribe(Collections.singletonList(tp2.topic())); + + // Waiting for heartbeat to propagate the subscription change. + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp) && partitionExceptionMap.containsKey(tp2); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records from the updated subscription"); + + // Verifying if the callback was invoked without exceptions for the partitions for both topics. + assertNull(partitionExceptionMap.get(tp)); + assertNull(partitionExceptionMap.get(tp2)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackSuccessfulAcknowledgement(String persister) throws Exception { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + + producer.send(record); + producer.flush(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); + + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive call to callback"); + + // We expect null exception as the acknowledgment error code is null. + assertNull(partitionExceptionMap.get(tp)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackOnClose(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + + // Now in the second poll, we implicitly acknowledge the record received in the first poll. + // We get back the acknowledgement error code asynchronously after the second poll. + // The acknowledgement commit callback is invoked in close. + shareConsumer.poll(Duration.ofMillis(1000)); + shareConsumer.close(); + + // We expect null exception as the acknowledgment error code is null. + assertTrue(partitionExceptionMap.containsKey(tp)); + assertNull(partitionExceptionMap.get(tp)); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackInvalidRecordStateException(String persister) throws Exception { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + + // Waiting until the acquisition lock expires. + Thread.sleep(20000); + + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap.containsKey(tp) && partitionExceptionMap.get(tp) instanceof InvalidRecordStateException; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to be notified by InvalidRecordStateException"); + } + } + + private static class TestableAcknowledgementCommitCallback implements AcknowledgementCommitCallback { + private final Map> partitionOffsetsMap; + private final Map partitionExceptionMap; + + public TestableAcknowledgementCommitCallback(Map> partitionOffsetsMap, + Map partitionExceptionMap) { + this.partitionOffsetsMap = partitionOffsetsMap; + this.partitionExceptionMap = partitionExceptionMap; + } + + @Override + public void onComplete(Map> offsetsMap, Exception exception) { + offsetsMap.forEach((partition, offsets) -> { + partitionOffsetsMap.merge(partition.topicPartition(), offsets, (oldOffsets, newOffsets) -> { + Set mergedOffsets = new HashSet<>(); + mergedOffsets.addAll(oldOffsets); + mergedOffsets.addAll(newOffsets); + return mergedOffsets; + }); + if (!partitionExceptionMap.containsKey(partition.topicPartition())) { + partitionExceptionMap.put(partition.topicPartition(), exception); + } + }); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testHeaders(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + int numRecords = 1; + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + record.headers().add("headerKey", "headerValue".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + List> records = consumeRecords(shareConsumer, numRecords); + assertEquals(numRecords, records.size()); + + for (ConsumerRecord consumerRecord : records) { + Header header = consumerRecord.headers().lastHeader("headerKey"); + if (header != null) + assertEquals("headerValue", new String(header.value())); + } + } + } + + private void testHeadersSerializeDeserialize(Serializer serializer, Deserializer deserializer) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), serializer); + KafkaShareConsumer shareConsumer = createShareConsumer(deserializer, new ByteArrayDeserializer(), "group1")) { + + int numRecords = 1; + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + List> records = consumeRecords(shareConsumer, numRecords); + assertEquals(numRecords, records.size()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testHeadersSerializerDeserializer(String persister) { + testHeadersSerializeDeserialize(new BaseConsumerTest.SerializerImpl(), new BaseConsumerTest.DeserializerImpl()); + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMaxPollRecords(String persister) { + int numRecords = 10000; + int maxPollRecords = 2; + + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), + "group1", Collections.singletonMap(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, String.valueOf(maxPollRecords)))) { + + long startingTimestamp = System.currentTimeMillis(); + produceMessagesWithTimestamp(numRecords, startingTimestamp); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + List> records = consumeRecords(shareConsumer, numRecords); + long i = 0L; + for (ConsumerRecord record : records) { + assertEquals(tp.topic(), record.topic()); + assertEquals(tp.partition(), record.partition()); + assertEquals(TimestampType.CREATE_TIME, record.timestampType()); + assertEquals(startingTimestamp + i, record.timestamp()); + assertEquals("key " + i, new String(record.key())); + assertEquals("value " + i, new String(record.value())); + // this is true only because K and V are byte arrays + assertEquals(("key " + i).length(), record.serializedKeySize()); + assertEquals(("value " + i).length(), record.serializedValueSize()); + + i++; + } + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testControlRecordsSkipped(String persister) throws Exception { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer transactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer(), "T1"); + KafkaProducer nonTransactionalProducer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + + transactionalProducer.initTransactions(); + transactionalProducer.beginTransaction(); + RecordMetadata transactional1 = transactionalProducer.send(record).get(); + + RecordMetadata nonTransactional1 = nonTransactionalProducer.send(record).get(); + + transactionalProducer.commitTransaction(); + + transactionalProducer.beginTransaction(); + RecordMetadata transactional2 = transactionalProducer.send(record).get(); + transactionalProducer.abortTransaction(); + + RecordMetadata nonTransactional2 = nonTransactionalProducer.send(record).get(); + + transactionalProducer.close(); + nonTransactionalProducer.close(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(4, records.count()); + assertEquals(transactional1.offset(), records.records(tp).get(0).offset()); + assertEquals(nonTransactional1.offset(), records.records(tp).get(1).offset()); + assertEquals(transactional2.offset(), records.records(tp).get(2).offset()); + assertEquals(nonTransactional2.offset(), records.records(tp).get(3).offset()); + + // There will be control records on the topic-partition, so the offsets of the non-control records + // are not 0, 1, 2, 3. Just assert that the offset of the final one is not 3. + assertNotEquals(3, nonTransactional2.offset()); + + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeSuccess(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(shareConsumer::acknowledge); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeCommitSuccess(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(shareConsumer::acknowledge); + producer.send(record); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgementCommitAsync(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + Map> partitionOffsetsMap1 = new HashMap<>(); + Map partitionExceptionMap1 = new HashMap<>(); + shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); + + ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); + Iterator> iterator = records.iterator(); + + // Acknowledging 2 out of the 3 records received via commitAsync. + ConsumerRecord firstRecord = iterator.next(); + ConsumerRecord secondRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + assertEquals(1L, secondRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + shareConsumer1.acknowledge(secondRecord); + shareConsumer1.commitAsync(); + + // The 3rd record should be reassigned to 2nd consumer when it polls, kept higher wait time + // as time out for locks is 15 secs. + TestUtils.waitForCondition(() -> { + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(1000)); + return records2.count() == 1 && records2.iterator().next().offset() == 2L; + }, 30000, 100L, () -> "Didn't receive timed out record"); + + assertFalse(partitionExceptionMap1.containsKey(tp)); + + // The callback will receive the acknowledgement responses asynchronously after the next poll. + TestUtils.waitForCondition(() -> { + shareConsumer1.poll(Duration.ofMillis(1000)); + return partitionExceptionMap1.containsKey(tp); + }, 30000, 100L, () -> "Didn't receive call to callback"); + + assertNull(partitionExceptionMap1.get(tp)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testImplicitModeNotTriggeredByPollWhenNoAcksToSend(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + Map> partitionOffsetsMap1 = new HashMap<>(); + Map partitionExceptionMap1 = new HashMap<>(); + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); + + // The acknowledgement mode moves to PENDING from UNKNOWN. + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(0, records.count()); + shareConsumer.commitAsync(); + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.flush(); + + // The acknowledgement mode remains in PENDING because no records were returned. + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + + // The acknowledgement mode now moves to EXPLICIT. + shareConsumer.acknowledge(records.iterator().next()); + shareConsumer.commitAsync(); + + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(500)); + return partitionExceptionMap1.containsKey(tp); + }, 30000, 100L, () -> "Didn't receive call to callback"); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgementCommitAsyncPartialBatch(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + + Map> partitionOffsetsMap = new HashMap<>(); + Map partitionExceptionMap = new HashMap<>(); + shareConsumer1.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap, partitionExceptionMap)); + + ConsumerRecords records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); + Iterator> iterator = records.iterator(); + + // Acknowledging 2 out of the 3 records received via commitAsync. + ConsumerRecord firstRecord = iterator.next(); + ConsumerRecord secondRecord = iterator.next(); + assertEquals(0L, firstRecord.offset()); + assertEquals(1L, secondRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + shareConsumer1.acknowledge(secondRecord); + shareConsumer1.commitAsync(); + + // The 3rd record should be re-presented to the consumer when it polls again. + records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + iterator = records.iterator(); + firstRecord = iterator.next(); + assertEquals(2L, firstRecord.offset()); + + // And poll again without acknowledging - the callback will receive the acknowledgement responses too + records = shareConsumer1.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + iterator = records.iterator(); + firstRecord = iterator.next(); + assertEquals(2L, firstRecord.offset()); + + shareConsumer1.acknowledge(firstRecord); + + // The callback will receive the acknowledgement responses after polling. The callback is + // called on entry to the poll method or during close. The commit is being performed asynchronously, so + // we can only rely on the completion once the consumer has closed because that waits for the response. + shareConsumer1.poll(Duration.ofMillis(500)); + + shareConsumer1.close(); + + assertTrue(partitionExceptionMap.containsKey(tp)); + assertNull(partitionExceptionMap.get(tp)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeReleasePollAccept(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeReleaseAccept(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.ACCEPT)); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeReleaseClose(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + records.forEach(consumedRecord -> shareConsumer.acknowledge(consumedRecord, AcknowledgeType.RELEASE)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testExplicitAcknowledgeThrowsNotInBatch(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + ConsumerRecord consumedRecord = records.records(tp).get(0); + shareConsumer.acknowledge(consumedRecord); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testImplicitAcknowledgeFailsExplicit(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + ConsumerRecord consumedRecord = records.records(tp).get(0); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + assertThrows(IllegalStateException.class, () -> shareConsumer.acknowledge(consumedRecord)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testImplicitAcknowledgeCommitSync(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + Map> result = shareConsumer.commitSync(); + assertEquals(1, result.size()); + result = shareConsumer.commitSync(); + assertEquals(0, result.size()); + records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testImplicitAcknowledgementCommitAsync(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord record3 = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record1); + producer.send(record2); + producer.send(record3); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + Map> partitionOffsetsMap1 = new HashMap<>(); + Map partitionExceptionMap1 = new HashMap<>(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallback(partitionOffsetsMap1, partitionExceptionMap1)); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(3, records.count()); + + // Implicitly acknowledging all the records received. + shareConsumer.commitAsync(); + + assertFalse(partitionExceptionMap1.containsKey(tp)); + // The callback will receive the acknowledgement responses after the next poll. + TestUtils.waitForCondition(() -> { + shareConsumer.poll(Duration.ofMillis(1000)); + return partitionExceptionMap1.containsKey(tp); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Acknowledgement commit callback did not receive the response yet"); + + assertNull(partitionExceptionMap1.get(tp)); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testFetchRecordLargerThanMaxPartitionFetchBytes(String persister) throws Exception { + int maxPartitionFetchBytes = 10000; + + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), + "group1", Collections.singletonMap(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, String.valueOf(maxPartitionFetchBytes)))) { + + ProducerRecord smallRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + ProducerRecord bigRecord = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), new byte[maxPartitionFetchBytes]); + producer.send(smallRecord).get(); + producer.send(bigRecord).get(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMultipleConsumersWithDifferentGroupIds(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + alterShareAutoOffsetReset("group2", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + // producing 3 records to the topic + producer.send(record); + producer.send(record); + producer.send(record); + producer.flush(); + + // Both the consumers should read all the messages, because they are part of different share groups (both have different group IDs) + AtomicInteger shareConsumer1Records = new AtomicInteger(); + AtomicInteger shareConsumer2Records = new AtomicInteger(); + TestUtils.waitForCondition(() -> { + int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); + int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); + return records1 == 3 && records2 == 3; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers"); + + producer.send(record); + producer.send(record); + + shareConsumer1Records.set(0); + TestUtils.waitForCondition(() -> shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()) == 2, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer 1"); + + producer.send(record); + producer.send(record); + producer.send(record); + + shareConsumer1Records.set(0); + shareConsumer2Records.set(0); + TestUtils.waitForCondition(() -> { + int records1 = shareConsumer1Records.addAndGet(shareConsumer1.poll(Duration.ofMillis(2000)).count()); + int records2 = shareConsumer2Records.addAndGet(shareConsumer2.poll(Duration.ofMillis(2000)).count()); + return records1 == 3 && records2 == 5; + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for both consumers for the last batch"); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMultipleConsumersInGroupSequentialConsumption(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + int totalMessages = 2000; + for (int i = 0; i < totalMessages; i++) { + producer.send(record); + } + producer.flush(); + + int consumer1MessageCount = 0; + int consumer2MessageCount = 0; + + int maxRetries = 10; + int retries = 0; + while (retries < maxRetries) { + ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(2000)); + consumer1MessageCount += records1.count(); + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(2000)); + consumer2MessageCount += records2.count(); + if (records1.count() + records2.count() == 0) + break; + retries++; + } + + assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); + } + } + + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMultipleConsumersInGroupConcurrentConsumption(String persister) + throws InterruptedException, ExecutionException, TimeoutException { + AtomicInteger totalMessagesConsumed = new AtomicInteger(0); + + int consumerCount = 4; + int producerCount = 4; + int messagesPerProducer = 5000; + + String groupId = "group1"; + alterShareAutoOffsetReset(groupId, "earliest"); + + List> producerFutures = new ArrayList<>(); + for (int i = 0; i < producerCount; i++) { + producerFutures.add(CompletableFuture.runAsync(() -> produceMessages(messagesPerProducer))); + } + + int maxBytes = 100000; + List> consumerFutures = new ArrayList<>(); + for (int i = 0; i < consumerCount; i++) { + final int consumerNumber = i + 1; + consumerFutures.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumed, + producerCount * messagesPerProducer, groupId, consumerNumber, + 30, true, maxBytes))); + } + + CompletableFuture.allOf(producerFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + CompletableFuture.allOf(consumerFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + + int totalResult = consumerFutures.stream().mapToInt(CompletableFuture::join).sum(); + assertEquals(producerCount * messagesPerProducer, totalResult); + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMultipleConsumersInMultipleGroupsConcurrentConsumption(String persister) + throws ExecutionException, InterruptedException, TimeoutException { + AtomicInteger totalMessagesConsumedGroup1 = new AtomicInteger(0); + AtomicInteger totalMessagesConsumedGroup2 = new AtomicInteger(0); + AtomicInteger totalMessagesConsumedGroup3 = new AtomicInteger(0); + + int producerCount = 4; + int messagesPerProducer = 2000; + final int totalMessagesSent = producerCount * messagesPerProducer; + + String groupId1 = "group1"; + String groupId2 = "group2"; + String groupId3 = "group3"; + + alterShareAutoOffsetReset(groupId1, "earliest"); + alterShareAutoOffsetReset(groupId2, "earliest"); + alterShareAutoOffsetReset(groupId3, "earliest"); + + List> producerFutures = new ArrayList<>(); + for (int i = 0; i < producerCount; i++) { + producerFutures.add(CompletableFuture.supplyAsync(() -> produceMessages(messagesPerProducer))); + } + // Wait for the producers to run + assertDoesNotThrow(() -> CompletableFuture.allOf(producerFutures.toArray(CompletableFuture[]::new)) + .get(15, TimeUnit.SECONDS), "Exception awaiting produceMessages"); + int actualMessageSent = producerFutures.stream().mapToInt(CompletableFuture::join).sum(); + + List> consumeMessagesFutures1 = new ArrayList<>(); + List> consumeMessagesFutures2 = new ArrayList<>(); + List> consumeMessagesFutures3 = new ArrayList<>(); + + int maxBytes = 100000; + for (int i = 0; i < 2; i++) { + final int consumerNumber = i + 1; + consumeMessagesFutures1.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup1, totalMessagesSent, + "group1", consumerNumber, 100, true, maxBytes))); + + consumeMessagesFutures2.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup2, totalMessagesSent, + "group2", consumerNumber, 100, true, maxBytes))); + + consumeMessagesFutures3.add(CompletableFuture.supplyAsync(() -> + consumeMessages(totalMessagesConsumedGroup3, totalMessagesSent, + "group3", consumerNumber, 100, true, maxBytes))); + } + + CompletableFuture.allOf(Stream.of(consumeMessagesFutures1.stream(), consumeMessagesFutures2.stream(), + consumeMessagesFutures3.stream()).flatMap(Function.identity()).toArray(CompletableFuture[]::new)) + .get(120, TimeUnit.SECONDS); + + int totalResult1 = consumeMessagesFutures1.stream().mapToInt(CompletableFuture::join).sum(); + int totalResult2 = consumeMessagesFutures2.stream().mapToInt(CompletableFuture::join).sum(); + int totalResult3 = consumeMessagesFutures3.stream().mapToInt(CompletableFuture::join).sum(); + + assertEquals(totalMessagesSent, totalResult1); + assertEquals(totalMessagesSent, totalResult2); + assertEquals(totalMessagesSent, totalResult3); + assertEquals(totalMessagesSent, actualMessageSent); + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testConsumerCloseInGroupSequential(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer1 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumer2 = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + shareConsumer1.subscribe(Collections.singleton(tp.topic())); + shareConsumer2.subscribe(Collections.singleton(tp.topic())); + + int totalMessages = 1500; + for (int i = 0; i < totalMessages; i++) { + producer.send(record); + } + producer.close(); + + int consumer1MessageCount = 0; + int consumer2MessageCount = 0; + + // Poll three times to receive records. The second poll acknowledges the records + // from the first poll, and so on. The third poll's records are not acknowledged + // because the consumer is closed, which makes the broker release the records fetched. + ConsumerRecords records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + consumer1MessageCount += records1.count(); + int consumer1MessageCountA = records1.count(); + records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + consumer1MessageCount += records1.count(); + int consumer1MessageCountB = records1.count(); + records1 = shareConsumer1.poll(Duration.ofMillis(5000)); + int consumer1MessageCountC = records1.count(); + assertEquals(totalMessages, consumer1MessageCountA + consumer1MessageCountB + consumer1MessageCountC); + shareConsumer1.close(); + + int maxRetries = 10; + int retries = 0; + while (consumer1MessageCount + consumer2MessageCount < totalMessages && retries < maxRetries) { + ConsumerRecords records2 = shareConsumer2.poll(Duration.ofMillis(5000)); + consumer2MessageCount += records2.count(); + retries++; + } + shareConsumer2.close(); + assertEquals(totalMessages, consumer1MessageCount + consumer2MessageCount); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testMultipleConsumersInGroupFailureConcurrentConsumption(String persister) + throws InterruptedException, ExecutionException, TimeoutException { + AtomicInteger totalMessagesConsumed = new AtomicInteger(0); + + int consumerCount = 4; + int producerCount = 4; + int messagesPerProducer = 5000; + + String groupId = "group1"; + + alterShareAutoOffsetReset(groupId, "earliest"); + + List> produceMessageFutures = new ArrayList<>(); + for (int i = 0; i < producerCount; i++) { + produceMessageFutures.add(CompletableFuture.runAsync(() -> produceMessages(messagesPerProducer))); + } + + int maxBytes = 1000000; + + // The "failing" consumer polls but immediately closes, which releases the records for the other consumers + CompletableFuture failedMessagesConsumedFuture = CompletableFuture.supplyAsync( + () -> consumeMessages(new AtomicInteger(0), producerCount * messagesPerProducer, groupId, + 0, 1, false)); + + // Wait for the failed consumer to run + assertDoesNotThrow(() -> failedMessagesConsumedFuture.get(15, TimeUnit.SECONDS), + "Exception awaiting consumeMessages"); + + List> consumeMessagesFutures = new ArrayList<>(); + for (int i = 0; i < consumerCount; i++) { + final int consumerNumber = i + 1; + consumeMessagesFutures.add(CompletableFuture.supplyAsync( + () -> consumeMessages(totalMessagesConsumed, producerCount * messagesPerProducer, + groupId, consumerNumber, 40, true, maxBytes))); + } + + CompletableFuture.allOf(produceMessageFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + CompletableFuture.allOf(consumeMessagesFutures.toArray(CompletableFuture[]::new)).get(60, TimeUnit.SECONDS); + + int totalSuccessResult = consumeMessagesFutures.stream().mapToInt(CompletableFuture::join).sum(); + assertEquals(producerCount * messagesPerProducer, totalSuccessResult); + } + + @Flaky("KAFKA-18025") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcquisitionLockTimeoutOnConsumer(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord producerRecord1 = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key_1".getBytes(), "value_1".getBytes()); + ProducerRecord producerRecord2 = new ProducerRecord<>(tp.topic(), tp.partition(), null, + "key_2".getBytes(), "value_2".getBytes()); + shareConsumer.subscribe(Set.of(tp.topic())); + + // Produce a first record which is consumed and acknowledged normally. + producer.send(producerRecord1); + producer.flush(); + + // Poll twice to receive records. The first poll fetches the record and starts the acquisition lock timer. + // Since, we are only sending one record and the acquisition lock hasn't timed out, the second poll only + // acknowledges the record from the first poll and does not fetch any more records. + ConsumerRecords consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + ConsumerRecord consumerRecord = consumerRecords.records(tp).get(0); + assertEquals("key_1", new String(consumerRecord.key())); + assertEquals("value_1", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(1000)); + assertEquals(0, consumerRecords.count()); + + // Produce a second record which is fetched, but not acknowledged before it times out. The record will + // be released automatically by the broker. It is then fetched again and acknowledged normally. + producer.send(producerRecord2); + producer.flush(); + + // Poll three more times. The first poll fetches the second record and starts the acquisition lock timer. + // Before the second poll, acquisition lock times out and hence the consumer needs to fetch the record again. + // The acquisition lock doesn't time out between the second and third polls, so the third poll only acknowledges + // the record from the second poll and does not fetch any more records. + consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + consumerRecord = consumerRecords.records(tp).get(0); + assertEquals("key_2", new String(consumerRecord.key())); + assertEquals("value_2", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + // Allow the acquisition lock to time out. + Thread.sleep(20000); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(5000)); + consumerRecord = consumerRecords.records(tp).get(0); + // By checking the key and value before the count, we get a bit more information if too many records are returned. + // This test has been observed to fail very occasionally because of this. + assertEquals("key_2", new String(consumerRecord.key())); + assertEquals("value_2", new String(consumerRecord.value())); + assertEquals(1, consumerRecords.count()); + + consumerRecords = shareConsumer.poll(Duration.ofMillis(1000)); + assertEquals(0, consumerRecords.count()); + } + } + + /** + * Test to verify that the acknowledgement commit callback cannot invoke methods of KafkaShareConsumer. + * The exception thrown is verified in {@link TestableAcknowledgementCommitCallbackWithShareConsumer} + */ + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackCallsShareConsumerDisallowed(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWithShareConsumer<>(shareConsumer)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + // The acknowledgment commit callback will try to call a method of KafkaShareConsumer + shareConsumer.poll(Duration.ofMillis(5000)); + // The second poll sends the acknowledgements implicitly. + // The acknowledgement commit callback will be called and the exception is thrown. + // This is verified inside the onComplete() method implementation. + shareConsumer.poll(Duration.ofMillis(500)); + } + } + + private class TestableAcknowledgementCommitCallbackWithShareConsumer implements AcknowledgementCommitCallback { + private final KafkaShareConsumer shareConsumer; + + TestableAcknowledgementCommitCallbackWithShareConsumer(KafkaShareConsumer shareConsumer) { + this.shareConsumer = shareConsumer; + } + + @Override + public void onComplete(Map> offsetsMap, Exception exception) { + // Accessing methods of KafkaShareConsumer should throw an exception. + assertThrows(IllegalStateException.class, shareConsumer::close); + assertThrows(IllegalStateException.class, () -> shareConsumer.subscribe(Collections.singleton(tp.topic()))); + assertThrows(IllegalStateException.class, () -> shareConsumer.poll(Duration.ofMillis(5000))); + } + } + + /** + * Test to verify that the acknowledgement commit callback can invoke KafkaShareConsumer.wakeup() and it + * wakes up the enclosing poll. + */ + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackCallsShareConsumerWakeup(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + // The acknowledgment commit callback will try to call a method of KafkaShareConsumer + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackWakeup<>(shareConsumer)); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); + + // The second poll sends the acknowledgments implicitly. + shareConsumer.poll(Duration.ofMillis(2000)); + + // Till now acknowledgement commit callback has not been called, so no exception thrown yet. + // On 3rd poll, the acknowledgement commit callback will be called and the exception is thrown. + AtomicBoolean exceptionThrown = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + try { + shareConsumer.poll(Duration.ofMillis(500)); + } catch (org.apache.kafka.common.errors.WakeupException e) { + exceptionThrown.set(true); + } + return exceptionThrown.get(); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive expected exception"); + } + } + + private static class TestableAcknowledgementCommitCallbackWakeup implements AcknowledgementCommitCallback { + private final KafkaShareConsumer shareConsumer; + + TestableAcknowledgementCommitCallbackWakeup(KafkaShareConsumer shareConsumer) { + this.shareConsumer = shareConsumer; + } + + @Override + public void onComplete(Map> offsetsMap, Exception exception) { + shareConsumer.wakeup(); + } + } + + /** + * Test to verify that the acknowledgement commit callback can throw an exception, and it is propagated + * to the caller of poll(). + */ + @Flaky("KAFKA-18033") + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testAcknowledgementCommitCallbackThrowsException(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.setAcknowledgementCommitCallback(new TestableAcknowledgementCommitCallbackThrows<>()); + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer"); + + AtomicBoolean exceptionThrown = new AtomicBoolean(false); + TestUtils.waitForCondition(() -> { + try { + shareConsumer.poll(Duration.ofMillis(500)); + } catch (org.apache.kafka.common.errors.OutOfOrderSequenceException e) { + exceptionThrown.set(true); + } + return exceptionThrown.get(); + }, DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to receive expected exception"); + } + } + + private static class TestableAcknowledgementCommitCallbackThrows implements AcknowledgementCommitCallback { + @Override + public void onComplete(Map> offsetsMap, Exception exception) { + throw new org.apache.kafka.common.errors.OutOfOrderSequenceException("Exception thrown in TestableAcknowledgementCommitCallbackThrows.onComplete"); + } + } + + /** + * Test to verify that calling Thread.interrupt() before KafkaShareConsumer.poll(Duration) + * causes it to throw InterruptException + */ + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testPollThrowsInterruptExceptionIfInterrupted(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + // interrupt the thread and call poll + try { + Thread.currentThread().interrupt(); + assertThrows(InterruptException.class, () -> shareConsumer.poll(Duration.ZERO)); + } finally { + // clear interrupted state again since this thread may be reused by JUnit + Thread.interrupted(); + } + + assertDoesNotThrow(() -> shareConsumer.poll(Duration.ZERO), "Failed to consume records"); + } + } + + /** + * Test to verify that InvalidTopicException is thrown if the consumer subscribes + * to an invalid topic. + */ + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscribeOnInvalidTopicThrowsInvalidTopicException(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + shareConsumer.subscribe(Collections.singleton("topic abc")); + + // The exception depends upon a metadata response which arrives asynchronously. If the delay is + // too short, the poll might return before the error is known. + assertThrows(InvalidTopicException.class, () -> shareConsumer.poll(Duration.ofMillis(10000))); + } + } + + /** + * Test to ensure that a wakeup when records are buffered doesn't prevent the records + * being returned on the next poll. + */ + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testWakeupWithFetchedRecordsAvailable(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + shareConsumer.wakeup(); + assertThrows(WakeupException.class, () -> shareConsumer.poll(Duration.ZERO)); + + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscriptionFollowedByTopicCreation(String persister) throws InterruptedException { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + String topic = "foo"; + shareConsumer.subscribe(Collections.singleton(topic)); + + // Topic is created post creation of share consumer and subscription + createTopic(topic); + + ProducerRecord record = new ProducerRecord<>(topic, 0, null, "key".getBytes(), "value".getBytes()); + producer.send(record); + producer.flush(); + + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "Failed to consume records for share consumer, metadata sync failed"); + + producer.send(record); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + producer.send(record); + records = shareConsumer.poll(Duration.ofMillis(5000)); + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testSubscriptionAndPollFollowedByTopicDeletion(String persister) throws InterruptedException, ExecutionException { + String topic1 = "bar"; + String topic2 = "baz"; + createTopic(topic1); + createTopic(topic2); + + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1")) { + + ProducerRecord recordTopic1 = new ProducerRecord<>(topic1, 0, null, "key".getBytes(), "value".getBytes()); + ProducerRecord recordTopic2 = new ProducerRecord<>(topic2, 0, null, "key".getBytes(), "value".getBytes()); + + // Consumer subscribes to the topics -> bar and baz. + shareConsumer.subscribe(Arrays.asList(topic1, topic2)); + + producer.send(recordTopic1).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + + producer.send(recordTopic2).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + + // Topic bar is deleted, hence poll should not give any results. + deleteTopic(topic1); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(500)); + assertEquals(0, records.count()); + + producer.send(recordTopic2).get(); + // Poll should give the record from the non-deleted topic baz. + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + + producer.send(recordTopic2).get(); + TestUtils.waitForCondition(() -> shareConsumer.poll(Duration.ofMillis(2000)).count() == 1, + DEFAULT_MAX_WAIT_MS, 100L, () -> "incorrect number of records"); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testLsoMovementByRecordsDeletion(String persister) { + String groupId = "group1"; + + alterShareAutoOffsetReset(groupId, "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + ProducerRecord record = new ProducerRecord<>(tp.topic(), 0, null, "key".getBytes(), "value".getBytes()); + + // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. + for (int i = 0; i < 10; i++) { + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); + } + + // We delete records before offset 5, so the LSO should move to 5. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); + + int messageCount = consumeMessages(new AtomicInteger(0), 5, groupId, 1, 10, true); + // The records returned belong to offsets 5-9. + assertEquals(5, messageCount); + + // We write 5 records to the topic, so they would be written from offsets 10-14 on the topic. + for (int i = 0; i < 5; i++) { + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); + } + + // We delete records before offset 14, so the LSO should move to 14. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(14L))); + + int consumeMessagesCount = consumeMessages(new AtomicInteger(0), 1, groupId, 1, 10, true); + // The record returned belong to offset 14. + assertEquals(1, consumeMessagesCount); + + // We delete records before offset 15, so the LSO should move to 15 and now no records should be returned. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(15L))); + + messageCount = consumeMessages(new AtomicInteger(0), 0, groupId, 1, 5, true); + assertEquals(0, messageCount); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testShareAutoOffsetResetDefaultValue(String persister) { + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. + producer.send(record); + producer.flush(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + // No records should be consumed because share.auto.offset.reset has a default of "latest". Since the record + // was produced before share partition was initialized (which happens after the first share fetch request + // in the poll method), the start offset would be the latest offset, i.e. 1 (the next offset after the already + // present 0th record) + assertEquals(0, records.count()); + // Producing another record. + producer.send(record); + producer.flush(); + records = shareConsumer.poll(Duration.ofMillis(5000)); + // Now the next record should be consumed successfully + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testShareAutoOffsetResetEarliest(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. + producer.send(record); + producer.flush(); + ConsumerRecords records = shareConsumer.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume + // all messages present on the partition + assertEquals(1, records.count()); + // Producing another record. + producer.send(record); + producer.flush(); + records = shareConsumer.poll(Duration.ofMillis(5000)); + // The next records should also be consumed successfully + assertEquals(1, records.count()); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testShareAutoOffsetResetEarliestAfterLsoMovement(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + try (KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumer.subscribe(Collections.singleton(tp.topic())); + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // We write 10 records to the topic, so they would be written from offsets 0-9 on the topic. + for (int i = 0; i < 10; i++) { + assertDoesNotThrow(() -> producer.send(record).get(), "Failed to send records"); + } + + // We delete records before offset 5, so the LSO should move to 5. + adminClient.deleteRecords(Collections.singletonMap(tp, RecordsToDelete.beforeOffset(5L))); + + int consumedMessageCount = consumeMessages(new AtomicInteger(0), 5, "group1", 1, 10, true); + // The records returned belong to offsets 5-9. + assertEquals(5, consumedMessageCount); + } + } + + @ParameterizedTest(name = "{displayName}.persister={0}") + @ValueSource(strings = {NO_OP_PERSISTER, DEFAULT_STATE_PERSISTER}) + public void testShareAutoOffsetResetMultipleGroupsWithDifferentValue(String persister) { + alterShareAutoOffsetReset("group1", "earliest"); + alterShareAutoOffsetReset("group2", "latest"); + try (KafkaShareConsumer shareConsumerEarliest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group1"); + KafkaShareConsumer shareConsumerLatest = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "group2"); + KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + + shareConsumerEarliest.subscribe(Collections.singleton(tp.topic())); + + shareConsumerLatest.subscribe(Collections.singleton(tp.topic())); + + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + // Producing a record. + producer.send(record); + producer.flush(); + ConsumerRecords records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "earliest", the consumer should consume + // all messages present on the partition + assertEquals(1, records1.count()); + + ConsumerRecords records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); + // Since the value for share.auto.offset.reset has been altered to "latest", the consumer should not consume + // any message + assertEquals(0, records2.count()); + + // Producing another record. + producer.send(record); + + records1 = shareConsumerEarliest.poll(Duration.ofMillis(5000)); + // The next record should also be consumed successfully by group1 + assertEquals(1, records1.count()); + + records2 = shareConsumerLatest.poll(Duration.ofMillis(5000)); + // The next record should also be consumed successfully by group2 + assertEquals(1, records2.count()); + } + } + + private int produceMessages(int messageCount) { + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), null, "key".getBytes(), "value".getBytes()); + IntStream.range(0, messageCount).forEach(__ -> producer.send(record)); + producer.flush(); + } + return messageCount; + } + + private void produceMessagesWithTimestamp(int messageCount, long startingTimestamp) { + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer())) { + for (int i = 0; i < messageCount; i++) { + ProducerRecord record = new ProducerRecord<>(tp.topic(), tp.partition(), startingTimestamp + i, + ("key " + i).getBytes(), ("value " + i).getBytes()); + producer.send(record); + } + producer.flush(); + } + } + + private int consumeMessages(AtomicInteger totalMessagesConsumed, + int totalMessages, + String groupId, + int consumerNumber, + int maxPolls, + boolean commit) { + return assertDoesNotThrow(() -> { + try (KafkaShareConsumer shareConsumer = createShareConsumer( + new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId)) { + shareConsumer.subscribe(Collections.singleton(tp.topic())); + return consumeMessages(shareConsumer, totalMessagesConsumed, totalMessages, consumerNumber, maxPolls, commit); + } + }, "Consumer " + consumerNumber + " failed with exception"); + } + + private int consumeMessages(AtomicInteger totalMessagesConsumed, + int totalMessages, + String groupId, + int consumerNumber, + int maxPolls, + boolean commit, + int maxFetchBytes) { + return assertDoesNotThrow(() -> { + try (KafkaShareConsumer shareConsumer = createShareConsumer( + new ByteArrayDeserializer(), new ByteArrayDeserializer(), groupId, + Map.of(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxFetchBytes))) { + shareConsumer.subscribe(Collections.singleton(tp.topic())); + return consumeMessages(shareConsumer, totalMessagesConsumed, totalMessages, consumerNumber, maxPolls, commit); + } + }, "Consumer " + consumerNumber + " failed with exception"); + } + + private int consumeMessages(KafkaShareConsumer consumer, + AtomicInteger totalMessagesConsumed, + int totalMessages, + int consumerNumber, + int maxPolls, + boolean commit) { + return assertDoesNotThrow(() -> { + int messagesConsumed = 0; + int retries = 0; + if (totalMessages > 0) { + while (totalMessagesConsumed.get() < totalMessages && retries < maxPolls) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(2000)); + messagesConsumed += records.count(); + totalMessagesConsumed.addAndGet(records.count()); + retries++; + } + } else { + while (retries < maxPolls) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(2000)); + messagesConsumed += records.count(); + totalMessagesConsumed.addAndGet(records.count()); + retries++; + } + } + + if (commit) { + // Complete acknowledgement of the records + consumer.commitSync(Duration.ofMillis(10000)); + } + return messagesConsumed; + }, "Consumer " + consumerNumber + " failed with exception"); + } + + private List> consumeRecords(KafkaShareConsumer consumer, + int numRecords) { + ArrayList> accumulatedRecords = new ArrayList<>(); + long startTimeMs = System.currentTimeMillis(); + while (accumulatedRecords.size() < numRecords) { + ConsumerRecords records = consumer.poll(Duration.ofMillis(100)); + records.forEach(accumulatedRecords::add); + long currentTimeMs = System.currentTimeMillis(); + assertFalse(currentTimeMs - startTimeMs > 60000, "Timed out before consuming expected records."); + } + return accumulatedRecords; + } + + private void createTopic(String topicName) { + Properties props = cluster.clientProperties(); + assertDoesNotThrow(() -> { + try (Admin admin = Admin.create(props)) { + admin.createTopics(Collections.singleton(new NewTopic(topicName, 1, (short) 1))).all().get(); + } + }, "Failed to create topic"); + } + + private void deleteTopic(String topicName) { + Properties props = cluster.clientProperties(); + assertDoesNotThrow(() -> { + try (Admin admin = Admin.create(props)) { + admin.deleteTopics(Collections.singleton(topicName)).all().get(); + } + }, "Failed to delete topic"); + } + + private Admin createAdminClient() { + Properties props = cluster.clientProperties(); + return Admin.create(props); + } + + private KafkaProducer createProducer(Serializer keySerializer, + Serializer valueSerializer) { + Properties props = cluster.clientProperties(); + return new KafkaProducer<>(props, keySerializer, valueSerializer); + } + + private KafkaProducer createProducer(Serializer keySerializer, + Serializer valueSerializer, + String transactionalId) { + Properties props = cluster.clientProperties(); + props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, transactionalId); + return new KafkaProducer<>(props, keySerializer, valueSerializer); + } + + private KafkaShareConsumer createShareConsumer(Deserializer keyDeserializer, + Deserializer valueDeserializer, + String groupId) { + Properties props = cluster.clientProperties(); + props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); + return new KafkaShareConsumer<>(props, keyDeserializer, valueDeserializer); + } + + private KafkaShareConsumer createShareConsumer(Deserializer keyDeserializer, + Deserializer valueDeserializer, + String groupId, + Map additionalProperties) { + Properties props = cluster.clientProperties(); + props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId); + props.putAll(additionalProperties); + return new KafkaShareConsumer<>(props, keyDeserializer, valueDeserializer); + } + + private void warmup() throws InterruptedException { + createTopic(warmupTp.topic()); + TestUtils.waitForCondition(() -> + !cluster.brokers().get(0).metadataCache().getAliveBrokerNodes(new ListenerName("EXTERNAL")).isEmpty(), + DEFAULT_MAX_WAIT_MS, 100L, () -> "cache not up yet"); + ProducerRecord record = new ProducerRecord<>(warmupTp.topic(), warmupTp.partition(), null, "key".getBytes(), "value".getBytes()); + Set subscription = Collections.singleton(warmupTp.topic()); + alterShareAutoOffsetReset("warmupgroup1", "earliest"); + try (KafkaProducer producer = createProducer(new ByteArraySerializer(), new ByteArraySerializer()); + KafkaShareConsumer shareConsumer = createShareConsumer(new ByteArrayDeserializer(), new ByteArrayDeserializer(), "warmupgroup1")) { + + producer.send(record); + producer.flush(); + + shareConsumer.subscribe(subscription); + TestUtils.waitForCondition( + () -> shareConsumer.poll(Duration.ofMillis(5000)).count() == 1, 30000, 200L, () -> "warmup record not received"); + } + } + + private void alterShareAutoOffsetReset(String groupId, String newValue) { + ConfigResource configResource = new ConfigResource(ConfigResource.Type.GROUP, groupId); + Map> alterEntries = new HashMap<>(); + alterEntries.put(configResource, List.of(new AlterConfigOp(new ConfigEntry( + GroupConfig.SHARE_AUTO_OFFSET_RESET_CONFIG, newValue), AlterConfigOp.OpType.SET))); + AlterConfigsOptions alterOptions = new AlterConfigsOptions(); + assertDoesNotThrow(() -> adminClient.incrementalAlterConfigs(alterEntries, alterOptions) + .all() + .get(60, TimeUnit.SECONDS), "Failed to alter configs"); + } +} diff --git a/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala b/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala new file mode 100644 index 0000000000000..37348c0657862 --- /dev/null +++ b/core/src/test/scala/integration/kafka/admin/ListOffsetsIntegrationTest.scala @@ -0,0 +1,288 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.admin + +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.TestUtils +import kafka.utils.TestUtils.{createProducer, plaintextBootstrapServers, tempDir, waitUntilTrue} +import org.apache.kafka.clients.admin._ +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.requests.ListOffsetsResponse +import org.apache.kafka.common.utils.{MockTime, Time, Utils} +import org.apache.kafka.server.config.ServerLogConfigs +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +import java.io.File +import java.util.{Optional, Properties} +import scala.collection.{Map, Seq} +import scala.jdk.CollectionConverters._ + +class ListOffsetsIntegrationTest extends KafkaServerTestHarness { + + private val topicName = "foo" + private val topicNameWithCustomConfigs = "foo2" + private var adminClient: Admin = _ + private val mockTime: Time = new MockTime(1) + private val dataFolder = Seq(tempDir().getAbsolutePath, tempDir().getAbsolutePath) + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + createTopicWithConfig(topicName, new Properties()) + adminClient = Admin.create(Map[String, Object]( + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers() + ).asJava) + } + + override def brokerTime(brokerId: Int): Time = mockTime + + @AfterEach + override def tearDown(): Unit = { + Utils.closeQuietly(adminClient, "ListOffsetsAdminClient") + super.tearDown() + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListMaxTimestampWithEmptyLog(quorum: String): Unit = { + val maxTimestampOffset = runFetchOffsets(adminClient, OffsetSpec.maxTimestamp(), topicName) + assertEquals(ListOffsetsResponse.UNKNOWN_OFFSET, maxTimestampOffset.offset()) + assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, maxTimestampOffset.timestamp()) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeCompressedRecordsInOneBatch(quorum: String): Unit = { + produceMessagesInOneBatch("gzip") + verifyListOffsets() + + // test LogAppendTime case + setUpForLogAppendTimeCase() + produceMessagesInOneBatch("gzip", topicNameWithCustomConfigs) + // In LogAppendTime's case, the maxTimestampOffset should be the first message of the batch. + // So in this one batch test, it'll be the first offset 0 + verifyListOffsets(topic = topicNameWithCustomConfigs, 0) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeNonCompressedRecordsInOneBatch(quorum: String): Unit = { + produceMessagesInOneBatch() + verifyListOffsets() + + // test LogAppendTime case + setUpForLogAppendTimeCase() + produceMessagesInOneBatch(topic=topicNameWithCustomConfigs) + // In LogAppendTime's case, if the timestamps are the same, we choose the offset of the first record + // thus, the maxTimestampOffset should be the first record of the batch. + // So in this one batch test, it'll be the first offset which is 0 + verifyListOffsets(topic = topicNameWithCustomConfigs, 0) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeNonCompressedRecordsInSeparateBatch(quorum: String): Unit = { + produceMessagesInSeparateBatch() + verifyListOffsets() + + // test LogAppendTime case + setUpForLogAppendTimeCase() + produceMessagesInSeparateBatch(topic = topicNameWithCustomConfigs) + // In LogAppendTime's case, if the timestamp is different, it should be the last one + verifyListOffsets(topic = topicNameWithCustomConfigs, 2) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeRecordsInOneBatchHavingDifferentCompressionTypeWithServer(quorum: String): Unit = { + val props: Properties = new Properties() + props.setProperty(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") + createTopicWithConfig(topicNameWithCustomConfigs, props) + produceMessagesInOneBatch(topic = topicNameWithCustomConfigs) + verifyListOffsets(topic = topicNameWithCustomConfigs) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeRecordsInSeparateBatchHavingDifferentCompressionTypeWithServer(quorum: String): Unit = { + val props: Properties = new Properties() + props.setProperty(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") + createTopicWithConfig(topicNameWithCustomConfigs, props) + produceMessagesInSeparateBatch(topic = topicNameWithCustomConfigs) + verifyListOffsets(topic = topicNameWithCustomConfigs) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testThreeCompressedRecordsInSeparateBatch(quorum: String): Unit = { + produceMessagesInSeparateBatch("gzip") + verifyListOffsets() + + // test LogAppendTime case + setUpForLogAppendTimeCase() + produceMessagesInSeparateBatch("gzip", topicNameWithCustomConfigs) + // In LogAppendTime's case, the maxTimestampOffset is the message in the last batch since we advance the time + // for each batch, So it'll be the last offset 2 + verifyListOffsets(topic = topicNameWithCustomConfigs, 2) + } + + private def setUpForLogAppendTimeCase(): Unit = { + val props: Properties = new Properties() + props.setProperty(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "LogAppendTime") + createTopicWithConfig(topicNameWithCustomConfigs, props) + } + + private def createTopicWithConfig(topic: String, props: Properties): Unit = { + createTopic(topic, 1, 1.toShort, topicConfig = props) + } + + private def verifyListOffsets(topic: String = topicName, expectedMaxTimestampOffset: Int = 1): Unit = { + def check(): Unit = { + val earliestOffset = runFetchOffsets(adminClient, OffsetSpec.earliest(), topic) + assertEquals(0, earliestOffset.offset()) + + val latestOffset = runFetchOffsets(adminClient, OffsetSpec.latest(), topic) + assertEquals(3, latestOffset.offset()) + + val maxTimestampOffset = runFetchOffsets(adminClient, OffsetSpec.maxTimestamp(), topic) + assertEquals(expectedMaxTimestampOffset, maxTimestampOffset.offset()) + // the epoch is related to the returned offset. + // Hence, it should be zero (the earliest leader epoch), regardless of new leader election + assertEquals(Optional.of(0), maxTimestampOffset.leaderEpoch()) + } + + // case 0: test the offsets from leader's append path + check() + + // case 1: test the offsets from follower's append path. + // we make a follower be the new leader to handle the ListOffsetRequest + def leader(): Int = adminClient.describeTopics(java.util.Collections.singletonList(topic)) + .allTopicNames().get().get(topic).partitions().get(0).leader().id() + + val previousLeader = leader() + val newLeader = brokers.map(_.config.brokerId).find(_ != previousLeader).get + + // change the leader to new one + adminClient.alterPartitionReassignments(java.util.Collections.singletonMap(new TopicPartition(topic, 0), + Optional.of(new NewPartitionReassignment(java.util.Arrays.asList(newLeader))))).all().get() + // wait for all reassignments get completed + waitUntilTrue(() => adminClient.listPartitionReassignments().reassignments().get().isEmpty, + s"There still are ongoing reassignments") + // make sure we are able to see the new leader + var lastLeader = -1 + TestUtils.waitUntilTrue(() => { + lastLeader = leader() + lastLeader == newLeader + }, s"expected leader: $newLeader but actual: $lastLeader") + check() + + // case 2: test the offsets from recovery path. + // server will rebuild offset index according to log files if the index files are nonexistent + val indexFiles = brokers.flatMap(_.config.logDirs).toSet + brokers.foreach(b => killBroker(b.config.brokerId)) + indexFiles.foreach { root => + val files = new File(s"$root/$topic-0").listFiles() + if (files != null) files.foreach { f => + if (f.getName.endsWith(".index")) f.delete() + } + } + restartDeadBrokers() + Utils.closeQuietly(adminClient, "ListOffsetsAdminClient") + adminClient = Admin.create(java.util.Collections.singletonMap( + AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers().asInstanceOf[Object])) + check() + } + + private def runFetchOffsets(adminClient: Admin, + offsetSpec: OffsetSpec, + topic: String): ListOffsetsResult.ListOffsetsResultInfo = { + val tp = new TopicPartition(topic, 0) + adminClient.listOffsets(Map( + tp -> offsetSpec + ).asJava, new ListOffsetsOptions()).all().get().get(tp) + } + + private def produceMessagesInOneBatch(compressionType: String = "none", topic: String = topicName): Unit = { + val records = Seq( + new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 100L, + null, new Array[Byte](10)), + new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 999L, + null, new Array[Byte](10)), + new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 200L, + null, new Array[Byte](10)), + ) + // create a producer with large linger.ms and enough batch.size (default is enough for three 10 bytes records), + // so that we can confirm all records will be accumulated in producer until we flush them into one batch. + val producer = createProducer( + plaintextBootstrapServers(brokers), + deliveryTimeoutMs = Int.MaxValue, + lingerMs = Int.MaxValue, + compressionType = compressionType) + + try { + val futures = records.map(producer.send) + producer.flush() + futures.foreach(_.get) + } finally { + producer.close() + } + } + + private def produceMessagesInSeparateBatch(compressionType: String = "none", topic: String = topicName): Unit = { + val records = Seq(new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 100L, + null, new Array[Byte](10))) + val records2 = Seq(new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 999L, + null, new Array[Byte](10))) + val records3 = Seq(new ProducerRecord[Array[Byte], Array[Byte]](topic, 0, 200L, + null, new Array[Byte](10))) + + val producer = createProducer( + plaintextBootstrapServers(brokers), + compressionType = compressionType) + try { + val futures = records.map(producer.send) + futures.foreach(_.get) + // advance the server time after each record sent to make sure the time changed when appendTime is used + mockTime.sleep(100) + val futures2 = records2.map(producer.send) + futures2.foreach(_.get) + mockTime.sleep(100) + val futures3 = records3.map(producer.send) + futures3.foreach(_.get) + } finally { + producer.close() + } + } + + def generateConfigs: Seq[KafkaConfig] = { + TestUtils.createBrokerConfigs(2).zipWithIndex.map{ case (props, index) => + // We use mock timer so the records can get removed if the test env is too busy to complete + // tests before kafka-log-retention. Hence, we disable the retention to avoid failed tests + props.setProperty(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, "-1") + props.setProperty(ServerLogConfigs.LOG_DIR_CONFIG, dataFolder(index)) + props + }.map(KafkaConfig.fromProps) + } +} + diff --git a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala index 2ac15a29e20bb..d89a83c7750f2 100644 --- a/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala +++ b/core/src/test/scala/integration/kafka/admin/RemoteTopicCrudTest.scala @@ -19,14 +19,24 @@ package kafka.admin import kafka.api.IntegrationTestHarness import kafka.server.KafkaConfig import kafka.utils.TestUtils -import org.apache.kafka.common.config.{ConfigException, TopicConfig} +import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.config.{ConfigException, ConfigResource, TopicConfig} +import org.apache.kafka.common.errors.{InvalidConfigurationException, UnknownTopicOrPartitionException} +import org.apache.kafka.common.utils.MockTime import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.server.log.remote.storage._ +import org.apache.kafka.server.log.remote.storage.{NoOpRemoteLogMetadataManager, NoOpRemoteStorageManager, RemoteLogManagerConfig, RemoteLogSegmentId, RemoteLogSegmentMetadata, RemoteLogSegmentState} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Tag, Test, TestInfo} +import org.junit.jupiter.api.function.Executable +import org.junit.jupiter.api.{BeforeEach, Tag, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.{CsvSource, ValueSource} -import java.util.Properties +import java.util +import java.util.concurrent.atomic.AtomicInteger +import java.util.{Collections, Optional, Properties} import scala.collection.Seq +import scala.concurrent.ExecutionException import scala.util.Random @Tag("integration") @@ -52,12 +62,410 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { @BeforeEach override def setUp(info: TestInfo): Unit = { + if (info.getTestMethod.get().getName.endsWith("SystemRemoteStorageIsDisabled")) { + sysRemoteStorageEnabled = false + } + if (info.getTestMethod.get().getName.equals("testTopicDeletion")) { + storageManagerClassName = classOf[MyRemoteStorageManager].getName + metadataManagerClassName = classOf[MyRemoteLogMetadataManager].getName + } super.setUp(info) testTopicName = s"${info.getTestMethod.get().getName}-${Random.alphanumeric.take(10).mkString}" } - @Test - def testClusterWideDisablementOfTieredStorageWithEnabledTieredTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithValidRetentionTime(quorum: String): Unit = { + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithValidRetentionSize(quorum: String): Unit = { + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "512") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "256") + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithInheritedLocalRetentionTime(quorum: String): Unit = { + // inherited local retention ms is 1000 + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1001") + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithInheritedLocalRetentionSize(quorum: String): Unit = { + // inherited local retention bytes is 1024 + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1025") + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithInvalidRetentionTime(quorum: String): Unit = { + // inherited local retention ms is 1000 + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") + assertThrowsException(classOf[InvalidConfigurationException], () => + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig)) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateRemoteTopicWithInvalidRetentionSize(quorum: String): Unit = { + // inherited local retention bytes is 1024 + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "512") + assertThrowsException(classOf[InvalidConfigurationException], () => + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig)) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateCompactedRemoteStorage(quorum: String): Unit = { + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact") + assertThrowsException(classOf[InvalidConfigurationException], () => + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig)) + } + + // `remote.log.delete.on.disable` and `remote.log.copy.disable` only works in KRaft mode. + @ParameterizedTest + @CsvSource(Array("kraft,true,true", "kraft,true,false", "kraft,false,true", "kraft,false,false")) + def testCreateRemoteTopicWithCopyDisabledAndDeleteOnDisable(quorum: String, copyDisabled: Boolean, deleteOnDisable: Boolean): Unit = { + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, copyDisabled.toString) + topicConfig.put(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, deleteOnDisable.toString) + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + verifyRemoteLogTopicConfigs(topicConfig) + } + + // `remote.log.delete.on.disable` only works in KRaft mode. + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicRetentionMsValidationWithRemoteCopyDisabled(quorum: String): Unit = { + val testTopicName2 = testTopicName + "2" + val testTopicName3 = testTopicName + "3" + val errorMsgMs = "When `remote.log.copy.disable` is set to true, the `local.retention.ms` and `retention.ms` " + + "must be set to the identical value because there will be no more logs copied to the remote storage." + + // 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.ms and retention.ms value, + // it should fail to create the topic + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") + + val admin = createAdminClient() + val err = assertThrowsException(classOf[InvalidConfigurationException], + () => TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, + numReplicationFactor, topicConfig = topicConfig)) + assertEquals(errorMsgMs, err.getMessage) + + // 2. change the local.retention.ms value to the same value as retention.ms should successfully create the topic + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 3. change the local.retention.ms value to "-2" should also successfully create the topic + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") + TestUtils.createTopicWithAdmin(admin, testTopicName2, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.ms and retention.ms value, + // it should successfully creates the topic. + topicConfig.clear() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "1000") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") + TestUtils.createTopicWithAdmin(admin, testTopicName3, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET), + )) + val err2 = assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get()) + assertEquals(errorMsgMs, err2.getMessage) + + // 6. alter the config to `remote.log.copy.disable=true` and local.retention.ms == retention.ms, it should work without error + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "1000"), + AlterConfigOp.OpType.SET), + )) + + admin.incrementalAlterConfigs(configs).all().get() + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicRetentionBytesValidationWithRemoteCopyDisabled(quorum: String): Unit = { + val testTopicName2 = testTopicName + "2" + val testTopicName3 = testTopicName + "3" + val errorMsgBytes = "When `remote.log.copy.disable` is set to true, the `local.retention.bytes` and `retention.bytes` " + + "must be set to the identical value because there will be no more logs copied to the remote storage." + + // 1. create a topic with `remote.log.copy.disable=true` and have different local.retention.bytes and retention.bytes value, + // it should fail to create the topic + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100") + topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") + + val admin = createAdminClient() + val err = assertThrowsException(classOf[InvalidConfigurationException], + () => TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, + numReplicationFactor, topicConfig = topicConfig)) + assertEquals(errorMsgBytes, err.getMessage) + + // 2. change the local.retention.bytes value to the same value as retention.bytes should successfully create the topic + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 3. change the local.retention.bytes value to "-2" should also successfully create the topic + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "-2") + TestUtils.createTopicWithAdmin(admin, testTopicName2, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 4. create a topic with `remote.log.copy.disable=false` and have different local.retention.bytes and retention.bytes value, + // it should successfully creates the topic. + topicConfig.clear() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100") + topicConfig.put(TopicConfig.RETENTION_BYTES_CONFIG, "1000") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "-2") + TestUtils.createTopicWithAdmin(admin, testTopicName3, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // 5. alter the config to `remote.log.copy.disable=true`, it should fail the config change + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET), + )) + val err2 = assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get()) + assertEquals(errorMsgBytes, err2.getMessage) + + // 6. alter the config to `remote.log.copy.disable=true` and local.retention.bytes == retention.bytes, it should work without error + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName3), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "1000"), + AlterConfigOp.OpType.SET), + )) + admin.incrementalAlterConfigs(configs).all().get() + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testEnableRemoteLogOnExistingTopicTest(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties() + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + Collections.singleton( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET)) + ) + admin.incrementalAlterConfigs(configs).all().get() + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testEnableRemoteLogWhenSystemRemoteStorageIsDisabled(quorum: String): Unit = { + val admin = createAdminClient() + + val topicConfigWithRemoteStorage = new Properties() + topicConfigWithRemoteStorage.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + val message = assertThrowsException(classOf[InvalidConfigurationException], + () => TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, + numReplicationFactor, topicConfig = topicConfigWithRemoteStorage)) + assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) + + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor) + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + Collections.singleton( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + AlterConfigOp.OpType.SET)) + ) + val errorMessage = assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get()) + assertTrue(errorMessage.getMessage.contains("Tiered Storage functionality is disabled in the broker")) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateTopicConfigWithValidRetentionTimeTest(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"), + AlterConfigOp.OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100"), + AlterConfigOp.OpType.SET) + )) + admin.incrementalAlterConfigs(configs).all().get() + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateTopicConfigWithValidRetentionSizeTest(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "200"), + AlterConfigOp.OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, "100"), + AlterConfigOp.OpType.SET) + )) + admin.incrementalAlterConfigs(configs).all().get() + verifyRemoteLogTopicConfigs(topicConfig) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateTopicConfigWithInheritedLocalRetentionTime(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // inherited local retention ms is 1000 + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "200"), + AlterConfigOp.OpType.SET), + )) + assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get()) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateTopicConfigWithInheritedLocalRetentionSize(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + // inherited local retention bytes is 1024 + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, "512"), + AlterConfigOp.OpType.SET), + )) + assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get(), "Invalid local retention size") + } + + // The remote storage config validation on controller level only works in KRaft + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateTopicConfigWithDisablingRemoteStorage(quorum: String): Unit = { + val admin = createAdminClient() + val topicConfig = new Properties + topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + TestUtils.createTopicWithAdmin(admin, testTopicName, brokers, controllerServers, numPartitions, numReplicationFactor, + topicConfig = topicConfig) + + val configs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + configs.put(new ConfigResource(ConfigResource.Type.TOPIC, testTopicName), + util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), + AlterConfigOp.OpType.SET), + )) + assertThrowsException(classOf[InvalidConfigurationException], + () => admin.incrementalAlterConfigs(configs).all().get(), "Disabling remote storage feature on the topic level is not supported.") + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTopicDeletion(quorum: String): Unit = { + MyRemoteStorageManager.deleteSegmentEventCounter.set(0) + val numPartitions = 2 + val topicConfig = new Properties() + topicConfig.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") + topicConfig.put(TopicConfig.RETENTION_MS_CONFIG, "200") + topicConfig.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, "100") + TestUtils.createTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers, numPartitions, brokerCount, + topicConfig = topicConfig) + TestUtils.deleteTopicWithAdmin(createAdminClient(), testTopicName, brokers, controllerServers) + assertThrowsException(classOf[UnknownTopicOrPartitionException], + () => TestUtils.describeTopic(createAdminClient(), testTopicName), "Topic should be deleted") + TestUtils.waitUntilTrue(() => + numPartitions * MyRemoteLogMetadataManager.segmentCountPerPartition == MyRemoteStorageManager.deleteSegmentEventCounter.get(), + "Remote log segments should be deleted only once by the leader") + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterWideDisablementOfTieredStorageWithEnabledTieredTopic(quorum: String): Unit = { val topicConfig = new Properties() topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") @@ -73,8 +481,9 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { faultHandler.setIgnore(true) } - @Test - def testClusterWithoutTieredStorageStartsSuccessfullyIfTopicWithTieringDisabled(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterWithoutTieredStorageStartsSuccessfullyIfTopicWithTieringDisabled(quorum: String): Unit = { val topicConfig = new Properties() topicConfig.setProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, false.toString) @@ -87,6 +496,63 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { recreateBrokers(startup = true) } + private def assertThrowsException(exceptionType: Class[_ <: Throwable], + executable: Executable, + message: String = ""): Throwable = { + assertThrows(exceptionType, () => { + try { + executable.execute() + } catch { + case e: ExecutionException => throw e.getCause + } + }, message) + } + + private def verifyRemoteLogTopicConfigs(topicConfig: Properties): Unit = { + TestUtils.waitUntilTrue(() => { + val logBuffer = brokers.flatMap(_.logManager.getLog(new TopicPartition(testTopicName, 0))) + var result = logBuffer.nonEmpty + if (result) { + if (topicConfig.containsKey(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG).toBoolean == + logBuffer.head.config.remoteStorageEnable() + } + if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG).toLong == + logBuffer.head.config.localRetentionBytes() + } + if (topicConfig.containsKey(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG).toLong == + logBuffer.head.config.localRetentionMs() + } + if (topicConfig.containsKey(TopicConfig.RETENTION_MS_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.RETENTION_MS_CONFIG).toLong == + logBuffer.head.config.retentionMs + } + if (topicConfig.containsKey(TopicConfig.RETENTION_BYTES_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.RETENTION_BYTES_CONFIG).toLong == + logBuffer.head.config.retentionSize + } + if (topicConfig.contains(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG).toBoolean == + logBuffer.head.config.remoteLogCopyDisable() + } + if (topicConfig.contains(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG)) { + result = result && + topicConfig.getProperty(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG).toBoolean == + logBuffer.head.config.remoteLogDeleteOnDisable() + } + } + result + }, s"Failed to update topic config $topicConfig") + } + private def overrideProps(): Properties = { val props = new Properties() props.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, sysRemoteStorageEnabled.toString) @@ -98,4 +564,40 @@ class RemoteTopicCrudTest extends IntegrationTestHarness { props.put(RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP, "1024") props } -} \ No newline at end of file +} + +object MyRemoteStorageManager { + val deleteSegmentEventCounter = new AtomicInteger(0) +} + +class MyRemoteStorageManager extends NoOpRemoteStorageManager { + import MyRemoteStorageManager._ + + override def deleteLogSegmentData(remoteLogSegmentMetadata: RemoteLogSegmentMetadata): Unit = { + deleteSegmentEventCounter.incrementAndGet() + } +} + +class MyRemoteLogMetadataManager extends NoOpRemoteLogMetadataManager { + + import MyRemoteLogMetadataManager._ + val time = new MockTime() + + override def listRemoteLogSegments(topicIdPartition: TopicIdPartition): util.Iterator[RemoteLogSegmentMetadata] = { + val segmentMetadataList = new util.ArrayList[RemoteLogSegmentMetadata]() + for (idx <- 0 until segmentCountPerPartition) { + val timestamp = time.milliseconds() + val startOffset = idx * recordsPerSegment + val endOffset = startOffset + recordsPerSegment - 1 + val segmentLeaderEpochs: util.Map[Integer, java.lang.Long] = Collections.singletonMap(0, 0L) + segmentMetadataList.add(new RemoteLogSegmentMetadata(new RemoteLogSegmentId(topicIdPartition, Uuid.randomUuid()), startOffset, endOffset, timestamp, 0, timestamp, segmentSize, Optional.empty(), RemoteLogSegmentState.COPY_SEGMENT_FINISHED, segmentLeaderEpochs)) + } + segmentMetadataList.iterator() + } +} + +object MyRemoteLogMetadataManager { + val segmentCountPerPartition = 10 + val recordsPerSegment = 100 + val segmentSize = 1024 +} diff --git a/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala index fafce17382c27..dc836352787b2 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractAuthorizerIntegrationTest.scala @@ -71,7 +71,6 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { val brokerId: Integer = 0 val topic = "topic" - val sourceTopic = "source-topic" val topicPattern = "topic.*" val transactionalId = "transactional.id" val producerId = 83392L @@ -81,16 +80,11 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { val tp = new TopicPartition(topic, part) val logDir = "logDir" val group = "my-group" - val shareGroup = "share-group" - val streamsGroup = "streams-group" val protocolType = "consumer" val protocolName = "consumer-range" val clusterResource = new ResourcePattern(CLUSTER, Resource.CLUSTER_NAME, LITERAL) val topicResource = new ResourcePattern(TOPIC, topic, LITERAL) - val sourceTopicResource = new ResourcePattern(TOPIC, sourceTopic, LITERAL) val groupResource = new ResourcePattern(GROUP, group, LITERAL) - val shareGroupResource = new ResourcePattern(GROUP, shareGroup, LITERAL) - val streamsGroupResource = new ResourcePattern(GROUP, streamsGroup, LITERAL) val transactionalIdResource = new ResourcePattern(TRANSACTIONAL_ID, transactionalId, LITERAL) producerConfig.setProperty(ProducerConfig.ACKS_CONFIG, "1") @@ -115,7 +109,6 @@ class AbstractAuthorizerIntegrationTest extends BaseRequestTest { properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1") properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") - properties.put(GroupCoordinatorConfig.CONSUMER_GROUP_REGEX_REFRESH_INTERVAL_MS_CONFIG, "10000") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, "1") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1") diff --git a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala index 6a60621308bc2..517614d84a11f 100644 --- a/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/AbstractConsumerTest.scala @@ -94,14 +94,12 @@ abstract class AbstractConsumerTest extends BaseRequestTest { def awaitNonEmptyRecords[K, V](consumer: Consumer[K, V], partition: TopicPartition, pollTimeoutMs: Long = 100): ConsumerRecords[K, V] = { - var result: ConsumerRecords[K, V] = null - TestUtils.pollRecordsUntilTrue(consumer, (polledRecords: ConsumerRecords[K, V]) => { - val hasRecords = !polledRecords.records(partition).isEmpty - if (hasRecords) result = polledRecords - hasRecords + if (polledRecords.records(partition).asScala.nonEmpty) + return polledRecords + false }, s"Consumer did not consume any messages for partition $partition before timeout.", JTestUtils.DEFAULT_MAX_WAIT_MS, pollTimeoutMs) - result + throw new IllegalStateException("Should have timed out before reaching here") } /** diff --git a/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala new file mode 100644 index 0000000000000..64cc259408e13 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/AdminClientRebootstrapTest.scala @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +class AdminClientRebootstrapTest extends RebootstrapTest { + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") + @ValueSource(booleans = Array(false, true)) + def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { + + server1.shutdown() + server1.awaitShutdown() + + val adminClient = createAdminClient(configOverrides = clientOverrides(useRebootstrapTriggerMs)) + + // Only the server 0 is available for the admin client during the bootstrap. + adminClient.listTopics().names().get() + + server0.shutdown() + server0.awaitShutdown() + server1.startup() + + // The server 0, originally cached during the bootstrap, is offline. + // However, the server 1 from the bootstrap list is online. + // Should be able to list topics again. + adminClient.listTopics().names().get() + + server1.shutdown() + server1.awaitShutdown() + server0.startup() + + // The same situation, but the server 1 has gone and server 0 is back. + adminClient.listTopics().names().get() + } +} diff --git a/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala new file mode 100644 index 0000000000000..b11cb96ef8ce0 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/AdminClientWithPoliciesIntegrationTest.scala @@ -0,0 +1,252 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package kafka.api + +import java.util +import java.util.{Collections, Properties} +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.{Logging, TestUtils} +import org.apache.kafka.clients.admin.AlterConfigOp.OpType +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsOptions, ConfigEntry} +import org.apache.kafka.common.config.{ConfigResource, SslConfigs, TopicConfig} +import org.apache.kafka.common.errors.{InvalidConfigurationException, InvalidRequestException, PolicyViolationException} +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.network.SocketServerConfigs +import org.apache.kafka.server.config.{ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.policy.AlterConfigPolicy +import org.apache.kafka.storage.internals.log.LogConfig +import org.apache.kafka.test.TestUtils.assertFutureThrows +import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertTrue} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +import scala.collection.mutable +import scala.jdk.CollectionConverters._ + +/** + * Tests AdminClient calls when the broker is configured with policies like AlterConfigPolicy, CreateTopicPolicy, etc. + */ +@Timeout(120) +class AdminClientWithPoliciesIntegrationTest extends KafkaServerTestHarness with Logging { + + import AdminClientWithPoliciesIntegrationTest._ + + var client: Admin = _ + val brokerCount = 3 + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + TestUtils.waitUntilBrokerMetadataIsPropagated(brokers) + } + + @AfterEach + override def tearDown(): Unit = { + if (client != null) + Utils.closeQuietly(client, "AdminClient") + super.tearDown() + } + + def createConfig: util.Map[String, Object] = + Map[String, Object](AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers()).asJava + + override def generateConfigs: collection.Seq[KafkaConfig] = { + val configs = TestUtils.createBrokerConfigs(brokerCount) + configs.foreach(overrideNodeConfigs) + configs.map(KafkaConfig.fromProps) + } + + override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { + val props = new Properties() + overrideNodeConfigs(props) + Seq(props) + } + + private def overrideNodeConfigs(props: Properties): Unit = { + props.put(ServerLogConfigs.ALTER_CONFIG_POLICY_CLASS_NAME_CONFIG, classOf[Policy]) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testValidAlterConfigs(quorum: String): Unit = { + client = Admin.create(createConfig) + // Create topics + val topic1 = "describe-alter-configs-topic-1" + val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) + val topicConfig1 = new Properties + val maxMessageBytes = "500000" + val retentionMs = "60000000" + topicConfig1.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageBytes) + topicConfig1.setProperty(TopicConfig.RETENTION_MS_CONFIG, retentionMs) + createTopic(topic1, 1, 1, topicConfig1) + + val topic2 = "describe-alter-configs-topic-2" + val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) + createTopic(topic2) + + PlaintextAdminIntegrationTest.checkValidAlterConfigs(client, this, topicResource1, topicResource2, maxMessageBytes, retentionMs) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidAlterConfigs(quorum: String): Unit = { + client = Admin.create(createConfig) + PlaintextAdminIntegrationTest.checkInvalidAlterConfigs(this, client) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidAlterConfigsDueToPolicy(quorum: String): Unit = { + client = Admin.create(createConfig) + + // Create topics + val topic1 = "invalid-alter-configs-due-to-policy-topic-1" + val topicResource1 = new ConfigResource(ConfigResource.Type.TOPIC, topic1) + createTopic(topic1) + + val topic2 = "invalid-alter-configs-due-to-policy-topic-2" + val topicResource2 = new ConfigResource(ConfigResource.Type.TOPIC, topic2) + createTopic(topic2) + + val topic3 = "invalid-alter-configs-due-to-policy-topic-3" + val topicResource3 = new ConfigResource(ConfigResource.Type.TOPIC, topic3) + createTopic(topic3) + + // Set a mutable broker config + val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, brokers.head.config.brokerId.toString) + var alterResult = client.incrementalAlterConfigs(Collections.singletonMap(brokerResource, + util.Arrays.asList(new AlterConfigOp(new ConfigEntry(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, "50000"), OpType.SET)))) + alterResult.all.get + assertEquals(Set(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG), validationsForResource(brokerResource).head.configs().keySet().asScala) + validations.clear() + + val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() + alterConfigs.put(topicResource1, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2"), OpType.SET) + )) + + alterConfigs.put(topicResource2, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.8"), OpType.SET), + )) + + alterConfigs.put(topicResource3, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "-1"), OpType.SET), + )) + + alterConfigs.put(brokerResource, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG, "12313"), OpType.SET), + )) + + // Alter configs: second is valid, the others are invalid + alterResult = client.incrementalAlterConfigs(alterConfigs) + + assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) + alterResult.values.get(topicResource2).get + assertFutureThrows(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertTrue(validationsForResource(brokerResource).isEmpty, + "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") + validations.clear() + + // Verify that the second resource was updated and the others were not + ensureConsistentKRaftMetadata() + var describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, topicResource3, brokerResource).asJava) + var configs = describeResult.all.get + assertEquals(4, configs.size) + + assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) + assertEquals(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT.toString, configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value) + + assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) + + assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) + + // Alter configs with validateOnly = true: only second is valid + alterConfigs.put(topicResource2, util.Arrays.asList( + new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.7"), OpType.SET), + )) + + alterResult = client.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) + + assertEquals(Set(topicResource1, topicResource2, topicResource3, brokerResource).asJava, alterResult.values.keySet) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[PolicyViolationException]) + alterResult.values.get(topicResource2).get + assertFutureThrows(alterResult.values.get(topicResource3), classOf[InvalidConfigurationException]) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) + assertTrue(validationsForResource(brokerResource).isEmpty, + "Should not see the broker resource in the AlterConfig policy when the broker configs are not being updated.") + validations.clear() + + // Verify that no resources are updated since validate_only = true + ensureConsistentKRaftMetadata() + describeResult = client.describeConfigs(Seq(topicResource1, topicResource2, topicResource3, brokerResource).asJava) + configs = describeResult.all.get + assertEquals(4, configs.size) + + assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) + assertEquals(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_DEFAULT.toString, configs.get(topicResource1).get(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG).value) + + assertEquals("0.8", configs.get(topicResource2).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) + + assertNull(configs.get(brokerResource).get(SslConfigs.SSL_TRUSTSTORE_PASSWORD_CONFIG).value) + + // Do an incremental alter config on the broker, ensure we don't see the broker config we set earlier in the policy + alterResult = client.incrementalAlterConfigs(Map( + brokerResource -> + Seq(new AlterConfigOp( + new ConfigEntry(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, "9999"), OpType.SET) + ).asJavaCollection + ).asJava) + alterResult.all.get + assertEquals(Set(SocketServerConfigs.MAX_CONNECTIONS_CONFIG), validationsForResource(brokerResource).head.configs().keySet().asScala) + } + +} + +object AdminClientWithPoliciesIntegrationTest { + + val validations = new mutable.ListBuffer[AlterConfigPolicy.RequestMetadata]() + + def validationsForResource(resource: ConfigResource): Seq[AlterConfigPolicy.RequestMetadata] = { + validations.filter { req => req.resource().equals(resource) }.toSeq + } + + class Policy extends AlterConfigPolicy { + + var configs: Map[String, _] = _ + var closed = false + + def configure(configs: util.Map[String, _]): Unit = { + validations.clear() + this.configs = configs.asScala.toMap + } + + def validate(requestMetadata: AlterConfigPolicy.RequestMetadata): Unit = { + validations.append(requestMetadata) + require(!closed, "Policy should not be closed") + require(configs.nonEmpty, "configure should have been called with non empty configs") + require(!requestMetadata.configs.isEmpty, "request configs should not be empty") + require(requestMetadata.resource.name.nonEmpty, "resource name should not be empty") + if (requestMetadata.configs.containsKey(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG)) + throw new PolicyViolationException("Min in sync replicas cannot be updated") + } + + def close(): Unit = closed = true + + } +} diff --git a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala index bfcc0bb0d4fca..fc74344c863e3 100644 --- a/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/AuthorizerIntegrationTest.scala @@ -17,12 +17,11 @@ import java.time.Duration import java.util import java.util.concurrent.{ExecutionException, Semaphore} import java.util.regex.Pattern -import java.util.{Comparator, Optional, Properties, UUID} +import java.util.{Collections, Optional, Properties} import kafka.utils.{TestInfoUtils, TestUtils} import kafka.utils.TestUtils.waitUntilTrue -import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ListGroupsOptions, NewTopic} +import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, NewTopic} import org.apache.kafka.clients.consumer._ -import org.apache.kafka.clients.consumer.internals.{StreamsRebalanceData, StreamsRebalanceListener} import org.apache.kafka.clients.producer._ import org.apache.kafka.common.acl.AclOperation._ import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} @@ -38,23 +37,25 @@ import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProt import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetForLeaderPartition, OffsetForLeaderTopic, OffsetForLeaderTopicCollection} -import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AlterPartitionReassignmentsRequestData, AlterReplicaLogDirsRequestData, AlterShareGroupOffsetsRequestData, ConsumerGroupDescribeRequestData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, CreateAclsRequestData, CreatePartitionsRequestData, CreateTopicsRequestData, DeleteAclsRequestData, DeleteGroupsRequestData, DeleteRecordsRequestData, DeleteShareGroupOffsetsRequestData, DeleteShareGroupStateRequestData, DeleteTopicsRequestData, DescribeClusterRequestData, DescribeConfigsRequestData, DescribeGroupsRequestData, DescribeLogDirsRequestData, DescribeProducersRequestData, DescribeShareGroupOffsetsRequestData, DescribeTransactionsRequestData, FetchResponseData, FindCoordinatorRequestData, HeartbeatRequestData, IncrementalAlterConfigsRequestData, InitializeShareGroupStateRequestData, JoinGroupRequestData, ListPartitionReassignmentsRequestData, ListTransactionsRequestData, MetadataRequestData, OffsetCommitRequestData, OffsetFetchRequestData, OffsetFetchResponseData, ProduceRequestData, ReadShareGroupStateRequestData, ReadShareGroupStateSummaryRequestData, ShareAcknowledgeRequestData, ShareFetchRequestData, ShareGroupDescribeRequestData, ShareGroupHeartbeatRequestData, StreamsGroupDescribeRequestData, StreamsGroupHeartbeatRequestData, StreamsGroupHeartbeatResponseData, SyncGroupRequestData, WriteShareGroupStateRequestData, WriteTxnMarkersRequestData} -import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AlterPartitionReassignmentsRequestData, AlterReplicaLogDirsRequestData, ConsumerGroupDescribeRequestData, ConsumerGroupHeartbeatRequestData, CreateAclsRequestData, CreatePartitionsRequestData, CreateTopicsRequestData, DeleteAclsRequestData, DeleteGroupsRequestData, DeleteRecordsRequestData, DeleteTopicsRequestData, DescribeClusterRequestData, DescribeConfigsRequestData, DescribeGroupsRequestData, DescribeLogDirsRequestData, DescribeProducersRequestData, DescribeTransactionsRequestData, FetchResponseData, FindCoordinatorRequestData, HeartbeatRequestData, IncrementalAlterConfigsRequestData, JoinGroupRequestData, ListPartitionReassignmentsRequestData, ListTransactionsRequestData, MetadataRequestData, OffsetCommitRequestData, ProduceRequestData, SyncGroupRequestData, WriteTxnMarkersRequestData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} +import org.apache.kafka.common.requests.OffsetFetchResponse.PartitionData import org.apache.kafka.common.requests._ import org.apache.kafka.common.resource.PatternType.{LITERAL, PREFIXED} import org.apache.kafka.common.resource.ResourceType._ import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourcePatternFilter, ResourceType} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.Utils -import org.apache.kafka.common.{ElectionType, IsolationLevel, KafkaException, TopicIdPartition, TopicPartition, Uuid, requests} +import org.apache.kafka.common.{ElectionType, IsolationLevel, KafkaException, TopicPartition, Uuid, requests} import org.apache.kafka.test.{TestUtils => JTestUtils} import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{CsvSource, MethodSource, ValueSource} + +import java.util.Collections.singletonList import org.apache.kafka.common.message.MetadataRequestData.MetadataRequestTopic import org.apache.kafka.common.message.WriteTxnMarkersRequestData.{WritableTxnMarker, WritableTxnMarkerTopic} import org.apache.kafka.coordinator.group.GroupConfig @@ -63,7 +64,6 @@ import org.junit.jupiter.api.function.Executable import scala.collection.mutable import scala.jdk.CollectionConverters._ -import scala.jdk.javaapi.OptionConverters class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val groupReadAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW))) @@ -71,13 +71,6 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val groupDeleteAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW))) val groupDescribeConfigsAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE_CONFIGS, ALLOW))) val groupAlterConfigsAcl = Map(groupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) - val shareGroupReadAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW))) - val shareGroupDescribeAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) - val shareGroupDeleteAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW))) - val shareGroupDescribeConfigsAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE_CONFIGS, ALLOW))) - val shareGroupAlterConfigsAcl = Map(shareGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) - val streamsGroupReadAcl = Map(streamsGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW))) - val streamsGroupDescribeAcl = Map(streamsGroupResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) val clusterAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CLUSTER_ACTION, ALLOW))) val clusterCreateAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW))) val clusterAlterAcl = Map(clusterResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER, ALLOW))) @@ -94,19 +87,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val topicAlterConfigsAcl = Map(topicResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER_CONFIGS, ALLOW))) val transactionIdWriteAcl = Map(transactionalIdResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW))) val transactionalIdDescribeAcl = Map(transactionalIdResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) - val sourceTopicDescribeAcl = Map(sourceTopicResource -> Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW))) val numRecords = 1 val requestKeyToError = (topicNames: Map[Uuid, String], version: Short) => Map[ApiKeys, Nothing => Errors]( ApiKeys.METADATA -> ((resp: requests.MetadataResponse) => resp.errors.asScala.find(_._1 == topic).getOrElse(("test", Errors.NONE))._2), ApiKeys.PRODUCE -> ((resp: requests.ProduceResponse) => { - val topicId = topicNames.find { case (_, topicName) => topicName == topic} - .map { case (topicId, _) => topicId } - .getOrElse(Uuid.ZERO_UUID) Errors.forCode( resp.data - .responses.find("", topicId) // version is always >= 13 no need to use topic name + .responses.find(topic) .partitionResponses.asScala.find(_.index == part).get .errorCode ) @@ -124,7 +113,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }), ApiKeys.OFFSET_COMMIT -> ((resp: requests.OffsetCommitResponse) => Errors.forCode( resp.data.topics().get(0).partitions().get(0).errorCode)), - ApiKeys.OFFSET_FETCH -> ((resp: requests.OffsetFetchResponse) => Errors.forCode(resp.group(group).errorCode())), + ApiKeys.OFFSET_FETCH -> ((resp: requests.OffsetFetchResponse) => resp.groupLevelError(group)), ApiKeys.FIND_COORDINATOR -> ((resp: FindCoordinatorResponse) => { Errors.forCode(resp.data.coordinators.asScala.find(g => group == g.key).head.errorCode) }), @@ -205,31 +194,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }), ApiKeys.CONSUMER_GROUP_HEARTBEAT -> ((resp: ConsumerGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), ApiKeys.CONSUMER_GROUP_DESCRIBE -> ((resp: ConsumerGroupDescribeResponse) => - Errors.forCode(resp.data.groups.asScala.find(g => group == g.groupId).head.errorCode)), - ApiKeys.SHARE_GROUP_HEARTBEAT -> ((resp: ShareGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), - ApiKeys.SHARE_GROUP_DESCRIBE -> ((resp: ShareGroupDescribeResponse) => - Errors.forCode(resp.data.groups.asScala.find(g => shareGroup == g.groupId).head.errorCode)), - ApiKeys.SHARE_FETCH -> ((resp: ShareFetchResponse) => Errors.forCode(resp.data.errorCode)), - ApiKeys.SHARE_ACKNOWLEDGE -> ((resp: ShareAcknowledgeResponse) => Errors.forCode(resp.data.errorCode)), - ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> ((resp: InitializeShareGroupStateResponse) => Errors.forCode( - resp.data.results.get(0).partitions.get(0).errorCode)), - ApiKeys.READ_SHARE_GROUP_STATE -> ((resp: ReadShareGroupStateResponse) => Errors.forCode( - resp.data.results.get(0).partitions.get(0).errorCode)), - ApiKeys.WRITE_SHARE_GROUP_STATE -> ((resp: WriteShareGroupStateResponse) => Errors.forCode( - resp.data.results.get(0).partitions.get(0).errorCode)), - ApiKeys.DELETE_SHARE_GROUP_STATE -> ((resp: DeleteShareGroupStateResponse) => Errors.forCode( - resp.data.results.get(0).partitions.get(0).errorCode)), - ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> ((resp: ReadShareGroupStateSummaryResponse) => Errors.forCode( - resp.data.results.get(0).partitions.get(0).errorCode)), - ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> ((resp: DescribeShareGroupOffsetsResponse) => Errors.forCode( - resp.data.groups.asScala.find(g => shareGroup == g.groupId).head.errorCode)), - ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> ((resp: DeleteShareGroupOffsetsResponse) => Errors.forCode( - resp.data.errorCode)), - ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> ((resp: AlterShareGroupOffsetsResponse) => Errors.forCode( - resp.data.errorCode)), - ApiKeys.STREAMS_GROUP_HEARTBEAT -> ((resp: StreamsGroupHeartbeatResponse) => Errors.forCode(resp.data.errorCode)), - ApiKeys.STREAMS_GROUP_DESCRIBE -> ((resp: StreamsGroupDescribeResponse) => - Errors.forCode(resp.data.groups.asScala.find(g => streamsGroup == g.groupId).head.errorCode)) + Errors.forCode(resp.data.groups.asScala.find(g => group == g.groupId).head.errorCode)) ) def findErrorForTopicId(id: Uuid, response: AbstractResponse): Errors = { @@ -285,42 +250,25 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ApiKeys.DESCRIBE_PRODUCERS -> topicReadAcl, ApiKeys.DESCRIBE_TRANSACTIONS -> transactionalIdDescribeAcl, ApiKeys.CONSUMER_GROUP_HEARTBEAT -> groupReadAcl, - ApiKeys.CONSUMER_GROUP_DESCRIBE -> groupDescribeAcl, - ApiKeys.SHARE_GROUP_HEARTBEAT -> (shareGroupReadAcl ++ topicDescribeAcl), - ApiKeys.SHARE_GROUP_DESCRIBE -> (shareGroupDescribeAcl ++ topicDescribeAcl), - ApiKeys.SHARE_FETCH -> (shareGroupReadAcl ++ topicReadAcl), - ApiKeys.SHARE_ACKNOWLEDGE -> (shareGroupReadAcl ++ topicReadAcl), - ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> clusterAcl, - ApiKeys.READ_SHARE_GROUP_STATE -> clusterAcl, - ApiKeys.WRITE_SHARE_GROUP_STATE -> clusterAcl, - ApiKeys.DELETE_SHARE_GROUP_STATE -> clusterAcl, - ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> clusterAcl, - ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> (shareGroupDescribeAcl ++ topicDescribeAcl), - ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> (shareGroupDeleteAcl ++ topicReadAcl), - ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> (shareGroupReadAcl ++ topicReadAcl), - ApiKeys.STREAMS_GROUP_HEARTBEAT -> (streamsGroupReadAcl ++ topicDescribeAcl), - ApiKeys.STREAMS_GROUP_DESCRIBE -> (streamsGroupDescribeAcl ++ topicDescribeAcl), + ApiKeys.CONSUMER_GROUP_DESCRIBE -> groupDescribeAcl ) private def createMetadataRequest(allowAutoTopicCreation: Boolean) = { - new requests.MetadataRequest.Builder(java.util.List.of(topic), allowAutoTopicCreation).build() + new requests.MetadataRequest.Builder(List(topic).asJava, allowAutoTopicCreation).build() } - private def createProduceRequest(name: String, id: Uuid, version: Short) = { + private def createProduceRequest = requests.ProduceRequest.builder(new ProduceRequestData() - .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(new ProduceRequestData.TopicProduceData() - .setName(name) - .setTopicId(id) - .setPartitionData(util.List.of( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) - .iterator)) - .setAcks(1.toShort) - .setTimeoutMs(5000)) - .build(version) - } + .setTopicData(new ProduceRequestData.TopicProduceDataCollection( + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) + .iterator)) + .setAcks(1.toShort) + .setTimeoutMs(5000)) + .build() private def createFetchRequest = { val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] @@ -336,13 +284,6 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { requests.FetchRequest.Builder.forConsumer(version, 100, Int.MaxValue, partitionMap).build() } - private def createFetchRequestWithEmptyTopicNameAndZeroTopicId(version: Short) = { - val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] - partitionMap.put(new TopicPartition("", part), - new requests.FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 100, Optional.of(27))) - requests.FetchRequest.Builder.forConsumer(version, 100, Int.MaxValue, partitionMap).build() - } - private def createFetchFollowerRequest = { val partitionMap = new util.LinkedHashMap[TopicPartition, requests.FetchRequest.PartitionData] partitionMap.put(tp, new requests.FetchRequest.PartitionData(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID), @@ -353,12 +294,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def createListOffsetsRequest = { requests.ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(java.util.List.of(new ListOffsetsTopic() + .setTargetTimes(List(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(java.util.List.of(new ListOffsetsPartition() + .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(0L) - .setCurrentLeaderEpoch(27)))) + .setCurrentLeaderEpoch(27)).asJava)).asJava ). build() } @@ -367,73 +308,35 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val epochs = new OffsetForLeaderTopicCollection() epochs.add(new OffsetForLeaderTopic() .setTopic(tp.topic) - .setPartitions(java.util.List.of(new OffsetForLeaderPartition() + .setPartitions(List(new OffsetForLeaderPartition() .setPartition(tp.partition) .setLeaderEpoch(7) - .setCurrentLeaderEpoch(27)))) + .setCurrentLeaderEpoch(27)).asJava)) OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs).build() } private def createOffsetFetchRequest: OffsetFetchRequest = { - OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(false) - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group) - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(tp.topic) - .setPartitionIndexes(util.List.of[Integer](tp.partition)) - )) - )), - false - ).build() + new requests.OffsetFetchRequest.Builder(group, false, List(tp).asJava, false).build() } private def createOffsetFetchRequestAllPartitions: OffsetFetchRequest = { - OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(false) - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(group) - .setTopics(null) - )), - false - ).build() + new requests.OffsetFetchRequest.Builder(group, false, null, false).build() } private def createOffsetFetchRequest(groupToPartitionMap: util.Map[String, util.List[TopicPartition]]): OffsetFetchRequest = { - OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setGroups(groupToPartitionMap.asScala.map { case (groupId, partitions) => - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setTopics( - if (partitions == null) - null - else - partitions.asScala.groupBy(_.topic).map { case (topic, partitions) => - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(topic) - .setPartitionIndexes(partitions.map(_.partition).map(Int.box).asJava) - }.toList.asJava) - }.toList.asJava), - false - ).build() + new requests.OffsetFetchRequest.Builder(groupToPartitionMap, false, false).build() } private def createFindCoordinatorRequest = { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) - .setCoordinatorKeys(util.List.of(group))).build() + .setCoordinatorKeys(Collections.singletonList(group))).build() } private def createJoinGroupRequest = { val protocolSet = new JoinGroupRequestProtocolCollection( - util.List.of(new JoinGroupRequestData.JoinGroupRequestProtocol() + Collections.singletonList(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName(protocolName) .setMetadata("test".getBytes()) ).iterator()) @@ -458,24 +361,24 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setProtocolType(protocolType) .setProtocolName(protocolName) - .setAssignments(util.List.of) + .setAssignments(Collections.emptyList()) ).build() } private def createDescribeGroupsRequest = { - new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(java.util.List.of(group))).build() + new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(List(group).asJava)).build() } private def createOffsetCommitRequest = { - requests.OffsetCommitRequest.Builder.forTopicNames( + new requests.OffsetCommitRequest.Builder( new OffsetCommitRequestData() .setGroupId(group) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setGenerationIdOrMemberEpoch(1) - .setTopics(util.List.of( + .setTopics(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName(topic) - .setPartitions(util.List.of( + .setPartitions(Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(part) .setCommittedOffset(0) @@ -505,19 +408,19 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID)).build() private def leaveGroupRequest = new LeaveGroupRequest.Builder( - group, util.List.of( + group, Collections.singletonList( new MemberIdentity() .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) )).build() private def deleteGroupsRequest = new DeleteGroupsRequest.Builder( new DeleteGroupsRequestData() - .setGroupsNames(util.List.of(group)) + .setGroupsNames(Collections.singletonList(group)) ).build() private def createTopicsRequest: CreateTopicsRequest = { new CreateTopicsRequest.Builder(new CreateTopicsRequestData().setTopics( - new CreatableTopicCollection(util.Set.of(new CreatableTopic(). + new CreatableTopicCollection(Collections.singleton(new CreatableTopic(). setName(topic).setNumPartitions(1). setReplicationFactor(1.toShort)).iterator))).build() } @@ -525,14 +428,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def deleteTopicsRequest: DeleteTopicsRequest = { new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopicNames(util.List.of(topic)) + .setTopicNames(Collections.singletonList(topic)) .setTimeoutMs(5000)).build() } private def deleteTopicsWithIdsRequest(topicId: Uuid): DeleteTopicsRequest = { new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopics(util.List.of( + .setTopics(Collections.singletonList( new DeleteTopicsRequestData.DeleteTopicState() .setTopicId(topicId))) .setTimeoutMs(5000)).build() @@ -541,21 +444,21 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def deleteRecordsRequest = new DeleteRecordsRequest.Builder( new DeleteRecordsRequestData() .setTimeoutMs(5000) - .setTopics(util.List.of(new DeleteRecordsRequestData.DeleteRecordsTopic() + .setTopics(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsTopic() .setName(tp.topic) - .setPartitions(util.List.of(new DeleteRecordsRequestData.DeleteRecordsPartition() + .setPartitions(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsPartition() .setPartitionIndex(tp.partition) .setOffset(0L)))))).build() private def describeConfigsRequest = - new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(util.List.of( + new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(Collections.singletonList( new DescribeConfigsRequestData.DescribeConfigsResource().setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic)))).build() private def alterConfigsRequest = new AlterConfigsRequest.Builder( - util.Map.of(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), - new AlterConfigsRequest.Config(util.Set.of( + Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), + new AlterConfigsRequest.Config(Collections.singleton( new AlterConfigsRequest.ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "1000000") ))), true).build() @@ -585,7 +488,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } private def describeGroupConfigsRequest = { - new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(util.List.of( + new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData().setResources(Collections.singletonList( new DescribeConfigsRequestData.DescribeConfigsResource().setResourceType(ConfigResource.Type.GROUP.id) .setResourceName(group)))).build() } @@ -593,7 +496,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def describeAclsRequest = new DescribeAclsRequest.Builder(AclBindingFilter.ANY).build() private def createAclsRequest: CreateAclsRequest = new CreateAclsRequest.Builder( - new CreateAclsRequestData().setCreations(util.List.of( + new CreateAclsRequestData().setCreations(Collections.singletonList( new CreateAclsRequestData.AclCreation() .setResourceType(ResourceType.TOPIC.code) .setResourceName("mytopic") @@ -605,7 +508,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ).build() private def deleteAclsRequest: DeleteAclsRequest = new DeleteAclsRequest.Builder( - new DeleteAclsRequestData().setFilters(util.List.of( + new DeleteAclsRequestData().setFilters(Collections.singletonList( new DeleteAclsRequestData.DeleteAclsFilter() .setResourceTypeFilter(ResourceType.TOPIC.code) .setResourceNameFilter(null) @@ -621,16 +524,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setPath(logDir) dir.topics.add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic() .setName(tp.topic) - .setPartitions(util.List.of(tp.partition))) + .setPartitions(Collections.singletonList(tp.partition))) val data = new AlterReplicaLogDirsRequestData() data.dirs.add(dir) new AlterReplicaLogDirsRequest.Builder(data).build() } - private def describeLogDirsRequest = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(new DescribeLogDirsRequestData.DescribableLogDirTopicCollection(util.Set.of( - new DescribeLogDirsRequestData.DescribableLogDirTopic().setTopic(tp.topic).setPartitions(util.List.of(tp.partition))).iterator()))).build() + private def describeLogDirsRequest = new DescribeLogDirsRequest.Builder(new DescribeLogDirsRequestData().setTopics(new DescribeLogDirsRequestData.DescribableLogDirTopicCollection(Collections.singleton( + new DescribeLogDirsRequestData.DescribableLogDirTopic().setTopic(tp.topic).setPartitions(Collections.singletonList(tp.partition))).iterator()))).build() - private def addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient(transactionalId, 1, 1, util.List.of(tp)).build() + private def addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient(transactionalId, 1, 1, Collections.singletonList(tp)).build() private def addOffsetsToTxnRequest = new AddOffsetsToTxnRequest.Builder( new AddOffsetsToTxnRequestData() @@ -642,56 +545,56 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { private def electLeadersRequest = new ElectLeadersRequest.Builder( ElectionType.PREFERRED, - util.Set.of(tp), + Collections.singleton(tp), 10000 ).build() private def describeProducersRequest: DescribeProducersRequest = new DescribeProducersRequest.Builder( new DescribeProducersRequestData() - .setTopics(java.util.List.of( + .setTopics(List( new DescribeProducersRequestData.TopicRequest() .setName(tp.topic) - .setPartitionIndexes(java.util.List.of(Int.box(tp.partition))) - )) + .setPartitionIndexes(List(Int.box(tp.partition)).asJava) + ).asJava) ).build() private def describeTransactionsRequest: DescribeTransactionsRequest = new DescribeTransactionsRequest.Builder( - new DescribeTransactionsRequestData().setTransactionalIds(java.util.List.of(transactionalId)) + new DescribeTransactionsRequestData().setTransactionalIds(List(transactionalId).asJava) ).build() private def alterPartitionReassignmentsRequest = new AlterPartitionReassignmentsRequest.Builder( new AlterPartitionReassignmentsRequestData().setTopics( - java.util.List.of(new AlterPartitionReassignmentsRequestData.ReassignableTopic() + List(new AlterPartitionReassignmentsRequestData.ReassignableTopic() .setName(topic) .setPartitions( - java.util.List.of(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition)) - )) + List(new AlterPartitionReassignmentsRequestData.ReassignablePartition().setPartitionIndex(tp.partition)).asJava + )).asJava ) ).build() private def listPartitionReassignmentsRequest = new ListPartitionReassignmentsRequest.Builder( new ListPartitionReassignmentsRequestData().setTopics( - java.util.List.of(new ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics() + List(new ListPartitionReassignmentsRequestData.ListPartitionReassignmentsTopics() .setName(topic) .setPartitionIndexes( - java.util.List.of(Integer.valueOf(tp.partition)) - )) + List(Integer.valueOf(tp.partition)).asJava + )).asJava ) ).build() private def writeTxnMarkersRequest: WriteTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( new WriteTxnMarkersRequestData() .setMarkers( - java.util.List.of(new WritableTxnMarker() + List(new WritableTxnMarker() .setProducerId(producerId) .setProducerEpoch(1) .setTransactionResult(false) - .setTopics(java.util.List.of(new WritableTxnMarkerTopic() + .setTopics(List(new WritableTxnMarkerTopic() .setName(tp.topic()) - .setPartitionIndexes(java.util.List.of(Integer.valueOf(tp.partition()))) - )) + .setPartitionIndexes(List(Integer.valueOf(tp.partition())).asJava) + ).asJava) .setCoordinatorEpoch(1) - ) + ).asJava ) ).build() @@ -699,187 +602,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { new ConsumerGroupHeartbeatRequestData() .setGroupId(group) .setMemberEpoch(0) - .setSubscribedTopicNames(java.util.List.of(topic))).build() + .setSubscribedTopicNames(List(topic).asJava)).build() private def consumerGroupDescribeRequest = new ConsumerGroupDescribeRequest.Builder( new ConsumerGroupDescribeRequestData() - .setGroupIds(java.util.List.of(group)) + .setGroupIds(List(group).asJava) .setIncludeAuthorizedOperations(false)).build() - private def shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId(shareGroup) - .setMemberEpoch(0) - .setSubscribedTopicNames(List(topic).asJava)).build(ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion) - - - private def shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( - new ShareGroupDescribeRequestData() - .setGroupIds(List(shareGroup).asJava) - .setIncludeAuthorizedOperations(false)).build(ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion) - - - private def createShareFetchRequest = { - val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - val send: Seq[TopicIdPartition] = Seq( - new TopicIdPartition(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID), new TopicPartition(topic, part))) - val ackMap = new util.HashMap[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - requests.ShareFetchRequest.Builder.forConsumer(shareGroup, metadata, 100, 0, Int.MaxValue, 500, 500, - send.asJava, Seq.empty.asJava, ackMap).build() - } - - private def shareAcknowledgeRequest = { - val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() - .setGroupId(shareGroup) - .setMemberId(Uuid.randomUuid().toString) - .setShareSessionEpoch(1) - .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( - new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(part) - .setAcknowledgementBatches(List( - new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(1) - .setAcknowledgeTypes(util.List.of(1.toByte)) - ).asJava) - ).iterator)) - ).iterator)) - - new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) - } - - private def initializeShareGroupStateRequest = new InitializeShareGroupStateRequest.Builder( - new InitializeShareGroupStateRequestData() - .setGroupId(shareGroup) - .setTopics(List(new InitializeShareGroupStateRequestData.InitializeStateData() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(List(new InitializeShareGroupStateRequestData.PartitionData() - .setPartition(part) - ).asJava) - ).asJava)).build() - - private def readShareGroupStateRequest = new ReadShareGroupStateRequest.Builder( - new ReadShareGroupStateRequestData() - .setGroupId(shareGroup) - .setTopics(List(new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(List(new ReadShareGroupStateRequestData.PartitionData() - .setPartition(part) - .setLeaderEpoch(0) - ).asJava) - ).asJava)).build() - - private def writeShareGroupStateRequest = new WriteShareGroupStateRequest.Builder( - new WriteShareGroupStateRequestData() - .setGroupId(shareGroup) - .setTopics(List(new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(List(new WriteShareGroupStateRequestData.PartitionData() - .setPartition(part) - ).asJava) - ).asJava)).build() - - private def deleteShareGroupStateRequest = new DeleteShareGroupStateRequest.Builder( - new DeleteShareGroupStateRequestData() - .setGroupId(shareGroup) - .setTopics(List(new DeleteShareGroupStateRequestData.DeleteStateData() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(List(new DeleteShareGroupStateRequestData.PartitionData() - .setPartition(part) - ).asJava) - ).asJava)).build() - - private def readShareGroupStateSummaryRequest = new ReadShareGroupStateSummaryRequest.Builder( - new ReadShareGroupStateSummaryRequestData() - .setGroupId(shareGroup) - .setTopics(List(new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions(List(new ReadShareGroupStateSummaryRequestData.PartitionData() - .setPartition(part) - .setLeaderEpoch(0) - ).asJava) - ).asJava)).build(ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY.latestVersion) - - private def describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequest.Builder( - new DescribeShareGroupOffsetsRequestData() - .setGroups(List(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestGroup() - .setGroupId(shareGroup) - .setTopics(List(new DescribeShareGroupOffsetsRequestData.DescribeShareGroupOffsetsRequestTopic() - .setTopicName(topic) - .setPartitions(List(Integer.valueOf(part) - ).asJava) - ).asJava) - ).asJava)).build(ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS.latestVersion) - - private def deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequest.Builder( - new DeleteShareGroupOffsetsRequestData() - .setGroupId(shareGroup) - .setTopics(List(new DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topic) - ).asJava)).build(ApiKeys.DELETE_SHARE_GROUP_OFFSETS.latestVersion) - - private def alterShareGroupOffsetsRequest = { - val data = new AlterShareGroupOffsetsRequestData - val topicCollection = new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopicCollection() - topicCollection.add(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topic) - .setPartitions(List(new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(part) - .setStartOffset(0) - ).asJava)) - data.setGroupId(shareGroup).setTopics(topicCollection) - new AlterShareGroupOffsetsRequest.Builder(data).build(ApiKeys.ALTER_SHARE_GROUP_OFFSETS.latestVersion) - } - - private def streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequest.Builder( - new StreamsGroupHeartbeatRequestData() - .setGroupId(streamsGroup) - .setMemberId("member-id") - .setMemberEpoch(0) - .setRebalanceTimeoutMs(1000) - .setActiveTasks(List.empty.asJava) - .setStandbyTasks(List.empty.asJava) - .setWarmupTasks(List.empty.asJava) - .setTopology(new StreamsGroupHeartbeatRequestData.Topology().setSubtopologies( - List(new StreamsGroupHeartbeatRequestData.Subtopology() - .setSourceTopics(List(topic).asJava) - ).asJava - ))).build(ApiKeys.STREAMS_GROUP_HEARTBEAT.latestVersion) - - private def streamsGroupHeartbeatRequest( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ) = new StreamsGroupHeartbeatRequest.Builder( - new StreamsGroupHeartbeatRequestData() - .setGroupId(streamsGroup) - .setMemberId("member-id") - .setMemberEpoch(0) - .setRebalanceTimeoutMs(1000) - .setActiveTasks(List.empty.asJava) - .setStandbyTasks(List.empty.asJava) - .setWarmupTasks(List.empty.asJava) - .setTopology(new StreamsGroupHeartbeatRequestData.Topology().setSubtopologies( - List(new StreamsGroupHeartbeatRequestData.Subtopology() - .setSourceTopics( - (if (topicAsSourceTopic) List(sourceTopic, topic) else List(sourceTopic)).asJava) - .setRepartitionSinkTopics( - (if (topicAsRepartitionSinkTopic) List(topic) else List.empty).asJava) - .setRepartitionSourceTopics( - (if (topicAsRepartitionSourceTopic) List(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(topic).setPartitions(3)) else List.empty).asJava) - .setStateChangelogTopics( - (if (topicAsStateChangelogTopics) List(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(topic)) else List.empty).asJava) - ).asJava - ))).build(ApiKeys.STREAMS_GROUP_HEARTBEAT.latestVersion) - - private def streamsGroupDescribeRequest = new StreamsGroupDescribeRequest.Builder( - new StreamsGroupDescribeRequestData() - .setGroupIds(List(streamsGroup).asJava) - .setIncludeAuthorizedOperations(false)).build(ApiKeys.STREAMS_GROUP_DESCRIBE.latestVersion) - private def sendRequests(requestKeyToRequest: mutable.Map[ApiKeys, AbstractRequest], topicExists: Boolean = true, topicNames: Map[Uuid, String] = getTopicNames()) = { for ((key, request) <- requestKeyToRequest) { @@ -894,8 +623,6 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // In KRaft mode, trying to delete a topic that doesn't exist but that you do have // describe permission for will give UNKNOWN_TOPIC_OR_PARTITION. true - } else if (resourceToAcls.size > 1) { - false } else { describeAcls == acls } @@ -910,16 +637,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testAuthorizationWithTopicExisting(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizationWithTopicExisting(quorum: String): Unit = { //First create the topic so we have a valid topic ID - createTopicWithBrokerPrincipal(topic) - val topicId = getTopicIds()(topic) - assertNotNull(topicId) + sendRequests(mutable.Map(ApiKeys.CREATE_TOPICS -> createTopicsRequest)) val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( ApiKeys.METADATA -> createMetadataRequest(allowAutoTopicCreation = true), - ApiKeys.PRODUCE -> createProduceRequest("", topicId, ApiKeys.PRODUCE.latestVersion()), + ApiKeys.PRODUCE -> createProduceRequest, ApiKeys.FETCH -> createFetchRequest, ApiKeys.LIST_OFFSETS -> createListOffsetsRequest, ApiKeys.OFFSET_FETCH -> createOffsetFetchRequest, @@ -951,38 +677,24 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ApiKeys.WRITE_TXN_MARKERS -> writeTxnMarkersRequest, ApiKeys.CONSUMER_GROUP_HEARTBEAT -> consumerGroupHeartbeatRequest, ApiKeys.CONSUMER_GROUP_DESCRIBE -> consumerGroupDescribeRequest, - ApiKeys.SHARE_GROUP_HEARTBEAT -> shareGroupHeartbeatRequest, - ApiKeys.SHARE_GROUP_DESCRIBE -> shareGroupDescribeRequest, - ApiKeys.SHARE_FETCH -> createShareFetchRequest, - ApiKeys.SHARE_ACKNOWLEDGE -> shareAcknowledgeRequest, - ApiKeys.INITIALIZE_SHARE_GROUP_STATE -> initializeShareGroupStateRequest, - ApiKeys.READ_SHARE_GROUP_STATE -> readShareGroupStateRequest, - ApiKeys.WRITE_SHARE_GROUP_STATE -> writeShareGroupStateRequest, - ApiKeys.DELETE_SHARE_GROUP_STATE -> deleteShareGroupStateRequest, - ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY -> readShareGroupStateSummaryRequest, - ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> describeShareGroupOffsetsRequest, - ApiKeys.DELETE_SHARE_GROUP_OFFSETS -> deleteShareGroupOffsetsRequest, - ApiKeys.ALTER_SHARE_GROUP_OFFSETS -> alterShareGroupOffsetsRequest, - ApiKeys.STREAMS_GROUP_HEARTBEAT -> streamsGroupHeartbeatRequest, - ApiKeys.STREAMS_GROUP_DESCRIBE -> streamsGroupDescribeRequest, - // Delete the topic last ApiKeys.DELETE_TOPICS -> deleteTopicsRequest ) - sendRequests(requestKeyToRequest) + sendRequests(requestKeyToRequest, true) } /* * even if the topic doesn't exist, request APIs should not leak the topic name */ - @Test - def testAuthorizationWithTopicNotExisting(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizationWithTopicNotExisting(quorum: String): Unit = { val id = Uuid.randomUuid() val topicNames = Map(id -> "topic") val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( ApiKeys.METADATA -> createMetadataRequest(allowAutoTopicCreation = false), - ApiKeys.PRODUCE -> createProduceRequest("", id, ApiKeys.PRODUCE.latestVersion()), + ApiKeys.PRODUCE -> createProduceRequest, ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, ApiKeys.FETCH.latestVersion()), ApiKeys.LIST_OFFSETS -> createListOffsetsRequest, ApiKeys.OFFSET_COMMIT -> createOffsetCommitRequest, @@ -994,88 +706,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ApiKeys.CREATE_PARTITIONS -> createPartitionsRequest, ApiKeys.DELETE_GROUPS -> deleteGroupsRequest, ApiKeys.OFFSET_FOR_LEADER_EPOCH -> offsetsForLeaderEpochRequest, - ApiKeys.ELECT_LEADERS -> electLeadersRequest, - ApiKeys.SHARE_FETCH -> createShareFetchRequest, - ApiKeys.SHARE_ACKNOWLEDGE -> shareAcknowledgeRequest, - ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS -> describeShareGroupOffsetsRequest, - ApiKeys.STREAMS_GROUP_HEARTBEAT -> streamsGroupHeartbeatRequest, - ApiKeys.STREAMS_GROUP_DESCRIBE -> streamsGroupDescribeRequest + ApiKeys.ELECT_LEADERS -> electLeadersRequest ) - sendRequests(requestKeyToRequest, topicExists = false, topicNames) - } - - /** - * Test that the produce request fails with TOPIC_AUTHORIZATION_FAILED if the client doesn't have permission - * and topic name is used in the request. Even if the topic doesn't exist, we return TOPIC_AUTHORIZATION_FAILED to - * prevent leaking the topic name. - * This case covers produce request version from oldest to 12. - * The newer version is covered by testAuthorizationWithTopicNotExisting and testAuthorizationWithTopicExisting. - */ - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testAuthorizationProduceVersionFromOldestTo12(withTopicExisting: Boolean): Unit = { - if (withTopicExisting) { - createTopicWithBrokerPrincipal(topic) - } - - for (version <- ApiKeys.PRODUCE.oldestVersion to 12) { - val request = createProduceRequest(topic, Uuid.ZERO_UUID, version.toShort) - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) - val errorCode = response.asInstanceOf[ProduceResponse] - .data() - .responses() - .find(topic, Uuid.ZERO_UUID) - .partitionResponses.asScala.find(_.index == part).get - .errorCode - - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for produce request version $version") - } - } - - /** - * Test that the produce request fails with UNKNOWN_TOPIC_ID if topic id is zero when request version >= 13. - * The produce request only supports topic id above version 13. - */ - @Test - def testZeroTopicIdForProduceVersionFrom13ToNewest(): Unit = { - for (version <- 13 to ApiKeys.PRODUCE.latestVersion()) { - val request = createProduceRequest("", Uuid.ZERO_UUID, version.toShort) - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) - val errorCode = response.asInstanceOf[ProduceResponse] - .data() - .responses() - .find("", Uuid.ZERO_UUID) - .partitionResponses.asScala.find(_.index == part).get - .errorCode - - assertEquals(Errors.UNKNOWN_TOPIC_ID.code(), errorCode, s"unexpected error for produce request version $version") - } - } - - /** - * Test that the produce request fails with TOPIC_AUTHORIZATION_FAILED if topic name is empty when request version <= 12. - * The produce request only supports topic name below version 12. - */ - @Test - def testEmptyTopicNameForProduceVersionFromOldestTo12(): Unit = { - for (version <- ApiKeys.PRODUCE.oldestVersion() to 12) { - val request = createProduceRequest("", Uuid.ZERO_UUID, version.toShort) - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) - val errorCode = response.asInstanceOf[ProduceResponse] - .data() - .responses() - .find("", Uuid.ZERO_UUID) - .partitionResponses.asScala.find(_.index == part).get - .errorCode - - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for produce request version $version") - } + sendRequests(requestKeyToRequest, false, topicNames) } @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testTopicIdAuthorization(withTopicExisting: Boolean): Unit = { + @CsvSource(value = Array("kraft,false", "kraft,true")) + def testTopicIdAuthorization(quorum: String, withTopicExisting: Boolean): Unit = { val topicId = if (withTopicExisting) { createTopicWithBrokerPrincipal(topic) getTopicIds()(topic) @@ -1123,77 +762,24 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - /** - * Test that the fetch request fails with TOPIC_AUTHORIZATION_FAILED if the client doesn't have permission - * and topic name is used in the request. Even if the topic doesn't exist, we return TOPIC_AUTHORIZATION_FAILED to - * prevent leaking the topic name. - * This case covers fetch request version from oldest to 12. - * The newer version is covered by testAuthorizationWithTopicNotExisting and testAuthorizationWithTopicExisting. + /* + * even if the topic doesn't exist, request APIs should not leak the topic name */ @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testAuthorizationFetchVersionFromOldestTo12(withTopicExisting: Boolean): Unit = { - if (withTopicExisting) { - createTopicWithBrokerPrincipal(topic) - } - + @ValueSource(strings = Array("kraft")) + def testAuthorizationFetchV12WithTopicNotExisting(quorum: String): Unit = { val id = Uuid.ZERO_UUID - val topicNames = Map(id -> topic) - for (version <- ApiKeys.FETCH.oldestVersion to 12) { - val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( - ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, version.toShort), - ) - - sendRequests(requestKeyToRequest, withTopicExisting, topicNames) - } - } - - /** - * Test that the fetch request fails with UNKNOWN_TOPIC_ID if topic id is zero when request version >= 13. - * The fetch request only supports topic id above version 13. - */ - @Test - def testZeroTopicIdForFetchVersionFrom13ToNewest(): Unit = { - for (version <- 13 to ApiKeys.FETCH.latestVersion) { - val request = createFetchRequestWithEmptyTopicNameAndZeroTopicId(version.toShort) - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) - - val errorCode = response.asInstanceOf[FetchResponse] - .data() - .responses() - .get(0) - .partitions() - .get(0) - .errorCode - - assertEquals(Errors.UNKNOWN_TOPIC_ID.code(), errorCode, s"unexpected error for fetch request version $version") - } - } + val topicNames = Map(id -> "topic") + val requestKeyToRequest = mutable.LinkedHashMap[ApiKeys, AbstractRequest]( + ApiKeys.FETCH -> createFetchRequestWithUnknownTopic(id, 12), + ) - /** - * Test that the fetch request fails with TOPIC_AUTHORIZATION_FAILED if topic name is empty when request version <= 12. - * The fetch request only supports topic name below version 12. - */ - @Test - def testEmptyTopicNameForFetchVersionFromOldestTo12(): Unit = { - for (version <- ApiKeys.FETCH.oldestVersion to 12) { - val request = createFetchRequestWithEmptyTopicNameAndZeroTopicId(version.toShort) - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) - - val errorCode = response.asInstanceOf[FetchResponse] - .data() - .responses() - .get(0) - .partitions() - .get(0) - .errorCode - - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), errorCode, s"unexpected error for fetch request version $version") - } + sendRequests(requestKeyToRequest, false, topicNames) } - @Test - def testCreateTopicAuthorizationWithClusterCreate(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicAuthorizationWithClusterCreate(quorum: String): Unit = { removeAllClientAcls() val resources = Set[ResourceType](TOPIC) @@ -1204,8 +790,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(createTopicsRequest, resources, isAuthorized = true) } - @Test - def testFetchFollowerRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchFollowerRequest(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val request = createFetchFollowerRequest @@ -1251,8 +838,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { succeededPartitionDatas.foreach(partitionData => assertEquals(MemoryRecords.EMPTY, partitionData.records)) } - @Test - def testIncrementalAlterConfigsRequestRequiresClusterPermissionForBrokerLogger(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsRequestRequiresClusterPermissionForBrokerLogger(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val data = new IncrementalAlterConfigsRequestData @@ -1274,8 +862,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resources, isAuthorized = true) } - @Test - def testOffsetsForLeaderEpochClusterPermission(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testOffsetsForLeaderEpochClusterPermission(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val request = offsetsForLeaderEpochRequest @@ -1292,44 +881,50 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resources, isAuthorized = true) } - @Test - def testProduceWithNoTopicAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceWithNoTopicAccess(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @Test - def testProduceWithTopicDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceWithTopicDescribe(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @Test - def testProduceWithTopicRead(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceWithTopicRead(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) } - @Test - def testProduceWithTopicWrite(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceWithTopicWrite(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) val producer = createProducer() sendRecords(producer, numRecords, tp) } - @Test - def testCreatePermissionOnTopicToWriteToNonExistentTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePermissionOnTopicToWriteToNonExistentTopic(quorum: String): Unit = { testCreatePermissionNeededToWriteToNonExistentTopic(TOPIC) } - @Test - def testCreatePermissionOnClusterToWriteToNonExistentTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePermissionOnClusterToWriteToNonExistentTopic(quorum: String): Unit = { testCreatePermissionNeededToWriteToNonExistentTopic(CLUSTER) } @@ -1338,7 +933,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), newTopicResource) val producer = createProducer() val e = assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) - assertEquals(util.Set.of(tp.topic), e.unauthorizedTopics()) + assertEquals(Collections.singleton(tp.topic), e.unauthorizedTopics()) val resource = if (resType == ResourceType.TOPIC) newTopicResource else clusterResource addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), resource) @@ -1346,9 +941,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRecords(producer, numRecords, tp) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeUsingAssignWithNoAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeUsingAssignWithNoAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1357,13 +952,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { removeAllClientAcls() val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSimpleConsumeWithOffsetLookupAndNoGroupAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSimpleConsumeWithOffsetLookupAndNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1376,14 +971,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // note this still depends on group access because we haven't set offsets explicitly, which means // they will first be fetched from the consumer coordinator (which requires group access) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[GroupAuthorizationException], () => consumeRecords(consumer)) assertEquals(group, e.groupId()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSimpleConsumeWithExplicitSeekAndNoGroupAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSimpleConsumeWithExplicitSeekAndNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1396,14 +991,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // in this case, we do an explicit seek, so there should be no need to query the coordinator at all // remove the group.id config to avoid coordinator created val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) - consumer.assign(java.util.List.of(tp)) - consumer.seekToBeginning(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) + consumer.seekToBeginning(List(tp).asJava) consumeRecords(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeWithoutTopicDescribeAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeWithoutTopicDescribeAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1413,15 +1008,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeWithTopicDescribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1433,14 +1028,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeWithTopicWrite(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeWithTopicWrite(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1452,14 +1047,14 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeWithTopicAndGroupRead(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1471,13 +1066,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumeRecords(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionWithNoTopicAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val assignSemaphore = new Semaphore(0) createTopicWithBrokerPrincipal(topic) @@ -1502,9 +1097,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertTrue(consumer.subscription.isEmpty) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionWithTopicDescribeOnlyAndGroupRead(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionWithTopicDescribeOnlyAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1517,12 +1112,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val consumer = createConsumer() consumer.subscribe(Pattern.compile(topicPattern)) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Collections.singleton(topic), e.unauthorizedTopics()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionWithTopicAndGroupRead(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { val assignSemaphore = new Semaphore(0) createTopicWithBrokerPrincipal(topic) @@ -1561,9 +1156,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertTrue(consumer.assignment().isEmpty) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionMatchingInternalTopic(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionMatchingInternalTopic(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1579,7 +1174,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // ensure that internal topics are not included if no permission consumer.subscribe(Pattern.compile(".*")) consumeRecords(consumer) - assertEquals(java.util.Set.of(topic), consumer.subscription) + assertEquals(Set(topic).asJava, consumer.subscription) // now authorize the user for the internal topic and verify that we can subscribe addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), new ResourcePattern(TOPIC, @@ -1591,9 +1186,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionMatchingInternalTopicWithDescribeOnlyPermission(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionMatchingInternalTopicWithDescribeOnlyPermission(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1614,12 +1209,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumeRecords(consumer) consumeRecords(consumer) }) - assertEquals(util.Set.of(GROUP_METADATA_TOPIC_NAME), e.unauthorizedTopics()) + assertEquals(Collections.singleton(GROUP_METADATA_TOPIC_NAME), e.unauthorizedTopics()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testPatternSubscriptionNotMatchingInternalTopic(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscriptionNotMatchingInternalTopic(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -1636,17 +1231,17 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { consumeRecords(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCreatePermissionOnTopicToReadFromNonExistentTopic(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCreatePermissionOnTopicToReadFromNonExistentTopic(quorum: String, groupProtocol: String): Unit = { testCreatePermissionNeededToReadFromNonExistentTopic("newTopic", Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), TOPIC) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCreatePermissionOnClusterToReadFromNonExistentTopic(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCreatePermissionOnClusterToReadFromNonExistentTopic(quorum: String, groupProtocol: String): Unit = { testCreatePermissionNeededToReadFromNonExistentTopic("newTopic", Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), CLUSTER) @@ -1658,10 +1253,10 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), newTopicResource) addAndVerifyAcls(groupReadAcl(groupResource), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(topicPartition)) + consumer.assign(List(topicPartition).asJava) val unauthorizedTopics = assertThrows(classOf[TopicAuthorizationException], () => (0 until 10).foreach(_ => consumer.poll(Duration.ofMillis(50L)))).unauthorizedTopics - assertEquals(util.Set.of(newTopic), unauthorizedTopics) + assertEquals(Collections.singleton(newTopic), unauthorizedTopics) val resource = if (resType == TOPIC) newTopicResource else clusterResource addAndVerifyAcls(acls, resource) @@ -1669,7 +1264,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50L)) brokers.forall { broker => - OptionConverters.toScala(broker.metadataCache.getLeaderAndIsr(newTopic, 0)) match { + broker.metadataCache.getLeaderAndIsr(newTopic, 0) match { case Some(partitionState) => FetchRequest.isValidBrokerId(partitionState.leader) case _ => false } @@ -1677,16 +1272,17 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }, "Partition metadata not propagated.") } - @Test - def testCreatePermissionMetadataRequestAutoCreate(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePermissionMetadataRequestAutoCreate(quorum: String): Unit = { val readAcls = topicReadAcl(topicResource) addAndVerifyAcls(readAcls, topicResource) - brokers.foreach(b => assertEquals(Optional.empty, b.metadataCache.getLeaderAndIsr(topic, 0))) + brokers.foreach(b => assertEquals(None, b.metadataCache.getLeaderAndIsr(topic, 0))) - val metadataRequest = new MetadataRequest.Builder(java.util.List.of(topic), true).build() + val metadataRequest = new MetadataRequest.Builder(List(topic).asJava, true).build() val metadataResponse = connectAndReceive[MetadataResponse](metadataRequest) - assertEquals(java.util.Set.of(), metadataResponse.topicsByError(Errors.NONE)) + assertEquals(Set().asJava, metadataResponse.topicsByError(Errors.NONE)) val createAcls = topicCreateAcl(topicResource) addAndVerifyAcls(createAcls, topicResource) @@ -1694,103 +1290,103 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // retry as topic being created can have MetadataResponse with Errors.LEADER_NOT_AVAILABLE TestUtils.retry(JTestUtils.DEFAULT_MAX_WAIT_MS) { val metadataResponse = connectAndReceive[MetadataResponse](metadataRequest) - assertEquals(java.util.Set.of(topic), metadataResponse.topicsByError(Errors.NONE)) + assertEquals(Set(topic).asJava, metadataResponse.topicsByError(Errors.NONE)) } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithNoAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() - assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) + assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithNoTopicAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithTopicWrite(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithTopicWrite(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithTopicDescribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) + assertThrows(classOf[TopicAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithNoGroupAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5)))) + assertThrows(classOf[GroupAuthorizationException], () => consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitWithTopicAndGroupRead(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5))) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5)).asJava) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchWithNoAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertThrows(classOf[TopicAuthorizationException], () => consumer.position(tp)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchWithNoGroupAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchWithNoGroupAccess(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertThrows(classOf[GroupAuthorizationException], () => consumer.position(tp)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchWithNoTopicAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertThrows(classOf[TopicAuthorizationException], () => consumer.position(tp)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchAllTopicPartitionsAuthorization(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchAllTopicPartitionsAuthorization(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) val offset = 15L addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(offset))) + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(offset)).asJava) removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) @@ -1801,36 +1397,31 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // without describe permission on the topic, we shouldn't be able to fetch offsets val offsetFetchRequest = createOffsetFetchRequestAllPartitions var offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - assertEquals(Errors.NONE, Errors.forCode(offsetFetchResponse.group(group).errorCode())) - assertTrue(offsetFetchResponse.group(group).topics.isEmpty) + assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(group)) + assertTrue(offsetFetchResponse.partitionDataMap(group).isEmpty) // now add describe permission on the topic and verify that the offset can be fetched addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - assertEquals(Errors.NONE, Errors.forCode(offsetFetchResponse.group(group).errorCode())) - assertEquals( - offset, - offsetFetchResponse.group(group).topics.asScala - .find(_.name == tp.topic) - .flatMap(_.partitions.asScala.find(_.partitionIndex == tp.partition).map(_.committedOffset)) - .getOrElse(-1L) - ) + assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(group)) + assertTrue(offsetFetchResponse.partitionDataMap(group).containsKey(tp)) + assertEquals(offset, offsetFetchResponse.partitionDataMap(group).get(tp).offset) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchMultipleGroupsAuthorization(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchMultipleGroupsAuthorization(quorum: String, groupProtocol: String): Unit = { val groups: Seq[String] = (1 to 5).map(i => s"group$i") val groupResources = groups.map(group => new ResourcePattern(GROUP, group, LITERAL)) val topics: Seq[String] = (1 to 3).map(i => s"topic$i") val topicResources = topics.map(topic => new ResourcePattern(TOPIC, topic, LITERAL)) - val topic1List = util.List.of(new TopicPartition(topics(0), 0)) - val topic1And2List = util.List.of( + val topic1List = singletonList(new TopicPartition(topics(0), 0)) + val topic1And2List = util.Arrays.asList( new TopicPartition(topics(0), 0), new TopicPartition(topics(1), 0), new TopicPartition(topics(1), 1)) - val allTopicsList = util.List.of( + val allTopicsList = util.Arrays.asList( new TopicPartition(topics(0), 0), new TopicPartition(topics(1), 0), new TopicPartition(topics(1), 1), @@ -1849,33 +1440,21 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { createTopicWithBrokerPrincipal(topics(0)) createTopicWithBrokerPrincipal(topics(1), numPartitions = 2) createTopicWithBrokerPrincipal(topics(2), numPartitions = 3) - groupResources.foreach { r => + groupResources.foreach(r => { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), r) - } - topicResources.foreach { t => + }) + topicResources.foreach(t => { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), t) - } + }) val offset = 15L val leaderEpoch: Optional[Integer] = Optional.of(1) val metadata = "metadata" - def assertResponse( - expected: OffsetFetchResponseData.OffsetFetchResponseGroup, - actual: OffsetFetchResponseData.OffsetFetchResponseGroup - ): Unit = { - actual.topics.sort((t1, t2) => t1.name.compareTo(t2.name)) - actual.topics.asScala.foreach { topic => - topic.partitions.sort(Comparator.comparingInt[OffsetFetchResponseData.OffsetFetchResponsePartitions](_.partitionIndex)) - } - - assertEquals(expected, actual) - } - def commitOffsets(tpList: util.List[TopicPartition]): Unit = { val consumer = createConsumer() consumer.assign(tpList) - val offsets = tpList.asScala.map { + val offsets = tpList.asScala.map{ tp => (tp, new OffsetAndMetadata(offset, leaderEpoch, metadata)) }.toMap.asJava consumer.commitSync(offsets) @@ -1891,373 +1470,175 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { removeAllClientAcls() + def verifyPartitionData(partitionData: OffsetFetchResponse.PartitionData): Unit = { + assertTrue(!partitionData.hasError) + assertEquals(offset, partitionData.offset) + assertEquals(metadata, partitionData.metadata) + assertEquals(leaderEpoch.get(), partitionData.leaderEpoch.get()) + } + + def verifyResponse(groupLevelResponse: Errors, + partitionData: util.Map[TopicPartition, PartitionData], + topicList: util.List[TopicPartition]): Unit = { + assertEquals(Errors.NONE, groupLevelResponse) + assertTrue(partitionData.size() == topicList.size()) + topicList.forEach(t => verifyPartitionData(partitionData.get(t))) + } + // test handling partial errors, where one group is fully authorized, some groups don't have // the right topic authorizations, and some groups have no authorization addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(0)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(1)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(3)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(0)) - val offsetFetchRequest = createOffsetFetchRequest(groupToPartitionMap) var offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - - offsetFetchResponse.data.groups.forEach { g => - g.groupId match { + offsetFetchResponse.data().groups().forEach(g => + g.groupId() match { case "group1" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + verifyResponse(offsetFetchResponse.groupLevelError(groups(0)), offsetFetchResponse + .partitionDataMap(groups(0)), topic1List) case "group2" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(1)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(groups(1))) + val group2Response = offsetFetchResponse.partitionDataMap(groups(1)) + assertTrue(group2Response.size() == 3) + assertTrue(group2Response.keySet().containsAll(topic1And2List)) + verifyPartitionData(group2Response.get(topic1And2List.get(0))) + assertTrue(group2Response.get(topic1And2List.get(1)).hasError) + assertTrue(group2Response.get(topic1And2List.get(2)).hasError) + assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group2Response.get(topic1And2List.get(1))) + assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group2Response.get(topic1And2List.get(2))) case "group3" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), - offsetFetchResponse.group(g.groupId) - ) - + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, offsetFetchResponse.groupLevelError(groups(2))) + assertTrue(offsetFetchResponse.partitionDataMap(groups(2)).size() == 0) case "group4" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + verifyResponse(offsetFetchResponse.groupLevelError(groups(3)), offsetFetchResponse + .partitionDataMap(groups(3)), topic1List) case "group5" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), - offsetFetchResponse.group(g.groupId) - ) - } - } + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED, offsetFetchResponse.groupLevelError(groups(4))) + assertTrue(offsetFetchResponse.partitionDataMap(groups(4)).size() == 0) + }) // test that after adding some of the ACLs, we get no group level authorization errors, but // still get topic level authorization errors for topics we don't have ACLs for addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(2)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResources(4)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(1)) - offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - - offsetFetchResponse.data.groups.forEach { g => - g.groupId match { + offsetFetchResponse.data().groups().forEach(g => + g.groupId() match { case "group1" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + verifyResponse(offsetFetchResponse.groupLevelError(groups(0)), offsetFetchResponse + .partitionDataMap(groups(0)), topic1List) case "group2" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(1)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + verifyResponse(offsetFetchResponse.groupLevelError(groups(1)), offsetFetchResponse + .partitionDataMap(groups(1)), topic1And2List) case "group3" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(1)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(2)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(2) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setCommittedOffset(OffsetFetchResponse.INVALID_OFFSET) - .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) - .setMetadata(OffsetFetchResponse.NO_METADATA) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + assertEquals(Errors.NONE, offsetFetchResponse.groupLevelError(groups(2))) + val group3Response = offsetFetchResponse.partitionDataMap(groups(2)) + assertTrue(group3Response.size() == 6) + assertTrue(group3Response.keySet().containsAll(allTopicsList)) + verifyPartitionData(group3Response.get(allTopicsList.get(0))) + verifyPartitionData(group3Response.get(allTopicsList.get(1))) + verifyPartitionData(group3Response.get(allTopicsList.get(2))) + assertTrue(group3Response.get(allTopicsList.get(3)).hasError) + assertTrue(group3Response.get(allTopicsList.get(4)).hasError) + assertTrue(group3Response.get(allTopicsList.get(5)).hasError) + assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(3))) + assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(4))) + assertEquals(OffsetFetchResponse.UNAUTHORIZED_PARTITION, group3Response.get(allTopicsList.get(5))) case "group4" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(1)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - + verifyResponse(offsetFetchResponse.groupLevelError(groups(3)), offsetFetchResponse + .partitionDataMap(groups(3)), topic1And2List) case "group5" => - assertResponse( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(g.groupId) - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(0)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(topics(1)) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(offset) - .setCommittedLeaderEpoch(leaderEpoch.get) - .setMetadata(metadata) - ).asJava) - ).asJava), - offsetFetchResponse.group(g.groupId) - ) - } - } + verifyResponse(offsetFetchResponse.groupLevelError(groups(4)), offsetFetchResponse + .partitionDataMap(groups(4)), topic1And2List) + }) // test that after adding all necessary ACLs, we get no partition level or group level errors // from the offsetFetch response addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResources(2)) offsetFetchResponse = connectAndReceive[OffsetFetchResponse](offsetFetchRequest) - offsetFetchResponse.data.groups.forEach { group => - assertEquals(Errors.NONE.code, group.errorCode) - group.topics.forEach { topic => - topic.partitions.forEach { partition => - assertEquals(Errors.NONE.code, partition.errorCode) - } - } - } + offsetFetchResponse.data.groups.asScala.map(_.groupId).foreach( groupId => + verifyResponse(offsetFetchResponse.groupLevelError(groupId), offsetFetchResponse.partitionDataMap(groupId), partitionMap(groupId)) + ) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchTopicDescribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumer.position(tp) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetFetchWithTopicAndGroupRead(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetFetchWithTopicAndGroupRead(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumer.position(tp) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMetadataWithNoTopicAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMetadataWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() assertThrows(classOf[TopicAuthorizationException], () => consumer.partitionsFor(topic)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMetadataWithTopicDescribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMetadataWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() consumer.partitionsFor(topic) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListOffsetsWithNoTopicAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testListOffsetsWithNoTopicAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() - assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(java.util.Set.of(tp))) + assertThrows(classOf[TopicAuthorizationException], () => consumer.endOffsets(Set(tp).asJava)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListOffsetsWithTopicDescribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testListOffsetsWithTopicDescribe(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val consumer = createConsumer() - consumer.endOffsets(java.util.Set.of(tp)) + consumer.endOffsets(Set(tp).asJava) } - @Test - def testDescribeGroupApiWithNoGroupAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeGroupApiWithNoGroupAcl(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) - val result = createAdminClient().describeConsumerGroups(java.util.List.of(group)) - JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.describedGroups().get(group)) + val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) + JTestUtils.assertFutureThrows(result.describedGroups().get(group), classOf[GroupAuthorizationException]) } - @Test - def testDescribeGroupApiWithGroupDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeGroupApiWithGroupDescribe(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) - val result = createAdminClient().describeConsumerGroups(java.util.List.of(group)) - JTestUtils.assertFutureThrows(classOf[GroupIdNotFoundException], result.describedGroups().get(group)) + val result = createAdminClient().describeConsumerGroups(Seq(group).asJava) + JTestUtils.assertFutureThrows(result.describedGroups().get(group), classOf[GroupIdNotFoundException]) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListGroupApiWithAndWithoutListGroupAcls(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testListGroupApiWithAndWithoutListGroupAcls(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) // write some record to the topic @@ -2271,13 +1652,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), new ResourcePattern(GROUP, group2, LITERAL)) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.subscribe(util.Set.of(topic)) + consumer.subscribe(Collections.singleton(topic)) consumeRecords(consumer) val otherConsumerProps = new Properties otherConsumerProps.put(ConsumerConfig.GROUP_ID_CONFIG, group2) val otherConsumer = createConsumer(configOverrides = otherConsumerProps) - otherConsumer.subscribe(util.Set.of(topic)) + otherConsumer.subscribe(Collections.singleton(topic)) consumeRecords(otherConsumer) val adminClient = createAdminClient() @@ -2286,118 +1667,121 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), clusterResource) // it should list both groups (due to cluster describe permission) - assertEquals(Set(group, group2), adminClient.listGroups(ListGroupsOptions.forConsumerGroups()).all().get().asScala.map(_.groupId()).toSet) + assertEquals(Set(group, group2), adminClient.listConsumerGroups().all().get().asScala.map(_.groupId()).toSet) // now replace cluster describe with group read permission removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) // it should list only one group now - val groupList = adminClient.listGroups(ListGroupsOptions.forConsumerGroups()).all().get().asScala.toList + val groupList = adminClient.listConsumerGroups().all().get().asScala.toList assertEquals(1, groupList.length) assertEquals(group, groupList.head.groupId) // now remove all acls and verify describe group access is required to list any group removeAllClientAcls() - val listGroupResult = adminClient.listGroups(ListGroupsOptions.forConsumerGroups()) + val listGroupResult = adminClient.listConsumerGroups() assertEquals(List(), listGroupResult.errors().get().asScala.toList) assertEquals(List(), listGroupResult.all().get().asScala.toList) otherConsumer.close() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteGroupApiWithDeleteGroupAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteGroupApiWithDeleteGroupAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) - createAdminClient().deleteConsumerGroups(java.util.List.of(group)).deletedGroups().get(group).get() + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) + createAdminClient().deleteConsumerGroups(Seq(group).asJava).deletedGroups().get(group).get() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteGroupApiWithNoDeleteGroupAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteGroupApiWithNoDeleteGroupAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) - val result = createAdminClient().deleteConsumerGroups(java.util.List.of(group)) - JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.deletedGroups().get(group)) + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) + val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) + JTestUtils.assertFutureThrows(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) } - @Test - def testDeleteGroupApiWithNoDeleteGroupAcl2(): Unit = { - val result = createAdminClient().deleteConsumerGroups(java.util.List.of(group)) - JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.deletedGroups().get(group)) + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteGroupApiWithNoDeleteGroupAcl2(quorum: String): Unit = { + val result = createAdminClient().deleteConsumerGroups(Seq(group).asJava) + JTestUtils.assertFutureThrows(result.deletedGroups().get(group), classOf[GroupAuthorizationException]) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteGroupOffsetsWithAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteGroupOffsetsWithAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) consumer.close() - val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) + val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) assertNull(result.partitionResult(tp).get()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteGroupOffsetsWithoutDeleteAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteGroupOffsetsWithoutDeleteAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) consumer.close() - val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) - JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.all()) + val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + JTestUtils.assertFutureThrows(result.all(), classOf[GroupAuthorizationException]) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteGroupOffsetsWithDeleteAclWithoutTopicAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteGroupOffsetsWithDeleteAclWithoutTopicAcl(quorum: String, groupProtocol: String): Unit = { createTopicWithBrokerPrincipal(topic) // Create the consumer group addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) - consumer.commitSync(java.util.Map.of(tp, new OffsetAndMetadata(5, ""))) + consumer.assign(List(tp).asJava) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(5, "")).asJava) consumer.close() // Remove the topic ACL & Check that it does not work without it removeAllClientAcls() addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), groupResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) - val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) - JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], result.all()) - JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], result.partitionResult(tp)) + val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + JTestUtils.assertFutureThrows(result.all(), classOf[TopicAuthorizationException]) + JTestUtils.assertFutureThrows(result.partitionResult(tp), classOf[TopicAuthorizationException]) } - @Test - def testDeleteGroupOffsetsWithNoAcl(): Unit = { - val result = createAdminClient().deleteConsumerGroupOffsets(group, java.util.Set.of(tp)) - JTestUtils.assertFutureThrows(classOf[GroupAuthorizationException], result.all()) + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteGroupOffsetsWithNoAcl(quorum: String): Unit = { + val result = createAdminClient().deleteConsumerGroupOffsets(group, Set(tp).asJava) + JTestUtils.assertFutureThrows(result.all(), classOf[GroupAuthorizationException]) } - @Test - def testIncrementalAlterGroupConfigsWithAlterAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterGroupConfigsWithAlterAcl(quorum: String): Unit = { addAndVerifyAcls(groupAlterConfigsAcl(groupResource), groupResource) val request = incrementalAlterGroupConfigsRequest @@ -2405,8 +1789,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testIncrementalAlterGroupConfigsWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterGroupConfigsWithOperationAll(quorum: String): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -2415,8 +1800,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testIncrementalAlterGroupConfigsWithoutAlterAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterGroupConfigsWithoutAlterAcl(quorum: String): Unit = { removeAllClientAcls() val request = incrementalAlterGroupConfigsRequest @@ -2424,8 +1810,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testDescribeGroupConfigsWithDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeGroupConfigsWithDescribeAcl(quorum: String): Unit = { addAndVerifyAcls(groupDescribeConfigsAcl(groupResource), groupResource) val request = describeGroupConfigsRequest @@ -2433,8 +1820,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testDescribeGroupConfigsWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeGroupConfigsWithOperationAll(quorum: String): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) @@ -2443,8 +1831,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testDescribeGroupConfigsWithoutDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeGroupConfigsWithoutDescribeAcl(quorum: String): Unit = { removeAllClientAcls() val request = describeGroupConfigsRequest @@ -2452,37 +1841,42 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testUnauthorizedDeleteTopicsWithoutDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedDeleteTopicsWithoutDescribe(quorum: String): Unit = { val deleteResponse = connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteResponse.data.responses.find(topic).errorCode) } - @Test - def testUnauthorizedDeleteTopicsWithDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedDeleteTopicsWithDescribe(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val deleteResponse = connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteResponse.data.responses.find(topic).errorCode) } - @Test - def testDeleteTopicsWithWildCardAuth(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteTopicsWithWildCardAuth(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val deleteResponse = connectAndReceive[DeleteTopicsResponse](deleteTopicsRequest) assertEquals(Errors.NONE.code, deleteResponse.data.responses.find(topic).errorCode) } - @Test - def testUnauthorizedDeleteRecordsWithoutDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedDeleteRecordsWithoutDescribe(quorum: String): Unit = { val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, deleteRecordsResponse.data.topics.asScala.head. partitions.asScala.head.errorCode) } - @Test - def testUnauthorizedDeleteRecordsWithDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedDeleteRecordsWithDescribe(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), topicResource) val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) @@ -2490,8 +1884,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { partitions.asScala.head.errorCode) } - @Test - def testDeleteRecordsWithWildCardAuth(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteRecordsWithWildCardAuth(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DELETE, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val deleteRecordsResponse = connectAndReceive[DeleteRecordsResponse](deleteRecordsRequest) @@ -2499,35 +1894,40 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { partitions.asScala.head.errorCode) } - @Test - def testUnauthorizedCreatePartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedCreatePartitions(quorum: String): Unit = { val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest) assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, createPartitionsResponse.data.results.asScala.head.errorCode) } - @Test - def testCreatePartitionsWithWildCardAuth(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePartitionsWithWildCardAuth(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALTER, ALLOW)), new ResourcePattern(TOPIC, "*", LITERAL)) val createPartitionsResponse = connectAndReceive[CreatePartitionsResponse](createPartitionsRequest) assertEquals(Errors.NONE.code, createPartitionsResponse.data.results.asScala.head.errorCode) } - @Test - def testTransactionalProducerInitTransactionsNoWriteTransactionalIdAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTransactionalProducerInitTransactionsNoWriteTransactionalIdAcl(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, DESCRIBE, ALLOW)), transactionalIdResource) val producer = buildTransactionalProducer() assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.initTransactions()) } - @Test - def testTransactionalProducerInitTransactionsNoDescribeTransactionalIdAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTransactionalProducerInitTransactionsNoDescribeTransactionalIdAcl(quorum: String): Unit = { val producer = buildTransactionalProducer() assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.initTransactions()) } - @Test - def testSendOffsetsWithNoConsumerGroupDescribeAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSendOffsetsWithNoConsumerGroupDescribeAccess(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CLUSTER_ACTION, ALLOW)), clusterResource) @@ -2538,11 +1938,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.beginTransaction() assertThrows(classOf[GroupAuthorizationException], - () => producer.sendOffsetsToTransaction(java.util.Map.of(tp, new OffsetAndMetadata(0L)), new ConsumerGroupMetadata(group))) + () => producer.sendOffsetsToTransaction(Map(tp -> new OffsetAndMetadata(0L)).asJava, new ConsumerGroupMetadata(group))) } - @Test - def testSendOffsetsWithNoConsumerGroupWriteAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSendOffsetsWithNoConsumerGroupWriteAccess(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2552,11 +1953,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.beginTransaction() assertThrows(classOf[GroupAuthorizationException], - () => producer.sendOffsetsToTransaction(java.util.Map.of(tp, new OffsetAndMetadata(0L)), new ConsumerGroupMetadata(group))) + () => producer.sendOffsetsToTransaction(Map(tp -> new OffsetAndMetadata(0L)).asJava, new ConsumerGroupMetadata(group))) } - @Test - def testIdempotentProducerNoIdempotentWriteAclInInitProducerId(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIdempotentProducerNoIdempotentWriteAclInInitProducerId(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) assertIdempotentSendAuthorizationFailure() @@ -2593,8 +1995,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertClusterAuthFailure() } - @Test - def testIdempotentProducerNoIdempotentWriteAclInProduce(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIdempotentProducerNoIdempotentWriteAclInProduce(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, IDEMPOTENT_WRITE, ALLOW)), clusterResource) @@ -2621,15 +2024,17 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertTrue(e.getCause.isInstanceOf[TopicAuthorizationException]) } - @Test - def shouldInitTransactionsWhenAclSet(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldInitTransactionsWhenAclSet(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) val producer = buildTransactionalProducer() producer.initTransactions() } - @Test - def testTransactionalProducerTopicAuthorizationExceptionInSendCallback(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTransactionalProducerTopicAuthorizationExceptionInSendCallback(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2640,12 +2045,13 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.beginTransaction() val future = producer.send(new ProducerRecord(tp.topic, tp.partition, "1".getBytes, "1".getBytes)) - val e = JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], future) + val e = JTestUtils.assertFutureThrows(future, classOf[TopicAuthorizationException]) assertEquals(Set(topic), e.unauthorizedTopics.asScala) } - @Test - def testTransactionalProducerTopicAuthorizationExceptionInCommit(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTransactionalProducerTopicAuthorizationExceptionInCommit(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2661,8 +2067,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { }) } - @Test - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessDuringSend(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessDuringSend(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2672,11 +2079,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) producer.beginTransaction() val future = producer.send(new ProducerRecord(tp.topic, tp.partition, "1".getBytes, "1".getBytes)) - JTestUtils.assertFutureThrows(classOf[TransactionalIdAuthorizationException], future) + JTestUtils.assertFutureThrows(future, classOf[TransactionalIdAuthorizationException]) } - @Test - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnEndTransaction(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnEndTransaction(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) @@ -2689,8 +2097,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertThrows(classOf[TransactionalIdAuthorizationException], () => producer.commitTransaction()) } - @Test - def testListTransactionsAuthorization(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListTransactionsAuthorization(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2722,8 +2131,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertListTransactionResult(expectedTransactionalIds = Set(transactionalId)) } - @Test - def shouldNotIncludeUnauthorizedTopicsInDescribeTransactionsResponse(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldNotIncludeUnauthorizedTopicsInDescribeTransactionsResponse(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2744,8 +2154,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertEquals(List.empty, transactionStateData.topics.asScala.toList) } - @Test - def shouldSuccessfullyAbortTransactionAfterTopicAuthorizationException(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldSuccessfullyAbortTransactionAfterTopicAuthorizationException(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2757,14 +2168,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.send(new ProducerRecord(tp.topic, tp.partition, "1".getBytes, "1".getBytes)).get // try and add a partition resulting in TopicAuthorizationException val future = producer.send(new ProducerRecord("otherTopic", 0, "1".getBytes, "1".getBytes)) - val e = JTestUtils.assertFutureThrows(classOf[TopicAuthorizationException], future) + val e = JTestUtils.assertFutureThrows(future, classOf[TopicAuthorizationException]) assertEquals(Set("otherTopic"), e.unauthorizedTopics.asScala) // now rollback producer.abortTransaction() } - @Test - def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnSendOffsetsToTxn(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldThrowTransactionalIdAuthorizationExceptionWhenNoTransactionAccessOnSendOffsetsToTxn(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), transactionalIdResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), groupResource) val producer = buildTransactionalProducer() @@ -2774,14 +2186,15 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { // In transaction V2, the server receives the offset commit request first, so the error is GroupAuthorizationException // instead of TransactionalIdAuthorizationException. assertThrows(classOf[GroupAuthorizationException], () => { - val offsets = java.util.Map.of(tp, new OffsetAndMetadata(1L)) + val offsets = Map(tp -> new OffsetAndMetadata(1L)).asJava producer.sendOffsetsToTransaction(offsets, new ConsumerGroupMetadata(group)) producer.commitTransaction() }) } - @Test - def shouldSendSuccessfullyWhenIdempotentAndHasCorrectACL(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldSendSuccessfullyWhenIdempotentAndHasCorrectACL(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, IDEMPOTENT_WRITE, ALLOW)), clusterResource) addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW)), topicResource) @@ -2790,16 +2203,18 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } // Verify that metadata request without topics works without any ACLs and returns cluster id - @Test - def testClusterId(): Unit = { - val request = new requests.MetadataRequest.Builder(java.util.List.of, false).build() + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterId(quorum: String): Unit = { + val request = new requests.MetadataRequest.Builder(List.empty.asJava, false).build() val response = connectAndReceive[MetadataResponse](request) - assertEquals(util.Map.of, response.errorCounts) + assertEquals(Collections.emptyMap, response.errorCounts) assertFalse(response.clusterId.isEmpty, "Cluster id not returned") } - @Test - def testRetryProducerInitializationAfterPermissionFix(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testRetryProducerInitializationAfterPermissionFix(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val wildcard = new ResourcePattern(TOPIC, ResourcePattern.WILDCARD_RESOURCE, LITERAL) val prefixed = new ResourcePattern(TOPIC, "t", PREFIXED) @@ -2821,8 +2236,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { producer.close() } - @Test - def testAuthorizeByResourceTypeMultipleAddAndRemove(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizeByResourceTypeMultipleAddAndRemove(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) for (_ <- 1 to 3) { @@ -2838,8 +2254,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testAuthorizeByResourceTypeIsolationUnrelatedDenyWontDominateAllow(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizeByResourceTypeIsolationUnrelatedDenyWontDominateAllow(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) createTopicWithBrokerPrincipal("topic-2") createTopicWithBrokerPrincipal("to") @@ -2860,8 +2277,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendSuccess() } - @Test - def testAuthorizeByResourceTypeDenyTakesPrecedence(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizeByResourceTypeDenyTakesPrecedence(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val allowWriteAce = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, WRITE, ALLOW) addAndVerifyAcls(Set(allowWriteAce), topicResource) @@ -2872,8 +2290,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @Test - def testAuthorizeByResourceTypeWildcardResourceDenyDominate(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizeByResourceTypeWildcardResourceDenyDominate(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val wildcard = new ResourcePattern(TOPIC, ResourcePattern.WILDCARD_RESOURCE, LITERAL) val prefixed = new ResourcePattern(TOPIC, "t", PREFIXED) @@ -2889,8 +2308,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @Test - def testAuthorizeByResourceTypePrefixedResourceDenyDominate(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizeByResourceTypePrefixedResourceDenyDominate(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) val prefixed = new ResourcePattern(TOPIC, topic.substring(0, 1), PREFIXED) val literal = new ResourcePattern(TOPIC, topic, LITERAL) @@ -2902,8 +2322,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertIdempotentSendAuthorizationFailure() } - @Test - def testMetadataClusterAuthorizedOperationsWithoutDescribeCluster(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMetadataClusterAuthorizedOperationsWithoutDescribeCluster(quorum: String): Unit = { removeAllClientAcls() // MetadataRequest versions older than 1 are not supported. @@ -2912,8 +2333,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testMetadataClusterAuthorizedOperationsWithDescribeAndAlterCluster(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMetadataClusterAuthorizedOperationsWithDescribeAndAlterCluster(quorum: String): Unit = { removeAllClientAcls() val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) @@ -2932,8 +2354,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testDescribeTopicAclWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicAclWithOperationAll(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -2944,7 +2367,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setName(topic) val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(util.List.of(metadataRequestTopic)) + .setTopics(Collections.singletonList(metadataRequestTopic)) .setAllowAutoTopicCreation(false) ).build() @@ -2956,8 +2379,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { assertEquals(Errors.NONE, topicResponse.error) } - @Test - def testDescribeTopicConfigsAclWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicConfigsAclWithOperationAll(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -2965,7 +2389,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() - .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(Collections.singletonList(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic))) ).build() @@ -2980,7 +2404,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { expectedClusterAuthorizedOperations: Int ): Unit = { val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(util.List.of) + .setTopics(Collections.emptyList()) .setAllowAutoTopicCreation(true) .setIncludeClusterAuthorizedOperations(true)) .build(version) @@ -2995,8 +2419,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testDescribeClusterClusterAuthorizedOperationsWithoutDescribeCluster(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeClusterClusterAuthorizedOperationsWithoutDescribeCluster(quorum: String): Unit = { removeAllClientAcls() for (version <- ApiKeys.DESCRIBE_CLUSTER.oldestVersion to ApiKeys.DESCRIBE_CLUSTER.latestVersion) { @@ -3004,8 +2429,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testDescribeClusterClusterAuthorizedOperationsWithDescribeAndAlterCluster(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeClusterClusterAuthorizedOperationsWithDescribeAndAlterCluster(quorum: String): Unit = { removeAllClientAcls() val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) @@ -3023,8 +2449,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @Test - def testHostAddressBasedAcls(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHostAddressBasedAcls(quorum: String): Unit = { createTopicWithBrokerPrincipal(topic) removeAllClientAcls() @@ -3040,7 +2467,7 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { .setName(topic) val metadataRequest = new MetadataRequest.Builder(new MetadataRequestData() - .setTopics(util.List.of(metadataRequestTopic)) + .setTopics(Collections.singletonList(metadataRequestTopic)) .setAllowAutoTopicCreation(false) ).build() @@ -3055,17 +2482,18 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCreateAndCloseConsumerWithNoAccess(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCreateAndCloseConsumerWithNoAccess(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() val closeConsumer: Executable = () => consumer.close() // Close consumer without consuming anything. close() call should pass successfully and throw no exception. assertDoesNotThrow(closeConsumer, "Exception not expected on closing consumer") } - @Test - def testConsumerGroupHeartbeatWithGroupReadAndTopicDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupHeartbeatWithGroupReadAndTopicDescribeAcl(quorum: String): Unit = { addAndVerifyAcls(groupReadAcl(groupResource), groupResource) addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) @@ -3074,8 +2502,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testConsumerGroupHeartbeatWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupHeartbeatWithOperationAll(quorum: String): Unit = { val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) @@ -3085,8 +2514,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testConsumerGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(quorum: String): Unit = { removeAllClientAcls() val request = consumerGroupHeartbeatRequest @@ -3094,8 +2524,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testConsumerGroupHeartbeatWithoutGroupReadAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupHeartbeatWithoutGroupReadAcl(quorum: String): Unit = { addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) val request = consumerGroupHeartbeatRequest @@ -3104,8 +2535,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testConsumerGroupHeartbeatWithoutTopicDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupHeartbeatWithoutTopicDescribeAcl(quorum: String): Unit = { addAndVerifyAcls(groupReadAcl(groupResource), groupResource) val request = consumerGroupHeartbeatRequest @@ -3114,991 +2546,21 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testConsumerGroupHeartbeatWithRegex(): Unit = { + private def createConsumerGroupToDescribe(): Unit = { createTopicWithBrokerPrincipal(topic) - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val response = sendAndReceiveFirstRegexHeartbeat(Uuid.randomUuid.toString, listenerName) - sendAndReceiveRegexHeartbeat(response, listenerName, Some(1)) - } - - @Test - def testConsumerGroupHeartbeatWithRegexWithoutTopicDescribeAcl(): Unit = { - createTopicWithBrokerPrincipal(topic) - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) - - val response = sendAndReceiveFirstRegexHeartbeat(Uuid.randomUuid.toString, listenerName) - sendAndReceiveRegexHeartbeat(response, listenerName, None) - } - - @Test - def testConsumerGroupHeartbeatWithRegexWithTopicDescribeAclAddedAndRemoved(): Unit = { - createTopicWithBrokerPrincipal(topic) - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) - - val memberId = Uuid.randomUuid.toString; - var response = sendAndReceiveFirstRegexHeartbeat(memberId, listenerName) - TestUtils.tryUntilNoAssertionError() { - response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(0), true) - } - - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - TestUtils.tryUntilNoAssertionError(waitTime = 25000) { - response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(1)) - } - - removeAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - TestUtils.tryUntilNoAssertionError(waitTime = 25000) { - response = sendAndReceiveRegexHeartbeat(response, listenerName, Some(0)) - } - } - - @Test - def testConsumerGroupHeartbeatWithRegexWithDifferentMemberAcls(): Unit = { - createTopicWithBrokerPrincipal(topic, numPartitions = 2) - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), groupResource) - - // Member on inter-broker listener has all access and is assigned the matching topic - var member1Response = sendAndReceiveFirstRegexHeartbeat("memberWithAllAccess", interBrokerListenerName) - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(2)) - - // Member on client listener has no topic describe access, but is assigned a partition of the - // unauthorized topic. This is leaking unauthorized topic metadata to member2. Simply filtering out - // the topic from the assignment in the response is not sufficient since different assignment states - // in the broker and client can lead to other issues. This needs to be fixed properly by using - // member permissions while computing assignments. - var member2Response = sendAndReceiveFirstRegexHeartbeat("memberWithLimitedAccess", listenerName) - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(1)) - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(1), fullRequest = true) - member2Response = sendAndReceiveRegexHeartbeat(member2Response, listenerName, Some(1)) - - // Create another topic and send heartbeats on member1 to trigger regex refresh - createTopicWithBrokerPrincipal("topic2", numPartitions = 2) - TestUtils.retry(15000) { - member1Response = sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(2)) - } - // This is leaking unauthorized topic metadata to member2. - member2Response = sendAndReceiveRegexHeartbeat(member2Response, listenerName, Some(2)) - - // Create another topic and send heartbeats on member2 to trigger regex refresh - createTopicWithBrokerPrincipal("topic3", numPartitions = 2) - TestUtils.retry(15000) { - member2Response = sendAndReceiveRegexHeartbeat(member2Response, listenerName, Some(0), fullRequest = true) - } - // This removes all topics from member1 since member2's permissions were used to refresh regex. - sendAndReceiveRegexHeartbeat(member1Response, interBrokerListenerName, Some(0), fullRequest = true) - } - - @Test - def testShareGroupHeartbeatWithGroupReadAndTopicDescribeAcl(): Unit = { - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = shareGroupHeartbeatRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareGroupHeartbeatWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = shareGroupHeartbeatRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl(): Unit = { - removeAllClientAcls() - - val request = shareGroupHeartbeatRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareGroupHeartbeatWithoutGroupReadAcl(): Unit = { - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = shareGroupHeartbeatRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareGroupHeartbeatWithoutTopicDescribeAcl(): Unit = { - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - - val request = shareGroupHeartbeatRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - private def createShareGroupToDescribe(): Unit = { - createTopicWithBrokerPrincipal(topic) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), shareGroupResource) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) - shareConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroup) - val consumer = createShareConsumer() - consumer.subscribe(util.Set.of(topic)) - consumer.poll(Duration.ofMillis(500L)) - removeAllClientAcls() - } - - private def createEmptyShareGroup(): Unit = { - createTopicWithBrokerPrincipal(topic) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), shareGroupResource) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) - shareConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroup) - val consumer = createShareConsumer() - consumer.subscribe(util.Set.of(topic)) - consumer.poll(Duration.ofMillis(500L)) - consumer.close() - removeAllClientAcls() - } - - @Test - def testShareGroupDescribeWithGroupDescribeAndTopicDescribeAcl(): Unit = { - createShareGroupToDescribe() - addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = shareGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareGroupDescribeWithOperationAll(): Unit = { - createShareGroupToDescribe() - - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = shareGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareGroupDescribeWithoutGroupDescribeAcl(): Unit = { - createShareGroupToDescribe() - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = shareGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { - createShareGroupToDescribe() - - val request = shareGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareFetchWithGroupReadAndTopicReadAcl(): Unit = { - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = createShareFetchRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareFetchWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = createShareFetchRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareFetchWithoutGroupReadOrTopicReadAcl(): Unit = { - removeAllClientAcls() - - val request = createShareFetchRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareFetchWithoutGroupReadAcl(): Unit = { - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = createShareFetchRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareFetchWithoutTopicReadAcl(): Unit = { - createTopicWithBrokerPrincipal(topic) - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - - val request = createShareFetchRequest - val response = connectAndReceive[ShareFetchResponse](request, listenerName = listenerName) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, Errors.forCode(response.data.responses.stream().findFirst().get().partitions.get(0).errorCode)) - } - - @Test - def testShareAcknowledgeWithGroupReadAndTopicReadAcl(): Unit = { - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = shareAcknowledgeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareAcknowledgeWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = shareAcknowledgeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testShareAcknowledgeWithoutGroupReadOrTopicReadAcl(): Unit = { - removeAllClientAcls() - - val request = shareAcknowledgeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testShareAcknowledgeFetchWithoutGroupReadAcl(): Unit = { - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = shareAcknowledgeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testInitializeShareGroupStateWithClusterAcl(): Unit = { - addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) - - val request = initializeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testInitializeShareGroupStateWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) - - val request = initializeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testInitializeShareGroupStateWithoutClusterAcl(): Unit = { - removeAllClientAcls() - - val request = initializeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testReadShareGroupStateWithClusterAcl(): Unit = { - addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) - - val request = readShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testReadShareGroupStateWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) - - val request = readShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testReadShareGroupStateWithoutClusterAcl(): Unit = { - removeAllClientAcls() - - val request = readShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testWriteShareGroupStateWithClusterAcl(): Unit = { - addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) - - val request = writeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testWriteShareGroupStateWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) - - val request = writeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testWriteShareGroupStateWithoutClusterAcl(): Unit = { - removeAllClientAcls() - - val request = writeShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDeleteShareGroupStateWithClusterAcl(): Unit = { - addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) - - val request = deleteShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDeleteShareGroupStateWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) - - val request = deleteShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDeleteShareGroupStateWithoutClusterAcl(): Unit = { - removeAllClientAcls() - - val request = deleteShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testReadShareGroupStateSummaryWithClusterAcl(): Unit = { - addAndVerifyAcls(clusterAcl(clusterResource), clusterResource) - - val request = readShareGroupStateSummaryRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testReadShareGroupStateSummaryWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), clusterResource) - - val request = readShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testReadShareGroupStateSummaryWithoutClusterAcl(): Unit = { - removeAllClientAcls() - - val request = readShareGroupStateRequest - val resource = Set[ResourceType](CLUSTER) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDescribeShareGroupOffsetsWithGroupDescribeAndTopicDescribeAcl(): Unit = { - addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = describeShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDescribeShareGroupOffsetsWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = describeShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDescribeShareGroupOffsetsWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { - removeAllClientAcls() - - val request = describeShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDescribeShareGroupOffsetsWithoutGroupDescribeAcl(): Unit = { - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = describeShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDescribeShareGroupOffsetsWithoutTopicDescribeAcl(): Unit = { - addAndVerifyAcls(shareGroupDescribeAcl(shareGroupResource), shareGroupResource) - - val request = describeShareGroupOffsetsRequest - val response = connectAndReceive[DescribeShareGroupOffsetsResponse](request, listenerName = listenerName) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED, Errors.forCode(response.data.groups.get(0).topics.get(0).partitions.get(0).errorCode)) - } - - @Test - def testDeleteShareGroupOffsetsWithGroupDeleteAndTopicReadAcl(): Unit = { - addAndVerifyAcls(shareGroupDeleteAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = deleteShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDeleteShareGroupOffsetsWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = deleteShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testDeleteShareGroupOffsetsWithoutGroupDeleteOrTopicReadAcl(): Unit = { - removeAllClientAcls() - - val request = deleteShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDeleteShareGroupOffsetsWithoutGroupDeleteAcl(): Unit = { - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = deleteShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testDeleteShareGroupOffsetsWithoutTopicReadAcl(): Unit = { - createEmptyShareGroup() - addAndVerifyAcls(shareGroupDeleteAcl(shareGroupResource), shareGroupResource) - - val request = deleteShareGroupOffsetsRequest - val response = connectAndReceive[DeleteShareGroupOffsetsResponse](request, listenerName = listenerName) - assertEquals(1, response.data.responses.size) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.responses.get(0).errorCode, s"Unexpected response $response") - } - - @Test - def testAlterShareGroupOffsetsWithGroupReadAndTopicReadAcl(): Unit = { - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = alterShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testAlterShareGroupOffsetsWithOperationAll(): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), shareGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = alterShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @Test - def testAlterShareGroupOffsetsWithoutGroupReadOrTopicReadAcl(): Unit = { - removeAllClientAcls() - - val request = alterShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testAlterShareGroupOffsetsWithoutGroupReadAcl(): Unit = { - addAndVerifyAcls(topicReadAcl(topicResource), topicResource) - - val request = alterShareGroupOffsetsRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @Test - def testAlterShareGroupOffsetsWithoutTopicReadAcl(): Unit = { - createEmptyShareGroup() - addAndVerifyAcls(shareGroupReadAcl(shareGroupResource), shareGroupResource) - - val request = alterShareGroupOffsetsRequest - val response = connectAndReceive[AlterShareGroupOffsetsResponse](request, listenerName = listenerName) - assertEquals(1, response.data.responses.size) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.responses.stream().findFirst().get().partitions.get(0).errorCode, s"Unexpected response $response") - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupHeartbeatWithGroupReadAndTopicDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) + addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) + consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "consumer") + consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group) + val consumer = createConsumer() + consumer.subscribe(Collections.singleton(topic)) + consumer.poll(Duration.ofMillis(500L)) + removeAllClientAcls() } @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupHeartbeatWithOperationAll( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), streamsGroupResource) - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - addAndVerifyAcls(Set(allowAllOpsAcl), sourceTopicResource) - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupHeartbeatWithoutGroupReadOrTopicDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - removeAllClientAcls() - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupHeartbeatWithoutGroupReadAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupHeartbeatWithoutTopicDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false", - "false, true" - )) - def testStreamsGroupHeartbeatWithoutInternalTopicCreateAcl( - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createTopicWithBrokerPrincipal(sourceTopic) - addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic = false, - topicAsRepartitionSinkTopic = false, - topicAsRepartitionSourceTopic = topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics = topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - - // Request successful, but internal topic not created. - val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true).asInstanceOf[StreamsGroupHeartbeatResponse] - assertEquals( - util.List.of(new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Internal topics are missing: [topic]; Unauthorized to CREATE on topics topic.")), - response.data().status()) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false", - "false, true" - )) - def testStreamsGroupHeartbeatWithInternalTopicCreateAcl( - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createTopicWithBrokerPrincipal(sourceTopic) - addAndVerifyAcls(streamsGroupReadAcl(streamsGroupResource), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - addAndVerifyAcls(topicCreateAcl(topicResource), topicResource) - - val request = streamsGroupHeartbeatRequest( - topicAsSourceTopic = false, - topicAsRepartitionSinkTopic = false, - topicAsRepartitionSourceTopic = topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics = topicAsStateChangelogTopics - ) - val resource = Set[ResourceType](GROUP, TOPIC) - val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true).asInstanceOf[StreamsGroupHeartbeatResponse] - // Request successful, and no internal topic creation error. - assertEquals( - util.List.of(new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Internal topics are missing: [topic]")), - response.data().status()) - } - - private def createStreamsGroupToDescribe( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createTopicWithBrokerPrincipal(sourceTopic) - createTopicWithBrokerPrincipal(topic) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), streamsGroupResource) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), sourceTopicResource) - streamsConsumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, streamsGroup) - streamsConsumerConfig.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - val consumer = createStreamsConsumer(streamsRebalanceData = new StreamsRebalanceData( - UUID.randomUUID(), - Optional.empty(), - util.Map.of( - "subtopology-0", new StreamsRebalanceData.Subtopology( - if (topicAsSourceTopic) util.Set.of(sourceTopic, topic) else util.Set.of(sourceTopic), - if (topicAsRepartitionSinkTopic) util.Set.of(topic) else util.Set.of(), - if (topicAsRepartitionSourceTopic) - util.Map.of(topic, new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())) - else util.Map.of(), - if (topicAsStateChangelogTopics) - util.Map.of(topic, new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())) - else util.Map.of(), - util.Set.of() - )), - Map.empty[String, String].asJava - )) - consumer.subscribe( - if (topicAsSourceTopic || topicAsRepartitionSourceTopic) util.Set.of(sourceTopic, topic) else util.Set.of(sourceTopic), - new StreamsRebalanceListener { - override def onTasksRevoked(tasks: util.Set[StreamsRebalanceData.TaskId]): Unit = () - override def onTasksAssigned(assignment: StreamsRebalanceData.Assignment): Unit = () - override def onAllTasksLost(): Unit = () - } - ) - consumer.poll(Duration.ofMillis(500L)) - removeAllClientAcls() - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupDescribeWithGroupDescribeAndTopicDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createStreamsGroupToDescribe( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - addAndVerifyAcls(streamsGroupDescribeAcl(streamsGroupResource), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = streamsGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupDescribeWithOperationAll( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createStreamsGroupToDescribe( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - - val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) - addAndVerifyAcls(Set(allowAllOpsAcl), streamsGroupResource) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(Set(allowAllOpsAcl), topicResource) - - val request = streamsGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupDescribeWithoutGroupDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createStreamsGroupToDescribe( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) - - val request = streamsGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - @ParameterizedTest - @CsvSource(Array( - "true, false, false, false", - "false, true, false, false", - "false, false, true, false", - "false, false, false, true" - )) - def testStreamsGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl( - topicAsSourceTopic: Boolean, - topicAsRepartitionSinkTopic: Boolean, - topicAsRepartitionSourceTopic: Boolean, - topicAsStateChangelogTopics: Boolean - ): Unit = { - createStreamsGroupToDescribe( - topicAsSourceTopic, - topicAsRepartitionSinkTopic, - topicAsRepartitionSourceTopic, - topicAsStateChangelogTopics - ) - - val request = streamsGroupDescribeRequest - val resource = Set[ResourceType](GROUP, TOPIC) - addAndVerifyAcls(sourceTopicDescribeAcl(sourceTopicResource), sourceTopicResource) // Always added, since we need a source topic - - sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) - } - - private def sendAndReceiveFirstRegexHeartbeat(memberId: String, - listenerName: ListenerName): ConsumerGroupHeartbeatResponseData = { - val request = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId(group) - .setMemberId(memberId) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setTopicPartitions(util.List.of()) - .setSubscribedTopicRegex("^top.*")).build() - val resource = Set[ResourceType](GROUP, TOPIC) - val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true, listenerName = listenerName) - .data.asInstanceOf[ConsumerGroupHeartbeatResponseData] - assertEquals(Errors.NONE.code, response.errorCode, s"Unexpected response $response") - assertEquals(0, response.assignment.topicPartitions.size, s"Unexpected assignment $response") - response - } - - private def sendAndReceiveRegexHeartbeat(lastResponse: ConsumerGroupHeartbeatResponseData, - listenerName: ListenerName, - expectedAssignmentSize: Option[Int], - fullRequest: Boolean = false): ConsumerGroupHeartbeatResponseData = { - var data = new ConsumerGroupHeartbeatRequestData() - .setGroupId(group) - .setMemberId(lastResponse.memberId) - .setMemberEpoch(lastResponse.memberEpoch) - if (fullRequest) { - val partitions = Option(lastResponse.assignment).map(_.topicPartitions.asScala.map(p => - new ConsumerGroupHeartbeatRequestData.TopicPartitions() - .setTopicId(p.topicId) - .setPartitions(p.partitions) - )).getOrElse(List()) - data = data - .setTopicPartitions(partitions.asJava) - .setSubscribedTopicRegex("^top.*") - .setRebalanceTimeoutMs(5 * 60 * 1000) - } - val request = new ConsumerGroupHeartbeatRequest.Builder(data).build() - val resource = Set[ResourceType](GROUP, TOPIC) - val response = sendRequestAndVerifyResponseError(request, resource, isAuthorized = true, listenerName = listenerName) - .data.asInstanceOf[ConsumerGroupHeartbeatResponseData] - assertEquals(Errors.NONE.code, response.errorCode, s"Unexpected response $response") - expectedAssignmentSize match { - case Some(size) => - assertNotNull(response.assignment, s"Unexpected assignment $response") - assertEquals(size, response.assignment.topicPartitions.asScala.map(_.partitions.size).sum, s"Unexpected assignment $response") - case None => - assertNull(response.assignment, s"Unexpected assignment $response") - } - response - } - - private def createConsumerGroupToDescribe(): Unit = { - createTopicWithBrokerPrincipal(topic) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), groupResource) - addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, READ, ALLOW)), topicResource) - consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, "consumer") - consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group) - val consumer = createConsumer() - consumer.subscribe(util.Set.of(topic)) - consumer.poll(Duration.ofMillis(500L)) - removeAllClientAcls() - } - - @Test - def testConsumerGroupDescribeWithGroupDescribeAndTopicDescribeAcl(): Unit = { + @ValueSource(strings = Array("kraft")) + def testConsumerGroupDescribeWithGroupDescribeAndTopicDescribeAcl(quorum: String): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(groupDescribeAcl(groupResource), groupResource) @@ -4109,8 +2571,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testConsumerGroupDescribeWithOperationAll(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupDescribeWithOperationAll(quorum: String): Unit = { createConsumerGroupToDescribe() val allowAllOpsAcl = new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, ALL, ALLOW) @@ -4122,8 +2585,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = true) } - @Test - def testConsumerGroupDescribeWithoutGroupDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupDescribeWithoutGroupDescribeAcl(quorum: String): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(topicDescribeAcl(topicResource), topicResource) @@ -4133,8 +2597,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testConsumerGroupDescribeWithoutTopicDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupDescribeWithoutTopicDescribeAcl(quorum: String): Unit = { createConsumerGroupToDescribe() addAndVerifyAcls(groupDescribeAcl(groupResource), groupResource) @@ -4144,8 +2609,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { sendRequestAndVerifyResponseError(request, resource, isAuthorized = false) } - @Test - def testConsumerGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupDescribeWithoutGroupDescribeOrTopicDescribeAcl(quorum: String): Unit = { createConsumerGroupToDescribe() val request = consumerGroupDescribeRequest @@ -4170,12 +2636,12 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { val aclEntryFilter = new AccessControlEntryFilter(clientPrincipalString, null, AclOperation.ANY, AclPermissionType.ANY) val aclFilter = new AclBindingFilter(ResourcePatternFilter.ANY, aclEntryFilter) - authorizerForWrite.deleteAcls(TestUtils.anonymousAuthorizableContext, java.util.List.of(aclFilter)).asScala. + authorizerForWrite.deleteAcls(TestUtils.anonymousAuthorizableContext, List(aclFilter).asJava).asScala. map(_.toCompletableFuture.get).flatMap { deletion => deletion.aclBindingDeleteResults().asScala.map(_.aclBinding.pattern).toSet }.foreach { resource => - (brokers.map(_.authorizerPlugin.get) ++ controllerServers.map(_.authorizerPlugin.get)).foreach { authorizer => - TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], authorizer.get, resource, aclEntryFilter) + (brokers.map(_.authorizer.get) ++ controllerServers.map(_.authorizer.get)).foreach { authorizer => + TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], authorizer, resource, aclEntryFilter) } } } @@ -4184,10 +2650,9 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { resources: Set[ResourceType], isAuthorized: Boolean, topicExists: Boolean = true, - topicNames: Map[Uuid, String] = getTopicNames(), - listenerName: ListenerName = listenerName): AbstractResponse = { + topicNames: Map[Uuid, String] = getTopicNames()): AbstractResponse = { val apiKey = request.apiKey - val response = connectAndReceive[AbstractResponse](request, listenerName = listenerName) + val response = connectAndReceive[AbstractResponse](request) val error = requestKeyToError(topicNames, request.version())(apiKey).asInstanceOf[AbstractResponse => Errors](response) val authorizationErrors = resources.flatMap { resourceType => @@ -4281,14 +2746,16 @@ class AuthorizerIntegrationTest extends AbstractAuthorizerIntegrationTest { ) } - @Test - def testPrefixAcls(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPrefixAcls(quorum: String): Unit = { addAndVerifyAcls(Set(new AccessControlEntry(clientPrincipalString, WILDCARD_HOST, CREATE, ALLOW)), new ResourcePattern(TOPIC, "f", PREFIXED)) addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WILDCARD_HOST, CREATE, DENY)), new ResourcePattern(TOPIC, "fooa", PREFIXED)) addAndVerifyAcls(Set(new AccessControlEntry("User:otherPrincipal", WILDCARD_HOST, CREATE, ALLOW)), new ResourcePattern(TOPIC, "foob", PREFIXED)) - createAdminClient().createTopics(util.List.of(new NewTopic("foobar", 1, 1.toShort))).all().get() + createAdminClient().createTopics(Collections. + singletonList(new NewTopic("foobar", 1, 1.toShort))).all().get() } } diff --git a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala index 16dec9dc00800..3fc63c5952633 100644 --- a/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseAdminIntegrationTest.scala @@ -17,7 +17,7 @@ package kafka.api import java.util -import java.util.{Optional, Properties} +import java.util.Properties import java.util.concurrent.ExecutionException import kafka.utils.Logging import kafka.utils.TestUtils._ @@ -33,10 +33,13 @@ import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.test.TestUtils.assertFutureThrows import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo, Timeout} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ import scala.collection.Seq +import scala.jdk.OptionConverters.RichOption /** * Base integration test cases for [[Admin]]. Each test case added here will be executed @@ -68,16 +71,17 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg super.tearDown() } - @Test - def testCreateDeleteTopics(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateDeleteTopics(quorum: String): Unit = { client = createAdminClient val topics = Seq("mytopic", "mytopic2", "mytopic3") - val newTopics = util.List.of( - new NewTopic("mytopic", util.Map.of(0: Integer, util.List.of[Integer](1, 2), 1: Integer, util.List.of[Integer](2, 0))), + val newTopics = Seq( + new NewTopic("mytopic", Map((0: Integer) -> Seq[Integer](1, 2).asJava, (1: Integer) -> Seq[Integer](2, 0).asJava).asJava), new NewTopic("mytopic2", 3, 3.toShort), - new NewTopic("mytopic3", Optional.empty[Integer], Optional.empty[java.lang.Short]) + new NewTopic("mytopic3", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) ) - val validateResult = client.createTopics(newTopics, new CreateTopicsOptions().validateOnly(true)) + val validateResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)) validateResult.all.get() waitForTopics(client, List(), topics) @@ -92,7 +96,7 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg } validateMetadataAndConfigs(validateResult) - val createResult = client.createTopics(newTopics) + val createResult = client.createTopics(newTopics.asJava) createResult.all.get() waitForTopics(client, topics, List()) validateMetadataAndConfigs(createResult) @@ -102,17 +106,17 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg assertEquals(topicIds(topic), createResult.topicId(topic).get()) } - val failedCreateResult = client.createTopics(newTopics) + val failedCreateResult = client.createTopics(newTopics.asJava) val results = failedCreateResult.values() assertTrue(results.containsKey("mytopic")) - assertFutureThrows(classOf[TopicExistsException], results.get("mytopic")) + assertFutureThrows(results.get("mytopic"), classOf[TopicExistsException]) assertTrue(results.containsKey("mytopic2")) - assertFutureThrows(classOf[TopicExistsException], results.get("mytopic2")) + assertFutureThrows(results.get("mytopic2"), classOf[TopicExistsException]) assertTrue(results.containsKey("mytopic3")) - assertFutureThrows(classOf[TopicExistsException], results.get("mytopic3")) - assertFutureThrows(classOf[TopicExistsException], failedCreateResult.numPartitions("mytopic3")) - assertFutureThrows(classOf[TopicExistsException], failedCreateResult.replicationFactor("mytopic3")) - assertFutureThrows(classOf[TopicExistsException], failedCreateResult.config("mytopic3")) + assertFutureThrows(results.get("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.numPartitions("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.replicationFactor("mytopic3"), classOf[TopicExistsException]) + assertFutureThrows(failedCreateResult.config("mytopic3"), classOf[TopicExistsException]) val topicToDescription = client.describeTopics(topics.asJava).allTopicNames.get() assertEquals(topics.toSet, topicToDescription.keySet.asScala) @@ -160,8 +164,9 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg waitForTopics(client, List(), topics) } - @Test - def testAuthorizedOperations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthorizedOperations(quorum: String): Unit = { client = createAdminClient // without includeAuthorizedOperations flag @@ -174,8 +179,8 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg assertEquals(expectedOperations, result.authorizedOperations().get()) val topic = "mytopic" - val newTopics = util.List.of(new NewTopic(topic, 3, 3.toShort)) - client.createTopics(newTopics).all.get() + val newTopics = Seq(new NewTopic(topic, 3, 3.toShort)) + client.createTopics(newTopics.asJava).all.get() waitForTopics(client, expectedPresent = Seq(topic), expectedMissing = List()) // without includeAuthorizedOperations flag @@ -250,7 +255,7 @@ abstract class BaseAdminIntegrationTest extends IntegrationTestHarness with Logg expectedNumPartitionsOpt: Option[Int] = None): TopicDescription = { var result: TopicDescription = null waitUntilTrue(() => { - val topicResult = client.describeTopics(util.Set.of(topic), describeOptions).topicNameValues().get(topic) + val topicResult = client.describeTopics(Set(topic).asJava, describeOptions).topicNameValues().get(topic) try { result = topicResult.get expectedNumPartitionsOpt.map(_ == result.partitions.size).getOrElse(true) diff --git a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala index adfb657b77603..a8b67d9a2750c 100644 --- a/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseConsumerTest.scala @@ -19,9 +19,11 @@ package kafka.api import kafka.utils.TestInfoUtils import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, GroupProtocol} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} +import org.apache.kafka.common.header.Headers import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, PartitionInfo} import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.serialization.{Deserializer, Serializer} +import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, Serializer} +import org.apache.kafka.common.test.api.Flaky import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource @@ -36,9 +38,9 @@ import scala.collection.Seq */ abstract class BaseConsumerTest extends AbstractConsumerTest { - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSimpleConsumption(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSimpleConsumption(quorum: String, groupProtocol: String): Unit = { val numRecords = 10000 val producer = createProducer() val startingTimestamp = System.currentTimeMillis() @@ -46,7 +48,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { val consumer = createConsumer() assertEquals(0, consumer.assignment.size) - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertEquals(1, consumer.assignment.size) consumer.seek(tp, 0) @@ -56,9 +58,9 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { sendAndAwaitAsyncCommit(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testClusterResourceListener(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testClusterResourceListener(quorum: String, groupProtocol: String): Unit = { val numRecords = 100 val producerProps = new Properties() producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, classOf[BaseConsumerTest.TestClusterResourceListenerSerializer]) @@ -72,17 +74,18 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { consumerProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[BaseConsumerTest.TestClusterResourceListenerDeserializer]) consumerProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[BaseConsumerTest.TestClusterResourceListenerDeserializer]) val consumer: Consumer[Array[Byte], Array[Byte]] = createConsumer(keyDeserializer = null, valueDeserializer = null, consumerProps) - consumer.subscribe(java.util.List.of(tp.topic())) + consumer.subscribe(List(tp.topic()).asJava) consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) assertNotEquals(0, BaseConsumerTest.updateProducerCount.get()) assertNotEquals(0, BaseConsumerTest.updateConsumerCount.get()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCoordinatorFailover(groupProtocol: String): Unit = { + @Flaky("KAFKA-15920") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCoordinatorFailover(quorum: String, groupProtocol: String): Unit = { val listener = new TestConsumerReassignmentListener() - if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5001") this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") } @@ -90,7 +93,7 @@ abstract class BaseConsumerTest extends AbstractConsumerTest { this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "15000") val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic), listener) + consumer.subscribe(List(topic).asJava, listener) // the initial subscription should cause a callback execution awaitRebalance(consumer, listener) @@ -129,4 +132,41 @@ object BaseConsumerTest { override def onUpdate(clusterResource: ClusterResource): Unit = updateConsumerCount.incrementAndGet() override def deserialize(topic: String, data: Array[Byte]): Array[Byte] = data } + + class SerializerImpl extends Serializer[Array[Byte]] { + var serializer = new ByteArraySerializer() + + override def serialize(topic: String, headers: Headers, data: Array[Byte]): Array[Byte] = { + headers.add("content-type", "application/octet-stream".getBytes) + serializer.serialize(topic, data) + } + + override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = serializer.configure(configs, isKey) + + override def close(): Unit = serializer.close() + + override def serialize(topic: String, data: Array[Byte]): Array[Byte] = { + fail("method should not be invoked") + null + } + } + + class DeserializerImpl extends Deserializer[Array[Byte]] { + var deserializer = new ByteArrayDeserializer() + + override def deserialize(topic: String, headers: Headers, data: Array[Byte]): Array[Byte] = { + val header = headers.lastHeader("content-type") + assertEquals("application/octet-stream", if (header == null) null else new String(header.value())) + deserializer.deserialize(topic, data) + } + + override def configure(configs: java.util.Map[String, _], isKey: Boolean): Unit = deserializer.configure(configs, isKey) + + override def close(): Unit = deserializer.close() + + override def deserialize(topic: String, data: Array[Byte]): Array[Byte] = { + fail("method should not be invoked") + null + } + } } diff --git a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala index add18b260cd20..74111e319b0ab 100644 --- a/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseProducerSendTest.scala @@ -19,7 +19,7 @@ package kafka.api import java.time.Duration import java.nio.charset.StandardCharsets -import java.util.Properties +import java.util.{Collections, Properties} import java.util.concurrent.TimeUnit import kafka.integration.KafkaServerTestHarness import kafka.security.JaasTestUtils @@ -43,25 +43,22 @@ import org.junit.jupiter.params.provider.MethodSource import scala.collection.mutable import scala.concurrent.ExecutionException +import scala.jdk.CollectionConverters._ import scala.jdk.javaapi.OptionConverters abstract class BaseProducerSendTest extends KafkaServerTestHarness { def generateConfigs: scala.collection.Seq[KafkaConfig] = { + val overridingProps = new Properties() val numServers = 2 + overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toShort) + overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 4.toString) TestUtils.createBrokerConfigs( numServers, interBrokerSecurityProtocol = Some(securityProtocol), trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties - ).map(KafkaConfig.fromProps(_, brokerOverrides)) - } - - protected def brokerOverrides: Properties = { - val overridingProps = new Properties() - overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toShort) - overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 4.toString) - overridingProps + ).map(KafkaConfig.fromProps(_, overridingProps)) } private var consumer: Consumer[Array[Byte], Array[Byte]] = _ @@ -131,9 +128,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { * 1. Send with null key/value/partition-id should be accepted; send with null topic should be rejected. * 2. Last message of the non-blocking send should return the correct offset metadata */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendOffset(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendOffset(quorum: String, groupProtocol: String): Unit = { val producer = createProducer() val partition = 0 @@ -193,9 +190,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendCompressedMessageWithCreateTime(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendCompressedMessageWithCreateTime(quorum: String, groupProtocol: String): Unit = { val producer = createProducer( compressionType = "gzip", lingerMs = Int.MaxValue, @@ -203,9 +200,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { sendAndVerifyTimestamp(producer, TimestampType.CREATE_TIME) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendNonCompressedMessageWithCreateTime(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendNonCompressedMessageWithCreateTime(quorum: String, groupProtocol: String): Unit = { val producer = createProducer(lingerMs = Int.MaxValue, deliveryTimeoutMs = Int.MaxValue) sendAndVerifyTimestamp(producer, TimestampType.CREATE_TIME) } @@ -296,9 +293,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { * * After close() returns, all messages should be sent with correct returned offset metadata */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testClose(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testClose(quorum: String, groupProtocol: String): Unit = { val producer = createProducer() try { @@ -330,9 +327,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { * * The specified partition-id should be respected */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendToPartition(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendToPartition(quorum: String, groupProtocol: String): Unit = { val producer = createProducer() try { @@ -351,7 +348,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { assertEquals(partition, recordMetadata.partition) } - consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) + consumer.assign(List(new TopicPartition(topic, partition)).asJava) // make sure the fetched messages also respect the partitioning and ordering val records = TestUtils.consumeRecords(consumer, numRecords) @@ -370,9 +367,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendToPartitionWithFollowerShutdownShouldNotTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendToPartitionWithFollowerShutdownShouldNotTimeout(quorum: String, groupProtocol: String): Unit = { // This test produces to a leader that has follower that is shutting down. It shows that // the produce request succeed, do not timeout and do not need to be retried. val producer = createProducer() @@ -399,7 +396,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { assertEquals(partition, recordMetadata.partition) } - consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) + consumer.assign(List(new TopicPartition(topic, partition)).asJava) // make sure the fetched messages also respect the partitioning and ordering val records = TestUtils.consumeRecords(consumer, numRecords) @@ -423,9 +420,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { * Producer will attempt to send messages to the partition specified in each record, and should * succeed as long as the partition is included in the metadata. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendBeforeAndAfterPartitionExpansion(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendBeforeAndAfterPartitionExpansion(quorum: String, groupProtocol: String): Unit = { val producer = createProducer(maxBlockMs = 5 * 1000L) // create topic @@ -448,7 +445,7 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { val e = assertThrows(classOf[ExecutionException], () => producer.send(new ProducerRecord(topic, partition1, null, "value".getBytes(StandardCharsets.UTF_8))).get()) assertEquals(classOf[TimeoutException], e.getCause.getClass) - admin.createPartitions(java.util.Map.of(topic, NewPartitions.increaseTo(2))).all().get() + admin.createPartitions(Collections.singletonMap(topic, NewPartitions.increaseTo(2))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic, 0) @@ -481,9 +478,9 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { /** * Test that flush immediately sends all accumulated requests. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFlush(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFlush(quorum: String, groupProtocol: String): Unit = { val producer = createProducer(lingerMs = Int.MaxValue, deliveryTimeoutMs = Int.MaxValue) try { TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, 2, 2) @@ -503,12 +500,12 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { /** * Test close with zero timeout from caller thread */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCloseWithZeroTimeoutFromCallerThread(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCloseWithZeroTimeoutFromCallerThread(quorum: String, groupProtocol: String): Unit = { TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, 2, 2) val partition = 0 - consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) + consumer.assign(List(new TopicPartition(topic, partition)).asJava) val record0 = new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, null, "value".getBytes(StandardCharsets.UTF_8)) @@ -529,12 +526,12 @@ abstract class BaseProducerSendTest extends KafkaServerTestHarness { /** * Test close with zero and non-zero timeout from sender thread */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCloseWithZeroTimeoutFromSenderThread(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCloseWithZeroTimeoutFromSenderThread(quorum: String, groupProtocol: String): Unit = { TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, 1, 2) val partition = 0 - consumer.assign(java.util.List.of(new TopicPartition(topic, partition))) + consumer.assign(List(new TopicPartition(topic, partition)).asJava) val record = new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, null, "value".getBytes(StandardCharsets.UTF_8)) // Test closing from sender thread. diff --git a/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala b/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala index 13eb169e0459e..5657df9a0d52c 100644 --- a/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala +++ b/core/src/test/scala/integration/kafka/api/BaseQuotaTest.scala @@ -17,10 +17,10 @@ package kafka.api import java.time.Duration import java.util import java.util.concurrent.TimeUnit -import java.util.Properties +import java.util.{Collections, Properties} import com.yammer.metrics.core.{Histogram, Meter} import kafka.api.QuotaTestClients._ -import kafka.server.KafkaBroker +import kafka.server.{ClientQuotaManager, KafkaBroker} import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.admin.Admin import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} @@ -32,11 +32,10 @@ import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.quota.ClientQuotaAlteration import org.apache.kafka.common.quota.ClientQuotaEntity import org.apache.kafka.common.security.auth.KafkaPrincipal -import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} +import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest @@ -91,10 +90,9 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { quotaTestClients = createQuotaTestClients(topic1, leaderNode) } - @Flaky("KAFKA-8073") - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testThrottledProducerConsumer(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testThrottledProducerConsumer(quorum: String, groupProtocol: String): Unit = { val numRecords = 1000 val produced = quotaTestClients.produceUntilThrottled(numRecords) quotaTestClients.verifyProduceThrottle(expectThrottle = true) @@ -104,9 +102,9 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { quotaTestClients.verifyConsumeThrottle(expectThrottle = true) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProducerConsumerOverrideUnthrottled(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProducerConsumerOverrideUnthrottled(quorum: String, groupProtocol: String): Unit = { // Give effectively unlimited quota for producer and consumer val props = new Properties() props.put(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG, Long.MaxValue.toString) @@ -124,9 +122,9 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { quotaTestClients.verifyConsumeThrottle(expectThrottle = false) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProducerConsumerOverrideLowerQuota(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProducerConsumerOverrideLowerQuota(quorum: String, groupProtocol: String): Unit = { // consumer quota is set such that consumer quota * default quota window (10 seconds) is less than // MAX_PARTITION_FETCH_BYTES_CONFIG, so that we can test consumer ability to fetch in this case // In this case, 250 * 10 < 4096 @@ -142,10 +140,9 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { quotaTestClients.verifyConsumeThrottle(expectThrottle = true) } - @Flaky("KAFKA-18810") - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testQuotaOverrideDelete(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testQuotaOverrideDelete(quorum: String, groupProtocol: String): Unit = { // Override producer and consumer quotas to unlimited quotaTestClients.overrideQuotas(Long.MaxValue, Long.MaxValue, Long.MaxValue.toDouble) quotaTestClients.waitForQuotaUpdate(Long.MaxValue, Long.MaxValue, Long.MaxValue.toDouble) @@ -165,19 +162,19 @@ abstract class BaseQuotaTest extends IntegrationTestHarness { // Since producer may have been throttled after producing a couple of records, // consume from beginning till throttled - quotaTestClients.consumer.seekToBeginning(util.Set.of(new TopicPartition(topic1, 0))) + quotaTestClients.consumer.seekToBeginning(Collections.singleton(new TopicPartition(topic1, 0))) quotaTestClients.consumeUntilThrottled(numRecords + produced) quotaTestClients.verifyConsumeThrottle(expectThrottle = true) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testThrottledRequest(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testThrottledRequest(quorum: String, groupProtocol: String): Unit = { quotaTestClients.overrideQuotas(Long.MaxValue, Long.MaxValue, 0.1) quotaTestClients.waitForQuotaUpdate(Long.MaxValue, Long.MaxValue, 0.1) val consumer = quotaTestClients.consumer - consumer.subscribe(util.Set.of(topic1)) + consumer.subscribe(Collections.singleton(topic1)) val endTimeMs = System.currentTimeMillis + 10000 var throttled = false while ((!throttled || quotaTestClients.exemptRequestMetric == null || metricValue(quotaTestClients.exemptRequestMetric) <= 0) @@ -220,13 +217,13 @@ abstract class QuotaTestClients(topic: String, def produceUntilThrottled(maxRecords: Int, waitForRequestCompletion: Boolean = true): Int = { var numProduced = 0 var throttled = false - val metric = throttleMetric(QuotaType.PRODUCE, producerClientId) do { val payload = numProduced.toString.getBytes val future = producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, null, null, payload), new ErrorLoggingCallback(topic, null, null, true)) numProduced += 1 do { + val metric = throttleMetric(QuotaType.PRODUCE, producerClientId) throttled = metric != null && metricValue(metric) > 0 } while (!future.isDone && (!throttled || waitForRequestCompletion)) } while (numProduced < maxRecords && !throttled) @@ -236,7 +233,7 @@ abstract class QuotaTestClients(topic: String, def consumeUntilThrottled(maxRecords: Int, waitForRequestCompletion: Boolean = true): Int = { val timeoutMs = TimeUnit.MINUTES.toMillis(1) - consumer.subscribe(util.Set.of(topic)) + consumer.subscribe(Collections.singleton(topic)) var numConsumed = 0 var throttled = false val startMs = System.currentTimeMillis diff --git a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala index 43c33e617de97..f05280b24a268 100644 --- a/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/ConsumerBounceTest.scala @@ -13,20 +13,28 @@ package kafka.api +import java.{time, util} import java.util.concurrent._ -import java.util.Properties +import java.util.{Collections, Properties} import kafka.server.KafkaConfig import kafka.utils.{Logging, TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.GroupMaxSizeReachedException +import org.apache.kafka.common.message.FindCoordinatorRequestData +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.{FindCoordinatorRequest, FindCoordinatorResponse} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.server.util.ShutdownableThread import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Disabled, TestInfo} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource +import java.time.Duration +import scala.jdk.CollectionConverters._ import scala.collection.{Seq, mutable} /** @@ -90,15 +98,239 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { } } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumptionWithBrokerFailures(quorum: String, groupProtocol: String): Unit = consumeWithBrokerFailures(10) + + /* + * 1. Produce a bunch of messages + * 2. Then consume the messages while killing and restarting brokers at random + */ + def consumeWithBrokerFailures(numIters: Int): Unit = { + val numRecords = 1000 + val producer = createProducer() + producerSend(producer, numRecords) + + var consumed = 0L + val consumer = createConsumer() + + consumer.subscribe(Collections.singletonList(topic)) + + val scheduler = new BounceBrokerScheduler(numIters) + try { + scheduler.start() + + while (scheduler.isRunning) { + val records = consumer.poll(Duration.ofMillis(100)).asScala + + for (record <- records) { + assertEquals(consumed, record.offset()) + consumed += 1 + } + + if (records.nonEmpty) { + consumer.commitSync() + assertEquals(consumer.position(tp), consumer.committed(Set(tp).asJava).get(tp).offset) + + if (consumer.position(tp) == numRecords) { + consumer.seekToBeginning(Collections.emptyList()) + consumed = 0 + } + } + } + } finally { + scheduler.shutdown() + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSeekAndCommitWithBrokerFailures(quorum: String, groupProtocol: String): Unit = seekAndCommitWithBrokerFailures(5) + + def seekAndCommitWithBrokerFailures(numIters: Int): Unit = { + val numRecords = 1000 + val producer = createProducer() + producerSend(producer, numRecords) + + val consumer = createConsumer() + consumer.assign(Collections.singletonList(tp)) + consumer.seek(tp, 0) + + // wait until all the followers have synced the last HW with leader + TestUtils.waitUntilTrue(() => brokerServers.forall(server => + server.replicaManager.localLog(tp).get.highWatermark == numRecords + ), "Failed to update high watermark for followers after timeout") + + val scheduler = new BounceBrokerScheduler(numIters) + try { + scheduler.start() + + while (scheduler.isRunning) { + val coin = TestUtils.random.nextInt(3) + if (coin == 0) { + info("Seeking to end of log") + consumer.seekToEnd(Collections.emptyList()) + assertEquals(numRecords.toLong, consumer.position(tp)) + } else if (coin == 1) { + val pos = TestUtils.random.nextInt(numRecords).toLong + info("Seeking to " + pos) + consumer.seek(tp, pos) + assertEquals(pos, consumer.position(tp)) + } else if (coin == 2) { + info("Committing offset.") + consumer.commitSync() + assertEquals(consumer.position(tp), consumer.committed(Set(tp).asJava).get(tp).offset) + } + } + } finally { + scheduler.shutdown() + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSubscribeWhenTopicUnavailable(quorum: String, groupProtocol: String): Unit = { + val numRecords = 1000 + val newtopic = "newtopic" + + val consumer = createConsumer() + consumer.subscribe(Collections.singleton(newtopic)) + executor.schedule(new Runnable { + def run(): Unit = createTopic(newtopic, numPartitions = brokerCount, replicationFactor = brokerCount) + }, 2, TimeUnit.SECONDS) + consumer.poll(time.Duration.ZERO) + + val producer = createProducer() + + def sendRecords(numRecords: Int, topic: String): Unit = { + var remainingRecords = numRecords + val endTimeMs = System.currentTimeMillis + 20000 + while (remainingRecords > 0 && System.currentTimeMillis < endTimeMs) { + val futures = (0 until remainingRecords).map { i => + producer.send(new ProducerRecord(topic, part, i.toString.getBytes, i.toString.getBytes)) + } + futures.map { future => + try { + future.get + remainingRecords -= 1 + } catch { + case _: Exception => + } + } + } + assertEquals(0, remainingRecords) + } + + val poller = new ConsumerAssignmentPoller(consumer, List(newtopic)) + consumerPollers += poller + poller.start() + sendRecords(numRecords, newtopic) + receiveExactRecords(poller, numRecords, 10000) + poller.shutdown() + + brokerServers.foreach(server => killBroker(server.config.brokerId)) + Thread.sleep(500) + restartDeadBrokers() + + val poller2 = new ConsumerAssignmentPoller(consumer, List(newtopic)) + consumerPollers += poller2 + poller2.start() + sendRecords(numRecords, newtopic) + receiveExactRecords(poller, numRecords, 10000L) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testClose(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10 + val producer = createProducer() + producerSend(producer, numRecords) + + checkCloseGoodPath(numRecords, "group1") + checkCloseWithCoordinatorFailure(numRecords, "group2", "group3") + checkCloseWithClusterFailure(numRecords, "group4", "group5", groupProtocol) + } + + /** + * Consumer is closed while cluster is healthy. Consumer should complete pending offset commits + * and leave group. New consumer instance should be able join group and start consuming from + * last committed offset. + */ + private def checkCloseGoodPath(numRecords: Int, groupId: String): Unit = { + val consumer = createConsumerAndReceive(groupId, manualAssign = false, numRecords) + val future = submitCloseAndValidate(consumer, Long.MaxValue, None, gracefulCloseTimeMs) + future.get + checkClosedState(groupId, numRecords) + } + + /** + * Consumer closed while coordinator is unavailable. Close of consumers using group + * management should complete after commit attempt even though commits fail due to rebalance. + * Close of consumers using manual assignment should complete with successful commits since a + * broker is available. + */ + private def checkCloseWithCoordinatorFailure(numRecords: Int, dynamicGroup: String, manualGroup: String): Unit = { + val consumer1 = createConsumerAndReceive(dynamicGroup, manualAssign = false, numRecords) + val consumer2 = createConsumerAndReceive(manualGroup, manualAssign = true, numRecords) + + killBroker(findCoordinator(dynamicGroup)) + killBroker(findCoordinator(manualGroup)) + + submitCloseAndValidate(consumer1, Long.MaxValue, None, gracefulCloseTimeMs).get + submitCloseAndValidate(consumer2, Long.MaxValue, None, gracefulCloseTimeMs).get + + restartDeadBrokers() + checkClosedState(dynamicGroup, 0) + checkClosedState(manualGroup, numRecords) + } + + private def findCoordinator(group: String): Int = { + val request = new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData() + .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) + .setCoordinatorKeys(Collections.singletonList(group))).build() + var nodeId = -1 + TestUtils.waitUntilTrue(() => { + val response = connectAndReceive[FindCoordinatorResponse](request) + nodeId = response.node.id + response.error == Errors.NONE + }, s"Failed to find coordinator for group $group") + nodeId + } + + /** + * Consumer is closed while all brokers are unavailable. Cannot rebalance or commit offsets since + * there is no coordinator, but close should timeout and return. If close is invoked with a very + * large timeout, close should timeout after request timeout. + */ + private def checkCloseWithClusterFailure(numRecords: Int, group1: String, group2: String, + groupProtocol: String): Unit = { + val consumer1 = createConsumerAndReceive(group1, manualAssign = false, numRecords) + + val requestTimeout = 6000 + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "5000") + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") + } + this.consumerConfig.setProperty(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, requestTimeout.toString) + val consumer2 = createConsumerAndReceive(group2, manualAssign = true, numRecords) + + brokerServers.foreach(server => killBroker(server.config.brokerId)) + val closeTimeout = 2000 + val future1 = submitCloseAndValidate(consumer1, closeTimeout, None, Some(closeTimeout)) + val future2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(requestTimeout)) + future1.get + future2.get + } + /** * If we have a running consumer group of size N, configure consumer.group.max.size = N-1 and restart all brokers, * the group should be forced to rebalance when it becomes hosted on a Coordinator with the new config. * Then, 1 consumer should be left out of the group. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) @Disabled // TODO: To be re-enabled once we can make it less flaky (KAFKA-13421) - def testRollingBrokerRestartsWithSmallerMaxGroupSizeConfigDisruptsBigGroup(groupProtocol: String): Unit = { + def testRollingBrokerRestartsWithSmallerMaxGroupSizeConfigDisruptsBigGroup(quorum: String, groupProtocol: String): Unit = { val group = "group-max-size-test" val topic = "group-max-size-test" val maxGroupSize = 2 @@ -106,7 +338,7 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { val partitionCount = consumerCount * 2 this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") - if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") } this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") @@ -136,9 +368,215 @@ class ConsumerBounceTest extends AbstractConsumerTest with Logging { assertTrue(raisedExceptions.head.isInstanceOf[GroupMaxSizeReachedException]) } + /** + * When we have the consumer group max size configured to X, the X+1th consumer trying to join should receive a fatal exception + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerReceivesFatalExceptionWhenGroupPassesMaxSize(quorum: String, groupProtocol: String): Unit = { + val group = "fatal-exception-test" + val topic = "fatal-exception-test" + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") + } + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + + val partitions = createTopicPartitions(topic, numPartitions = maxGroupSize, replicationFactor = brokerCount) + + // Create N+1 consumers in the same consumer group and assert that the N+1th consumer receives a fatal error when it tries to join the group + val consumerPollers = mutable.Buffer[ConsumerAssignmentPoller]() + try { + addConsumersToGroupAndWaitForGroupAssignment(maxGroupSize, mutable.Buffer[Consumer[Array[Byte], Array[Byte]]](), + consumerPollers, List[String](topic), partitions, group) + val (_, rejectedConsumerPollers) = addConsumersToGroup(1, + mutable.Buffer[Consumer[Array[Byte], Array[Byte]]](), mutable.Buffer[ConsumerAssignmentPoller](), List[String](topic), partitions, group) + val rejectedConsumer = rejectedConsumerPollers.head + TestUtils.waitUntilTrue(() => { + rejectedConsumer.thrownException.isDefined + }, "Extra consumer did not throw an exception") + assertTrue(rejectedConsumer.thrownException.get.isInstanceOf[GroupMaxSizeReachedException]) + + // assert group continues to live + producerSend(createProducer(), maxGroupSize * 100, topic, numPartitions = Some(partitions.size)) + TestUtils.waitUntilTrue(() => { + consumerPollers.forall(p => p.receivedMessages >= 100) + }, "The consumers in the group could not fetch the expected records", 10000L) + } finally { + consumerPollers.foreach(_.shutdown()) + } + } + + /** + * Consumer is closed during rebalance. Close should leave group and close + * immediately if rebalance is in progress. If brokers are not available, + * close should terminate immediately without sending leave group. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCloseDuringRebalance(quorum: String, groupProtocol: String): Unit = { + val topic = "closetest" + createTopic(topic, 10, brokerCount) + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, "60000") + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "1000") + } + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + checkCloseDuringRebalance("group1", topic, executor, brokersAvailableDuringClose = true) + } + + private def checkCloseDuringRebalance(groupId: String, topic: String, executor: ExecutorService, brokersAvailableDuringClose: Boolean): Unit = { + + def subscribeAndPoll(consumer: Consumer[Array[Byte], Array[Byte]], revokeSemaphore: Option[Semaphore] = None): Future[Any] = { + executor.submit(() => { + consumer.subscribe(Collections.singletonList(topic)) + revokeSemaphore.foreach(s => s.release()) + consumer.poll(Duration.ofMillis(500)) + }, 0) + } + + def waitForRebalance(timeoutMs: Long, future: Future[Any], otherConsumers: Consumer[Array[Byte], Array[Byte]]*): Unit = { + val startMs = System.currentTimeMillis + while (System.currentTimeMillis < startMs + timeoutMs && !future.isDone) + otherConsumers.foreach(consumer => consumer.poll(time.Duration.ofMillis(100L))) + assertTrue(future.isDone, "Rebalance did not complete in time") + } + + def createConsumerToRebalance(): Future[Any] = { + val consumer = createConsumerWithGroupId(groupId) + val rebalanceSemaphore = new Semaphore(0) + val future = subscribeAndPoll(consumer, Some(rebalanceSemaphore)) + // Wait for consumer to poll and trigger rebalance + assertTrue(rebalanceSemaphore.tryAcquire(2000, TimeUnit.MILLISECONDS), "Rebalance not triggered") + // Rebalance is blocked by other consumers not polling + assertFalse(future.isDone, "Rebalance completed too early") + future + } + val consumer1 = createConsumerWithGroupId(groupId) + waitForRebalance(2000, subscribeAndPoll(consumer1)) + val consumer2 = createConsumerWithGroupId(groupId) + waitForRebalance(2000, subscribeAndPoll(consumer2), consumer1) + val rebalanceFuture = createConsumerToRebalance() + + // consumer1 should leave group and close immediately even though rebalance is in progress + val closeFuture1 = submitCloseAndValidate(consumer1, Long.MaxValue, None, gracefulCloseTimeMs) + + // Rebalance should complete without waiting for consumer1 to timeout since consumer1 has left the group + waitForRebalance(2000, rebalanceFuture, consumer2) + + // Trigger another rebalance and shutdown all brokers + // This consumer poll() doesn't complete and `tearDown` shuts down the executor and closes the consumer + createConsumerToRebalance() + brokerServers.foreach(server => killBroker(server.config.brokerId)) + + // consumer2 should close immediately without LeaveGroup request since there are no brokers available + val closeFuture2 = submitCloseAndValidate(consumer2, Long.MaxValue, None, Some(0)) + + // Ensure futures complete to avoid concurrent shutdown attempt during test cleanup + closeFuture1.get(2000, TimeUnit.MILLISECONDS) + closeFuture2.get(2000, TimeUnit.MILLISECONDS) + } + + private def createConsumerAndReceive(groupId: String, manualAssign: Boolean, numRecords: Int): Consumer[Array[Byte], Array[Byte]] = { + val consumer = createConsumerWithGroupId(groupId) + val consumerPoller = if (manualAssign) + subscribeConsumerAndStartPolling(consumer, List(), Set(tp)) + else + subscribeConsumerAndStartPolling(consumer, List(topic)) + + consumerPollers += consumerPoller + receiveExactRecords(consumerPoller, numRecords) + consumerPoller.shutdown() + consumer + } + + private def receiveExactRecords(consumer: ConsumerAssignmentPoller, numRecords: Int, timeoutMs: Long = 60000): Unit = { + TestUtils.waitUntilTrue(() => { + consumer.receivedMessages == numRecords + }, s"Consumer did not receive expected $numRecords. It received ${consumer.receivedMessages}", timeoutMs) + } + + private def submitCloseAndValidate(consumer: Consumer[Array[Byte], Array[Byte]], + closeTimeoutMs: Long, minCloseTimeMs: Option[Long], maxCloseTimeMs: Option[Long]): Future[Any] = { + executor.submit(() => { + val closeGraceTimeMs = 2000 + val startMs = System.currentTimeMillis() + info("Closing consumer with timeout " + closeTimeoutMs + " ms.") + consumer.close(time.Duration.ofMillis(closeTimeoutMs)) + val timeTakenMs = System.currentTimeMillis() - startMs + maxCloseTimeMs.foreach { ms => + assertTrue(timeTakenMs < ms + closeGraceTimeMs, "Close took too long " + timeTakenMs) + } + minCloseTimeMs.foreach { ms => + assertTrue(timeTakenMs >= ms, "Close finished too quickly " + timeTakenMs) + } + info("consumer.close() completed in " + timeTakenMs + " ms.") + }, 0) + } + + private def checkClosedState(groupId: String, committedRecords: Int): Unit = { + // Check that close was graceful with offsets committed and leave group sent. + // New instance of consumer should be assigned partitions immediately and should see committed offsets. + val assignSemaphore = new Semaphore(0) + val consumer = createConsumerWithGroupId(groupId) + consumer.subscribe(Collections.singletonList(topic), new ConsumerRebalanceListener { + def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + assignSemaphore.release() + } + def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { + }}) + + TestUtils.waitUntilTrue(() => { + consumer.poll(time.Duration.ofMillis(100L)) + assignSemaphore.tryAcquire() + }, "Assignment did not complete on time") + + if (committedRecords > 0) + assertEquals(committedRecords, consumer.committed(Set(tp).asJava).get(tp).offset) + consumer.close() + } + + private class BounceBrokerScheduler(val numIters: Int) extends ShutdownableThread("daemon-bounce-broker", false) { + private var iter: Int = 0 + + override def doWork(): Unit = { + killRandomBroker() + Thread.sleep(500) + restartDeadBrokers() + + iter += 1 + if (iter == numIters) + initiateShutdown() + else + Thread.sleep(500) + } + } + private def createTopicPartitions(topic: String, numPartitions: Int, replicationFactor: Int, topicConfig: Properties = new Properties): Set[TopicPartition] = { createTopic(topic, numPartitions = numPartitions, replicationFactor = replicationFactor, topicConfig = topicConfig) Range(0, numPartitions).map(part => new TopicPartition(topic, part)).toSet } + + private def producerSend(producer: KafkaProducer[Array[Byte], Array[Byte]], + numRecords: Int, + topic: String = this.topic, + numPartitions: Option[Int] = None): Unit = { + var partitionIndex = 0 + def getPartition: Int = { + numPartitions match { + case Some(partitions) => + val nextPart = partitionIndex % partitions + partitionIndex += 1 + nextPart + case None => part + } + } + + val futures = (0 until numRecords).map { i => + producer.send(new ProducerRecord(topic, getPartition, i.toString.getBytes, i.toString.getBytes)) + } + futures.map(_.get) + } + } diff --git a/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala new file mode 100644 index 0000000000000..5d6622799fe68 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ConsumerRebootstrapTest.scala @@ -0,0 +1,147 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import kafka.api.ConsumerRebootstrapTest._ +import kafka.server.QuorumTestHarness.getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows} +import org.junit.jupiter.api.Disabled +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.{Arguments, MethodSource} + +import java.time.Duration +import java.util.{Collections, stream} +import java.util.concurrent.TimeUnit +import java.util.concurrent.TimeoutException + +class ConsumerRebootstrapTest extends RebootstrapTest { + @Disabled("KAFKA-17986") + @ParameterizedTest(name = RebootstrapTestName) + @MethodSource(Array("rebootstrapTestParams")) + def testRebootstrap(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { + sendRecords(10, 0) + + TestUtils.waitUntilTrue( + () => server0.logManager.logsByTopic(tp.topic()).head.logEndOffset == server1.logManager.logsByTopic(tp.topic()).head.logEndOffset, + "Timeout waiting for records to be replicated" + ) + + server1.shutdown() + server1.awaitShutdown() + + val consumer = createConsumer(configOverrides = clientOverrides(useRebootstrapTriggerMs)) + + // Only the server 0 is available for the consumer during the bootstrap. + consumer.assign(Collections.singleton(tp)) + + consumeAndVerifyRecords(consumer, 10, 0) + + // Bring back the server 1 and shut down 0. + server1.startup() + + TestUtils.waitUntilTrue( + () => server0.logManager.logsByTopic(tp.topic()).head.logEndOffset == server1.logManager.logsByTopic(tp.topic()).head.logEndOffset, + "Timeout waiting for records to be replicated" + ) + + server0.shutdown() + server0.awaitShutdown() + sendRecords(10, 10) + + // The server 0, originally cached during the bootstrap, is offline. + // However, the server 1 from the bootstrap list is online. + // Should be able to consume records. + consumeAndVerifyRecords(consumer, 10, 10, startingKeyAndValueIndex = 10, startingTimestamp = 10) + + // Bring back the server 0 and shut down 1. + server0.startup() + + TestUtils.waitUntilTrue( + () => server0.logManager.logsByTopic(tp.topic()).head.logEndOffset == server1.logManager.logsByTopic(tp.topic()).head.logEndOffset, + "Timeout waiting for records to be replicated" + ) + + server1.shutdown() + server1.awaitShutdown() + sendRecords(10, 20) + + // The same situation, but the server 1 has gone and server 0 is back. + consumeAndVerifyRecords(consumer, 10, 20, startingKeyAndValueIndex = 20, startingTimestamp = 20) + } + + @Disabled + @ParameterizedTest(name = RebootstrapTestName) + @MethodSource(Array("rebootstrapTestParams")) + def testRebootstrapDisabled(quorum: String, groupProtocol: String, useRebootstrapTriggerMs: Boolean): Unit = { + server1.shutdown() + server1.awaitShutdown() + + val configOverrides = clientOverrides(useRebootstrapTriggerMs) + configOverrides.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "none") + if (useRebootstrapTriggerMs) + configOverrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "1000") + + val producer = createProducer(configOverrides = configOverrides) + val consumer = createConsumer(configOverrides = configOverrides) + val adminClient = createAdminClient(configOverrides = configOverrides) + + // Only the server 0 is available during the bootstrap. + val recordMetadata0 = producer.send(new ProducerRecord(topic, part, 0L, "key 0".getBytes, "value 0".getBytes)).get(15, TimeUnit.SECONDS) + assertEquals(0, recordMetadata0.offset()) + adminClient.listTopics().names().get(15, TimeUnit.SECONDS) + consumer.assign(Collections.singleton(tp)) + consumeAndVerifyRecords(consumer, 1, 0) + + server0.shutdown() + server0.awaitShutdown() + server1.startup() + + assertThrows(classOf[TimeoutException], () => producer.send(new ProducerRecord(topic, part, "key 2".getBytes, "value 2".getBytes)).get(5, TimeUnit.SECONDS)) + assertThrows(classOf[TimeoutException], () => adminClient.listTopics().names().get(5, TimeUnit.SECONDS)) + + val producer2 = createProducer(configOverrides = configOverrides) + producer2.send(new ProducerRecord(topic, part, 1L, "key 1".getBytes, "value 1".getBytes)).get(15, TimeUnit.SECONDS) + assertEquals(0, consumer.poll(Duration.ofSeconds(5)).count) + } + + private def sendRecords(numRecords: Int, from: Int): Unit = { + val producer: KafkaProducer[Array[Byte], Array[Byte]] = createProducer() + (from until (numRecords + from)).foreach { i => + val record = new ProducerRecord(tp.topic(), tp.partition(), i.toLong, s"key $i".getBytes, s"value $i".getBytes) + producer.send(record) + } + producer.flush() + producer.close() + } +} + +object ConsumerRebootstrapTest { + + final val RebootstrapTestName = s"${TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames}.useRebootstrapTriggerMs={2}" + def rebootstrapTestParams: stream.Stream[Arguments] = { + assertEquals(1, getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly.count()) + val args = getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly + .findFirst().get.get + stream.Stream.of( + Arguments.of((args :+ true):_*), + Arguments.of((args :+ false):_*) + ) + } +} diff --git a/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala new file mode 100644 index 0000000000000..0c6a58d98d12b --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ConsumerTopicCreationTest.scala @@ -0,0 +1,108 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.lang.{Boolean => JBoolean} +import java.time.Duration +import java.util +import java.util.{Collections, Locale} +import kafka.utils.{EmptyTestInfo, TestUtils} +import org.apache.kafka.clients.admin.NewTopic +import org.apache.kafka.clients.consumer.{ConsumerConfig, GroupProtocol} +import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} +import org.apache.kafka.server.config.{ServerConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.{Arguments, MethodSource} + +/** + * Tests behavior of specifying auto topic creation configuration for the consumer and broker + */ +class ConsumerTopicCreationTest { + + @ParameterizedTest(name = "{displayName}.groupProtocol={0}.brokerAutoTopicCreationEnable={1}.consumerAllowAutoCreateTopics={2}") + @MethodSource(Array("parameters")) + def testAutoTopicCreation(groupProtocol: String, brokerAutoTopicCreationEnable: JBoolean, consumerAllowAutoCreateTopics: JBoolean): Unit = { + val testCase = new ConsumerTopicCreationTest.TestCase(groupProtocol, brokerAutoTopicCreationEnable, consumerAllowAutoCreateTopics) + testCase.setUp(new EmptyTestInfo() { + override def getDisplayName = "quorum=kraft" + }) + try testCase.test() finally testCase.tearDown() + } + +} + +object ConsumerTopicCreationTest { + + private class TestCase(groupProtocol: String, brokerAutoTopicCreationEnable: JBoolean, consumerAllowAutoCreateTopics: JBoolean) extends IntegrationTestHarness { + private val topic_1 = "topic-1" + private val topic_2 = "topic-2" + private val producerClientId = "ConsumerTestProducer" + private val consumerClientId = "ConsumerTestConsumer" + + // configure server properties + this.serverConfig.setProperty(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, "false") // speed up shutdown + this.serverConfig.setProperty(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, brokerAutoTopicCreationEnable.toString) + + // configure client properties + this.producerConfig.setProperty(ProducerConfig.CLIENT_ID_CONFIG, producerClientId) + this.consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, consumerClientId) + this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "my-test") + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + this.consumerConfig.setProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "100") + this.consumerConfig.setProperty(ConsumerConfig.ALLOW_AUTO_CREATE_TOPICS_CONFIG, consumerAllowAutoCreateTopics.toString) + this.consumerConfig.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) + override protected def brokerCount: Int = 1 + + + def test(): Unit = { + val consumer = createConsumer() + val producer = createProducer() + val adminClient = createAdminClient() + val record = new ProducerRecord(topic_1, 0, "key".getBytes, "value".getBytes) + + // create `topic_1` and produce a record to it + adminClient.createTopics(Collections.singleton(new NewTopic(topic_1, 1, 1.toShort))).all.get + producer.send(record).get + + consumer.subscribe(util.Arrays.asList(topic_1, topic_2)) + + // Wait until the produced record was consumed. This guarantees that metadata request for `topic_2` was sent to the + // broker. + TestUtils.waitUntilTrue(() => { + consumer.poll(Duration.ofMillis(100)).count > 0 + }, "Timed out waiting to consume") + + // MetadataRequest is guaranteed to create the topic znode if creation was required + val topicCreated = getTopicIds().keySet.contains(topic_2) + if (brokerAutoTopicCreationEnable && consumerAllowAutoCreateTopics) + assertTrue(topicCreated) + else + assertFalse(topicCreated) + } + } + + def parameters: java.util.stream.Stream[Arguments] = { + val data = new java.util.ArrayList[Arguments]() + for (brokerAutoTopicCreationEnable <- Array(JBoolean.TRUE, JBoolean.FALSE)) + for (consumerAutoCreateTopicsPolicy <- Array(JBoolean.TRUE, JBoolean.FALSE)) + data.add(Arguments.of(GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT), brokerAutoTopicCreationEnable, consumerAutoCreateTopicsPolicy)) + data.stream() + } +} diff --git a/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala new file mode 100644 index 0000000000000..7e614af8324b5 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ConsumerWithLegacyMessageFormatIntegrationTest.scala @@ -0,0 +1,159 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import kafka.utils.TestInfoUtils +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.record.{AbstractRecords, CompressionType, MemoryRecords, RecordBatch, RecordVersion, SimpleRecord, TimestampType} +import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import java.nio.ByteBuffer +import java.util +import java.util.{Collections, Optional} +import scala.jdk.CollectionConverters._ + +class ConsumerWithLegacyMessageFormatIntegrationTest extends AbstractConsumerTest { + + val topic1 = "part-test-topic-1" + val topic2 = "part-test-topic-2" + val topic3 = "part-test-topic-3" + + val t1p0 = new TopicPartition(topic1, 0) + val t1p1 = new TopicPartition(topic1, 1) + val t2p0 = new TopicPartition(topic2, 0) + val t2p1 = new TopicPartition(topic2, 1) + val t3p0 = new TopicPartition(topic3, 0) + val t3p1 = new TopicPartition(topic3, 1) + + private def appendLegacyRecords(numRecords: Int, tp: TopicPartition, brokerId: Int, magicValue: Byte): Unit = { + val records = (0 until numRecords).map { i => + new SimpleRecord(i, s"key $i".getBytes, s"value $i".getBytes) + } + val buffer = ByteBuffer.allocate(AbstractRecords.estimateSizeInBytes(magicValue, CompressionType.NONE, records.asJava)) + val builder = MemoryRecords.builder(buffer, magicValue, Compression.of(CompressionType.NONE).build, + TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, + 0, false, RecordBatch.NO_PARTITION_LEADER_EPOCH) + + records.foreach(builder.append) + + brokers.filter(_.config.brokerId == brokerId).foreach(b => { + val unifiedLog = b.replicaManager.logManager.getLog(tp).get + unifiedLog.appendAsLeaderWithRecordVersion( + records = builder.build(), + leaderEpoch = 0, + recordVersion = RecordVersion.lookup(magicValue) + ) + // Default isolation.level is read_uncommitted. It makes Partition#fetchOffsetForTimestamp to return UnifiedLog#highWatermark, + // so increasing high watermark to make it return the correct offset. + unifiedLog.maybeIncrementHighWatermark(unifiedLog.logEndOffsetMetadata) + }) + } + + private def setupTopics(): Unit = { + val producer = createProducer() + createTopic(topic1, numPartitions = 2) + createTopicWithAssignment(topic2, Map(0 -> List(0), 1 -> List(1))) + createTopicWithAssignment(topic3, Map(0 -> List(0), 1 -> List(1))) + + // v2 message format for topic1 + sendRecords(producer, numRecords = 100, t1p0, startingTimestamp = 0) + sendRecords(producer, numRecords = 100, t1p1, startingTimestamp = 0) + // v0 message format for topic2 + appendLegacyRecords(100, t2p0, 0, RecordBatch.MAGIC_VALUE_V0) + appendLegacyRecords(100, t2p1, 1, RecordBatch.MAGIC_VALUE_V0) + // v1 message format for topic3 + appendLegacyRecords(100, t3p0, 0, RecordBatch.MAGIC_VALUE_V1) + appendLegacyRecords(100, t3p1, 1, RecordBatch.MAGIC_VALUE_V1) + + producer.close() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetsForTimes(quorum: String, groupProtocol: String): Unit = { + setupTopics() + val consumer = createConsumer() + + // Test negative target time + assertThrows(classOf[IllegalArgumentException], + () => consumer.offsetsForTimes(Collections.singletonMap(t1p0, -1))) + + val timestampsToSearch = util.Map.of[TopicPartition, java.lang.Long]( + t1p0, 0L, + t1p1, 20L, + t2p0, 40L, + t2p1, 60L, + t3p0, 80L, + t3p1, 100L + ) + + val timestampOffsets = consumer.offsetsForTimes(timestampsToSearch) + + val timestampTopic1P0 = timestampOffsets.get(t1p0) + assertEquals(0, timestampTopic1P0.offset) + assertEquals(0, timestampTopic1P0.timestamp) + assertEquals(Optional.of(0), timestampTopic1P0.leaderEpoch) + + val timestampTopic1P1 = timestampOffsets.get(t1p1) + assertEquals(20, timestampTopic1P1.offset) + assertEquals(20, timestampTopic1P1.timestamp) + assertEquals(Optional.of(0), timestampTopic1P1.leaderEpoch) + + // v0 message format doesn't have timestamp + val timestampTopic2P0 = timestampOffsets.get(t2p0) + assertNull(timestampTopic2P0) + + val timestampTopic2P1 = timestampOffsets.get(t2p1) + assertNull(timestampTopic2P1) + + // v1 message format doesn't have leader epoch + val timestampTopic3P0 = timestampOffsets.get(t3p0) + assertEquals(80, timestampTopic3P0.offset) + assertEquals(80, timestampTopic3P0.timestamp) + assertEquals(Optional.empty, timestampTopic3P0.leaderEpoch) + + assertNull(timestampOffsets.get(t3p1)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testEarliestOrLatestOffsets(quorum: String, groupProtocol: String): Unit = { + setupTopics() + + val partitions = Set(t1p0, t1p1, t2p0, t2p1, t3p0, t3p1).asJava + val consumer = createConsumer() + + val earliests = consumer.beginningOffsets(partitions) + assertEquals(0L, earliests.get(t1p0)) + assertEquals(0L, earliests.get(t1p1)) + assertEquals(0L, earliests.get(t2p0)) + assertEquals(0L, earliests.get(t2p1)) + assertEquals(0L, earliests.get(t3p0)) + assertEquals(0L, earliests.get(t3p1)) + + val latests = consumer.endOffsets(partitions) + assertEquals(100L, latests.get(t1p0)) + assertEquals(100L, latests.get(t1p1)) + assertEquals(100L, latests.get(t2p0)) + assertEquals(100L, latests.get(t2p1)) + assertEquals(100L, latests.get(t3p0)) + assertEquals(100L, latests.get(t3p1)) + } +} diff --git a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala index e1bd97c93b044..006a5d085c511 100644 --- a/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala +++ b/core/src/test/scala/integration/kafka/api/CustomQuotaCallbackTest.scala @@ -101,9 +101,9 @@ class CustomQuotaCallbackTest extends IntegrationTestHarness with SaslSetup { kafkaClientSaslMechanism, JaasTestUtils.KAFKA_SCRAM_ADMIN, JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCustomQuotaCallback(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCustomQuotaCallback(quorum: String, groupProtocol: String): Unit = { // Large quota override, should not throttle var brokerId = 0 var user = createGroupWithOneUser("group0_user1", brokerId) @@ -406,7 +406,7 @@ class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable w } override def reconfigurableConfigs: util.Set[String] = { - java.util.Set.of(DefaultProduceQuotaProp, DefaultFetchQuotaProp) + Set(DefaultProduceQuotaProp, DefaultFetchQuotaProp).asJava } override def validateReconfiguration(configs: util.Map[String, _]): Unit = { @@ -437,9 +437,9 @@ class GroupedUserQuotaCallback extends ClientQuotaCallback with Reconfigurable w case groupPrincipal: GroupedUserPrincipal => val userGroup = groupPrincipal.userGroup val quotaLimit = quotaOrDefault(userGroup, quotaType) - if (quotaLimit != null) { - util.Map.of(QuotaGroupTag, userGroup) - } else + if (quotaLimit != null) + Map(QuotaGroupTag -> userGroup).asJava + else UnlimitedQuotaMetricTags case _ => UnlimitedQuotaMetricTags diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala index f777c8da46eff..ab5b587a0e681 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationTest.scala @@ -66,7 +66,7 @@ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setClusterId("XcZZOzUqS4yHOjhMQB6JLQ") formatter.setScramArguments( - java.util.List.of(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) + List(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) } override def createPrivilegedAdminClient(): Admin = createScramAdminClient(kafkaClientSaslMechanism, kafkaPrincipal.getName, kafkaPassword) @@ -99,14 +99,14 @@ class DelegationTokenEndToEndAuthorizationTest extends EndToEndAuthorizationTest superuserClientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, privilegedClientLoginContext) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCreateUserWithDelegationToken(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCreateUserWithDelegationToken(quorum: String, groupProtocol: String): Unit = { val privilegedAdminClient = Admin.create(privilegedAdminClientConfig) try { val user = "user" - val results = privilegedAdminClient.alterUserScramCredentials(java.util.List.of[UserScramCredentialAlteration]( - new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.SCRAM_SHA_256, 4096), "password"))) + val results = privilegedAdminClient.alterUserScramCredentials(List[UserScramCredentialAlteration]( + new UserScramCredentialUpsertion(user, new ScramCredentialInfo(PublicScramMechanism.SCRAM_SHA_256, 4096), "password")).asJava) assertEquals(1, results.values.size) val future = results.values.get(user) future.get // make sure we haven't completed exceptionally diff --git a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala index 3af29d58a7a70..833b06654d3ee 100644 --- a/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala +++ b/core/src/test/scala/integration/kafka/api/DelegationTokenEndToEndAuthorizationWithOwnerTest.scala @@ -26,9 +26,10 @@ import org.apache.kafka.common.resource.ResourcePattern import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.token.delegation.DelegationToken import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue} -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource -import java.util +import java.util.Collections import scala.concurrent.ExecutionException import scala.jdk.CollectionConverters._ import scala.util.Using @@ -64,12 +65,12 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE private val describeTokenFailPassword = "describe-token-fail-password" override def configureSecurityAfterServersStart(): Unit = { - // Create the Acls before calling super which will create the additional tokens + // Create the Acls before calling super which will create the additiona tokens Using.resource(createPrivilegedAdminClient()) { superuserAdminClient => superuserAdminClient.createAcls(List(AclTokenOtherDescribe, AclTokenCreate, AclTokenDescribe).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TokenCreateAcl ++ TokenDescribeAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, + TestUtils.waitAndVerifyAcls(TokenCreateAcl ++ TokenDescribeAcl, s.dataPlaneRequestProcessor.authorizer.get, new ResourcePattern(USER, clientPrincipal.toString, LITERAL)) } } @@ -93,33 +94,36 @@ class DelegationTokenEndToEndAuthorizationWithOwnerTest extends DelegationTokenE createScramAdminClient(kafkaClientSaslMechanism, tokenRequesterPrincipal.getName, tokenRequesterPassword) } - @Test - def testCreateTokenForOtherUserFails(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTokenForOtherUserFails(quorum: String): Unit = { val thrown = assertThrows(classOf[ExecutionException], () => { createDelegationTokens(() => new CreateDelegationTokenOptions().owner(otherClientPrincipal), assert = false) }) assertTrue(thrown.getMessage.contains("Delegation Token authorization failed")) } - @Test - def testDescribeTokenForOtherUserFails(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTokenForOtherUserFails(quorum: String): Unit = { Using.resource(createScramAdminClient(kafkaClientSaslMechanism, describeTokenFailPrincipal.getName, describeTokenFailPassword)) { describeTokenFailAdminClient => Using.resource(createScramAdminClient(kafkaClientSaslMechanism, otherClientPrincipal.getName, otherClientPassword)) { otherClientAdminClient => otherClientAdminClient.createDelegationToken().delegationToken().get() val tokens = describeTokenFailAdminClient.describeDelegationToken( - new DescribeDelegationTokenOptions().owners(util.List.of(otherClientPrincipal)) + new DescribeDelegationTokenOptions().owners(Collections.singletonList(otherClientPrincipal)) ).delegationTokens.get.asScala assertTrue(tokens.isEmpty) } } } - @Test - def testDescribeTokenForOtherUserPasses(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTokenForOtherUserPasses(quorum: String): Unit = { val adminClient = createTokenRequesterAdminClient() try { val tokens = adminClient.describeDelegationToken( - new DescribeDelegationTokenOptions().owners(util.List.of(clientPrincipal))) + new DescribeDelegationTokenOptions().owners(Collections.singletonList(clientPrincipal))) .delegationTokens.get.asScala assertTrue(tokens.nonEmpty) tokens.foreach(t => { diff --git a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala index 40bb4f649cb5d..aa6d208ef8bf9 100644 --- a/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/EndToEndAuthorizationTest.scala @@ -19,7 +19,7 @@ package kafka.api import com.yammer.metrics.core.Gauge -import java.util.Properties +import java.util.{Collections, Properties} import java.util.concurrent.ExecutionException import org.apache.kafka.metadata.authorizer.StandardAuthorizer import kafka.utils._ @@ -45,7 +45,6 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{CsvSource, MethodSource} -import java.util import scala.jdk.CollectionConverters._ /** @@ -171,12 +170,12 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas /** * Tests the ability of producing and consuming with the appropriate ACLs set. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsumeViaAssign(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsumeViaAssign(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } @@ -200,71 +199,71 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas ._2.asInstanceOf[Gauge[Double]] } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsumeViaSubscribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsumeViaSubscribe(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsumeWithWildcardAcls(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsumeWithWildcardAcls(quorum: String, groupProtocol: String): Unit = { setWildcardResourceAcls() val producer = createProducer() sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsumeWithPrefixedAcls(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsumeWithPrefixedAcls(quorum: String, groupProtocol: String): Unit = { setPrefixedResourceAcls() val producer = createProducer() sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) consumeRecords(consumer, numRecords) confirmReauthenticationMetrics() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsumeTopicAutoCreateTopicCreateAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsumeTopicAutoCreateTopicCreateAcl(quorum: String, groupProtocol: String): Unit = { // topic2 is not created on setup() val tp2 = new TopicPartition("topic2", 0) setAclsAndProduce(tp2) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp2)) + consumer.assign(List(tp2).asJava) consumeRecords(consumer, numRecords, topic = tp2.topic) confirmReauthenticationMetrics() } private def setWildcardResourceAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclWildcardTopicWrite, AclWildcardTopicCreate, AclWildcardTopicDescribe, AclWildcardTopicRead)).values - superuserAdminClient.createAcls(java.util.List.of(AclWildcardGroupRead)).values + superuserAdminClient.createAcls(List(AclWildcardTopicWrite, AclWildcardTopicCreate, AclWildcardTopicDescribe, AclWildcardTopicRead).asJava).values + superuserAdminClient.createAcls(List(AclWildcardGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, wildcardTopicResource) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, wildcardGroupResource) + TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, wildcardTopicResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, wildcardGroupResource) } } private def setPrefixedResourceAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclPrefixedTopicWrite, AclPrefixedTopicCreate, AclPrefixedTopicDescribe, AclPrefixedTopicRead)).values - superuserAdminClient.createAcls(java.util.List.of(AclPrefixedGroupRead)).values + superuserAdminClient.createAcls(List(AclPrefixedTopicWrite, AclPrefixedTopicCreate, AclPrefixedTopicDescribe, AclPrefixedTopicRead).asJava).values + superuserAdminClient.createAcls(List(AclPrefixedGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, prefixedTopicResource) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, prefixedGroupResource) + TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, prefixedTopicResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, prefixedGroupResource) } } @@ -272,14 +271,14 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas val topicResource = new ResourcePattern(TOPIC, tp.topic, LITERAL) val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(topicResource), AclTopicCreate(topicResource), AclTopicDescribe(topicResource))).values - superuserAdminClient.createAcls(java.util.List.of(AclTopicRead(topicResource))).values - superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values + superuserAdminClient.createAcls(List(AclTopicWrite(topicResource), AclTopicCreate(topicResource), AclTopicDescribe(topicResource)).asJava).values + superuserAdminClient.createAcls(List(AclTopicRead(topicResource)).asJava).values + superuserAdminClient.createAcls(List(AclGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, + TestUtils.waitAndVerifyAcls(TopicReadAcl ++ TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, new ResourcePattern(TOPIC, tp.topic, LITERAL)) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, groupResource) } } @@ -291,9 +290,9 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas private def setConsumerGroupAcls(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values + superuserAdminClient.createAcls(List(AclGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, groupResource) } } @@ -302,14 +301,14 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * messages and describe topics respectively when the describe ACL isn't set. * Also verifies that subsequent publish, consume and describe to authorized topic succeeds. */ - @ParameterizedTest(name = "{displayName}.groupProtocol={0}.isIdempotenceEnabled={1}") + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.isIdempotenceEnabled={2}") @CsvSource(value = Array( - "classic, true", - //"consumer, true", - "classic, false", - //"consumer, false", + "kraft, classic, true", + //"kraft, consumer, true", + "kraft, classic, false", + //"kraft, consumer, false", )) - def testNoDescribeProduceOrConsumeWithoutTopicDescribeAcl(groupProtocol:String, isIdempotenceEnabled:Boolean): Unit = { + def testNoDescribeProduceOrConsumeWithoutTopicDescribeAcl(quorum:String, groupProtocol:String, isIdempotenceEnabled:Boolean): Unit = { // Set consumer group acls since we are testing topic authorization setConsumerGroupAcls() @@ -320,10 +319,10 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, numRecords, topic = tp.topic)) val adminClient = createAdminClient() - val e1 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames().get()) + val e1 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(Set(topic).asJava).allTopicNames().get()) assertTrue(e1.getCause.isInstanceOf[TopicAuthorizationException], "Unexpected exception " + e1.getCause) // Verify successful produce/consume/describe on another topic using the same producer, consumer and adminClient @@ -339,22 +338,22 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas producer sendRecords(producer2, numRecords, tp2) - consumer.assign(java.util.List.of(tp2)) + consumer.assign(List(tp2).asJava) consumeRecords(consumer, numRecords, topic = topic2) - val describeResults = adminClient.describeTopics(java.util.Set.of(topic, topic2)).topicNameValues() + val describeResults = adminClient.describeTopics(Set(topic, topic2).asJava).topicNameValues() assertEquals(1, describeResults.get(topic2).get().partitions().size()) - val e2 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames().get()) + val e2 = assertThrows(classOf[ExecutionException], () => adminClient.describeTopics(Set(topic).asJava).allTopicNames().get()) assertTrue(e2.getCause.isInstanceOf[TopicAuthorizationException], "Unexpected exception " + e2.getCause) // Verify that consumer manually assigning both authorized and unauthorized topic doesn't consume // from the unauthorized topic and throw; since we can now return data during the time we are updating // metadata / fetching positions, it is possible that the authorized topic record is returned during this time. - consumer.assign(java.util.List.of(tp, tp2)) + consumer.assign(List(tp, tp2).asJava) sendRecords(producer2, numRecords, tp2) var topic2RecordConsumed = false def verifyNoRecords(records: ConsumerRecords[Array[Byte], Array[Byte]]): Boolean = { - assertEquals(util.Set.of(tp2), records.partitions(), "Consumed records with unexpected partitions: " + records) + assertEquals(Collections.singleton(tp2), records.partitions(), "Consumed records with unexpected partitions: " + records) topic2RecordConsumed = true false } @@ -368,22 +367,22 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas } sendRecords(producer2, numRecords, tp) consumeRecordsIgnoreOneAuthorizationException(consumer, numRecords, startingOffset = 0, topic) - val describeResults2 = adminClient.describeTopics(java.util.Set.of(topic, topic2)).topicNameValues + val describeResults2 = adminClient.describeTopics(Set(topic, topic2).asJava).topicNameValues assertEquals(1, describeResults2.get(topic).get().partitions().size()) assertEquals(1, describeResults2.get(topic2).get().partitions().size()) } @ParameterizedTest @CsvSource(value = Array( - "true", - "false", + "kraft, true", + "kraft, false", )) - def testNoProduceWithDescribeAcl(isIdempotenceEnabled:Boolean): Unit = { + def testNoProduceWithDescribeAcl(quorum:String, isIdempotenceEnabled:Boolean): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclTopicDescribe())).values + superuserAdminClient.createAcls(List(AclTopicDescribe()).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicDescribeAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) + TestUtils.waitAndVerifyAcls(TopicDescribeAcl, s.dataPlaneRequestProcessor.authorizer.get, topicResource) } val prop = new Properties() @@ -395,7 +394,7 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas assertThrows(classOf[KafkaException], () => sendRecords(producer, numRecords, tp)) } else { val e = assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords, tp)) - assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Set(topic).asJava, e.unauthorizedTopics()) } confirmReauthenticationMetrics() } @@ -404,34 +403,34 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * Tests that a consumer fails to consume messages without the appropriate * ACL set. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoConsumeWithoutDescribeAclViaAssign(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoConsumeWithoutDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) // the exception is expected when the consumer attempts to lookup offsets assertThrows(classOf[KafkaException], () => consumeRecords(consumer)) confirmReauthenticationMetrics() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoConsumeWithoutDescribeAclViaSubscribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoConsumeWithoutDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithoutDescribeAclSetup() val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) // this should timeout since the consumer will not be able to fetch any metadata for the topic assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, timeout = 3000)) // Verify that no records are consumed even if one of the requested topics is authorized setReadAndWriteAcls(tp) - consumer.subscribe(java.util.List.of(topic, "topic2")) + consumer.subscribe(List(topic, "topic2").asJava) assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer, timeout = 3000)) // Verify that records are consumed if all topics are authorized - consumer.subscribe(java.util.List.of(topic)) - if (groupProtocol.equalsIgnoreCase(GroupProtocol.CLASSIC.name)) { + consumer.subscribe(List(topic).asJava) + if (groupProtocol.equals(GroupProtocol.CLASSIC)) { consumeRecordsIgnoreOneAuthorizationException(consumer) } else { TestUtils.waitUntilTrue(() => { @@ -447,58 +446,58 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas private def noConsumeWithoutDescribeAclSetup(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values - superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values + superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values + superuserAdminClient.createAcls(List(AclGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) + TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, topicResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, groupResource) } val producer = createProducer() sendRecords(producer, numRecords, tp) - superuserAdminClient.deleteAcls(java.util.List.of(AclTopicDescribe().toFilter)).values - superuserAdminClient.deleteAcls(java.util.List.of(AclTopicWrite().toFilter)).values + superuserAdminClient.deleteAcls(List(AclTopicDescribe().toFilter).asJava).values + superuserAdminClient.deleteAcls(List(AclTopicWrite().toFilter).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) + TestUtils.waitAndVerifyAcls(TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, topicResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, groupResource) } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoConsumeWithDescribeAclViaAssign(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoConsumeWithDescribeAclViaAssign(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Set(topic).asJava, e.unauthorizedTopics()) confirmReauthenticationMetrics() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoConsumeWithDescribeAclViaSubscribe(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoConsumeWithDescribeAclViaSubscribe(quorum: String, groupProtocol: String): Unit = { noConsumeWithDescribeAclSetup() val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) val e = assertThrows(classOf[TopicAuthorizationException], () => consumeRecords(consumer)) - assertEquals(java.util.Set.of(topic), e.unauthorizedTopics()) + assertEquals(Set(topic).asJava, e.unauthorizedTopics()) confirmReauthenticationMetrics() } private def noConsumeWithDescribeAclSetup(): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values - superuserAdminClient.createAcls(java.util.List.of(AclGroupRead)).values + superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values + superuserAdminClient.createAcls(List(AclGroupRead).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) - TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, groupResource) + TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, topicResource) + TestUtils.waitAndVerifyAcls(GroupReadAcl, s.dataPlaneRequestProcessor.authorizer.get, groupResource) } val producer = createProducer() sendRecords(producer, numRecords, tp) @@ -508,19 +507,19 @@ abstract class EndToEndAuthorizationTest extends IntegrationTestHarness with Sas * Tests that a consumer fails to consume messages without the appropriate * ACL set. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNoGroupAcl(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoGroupAcl(quorum: String, groupProtocol: String): Unit = { val superuserAdminClient = createSuperuserAdminClient() - superuserAdminClient.createAcls(java.util.List.of(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe())).values + superuserAdminClient.createAcls(List(AclTopicWrite(), AclTopicCreate(), AclTopicDescribe()).asJava).values brokers.foreach { s => - TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizerPlugin.get, topicResource) + TestUtils.waitAndVerifyAcls(TopicWriteAcl ++ TopicDescribeAcl ++ TopicCreateAcl, s.dataPlaneRequestProcessor.authorizer.get, topicResource) } val producer = createProducer() sendRecords(producer, numRecords, tp) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val e = assertThrows(classOf[GroupAuthorizationException], () => consumeRecords(consumer)) assertEquals(group, e.groupId()) confirmReauthenticationMetrics() diff --git a/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala b/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala new file mode 100644 index 0000000000000..c423c95ae9d89 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/EndToEndClusterIdTest.scala @@ -0,0 +1,218 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util.concurrent.ExecutionException +import java.util.concurrent.atomic.AtomicReference +import java.util.Properties +import kafka.integration.KafkaServerTestHarness +import kafka.server._ +import kafka.utils._ +import kafka.utils.Implicits._ +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} +import org.apache.kafka.common.{ClusterResource, ClusterResourceListener, TopicPartition} +import org.apache.kafka.server.metrics.MetricConfigs +import org.apache.kafka.test.{TestUtils => _, _} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{BeforeEach, TestInfo} + +import scala.jdk.CollectionConverters._ +import org.apache.kafka.test.TestUtils.isValidClusterId +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +/** The test cases here verify the following conditions. + * 1. The ProducerInterceptor receives the cluster id after the onSend() method is called and before onAcknowledgement() method is called. + * 2. The Serializer receives the cluster id before the serialize() method is called. + * 3. The producer MetricReporter receives the cluster id after send() method is called on KafkaProducer. + * 4. The ConsumerInterceptor receives the cluster id before the onConsume() method. + * 5. The Deserializer receives the cluster id before the deserialize() method is called. + * 6. The consumer MetricReporter receives the cluster id after poll() is called on KafkaConsumer. + * 7. The broker MetricReporter receives the cluster id after the broker startup is over. + * 8. The broker KafkaMetricReporter receives the cluster id after the broker startup is over. + * 9. All the components receive the same cluster id. + */ + +object EndToEndClusterIdTest { + + object MockConsumerMetricsReporter { + val CLUSTER_META = new AtomicReference[ClusterResource] + } + + class MockConsumerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { + + override def onUpdate(clusterMetadata: ClusterResource): Unit = { + MockConsumerMetricsReporter.CLUSTER_META.set(clusterMetadata) + } + } + + object MockProducerMetricsReporter { + val CLUSTER_META = new AtomicReference[ClusterResource] + } + + class MockProducerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { + + override def onUpdate(clusterMetadata: ClusterResource): Unit = { + MockProducerMetricsReporter.CLUSTER_META.set(clusterMetadata) + } + } + + object MockBrokerMetricsReporter { + val CLUSTER_META = new AtomicReference[ClusterResource] + } + + class MockBrokerMetricsReporter extends MockMetricsReporter with ClusterResourceListener { + + override def onUpdate(clusterMetadata: ClusterResource): Unit = { + MockBrokerMetricsReporter.CLUSTER_META.set(clusterMetadata) + } + } +} + +class EndToEndClusterIdTest extends KafkaServerTestHarness { + + import EndToEndClusterIdTest._ + + val producerCount = 1 + val consumerCount = 1 + val serverCount = 1 + lazy val producerConfig = new Properties + lazy val consumerConfig = new Properties + lazy val serverConfig = new Properties + val numRecords = 1 + val topic = "e2etopic" + val part = 0 + val tp = new TopicPartition(topic, part) + this.serverConfig.setProperty(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockBrokerMetricsReporter].getName) + + override def generateConfigs = { + val cfgs = TestUtils.createBrokerConfigs(serverCount, interBrokerSecurityProtocol = Some(securityProtocol), + trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties) + cfgs.foreach(_ ++= serverConfig) + cfgs.map(KafkaConfig.fromProps) + } + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + MockDeserializer.resetStaticVariables() + // create the consumer offset topic + createTopic(topic, 2, serverCount) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testEndToEnd(quorum: String, groupProtocol: String): Unit = { + val appendStr = "mock" + MockConsumerInterceptor.resetCounters() + MockProducerInterceptor.resetCounters() + + assertNotNull(MockBrokerMetricsReporter.CLUSTER_META) + isValidClusterId(MockBrokerMetricsReporter.CLUSTER_META.get.clusterId) + + val producerProps = new Properties() + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) + producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockProducerInterceptor].getName) + producerProps.put("mock.interceptor.append", appendStr) + producerProps.put(ProducerConfig.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockProducerMetricsReporter].getName) + val testProducer = new KafkaProducer(producerProps, new MockSerializer, new MockSerializer) + + // Send one record and make sure clusterId is set after send and before onAcknowledgement + sendRecords(testProducer, 1, tp) + assertNotEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT, MockProducerInterceptor.NO_CLUSTER_ID) + assertNotNull(MockProducerInterceptor.CLUSTER_META) + assertEquals(MockProducerInterceptor.CLUSTER_ID_BEFORE_ON_ACKNOWLEDGEMENT.get.clusterId, MockProducerInterceptor.CLUSTER_META.get.clusterId) + isValidClusterId(MockProducerInterceptor.CLUSTER_META.get.clusterId) + + // Make sure that serializer gets the cluster id before serialize method. + assertNotEquals(MockSerializer.CLUSTER_ID_BEFORE_SERIALIZE, MockSerializer.NO_CLUSTER_ID) + assertNotNull(MockSerializer.CLUSTER_META) + isValidClusterId(MockSerializer.CLUSTER_META.get.clusterId) + + assertNotNull(MockProducerMetricsReporter.CLUSTER_META) + isValidClusterId(MockProducerMetricsReporter.CLUSTER_META.get.clusterId) + + this.consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) + this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockConsumerInterceptor].getName) + this.consumerConfig.put(ConsumerConfig.METRIC_REPORTER_CLASSES_CONFIG, classOf[MockConsumerMetricsReporter].getName) + this.consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) + val testConsumer = new KafkaConsumer(this.consumerConfig, new MockDeserializer, new MockDeserializer) + testConsumer.assign(List(tp).asJava) + testConsumer.seek(tp, 0) + + // consume and verify that values are modified by interceptors + consumeRecords(testConsumer, numRecords) + + // Check that cluster id is present after the first poll call. + assertNotEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME, MockConsumerInterceptor.NO_CLUSTER_ID) + assertNotNull(MockConsumerInterceptor.CLUSTER_META) + isValidClusterId(MockConsumerInterceptor.CLUSTER_META.get.clusterId) + assertEquals(MockConsumerInterceptor.CLUSTER_ID_BEFORE_ON_CONSUME.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId) + + assertNotEquals(MockDeserializer.clusterIdBeforeDeserialize, MockDeserializer.noClusterId) + assertNotNull(MockDeserializer.clusterMeta) + isValidClusterId(MockDeserializer.clusterMeta.get.clusterId) + assertEquals(MockDeserializer.clusterIdBeforeDeserialize.get.clusterId, MockDeserializer.clusterMeta.get.clusterId) + + assertNotNull(MockConsumerMetricsReporter.CLUSTER_META) + isValidClusterId(MockConsumerMetricsReporter.CLUSTER_META.get.clusterId) + + // Make sure everyone receives the same cluster id. + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockSerializer.CLUSTER_META.get.clusterId) + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockProducerMetricsReporter.CLUSTER_META.get.clusterId) + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerInterceptor.CLUSTER_META.get.clusterId) + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockDeserializer.clusterMeta.get.clusterId) + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockConsumerMetricsReporter.CLUSTER_META.get.clusterId) + assertEquals(MockProducerInterceptor.CLUSTER_META.get.clusterId, MockBrokerMetricsReporter.CLUSTER_META.get.clusterId) + + testConsumer.close() + testProducer.close() + MockConsumerInterceptor.resetCounters() + MockProducerInterceptor.resetCounters() + } + + private def sendRecords(producer: KafkaProducer[Array[Byte], Array[Byte]], numRecords: Int, tp: TopicPartition): Unit = { + val futures = (0 until numRecords).map { i => + val record = new ProducerRecord(tp.topic(), tp.partition(), s"$i".getBytes, s"$i".getBytes) + debug(s"Sending this record: $record") + producer.send(record) + } + try { + futures.foreach(_.get) + } catch { + case e: ExecutionException => throw e.getCause + } + } + + private def consumeRecords(consumer: Consumer[Array[Byte], Array[Byte]], + numRecords: Int, + startingOffset: Int = 0, + topic: String = topic, + part: Int = part): Unit = { + val records = TestUtils.consumeRecords(consumer, numRecords) + + for (i <- 0 until numRecords) { + val record = records(i) + val offset = startingOffset + i + assertEquals(topic, record.topic) + assertEquals(part, record.partition) + assertEquals(offset.toLong, record.offset) + } + } +} diff --git a/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala new file mode 100644 index 0000000000000..e9a0644a26c63 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/GroupAuthorizerIntegrationTest.scala @@ -0,0 +1,238 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import java.util.Properties +import java.util.concurrent.ExecutionException +import kafka.api.GroupAuthorizerIntegrationTest._ +import kafka.server.BaseRequestTest +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer.ConsumerConfig +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.acl.{AccessControlEntry, AclOperation, AclPermissionType} +import org.apache.kafka.common.config.internals.BrokerSecurityConfigs +import org.apache.kafka.common.errors.TopicAuthorizationException +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourceType} +import org.apache.kafka.common.security.auth.{AuthenticationContext, KafkaPrincipal} +import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.metadata.authorizer.StandardAuthorizer +import org.apache.kafka.security.authorizer.AclEntry.WILDCARD_HOST +import org.apache.kafka.server.config.ServerConfigs +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.function.Executable +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import scala.jdk.CollectionConverters._ + +object GroupAuthorizerIntegrationTest { + val BrokerPrincipal = new KafkaPrincipal("Group", "broker") + val ClientPrincipal = new KafkaPrincipal("Group", "client") + + val BrokerListenerName = "BROKER" + val ClientListenerName = "CLIENT" + val ControllerListenerName = "CONTROLLER" + + class GroupPrincipalBuilder extends DefaultKafkaPrincipalBuilder(null, null) { + override def build(context: AuthenticationContext): KafkaPrincipal = { + context.listenerName match { + case BrokerListenerName | ControllerListenerName => BrokerPrincipal + case ClientListenerName => ClientPrincipal + case listenerName => throw new IllegalArgumentException(s"No principal mapped to listener $listenerName") + } + } + } +} + +class GroupAuthorizerIntegrationTest extends BaseRequestTest { + + val brokerId: Integer = 0 + + override def brokerCount: Int = 1 + override def interBrokerListenerName: ListenerName = new ListenerName(BrokerListenerName) + override def listenerName: ListenerName = new ListenerName(ClientListenerName) + + def brokerPrincipal: KafkaPrincipal = BrokerPrincipal + def clientPrincipal: KafkaPrincipal = ClientPrincipal + + override def kraftControllerConfigs(testInfo: TestInfo): collection.Seq[Properties] = { + val controllerConfigs = super.kraftControllerConfigs(testInfo) + controllerConfigs.foreach(addNodeProperties) + controllerConfigs + } + + override def brokerPropertyOverrides(properties: Properties): Unit = { + properties.put(ServerConfigs.BROKER_ID_CONFIG, brokerId.toString) + addNodeProperties(properties) + } + + private def addNodeProperties(properties: Properties): Unit = { + properties.put(ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG, classOf[StandardAuthorizer].getName) + properties.put(StandardAuthorizer.SUPER_USERS_CONFIG, BrokerPrincipal.toString) + + properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, "1") + properties.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") + properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, "1") + properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, "1") + properties.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, "1") + properties.put(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, classOf[GroupPrincipalBuilder].getName) + } + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + doSetup(testInfo, createOffsetsTopic = false) + + // Allow inter-broker communication + addAndVerifyAcls( + Set(createAcl(AclOperation.CLUSTER_ACTION, AclPermissionType.ALLOW, principal = BrokerPrincipal)), + new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) + ) + + createOffsetsTopic(interBrokerListenerName) + } + + private def createAcl(aclOperation: AclOperation, + aclPermissionType: AclPermissionType, + principal: KafkaPrincipal = ClientPrincipal): AccessControlEntry = { + new AccessControlEntry(principal.toString, WILDCARD_HOST, aclOperation, aclPermissionType) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUnauthorizedProduceAndConsume(quorum: String, groupProtocol: String): Unit = { + val topic = "topic" + val topicPartition = new TopicPartition("topic", 0) + + createTopic(topic, listenerName = interBrokerListenerName) + + val producer = createProducer() + val produceException = assertThrows(classOf[ExecutionException], + () => producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get()).getCause + assertTrue(produceException.isInstanceOf[TopicAuthorizationException]) + assertEquals(Set(topic), produceException.asInstanceOf[TopicAuthorizationException].unauthorizedTopics.asScala) + + val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + consumer.assign(List(topicPartition).asJava) + val consumeException = assertThrows(classOf[TopicAuthorizationException], + () => TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1)) + assertEquals(Set(topic), consumeException.unauthorizedTopics.asScala) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeUnsubscribeWithoutGroupPermission(quorum: String, groupProtocol: String): Unit = { + val topic = "topic" + + createTopic(topic, listenerName = interBrokerListenerName) + + // allow topic read/write permission to poll/send record + addAndVerifyAcls( + Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val producer = createProducer() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() + producer.close() + + // allow group read permission to join group + val group = "group" + addAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + val props = new Properties() + props.put(ConsumerConfig.GROUP_ID_CONFIG, group) + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + val consumer = createConsumer(configOverrides = props) + consumer.subscribe(List(topic).asJava) + TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) + + removeAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.unsubscribe() + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeCloseWithoutGroupPermission(quorum: String, groupProtocol: String): Unit = { + val topic = "topic" + createTopic(topic, listenerName = interBrokerListenerName) + + // allow topic read/write permission to poll/send record + addAndVerifyAcls( + Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW), createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val producer = createProducer() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() + + // allow group read permission to join group + val group = "group" + addAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + val props = new Properties() + props.put(ConsumerConfig.GROUP_ID_CONFIG, group) + props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + val consumer = createConsumer(configOverrides = props) + consumer.subscribe(List(topic).asJava) + TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) + + removeAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.GROUP, group, PatternType.LITERAL) + ) + + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.close() + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAuthorizedProduceAndConsume(quorum: String, groupProtocol: String): Unit = { + val topic = "topic" + val topicPartition = new TopicPartition("topic", 0) + + createTopic(topic, listenerName = interBrokerListenerName) + + addAndVerifyAcls( + Set(createAcl(AclOperation.WRITE, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val producer = createProducer() + producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic, "message".getBytes)).get() + + addAndVerifyAcls( + Set(createAcl(AclOperation.READ, AclPermissionType.ALLOW)), + new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) + ) + val consumer = createConsumer(configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + consumer.assign(List(topicPartition).asJava) + TestUtils.pollUntilAtLeastNumRecords(consumer, numRecords = 1) + } + +} diff --git a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala index e161406a50e51..cbd69baedc75c 100644 --- a/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/GroupCoordinatorIntegrationTest.scala @@ -12,12 +12,13 @@ */ package kafka.api +import kafka.log.UnifiedLog import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Type} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, ConsumerGroupDescription} import org.apache.kafka.clients.consumer.{Consumer, GroupProtocol, OffsetAndMetadata} -import org.apache.kafka.common.errors.{GroupIdNotFoundException, UnknownTopicOrPartitionException} -import org.apache.kafka.common.{ConsumerGroupState, GroupType, KafkaFuture, TopicCollection, TopicPartition} +import org.apache.kafka.common.errors.GroupIdNotFoundException +import org.apache.kafka.common.{ConsumerGroupState, GroupType, KafkaFuture, TopicPartition} import org.junit.jupiter.api.Assertions._ import scala.jdk.CollectionConverters._ @@ -26,13 +27,11 @@ import org.apache.kafka.common.record.CompressionType import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.server.config.ServerConfigs -import org.apache.kafka.storage.internals.log.UnifiedLog -import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Timeout import java.time.Duration +import java.util.Collections import java.util.concurrent.TimeUnit -import scala.concurrent.ExecutionException @Timeout(120) class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { @@ -48,9 +47,9 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { ) def testGroupCoordinatorPropagatesOffsetsTopicCompressionCodec(): Unit = { withConsumer(groupId = "group", groupProtocol = GroupProtocol.CLASSIC) { consumer => - consumer.commitSync(java.util.Map.of( - new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0), new OffsetAndMetadata(10, "") - )) + consumer.commitSync(Map( + new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) -> new OffsetAndMetadata(10, "") + ).asJava) val logManager = cluster.brokers().asScala.head._2.logManager def getGroupMetadataLogOpt: Option[UnifiedLog] = @@ -86,7 +85,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a consumer group grp1 with one member. The member subscribes to foo and leaves. This creates // a mix of group records with tombstones to delete the member. withConsumer(groupId = "grp1", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment.asScala.nonEmpty @@ -106,7 +105,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(java.util.List.of("grp1")) + .describeConsumerGroups(List("grp1").asJava) .describedGroups() .asScala .toMap @@ -135,14 +134,14 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // and ensure that all the offset commit records are before the consumer group records due to the // rebalance after the commit sync. withConsumer(groupId = "grp2", groupProtocol = GroupProtocol.CONSUMER, enableAutoCommit = false) { consumer => - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty }, msg = "Consumer did not get an non empty assignment") consumer.commitSync() consumer.unsubscribe() - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -162,7 +161,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(java.util.List.of("grp2")) + .describeConsumerGroups(List("grp2").asJava) .describedGroups() .asScala .toMap @@ -189,7 +188,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a consumer group grp3 with one member. The member subscribes to foo and leaves the group. Then // the group is deleted. This creates tombstones to delete the member, the group and the offsets. withConsumer(groupId = "grp3", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -197,7 +196,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } admin - .deleteConsumerGroups(java.util.List.of("grp3")) + .deleteConsumerGroups(List("grp3").asJava) .deletedGroups() .get("grp3") .get(10, TimeUnit.SECONDS) @@ -215,7 +214,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(java.util.List.of("grp3")) + .describeConsumerGroups(List("grp3").asJava) .describedGroups() .asScala .toMap @@ -242,7 +241,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // Create a classic group grp4 with one member. Upgrades the group to the consumer // protocol. withConsumer(groupId = "grp4", groupProtocol = GroupProtocol.CLASSIC) { consumer => - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -250,7 +249,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } withConsumer(groupId = "grp4", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(java.util.List.of("foo")) + consumer.subscribe(List("foo").asJava) TestUtils.waitUntilTrue(() => { consumer.poll(Duration.ofMillis(50)) consumer.assignment().asScala.nonEmpty @@ -270,7 +269,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { // group coordinator won't be available. withAdmin { admin => val groups = admin - .describeConsumerGroups(java.util.List.of("grp4")) + .describeConsumerGroups(List("grp4").asJava) .describedGroups() .asScala .toMap @@ -279,64 +278,12 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { } } - @ClusterTest( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) - ) - def testRecreatingConsumerOffsetsTopic(): Unit = { - withAdmin { admin => - TestUtils.createTopicWithAdminRaw( - admin = admin, - topic = "foo", - numPartitions = 3 - ) - - withConsumer(groupId = "group", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(List("foo").asJava) - TestUtils.waitUntilTrue(() => { - consumer.poll(Duration.ofMillis(50)) - consumer.assignment().asScala.nonEmpty - }, msg = "Consumer did not get an non empty assignment") - } - - admin - .deleteTopics(TopicCollection.ofTopicNames(List(Topic.GROUP_METADATA_TOPIC_NAME).asJava)) - .all() - .get() - - TestUtils.waitUntilTrue(() => { - try { - admin - .describeTopics(TopicCollection.ofTopicNames(List(Topic.GROUP_METADATA_TOPIC_NAME).asJava)) - .topicNameValues() - .get(Topic.GROUP_METADATA_TOPIC_NAME) - .get(JTestUtils.DEFAULT_MAX_WAIT_MS, TimeUnit.MILLISECONDS) - false - } catch { - case e: ExecutionException => - e.getCause.isInstanceOf[UnknownTopicOrPartitionException] - } - }, msg = s"${Topic.GROUP_METADATA_TOPIC_NAME} was not deleted") - - withConsumer(groupId = "group", groupProtocol = GroupProtocol.CONSUMER) { consumer => - consumer.subscribe(List("foo").asJava) - TestUtils.waitUntilTrue(() => { - consumer.poll(Duration.ofMillis(50)) - consumer.assignment().asScala.nonEmpty - }, msg = "Consumer did not get an non empty assignment") - } - } - } - private def rollAndCompactConsumerOffsets(): Unit = { val tp = new TopicPartition("__consumer_offsets", 0) val broker = cluster.brokers.asScala.head._2 val log = broker.logManager.getLog(tp).get log.roll() - assertTrue(broker.logManager.cleaner.awaitCleaned(tp, 0, 60000L)) + assertTrue(broker.logManager.cleaner.awaitCleaned(tp, 0)) } private def withAdmin(f: Admin => Unit): Unit = { @@ -377,7 +324,7 @@ class GroupCoordinatorIntegrationTest(cluster: ClusterInstance) { assertEquals(groupId, group.groupId) assertEquals(groupType, group.`type`) assertEquals(state, group.state) - assertEquals(java.util.List.of, group.members) + assertEquals(Collections.emptyList, group.members) } private def assertDescribedDeadGroup( diff --git a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala index 303e989e9b4fb..ed5611eb74b84 100644 --- a/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala +++ b/core/src/test/scala/integration/kafka/api/IntegrationTestHarness.scala @@ -22,25 +22,21 @@ import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsume import kafka.utils.TestUtils import kafka.utils.Implicits._ -import java.util -import java.util.{Optional, Properties, UUID} +import java.util.Properties import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig} import kafka.server.KafkaConfig import kafka.integration.KafkaServerTestHarness import kafka.security.JaasTestUtils import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} -import org.apache.kafka.clients.consumer.internals.{AsyncKafkaConsumer, StreamsRebalanceData, StreamsRebalanceListener} import org.apache.kafka.common.network.{ConnectionMode, ListenerName} import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer, Deserializer, Serializer} -import org.apache.kafka.common.utils.Utils +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.MetadataLogConfig -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs} import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import scala.collection.mutable import scala.collection.Seq -import scala.jdk.CollectionConverters._ import scala.jdk.javaapi.OptionConverters /** @@ -53,7 +49,6 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { val producerConfig = new Properties val consumerConfig = new Properties val shareConsumerConfig = new Properties - val streamsConsumerConfig = new Properties val adminClientConfig = new Properties val superuserClientConfig = new Properties val serverConfig = new Properties @@ -61,7 +56,6 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { private val consumers = mutable.Buffer[Consumer[_, _]]() private val shareConsumers = mutable.Buffer[ShareConsumer[_, _]]() - private val streamsConsumers = mutable.Buffer[Consumer[_, _]]() private val producers = mutable.Buffer[KafkaProducer[_, _]]() private val adminClients = mutable.Buffer[Admin]() @@ -76,7 +70,11 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { trustStoreFile = trustStoreFile, saslProperties = serverSaslProperties, logDirCount = logDirCount) configureListeners(cfgs) modifyConfigs(cfgs) - cfgs.foreach(_.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, TestUtils.tempDir().getAbsolutePath)) + if (isShareGroupTest()) { + cfgs.foreach(_.setProperty(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,share")) + cfgs.foreach(_.setProperty(ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, "true")) + } + cfgs.foreach(_.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, TestUtils.tempDir().getAbsolutePath)) insertControllerListenersIfNeeded(cfgs) cfgs.map(KafkaConfig.fromProps) } @@ -154,12 +152,7 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { shareConsumerConfig.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "group") shareConsumerConfig.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) shareConsumerConfig.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - - streamsConsumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - streamsConsumerConfig.putIfAbsent(ConsumerConfig.GROUP_ID_CONFIG, "group") - streamsConsumerConfig.putIfAbsent(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - streamsConsumerConfig.putIfAbsent(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - + adminClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) doSuperuserSetup(testInfo) @@ -218,67 +211,6 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { shareConsumer } - def createStreamsConsumer[K, V](keyDeserializer: Deserializer[K] = new ByteArrayDeserializer, - valueDeserializer: Deserializer[V] = new ByteArrayDeserializer, - configOverrides: Properties = new Properties, - configsToRemove: List[String] = List(), - streamsRebalanceData: StreamsRebalanceData): AsyncKafkaConsumer[K, V] = { - val props = new Properties - props ++= streamsConsumerConfig - props ++= configOverrides - configsToRemove.foreach(props.remove(_)) - val streamsConsumer = new AsyncKafkaConsumer[K, V]( - new ConsumerConfig(ConsumerConfig.appendDeserializerToConfig(Utils.propsToMap(props), keyDeserializer, valueDeserializer)), - keyDeserializer, - valueDeserializer, - Optional.of(streamsRebalanceData) - ) - streamsConsumers += streamsConsumer - streamsConsumer - } - - def createStreamsGroup[K, V](configOverrides: Properties = new Properties, - configsToRemove: List[String] = List(), - inputTopic: String, - streamsGroupId: String): AsyncKafkaConsumer[K, V] = { - val props = new Properties() - props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) - props.put(ConsumerConfig.GROUP_ID_CONFIG, streamsGroupId) - props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, classOf[ByteArrayDeserializer].getName) - props ++= configOverrides - configsToRemove.foreach(props.remove(_)) - - val streamsRebalanceData = new StreamsRebalanceData( - UUID.randomUUID(), - Optional.empty(), - util.Map.of( - "subtopology-0", new StreamsRebalanceData.Subtopology( - util.Set.of(inputTopic), - util.Set.of(), - util.Map.of(), - util.Map.of(inputTopic + "-store-changelog", new StreamsRebalanceData.TopicInfo(Optional.of(1), Optional.empty(), util.Map.of())), - util.Set.of() - )), - Map.empty[String, String].asJava - ) - - val consumer = createStreamsConsumer( - keyDeserializer = new ByteArrayDeserializer().asInstanceOf[Deserializer[K]], - valueDeserializer = new ByteArrayDeserializer().asInstanceOf[Deserializer[V]], - configOverrides = props, - streamsRebalanceData = streamsRebalanceData - ) - consumer.subscribe(util.Set.of(inputTopic), - new StreamsRebalanceListener { - override def onTasksRevoked(tasks: util.Set[StreamsRebalanceData.TaskId]): Unit = () - override def onTasksAssigned(assignment: StreamsRebalanceData.Assignment): Unit = () - override def onAllTasksLost(): Unit = () - }) - consumer - } - def createAdminClient( listenerName: ListenerName = listenerName, configOverrides: Properties = new Properties @@ -311,14 +243,11 @@ abstract class IntegrationTestHarness extends KafkaServerTestHarness { consumers.foreach(_.close(Duration.ZERO)) shareConsumers.foreach(_.wakeup()) shareConsumers.foreach(_.close(Duration.ZERO)) - streamsConsumers.foreach(_.wakeup()) - streamsConsumers.foreach(_.close(Duration.ZERO)) adminClients.foreach(_.close(Duration.ZERO)) producers.clear() consumers.clear() shareConsumers.clear() - streamsConsumers.clear() adminClients.clear() } finally { super.tearDown() diff --git a/core/src/test/scala/integration/kafka/api/LogAppendTimeTest.scala b/core/src/test/scala/integration/kafka/api/LogAppendTimeTest.scala new file mode 100644 index 0000000000000..f3741d2d77c45 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/LogAppendTimeTest.scala @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import java.util.Collections +import java.util.concurrent.TimeUnit +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.record.TimestampType +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.server.config.ServerLogConfigs +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals, assertTrue} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +/** + * Tests where the broker is configured to use LogAppendTime. For tests where LogAppendTime is configured via topic + * level configs, see the *ProducerSendTest classes. + */ +class LogAppendTimeTest extends IntegrationTestHarness { + val producerCount: Int = 1 + val consumerCount: Int = 1 + val brokerCount: Int = 2 + + // This will be used for the offsets topic as well + serverConfig.put(ServerLogConfigs.LOG_MESSAGE_TIMESTAMP_TYPE_CONFIG, TimestampType.LOG_APPEND_TIME.name) + serverConfig.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, "2") + + private val topic = "topic" + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + createTopic(topic) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsume(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + val now = System.currentTimeMillis() + val createTime = now - TimeUnit.DAYS.toMillis(1) + val producerRecords = (1 to 10).map(i => new ProducerRecord(topic, null, createTime, s"key$i".getBytes, + s"value$i".getBytes)) + val recordMetadatas = producerRecords.map(producer.send).map(_.get(10, TimeUnit.SECONDS)) + recordMetadatas.foreach { recordMetadata => + assertTrue(recordMetadata.timestamp >= now) + assertTrue(recordMetadata.timestamp < now + TimeUnit.SECONDS.toMillis(60)) + } + + val consumer = createConsumer() + consumer.subscribe(Collections.singleton(topic)) + val consumerRecords = TestUtils.consumeRecords(consumer, producerRecords.size) + + consumerRecords.zipWithIndex.foreach { case (consumerRecord, index) => + val producerRecord = producerRecords(index) + val recordMetadata = recordMetadatas(index) + assertEquals(new String(producerRecord.key), new String(consumerRecord.key)) + assertEquals(new String(producerRecord.value), new String(consumerRecord.value)) + assertNotEquals(producerRecord.timestamp, consumerRecord.timestamp) + assertEquals(recordMetadata.timestamp, consumerRecord.timestamp) + assertEquals(TimestampType.LOG_APPEND_TIME, consumerRecord.timestampType) + } + } +} diff --git a/core/src/test/scala/integration/kafka/api/MetricsTest.scala b/core/src/test/scala/integration/kafka/api/MetricsTest.scala index b2930c6b3e5dc..e08801343fc5b 100644 --- a/core/src/test/scala/integration/kafka/api/MetricsTest.scala +++ b/core/src/test/scala/integration/kafka/api/MetricsTest.scala @@ -79,9 +79,9 @@ class MetricsTest extends IntegrationTestHarness with SaslSetup { /** * Verifies some of the metrics of producer, consumer as well as server. */ - @ParameterizedTest(name = "testMetrics with systemRemoteStorageEnabled: {0}") - @CsvSource(Array("true", "false")) - def testMetrics(systemRemoteStorageEnabled: Boolean): Unit = { + @ParameterizedTest(name = "testMetrics with systemRemoteStorageEnabled: {1}") + @CsvSource(Array("kraft, true", "kraft, false")) + def testMetrics(quorum: String, systemRemoteStorageEnabled: Boolean): Unit = { val topic = "mytopic" createTopic(topic, numPartitions = 1, @@ -98,7 +98,7 @@ class MetricsTest extends IntegrationTestHarness with SaslSetup { sendRecords(producer, numRecords, recordSize, tp) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumer.seek(tp, 0) TestUtils.consumeRecords(consumer, numRecords) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala index 4a686f3d4d871..1e2f16ddb379b 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextAdminIntegrationTest.scala @@ -22,19 +22,20 @@ import java.nio.ByteBuffer import java.nio.file.{Files, Paths, StandardOpenOption} import java.lang.{Long => JLong} import java.time.{Duration => JDuration} +import java.util.Arrays.asList import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger} import java.util.concurrent.{CountDownLatch, ExecutionException, TimeUnit} -import java.util.{Collections, Locale, Optional, Properties} +import java.util.{Collections, Optional, Properties} import java.{time, util} import kafka.integration.KafkaServerTestHarness +import kafka.server.metadata.KRaftMetadataCache import kafka.server.KafkaConfig import kafka.utils.TestUtils._ -import kafka.utils.{TestInfoUtils, TestUtils} +import kafka.utils.{Log4jController, TestInfoUtils, TestUtils} import org.apache.kafka.clients.HostResolver import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.ConfigEntry.ConfigSource import org.apache.kafka.clients.admin._ -import org.apache.kafka.clients.consumer.internals.AsyncKafkaConsumer import org.apache.kafka.clients.consumer.{CommitFailedException, Consumer, ConsumerConfig, GroupProtocol, KafkaConsumer, OffsetAndMetadata, ShareConsumer} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.acl.{AccessControlEntry, AclBinding, AclBindingFilter, AclOperation, AclPermissionType} @@ -42,7 +43,7 @@ import org.apache.kafka.common.config.{ConfigResource, LogLevelConfig, SslConfig import org.apache.kafka.common.errors._ import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.KafkaException -import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} +import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter} import org.apache.kafka.common.record.FileRecords import org.apache.kafka.common.requests.DeleteRecordsRequest import org.apache.kafka.common.resource.{PatternType, ResourcePattern, ResourceType} @@ -53,16 +54,14 @@ import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEX import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinatorConfig} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, MetadataVersion} import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.server.logger.LoggingController import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogFileUtils} import org.apache.kafka.test.TestUtils.{DEFAULT_MAX_WAIT_MS, assertFutureThrows} import org.apache.logging.log4j.core.config.Configurator import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo, Timeout} +import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import org.slf4j.LoggerFactory import java.util.AbstractMap.SimpleImmutableEntry @@ -70,6 +69,7 @@ import scala.collection.Seq import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.RichOption import scala.util.{Random, Using} /** @@ -89,14 +89,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) - Configurator.reconfigure() + Configurator.reconfigure(); brokerLoggerConfigResource = new ConfigResource( ConfigResource.Type.BROKER_LOGGER, brokers.head.config.brokerId.toString) } - @Test + @ParameterizedTest @Timeout(30) - def testDescribeConfigWithOptionTimeoutMs(): Unit = { + @ValueSource(strings = Array("kraft")) + def testDescribeConfigWithOptionTimeoutMs(quorum: String): Unit = { val config = createConfig config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") val brokenClient = Admin.create(config) @@ -105,17 +106,18 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describe and broker val brokerResource1 = new ConfigResource(ConfigResource.Type.BROKER, brokers(1).config.brokerId.toString) val brokerResource2 = new ConfigResource(ConfigResource.Type.BROKER, brokers(2).config.brokerId.toString) - val configResources = util.List.of(brokerResource1, brokerResource2) + val configResources = Seq(brokerResource1, brokerResource2) val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.describeConfigs(configResources,new DescribeConfigsOptions().timeoutMs(0)).all().get() + brokenClient.describeConfigs(configResources.asJava,new DescribeConfigsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) } - @Test - def testCreatePartitionWithOptionRetryOnQuotaViolation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePartitionWithOptionRetryOnQuotaViolation(quorum: String): Unit = { // Since it's hard to stably reach quota limit in integration test, we only verify quota configs are set correctly val config = createConfig val clientId = "test-client-id" @@ -123,11 +125,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { config.put(AdminClientConfig.CLIENT_ID_CONFIG, clientId) client = Admin.create(config) - val entity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.CLIENT_ID, clientId)) + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> clientId).asJava) val configEntries = Map(QuotaConfig.CONTROLLER_MUTATION_RATE_OVERRIDE_CONFIG -> 1.0, QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 3.0) - client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(entity, configEntries.map { case (k, v) => + client.alterClientQuotas(Seq(new ClientQuotaAlteration(entity, configEntries.map { case (k, v) => new ClientQuotaAlteration.Op(k, v) - }.asJavaCollection))).all.get + }.asJavaCollection)).asJavaCollection).all.get TestUtils.waitUntilTrue(() => { // wait for our ClientQuotaEntity to be set @@ -139,70 +141,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(configEntries, quotaEntities.get(entity).asScala) } - @Test - def testDefaultNameQuotaIsNotEqualToDefaultQuota(): Unit = { - val config = createConfig - val defaultQuota = "" - client = Admin.create(config) - - //"" can not create default quota - val userEntity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.USER, defaultQuota)) - val clientEntity = new ClientQuotaEntity(util.Map.of(ClientQuotaEntity.CLIENT_ID, defaultQuota)) - val userAlterations = new ClientQuotaAlteration(userEntity, - util.Set.of(new ClientQuotaAlteration.Op("consumer_byte_rate", 10000D))) - val clientAlterations = new ClientQuotaAlteration(clientEntity, - util.Set.of(new ClientQuotaAlteration.Op("producer_byte_rate", 10000D))) - val alterations = util.List.of(userAlterations, clientAlterations) - client.alterClientQuotas(alterations).all().get() - - TestUtils.waitUntilTrue(() => { - try { - //check "" as a default quota use - val userDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)))).entities().get() - val clientDefaultQuotas = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.CLIENT_ID)))).entities().get() - - //check "" as a normal quota use - val userNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER,defaultQuota)))).entities().get() - val clientNormalQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID,defaultQuota)))).entities().get() - - userDefaultQuotas.size() == 0 && clientDefaultQuotas.size() == 0 && userNormalQuota.size() == 1 && clientNormalQuota.size() == 1 - } catch { - case _: Exception => false - } - }, "Timed out waiting for quota config to be propagated to all servers") - - //null can create default quota - val userDefaultEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> Option.empty[String].orNull).asJava) - client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(userDefaultEntity, util.Set.of( - new ClientQuotaAlteration.Op("consumer_byte_rate", 100D))))).all().get() - val clientDefaultEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> Option.empty[String].orNull).asJava) - client.alterClientQuotas(util.List.of(new ClientQuotaAlteration(clientDefaultEntity, util.Set.of( - new ClientQuotaAlteration.Op("producer_byte_rate", 100D))))).all().get() - - TestUtils.waitUntilTrue(() => { - try { - val userDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)))).entities().get() - val clientDefaultQuota = client.describeClientQuotas(ClientQuotaFilter.containsOnly(util.List.of( - ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.CLIENT_ID)))).entities().get() - userDefaultQuota.size() == 1 && clientDefaultQuota.size() == 1 - } catch { - case _: Exception => false - } - }, "Timed out waiting for quota config to be propagated to all servers") - } - - @Test - def testDescribeUserScramCredentials(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeUserScramCredentials(quorum: String): Unit = { client = createAdminClient // add a new user val targetUserName = "tom" - client.alterUserScramCredentials(util.List.of( + client.alterUserScramCredentials(Collections.singletonList( new UserScramCredentialUpsertion(targetUserName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456") )).all.get TestUtils.waitUntilTrue(() => client.describeUserScramCredentials().all().get().size() == 1, @@ -219,7 +165,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }) // add other users - client.alterUserScramCredentials(util.List.of( + client.alterUserScramCredentials(util.Arrays.asList( new UserScramCredentialUpsertion("tom2", new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456"), new UserScramCredentialUpsertion("tom3", new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "123456") )).all().get @@ -227,7 +173,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { "Add user scram credential timeout") // alter user info - client.alterUserScramCredentials(util.List.of( + client.alterUserScramCredentials(Collections.singletonList( new UserScramCredentialUpsertion(targetUserName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_512, 8192), "123456") )).all.get TestUtils.waitUntilTrue(() => { @@ -247,7 +193,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(8192, credentialList(1).iterations()) // test describeUserScramCredentials(List users) - val userAndScramMap = client.describeUserScramCredentials(util.List.of("tom2")).all().get() + val userAndScramMap = client.describeUserScramCredentials(Collections.singletonList("tom2")).all().get() assertEquals(1, userAndScramMap.size()) val scram = userAndScramMap.get("tom2") assertNotNull(scram) @@ -262,14 +208,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { Admin.create(config) } + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) @Timeout(10) - @Test - def testDescribeUserScramCredentialsTimeout(): Unit = { + def testDescribeUserScramCredentialsTimeout(quorum: String, groupProtocol: String): Unit = { client = createInvalidAdminClient() try { // test describeUserScramCredentials(List users, DescribeUserScramCredentialsOptions options) val exception = assertThrows(classOf[ExecutionException], () => { - client.describeUserScramCredentials(util.List.of("tom4"), + client.describeUserScramCredentials(Collections.singletonList("tom4"), new DescribeUserScramCredentialsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) @@ -283,8 +230,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(configs, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.assign(util.Set.of(topicPartition)) - consumer.seekToBeginning(util.Set.of(topicPartition)) + consumer.assign(Collections.singleton(topicPartition)) + consumer.seekToBeginning(Collections.singleton(topicPartition)) var consumeNum = 0 TestUtils.waitUntilTrue(() => { val records = consumer.poll(time.Duration.ofMillis(100)) @@ -294,14 +241,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally consumer.close() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDescribeProducers(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDescribeProducers(quorum: String, groupProtocol: String): Unit = { client = createAdminClient - client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() def appendCommonRecords = (records: Int) => { - val producer = new KafkaProducer(util.Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + val producer = new KafkaProducer(Collections.singletonMap(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, plaintextBootstrapServers(brokers).asInstanceOf[Object]), new ByteArraySerializer, new ByteArraySerializer) try { (0 until records).foreach(i => @@ -327,7 +274,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } def queryProducerDetail() = client - .describeProducers(util.List.of(topicPartition)) + .describeProducers(Collections.singletonList(topicPartition)) .partitionResult(topicPartition).get().activeProducers().asScala // send common msg @@ -367,16 +314,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally ongoingProducer.close() } - @Test - def testDescribeTransactions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTransactions(quorum: String): Unit = { client = createAdminClient - client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() var transactionId = "foo" val stateAbnormalMsg = "The transaction state is abnormal" def describeTransactions(): TransactionDescription = { - client.describeTransactions(util.Set.of(transactionId)).description(transactionId).get() + client.describeTransactions(Collections.singleton(transactionId)).description(transactionId).get() } def transactionState(): TransactionState = { describeTransactions().state() @@ -386,7 +334,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // calculate the transaction partition id val transactionPartitionId = Utils.abs(transactionId.hashCode) % brokers.head.metadataCache.numPartitions(Topic.TRANSACTION_STATE_TOPIC_NAME).get - val transactionTopic = client.describeTopics(util.Set.of(Topic.TRANSACTION_STATE_TOPIC_NAME)) + val transactionTopic = client.describeTopics(Collections.singleton(Topic.TRANSACTION_STATE_TOPIC_NAME)) val partitionList = transactionTopic.allTopicNames().get().get(Topic.TRANSACTION_STATE_TOPIC_NAME).partitions() partitionList.asScala.filter(tp => tp.partition() == transactionPartitionId).head.leader().id() } @@ -412,7 +360,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(findCoordinatorIdByTransactionId(transactionId), transactionResult.coordinatorId()) assertEquals(0, transactionResult.producerId()) assertEquals(0, transactionResult.producerEpoch()) - assertEquals(util.Set.of(topicPartition), transactionResult.topicPartitions()) + assertEquals(Collections.singleton(topicPartition), transactionResult.topicPartitions()) producer.commitTransaction() TestUtils.waitUntilTrue(() => transactionState() == TransactionState.COMPLETE_COMMIT, stateAbnormalMsg) @@ -441,7 +389,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val transactionSendMsgResult = describeTransactions() assertEquals(findCoordinatorIdByTransactionId(transactionId), transactionSendMsgResult.coordinatorId()) - assertEquals(util.Set.of(topicPartition), transactionSendMsgResult.topicPartitions()) + assertEquals(Collections.singleton(topicPartition), transactionSendMsgResult.topicPartitions()) assertEquals(topicPartition, transactionSendMsgResult.topicPartitions().asScala.head) TestUtils.waitUntilTrue(() => transactionState() == TransactionState.ONGOING, stateAbnormalMsg) @@ -451,23 +399,25 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally abortProducer.close() } - @Test + @ParameterizedTest @Timeout(10) - def testDescribeTransactionsTimeout(): Unit = { + @ValueSource(strings = Array("kraft")) + def testDescribeTransactionsTimeout(quorum: String): Unit = { client = createInvalidAdminClient() try { val transactionId = "foo" val exception = assertThrows(classOf[ExecutionException], () => { - client.describeTransactions(util.Set.of(transactionId), + client.describeTransactions(Collections.singleton(transactionId), new DescribeTransactionsOptions().timeoutMs(0)).description(transactionId).get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally client.close(time.Duration.ZERO) } - @Test + @ParameterizedTest @Timeout(10) - def testAbortTransactionTimeout(): Unit = { + @ValueSource(strings = Array("kraft")) + def testAbortTransactionTimeout(quorum: String): Unit = { client = createInvalidAdminClient() try { val exception = assertThrows(classOf[ExecutionException], () => { @@ -479,15 +429,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally client.close(time.Duration.ZERO) } - @Test - def testListTransactions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListTransactions(quorum: String): Unit = { def createTransactionList(): Unit = { client = createAdminClient - client.createTopics(util.List.of(new NewTopic(topic, 1, 1.toShort))).all().get() + client.createTopics(Collections.singletonList(new NewTopic(topic, 1, 1.toShort))).all().get() val stateAbnormalMsg = "The transaction state is abnormal" def transactionState(transactionId: String): TransactionState = { - client.describeTransactions(util.Set.of(transactionId)).description(transactionId).get().state() + client.describeTransactions(Collections.singleton(transactionId)).description(transactionId).get().state() } val transactionId1 = "foo" @@ -528,11 +479,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, client.listTransactions().all().get().size()) assertEquals(2, client.listTransactions(new ListTransactionsOptions() - .filterStates(util.List.of(TransactionState.COMPLETE_COMMIT))).all().get().size()) + .filterStates(Collections.singletonList(TransactionState.COMPLETE_COMMIT))).all().get().size()) assertEquals(1, client.listTransactions(new ListTransactionsOptions() - .filterStates(util.List.of(TransactionState.COMPLETE_ABORT))).all().get().size()) + .filterStates(Collections.singletonList(TransactionState.COMPLETE_ABORT))).all().get().size()) assertEquals(1, client.listTransactions(new ListTransactionsOptions() - .filterProducerIds(util.List.of(0L))).all().get().size()) + .filterProducerIds(Collections.singletonList(0L))).all().get().size()) // ensure all transaction's txnStartTimestamp >= 500 Thread.sleep(501) @@ -552,12 +503,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally producerNew.close() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAbortTransaction(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAbortTransaction(quorum: String, groupProtocol: String): Unit = { client = createAdminClient val tp = new TopicPartition("topic1", 0) - client.createTopics(util.List.of(new NewTopic(tp.topic(), 1, 1.toShort))).all().get() + client.createTopics(Collections.singletonList(new NewTopic(tp.topic(), 1, 1.toShort))).all().get() def checkConsumer = (tp: TopicPartition, expectedNumber: Int) => { val configs = new util.HashMap[String, Object]() @@ -566,15 +517,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { configs.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(configs, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.assign(util.Set.of(tp)) - consumer.seekToBeginning(util.Set.of(tp)) + consumer.assign(Collections.singleton(tp)) + consumer.seekToBeginning(Collections.singleton(tp)) val records = consumer.poll(time.Duration.ofSeconds(3)) assertEquals(expectedNumber, records.count()) } finally consumer.close() } def appendRecord = (records: Int) => { - val producer = new KafkaProducer(util.Map.of(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, + val producer = new KafkaProducer(Collections.singletonMap(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, plaintextBootstrapServers(brokers).asInstanceOf[Object]), new ByteArraySerializer, new ByteArraySerializer) try { (0 until records).foreach(i => @@ -603,7 +554,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { appendRecord(1) checkConsumer(tp, 1) - val transactionalProducer = client.describeProducers(util.List.of(tp)) + val transactionalProducer = client.describeProducers(Collections.singletonList(tp)) .partitionResult(tp).get().activeProducers().asScala.minBy(_.producerId()) assertDoesNotThrow(() => client.abortTransaction( @@ -616,15 +567,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally producer.close() } - @Test - def testClose(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClose(quorum: String): Unit = { val client = createAdminClient client.close() client.close() // double close has no effect } - @Test - def testListNodes(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodes(quorum: String): Unit = { client = createAdminClient val brokerStrs = bootstrapServers().split(",").toList.sorted var nodeStrs: List[String] = null @@ -635,8 +588,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(brokerStrs.mkString(","), nodeStrs.mkString(",")) } - @Test - def testListNodesWithFencedBroker(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesWithFencedBroker(quorum: String): Unit = { client = createAdminClient val fencedBrokerId = brokers.last.config.brokerId killBroker(fencedBrokerId, JDuration.ofMillis(0)) @@ -658,8 +612,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }) } - @Test - def testAdminClientHandlingBadIPWithoutTimeout(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAdminClientHandlingBadIPWithoutTimeout(quorum: String): Unit = { val config = createConfig config.put(AdminClientConfig.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, "1000") val returnBadAddressFirst = new HostResolver { @@ -672,32 +627,34 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client.describeCluster().nodes().get() } - @Test - def testCreateExistingTopicsThrowTopicExistsException(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateExistingTopicsThrowTopicExistsException(quorum: String): Unit = { client = createAdminClient val topic = "mytopic" val topics = Seq(topic) - val newTopics = util.List.of(new NewTopic(topic, 1, 1.toShort)) + val newTopics = Seq(new NewTopic(topic, 1, 1.toShort)) - client.createTopics(newTopics).all.get() + client.createTopics(newTopics.asJava).all.get() waitForTopics(client, topics, List()) - val newTopicsWithInvalidRF = util.List.of(new NewTopic(topic, 1, (brokers.size + 1).toShort)) + val newTopicsWithInvalidRF = Seq(new NewTopic(topic, 1, (brokers.size + 1).toShort)) val e = assertThrows(classOf[ExecutionException], - () => client.createTopics(newTopicsWithInvalidRF, new CreateTopicsOptions().validateOnly(true)).all.get()) + () => client.createTopics(newTopicsWithInvalidRF.asJava, new CreateTopicsOptions().validateOnly(true)).all.get()) assertTrue(e.getCause.isInstanceOf[TopicExistsException]) } - @Test - def testDeleteTopicsWithIds(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteTopicsWithIds(quorum: String): Unit = { client = createAdminClient val topics = Seq("mytopic", "mytopic2", "mytopic3") - val newTopics = util.List.of( - new NewTopic("mytopic", util.Map.of(0: Integer, util.List.of[Integer](1, 2), 1: Integer, util.List.of[Integer](2, 0))), + val newTopics = Seq( + new NewTopic("mytopic", Map((0: Integer) -> Seq[Integer](1, 2).asJava, (1: Integer) -> Seq[Integer](2, 0).asJava).asJava), new NewTopic("mytopic2", 3, 3.toShort), - new NewTopic("mytopic3", Optional.empty[Integer], Optional.empty[java.lang.Short]) + new NewTopic("mytopic3", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) ) - val createResult = client.createTopics(newTopics) + val createResult = client.createTopics(newTopics.asJava) createResult.all.get() waitForTopics(client, topics, List()) val topicIds = getTopicIds().values.toSet @@ -706,20 +663,22 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { waitForTopics(client, List(), topics) } - @Test - def testDeleteTopicsWithOptionTimeoutMs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteTopicsWithOptionTimeoutMs(quorum: String): Unit = { client = createInvalidAdminClient() try { val timeoutOption = new DeleteTopicsOptions().timeoutMs(0) val exception = assertThrows(classOf[ExecutionException], () => - client.deleteTopics(util.List.of("test-topic"), timeoutOption).all().get()) + client.deleteTopics(Seq("test-topic").asJava, timeoutOption).all().get()) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally client.close(time.Duration.ZERO) } - @Test - def testListTopicsWithOptionTimeoutMs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListTopicsWithOptionTimeoutMs(quorum: String): Unit = { client = createInvalidAdminClient() try { @@ -730,16 +689,18 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally client.close(time.Duration.ZERO) } - @Test - def testListTopicsWithOptionListInternal(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListTopicsWithOptionListInternal(quorum: String): Unit = { client = createAdminClient val topicNames = client.listTopics(new ListTopicsOptions().listInternal(true)).names().get() assertFalse(topicNames.isEmpty, "Expected to see internal topics") } - @Test - def testDescribeTopicsWithOptionPartitionSizeLimitPerResponse(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicsWithOptionPartitionSizeLimitPerResponse(quorum: String): Unit = { client = createAdminClient val testTopics = Seq("test-topic") @@ -754,14 +715,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { waitForTopics(client, List(), testTopics) } - @Test - def testDescribeTopicsWithOptionTimeoutMs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicsWithOptionTimeoutMs(quorum: String): Unit = { client = createInvalidAdminClient() try { val timeoutOption = new DescribeTopicsOptions().timeoutMs(0) val exception = assertThrows(classOf[ExecutionException], () => - client.describeTopics(util.List.of("test-topic"), timeoutOption).allTopicNames().get()) + client.describeTopics(Seq("test-topic").asJava, timeoutOption).allTopicNames().get()) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally client.close(time.Duration.ZERO) } @@ -769,8 +731,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { /** * describe should not auto create topics */ - @Test - def testDescribeNonExistingTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeNonExistingTopic(quorum: String): Unit = { client = createAdminClient val existingTopic = "existing-topic" @@ -778,13 +741,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { waitForTopics(client, Seq(existingTopic), List()) val nonExistingTopic = "non-existing" - val results = client.describeTopics(util.List.of(nonExistingTopic, existingTopic)).topicNameValues() + val results = client.describeTopics(Seq(nonExistingTopic, existingTopic).asJava).topicNameValues() assertEquals(existingTopic, results.get(existingTopic).get.name) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], results.get(nonExistingTopic)) + assertFutureThrows(results.get(nonExistingTopic), classOf[UnknownTopicOrPartitionException]) } - @Test - def testDescribeTopicsWithIds(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicsWithIds(quorum: String): Unit = { client = createAdminClient val existingTopic = "existing-topic" @@ -796,13 +760,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val nonExistingTopicId = Uuid.randomUuid() - val results = client.describeTopics(TopicCollection.ofTopicIds(util.List.of(existingTopicId, nonExistingTopicId))).topicIdValues() + val results = client.describeTopics(TopicCollection.ofTopicIds(Seq(existingTopicId, nonExistingTopicId).asJava)).topicIdValues() assertEquals(existingTopicId, results.get(existingTopicId).get.topicId()) - assertFutureThrows(classOf[UnknownTopicIdException], results.get(nonExistingTopicId)) + assertFutureThrows(results.get(nonExistingTopicId), classOf[UnknownTopicIdException]) } - @Test - def testDescribeTopicsWithNames(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeTopicsWithNames(quorum: String): Unit = { client = createAdminClient val existingTopic = "existing-topic" @@ -811,12 +776,13 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ensureConsistentKRaftMetadata() val existingTopicId = brokers.head.metadataCache.getTopicId(existingTopic) - val results = client.describeTopics(TopicCollection.ofTopicNames(util.List.of(existingTopic))).topicNameValues() + val results = client.describeTopics(TopicCollection.ofTopicNames(Seq(existingTopic).asJava)).topicNameValues() assertEquals(existingTopicId, results.get(existingTopic).get.topicId()) } - @Test - def testDescribeCluster(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeCluster(quorum: String): Unit = { client = createAdminClient val result = client.describeCluster val nodes = result.nodes.get() @@ -836,8 +802,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @Test - def testDescribeLogDirs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeLogDirs(quorum: String): Unit = { client = createAdminClient val topic = "topic" val leaderByPartition = createTopic(topic, numPartitions = 10) @@ -867,8 +834,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @Test - def testDescribeReplicaLogDirs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeReplicaLogDirs(quorum: String): Unit = { client = createAdminClient val topic = "topic" val leaderByPartition = createTopic(topic, numPartitions = 10) @@ -885,9 +853,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAlterReplicaLogDirs(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAlterReplicaLogDirs(quorum: String, groupProtocol: String): Unit = { client = createAdminClient val topic = "topic" val tp = new TopicPartition(topic, 0) @@ -895,11 +863,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Generate two mutually exclusive replicaAssignment val firstReplicaAssignment = brokers.map { server => - val logDir = new File(server.config.logDirs.get(randomNums(server))).getAbsolutePath + val logDir = new File(server.config.logDirs(randomNums(server))).getAbsolutePath new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir }.toMap val secondReplicaAssignment = brokers.map { server => - val logDir = new File(server.config.logDirs.get(1 - randomNums(server))).getAbsolutePath + val logDir = new File(server.config.logDirs(1 - randomNums(server))).getAbsolutePath new TopicPartitionReplica(topic, 0, server.config.brokerId) -> logDir }.toMap @@ -975,37 +943,44 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - @Test - def testDescribeConfigsNonexistent(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeConfigsNonexistent(quorum: String): Unit = { client = createAdminClient val brokerException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.BROKER, "-1"))).all().get() + client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.BROKER, "-1")).asJava).all().get() }) assertInstanceOf(classOf[TimeoutException], brokerException.getCause) val topicException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.TOPIC, "none_topic"))).all().get() + client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.TOPIC, "none_topic")).asJava).all().get() }) assertInstanceOf(classOf[UnknownTopicOrPartitionException], topicException.getCause) val brokerLoggerException = assertThrows(classOf[ExecutionException], () => { - client.describeConfigs(util.List.of(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "-1"))).all().get() + client.describeConfigs(Seq(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, "-1")).asJava).all().get() }) assertInstanceOf(classOf[TimeoutException], brokerLoggerException.getCause) } - @Test - def testDescribeConfigsNonexistentForKraft(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeConfigsNonexistentForKraft(quorum: String): Unit = { client = createAdminClient val groupResource = new ConfigResource(ConfigResource.Type.GROUP, "none_group") - val groupResult = client.describeConfigs(util.List.of(groupResource)).all().get().get(groupResource) + val groupResult = client.describeConfigs(Seq(groupResource).asJava).all().get().get(groupResource) assertNotEquals(0, groupResult.entries().size()) + + val metricResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, "none_metric") + val metricResult = client.describeConfigs(Seq(metricResource).asJava).all().get().get(metricResource) + assertEquals(0, metricResult.entries().size()) } - @Test - def testDescribeAndAlterConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeAndAlterConfigs(quorum: String): Unit = { client = createAdminClient // Create topics @@ -1025,8 +1000,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describe topics and broker val brokerResource1 = new ConfigResource(ConfigResource.Type.BROKER, brokers(1).config.brokerId.toString) val brokerResource2 = new ConfigResource(ConfigResource.Type.BROKER, brokers(2).config.brokerId.toString) - val configResources = util.List.of(topicResource1, topicResource2, brokerResource1, brokerResource2) - val describeResult = client.describeConfigs(configResources) + val configResources = Seq(topicResource1, topicResource2, brokerResource1, brokerResource2) + val describeResult = client.describeConfigs(configResources.asJava) val configs = describeResult.all.get assertEquals(4, configs.size) @@ -1042,7 +1017,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { configs.get(topicResource1).get(TopicConfig.RETENTION_MS_CONFIG).value) val maxMessageBytes2 = configs.get(topicResource2).get(TopicConfig.MAX_MESSAGE_BYTES_CONFIG) - assertEquals(ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT.toString, maxMessageBytes2.value) + assertEquals(LogConfig.DEFAULT_MAX_MESSAGE_BYTES.toString, maxMessageBytes2.value) assertEquals(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageBytes2.name) assertTrue(maxMessageBytes2.isDefault) assertFalse(maxMessageBytes2.isSensitive) @@ -1088,29 +1063,30 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { checkValidAlterConfigs(client, this, topicResource1, topicResource2, maxMessageBytes, retentionMs) } - @Test - def testIncrementalAlterAndDescribeGroupConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterAndDescribeGroupConfigs(quorum: String): Unit = { client = createAdminClient val group = "describe-alter-configs-group" val groupResource = new ConfigResource(ConfigResource.Type.GROUP, group) // Alter group configs - var groupAlterConfigs = util.List.of( + var groupAlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ) + ).asJavaCollection - var alterResult = client.incrementalAlterConfigs(util.Map.of( - groupResource, groupAlterConfigs - )) + var alterResult = client.incrementalAlterConfigs(Map( + groupResource -> groupAlterConfigs + ).asJava) - assertEquals(util.Set.of(groupResource), alterResult.values.keySet) + assertEquals(Set(groupResource).asJava, alterResult.values.keySet) alterResult.all.get(15, TimeUnit.SECONDS) ensureConsistentKRaftMetadata() // Describe group config, verify that group config was updated correctly - var describeResult = client.describeConfigs(util.List.of(groupResource)) + var describeResult = client.describeConfigs(Seq(groupResource).asJava) var configs = describeResult.all.get(15, TimeUnit.SECONDS) assertEquals(1, configs.size) @@ -1121,37 +1097,37 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(ConfigSource.DEFAULT_CONFIG, configs.get(groupResource).get(GroupConfig.CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG).source) // Alter group with validateOnly=true - groupAlterConfigs = util.List.of( + groupAlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "60000"), AlterConfigOp.OpType.SET) - ) + ).asJava - alterResult = client.incrementalAlterConfigs(util.Map.of( - groupResource, groupAlterConfigs - ), new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(Map( + groupResource -> groupAlterConfigs + ).asJava, new AlterConfigsOptions().validateOnly(true)) alterResult.all.get(15, TimeUnit.SECONDS) // Verify that group config was not updated due to validateOnly = true - describeResult = client.describeConfigs(util.List.of(groupResource)) + describeResult = client.describeConfigs(Seq(groupResource).asJava) configs = describeResult.all.get(15, TimeUnit.SECONDS) assertEquals("50000", configs.get(groupResource).get(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG).value) // Alter group with validateOnly=true with invalid configs - groupAlterConfigs = util.List.of( + groupAlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "5"), AlterConfigOp.OpType.SET) - ) + ).asJava - alterResult = client.incrementalAlterConfigs(util.Map.of( - groupResource, groupAlterConfigs - ), new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(Map( + groupResource -> groupAlterConfigs + ).asJava, new AlterConfigsOptions().validateOnly(true)) - assertFutureThrows(classOf[InvalidConfigurationException], - alterResult.values.get(groupResource), + assertFutureThrows(alterResult.values.get(groupResource), classOf[InvalidConfigurationException], "consumer.session.timeout.ms must be greater than or equal to group.consumer.min.session.timeout.ms") } - @Test - def testCreatePartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreatePartitions(quorum: String): Unit = { client = createAdminClient // Create topics @@ -1177,27 +1153,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { def numPartitions(topic: String, expectedNumPartitionsOpt: Option[Int]): Int = partitions(topic, expectedNumPartitionsOpt).size // validateOnly: try creating a new partition (no assignments), to bring the total to 3 partitions - var alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(3)), validateOnly) + var alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(3)).asJava, validateOnly) var altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 1) // try creating a new partition (no assignments), to bring the total to 3 partitions - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(3)), actuallyDoIt) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(3)).asJava, actuallyDoIt) altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 3) // validateOnly: now try creating a new partition (with assignments), to bring the total to 3 partitions - val newPartition2Assignments = util.List.of[util.List[Integer]](util.List.of[Integer](0, 1), util.List.of[Integer](1, 2)) - alterResult = client.createPartitions(util.Map.of(topic2, - NewPartitions.increaseTo(3, newPartition2Assignments)), validateOnly) + val newPartition2Assignments = asList[util.List[Integer]](asList(0, 1), asList(1, 2)) + alterResult = client.createPartitions(Map(topic2 -> + NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, validateOnly) altered = alterResult.values.get(topic2).get TestUtils.waitForAllPartitionsMetadata(brokers, topic2, expectedNumPartitions = 1) // now try creating a new partition (with assignments), to bring the total to 3 partitions - alterResult = client.createPartitions(util.Map.of(topic2, - NewPartitions.increaseTo(3, newPartition2Assignments)), actuallyDoIt) + alterResult = client.createPartitions(Map(topic2 -> + NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, actuallyDoIt) altered = alterResult.values.get(topic2).get val actualPartitions2 = partitions(topic2, expectedNumPartitionsOpt = Some(3)) assertEquals(3, actualPartitions2.size) @@ -1209,8 +1185,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val desc = if (option.validateOnly()) "validateOnly" else "validateOnly=false" // try a newCount which would be a decrease - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(1)), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(1)).asJava, option) var e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when newCount is a decrease") @@ -1220,8 +1196,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (without assignment) - alterResult = client.createPartitions(util.Map.of(topic2, - NewPartitions.increaseTo(3)), option) + alterResult = client.createPartitions(Map(topic2 -> + NewPartitions.increaseTo(3)).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get, () => s"$desc: Expect InvalidPartitionsException when requesting a noop") assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) @@ -1230,16 +1206,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic2, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (where the assignment matches current state) - alterResult = client.createPartitions(util.Map.of(topic2, - NewPartitions.increaseTo(3, newPartition2Assignments)), option) + alterResult = client.createPartitions(Map(topic2 -> + NewPartitions.increaseTo(3, newPartition2Assignments)).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get) assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) assertEquals(3, numPartitions(topic2, expectedNumPartitionsOpt = Some(3)), desc) // try a newCount which would be a noop (where the assignment doesn't match current state) - alterResult = client.createPartitions(util.Map.of(topic2, - NewPartitions.increaseTo(3, newPartition2Assignments.asScala.reverse.toList.asJava)), option) + alterResult = client.createPartitions(Map(topic2 -> + NewPartitions.increaseTo(3, newPartition2Assignments.asScala.reverse.toList.asJava)).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic2).get) assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) @@ -1247,8 +1223,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // try a bad topic name val unknownTopic = "an-unknown-topic" - alterResult = client.createPartitions(util.Map.of(unknownTopic, - NewPartitions.increaseTo(2)), option) + alterResult = client.createPartitions(Map(unknownTopic -> + NewPartitions.increaseTo(2)).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(unknownTopic).get, () => s"$desc: Expect InvalidTopicException when using an unknown topic") assertTrue(e.getCause.isInstanceOf[UnknownTopicOrPartitionException], desc) @@ -1256,8 +1232,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(exceptionMsgStr, e.getCause.getMessage, desc) // try an invalid newCount - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(-22)), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(-22)).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when newCount is invalid") assertTrue(e.getCause.isInstanceOf[InvalidPartitionsException], desc) @@ -1267,8 +1243,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments where the number of brokers != replication factor - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1, 2)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4, asList(asList(1, 2)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidPartitionsException when #brokers != replication factor") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1278,8 +1254,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try #assignments < with the increase - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(6, util.List.of(util.List.of[Integer](1)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(6, asList(asList(1)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1288,8 +1264,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try #assignments > with the increase - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1), util.List.of[Integer](2)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4, asList(asList(1), asList(2)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when #assignments != newCount - oldCount") exceptionMsgStr = "Attempted to add 1 additional partition(s), but only 2 assignment(s) were specified." @@ -1298,8 +1274,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try with duplicate brokers in assignments - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](1, 1)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4, asList(asList(1, 1)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments has duplicate brokers") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1308,8 +1284,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments with differently sized inner lists - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(5, util.List.of(util.List.of[Integer](1), util.List.of[Integer](1, 0)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(5, asList(asList(1), asList(1, 0)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments have differently sized inner lists") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1319,8 +1295,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try assignments with unknown brokers - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4, util.List.of(util.List.of[Integer](12)))), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4, asList(asList(12)))).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments contains an unknown broker") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1329,8 +1305,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, numPartitions(topic1, expectedNumPartitionsOpt = Some(3)), desc) // try with empty assignments - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4, util.List.of)), option) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4, Collections.emptyList())).asJava, option) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => s"$desc: Expect InvalidReplicaAssignmentException when assignments is empty") assertTrue(e.getCause.isInstanceOf[InvalidReplicaAssignmentException], desc) @@ -1340,9 +1316,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } // a mixed success, failure response - alterResult = client.createPartitions(util.Map.of( - topic1, NewPartitions.increaseTo(4), - topic2, NewPartitions.increaseTo(2)), actuallyDoIt) + alterResult = client.createPartitions(Map( + topic1 -> NewPartitions.increaseTo(4), + topic2 -> NewPartitions.increaseTo(2)).asJava, actuallyDoIt) // assert that the topic1 now has 4 partitions altered = alterResult.values.get(topic1).get TestUtils.waitForAllPartitionsMetadata(brokers, topic1, expectedNumPartitions = 4) @@ -1354,19 +1330,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Delete the topic. Verify addition of partitions to deleted topic is not possible. // In KRaft, the deletion occurs immediately and hence we have a different Exception thrown in the response. - val deleteResult = client.deleteTopics(util.List.of(topic1)) + val deleteResult = client.deleteTopics(asList(topic1)) deleteResult.topicNameValues.get(topic1).get - alterResult = client.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(4)), validateOnly) + alterResult = client.createPartitions(Map(topic1 -> + NewPartitions.increaseTo(4)).asJava, validateOnly) e = assertThrows(classOf[ExecutionException], () => alterResult.values.get(topic1).get, () => "Expect InvalidTopicException or UnknownTopicOrPartitionException when the topic is queued for deletion") assertTrue(e.getCause.isInstanceOf[UnknownTopicOrPartitionException], e.toString) assertEquals("This server does not host this topic-partition.", e.getCause.getMessage) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSeekAfterDeleteRecords(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSeekAfterDeleteRecords(quorum: String, groupProtocol: String): Unit = { createTopic(topic, numPartitions = 2, replicationFactor = brokerCount) client = createAdminClient @@ -1376,27 +1352,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - consumer.seekToBeginning(util.Set.of(topicPartition)) + consumer.seekToBeginning(Collections.singleton(topicPartition)) assertEquals(0L, consumer.position(topicPartition)) - val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) + val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) val lowWatermark = result.lowWatermarks().get(topicPartition).get.lowWatermark assertEquals(5L, lowWatermark) - consumer.seekToBeginning(util.List.of(topicPartition)) + consumer.seekToBeginning(Collections.singletonList(topicPartition)) assertEquals(5L, consumer.position(topicPartition)) consumer.seek(topicPartition, 7L) assertEquals(7L, consumer.position(topicPartition)) - client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK))).all.get - consumer.seekToBeginning(util.List.of(topicPartition)) + client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava).all.get + consumer.seekToBeginning(Collections.singletonList(topicPartition)) assertEquals(10L, consumer.position(topicPartition)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testLogStartOffsetCheckpoint(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testLogStartOffsetCheckpoint(quorum: String, groupProtocol: String): Unit = { createTopic(topic, numPartitions = 2, replicationFactor = brokerCount) client = createAdminClient @@ -1406,7 +1382,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - var result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) + var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) var lowWatermark: Option[Long] = Some(result.lowWatermarks.get(topicPartition).get.lowWatermark) assertEquals(Some(5), lowWatermark) @@ -1420,7 +1396,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.waitUntilTrue(() => { // Need to retry if leader is not available for the partition - result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(0L))) + result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(0L)).asJava) lowWatermark = None val future = result.lowWatermarks().get(topicPartition) @@ -1434,9 +1410,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected low watermark of the partition to be 5 but got ${lowWatermark.getOrElse("no response within the timeout")}") } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testLogStartOffsetAfterDeleteRecords(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testLogStartOffsetAfterDeleteRecords(quorum: String, groupProtocol: String): Unit = { createTopic(topic, numPartitions = 2, replicationFactor = brokerCount) client = createAdminClient @@ -1447,7 +1423,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) + val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) val lowWatermark = result.lowWatermarks.get(topicPartition).get.lowWatermark assertEquals(3L, lowWatermark) @@ -1455,8 +1431,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(3, brokers(i).replicaManager.localLog(topicPartition).get.logStartOffset) } - @Test - def testReplicaCanFetchFromLogStartOffsetAfterDeleteRecords(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReplicaCanFetchFromLogStartOffsetAfterDeleteRecords(quorum: String): Unit = { val leaders = createTopic(topic, replicationFactor = brokerCount) val followerIndex = if (leaders(0) != brokers.head.config.brokerId) 0 else 1 @@ -1481,7 +1458,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 100, topicPartition) - val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) + val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) result.all().get() // start the stopped broker to verify that it will be able to fetch from new log start offset @@ -1496,15 +1473,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // kill the same follower again, produce more records, and delete records beyond follower's LOE killBroker(followerIndex) sendRecords(producer, 100, topicPartition) - val result1 = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(117L))) + val result1 = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(117L)).asJava) result1.all().get() restartDeadBrokers() TestUtils.waitForBrokersInIsr(client, topicPartition, Set(followerIndex)) waitForFollowerLog(expectedStartOffset=117L, expectedEndOffset=200L) } - @Test - def testAlterLogDirsAfterDeleteRecords(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterLogDirsAfterDeleteRecords(quorum: String): Unit = { client = createAdminClient createTopic(topic, replicationFactor = brokerCount) val expectedLEO = 100 @@ -1512,7 +1490,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { sendRecords(producer, expectedLEO, topicPartition) // delete records to move log start offset - val result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))) + val result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava) result.all().get() // make sure we are in the expected state after delete records for (i <- 0 until brokerCount) { @@ -1521,11 +1499,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } // we will create another dir just for one server - val futureLogDir = brokers(0).config.logDirs.get(1) + val futureLogDir = brokers(0).config.logDirs(1) val futureReplica = new TopicPartitionReplica(topic, 0, brokers(0).config.brokerId) // Verify that replica can be moved to the specified log directory - client.alterReplicaLogDirs(util.Map.of(futureReplica, futureLogDir)).all.get + client.alterReplicaLogDirs(Map(futureReplica -> futureLogDir).asJava).all.get TestUtils.waitUntilTrue(() => { futureLogDir == brokers(0).logManager.getLog(topicPartition).get.dir.getParent }, "timed out waiting for replica movement") @@ -1535,9 +1513,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(expectedLEO, brokers.head.replicaManager.localLog(topicPartition).get.logEndOffset) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetsForTimesAfterDeleteRecords(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetsForTimesAfterDeleteRecords(quorum: String, groupProtocol: String): Unit = { createTopic(topic, numPartitions = 2, replicationFactor = brokerCount) client = createAdminClient @@ -1548,32 +1526,35 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - var returnedOffsets = consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) + var returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) assertTrue(returnedOffsets.containsKey(topicPartition)) assertEquals(0L, returnedOffsets.get(topicPartition).offset()) - var result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) + var result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) result.all.get - returnedOffsets = consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) + returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) assertTrue(returnedOffsets.containsKey(topicPartition)) assertEquals(5L, returnedOffsets.get(topicPartition).offset()) - result = client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK))) + result = client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(DeleteRecordsRequest.HIGH_WATERMARK)).asJava) result.all.get - returnedOffsets = consumer.offsetsForTimes(util.Map.of(topicPartition, JLong.valueOf(0L))) + returnedOffsets = consumer.offsetsForTimes(Map(topicPartition -> JLong.valueOf(0L)).asJava) assertTrue(returnedOffsets.containsKey(topicPartition)) assertNull(returnedOffsets.get(topicPartition)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteRecordsAfterCorruptRecords(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testDeleteRecordsAfterCorruptRecords(quorum: String, groupProtocol: String): Unit = { val config = new Properties() - config.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "200") + config.put(TopicConfig.SEGMENT_BYTES_CONFIG, "200") createTopic(topic, numPartitions = 1, replicationFactor = 1, config) client = createAdminClient + val consumer = createConsumer() + subscribeAndWaitForAssignment(topic, consumer) + val producer = createProducer() def sendRecords(begin: Int, end: Int) = { val futures = (begin until end).map( i => { @@ -1585,10 +1566,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { sendRecords(0, 10) sendRecords(10, 20) - val topicDesc = client.describeTopics(util.List.of(topic)).allTopicNames().get().get(topic) + val topicDesc = client.describeTopics(Collections.singletonList(topic)).allTopicNames().get().get(topic) assertEquals(1, topicDesc.partitions().size()) val partitionLeaderId = topicDesc.partitions().get(0).leader().id() - val logDirMap = client.describeLogDirs(util.List.of(partitionLeaderId)) + val logDirMap = client.describeLogDirs(Collections.singletonList(partitionLeaderId)) .allDescriptions().get().get(partitionLeaderId) val logDir = logDirMap.entrySet.stream .filter(entry => entry.getValue.replicaInfos.containsKey(topicPartition)).findAny().get().getKey @@ -1606,31 +1587,28 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { newContent.flip() Files.write(logFilePath, newContent.array(), StandardOpenOption.TRUNCATE_EXISTING) - val overrideConfig = new Properties - overrideConfig.setProperty("auto.offset.reset", "earliest") - val consumer = createConsumer(configOverrides = overrideConfig) - consumer.subscribe(util.List.of(topic)) + consumer.seekToBeginning(Collections.singletonList(topicPartition)) assertEquals("Encountered corrupt message when fetching offset 0 for topic-partition topic-0", assertThrows(classOf[KafkaException], () => consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS))).getMessage) val partitionFollowerId = brokers.map(b => b.config.nodeId).filter(id => id != partitionLeaderId).head - val newAssignment = util.Map.of(topicPartition, Optional.of(new NewPartitionReassignment( - util.List.of(Integer.valueOf(partitionLeaderId), Integer.valueOf(partitionFollowerId))))) + val newAssignment = Map(topicPartition -> Optional.of(new NewPartitionReassignment( + List(Integer.valueOf(partitionLeaderId), Integer.valueOf(partitionFollowerId)).asJava))).asJava // add follower to topic partition client.alterPartitionReassignments(newAssignment).all().get() // delete records in corrupt segment (the first segment) - client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(firstSegmentRecordsSize))).all.get + client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(firstSegmentRecordsSize)).asJava).all.get // verify reassignment is finished after delete records TestUtils.waitForBrokersInIsr(client, topicPartition, Set(partitionLeaderId, partitionFollowerId)) // seek to beginning and make sure we can consume all records - consumer.seekToBeginning(util.List.of(topicPartition)) + consumer.seekToBeginning(Collections.singletonList(topicPartition)) assertEquals(19, TestUtils.consumeRecords(consumer, 20 - firstSegmentRecordsSize).last.offset()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumeAfterDeleteRecords(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeAfterDeleteRecords(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() subscribeAndWaitForAssignment(topic, consumer) @@ -1641,20 +1619,20 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { var messageCount = 0 TestUtils.consumeRecords(consumer, 10) - client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(3L))).all.get + client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(3L)).asJava).all.get consumer.seek(topicPartition, 1) messageCount = 0 TestUtils.consumeRecords(consumer, 7) - client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(8L))).all.get + client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(8L)).asJava).all.get consumer.seek(topicPartition, 1) messageCount = 0 TestUtils.consumeRecords(consumer, 2) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteRecordsWithException(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteRecordsWithException(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() subscribeAndWaitForAssignment(topic, consumer) @@ -1663,43 +1641,43 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val producer = createProducer() sendRecords(producer, 10, topicPartition) - assertEquals(5L, client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(5L))) + assertEquals(5L, client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(5L)).asJava) .lowWatermarks.get(topicPartition).get.lowWatermark) // OffsetOutOfRangeException if offset > high_watermark val cause = assertThrows(classOf[ExecutionException], - () => client.deleteRecords(util.Map.of(topicPartition, RecordsToDelete.beforeOffset(20L))).lowWatermarks.get(topicPartition).get).getCause + () => client.deleteRecords(Map(topicPartition -> RecordsToDelete.beforeOffset(20L)).asJava).lowWatermarks.get(topicPartition).get).getCause assertEquals(classOf[OffsetOutOfRangeException], cause.getClass) } - @Test - def testDescribeConfigsForTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeConfigsForTopic(quorum: String): Unit = { createTopic(topic, numPartitions = 2, replicationFactor = brokerCount) client = createAdminClient val existingTopic = new ConfigResource(ConfigResource.Type.TOPIC, topic) - client.describeConfigs(util.List.of(existingTopic)).values.get(existingTopic).get() - - val defaultTopic = new ConfigResource(ConfigResource.Type.TOPIC, "") - var describeResult = client.describeConfigs(util.List.of(defaultTopic)) - assertFutureThrows(classOf[InvalidTopicException], describeResult.all()) + client.describeConfigs(Collections.singletonList(existingTopic)).values.get(existingTopic).get() val nonExistentTopic = new ConfigResource(ConfigResource.Type.TOPIC, "unknown") - describeResult = client.describeConfigs(util.List.of(nonExistentTopic)) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], describeResult.all()) + val describeResult1 = client.describeConfigs(Collections.singletonList(nonExistentTopic)) + + assertTrue(assertThrows(classOf[ExecutionException], () => describeResult1.values.get(nonExistentTopic).get).getCause.isInstanceOf[UnknownTopicOrPartitionException]) val invalidTopic = new ConfigResource(ConfigResource.Type.TOPIC, "(invalid topic)") - describeResult = client.describeConfigs(util.List.of(invalidTopic)) - assertFutureThrows(classOf[InvalidTopicException], describeResult.all()) + val describeResult2 = client.describeConfigs(Collections.singletonList(invalidTopic)) + + assertTrue(assertThrows(classOf[ExecutionException], () => describeResult2.values.get(invalidTopic).get).getCause.isInstanceOf[InvalidTopicException]) } - @Test - def testIncludeDocumentation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncludeDocumentation(quorum: String): Unit = { createTopic(topic) client = createAdminClient val resource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val resources = util.List.of(resource) + val resources = Collections.singletonList(resource) val includeDocumentation = new DescribeConfigsOptions().includeDocumentation(true) var describeConfigs = client.describeConfigs(resources, includeDocumentation) var configEntries = describeConfigs.values().get(resource).get().entries() @@ -1712,7 +1690,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } private def subscribeAndWaitForAssignment(topic: String, consumer: Consumer[Array[Byte], Array[Byte]]): Unit = { - consumer.subscribe(util.List.of(topic)) + consumer.subscribe(Collections.singletonList(topic)) TestUtils.pollUntilTrue(consumer, () => !consumer.assignment.isEmpty, "Expected non-empty assignment") } @@ -1728,8 +1706,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { futures.foreach(_.get) } - @Test - def testInvalidAlterConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidAlterConfigs(quorum: String): Unit = { client = createAdminClient checkInvalidAlterConfigs(this, client) } @@ -1739,29 +1718,33 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * Also see [[kafka.api.SaslSslAdminIntegrationTest.testAclOperations()]] for tests of ACL operations * when the authorizer is enabled. */ - @Test - def testAclOperations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclOperations(quorum: String): Unit = { val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) client = createAdminClient - assertFutureThrows(classOf[SecurityDisabledException], client.describeAcls(AclBindingFilter.ANY).values()) - assertFutureThrows(classOf[SecurityDisabledException], client.createAcls(util.Set.of(acl)).all()) - assertFutureThrows(classOf[SecurityDisabledException], client.deleteAcls(util.Set.of(acl.toFilter)).all()) + assertFutureThrows(client.describeAcls(AclBindingFilter.ANY).values(), classOf[SecurityDisabledException]) + assertFutureThrows(client.createAcls(Collections.singleton(acl)).all(), + classOf[SecurityDisabledException]) + assertFutureThrows(client.deleteAcls(Collections.singleton(acl.toFilter())).all(), + classOf[SecurityDisabledException]) } /** * Test closing the AdminClient with a generous timeout. Calls in progress should be completed, * since they can be done within the timeout. New calls should receive exceptions. */ - @Test - def testDelayedClose(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDelayedClose(quorum: String): Unit = { client = createAdminClient val topics = Seq("mytopic", "mytopic2") val newTopics = topics.map(new NewTopic(_, 1, 1.toShort)) val future = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all() client.close(time.Duration.ofHours(2)) val future2 = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)).all() - assertFutureThrows(classOf[IllegalStateException], future2) + assertFutureThrows(future2, classOf[IllegalStateException]) future.get client.close(time.Duration.ofMinutes(30)) // multiple close-with-timeout should have no effect } @@ -1770,8 +1753,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * Test closing the AdminClient with a timeout of 0, when there are calls with extremely long * timeouts in progress. The calls should be aborted after the hard shutdown timeout elapses. */ - @Test - def testForceClose(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testForceClose(quorum: String): Unit = { val config = createConfig config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") client = Admin.create(config) @@ -1780,15 +1764,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().timeoutMs(900000)).all() client.close(time.Duration.ZERO) - assertFutureThrows(classOf[TimeoutException], future) + assertFutureThrows(future, classOf[TimeoutException]) } /** * Check that a call with a timeout does not complete before the minimum timeout has elapsed, * even when the default request timeout is shorter. */ - @Test - def testMinimumRequestTimeouts(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMinimumRequestTimeouts(quorum: String): Unit = { val config = createConfig config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") config.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "0") @@ -1796,7 +1781,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val startTimeMs = Time.SYSTEM.milliseconds() val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().timeoutMs(2)).all() - assertFutureThrows(classOf[TimeoutException], future) + assertFutureThrows(future, classOf[TimeoutException]) val endTimeMs = Time.SYSTEM.milliseconds() assertTrue(endTimeMs > startTimeMs, "Expected the timeout to take at least one millisecond.") } @@ -1804,8 +1789,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { /** * Test injecting timeouts for calls that are in flight. */ - @Test - def testCallInFlightTimeouts(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCallInFlightTimeouts(quorum: String): Unit = { val config = createConfig config.put(AdminClientConfig.DEFAULT_API_TIMEOUT_MS_CONFIG, "100000000") config.put(AdminClientConfig.RETRIES_CONFIG, "0") @@ -1813,167 +1799,38 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { client = KafkaAdminClientTest.createInternal(new AdminClientConfig(config), factory) val future = client.createTopics(Seq("mytopic", "mytopic2").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().validateOnly(true)).all() - assertFutureThrows(classOf[TimeoutException], future) + assertFutureThrows(future, classOf[TimeoutException]) val future2 = client.createTopics(Seq("mytopic3", "mytopic4").map(new NewTopic(_, 1, 1.toShort)).asJava, new CreateTopicsOptions().validateOnly(true)).all() future2.get assertEquals(1, factory.failuresInjected) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListConsumerGroupOffsets(groupProtocol: String): Unit = { - val config = createConfig - client = Admin.create(config) - try { - assertConsumerGroupsIsClean() - - val testTopicName = "test_topic" - prepareTopics(List(testTopicName), 2) - prepareRecords(testTopicName) - - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - val groupInstances = Set("") - val topics = Set(testTopicName) - - // We need to disable the auto commit because after the members got removed from group, the offset commit - // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) - val defaultConsumerConfig = new Properties(consumerConfig) - defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) - - try { - // Start consumer polling threads in the background - backgroundConsumers.start() - val topicPartition = new TopicPartition(testTopicName, 0) - - // Test listConsumerGroupOffsets - TestUtils.waitUntilTrue(() => { - val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() - parts.containsKey(topicPartition) && (parts.get(topicPartition).offset() == 1) - }, "Expected the offset for partition 0 to eventually become 1.") - - // Test listConsumerGroupOffsets with requireStable true - val options = new ListConsumerGroupOffsetsOptions().requireStable(true) - var parts = client.listConsumerGroupOffsets(testGroupId, options) - .partitionsToOffsetAndMetadata() - .get() - assertTrue(parts.containsKey(topicPartition)) - assertEquals(1, parts.get(topicPartition).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec - val groupSpecs = util.Map.of( - testGroupId, - new ListConsumerGroupOffsetsSpec().topicPartitions(util.List.of(new TopicPartition(testTopicName, 0))) - ) - parts = client.listConsumerGroupOffsets(groupSpecs) - .partitionsToOffsetAndMetadata() - .get() - assertTrue(parts.containsKey(topicPartition)) - assertEquals(1, parts.get(topicPartition).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option - parts = client.listConsumerGroupOffsets(groupSpecs, options) - .partitionsToOffsetAndMetadata() - .get() - assertTrue(parts.containsKey(topicPartition)) - assertEquals(1, parts.get(topicPartition).offset()) - } finally { - backgroundConsumers.close() - } - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testListConsumerGroups(groupProtocol: String): Unit = { - val config = createConfig - client = Admin.create(config) - try { - assertConsumerGroupsIsClean() - - val testTopicName = "test_topic" - prepareTopics(List(testTopicName), 2) - - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - val groupInstances = Set("") - val topics = Set(testTopicName) - - // We need to disable the auto commit because after the members got removed from group, the offset commit - // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) - val defaultConsumerConfig = new Properties(consumerConfig) - defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) - - try { - val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC - // Start consumer polling threads in the background - backgroundConsumers.start() - - // Test that we can list the new group. - TestUtils.waitUntilTrue(() => { - val matching = client.listConsumerGroups.all.get.asScala.filter(group => - group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in group type $groupType") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) - .inGroupStates(util.Set.of(GroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.EMPTY)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(_.groupId == testGroupId) - matching.isEmpty - }, "Expected to find zero groups") - } finally { - backgroundConsumers.close() - } - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDescribeGroups(groupProtocol: String): Unit = { + /** + * Test the consumer group APIs. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerGroups(quorum: String, groupProtocol: String): Unit = { val config = createConfig client = Admin.create(config) try { - assertConsumerGroupsIsClean() - + // Verify that initially there are no consumer groups to list. + val list1 = client.listConsumerGroups() + assertEquals(0, list1.all().get().size()) + assertEquals(0, list1.errors().get().size()) + assertEquals(0, list1.valid().get().size()) val testTopicName = "test_topic" val testTopicName1 = testTopicName + "1" val testTopicName2 = testTopicName + "2" val testNumPartitions = 2 - prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) + + client.createTopics(util.Arrays.asList( + new NewTopic(testTopicName, testNumPartitions, 1.toShort), + new NewTopic(testTopicName1, testNumPartitions, 1.toShort), + new NewTopic(testTopicName2, testNumPartitions, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) val producer = createProducer() try { @@ -1982,68 +1839,245 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { Utils.closeQuietly(producer, "producer") } + val EMPTY_GROUP_INSTANCE_ID = "" val testGroupId = "test_group_id" val testClientId = "test_client_id" val testInstanceId1 = "test_instance_id_1" val testInstanceId2 = "test_instance_id_2" val fakeGroupId = "fake_group_id" - // contains two static members and one dynamic member - val groupInstances = Set(testInstanceId1, testInstanceId2, "") - val topics = Set(testTopicName, testTopicName1, testTopicName2) + def createProperties(groupInstanceId: String): Properties = { + val newConsumerConfig = new Properties(consumerConfig) + // We need to disable the auto commit because after the members got removed from group, the offset commit + // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) + newConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + if (groupInstanceId != EMPTY_GROUP_INSTANCE_ID) { + newConsumerConfig.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) + } + newConsumerConfig + } - // We need to disable the auto commit because after the members got removed from group, the offset commit - // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) - val defaultConsumerConfig = new Properties(consumerConfig) - defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - val backgroundConsumers = prepareConsumers(groupInstances, topics, defaultConsumerConfig) + // contains two static members and one dynamic member + val groupInstanceSet = Set(testInstanceId1, testInstanceId2, EMPTY_GROUP_INSTANCE_ID) + val consumerSet = groupInstanceSet.map { groupInstanceId => createConsumer(configOverrides = createProperties(groupInstanceId))} + val topicSet = Set(testTopicName, testTopicName1, testTopicName2) + val latch = new CountDownLatch(consumerSet.size) try { - val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC - // Start consumer polling threads in the background - backgroundConsumers.start() - - val describeWithFakeGroupResult = client.describeConsumerGroups(util.List.of(testGroupId, fakeGroupId), - new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) - - // Test that we can get information about the test consumer group. - assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) - val testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() - if (groupType == GroupType.CLASSIC) { - assertTrue(testGroupDescription.groupEpoch.isEmpty) - assertTrue(testGroupDescription.targetAssignmentEpoch.isEmpty) - } else { - assertEquals(Optional.of(3), testGroupDescription.groupEpoch) - assertEquals(Optional.of(3), testGroupDescription.targetAssignmentEpoch) - } - - assertEquals(testGroupId, testGroupDescription.groupId()) - assertFalse(testGroupDescription.isSimpleConsumerGroup) - assertEquals(groupInstances.size, testGroupDescription.members().size()) - val members = testGroupDescription.members() - members.asScala.foreach { member => - assertEquals(testClientId, member.clientId) - assertEquals(if (groupType == GroupType.CLASSIC) Optional.empty else Optional.of(true), member.upgraded) + def createConsumerThread[K,V](consumer: Consumer[K,V], topic: String): Thread = { + new Thread { + override def run : Unit = { + consumer.subscribe(Collections.singleton(topic)) + try { + while (true) { + consumer.poll(JDuration.ofSeconds(5)) + if (!consumer.assignment.isEmpty && latch.getCount > 0L) + latch.countDown() + try { + consumer.commitSync() + } catch { + case _: CommitFailedException => // Ignore and retry on next iteration. + } + } + } catch { + case _: InterruptException => // Suppress the output to stderr + } + } + } } - val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) - topics.foreach(topic => assertEquals(testNumPartitions, topicPartitionsByTopic.getOrElse(topic, List.empty).size)) - val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) - assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) + // Start consumers in a thread that will subscribe to a new group. + val consumerThreads = consumerSet.zip(topicSet).map(zipped => createConsumerThread(zipped._1, zipped._2)) + val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC - // Test that the fake group throws GroupIdNotFoundException - assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) - assertFutureThrows(classOf[GroupIdNotFoundException], describeWithFakeGroupResult.describedGroups().get(fakeGroupId), - s"Group $fakeGroupId not found.") + try { + consumerThreads.foreach(_.start()) + assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) + // Test that we can list the new group. + TestUtils.waitUntilTrue(() => { + val matching = client.listConsumerGroups.all.get.asScala.filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + .inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter( + _.groupId == testGroupId) + matching.isEmpty + }, s"Expected to find zero groups") + + val describeWithFakeGroupResult = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) + + // Test that we can get information about the test consumer group. + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) + var testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() + assertEquals(groupType == GroupType.CLASSIC, testGroupDescription.groupEpoch.isEmpty) + assertEquals(groupType == GroupType.CLASSIC, testGroupDescription.targetAssignmentEpoch.isEmpty) + + assertEquals(testGroupId, testGroupDescription.groupId()) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) + val members = testGroupDescription.members() + members.asScala.foreach { member => + assertEquals(testClientId, member.clientId) + assertEquals(if (groupType == GroupType.CLASSIC) Optional.empty else Optional.of(true), member.upgraded) + } + val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) + topicSet.foreach { topic => + val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) + assertEquals(testNumPartitions, topicPartitions.size) + } - // Test that all() also throws GroupIdNotFoundException - assertFutureThrows(classOf[GroupIdNotFoundException], describeWithFakeGroupResult.all(), - s"Group $fakeGroupId not found.") + val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) + assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) + + // Test that the fake group throws GroupIdNotFoundException + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), classOf[GroupIdNotFoundException], + s"Group $fakeGroupId not found.") + + // Test that all() also throws GroupIdNotFoundException + assertFutureThrows(describeWithFakeGroupResult.all(), classOf[GroupIdNotFoundException], + s"Group $fakeGroupId not found.") + + val testTopicPart0 = new TopicPartition(testTopicName, 0) + + // Test listConsumerGroupOffsets + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 1) + }, s"Expected the offset for partition 0 to eventually become 1.") + + // Test listConsumerGroupOffsets with requireStable true + val options = new ListConsumerGroupOffsetsOptions().requireStable(true) + var parts = client.listConsumerGroupOffsets(testGroupId, options) + .partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec + val groupSpecs = Collections.singletonMap(testGroupId, + new ListConsumerGroupOffsetsSpec().topicPartitions(Collections.singleton(new TopicPartition(testTopicName, 0)))) + parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option + parts = client.listConsumerGroupOffsets(groupSpecs, options).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test delete non-exist consumer instance + val invalidInstanceId = "invalid-instance-id" + var removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions( + Collections.singleton(new MemberToRemove(invalidInstanceId)) + )) + + assertFutureThrows(removeMembersResult.all, classOf[UnknownMemberIdException]) + val firstMemberFuture = removeMembersResult.memberResult(new MemberToRemove(invalidInstanceId)) + assertFutureThrows(firstMemberFuture, classOf[UnknownMemberIdException]) + + // Test consumer group deletion + var deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava) + assertEquals(2, deleteResult.deletedGroups().size()) + + // Deleting the fake group ID should get GroupIdNotFoundException. + assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException]) + + // Deleting the real group ID should get GroupNotEmptyException + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(testGroupId), + classOf[GroupNotEmptyException]) + + // Test delete one correct static member + val removeOptions = new RemoveMembersFromConsumerGroupOptions(Collections.singleton(new MemberToRemove(testInstanceId1))) + removeOptions.reason("test remove") + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, removeOptions) + + assertNull(removeMembersResult.all().get()) + val validMemberFuture = removeMembersResult.memberResult(new MemberToRemove(testInstanceId1)) + assertNull(validMemberFuture.get()) + + val describeTestGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(1, describeTestGroupResult.describedGroups().size()) + + testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() + + assertEquals(testGroupId, testGroupDescription.groupId) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(consumerSet.size - 1, testGroupDescription.members().size()) + + // Delete all active members remaining (a static member + a dynamic member) + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions()) + assertNull(removeMembersResult.all().get()) + + // The group should contain no members now. + testGroupDescription = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + .describedGroups().get(testGroupId).get() + assertTrue(testGroupDescription.members().isEmpty) + + // Consumer group deletion on empty group should succeed + deleteResult = client.deleteConsumerGroups(Seq(testGroupId).asJava) + assertEquals(1, deleteResult.deletedGroups().size()) + + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertNull(deleteResult.deletedGroups().get(testGroupId).get()) + + // Test alterConsumerGroupOffsets + val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(testGroupId, + Collections.singletonMap(testTopicPart0, new OffsetAndMetadata(0L))) + assertNull(alterConsumerGroupOffsetsResult.all().get()) + assertNull(alterConsumerGroupOffsetsResult.partitionResult(testTopicPart0).get()) + + // Verify alterConsumerGroupOffsets success + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 0) + }, s"Expected the offset for partition 0 to eventually become 0.") + } finally { + consumerThreads.foreach { + case consumerThread => + consumerThread.interrupt() + consumerThread.join() + } + } } finally { - backgroundConsumers.close() + consumerSet.zip(groupInstanceSet).foreach(zipped => Utils.closeQuietly(zipped._1, zipped._2)) } } finally { Utils.closeQuietly(client, "adminClient") @@ -2053,8 +2087,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { /** * Test the consumer group APIs. */ - @Test - def testConsumerGroupWithMemberMigration(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumerGroupWithMemberMigration(quorum: String): Unit = { val config = createConfig client = Admin.create(config) var classicConsumer: Consumer[Array[Byte], Array[Byte]] = null @@ -2068,7 +2103,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val testTopicName = "test_topic" val testNumPartitions = 2 - client.createTopics(util.List.of( + client.createTopics(util.Arrays.asList( new NewTopic(testTopicName, testNumPartitions, 1.toShort), )).all.get waitForTopics(client, List(testTopicName), List()) @@ -2092,25 +2127,25 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) classicConsumer = createConsumer(configOverrides = newConsumerConfig) - classicConsumer.subscribe(util.List.of(testTopicName)) + classicConsumer.subscribe(List(testTopicName).asJava) classicConsumer.poll(JDuration.ofMillis(1000)) newConsumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, testConsumerClientId) consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CONSUMER.name) consumerConsumer = createConsumer(configOverrides = newConsumerConfig) - consumerConsumer.subscribe(util.List.of(testTopicName)) + consumerConsumer.subscribe(List(testTopicName).asJava) consumerConsumer.poll(JDuration.ofMillis(1000)) TestUtils.waitUntilTrue(() => { classicConsumer.poll(JDuration.ofMillis(100)) consumerConsumer.poll(JDuration.ofMillis(100)) - val describeConsumerGroupResult = client.describeConsumerGroups(util.List.of(testGroupId)).all.get + val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get describeConsumerGroupResult.containsKey(testGroupId) && describeConsumerGroupResult.get(testGroupId).groupState == GroupState.STABLE && describeConsumerGroupResult.get(testGroupId).members.size == 2 }, s"Expected to find 2 members in a stable group $testGroupId") - val describeConsumerGroupResult = client.describeConsumerGroups(util.List.of(testGroupId)).all.get + val describeConsumerGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava).all.get val group = describeConsumerGroupResult.get(testGroupId) assertNotNull(group) assertEquals(Optional.of(2), group.groupEpoch) @@ -2135,457 +2170,388 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { /** * Test the consumer group APIs. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumerGroupsDeprecatedConsumerGroupState(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerGroupsDeprecatedConsumerGroupState(quorum: String, groupProtocol: String): Unit = { val config = createConfig client = Admin.create(config) try { - assertConsumerGroupsIsClean() - + // Verify that initially there are no consumer groups to list. + val list1 = client.listConsumerGroups() + assertEquals(0, list1.all().get().size()) + assertEquals(0, list1.errors().get().size()) + assertEquals(0, list1.valid().get().size()) val testTopicName = "test_topic" val testTopicName1 = testTopicName + "1" val testTopicName2 = testTopicName + "2" val testNumPartitions = 2 - prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) - prepareRecords(testTopicName) + client.createTopics(util.Arrays.asList( + new NewTopic(testTopicName, testNumPartitions, 1.toShort), + new NewTopic(testTopicName1, testNumPartitions, 1.toShort), + new NewTopic(testTopicName2, testNumPartitions, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName, testTopicName1, testTopicName2), List()) + + val producer = createProducer() + try { + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + } finally { + Utils.closeQuietly(producer, "producer") + } + val EMPTY_GROUP_INSTANCE_ID = "" val testGroupId = "test_group_id" val testClientId = "test_client_id" val testInstanceId1 = "test_instance_id_1" val testInstanceId2 = "test_instance_id_2" val fakeGroupId = "fake_group_id" + def createProperties(groupInstanceId: String): Properties = { + val newConsumerConfig = new Properties(consumerConfig) + // We need to disable the auto commit because after the members got removed from group, the offset commit + // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) + newConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") + newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + if (groupInstanceId != EMPTY_GROUP_INSTANCE_ID) { + newConsumerConfig.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) + } + newConsumerConfig + } + // contains two static members and one dynamic member - val groupInstanceSet = Set(testInstanceId1, testInstanceId2, "") + val groupInstanceSet = Set(testInstanceId1, testInstanceId2, EMPTY_GROUP_INSTANCE_ID) + val consumerSet = groupInstanceSet.map { groupInstanceId => createConsumer(configOverrides = createProperties(groupInstanceId))} val topicSet = Set(testTopicName, testTopicName1, testTopicName2) - // We need to disable the auto commit because after the members got removed from group, the offset commit - // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) - val defaultConsumerConfig = new Properties(consumerConfig) - defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - - val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) - groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => - val configOverrides = new Properties() - if (groupInstanceId != "") { - // static member - configOverrides.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) + val latch = new CountDownLatch(consumerSet.size) + try { + def createConsumerThread[K,V](consumer: Consumer[K,V], topic: String): Thread = { + new Thread { + override def run : Unit = { + consumer.subscribe(Collections.singleton(topic)) + try { + while (true) { + consumer.poll(JDuration.ofSeconds(5)) + if (!consumer.assignment.isEmpty && latch.getCount > 0L) + latch.countDown() + try { + consumer.commitSync() + } catch { + case _: CommitFailedException => // Ignore and retry on next iteration. + } + } + } catch { + case _: InterruptException => // Suppress the output to stderr + } + } + } } - backgroundConsumerSet.addConsumer(topic, configOverrides) - } - try { + // Start consumers in a thread that will subscribe to a new group. + val consumerThreads = consumerSet.zip(topicSet).map(zipped => createConsumerThread(zipped._1, zipped._2)) val groupType = if (groupProtocol.equalsIgnoreCase(GroupProtocol.CONSUMER.name)) GroupType.CONSUMER else GroupType.CLASSIC - // Start consumer polling threads in the background - backgroundConsumerSet.start() - - // Test that we can list the new group. - TestUtils.waitUntilTrue(() => { - val matching = client.listConsumerGroups.all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in group type $groupType") - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) - .inStates(util.Set.of(ConsumerGroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().withTypes(util.Set.of(groupType)) - .inGroupStates(util.Set.of(GroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(util.Set.of(ConsumerGroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.STABLE)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => - group.groupId == testGroupId && - group.state.get == ConsumerGroupState.STABLE && - group.groupState.get == GroupState.STABLE) - matching.size == 1 - }, s"Expected to be able to list $testGroupId in state Stable") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inStates(util.Set.of(ConsumerGroupState.EMPTY)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter( - _.groupId == testGroupId) - matching.isEmpty - }, s"Expected to find zero groups") - - TestUtils.waitUntilTrue(() => { - val options = new ListConsumerGroupsOptions().inGroupStates(util.Set.of(GroupState.EMPTY)) - val matching = client.listConsumerGroups(options).all.get.asScala.filter( - _.groupId == testGroupId) - matching.isEmpty - }, s"Expected to find zero groups") - - val describeWithFakeGroupResult = client.describeConsumerGroups(util.List.of(testGroupId, fakeGroupId), - new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) - - // Test that we can get information about the test consumer group. - assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) - val testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() + try { + consumerThreads.foreach(_.start()) + assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) + // Test that we can list the new group. + TestUtils.waitUntilTrue(() => { + val matching = client.listConsumerGroups.all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + .inStates(Set(ConsumerGroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().withTypes(Set(groupType).asJava) + .inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in group type $groupType and state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.STABLE).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter(group => + group.groupId == testGroupId && + group.state.get == ConsumerGroupState.STABLE && + group.groupState.get == GroupState.STABLE) + matching.size == 1 + }, s"Expected to be able to list $testGroupId in state Stable") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inStates(Set(ConsumerGroupState.EMPTY).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter( + _.groupId == testGroupId) + matching.isEmpty + }, s"Expected to find zero groups") + + TestUtils.waitUntilTrue(() => { + val options = new ListConsumerGroupsOptions().inGroupStates(Set(GroupState.EMPTY).asJava) + val matching = client.listConsumerGroups(options).all.get.asScala.filter( + _.groupId == testGroupId) + matching.isEmpty + }, s"Expected to find zero groups") + + val describeWithFakeGroupResult = client.describeConsumerGroups(Seq(testGroupId, fakeGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) + + // Test that we can get information about the test consumer group. + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(testGroupId)) + var testGroupDescription = describeWithFakeGroupResult.describedGroups().get(testGroupId).get() + + assertEquals(testGroupId, testGroupDescription.groupId()) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) + val members = testGroupDescription.members() + members.asScala.foreach(member => assertEquals(testClientId, member.clientId())) + val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) + topicSet.foreach { topic => + val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) + assertEquals(testNumPartitions, topicPartitions.size) + } - assertEquals(testGroupId, testGroupDescription.groupId()) - assertFalse(testGroupDescription.isSimpleConsumerGroup) - assertEquals(groupInstanceSet.size, testGroupDescription.members().size()) - val members = testGroupDescription.members() - members.asScala.foreach(member => assertEquals(testClientId, member.clientId())) - val topicPartitionsByTopic = members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()) - topicSet.foreach { topic => - val topicPartitions = topicPartitionsByTopic.getOrElse(topic, List.empty) - assertEquals(testNumPartitions, topicPartitions.size) + val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) + assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) + + // Test that the fake group throws GroupIdNotFoundException + assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") + + // Test that all() also throws GroupIdNotFoundException + assertFutureThrows(describeWithFakeGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") + + val testTopicPart0 = new TopicPartition(testTopicName, 0) + + // Test listConsumerGroupOffsets + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 1) + }, s"Expected the offset for partition 0 to eventually become 1.") + + // Test listConsumerGroupOffsets with requireStable true + val options = new ListConsumerGroupOffsetsOptions().requireStable(true) + var parts = client.listConsumerGroupOffsets(testGroupId, options) + .partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec + val groupSpecs = Collections.singletonMap(testGroupId, + new ListConsumerGroupOffsetsSpec().topicPartitions(Collections.singleton(new TopicPartition(testTopicName, 0)))) + parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option + parts = client.listConsumerGroupOffsets(groupSpecs, options).partitionsToOffsetAndMetadata().get() + assertTrue(parts.containsKey(testTopicPart0)) + assertEquals(1, parts.get(testTopicPart0).offset()) + + // Test delete non-exist consumer instance + val invalidInstanceId = "invalid-instance-id" + var removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions( + Collections.singleton(new MemberToRemove(invalidInstanceId)) + )) + + assertFutureThrows(removeMembersResult.all, classOf[UnknownMemberIdException]) + val firstMemberFuture = removeMembersResult.memberResult(new MemberToRemove(invalidInstanceId)) + assertFutureThrows(firstMemberFuture, classOf[UnknownMemberIdException]) + + // Test consumer group deletion + var deleteResult = client.deleteConsumerGroups(Seq(testGroupId, fakeGroupId).asJava) + assertEquals(2, deleteResult.deletedGroups().size()) + + // Deleting the fake group ID should get GroupIdNotFoundException. + assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException]) + + // Deleting the real group ID should get GroupNotEmptyException + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertFutureThrows(deleteResult.deletedGroups().get(testGroupId), + classOf[GroupNotEmptyException]) + + // Test delete one correct static member + val removeOptions = new RemoveMembersFromConsumerGroupOptions(Collections.singleton(new MemberToRemove(testInstanceId1))) + removeOptions.reason("test remove") + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, removeOptions) + + assertNull(removeMembersResult.all().get()) + val validMemberFuture = removeMembersResult.memberResult(new MemberToRemove(testInstanceId1)) + assertNull(validMemberFuture.get()) + + val describeTestGroupResult = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + assertEquals(1, describeTestGroupResult.describedGroups().size()) + + testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() + + assertEquals(testGroupId, testGroupDescription.groupId) + assertFalse(testGroupDescription.isSimpleConsumerGroup) + assertEquals(consumerSet.size - 1, testGroupDescription.members().size()) + + // Delete all active members remaining (a static member + a dynamic member) + removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions()) + assertNull(removeMembersResult.all().get()) + + // The group should contain no members now. + testGroupDescription = client.describeConsumerGroups(Seq(testGroupId).asJava, + new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) + .describedGroups().get(testGroupId).get() + assertTrue(testGroupDescription.members().isEmpty) + + // Consumer group deletion on empty group should succeed + deleteResult = client.deleteConsumerGroups(Seq(testGroupId).asJava) + assertEquals(1, deleteResult.deletedGroups().size()) + + assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) + assertNull(deleteResult.deletedGroups().get(testGroupId).get()) + + // Test alterConsumerGroupOffsets + val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(testGroupId, + Collections.singletonMap(testTopicPart0, new OffsetAndMetadata(0L))) + assertNull(alterConsumerGroupOffsetsResult.all().get()) + assertNull(alterConsumerGroupOffsetsResult.partitionResult(testTopicPart0).get()) + + // Verify alterConsumerGroupOffsets success + TestUtils.waitUntilTrue(() => { + val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() + parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 0) + }, s"Expected the offset for partition 0 to eventually become 0.") + } finally { + consumerThreads.foreach { + case consumerThread => + consumerThread.interrupt() + consumerThread.join() + } } - - val expectedOperations = AclEntry.supportedOperations(ResourceType.GROUP) - assertEquals(expectedOperations, testGroupDescription.authorizedOperations()) - - // Test that the fake group throws GroupIdNotFoundException - assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) - assertFutureThrows(classOf[GroupIdNotFoundException], - describeWithFakeGroupResult.describedGroups().get(fakeGroupId), s"Group $fakeGroupId not found.") - - // Test that all() also throws GroupIdNotFoundException - assertFutureThrows(classOf[GroupIdNotFoundException], - describeWithFakeGroupResult.all(), s"Group $fakeGroupId not found.") - - val testTopicPart0 = new TopicPartition(testTopicName, 0) - - // Test listConsumerGroupOffsets - TestUtils.waitUntilTrue(() => { - val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() - parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 1) - }, s"Expected the offset for partition 0 to eventually become 1.") - - // Test listConsumerGroupOffsets with requireStable true - val options = new ListConsumerGroupOffsetsOptions().requireStable(true) - var parts = client.listConsumerGroupOffsets(testGroupId, options) - .partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, parts.get(testTopicPart0).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec - val groupSpecs = util.Map.of(testGroupId, - new ListConsumerGroupOffsetsSpec().topicPartitions(util.Set.of(new TopicPartition(testTopicName, 0)))) - parts = client.listConsumerGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, parts.get(testTopicPart0).offset()) - - // Test listConsumerGroupOffsets with listConsumerGroupOffsetsSpec and requireStable option - parts = client.listConsumerGroupOffsets(groupSpecs, options).partitionsToOffsetAndMetadata().get() - assertTrue(parts.containsKey(testTopicPart0)) - assertEquals(1, parts.get(testTopicPart0).offset()) } finally { - backgroundConsumerSet.close() + consumerSet.zip(groupInstanceSet).foreach(zipped => Utils.closeQuietly(zipped._1, zipped._2)) } } finally { Utils.closeQuietly(client, "adminClient") } } - - /** - * Test the consumer group APIs for member removal. - */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumerGroupWithMemberRemoval(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDeleteConsumerGroupOffsets(quorum: String, groupProtocol: String): Unit = { val config = createConfig client = Admin.create(config) try { - // Verify that initially there are no consumer groups to list. - assertConsumerGroupsIsClean() val testTopicName = "test_topic" - val testTopicName1 = testTopicName + "1" - val testTopicName2 = testTopicName + "2" - val testNumPartitions = 2 - - prepareTopics(List(testTopicName, testTopicName1, testTopicName2), testNumPartitions) - - prepareRecords(testTopicName) - val testGroupId = "test_group_id" val testClientId = "test_client_id" - val testInstanceId1 = "test_instance_id_1" - val testInstanceId2 = "test_instance_id_2" val fakeGroupId = "fake_group_id" - // contains two static members and one dynamic member - val groupInstanceSet = Set(testInstanceId1, testInstanceId2, "") - val topicSet = Set(testTopicName, testTopicName1, testTopicName2) + val tp1 = new TopicPartition(testTopicName, 0) + val tp2 = new TopicPartition("foo", 0) - // We need to disable the auto commit because after the members got removed from group, the offset commit - // will cause the member rejoining and the test will be flaky (check ConsumerCoordinator#OffsetCommitResponseHandler) - val defaultConsumerConfig = new Properties(consumerConfig) - defaultConsumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false") - defaultConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - defaultConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - // We need to set internal.leave.group.on.close to validate dynamic member removal, but it only works for ClassicConsumer - // After KIP-1092, we can control dynamic member removal for both ClassicConsumer and AsyncConsumer - defaultConsumerConfig.setProperty("internal.leave.group.on.close", "false") - - val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) - groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => - val configOverrides = new Properties() - if (groupInstanceId != "") { - // static member - configOverrides.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) - } - backgroundConsumerSet.addConsumer(topic, configOverrides) - } + client.createTopics(Collections.singleton( + new NewTopic(testTopicName, 1, 1.toShort))).all().get() + waitForTopics(client, List(testTopicName), List()) + val producer = createProducer() try { - // Start consumer polling threads in the background - backgroundConsumerSet.start() - - // Test delete non-exist consumer instance - val invalidInstanceId = "invalid-instance-id" - var removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions( - util.Set.of(new MemberToRemove(invalidInstanceId)) - )) + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() + } finally { + Utils.closeQuietly(producer, "producer") + } - assertFutureThrows(classOf[UnknownMemberIdException], removeMembersResult.all) - val firstMemberFuture = removeMembersResult.memberResult(new MemberToRemove(invalidInstanceId)) - assertFutureThrows(classOf[UnknownMemberIdException], firstMemberFuture) + val newConsumerConfig = new Properties(consumerConfig) + newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) + newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) + // Increase timeouts to avoid having a rebalance during the test + newConsumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, Integer.MAX_VALUE.toString) + if (GroupProtocol.CLASSIC.name.equalsIgnoreCase(groupProtocol)) { + newConsumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT.toString) + } - // Test consumer group deletion - var deleteResult = client.deleteConsumerGroups(util.List.of(testGroupId, fakeGroupId)) - assertEquals(2, deleteResult.deletedGroups().size()) + Using.resource(createConsumer(configOverrides = newConsumerConfig)) { consumer => + consumer.subscribe(Collections.singletonList(testTopicName)) + val records = consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) + assertNotEquals(0, records.count) + consumer.commitSync() - // Deleting the fake group ID should get GroupIdNotFoundException. - assertTrue(deleteResult.deletedGroups().containsKey(fakeGroupId)) - assertFutureThrows(classOf[GroupIdNotFoundException], deleteResult.deletedGroups().get(fakeGroupId)) + // Test offset deletion while consuming + val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) - // Deleting the real group ID should get GroupNotEmptyException - assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) - assertFutureThrows(classOf[GroupNotEmptyException], deleteResult.deletedGroups().get(testGroupId)) + // Top level error will equal to the first partition level error + assertFutureThrows(offsetDeleteResult.all(), classOf[GroupSubscribedToTopicException]) + assertFutureThrows(offsetDeleteResult.partitionResult(tp1), + classOf[GroupSubscribedToTopicException]) + assertFutureThrows(offsetDeleteResult.partitionResult(tp2), + classOf[UnknownTopicOrPartitionException]) - // Stop the consumer threads and close consumers to prevent member rejoining. - backgroundConsumerSet.stop() + // Test the fake group ID + val fakeDeleteResult = client.deleteConsumerGroupOffsets(fakeGroupId, Set(tp1, tp2).asJava) - // Check the members in the group after consumers have stopped - var describeTestGroupResult = client.describeConsumerGroups(util.List.of(testGroupId), - new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - assertEquals(1, describeTestGroupResult.describedGroups().size()) + assertFutureThrows(fakeDeleteResult.all(), classOf[GroupIdNotFoundException]) + assertFutureThrows(fakeDeleteResult.partitionResult(tp1), + classOf[GroupIdNotFoundException]) + assertFutureThrows(fakeDeleteResult.partitionResult(tp2), + classOf[GroupIdNotFoundException]) + } - var testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() - assertEquals(testGroupId, testGroupDescription.groupId) - assertFalse(testGroupDescription.isSimpleConsumerGroup) - - // Although we set `internal.leave.group.on.close` in the consumer, it only works for ClassicConsumer. - // After KIP-1092, we can control dynamic member removal in consumer.close() - if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) { - assertEquals(3, testGroupDescription.members().size()) - } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) { - assertEquals(2, testGroupDescription.members().size()) - } + // Test offset deletion when group is empty + val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, Set(tp1, tp2).asJava) - // Test delete one static member - removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, - new RemoveMembersFromConsumerGroupOptions(util.Set.of(new MemberToRemove(testInstanceId1)))) + assertFutureThrows(offsetDeleteResult.all(), + classOf[UnknownTopicOrPartitionException]) + assertNull(offsetDeleteResult.partitionResult(tp1).get()) + assertFutureThrows(offsetDeleteResult.partitionResult(tp2), + classOf[UnknownTopicOrPartitionException]) + } finally { + Utils.closeQuietly(client, "adminClient") + } + } - assertNull(removeMembersResult.all().get()) - assertNull(removeMembersResult.memberResult(new MemberToRemove(testInstanceId1)).get()) - - describeTestGroupResult = client.describeConsumerGroups(util.List.of(testGroupId), - new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - testGroupDescription = describeTestGroupResult.describedGroups().get(testGroupId).get() - - if (groupProtocol == GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) { - assertEquals(2, testGroupDescription.members().size()) - } else if (groupProtocol == GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) { - assertEquals(1, testGroupDescription.members().size()) - } - - // Delete all active members remaining - removeMembersResult = client.removeMembersFromConsumerGroup(testGroupId, new RemoveMembersFromConsumerGroupOptions()) - assertNull(removeMembersResult.all().get()) - - // The group should contain no members now. - testGroupDescription = client.describeConsumerGroups(util.List.of(testGroupId), - new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)).describedGroups().get(testGroupId).get() - assertTrue(testGroupDescription.members().isEmpty) - - // Consumer group deletion on empty group should succeed - deleteResult = client.deleteConsumerGroups(util.List.of(testGroupId)) - assertEquals(1, deleteResult.deletedGroups().size()) - - assertTrue(deleteResult.deletedGroups().containsKey(testGroupId)) - assertNull(deleteResult.deletedGroups().get(testGroupId).get()) - - // Test alterConsumerGroupOffsets when group is empty - val testTopicPart0 = new TopicPartition(testTopicName, 0) - val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(testGroupId, - util.Map.of(testTopicPart0, new OffsetAndMetadata(0L))) - assertNull(alterConsumerGroupOffsetsResult.all().get()) - assertNull(alterConsumerGroupOffsetsResult.partitionResult(testTopicPart0).get()) - - // Verify alterConsumerGroupOffsets success - TestUtils.waitUntilTrue(() => { - val parts = client.listConsumerGroupOffsets(testGroupId).partitionsToOffsetAndMetadata().get() - parts.containsKey(testTopicPart0) && (parts.get(testTopicPart0).offset() == 0) - }, s"Expected the offset for partition 0 to eventually become 0.") - } finally { - backgroundConsumerSet.close() - } - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDeleteConsumerGroupOffsets(groupProtocol: String): Unit = { - val config = createConfig - client = Admin.create(config) - try { - val testTopicName = "test_topic" - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - val fakeGroupId = "fake_group_id" - - val tp1 = new TopicPartition(testTopicName, 0) - val tp2 = new TopicPartition("foo", 0) - - client.createTopics(util.Set.of( - new NewTopic(testTopicName, 1, 1.toShort))).all().get() - waitForTopics(client, List(testTopicName), List()) - - prepareRecords(testTopicName) - - val newConsumerConfig = new Properties(consumerConfig) - newConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - newConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - // Increase timeouts to avoid having a rebalance during the test - newConsumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, Integer.MAX_VALUE.toString) - if (GroupProtocol.CLASSIC.name.equalsIgnoreCase(groupProtocol)) { - newConsumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_DEFAULT.toString) - } - - Using.resource(createConsumer(configOverrides = newConsumerConfig)) { consumer => - consumer.subscribe(util.List.of(testTopicName)) - val records = consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) - assertNotEquals(0, records.count) - consumer.commitSync() - - // Test offset deletion while consuming - val partitions = new util.LinkedHashSet[TopicPartition](util.List.of(tp1, tp2)) - val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, partitions) - - // Top level error will equal to the first partition level error - assertFutureThrows(classOf[GroupSubscribedToTopicException], offsetDeleteResult.all()) - assertFutureThrows(classOf[GroupSubscribedToTopicException], offsetDeleteResult.partitionResult(tp1)) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.partitionResult(tp2)) - - // Test the fake group ID - val fakeDeleteResult = client.deleteConsumerGroupOffsets(fakeGroupId, util.Set.of(tp1, tp2)) - - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.all()) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.partitionResult(tp1)) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.partitionResult(tp2)) - } - - // Test offset deletion when group is empty - val offsetDeleteResult = client.deleteConsumerGroupOffsets(testGroupId, util.Set.of(tp1, tp2)) - - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.all()) - assertNull(offsetDeleteResult.partitionResult(tp1).get()) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.partitionResult(tp2)) - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - private def prepareTopics(topics: List[String], numberOfPartitions: Int): Unit = { - client.createTopics(topics.map(topic => new NewTopic(topic, numberOfPartitions, 1.toShort)).asJava).all().get() - waitForTopics(client, topics, List()) - } - - private def prepareRecords(testTopicName: String) = { - val producer = createProducer() - try { - producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() - } finally { - Utils.closeQuietly(producer, "producer") - } - } - - private def prepareConsumers(groupInstanceSet: Set[String], topicSet: Set[String], defaultConsumerConfig: Properties) = { - val backgroundConsumerSet = new BackgroundConsumerSet(defaultConsumerConfig) - groupInstanceSet.zip(topicSet).foreach { case (groupInstanceId, topic) => - val configOverrides = new Properties() - if (groupInstanceId != "") { - // static member - configOverrides.setProperty(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, groupInstanceId) - } - backgroundConsumerSet.addConsumer(topic, configOverrides) - } - backgroundConsumerSet - } - - /** - * Verify that initially there are no consumer groups to list. - */ - private def assertConsumerGroupsIsClean(): Unit = { - val listResult = client.listConsumerGroups() - assertEquals(0, listResult.all().get().size()) - assertEquals(0, listResult.errors().get().size()) - assertEquals(0, listResult.valid().get().size()) - } - - @Test - def testListGroups(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft+kip932")) + def testListGroups(quorum: String): Unit = { val classicGroupId = "classic_group_id" val consumerGroupId = "consumer_group_id" val shareGroupId = "share_group_id" val simpleGroupId = "simple_group_id" - val streamsGroupId = "streams_group_id" val testTopicName = "test_topic" - val config = createConfig - client = Admin.create(config) - - client.createTopics(util.Set.of( - new NewTopic(testTopicName, 1, 1.toShort) - )).all().get() - waitForTopics(client, List(testTopicName), List()) - val topicPartition = new TopicPartition(testTopicName, 0) - consumerConfig.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, GroupProtocol.CLASSIC.name) val classicGroupConfig = new Properties(consumerConfig) classicGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, classicGroupId) @@ -2600,83 +2566,67 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { shareGroupConfig.put(ConsumerConfig.GROUP_ID_CONFIG, shareGroupId) val shareGroup = createShareConsumer(configOverrides = shareGroupConfig) - val streamsGroup = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId - ) - + val config = createConfig + client = Admin.create(config) try { - classicGroup.subscribe(util.Set.of(testTopicName)) + client.createTopics(Collections.singleton( + new NewTopic(testTopicName, 1, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName), List()) + val topicPartition = new TopicPartition(testTopicName, 0) + + classicGroup.subscribe(Collections.singleton(testTopicName)) classicGroup.poll(JDuration.ofMillis(1000)) - consumerGroup.subscribe(util.Set.of(testTopicName)) + consumerGroup.subscribe(Collections.singleton(testTopicName)) consumerGroup.poll(JDuration.ofMillis(1000)) - shareGroup.subscribe(util.Set.of(testTopicName)) + shareGroup.subscribe(Collections.singleton(testTopicName)) shareGroup.poll(JDuration.ofMillis(1000)) - streamsGroup.poll(JDuration.ofMillis(1000)) val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, - util.Map.of(topicPartition, new OffsetAndMetadata(0L))) + Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) assertNull(alterConsumerGroupOffsetsResult.all().get()) assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) TestUtils.waitUntilTrue(() => { val groups = client.listGroups().all().get() - groups.size() == 5 + groups.size() == 4 }, "Expected to find all groups") val classicGroupListing = new GroupListing(classicGroupId, Optional.of(GroupType.CLASSIC), "consumer", Optional.of(GroupState.STABLE)) val consumerGroupListing = new GroupListing(consumerGroupId, Optional.of(GroupType.CONSUMER), "consumer", Optional.of(GroupState.STABLE)) val shareGroupListing = new GroupListing(shareGroupId, Optional.of(GroupType.SHARE), "share", Optional.of(GroupState.STABLE)) val simpleGroupListing = new GroupListing(simpleGroupId, Optional.of(GroupType.CLASSIC), "", Optional.of(GroupState.EMPTY)) - val streamsGroupListing = new GroupListing(streamsGroupId, Optional.of(GroupType.STREAMS), "streams", Optional.of(GroupState.STABLE)) var listGroupsResult = client.listGroups() assertTrue(listGroupsResult.errors().get().isEmpty) + assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.all().get().asScala.toSet) + assertEquals(Set(classicGroupListing, simpleGroupListing, consumerGroupListing, shareGroupListing), listGroupsResult.valid().get().asScala.toSet) - TestUtils.waitUntilTrue(() => { - val listGroupResultScala = client.listGroups().all().get().asScala - val filteredStreamsGroups = listGroupResultScala.filter(_.groupId() == streamsGroupId) - val filteredClassicGroups = listGroupResultScala.filter(_.groupId() == classicGroupId) - val filteredConsumerGroups = listGroupResultScala.filter(_.groupId() == consumerGroupId) - val filteredShareGroups = listGroupResultScala.filter(_.groupId() == shareGroupId) - filteredClassicGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && - filteredConsumerGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && - filteredShareGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) && - filteredStreamsGroups.forall(_.groupState().orElse(null) == GroupState.STABLE) - }, "Groups not stable yet") - - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.CLASSIC))) + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CLASSIC))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(classicGroupListing, simpleGroupListing), listGroupsResult.valid().get().asScala.toSet) - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.CONSUMER))) + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.CONSUMER))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(consumerGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(consumerGroupListing), listGroupsResult.valid().get().asScala.toSet) - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE))) + listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(java.util.Set.of(GroupType.SHARE))) assertTrue(listGroupsResult.errors().get().isEmpty) assertEquals(Set(shareGroupListing), listGroupsResult.all().get().asScala.toSet) assertEquals(Set(shareGroupListing), listGroupsResult.valid().get().asScala.toSet) - - listGroupsResult = client.listGroups(new ListGroupsOptions().withTypes(util.Set.of(GroupType.STREAMS))) - assertTrue(listGroupsResult.errors().get().isEmpty) - assertEquals(Set(streamsGroupListing), listGroupsResult.all().get().asScala.toSet) - assertEquals(Set(streamsGroupListing), listGroupsResult.valid().get().asScala.toSet) - } finally { Utils.closeQuietly(classicGroup, "classicGroup") Utils.closeQuietly(consumerGroup, "consumerGroup") Utils.closeQuietly(shareGroup, "shareGroup") - Utils.closeQuietly(streamsGroup, "streamsGroup") Utils.closeQuietly(client, "adminClient") } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testDescribeClassicGroups(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testDescribeClassicGroups(quorum: String, groupProtocol: String): Unit = { val classicGroupId = "classic_group_id" val simpleGroupId = "simple_group_id" val testTopicName = "test_topic" @@ -2688,27 +2638,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val config = createConfig client = Admin.create(config) try { - client.createTopics(util.Set.of( + client.createTopics(Collections.singleton( new NewTopic(testTopicName, 1, 1.toShort) )).all().get() waitForTopics(client, List(testTopicName), List()) val topicPartition = new TopicPartition(testTopicName, 0) - classicGroup.subscribe(util.Set.of(testTopicName)) + classicGroup.subscribe(Collections.singleton(testTopicName)) classicGroup.poll(JDuration.ofMillis(1000)) val alterConsumerGroupOffsetsResult = client.alterConsumerGroupOffsets(simpleGroupId, - util.Map.of(topicPartition, new OffsetAndMetadata(0L))) + Collections.singletonMap(topicPartition, new OffsetAndMetadata(0L))) assertNull(alterConsumerGroupOffsetsResult.all().get()) assertNull(alterConsumerGroupOffsetsResult.partitionResult(topicPartition).get()) - val groupIds = util.List.of(simpleGroupId, classicGroupId) + val groupIds = Seq(simpleGroupId, classicGroupId) TestUtils.waitUntilTrue(() => { - val groups = client.describeClassicGroups(groupIds).all().get() + val groups = client.describeClassicGroups(groupIds.asJavaCollection).all().get() groups.size() == 2 }, "Expected to find all groups") - val classicConsumers = client.describeClassicGroups(groupIds).all().get() + val classicConsumers = client.describeClassicGroups(groupIds.asJavaCollection).all().get() val classicConsumer = classicConsumers.get(classicGroupId) assertNotNull(classicConsumer) assertEquals(classicGroupId, classicConsumer.groupId) @@ -2725,36 +2675,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } - /** - * Verify that initially there are no share groups to list. - */ - private def assertNoShareGroupsExist(): Unit = { - val list = client.listGroups() - assertEquals(0, list.all().get().size()) - assertEquals(0, list.errors().get().size()) - assertEquals(0, list.valid().get().size()) - } - - private def createShareConsumerThread[K,V](consumer: ShareConsumer[K,V], topic: String, latch: CountDownLatch): Thread = { - new Thread { - override def run : Unit = { - consumer.subscribe(util.Set.of(topic)) - try { - while (true) { - consumer.poll(JDuration.ofSeconds(5)) - if (latch.getCount > 0L) - latch.countDown() - consumer.commitSync() - } - } catch { - case _: InterruptException => // Suppress the output to stderr - } - } - } - } - - @Test - def testShareGroups(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft+kip932")) + def testShareGroups(quorum: String): Unit = { val testGroupId = "test_group_id" val testClientId = "test_client_id" val fakeGroupId = "fake_group_id" @@ -2770,17 +2693,46 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val consumerSet = Set(createShareConsumer(configOverrides = createProperties())) val topicSet = Set(testTopicName) + val latch = new CountDownLatch(consumerSet.size) + def createShareConsumerThread[K,V](consumer: ShareConsumer[K,V], topic: String): Thread = { + new Thread { + override def run : Unit = { + consumer.subscribe(Collections.singleton(topic)) + try { + while (true) { + consumer.poll(JDuration.ofSeconds(5)) + if (latch.getCount > 0L) + latch.countDown() + consumer.commitSync() + } + } catch { + case _: InterruptException => // Suppress the output to stderr + } + } + } + } + val config = createConfig client = Admin.create(config) + val producer = createProducer() try { - assertNoShareGroupsExist() - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) + // Verify that initially there are no share groups to list. + val list = client.listGroups() + assertEquals(0, list.all().get().size()) + assertEquals(0, list.errors().get().size()) + assertEquals(0, list.valid().get().size()) + + client.createTopics(Collections.singleton( + new NewTopic(testTopicName, testNumPartitions, 1.toShort) + )).all().get() + waitForTopics(client, List(testTopicName), List()) + + producer.send(new ProducerRecord(testTopicName, 0, null, null)).get() // Start consumers in a thread that will subscribe to a new group. - val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2, latch)) + val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2)) try { consumerThreads.foreach(_.start()) @@ -2795,26 +2747,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { }, s"Expected to be able to list $testGroupId") TestUtils.waitUntilTrue(() => { - val options = new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE)).inGroupStates(util.Set.of(GroupState.STABLE)) + val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.STABLE)) client.listGroups(options).all.get.stream().filter(group => group.groupId == testGroupId && group.groupState.get == GroupState.STABLE).count() == 1 }, s"Expected to be able to list $testGroupId in state Stable") TestUtils.waitUntilTrue(() => { - val options = new ListGroupsOptions().withTypes(util.Set.of(GroupType.SHARE)).inGroupStates(util.Set.of(GroupState.EMPTY)) + val options = new ListGroupsOptions().withTypes(Collections.singleton(GroupType.SHARE)).inGroupStates(Collections.singleton(GroupState.EMPTY)) client.listGroups(options).all.get.stream().filter(_.groupId == testGroupId).count() == 0 }, s"Expected to find zero groups") - var describeWithFakeGroupResult: DescribeShareGroupsResult = null - - TestUtils.waitUntilTrue(() => { - describeWithFakeGroupResult = client.describeShareGroups(util.List.of(testGroupId, fakeGroupId), - new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) - val members = describeWithFakeGroupResult.describedGroups().get(testGroupId).get().members() - members.asScala.flatMap(_.assignment().topicPartitions().asScala).groupBy(_.topic()).nonEmpty - }, s"Could not get partitions assigned. Last response $describeWithFakeGroupResult.") - + val describeWithFakeGroupResult = client.describeShareGroups(util.Arrays.asList(testGroupId, fakeGroupId), + new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) assertEquals(2, describeWithFakeGroupResult.describedGroups().size()) // Test that we can get information about the test share group. @@ -2837,16 +2782,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Test that the fake group throws GroupIdNotFoundException assertTrue(describeWithFakeGroupResult.describedGroups().containsKey(fakeGroupId)) - assertFutureThrows(classOf[GroupIdNotFoundException], - describeWithFakeGroupResult.describedGroups().get(fakeGroupId), - s"Group $fakeGroupId not found.") + assertFutureThrows(describeWithFakeGroupResult.describedGroups().get(fakeGroupId), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") // Test that all() also throws GroupIdNotFoundException - assertFutureThrows(classOf[GroupIdNotFoundException], - describeWithFakeGroupResult.all(), - s"Group $fakeGroupId not found.") + assertFutureThrows(describeWithFakeGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $fakeGroupId not found.") - val describeTestGroupResult = client.describeShareGroups(util.Set.of(testGroupId), + val describeTestGroupResult = client.describeShareGroups(Collections.singleton(testGroupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) assertEquals(1, describeTestGroupResult.all().get().size()) assertEquals(1, describeTestGroupResult.describedGroups().size()) @@ -2858,196 +2801,10 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // Describing a share group using describeConsumerGroups reports it as a non-existent group // but the error message is different - val describeConsumerGroupResult = client.describeConsumerGroups(util.Set.of(testGroupId), + val describeConsumerGroupResult = client.describeConsumerGroups(Collections.singleton(testGroupId), new DescribeConsumerGroupsOptions().includeAuthorizedOperations(true)) - assertFutureThrows(classOf[GroupIdNotFoundException], - describeConsumerGroupResult.all(), - s"Group $testGroupId is not a consumer group.") - } finally { - consumerThreads.foreach { - case consumerThread => - consumerThread.interrupt() - consumerThread.join() - } - } - } finally { - consumerSet.foreach(consumer => Utils.closeQuietly(consumer, "consumer")) - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testDeleteShareGroupOffsets(): Unit = { - val config = createConfig - client = Admin.create(config) - val testTopicName = "test_topic" - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - val fakeGroupId = "fake_group_id" - val fakeTopicName = "foo" - - try { - prepareTopics(List(testTopicName), 1) - prepareRecords(testTopicName) - - val newShareConsumerConfig = new Properties(consumerConfig) - newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - - Using.resource(createShareConsumer(configOverrides = newShareConsumerConfig)) { consumer => - consumer.subscribe(util.List.of(testTopicName)) - consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) - consumer.commitSync() - - // listGroups is used to list share groups - // Test that we can list the new group. - TestUtils.waitUntilTrue(() => { - client.listGroups.all.get.stream().filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE).count() == 1 - }, s"Expected to be able to list $testGroupId") - - // Test offset deletion while consuming - val offsetDeleteResult = client.deleteShareGroupOffsets(testGroupId, util.Set.of(testTopicName, fakeTopicName)) - - // Deleting the offset with real group ID should get GroupNotEmptyException - assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.all()) - assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.topicResult(testTopicName)) - assertFutureThrows(classOf[GroupNotEmptyException], offsetDeleteResult.topicResult(fakeTopicName)) - - // Test the fake group ID - val fakeDeleteResult = client.deleteShareGroupOffsets(fakeGroupId, util.Set.of(testTopicName, fakeTopicName)) - - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.all()) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.topicResult(testTopicName)) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeDeleteResult.topicResult(fakeTopicName)) - } - - // Test offset deletion when group is empty - val offsetDeleteResult = client.deleteShareGroupOffsets(testGroupId, util.Set.of(testTopicName, fakeTopicName)) - - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.all()) - assertNull(offsetDeleteResult.topicResult(testTopicName).get()) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetDeleteResult.topicResult(fakeTopicName)) - - val tp1 = new TopicPartition(testTopicName, 0) - val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1)))) - .partitionsToOffsetAndMetadata(testGroupId) - .get() - assertTrue(parts.containsKey(tp1)) - assertNull(parts.get(tp1)) - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testAlterShareGroupOffsets(): Unit = { - val config = createConfig - client = Admin.create(config) - val testTopicName = "test_topic" - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - val fakeGroupId = "fake_group_id" - val fakeTopicName = "foo" - - val tp1 = new TopicPartition(testTopicName, 0) - val tp2 = new TopicPartition(fakeTopicName, 0) - try { - prepareTopics(List(testTopicName), 1) - prepareRecords(testTopicName) - - val newShareConsumerConfig = new Properties(consumerConfig) - newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - - Using.resource(createShareConsumer(configOverrides = newShareConsumerConfig)) { consumer => - consumer.subscribe(util.List.of(testTopicName)) - consumer.poll(JDuration.ofMillis(DEFAULT_MAX_WAIT_MS)) - consumer.commitSync() - - // listGroups is used to list share groups - // Test that we can list the new group. - TestUtils.waitUntilTrue(() => { - client.listGroups.all.get.stream().filter(group => - group.groupId == testGroupId && - group.groupState.get == GroupState.STABLE).count() == 1 - }, s"Expected to be able to list $testGroupId") - - // Test offset alter while consuming - val offsetAlterResult = client.alterShareGroupOffsets(testGroupId, util.Map.of(tp1, 0, tp2, 0)) - - // Altering the offset with real group ID should get GroupNotEmptyException - assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.all()) - assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.partitionResult(tp1)) - assertFutureThrows(classOf[GroupNotEmptyException], offsetAlterResult.partitionResult(tp2)) - - // Test the fake group ID - val fakeAlterResult = client.alterShareGroupOffsets(fakeGroupId, util.Map.of(tp1, 0, tp2, 0)) - - assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.all()) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.partitionResult(tp1)) - assertFutureThrows(classOf[GroupIdNotFoundException], fakeAlterResult.partitionResult(tp2)) - } - - // Test offset alter when group is empty - val offsetAlterResult = client.alterShareGroupOffsets(testGroupId, util.Map.of(tp1, 0, tp2, 0)) - - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetAlterResult.all()) - assertNull(offsetAlterResult.partitionResult(tp1).get()) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], offsetAlterResult.partitionResult(tp2)) - - val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1)))) - .partitionsToOffsetAndMetadata(testGroupId) - .get() - assertTrue(parts.containsKey(tp1)) - assertEquals(0, parts.get(tp1).offset()) - } finally { - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testListShareGroupOffsets(): Unit = { - val config = createConfig - client = Admin.create(config) - val testTopicName = "test_topic" - val testGroupId = "test_group_id" - val testClientId = "test_client_id" - - val newShareConsumerConfig = new Properties(consumerConfig) - newShareConsumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, testGroupId) - newShareConsumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, testClientId) - val consumerSet = Set(createShareConsumer(configOverrides = newShareConsumerConfig)) - val topicSet = Set(testTopicName) - val latch = new CountDownLatch(consumerSet.size) - - try { - assertNoShareGroupsExist() - prepareTopics(List(testTopicName), 2) - prepareRecords(testTopicName) - - // Start consumers in a thread that will subscribe to a new group. - val consumerThreads = consumerSet.zip(topicSet).map(zipped => createShareConsumerThread(zipped._1, zipped._2, latch)) - try { - consumerThreads.foreach(_.start()) - assertTrue(latch.await(30000, TimeUnit.MILLISECONDS)) - val tp1 = new TopicPartition(testTopicName, 0) - val tp2 = new TopicPartition(testTopicName, 1) - - // Test listShareGroupOffsets - TestUtils.waitUntilTrue(() => { - val parts = client.listShareGroupOffsets(util.Map.of(testGroupId, new ListShareGroupOffsetsSpec())) - .partitionsToOffsetAndMetadata(testGroupId) - .get() - parts.containsKey(tp1) && parts.containsKey(tp2) - }, "Expected the result contains all partitions.") - - // Test listShareGroupOffsets with listShareGroupOffsetsSpec - val groupSpecs = util.Map.of(testGroupId, new ListShareGroupOffsetsSpec().topicPartitions(util.List.of(tp1))) - val parts = client.listShareGroupOffsets(groupSpecs).partitionsToOffsetAndMetadata(testGroupId).get() - assertTrue(parts.containsKey(tp1)) - assertFalse(parts.containsKey(tp2)) + assertFutureThrows(describeConsumerGroupResult.all(), + classOf[GroupIdNotFoundException], s"Group $testGroupId is not a consumer group.") } finally { consumerThreads.foreach { case consumerThread => @@ -3057,26 +2814,14 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } } finally { consumerSet.foreach(consumer => Utils.closeQuietly(consumer, "consumer")) + Utils.closeQuietly(producer, "producer") Utils.closeQuietly(client, "adminClient") } } - /** - * Waits until the metadata for the given partition has fully propagated and become consistent across all brokers. - * - * @param partition The partition whose leader metadata should be verified across all brokers. - */ - def waitForBrokerMetadataPropagation(partition: TopicPartition): Unit = { - while (brokers.exists(_.metadataCache.getPartitionLeaderEndpoint(partition.topic, partition.partition(), listenerName).isEmpty) || - brokers.map(_.metadataCache.getPartitionLeaderEndpoint(partition.topic, partition.partition(), listenerName)) - .filter(_.isPresent) - .map(_.get()) - .toSet.size != 1) - TimeUnit.MILLISECONDS.sleep(300) - } - - @Test - def testElectPreferredLeaders(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectPreferredLeaders(quorum: String): Unit = { client = createAdminClient val prefer0 = Seq(0, 1, 2) @@ -3101,12 +2846,12 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val prior1 = brokers.head.metadataCache.getPartitionLeaderEndpoint(partition1.topic, partition1.partition(), listenerName).get.id() val prior2 = brokers.head.metadataCache.getPartitionLeaderEndpoint(partition2.topic, partition2.partition(), listenerName).get.id() - var reassignmentMap = Map.empty[TopicPartition, Optional[NewPartitionReassignment]] + var m = Map.empty[TopicPartition, Optional[NewPartitionReassignment]] if (prior1 != preferred) - reassignmentMap += partition1 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) + m += partition1 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) if (prior2 != preferred) - reassignmentMap += partition2 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) - client.alterPartitionReassignments(reassignmentMap.asJava).all().get() + m += partition2 -> Optional.of(new NewPartitionReassignment(newAssignment.map(Int.box).asJava)) + client.alterPartitionReassignments(m.asJava).all().get() TestUtils.waitUntilTrue( () => preferredLeader(partition1) == preferred && preferredLeader(partition2) == preferred, @@ -3122,7 +2867,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, 0) // Noop election - var electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1)) + var electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava) val exception = electResult.partitions.get.get(partition1).get assertEquals(classOf[ElectionNotNeededException], exception.getClass) TestUtils.assertLeader(client, partition1, 0) @@ -3134,13 +2879,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, 0) // Now change the preferred leader to 1 - waitForBrokerMetadataPropagation(partition1) - waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer1) // meaningful election - electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1)) - assertEquals(util.Set.of(partition1), electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava) + assertEquals(Set(partition1).asJava, electResult.partitions.get.keySet) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) TestUtils.assertLeader(client, partition1, 1) @@ -3167,38 +2910,32 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // unknown topic val unknownPartition = new TopicPartition("topic-does-not-exist", 0) - electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(unknownPartition)) - assertEquals(util.Set.of(unknownPartition), electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, Set(unknownPartition).asJava) + assertEquals(Set(unknownPartition).asJava, electResult.partitions.get.keySet) assertUnknownTopicOrPartition(unknownPartition, electResult) TestUtils.assertLeader(client, partition1, 1) TestUtils.assertLeader(client, partition2, 1) // Now change the preferred leader to 2 - waitForBrokerMetadataPropagation(partition1) - waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer2) // mixed results - electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(unknownPartition, partition1)) - assertEquals(util.Set.of(unknownPartition, partition1), electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, Set(unknownPartition, partition1).asJava) + assertEquals(Set(unknownPartition, partition1).asJava, electResult.partitions.get.keySet) TestUtils.assertLeader(client, partition1, 2) TestUtils.assertLeader(client, partition2, 1) assertUnknownTopicOrPartition(unknownPartition, electResult) // elect preferred leader for partition 2 - electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition2)) - assertEquals(util.Set.of(partition2), electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition2).asJava) + assertEquals(Set(partition2).asJava, electResult.partitions.get.keySet) assertFalse(electResult.partitions.get.get(partition2).isPresent) TestUtils.assertLeader(client, partition2, 2) // Now change the preferred leader to 1 - waitForBrokerMetadataPropagation(partition1) - waitForBrokerMetadataPropagation(partition2) changePreferredLeader(prefer1) // but shut it down... killBroker(1) - waitForBrokerMetadataPropagation(partition1) - waitForBrokerMetadataPropagation(partition2) TestUtils.waitForBrokersOutOfIsr(client, Set(partition1, partition2), Set(1)) def assertPreferredLeaderNotAvailable( @@ -3214,8 +2951,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // ... now what happens if we try to elect the preferred leader and it's down? val shortTimeout = new ElectLeadersOptions().timeoutMs(10000) - electResult = client.electLeaders(ElectionType.PREFERRED, util.Set.of(partition1), shortTimeout) - assertEquals(util.Set.of(partition1), electResult.partitions.get.keySet) + electResult = client.electLeaders(ElectionType.PREFERRED, Set(partition1).asJava, shortTimeout) + assertEquals(Set(partition1).asJava, electResult.partitions.get.keySet) assertPreferredLeaderNotAvailable(partition1, electResult) TestUtils.assertLeader(client, partition1, 2) @@ -3231,11 +2968,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, 2) } - @Test - def testElectUncleanLeadersForOnePartition(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersForOnePartition(quorum: String): Unit = { // Case: unclean leader election with one topic partition client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3253,17 +2990,17 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) TestUtils.assertLeader(client, partition1, broker2) } - @Test - def testElectUncleanLeadersForManyPartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersForManyPartitions(quorum: String): Unit = { // Case: unclean leader election with many topic partitions client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3290,7 +3027,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1, partition2)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1, partition2).asJava) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) electResult.partitions.get.get(partition2) @@ -3299,11 +3036,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, broker2) } - @Test - def testElectUncleanLeadersForAllPartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersForAllPartitions(quorum: String): Unit = { // Case: noop unclean leader election and valid unclean leader election for all partitions client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3339,11 +3076,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, broker3) } - @Test - def testElectUncleanLeadersForUnknownPartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersForUnknownPartitions(quorum: String): Unit = { // Case: unclean leader election for unknown topic client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3360,16 +3097,16 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, new TopicPartition(topic, 0), broker1) - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(unknownPartition, unknownTopic)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(unknownPartition, unknownTopic).asJava) assertTrue(electResult.partitions.get.get(unknownPartition).get.isInstanceOf[UnknownTopicOrPartitionException]) assertTrue(electResult.partitions.get.get(unknownTopic).get.isInstanceOf[UnknownTopicOrPartitionException]) } - @Test - def testElectUncleanLeadersWhenNoLiveBrokers(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersWhenNoLiveBrokers(quorum: String): Unit = { // Case: unclean leader election with no live brokers client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3390,15 +3127,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { killBroker(broker1) TestUtils.assertNoLeader(client, partition1) - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) assertTrue(electResult.partitions.get.get(partition1).get.isInstanceOf[EligibleLeadersNotAvailableException]) } - @Test - def testElectUncleanLeadersNoop(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersNoop(quorum: String): Unit = { // Case: noop unclean leader election with explicit topic partitions client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3418,15 +3155,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition1, broker2) brokers(broker1).startup() - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1).asJava) assertTrue(electResult.partitions.get.get(partition1).get.isInstanceOf[ElectionNotNeededException]) } - @Test - def testElectUncleanLeadersAndNoop(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testElectUncleanLeadersAndNoop(quorum: String): Unit = { // Case: one noop unclean leader election and one valid unclean leader election client = createAdminClient - disableEligibleLeaderReplicas(client) val broker1 = 1 val broker2 = 2 @@ -3454,7 +3191,7 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { brokers(broker2).startup() TestUtils.waitForOnlineBroker(client, broker2) - val electResult = client.electLeaders(ElectionType.UNCLEAN, util.Set.of(partition1, partition2)) + val electResult = client.electLeaders(ElectionType.UNCLEAN, Set(partition1, partition2).asJava) electResult.partitions.get.get(partition1) .ifPresent(t => fail(s"Unexpected exception during leader election: $t for partition $partition1")) assertTrue(electResult.partitions.get.get(partition2).get.isInstanceOf[ElectionNotNeededException]) @@ -3462,8 +3199,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TestUtils.assertLeader(client, partition2, broker3) } - @Test - def testListReassignmentsDoesNotShowNonReassigningPartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListReassignmentsDoesNotShowNonReassigningPartitions(quorum: String): Unit = { client = createAdminClient // Create topics @@ -3471,29 +3209,31 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic, replicationFactor = 3) val tp = new TopicPartition(topic, 0) - val reassignmentsMap = client.listPartitionReassignments(util.Set.of(tp)).reassignments().get() + val reassignmentsMap = client.listPartitionReassignments(Set(tp).asJava).reassignments().get() assertEquals(0, reassignmentsMap.size()) val allReassignmentsMap = client.listPartitionReassignments().reassignments().get() assertEquals(0, allReassignmentsMap.size()) } - @Test - def testListReassignmentsDoesNotShowDeletedPartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListReassignmentsDoesNotShowDeletedPartitions(quorum: String): Unit = { client = createAdminClient val topic = "list-reassignments-no-reassignments" val tp = new TopicPartition(topic, 0) - val reassignmentsMap = client.listPartitionReassignments(util.Set.of(tp)).reassignments().get() + val reassignmentsMap = client.listPartitionReassignments(Set(tp).asJava).reassignments().get() assertEquals(0, reassignmentsMap.size()) val allReassignmentsMap = client.listPartitionReassignments().reassignments().get() assertEquals(0, allReassignmentsMap.size()) } - @Test - def testValidIncrementalAlterConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testValidIncrementalAlterConfigs(quorum: String): Unit = { client = createAdminClient // Create topics @@ -3509,31 +3249,31 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic2) // Alter topic configs - var topic1AlterConfigs = util.List.of( + var topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE), AlterConfigOp.OpType.APPEND), new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ) + ).asJavaCollection // Test SET and APPEND on previously unset properties - var topic2AlterConfigs = util.List.of( + var topic2AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.APPEND) - ) + ).asJavaCollection - var alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs, - topic2Resource, topic2AlterConfigs - )) + var alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs, + topic2Resource -> topic2AlterConfigs + ).asJava) - assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) + assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) alterResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated correctly - var describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) + var describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) var configs = describeResult.all.get assertEquals(2, configs.size) @@ -3547,27 +3287,27 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals("delete,compact", configs.get(topic2Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value) // verify subtract operation, including from an empty property - topic1AlterConfigs = util.List.of( + topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.SUBTRACT), new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, "0"), AlterConfigOp.OpType.SUBTRACT) - ) + ).asJava // subtract all from this list property - topic2AlterConfigs = util.List.of( + topic2AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE), AlterConfigOp.OpType.SUBTRACT) - ) + ).asJavaCollection - alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs, - topic2Resource, topic2AlterConfigs - )) - assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs, + topic2Resource -> topic2AlterConfigs + ).asJava) + assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) alterResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated correctly - describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) + describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) configs = describeResult.all.get assertEquals(2, configs.size) @@ -3578,36 +3318,37 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals("", configs.get(topic2Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value ) // Alter topics with validateOnly=true - topic1AlterConfigs = util.List.of( + topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT), AlterConfigOp.OpType.APPEND) - ) + ).asJava - alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs - ), new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs + ).asJava, new AlterConfigsOptions().validateOnly(true)) alterResult.all.get // Verify that topics were not updated due to validateOnly = true - describeResult = client.describeConfigs(util.List.of(topic1Resource)) + describeResult = client.describeConfigs(Seq(topic1Resource).asJava) configs = describeResult.all.get assertEquals("delete", configs.get(topic1Resource).get(TopicConfig.CLEANUP_POLICY_CONFIG).value) // Alter topics with validateOnly=true with invalid configs - topic1AlterConfigs = util.List.of( + topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "zip"), AlterConfigOp.OpType.SET) - ) + ).asJava - alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs - ), new AlterConfigsOptions().validateOnly(true)) + alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs + ).asJava, new AlterConfigsOptions().validateOnly(true)) - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values().get(topic1Resource), + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], "Invalid value zip for configuration compression.type: String must be one of: uncompressed, zstd, lz4, snappy, gzip, producer") } - @Test - def testAppendAlreadyExistsConfigsAndSubtractNotExistsConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAppendAlreadyExistsConfigsAndSubtractNotExistsConfigs(quorum: String): Unit = { client = createAdminClient // Create topics @@ -3623,100 +3364,103 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic, numPartitions = 1, replicationFactor = 1, topicCreateConfigs) // Append value that is already present - val topicAppendConfigs = util.List.of( + val topicAppendConfigs = Seq( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, appendValues), AlterConfigOp.OpType.APPEND), - ) + ).asJavaCollection - val appendResult = client.incrementalAlterConfigs(util.Map.of(topicResource, topicAppendConfigs)) + val appendResult = client.incrementalAlterConfigs(Map(topicResource -> topicAppendConfigs).asJava) appendResult.all.get // Subtract values that are not present - val topicSubtractConfigs = util.List.of( + val topicSubtractConfigs = Seq( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, subtractValues), AlterConfigOp.OpType.SUBTRACT) - ) - val subtractResult = client.incrementalAlterConfigs(util.Map.of(topicResource, topicSubtractConfigs)) + ).asJavaCollection + val subtractResult = client.incrementalAlterConfigs(Map(topicResource -> topicSubtractConfigs).asJava) subtractResult.all.get ensureConsistentKRaftMetadata() // Verify that topics were updated correctly - val describeResult = client.describeConfigs(util.List.of(topicResource)) + val describeResult = client.describeConfigs(Seq(topicResource).asJava) val configs = describeResult.all.get assertEquals(appendValues, configs.get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG).value) } - @Test - def testIncrementalAlterConfigsDeleteAndSetBrokerConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsDeleteAndSetBrokerConfigs(quorum: String): Unit = { client = createAdminClient val broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0") - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "456"), AlterConfigOp.OpType.SET) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). + val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - "123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) + ("123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, ""))) }, "Expected to see the broker properties we just set", pause=25) - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "654"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "987"), AlterConfigOp.OpType.SET) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). + val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - "".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + ("".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "654".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "987".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) + "987".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) }, "Expected to see the broker properties we just modified", pause=25) } - @Test - def testIncrementalAlterConfigsDeleteBrokerConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsDeleteBrokerConfigs(quorum: String): Unit = { client = createAdminClient val broker0Resource = new ConfigResource(ConfigResource.Type.BROKER, "0") - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "123"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "456"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "789"), AlterConfigOp.OpType.SET) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). + val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - "123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + ("123".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "456".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "789".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) + "789".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) }, "Expected to see the broker properties we just set", pause=25) - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, ""), AlterConfigOp.OpType.DELETE), new AlterConfigOp(new ConfigEntry(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""), AlterConfigOp.OpType.DELETE) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => { - val broker0Configs = client.describeConfigs(util.List.of(broker0Resource)). + val broker0Configs = client.describeConfigs(Seq(broker0Resource).asJava). all().get().get(broker0Resource).entries().asScala.map(entry => (entry.name, entry.value)).toMap - "".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && + ("".equals(broker0Configs.getOrElse(QuotaConfig.LEADER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && "".equals(broker0Configs.getOrElse(QuotaConfig.FOLLOWER_REPLICATION_THROTTLED_RATE_CONFIG, "")) && - "".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, "")) + "".equals(broker0Configs.getOrElse(QuotaConfig.REPLICA_ALTER_LOG_DIRS_IO_MAX_BYTES_PER_SECOND_CONFIG, ""))) }, "Expected to see the broker properties we just removed to be deleted", pause=25) } - @Test - def testInvalidIncrementalAlterConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidIncrementalAlterConfigs(quorum: String): Unit = { client = createAdminClient // Create topics @@ -3729,25 +3473,25 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { createTopic(topic2) // Add duplicate Keys for topic1 - var topic1AlterConfigs = util.List.of( + var topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.75"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.65"), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), AlterConfigOp.OpType.SET) // valid entry - ) + ).asJavaCollection // Add valid config for topic2 - var topic2AlterConfigs = util.List.of( + var topic2AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection - var alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs, - topic2Resource, topic2AlterConfigs - )) - assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) + var alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs, + topic2Resource -> topic2AlterConfigs + ).asJava) + assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) // InvalidRequestException error for topic1 - assertFutureThrows(classOf[InvalidRequestException], alterResult.values().get(topic1Resource), + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidRequestException], "Error due to duplicate config keys") // Operation should succeed for topic2 @@ -3755,129 +3499,117 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { ensureConsistentKRaftMetadata() // Verify that topic1 is not config not updated, and topic2 config is updated - val describeResult = client.describeConfigs(util.List.of(topic1Resource, topic2Resource)) + val describeResult = client.describeConfigs(Seq(topic1Resource, topic2Resource).asJava) val configs = describeResult.all.get assertEquals(2, configs.size) assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topic1Resource).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, configs.get(topic1Resource).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(topic1Resource).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) assertEquals("0.9", configs.get(topic2Resource).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) // Check invalid use of append/subtract operation types - topic1AlterConfigs = util.List.of( + topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), AlterConfigOp.OpType.APPEND) - ) + ).asJavaCollection - topic2AlterConfigs = util.List.of( + topic2AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), AlterConfigOp.OpType.SUBTRACT) - ) + ).asJavaCollection - alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs, - topic2Resource, topic2AlterConfigs - )) - assertEquals(util.Set.of(topic1Resource, topic2Resource), alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs, + topic2Resource -> topic2AlterConfigs + ).asJava) + assertEquals(Set(topic1Resource, topic2Resource).asJava, alterResult.values.keySet) - assertFutureThrows(classOf[InvalidConfigurationException],alterResult.values().get(topic1Resource), + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], "Can't APPEND to key compression.type because its type is not LIST.") - assertFutureThrows(classOf[InvalidConfigurationException],alterResult.values().get(topic2Resource), + assertFutureThrows(alterResult.values().get(topic2Resource), classOf[InvalidConfigurationException], "Can't SUBTRACT to key compression.type because its type is not LIST.") // Try to add invalid config - topic1AlterConfigs = util.List.of( + topic1AlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection - alterResult = client.incrementalAlterConfigs(util.Map.of( - topic1Resource, topic1AlterConfigs - )) - assertEquals(util.Set.of(topic1Resource), alterResult.values.keySet) + alterResult = client.incrementalAlterConfigs(Map( + topic1Resource -> topic1AlterConfigs + ).asJava) + assertEquals(Set(topic1Resource).asJava, alterResult.values.keySet) - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values().get(topic1Resource), + assertFutureThrows(alterResult.values().get(topic1Resource), classOf[InvalidConfigurationException], "Invalid value 1.1 for configuration min.cleanable.dirty.ratio: Value must be no more than 1") } - @Test - def testInvalidAlterPartitionReassignments(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidAlterPartitionReassignments(quorum: String): Unit = { client = createAdminClient val topic = "alter-reassignments-topic-1" val tp1 = new TopicPartition(topic, 0) val tp2 = new TopicPartition(topic, 1) val tp3 = new TopicPartition(topic, 2) - createTopic(topic, numPartitions = 4, replicationFactor = 2) + createTopic(topic, numPartitions = 4) + val validAssignment = Optional.of(new NewPartitionReassignment( (0 until brokerCount).map(_.asInstanceOf[Integer]).asJava )) - val alterOptions = new AlterPartitionReassignmentsOptions - alterOptions.allowReplicationFactorChange(false) - val alterReplicaNumberTo1 = Optional.of(new NewPartitionReassignment(util.List.of(1.asInstanceOf[Integer]))) - val alterReplicaNumberTo2 = Optional.of(new NewPartitionReassignment((0 until brokerCount - 1).map(_.asInstanceOf[Integer]).asJava)) - val alterReplicaNumberTo3 = Optional.of(new NewPartitionReassignment((0 until brokerCount).map(_.asInstanceOf[Integer]).asJava)) - val alterReplicaResults = client.alterPartitionReassignments(util.Map.of( - tp1, alterReplicaNumberTo1, - tp2, alterReplicaNumberTo2, - tp3, alterReplicaNumberTo3, - ), alterOptions).values() - assertDoesNotThrow(() => alterReplicaResults.get(tp2).get()) - assertEquals("The replication factor is changed from 2 to 1", - assertFutureThrows(classOf[InvalidReplicationFactorException], alterReplicaResults.get(tp1)).getMessage) - assertEquals("The replication factor is changed from 2 to 3", - assertFutureThrows(classOf[InvalidReplicationFactorException], alterReplicaResults.get(tp3)).getMessage) - val nonExistentTp1 = new TopicPartition("topicA", 0) val nonExistentTp2 = new TopicPartition(topic, 4) - val nonExistentPartitionsResult = client.alterPartitionReassignments(util.Map.of( - tp1, validAssignment, - tp2, validAssignment, - tp3, validAssignment, - nonExistentTp1, validAssignment, - nonExistentTp2, validAssignment - )).values() - assertFutureThrows(classOf[UnknownTopicOrPartitionException], nonExistentPartitionsResult.get(nonExistentTp1)) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], nonExistentPartitionsResult.get(nonExistentTp2)) + val nonExistentPartitionsResult = client.alterPartitionReassignments(Map( + tp1 -> validAssignment, + tp2 -> validAssignment, + tp3 -> validAssignment, + nonExistentTp1 -> validAssignment, + nonExistentTp2 -> validAssignment + ).asJava).values() + assertFutureThrows(nonExistentPartitionsResult.get(nonExistentTp1), classOf[UnknownTopicOrPartitionException]) + assertFutureThrows(nonExistentPartitionsResult.get(nonExistentTp2), classOf[UnknownTopicOrPartitionException]) val extraNonExistentReplica = Optional.of(new NewPartitionReassignment((0 until brokerCount + 1).map(_.asInstanceOf[Integer]).asJava)) val negativeIdReplica = Optional.of(new NewPartitionReassignment(Seq(-3, -2, -1).map(_.asInstanceOf[Integer]).asJava)) val duplicateReplica = Optional.of(new NewPartitionReassignment(Seq(0, 1, 1).map(_.asInstanceOf[Integer]).asJava)) - val invalidReplicaResult = client.alterPartitionReassignments(util.Map.of( - tp1, extraNonExistentReplica, - tp2, negativeIdReplica, - tp3, duplicateReplica - )).values() - assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp1)) - assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp2)) - assertFutureThrows(classOf[InvalidReplicaAssignmentException], invalidReplicaResult.get(tp3)) + val invalidReplicaResult = client.alterPartitionReassignments(Map( + tp1 -> extraNonExistentReplica, + tp2 -> negativeIdReplica, + tp3 -> duplicateReplica + ).asJava).values() + assertFutureThrows(invalidReplicaResult.get(tp1), classOf[InvalidReplicaAssignmentException]) + assertFutureThrows(invalidReplicaResult.get(tp2), classOf[InvalidReplicaAssignmentException]) + assertFutureThrows(invalidReplicaResult.get(tp3), classOf[InvalidReplicaAssignmentException]) } - @Test - def testLongTopicNames(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLongTopicNames(quorum: String): Unit = { val client = createAdminClient val longTopicName = String.join("", Collections.nCopies(249, "x")) val invalidTopicName = String.join("", Collections.nCopies(250, "x")) - val newTopics2 = util.List.of(new NewTopic(invalidTopicName, 3, 3.toShort), + val newTopics2 = Seq(new NewTopic(invalidTopicName, 3, 3.toShort), new NewTopic(longTopicName, 3, 3.toShort)) - val results = client.createTopics(newTopics2).values() + val results = client.createTopics(newTopics2.asJava).values() assertTrue(results.containsKey(longTopicName)) results.get(longTopicName).get() assertTrue(results.containsKey(invalidTopicName)) - assertFutureThrows(classOf[InvalidTopicException], results.get(invalidTopicName)) - assertFutureThrows(classOf[InvalidTopicException], - client.alterReplicaLogDirs( - util.Map.of(new TopicPartitionReplica(longTopicName, 0, 0), brokers(0).config.logDirs.get(0))).all()) + assertFutureThrows(results.get(invalidTopicName), classOf[InvalidTopicException]) + assertFutureThrows(client.alterReplicaLogDirs( + Map(new TopicPartitionReplica(longTopicName, 0, 0) -> brokers(0).config.logDirs(0)).asJava).all(), + classOf[InvalidTopicException]) client.close() } // Verify that createTopics and alterConfigs fail with null values - @Test - def testNullConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testNullConfigs(quorum: String): Unit = { def validateLogConfig(compressionType: String): Unit = { ensureConsistentKRaftMetadata() - val topicProps = brokers.head.metadataCache.topicConfig(topic) - val logConfig = LogConfig.fromProps(util.Map.of[String, AnyRef], topicProps) + val topicProps = brokers.head.metadataCache.asInstanceOf[KRaftMetadataCache].topicConfig(topic) + val logConfig = LogConfig.fromProps(Collections.emptyMap[String, AnyRef], topicProps) assertEquals(compressionType, logConfig.originals.get(TopicConfig.COMPRESSION_TYPE_CONFIG)) assertNull(logConfig.originals.get(TopicConfig.RETENTION_BYTES_CONFIG)) @@ -3890,30 +3622,33 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { TopicConfig.COMPRESSION_TYPE_CONFIG -> "producer" ).asJava val newTopic = new NewTopic(topic, 2, brokerCount.toShort) - assertFutureThrows(classOf[InvalidConfigurationException], - client.createTopics(util.List.of(newTopic.configs(invalidConfigs))).all, + assertFutureThrows( + client.createTopics(Collections.singletonList(newTopic.configs(invalidConfigs))).all, + classOf[InvalidConfigurationException], "Null value not supported for topic configs: retention.bytes" ) - val validConfigs = util.Map.of[String, String](TopicConfig.COMPRESSION_TYPE_CONFIG, "producer") - client.createTopics(util.List.of(newTopic.configs(validConfigs))).all.get() + val validConfigs = Map[String, String](TopicConfig.COMPRESSION_TYPE_CONFIG -> "producer").asJava + client.createTopics(Collections.singletonList(newTopic.configs(validConfigs))).all.get() waitForTopics(client, expectedPresent = Seq(topic), expectedMissing = List()) validateLogConfig(compressionType = "producer") val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val alterOps = util.List.of( + val alterOps = Seq( new AlterConfigOp(new ConfigEntry(TopicConfig.RETENTION_BYTES_CONFIG, null), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), AlterConfigOp.OpType.SET) ) - assertFutureThrows(classOf[InvalidRequestException], - client.incrementalAlterConfigs(util.Map.of(topicResource, alterOps)).all, + assertFutureThrows( + client.incrementalAlterConfigs(Map(topicResource -> alterOps.asJavaCollection).asJava).all, + classOf[InvalidRequestException], "Null value not supported for : retention.bytes" ) validateLogConfig(compressionType = "producer") } - @Test - def testDescribeConfigsForLog4jLogLevels(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeConfigsForLog4jLogLevels(quorum: String): Unit = { client = createAdminClient LoggerFactory.getLogger("kafka.cluster.Replica").trace("Message to create the logger") val loggerConfig = describeBrokerLoggers() @@ -3928,8 +3663,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertTrue(clusterReplicaLogLevel.synonyms().isEmpty) } - @Test - def testIncrementalAlterConfigsForLog4jLogLevels(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevels(quorum: String): Unit = { client = createAdminClient val ancestorLogger = "kafka" @@ -3940,9 +3676,9 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val initialReplicaManagerLogLevel = initialLoggerConfig.get("kafka.server.ReplicaManager").value() val newAncestorLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL - val alterAncestorLoggerEntry = util.List.of( + val alterAncestorLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry(ancestorLogger, newAncestorLogLevel), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection // Test validateOnly does not change anything alterBrokerLoggers(alterAncestorLoggerEntry, validateOnly = true) val validatedLoggerConfig = describeBrokerLoggers() @@ -3960,19 +3696,19 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(newAncestorLogLevel, changedAncestorLoggerConfig.get("kafka.server.ReplicaManager").value()) // alter the LogCleaner's logger so we can later test resetting it - val alterLogCleanerLoggerEntry = util.List.of( + val alterLogCleanerLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection alterBrokerLoggers(alterLogCleanerLoggerEntry) val changedBrokerLoggerConfig = describeBrokerLoggers() assertEquals(LogLevelConfig.ERROR_LOG_LEVEL, changedBrokerLoggerConfig.get("kafka.log.LogCleaner").value()) // properly test various set operations and one delete - val alterLogLevelsEntries = util.List.of( + val alterLogLevelsEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry("kafka.log.LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET), new AlterConfigOp(new ConfigEntry("kafka.server.ReplicaManager", LogLevelConfig.TRACE_LOG_LEVEL), AlterConfigOp.OpType.SET), - ) + ).asJavaCollection alterBrokerLoggers(alterLogLevelsEntries) val alteredLoggerConfig = describeBrokerLoggers() assertEquals(newAncestorLogLevel, alteredLoggerConfig.get(ancestorLogger).value()) @@ -3988,80 +3724,84 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { * 4. Change kafka logger to ERROR * 5. Ensure the kafka.server.ControllerServer logger's level is ERROR (the current kafka logger level) */ - @Test - def testIncrementalAlterConfigsForLog4jLogLevelsCanResetLoggerToCurrentRoot(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevelsCanResetLoggerToCurrentRoot(quorum: String): Unit = { client = createAdminClient val ancestorLogger = "kafka" // step 1 - configure kafka logger val initialAncestorLogLevel = LogLevelConfig.TRACE_LOG_LEVEL - val alterAncestorLoggerEntry = util.List.of( + val alterAncestorLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry(ancestorLogger, initialAncestorLogLevel), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection alterBrokerLoggers(alterAncestorLoggerEntry) val initialLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, initialLoggerConfig.get(ancestorLogger).value()) assertEquals(initialAncestorLogLevel, initialLoggerConfig.get("kafka.server.ControllerServer").value()) // step 2 - change ControllerServer logger to INFO - val alterControllerLoggerEntry = util.List.of( + val alterControllerLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection alterBrokerLoggers(alterControllerLoggerEntry) val changedControllerLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, changedControllerLoggerConfig.get(ancestorLogger).value()) assertEquals(LogLevelConfig.INFO_LOG_LEVEL, changedControllerLoggerConfig.get("kafka.server.ControllerServer").value()) // step 3 - unset ControllerServer logger - val deleteControllerLoggerEntry = util.List.of( + val deleteControllerLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.ControllerServer", ""), AlterConfigOp.OpType.DELETE) - ) + ).asJavaCollection alterBrokerLoggers(deleteControllerLoggerEntry) val deletedControllerLoggerConfig = describeBrokerLoggers() assertEquals(initialAncestorLogLevel, deletedControllerLoggerConfig.get(ancestorLogger).value()) assertEquals(initialAncestorLogLevel, deletedControllerLoggerConfig.get("kafka.server.ControllerServer").value()) val newAncestorLogLevel = LogLevelConfig.ERROR_LOG_LEVEL - val newAlterAncestorLoggerEntry = util.List.of( + val newAlterAncestorLoggerEntry = Seq( new AlterConfigOp(new ConfigEntry(ancestorLogger, newAncestorLogLevel), AlterConfigOp.OpType.SET) - ) + ).asJavaCollection alterBrokerLoggers(newAlterAncestorLoggerEntry) val newAncestorLoggerConfig = describeBrokerLoggers() assertEquals(newAncestorLogLevel, newAncestorLoggerConfig.get(ancestorLogger).value()) assertEquals(newAncestorLogLevel, newAncestorLoggerConfig.get("kafka.server.ControllerServer").value()) } - @Test - def testIncrementalAlterConfigsForLog4jLogLevelsCanSetToRootLogger(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevelsCanSetToRootLogger(quorum: String): Unit = { client = createAdminClient val initialLoggerConfig = describeBrokerLoggers() - val initialRootLogLevel = initialLoggerConfig.get(LoggingController.ROOT_LOGGER).value() + val initialRootLogLevel = initialLoggerConfig.get(Log4jController.ROOT_LOGGER).value() val newRootLogLevel = LogLevelConfig.DEBUG_LOG_LEVEL - val alterRootLoggerEntry = util.List.of( - new AlterConfigOp(new ConfigEntry(LoggingController.ROOT_LOGGER, newRootLogLevel), AlterConfigOp.OpType.SET) - ) + val alterRootLoggerEntry = Seq( + new AlterConfigOp(new ConfigEntry(Log4jController.ROOT_LOGGER, newRootLogLevel), AlterConfigOp.OpType.SET) + ).asJavaCollection alterBrokerLoggers(alterRootLoggerEntry, validateOnly = true) val validatedRootLoggerConfig = describeBrokerLoggers() - assertEquals(initialRootLogLevel, validatedRootLoggerConfig.get(LoggingController.ROOT_LOGGER).value()) + assertEquals(initialRootLogLevel, validatedRootLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) alterBrokerLoggers(alterRootLoggerEntry) val changedRootLoggerConfig = describeBrokerLoggers() - assertEquals(newRootLogLevel, changedRootLoggerConfig.get(LoggingController.ROOT_LOGGER).value()) + assertEquals(newRootLogLevel, changedRootLoggerConfig.get(Log4jController.ROOT_LOGGER).value()) } - @Test - def testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevelsCannotResetRootLogger(quorum: String): Unit = { client = createAdminClient - val deleteRootLoggerEntry = util.List.of( - new AlterConfigOp(new ConfigEntry(LoggingController.ROOT_LOGGER, ""), AlterConfigOp.OpType.DELETE) - ) + val deleteRootLoggerEntry = Seq( + new AlterConfigOp(new ConfigEntry(Log4jController.ROOT_LOGGER, ""), AlterConfigOp.OpType.DELETE) + ).asJavaCollection assertTrue(assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(deleteRootLoggerEntry)).getCause.isInstanceOf[InvalidRequestException]) } - @Test - def testIncrementalAlterConfigsForLog4jLogLevelsDoesNotWorkWithInvalidConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterConfigsForLog4jLogLevelsDoesNotWorkWithInvalidConfigs(quorum: String): Unit = { client = createAdminClient val validLoggerName = "kafka.server.KafkaRequestHandler" val expectedValidLoggerLogLevel = describeBrokerLoggers().get(validLoggerName) @@ -4069,100 +3809,96 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { assertEquals(expectedValidLoggerLogLevel, describeBrokerLoggers().get(validLoggerName)) } - val appendLogLevelEntries = util.List.of( + val appendLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.APPEND) // append is not supported - ) + ).asJavaCollection assertInstanceOf(classOf[InvalidRequestException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(appendLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val subtractLogLevelEntries = util.List.of( + val subtractLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SUBTRACT) // subtract is not supported - ) + ).asJavaCollection assertInstanceOf(classOf[InvalidRequestException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(subtractLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val invalidLogLevelLogLevelEntries = util.List.of( + val invalidLogLevelLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("kafka.network.SocketServer", "OFF"), AlterConfigOp.OpType.SET) // OFF is not a valid log level - ) + ).asJavaCollection assertInstanceOf(classOf[InvalidConfigurationException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLogLevelLogLevelEntries)).getCause) assertLogLevelDidNotChange() - val invalidLoggerNameLogLevelEntries = util.List.of( + val invalidLoggerNameLogLevelEntries = Seq( new AlterConfigOp(new ConfigEntry("kafka.server.KafkaRequestHandler", LogLevelConfig.INFO_LOG_LEVEL), AlterConfigOp.OpType.SET), // valid new AlterConfigOp(new ConfigEntry("Some Other LogCleaner", LogLevelConfig.ERROR_LOG_LEVEL), AlterConfigOp.OpType.SET) // invalid logger name is not supported - ) + ).asJavaCollection assertInstanceOf(classOf[InvalidConfigurationException], assertThrows(classOf[ExecutionException], () => alterBrokerLoggers(invalidLoggerNameLogLevelEntries)).getCause) assertLogLevelDidNotChange() } def alterBrokerLoggers(entries: util.Collection[AlterConfigOp], validateOnly: Boolean = false): Unit = { - client.incrementalAlterConfigs(util.Map.of(brokerLoggerConfigResource, entries), new AlterConfigsOptions().validateOnly(validateOnly)) + client.incrementalAlterConfigs(Map(brokerLoggerConfigResource -> entries).asJava, new AlterConfigsOptions().validateOnly(validateOnly)) .values.get(brokerLoggerConfigResource).get() } def describeBrokerLoggers(): Config = - client.describeConfigs(util.List.of(brokerLoggerConfigResource)).values.get(brokerLoggerConfigResource).get() + client.describeConfigs(Collections.singletonList(brokerLoggerConfigResource)).values.get(brokerLoggerConfigResource).get() - @Test - def testAppendConfigToEmptyDefaultValue(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAppendConfigToEmptyDefaultValue(ignored: String): Unit = { testAppendConfig(new Properties(), "0:0", "0:0") } - @Test - def testAppendConfigToExistentValue(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAppendConfigToExistentValue(ignored: String): Unit = { val props = new Properties() props.setProperty(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, "1:1") testAppendConfig(props, "0:0", "1:1,0:0") } - private def disableEligibleLeaderReplicas(admin: Admin): Unit = { - if (metadataVersion.isAtLeast(MetadataVersion.IBP_4_1_IV0)) { - admin.updateFeatures( - util.Map.of(EligibleLeaderReplicasVersion.FEATURE_NAME, new FeatureUpdate(0, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE)), - new UpdateFeaturesOptions()).all().get() - } - } - private def testAppendConfig(props: Properties, append: String, expected: String): Unit = { client = createAdminClient createTopic(topic, topicConfig = props) val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - val topicAlterConfigs = util.List.of( + val topicAlterConfigs = Seq( new AlterConfigOp(new ConfigEntry(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, append), AlterConfigOp.OpType.APPEND), - ) + ).asJavaCollection - val alterResult = client.incrementalAlterConfigs(util.Map.of( - topicResource, topicAlterConfigs - )) + val alterResult = client.incrementalAlterConfigs(Map( + topicResource -> topicAlterConfigs + ).asJava) alterResult.all().get(15, TimeUnit.SECONDS) ensureConsistentKRaftMetadata() - val config = client.describeConfigs(util.List.of(topicResource)).all().get().get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) + val config = client.describeConfigs(List(topicResource).asJava).all().get().get(topicResource).get(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) assertEquals(expected, config.value()) } - @Test - def testListClientMetricsResources(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListClientMetricsResources(quorum: String): Unit = { client = createAdminClient - client.createTopics(util.Set.of(new NewTopic(topic, partition, 0.toShort))) + client.createTopics(Collections.singleton(new NewTopic(topic, partition, 0.toShort))) assertTrue(client.listClientMetricsResources().all().get().isEmpty) val name = "name" val configResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, name) val configEntry = new ConfigEntry("interval.ms", "111") val configOp = new AlterConfigOp(configEntry, AlterConfigOp.OpType.SET) - client.incrementalAlterConfigs(util.Map.of(configResource, util.List.of(configOp))).all().get() + client.incrementalAlterConfigs(Collections.singletonMap(configResource, Collections.singletonList(configOp))).all().get() TestUtils.waitUntilTrue(() => { val results = client.listClientMetricsResources().all().get() results.size() == 1 && results.iterator().next().equals(new ClientMetricsResourceListing(name)) }, "metadata timeout") } - @Test + @ParameterizedTest + @ValueSource(strings = Array("quorum=kraft")) @Timeout(30) - def testListClientMetricsResourcesTimeoutMs(): Unit = { + def testListClientMetricsResourcesTimeoutMs(ignored: String): Unit = { client = createInvalidAdminClient() try { val timeoutOption = new ListClientMetricsResourcesOptions().timeoutMs(0) @@ -4172,100 +3908,15 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { } finally client.close(time.Duration.ZERO) } - @Test - def testListConfigResources(): Unit = { - client = createAdminClient - - // Alter group and client metric config to add group and client metric config resource - val clientMetric = "client-metrics" - val group = "group" - val clientMetricResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, clientMetric) - val groupResource = new ConfigResource(ConfigResource.Type.GROUP, group) - val alterResult = client.incrementalAlterConfigs(util.Map.of( - clientMetricResource, - util.Set.of(new AlterConfigOp(new ConfigEntry("interval.ms", "111"), AlterConfigOp.OpType.SET)), - groupResource, - util.Set.of(new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "50000"), AlterConfigOp.OpType.SET)) - )) - assertEquals(util.Set.of(clientMetricResource, groupResource), alterResult.values.keySet) - alterResult.all.get(15, TimeUnit.SECONDS) - - ensureConsistentKRaftMetadata() - - // non-specified config resource type retrieves all config resources - var configResources = client.listConfigResources().all().get() - assertEquals(9, configResources.size()) - brokerServers.foreach(b => { - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, b.config.nodeId.toString))) - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) - }) - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) - assertTrue(configResources.contains(groupResource)) - assertTrue(configResources.contains(clientMetricResource)) - - // BROKER config resource type retrieves only broker config resources - configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.BROKER), new ListConfigResourcesOptions()).all().get() - assertEquals(3, configResources.size()) - brokerServers.foreach(b => { - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, b.config.nodeId.toString))) - assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) - }) - assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) - assertFalse(configResources.contains(groupResource)) - assertFalse(configResources.contains(clientMetricResource)) - - // BROKER_LOGGER config resource type retrieves only broker logger config resources - configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.BROKER_LOGGER), new ListConfigResourcesOptions()).all().get() - assertEquals(3, configResources.size()) - brokerServers.foreach(b => { - assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER, b.config.nodeId.toString))) - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.BROKER_LOGGER, b.config.nodeId.toString))) - }) - assertFalse(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) - assertFalse(configResources.contains(groupResource)) - assertFalse(configResources.contains(clientMetricResource)) - - // TOPIC config resource type retrieves only topic config resources - configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.TOPIC), new ListConfigResourcesOptions()).all().get() - assertEquals(1, configResources.size()) - assertTrue(configResources.contains(new ConfigResource(ConfigResource.Type.TOPIC, Topic.GROUP_METADATA_TOPIC_NAME))) - - // GROUP config resource type retrieves only group config resources - configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.GROUP), new ListConfigResourcesOptions()).all().get() - assertEquals(1, configResources.size()) - assertTrue(configResources.contains(groupResource)) - - // CLIENT_METRICS config resource type retrieves only client metric config resources - configResources = client.listConfigResources(util.Set.of(ConfigResource.Type.CLIENT_METRICS), new ListConfigResourcesOptions()).all().get() - assertEquals(1, configResources.size()) - assertTrue(configResources.contains(clientMetricResource)) - - // UNKNOWN config resource type gets UNSUPPORTED_VERSION error - assertThrows(classOf[ExecutionException], () => { - client.listConfigResources(util.Set.of(ConfigResource.Type.UNKNOWN), new ListConfigResourcesOptions()).all().get() - }) - } - - @Test - @Timeout(30) - def testListConfigResourcesTimeoutMs(): Unit = { - client = createInvalidAdminClient() - try { - val timeoutOption = new ListConfigResourcesOptions().timeoutMs(0) - val exception = assertThrows(classOf[ExecutionException], () => - client.listConfigResources(util.Set.of(), timeoutOption).all().get()) - assertInstanceOf(classOf[TimeoutException], exception.getCause) - } finally client.close(time.Duration.ZERO) - } - /** * Test that createTopics returns the dynamic configurations of the topics that were created. * * Note: this test requires some custom static broker and controller configurations, which are set up in * BaseAdminIntegrationTest.modifyConfigs. */ - @Test - def testCreateTopicsReturnsConfigs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicsReturnsConfigs(quorum: String): Unit = { client = Admin.create(super.createConfig) val newLogRetentionProperties = new Properties @@ -4283,8 +3934,8 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { val controllerNodeResource = new ConfigResource(ConfigResource.Type.BROKER, controllerServer.config.nodeId.toString) controllerServer.controller.incrementalAlterConfigs(ANONYMOUS_CONTEXT, - util.Map.of(controllerNodeResource, - util.Map.of(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP, + Collections.singletonMap(controllerNodeResource, + Collections.singletonMap(CleanerConfig.LOG_CLEANER_DELETE_RETENTION_MS_PROP, new SimpleImmutableEntry(AlterConfigOp.OpType.SET, "34"))), false).get() ensureConsistentKRaftMetadata() @@ -4298,11 +3949,11 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { s"Timed out waiting for change to ${ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG}", waitTimeMs = 60000L) - val newTopics = Seq(new NewTopic("foo", util.Map.of(0: Integer, util.List.of[Integer](1, 2), - 1: Integer, util.List.of[Integer](2, 0))). - configs(util.Map.of(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999")), + val newTopics = Seq(new NewTopic("foo", Map((0: Integer) -> Seq[Integer](1, 2).asJava, + (1: Integer) -> Seq[Integer](2, 0).asJava).asJava). + configs(Collections.singletonMap(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999")), new NewTopic("bar", 3, 3.toShort), - new NewTopic("baz", Optional.empty[Integer], Optional.empty[java.lang.Short]) + new NewTopic("baz", Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava) ) val result = client.createTopics(newTopics.asJava) result.all.get() @@ -4319,484 +3970,35 @@ class PlaintextAdminIntegrationTest extends BaseAdminIntegrationTest { // From the topic configuration defaults. assertEquals(new ConfigEntry(TopicConfig.CLEANUP_POLICY_CONFIG, "delete", - ConfigSource.DEFAULT_CONFIG, false, false, util.List.of, null, null), + ConfigSource.DEFAULT_CONFIG, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.CLEANUP_POLICY_CONFIG)) // From dynamic cluster config via the synonym LogRetentionTimeHoursProp. assertEquals(new ConfigEntry(TopicConfig.RETENTION_MS_CONFIG, "10800000", - ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG, false, false, util.List.of, null, null), + ConfigSource.DYNAMIC_DEFAULT_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.RETENTION_MS_CONFIG)) // From dynamic broker config via LogCleanerDeleteRetentionMsProp. assertEquals(new ConfigEntry(TopicConfig.DELETE_RETENTION_MS_CONFIG, "34", - ConfigSource.DYNAMIC_BROKER_CONFIG, false, false, util.List.of, null, null), + ConfigSource.DYNAMIC_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.DELETE_RETENTION_MS_CONFIG)) // From static broker config by SegmentJitterMsProp. assertEquals(new ConfigEntry(TopicConfig.SEGMENT_JITTER_MS_CONFIG, "123", - ConfigSource.STATIC_BROKER_CONFIG, false, false, util.List.of, null, null), + ConfigSource.STATIC_BROKER_CONFIG, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.SEGMENT_JITTER_MS_CONFIG)) // From static broker config by the synonym LogRollTimeHoursProp. val segmentMsPropType = ConfigSource.STATIC_BROKER_CONFIG assertEquals(new ConfigEntry(TopicConfig.SEGMENT_MS_CONFIG, "7200000", - segmentMsPropType, false, false, util.List.of, null, null), + segmentMsPropType, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.SEGMENT_MS_CONFIG)) // From the dynamic topic config. assertEquals(new ConfigEntry(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "9999999", - ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, util.List.of, null, null), + ConfigSource.DYNAMIC_TOPIC_CONFIG, false, false, Collections.emptyList(), null, null), topicConfigs.get(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG)) } - - class BackgroundConsumerSet(defaultConsumerConfig: Properties) { - private val consumerSet: scala.collection.mutable.Set[Consumer[Array[Byte], Array[Byte]]] = scala.collection.mutable.Set.empty - private val consumerThreads: scala.collection.mutable.Set[Thread] = scala.collection.mutable.Set.empty - private var startLatch: CountDownLatch = new CountDownLatch(0) - private var stopLatch: CountDownLatch = new CountDownLatch(0) - private var consumerThreadRunning = new AtomicBoolean(false) - - def addConsumer(topic: String, configOverrides: Properties = new Properties()): Unit = { - val newConsumerConfig = defaultConsumerConfig.clone().asInstanceOf[Properties] - newConsumerConfig.putAll(configOverrides) - - val consumer = createConsumer(configOverrides = newConsumerConfig) - val consumerThread = createConsumerThread(consumer, topic) - consumerSet.add(consumer) - consumerThreads.add(consumerThread) - } - - def start(): Unit = { - startLatch = new CountDownLatch(consumerSet.size) - stopLatch = new CountDownLatch(consumerSet.size) - consumerThreadRunning = new AtomicBoolean(true) - consumerThreads.foreach(_.start()) - assertTrue(startLatch.await(30000, TimeUnit.MILLISECONDS), "Failed to start consumer threads in time") - } - - def stop(): Unit = { - consumerSet.foreach(_.wakeup()) - consumerThreadRunning.set(false) - assertTrue(stopLatch.await(30000, TimeUnit.MILLISECONDS), "Failed to stop consumer threads in time") - } - - def close(): Unit = { - // stop the consumers and wait for consumer threads stopped - stop() - consumerThreads.foreach(_.join()) - } - - private def createConsumerThread[K,V](consumer: Consumer[K,V], topic: String): Thread = { - new Thread { - override def run : Unit = { - consumer.subscribe(util.Set.of(topic)) - try { - while (consumerThreadRunning.get()) { - consumer.poll(JDuration.ofSeconds(5)) - if (!consumer.assignment.isEmpty && startLatch.getCount > 0L) - startLatch.countDown() - try { - consumer.commitSync() - } catch { - case _: CommitFailedException => // Ignore and retry on next iteration. - } - } - } catch { - case _: WakeupException => // ignore - case _: InterruptException => // ignore - } finally { - consumer.close() - stopLatch.countDown() - } - } - } - } - } - - @Test - def testDescribeStreamsGroups(): Unit = { - val streamsGroupId = "stream_group_id" - val testTopicName = "test_topic" - val testNumPartitions = 1 - - val config = createConfig - client = Admin.create(config) - - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId - ) - streams.poll(JDuration.ofMillis(500L)) - - try { - TestUtils.waitUntilTrue(() => { - val firstGroup = client.listGroups().all().get().stream() - .filter(g => g.groupId() == streamsGroupId).findFirst().orElse(null) - firstGroup.groupState().orElse(null) == GroupState.STABLE && firstGroup.groupId() == streamsGroupId - }, "Stream group not stable yet") - - // Verify the describe call works correctly - val describedGroups = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() - val group = describedGroups.get(streamsGroupId) - assertNotNull(group) - assertEquals(streamsGroupId, group.groupId()) - assertFalse(group.members().isEmpty) - assertNotNull(group.subtopologies()) - assertFalse(group.subtopologies().isEmpty) - - // Verify the topology contains the expected source and sink topics - val subtopologies = group.subtopologies().asScala - assertTrue(subtopologies.exists(subtopology => - subtopology.sourceTopics().contains(testTopicName))) - - // Test describing a non-existing group - val nonExistingGroup = "non_existing_stream_group" - val describedNonExistingGroupResponse = client.describeStreamsGroups(util.List.of(nonExistingGroup)) - assertFutureThrows(classOf[GroupIdNotFoundException], describedNonExistingGroupResponse.all()) - - } finally { - Utils.closeQuietly(streams, "streams") - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testDescribeStreamsGroupsNotReady(): Unit = { - val streamsGroupId = "stream_group_id" - val testTopicName = "test_topic" - - val config = createConfig - client = Admin.create(config) - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId - ) - streams.poll(JDuration.ofMillis(500L)) - - try { - TestUtils.waitUntilTrue(() => { - val firstGroup = client.listGroups().all().get().stream() - .filter(g => g.groupId() == streamsGroupId).findFirst().orElse(null) - firstGroup.groupState().orElse(null) == GroupState.NOT_READY && firstGroup.groupId() == streamsGroupId - }, "Stream group not NOT_READY yet") - - // Verify the describe call works correctly - val describedGroups = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() - val group = describedGroups.get(streamsGroupId) - assertNotNull(group) - assertEquals(streamsGroupId, group.groupId()) - assertFalse(group.members().isEmpty) - assertNotNull(group.subtopologies()) - assertFalse(group.subtopologies().isEmpty) - - // Verify the topology contains the expected source and sink topics - val subtopologies = group.subtopologies().asScala - assertTrue(subtopologies.exists(subtopology => - subtopology.sourceTopics().contains(testTopicName))) - - } finally { - Utils.closeQuietly(streams, "streams") - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testDeleteStreamsGroups(): Unit = { - val testTopicName = "test_topic" - val testNumPartitions = 3 - val testNumStreamsGroup = 3 - - val targetDeletedGroups = util.List.of("stream_group_id_2", "stream_group_id_3") - val targetRemainingGroups = util.List.of("stream_group_id_1") - - val config = createConfig - client = Admin.create(config) - - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) - - val streamsList = scala.collection.mutable.ListBuffer[(String, AsyncKafkaConsumer[_,_])]() - - try { - for (i <- 1 to testNumStreamsGroup) { - val streamsGroupId = s"stream_group_id_$i" - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId, - ) - streams.poll(JDuration.ofMillis(500L)) - streamsList += ((streamsGroupId, streams)) - } - - TestUtils.waitUntilTrue(() => { - val groups = client.listGroups().all().get() - groups.stream() - .anyMatch(g => g.groupId().startsWith("stream_group_id_")) && testNumStreamsGroup == groups.size() - }, "Streams groups not ready to delete yet") - - // Test deletion of non-empty existing groups - var deleteStreamsGroupResult = client.deleteStreamsGroups(targetDeletedGroups) - assertFutureThrows(classOf[GroupNotEmptyException], deleteStreamsGroupResult.all()) - assertEquals(2, deleteStreamsGroupResult.deletedGroups().size()) - - // Stop and clean up the streams for the groups that are going to be deleted - streamsList - .filter { case (groupId, _) => targetDeletedGroups.contains(groupId) } - .foreach { case (_, streams) => - streams.close() - } - - val listTopicResult = client.listTopics() - assertEquals(2, listTopicResult.names().get().size()) - - // Test deletion of emptied existing streams groups - deleteStreamsGroupResult = client.deleteStreamsGroups(targetDeletedGroups) - assertEquals(2, deleteStreamsGroupResult.deletedGroups().size()) - - // Wait for the deleted groups to be removed - TestUtils.waitUntilTrue(() => { - val groupIds = client.listGroups().all().get().asScala.map(_.groupId()).toSet - targetDeletedGroups.asScala.forall(id => !groupIds.contains(id)) - }, "Deleted groups not yet deleted") - - // Verify that the deleted groups are no longer present - val remainingGroups = client.listGroups().all().get() - assertEquals(targetRemainingGroups.size(), remainingGroups.size()) - remainingGroups.stream().forEach(g => { - assertTrue(targetRemainingGroups.contains(g.groupId())) - }) - - // Test deletion of a non-existing group - val nonExistingGroup = "non_existing_stream_group" - val deleteNonExistingGroupResult = client.deleteStreamsGroups(util.List.of(nonExistingGroup)) - assertFutureThrows(classOf[GroupIdNotFoundException], deleteNonExistingGroupResult.all()) - assertEquals(deleteNonExistingGroupResult.deletedGroups().size(), 1) - - } finally{ - streamsList.foreach { case (_, streams) => - streams.close() - } - Utils.closeQuietly(client, "adminClient") - } - } - - @Test - def testListStreamsGroupOffsets(): Unit = { - val streamsGroupId = "stream_group_id" - val testTopicName = "test_topic" - val testNumPartitions = 3 - - val config = createConfig - client = Admin.create(config) - val producer = createProducer(configOverrides = new Properties()) - - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) - - // Producer sends messages - for (i <- 1 to 20) { - TestUtils.waitUntilTrue(() => { - val producerRecord = producer.send( - new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) - .get() - producerRecord != null && producerRecord.topic() == testTopicName - }, "Fail to produce record to topic") - } - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId, - ) - - try { - TestUtils.waitUntilTrue(() => { - streams.poll(JDuration.ofMillis(100L)) - !streams.assignment().isEmpty - }, "Consumer not assigned to partitions") - - streams.poll(JDuration.ofMillis(1000L)) - streams.commitSync() - - TestUtils.waitUntilTrue(() => { - val firstGroup = client.listGroups().all().get().stream().findFirst().orElse(null) - firstGroup.groupState().orElse(null) == GroupState.STABLE && firstGroup.groupId() == streamsGroupId - }, "Stream group not stable yet") - - val allTopicPartitions = client.listStreamsGroupOffsets( - util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) - ).partitionsToOffsetAndMetadata(streamsGroupId).get() - assertNotNull(allTopicPartitions) - assertEquals(allTopicPartitions.size(), 3) - allTopicPartitions.forEach((topicPartition, offsetAndMetadata) => { - assertNotNull(topicPartition) - assertNotNull(offsetAndMetadata) - assertTrue(topicPartition.topic().startsWith(testTopicName)) - assertTrue(offsetAndMetadata.offset() >= 0) - }) - - } finally { - Utils.closeQuietly(streams, "streams") - Utils.closeQuietly(client, "adminClient") - Utils.closeQuietly(producer, "producer") - } - } - - @Test - def testDeleteStreamsGroupOffsets(): Unit = { - val streamsGroupId = "stream_group_id" - val testTopicName = "test_topic" - val testNumPartitions = 3 - - val config = createConfig - client = Admin.create(config) - val producer = createProducer(configOverrides = new Properties()) - - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) - // Producer sends messages - for (i <- 1 to 20) { - TestUtils.waitUntilTrue(() => { - val producerRecord = producer.send( - new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) - .get() - producerRecord != null && producerRecord.topic() == testTopicName - }, "Fail to produce record to topic") - } - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId, - ) - - try { - TestUtils.waitUntilTrue(() => { - streams.poll(JDuration.ofMillis(100L)) - !streams.assignment().isEmpty - }, "Consumer not assigned to partitions") - - streams.poll(JDuration.ofMillis(1000L)) - streams.commitSync() - - // List streams group offsets - TestUtils.waitUntilTrue(() => { - val allTopicPartitions = client.listStreamsGroupOffsets( - util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) - ).partitionsToOffsetAndMetadata(streamsGroupId).get() - allTopicPartitions!=null && allTopicPartitions.size() == testNumPartitions - },"Streams group offsets not ready to list yet") - - // Verify running Kstreams group cannot delete its own offsets - var deleteStreamsGroupOffsetsResult = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, 0))) - assertFutureThrows(classOf[GroupSubscribedToTopicException], deleteStreamsGroupOffsetsResult.all()) - - // Verity stopped Kstreams group can delete its own offsets - streams.close() - TestUtils.waitUntilTrue(() => { - val groupDescription = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() - groupDescription.get(streamsGroupId).groupState() == GroupState.EMPTY - }, "Streams group not closed yet") - deleteStreamsGroupOffsetsResult = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, 0))) - val res = deleteStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 0)).get() - assertNull(res) - - // Verify the group offsets after deletion - val allTopicPartitions = client.listStreamsGroupOffsets( - util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) - ).partitionsToOffsetAndMetadata(streamsGroupId).get() - assertEquals(testNumPartitions-1, allTopicPartitions.size()) - - // Verify non-existing topic partition couldn't be deleted - val deleteStreamsGroupOffsetsResultWithFakeTopic = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition("mock-topic", 1))) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], deleteStreamsGroupOffsetsResultWithFakeTopic.all()) - val deleteStreamsGroupOffsetsResultWithFakePartition = client.deleteStreamsGroupOffsets(streamsGroupId, util.Set.of(new TopicPartition(testTopicName, testNumPartitions))) - assertFutureThrows(classOf[UnknownTopicOrPartitionException], deleteStreamsGroupOffsetsResultWithFakePartition.all()) - } finally { - Utils.closeQuietly(streams, "streams") - Utils.closeQuietly(client, "adminClient") - Utils.closeQuietly(producer, "producer") - } - } - - @Test - def testAlterStreamsGroupOffsets(): Unit = { - val streamsGroupId = "stream_group_id" - val testTopicName = "test_topic" - val testNumPartitions = 3 - - val config = createConfig - client = Admin.create(config) - val producer = createProducer(configOverrides = new Properties()) - - prepareTopics(List(testTopicName), testNumPartitions) - prepareRecords(testTopicName) - - // Producer sends messages - for (i <- 1 to 20) { - TestUtils.waitUntilTrue(() => { - val producerRecord = producer.send( - new ProducerRecord[Array[Byte], Array[Byte]](testTopicName, s"key-$i".getBytes(), s"value-$i".getBytes())) - .get() - producerRecord != null && producerRecord.topic() == testTopicName - }, "Fail to produce record to topic") - } - - val streams = createStreamsGroup( - inputTopic = testTopicName, - streamsGroupId = streamsGroupId, - ) - - try { - TestUtils.waitUntilTrue(() => { - streams.poll(JDuration.ofMillis(100L)) - !streams.assignment().isEmpty - }, "Consumer not assigned to partitions") - - streams.poll(JDuration.ofMillis(1000L)) - streams.commitSync() - - // List streams group offsets - TestUtils.waitUntilTrue(() => { - val allTopicPartitions = client.listStreamsGroupOffsets( - util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) - ).partitionsToOffsetAndMetadata(streamsGroupId).get() - allTopicPartitions!=null && allTopicPartitions.size() == testNumPartitions - },"Streams group offsets not ready to list yet") - - // Verity stopped Kstreams group can delete its own offsets - streams.close() - TestUtils.waitUntilTrue(() => { - val groupDescription = client.describeStreamsGroups(util.List.of(streamsGroupId)).all().get() - groupDescription.get(streamsGroupId).groupState() == GroupState.EMPTY - }, "Streams group not closed yet") - - val offsets = util.Map.of( - new TopicPartition(testTopicName, 0), new OffsetAndMetadata(1L), - new TopicPartition(testTopicName, 1), new OffsetAndMetadata(10L) - ) - val alterStreamsGroupOffsetsResult = client.alterStreamsGroupOffsets(streamsGroupId, offsets) - val res0 = alterStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 0)).get() - val res1 = alterStreamsGroupOffsetsResult.partitionResult(new TopicPartition(testTopicName, 1)).get() - assertTrue(res0 == null && res1 == null, "Alter streams group offsets should return null for each partition result") - - val allTopicPartitions = client.listStreamsGroupOffsets( - util.Map.of(streamsGroupId, new ListStreamsGroupOffsetsSpec()) - ).partitionsToOffsetAndMetadata(streamsGroupId).get() - assertNotNull(allTopicPartitions) - assertEquals(testNumPartitions, allTopicPartitions.size()) - assertEquals(1L, allTopicPartitions.get(new TopicPartition(testTopicName, 0)).offset()) - assertEquals(10L, allTopicPartitions.get(new TopicPartition(testTopicName, 1)).offset()) - - } finally { - Utils.closeQuietly(streams, "streams") - Utils.closeQuietly(client, "adminClient") - Utils.closeQuietly(producer, "producer") - } - } } object PlaintextAdminIntegrationTest { @@ -4810,20 +4012,20 @@ object PlaintextAdminIntegrationTest { retentionMs: String): Unit = { // Alter topics val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - alterConfigs.put(topicResource1, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), OpType.SET))) - alterConfigs.put(topicResource2, util.List.of( + alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MS_CONFIG, "1000"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.9"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) var alterResult = admin.incrementalAlterConfigs(alterConfigs) - assertEquals(util.Set.of(topicResource1, topicResource2), alterResult.values.keySet) + assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) alterResult.all.get // Verify that topics were updated correctly test.ensureConsistentKRaftMetadata() // Intentionally include duplicate resources to test if describeConfigs can handle them correctly. - var describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, topicResource2)) + var describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2, topicResource2).asJava) var configs = describeResult.all.get assertEquals(2, configs.size) @@ -4836,16 +4038,16 @@ object PlaintextAdminIntegrationTest { assertEquals("lz4", configs.get(topicResource2).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) // Alter topics with validateOnly=true - alterConfigs.put(topicResource1, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10"), OpType.SET))) - alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3"), OpType.SET))) + alterConfigs.put(topicResource1, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "10"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "0.3"), OpType.SET))) alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) - assertEquals(util.Set.of(topicResource1, topicResource2), alterResult.values.keySet) + assertEquals(Set(topicResource1, topicResource2).asJava, alterResult.values.keySet) alterResult.all.get // Verify that topics were not updated due to validateOnly = true test.ensureConsistentKRaftMetadata() - describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2)) + describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2).asJava) configs = describeResult.all.get assertEquals(2, configs.size) @@ -4871,61 +4073,61 @@ object PlaintextAdminIntegrationTest { // Alter configs: first and third are invalid, second is valid val alterConfigs = new util.HashMap[ConfigResource, util.Collection[AlterConfigOp]]() - alterConfigs.put(topicResource1, util.List.of( + alterConfigs.put(topicResource1, util.Arrays.asList( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) - alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), OpType.SET))) - alterConfigs.put(brokerResource, util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "snappy"), OpType.SET))) + alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) var alterResult = admin.incrementalAlterConfigs(alterConfigs) - assertEquals(util.Set.of(topicResource1, topicResource2, brokerResource), alterResult.values.keySet) - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource1)) + assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) alterResult.values.get(topicResource2).get - assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) // Verify that first and third resources were not updated and second was updated test.ensureConsistentKRaftMetadata() - var describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, brokerResource)) + var describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava) var configs = describeResult.all.get assertEquals(3, configs.size) assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(topicResource1).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) assertEquals("snappy", configs.get(topicResource2).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) // Alter configs with validateOnly = true: first and third are invalid, second is valid - alterConfigs.put(topicResource1, util.List.of( + alterConfigs.put(topicResource1, util.Arrays.asList( new AlterConfigOp(new ConfigEntry(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, "1.1"), OpType.SET), new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4"), OpType.SET) )) - alterConfigs.put(topicResource2, util.List.of(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), OpType.SET))) - alterConfigs.put(brokerResource, util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) + alterConfigs.put(topicResource2, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "gzip"), OpType.SET))) + alterConfigs.put(brokerResource, util.Arrays.asList(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "EXTERNAL://localhost:0,INTERNAL://localhost:0"), OpType.SET))) alterResult = admin.incrementalAlterConfigs(alterConfigs, new AlterConfigsOptions().validateOnly(true)) - assertEquals(util.Set.of(topicResource1, topicResource2, brokerResource), alterResult.values.keySet) - assertFutureThrows(classOf[InvalidConfigurationException], alterResult.values.get(topicResource1)) + assertEquals(Set(topicResource1, topicResource2, brokerResource).asJava, alterResult.values.keySet) + assertFutureThrows(alterResult.values.get(topicResource1), classOf[InvalidConfigurationException]) alterResult.values.get(topicResource2).get - assertFutureThrows(classOf[InvalidRequestException], alterResult.values.get(brokerResource)) + assertFutureThrows(alterResult.values.get(brokerResource), classOf[InvalidRequestException]) // Verify that no resources are updated since validate_only = true test.ensureConsistentKRaftMetadata() - describeResult = admin.describeConfigs(util.List.of(topicResource1, topicResource2, brokerResource)) + describeResult = admin.describeConfigs(Seq(topicResource1, topicResource2, brokerResource).asJava) configs = describeResult.all.get assertEquals(3, configs.size) assertEquals(LogConfig.DEFAULT_MIN_CLEANABLE_DIRTY_RATIO.toString, configs.get(topicResource1).get(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG).value) - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(topicResource1).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) assertEquals("snappy", configs.get(topicResource2).get(TopicConfig.COMPRESSION_TYPE_CONFIG).value) - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, configs.get(brokerResource).get(ServerConfigs.COMPRESSION_TYPE_CONFIG).value) } } diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala new file mode 100644 index 0000000000000..ec6ad5089c53f --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignTest.scala @@ -0,0 +1,207 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.utils.{TestInfoUtils, TestUtils} +import java.util.Properties +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.common.TopicPartition +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import scala.jdk.CollectionConverters._ + +/** + * Integration tests for the consumer that covers logic related to manual assignment. + */ +@Timeout(600) +class PlaintextConsumerAssignTest extends AbstractConsumerTest { + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndCommitAsyncNotCommitted(quorum: String, groupProtocol: String): Unit = { + val props = new Properties() + val consumer = createConsumer(configOverrides = props) + val producer = createProducer() + val numRecords = 10000 + val startingTimestamp = System.currentTimeMillis() + val cb = new CountConsumerCommitCallback + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + consumer.assign(List(tp).asJava) + consumer.commitAsync(cb) + TestUtils.pollUntilTrue(consumer, () => cb.successCount >= 1 || cb.lastError.isDefined, + "Failed to observe commit callback before timeout", waitTimeMs = 10000) + val committedOffset = consumer.committed(Set(tp).asJava) + assertNotNull(committedOffset) + // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to + // tp. The committed offset should be null. This is intentional. + assertNull(committedOffset.get(tp)) + assertTrue(consumer.assignment.contains(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndCommitSyncNotCommitted(quorum: String, groupProtocol: String): Unit = { + val props = new Properties() + val consumer = createConsumer(configOverrides = props) + val producer = createProducer() + val numRecords = 10000 + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + consumer.assign(List(tp).asJava) + consumer.commitSync() + val committedOffset = consumer.committed(Set(tp).asJava) + assertNotNull(committedOffset) + // No valid fetch position due to the absence of consumer.poll; and therefore no offset was committed to + // tp. The committed offset should be null. This is intentional. + assertNull(committedOffset.get(tp)) + assertTrue(consumer.assignment.contains(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndCommitSyncAllConsumed(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10000 + + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + val props = new Properties() + val consumer = createConsumer(configOverrides = props) + consumer.assign(List(tp).asJava) + consumer.seek(tp, 0) + consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) + + consumer.commitSync() + val committedOffset = consumer.committed(Set(tp).asJava) + assertNotNull(committedOffset) + assertNotNull(committedOffset.get(tp)) + assertEquals(numRecords, committedOffset.get(tp).offset()) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndConsume(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10 + + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + val props = new Properties() + val consumer = createConsumer(configOverrides = props, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + consumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) + + assertEquals(numRecords, consumer.position(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndConsumeSkippingPosition(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10 + + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + val props = new Properties() + val consumer = createConsumer(configOverrides = props, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + consumer.assign(List(tp).asJava) + val offset = 1 + consumer.seek(tp, offset) + consumeAndVerifyRecords(consumer = consumer, numRecords - offset, startingOffset = offset, + startingKeyAndValueIndex = offset, startingTimestamp = startingTimestamp + offset) + + assertEquals(numRecords, consumer.position(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndFetchCommittedOffsets(quorum: String, groupProtocol: String): Unit = { + val numRecords = 100 + val startingTimestamp = System.currentTimeMillis() + val producer = createProducer() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + val props = new Properties() + val consumer = createConsumer(configOverrides = props) + consumer.assign(List(tp).asJava) + // First consumer consumes and commits offsets + consumer.seek(tp, 0) + consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, + startingTimestamp = startingTimestamp) + consumer.commitSync() + assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) + // We should see the committed offsets from another consumer + val anotherConsumer = createConsumer(configOverrides = props) + anotherConsumer.assign(List(tp).asJava) + assertEquals(numRecords, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndConsumeFromCommittedOffsets(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + val numRecords = 100 + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = numRecords, tp, startingTimestamp = startingTimestamp) + + // Commit offset with first consumer + val props = new Properties() + props.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "group1") + val consumer = createConsumer(configOverrides = props) + consumer.assign(List(tp).asJava) + val offset = 10 + consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(offset))) + .asJava) + assertEquals(offset, consumer.committed(Set(tp).asJava).get(tp).offset) + consumer.close() + + // Consume from committed offsets with another consumer in same group + val anotherConsumer = createConsumer(configOverrides = props) + assertEquals(offset, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) + anotherConsumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = anotherConsumer, numRecords - offset, + startingOffset = offset, startingKeyAndValueIndex = offset, + startingTimestamp = startingTimestamp + offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAssignAndRetrievingCommittedOffsetsMultipleTimes(quorum: String, groupProtocol: String): Unit = { + val numRecords = 100 + val startingTimestamp = System.currentTimeMillis() + val producer = createProducer() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + val props = new Properties() + val consumer = createConsumer(configOverrides = props) + consumer.assign(List(tp).asJava) + + // Consume and commit offsets + consumer.seek(tp, 0) + consumeAndVerifyRecords(consumer = consumer, numRecords, startingOffset = 0, + startingTimestamp = startingTimestamp) + consumer.commitSync() + + // Check committed offsets twice with same consumer + assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(numRecords, consumer.committed(Set(tp).asJava).get(tp).offset) + } + +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala index bd36c22127fdf..23f5a1601b4fe 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerAssignorsTest.scala @@ -25,6 +25,7 @@ import java.util import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock import scala.collection.mutable +import scala.jdk.CollectionConverters._ /** * Integration tests for the consumer that covers assignors logic (client and server side assignors) @@ -33,9 +34,9 @@ import scala.collection.mutable class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testRoundRobinAssignment(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testRoundRobinAssignment(quorum: String, groupProtocol: String): Unit = { // 1 consumer using round-robin assignment this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "roundrobin-group") this.consumerConfig.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, classOf[RoundRobinAssignor].getName) @@ -51,7 +52,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) awaitAssignment(consumer, expectedAssignment) // add one more topic with 2 partitions @@ -59,11 +60,11 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { createTopicAndSendRecords(producer, topic3, 2, 100) val newExpectedAssignment = expectedAssignment ++ Set(new TopicPartition(topic3, 0), new TopicPartition(topic3, 1)) - consumer.subscribe(java.util.List.of(topic1, topic2, topic3)) + consumer.subscribe(List(topic1, topic2, topic3).asJava) awaitAssignment(consumer, newExpectedAssignment) // remove the topic we just added - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) awaitAssignment(consumer, expectedAssignment) consumer.unsubscribe() @@ -71,9 +72,9 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { } // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testMultiConsumerRoundRobinAssignor(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testMultiConsumerRoundRobinAssignor(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "roundrobin-group") this.consumerConfig.setProperty(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, classOf[RoundRobinAssignor].getName) @@ -110,9 +111,9 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { * will move to consumer #10, leading to a total of (#par mod 9) partition movement */ // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testMultiConsumerStickyAssignor(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testMultiConsumerStickyAssignor(quorum: String, groupProtocol: String): Unit = { def reverse(m: Map[Long, Set[TopicPartition]]) = m.values.toSet.flatten.map(v => (v, m.keys.filter(m(_).contains(v)).head)).toMap @@ -159,9 +160,9 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { * It tests the assignment results is expected using default assignor (i.e. Range assignor) */ // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testMultiConsumerDefaultAssignorAndVerifyAssignment(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testMultiConsumerDefaultAssignorAndVerifyAssignment(quorum: String, groupProtocol: String): Unit = { // create two new topics, each having 3 partitions val topic1 = "topic1" val topic2 = "topic2" @@ -197,9 +198,9 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { * As a result, it is testing the default assignment strategy set by BaseConsumerTest */ // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testMultiConsumerDefaultAssignor(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly")) + def testMultiConsumerDefaultAssignor(quorum: String, groupProtocol: String): Unit = { // use consumers and topics defined in this class + one more topic val producer = createProducer() sendRecords(producer, numRecords = 100, tp) @@ -234,9 +235,11 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { } // Remote assignors only supported with consumer group protocol - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRemoteAssignorInvalid(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @CsvSource(Array( + "kraft, consumer" + )) + def testRemoteAssignorInvalid(quorum: String, groupProtocol: String): Unit = { // 1 consumer using invalid remote assignor this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "invalid-assignor-group") this.consumerConfig.setProperty(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "invalid") @@ -250,7 +253,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(java.util.List.of(topic1)) + consumer.subscribe(List(topic1).asJava) val e: UnsupportedAssignorException = assertThrows( classOf[UnsupportedAssignorException], @@ -262,9 +265,11 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { } // Remote assignors only supported with consumer group protocol - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersConsumerGroupProtocolOnly")) - def testRemoteAssignorRange(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @CsvSource(Array( + "kraft, consumer" + )) + def testRemoteAssignorRange(quorum: String, groupProtocol: String): Unit = { // 1 consumer using range assignment this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "range-group") this.consumerConfig.setProperty(ConsumerConfig.GROUP_REMOTE_ASSIGNOR_CONFIG, "range") @@ -281,7 +286,7 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { assertEquals(0, consumer.assignment().size) // subscribe to two topics - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) awaitAssignment(consumer, expectedAssignment) // add one more topic with 2 partitions @@ -289,11 +294,11 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { val additionalAssignment = createTopicAndSendRecords(producer, topic3, 2, 100) val newExpectedAssignment = expectedAssignment ++ additionalAssignment - consumer.subscribe(java.util.List.of(topic1, topic2, topic3)) + consumer.subscribe(List(topic1, topic2, topic3).asJava) awaitAssignment(consumer, newExpectedAssignment) // remove the topic we just added - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) awaitAssignment(consumer, expectedAssignment) consumer.unsubscribe() @@ -301,12 +306,14 @@ class PlaintextConsumerAssignorsTest extends AbstractConsumerTest { } // Only the classic group protocol supports client-side assignors - @ParameterizedTest(name = "{displayName}.groupProtocol={0}.assignmentStrategy={1}") + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.assignmentStrategy={2}") @CsvSource(Array( - "classic, org.apache.kafka.clients.consumer.CooperativeStickyAssignor", - "classic, org.apache.kafka.clients.consumer.RangeAssignor" + "zk, classic, org.apache.kafka.clients.consumer.CooperativeStickyAssignor", + "zk, classic, org.apache.kafka.clients.consumer.RangeAssignor", + "kraft, classic, org.apache.kafka.clients.consumer.CooperativeStickyAssignor", + "kraft, classic, org.apache.kafka.clients.consumer.RangeAssignor" )) - def testRebalanceAndRejoin(groupProtocol: String, assignmentStrategy: String): Unit = { + def testRebalanceAndRejoin(quorum: String, groupProtocol: String, assignmentStrategy: String): Unit = { // create 2 consumers this.consumerConfig.setProperty(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) this.consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "rebalance-and-rejoin-group") diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala new file mode 100644 index 0000000000000..e159042df570d --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCallbackTest.scala @@ -0,0 +1,176 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package integration.kafka.api + +import kafka.api.AbstractConsumerTest +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer.{Consumer, ConsumerRebalanceListener} +import org.apache.kafka.common.TopicPartition +import org.junit.jupiter.api.Assertions.{assertDoesNotThrow, assertEquals, assertThrows, assertTrue} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import java.util +import java.util.Arrays.asList +import java.util.Collections +import java.util.concurrent.atomic.AtomicBoolean + +/** + * Integration tests for the consumer that cover interaction with the consumer from within callbacks + * and listeners. + */ +class PlaintextConsumerCallbackTest extends AbstractConsumerTest { + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerAssignOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsAssigned(tp, { (consumer, _) => + val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.assign(Collections.singletonList(tp))) + assertEquals(e.getMessage, "Subscription to topics, partitions and pattern are mutually exclusive") + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerAssignmentOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsAssigned(tp, { (consumer, _) => + assertTrue(consumer.assignment().contains(tp)); + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerBeginningOffsetsOnPartitionsAssigned(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsAssigned(tp, { (consumer, _) => + val map = consumer.beginningOffsets(Collections.singletonList(tp)) + assertTrue(map.containsKey(tp)) + assertEquals(0, map.get(tp)) + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerAssignOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsRevoked(tp, { (consumer, _) => + val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.assign(Collections.singletonList(tp))) + assertEquals(e.getMessage, "Subscription to topics, partitions and pattern are mutually exclusive") + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerAssignmentOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsRevoked(tp, { (consumer, _) => + assertTrue(consumer.assignment().contains(tp)) + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerRebalanceListenerBeginningOffsetsOnPartitionsRevoked(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsRevoked(tp, { (consumer, _) => + val map = consumer.beginningOffsets(Collections.singletonList(tp)) + assertTrue(map.containsKey(tp)) + assertEquals(0, map.get(tp)) + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testGetPositionOfNewlyAssignedPartitionOnPartitionsAssignedCallback(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + triggerOnPartitionsAssigned(tp, { (consumer, _) => assertDoesNotThrow(() => consumer.position(tp)) }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSeekPositionAndPauseNewlyAssignedPartitionOnPartitionsAssignedCallback(quorum: String, + groupProtocol: String): Unit = { + val consumer = createConsumer() + val startingOffset = 100L + val totalRecords = 120L + + val producer = createProducer() + val startingTimestamp = 0 + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp) + + triggerOnPartitionsAssigned(tp, consumer, { (consumer, _) => + consumer.seek(tp, startingOffset) + consumer.pause(asList(tp)) + }) + + assertTrue(consumer.paused().contains(tp)) + consumer.resume(asList(tp)) + consumeAndVerifyRecords(consumer, numRecords = (totalRecords - startingOffset).toInt, + startingOffset = startingOffset.toInt, startingKeyAndValueIndex = startingOffset.toInt, + startingTimestamp = startingOffset) + } + + private def triggerOnPartitionsAssigned(tp: TopicPartition, + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + val consumer = createConsumer() + triggerOnPartitionsAssigned(tp, consumer, execute) + } + + private def triggerOnPartitionsAssigned(tp: TopicPartition, + consumer: Consumer[Array[Byte], Array[Byte]], + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + val partitionsAssigned = new AtomicBoolean(false) + consumer.subscribe(asList(topic), new ConsumerRebalanceListener { + override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + // Make sure the partition used in the test is actually assigned before continuing. + if (partitions.contains(tp)) { + execute(consumer, partitions) + partitionsAssigned.set(true) + } + } + + override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { + // noop + } + }) + TestUtils.pollUntilTrue(consumer, () => partitionsAssigned.get(), "Timed out before expected rebalance completed") + } + + private def triggerOnPartitionsRevoked(tp: TopicPartition, + execute: (Consumer[Array[Byte], Array[Byte]], util.Collection[TopicPartition]) => Unit): Unit = { + val consumer = createConsumer() + val partitionsAssigned = new AtomicBoolean(false) + val partitionsRevoked = new AtomicBoolean(false) + consumer.subscribe(asList(topic), new ConsumerRebalanceListener { + override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + // Make sure the partition used in the test is actually assigned before continuing. + if (partitions.contains(tp)) { + partitionsAssigned.set(true) + } + } + + override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { + // Make sure the partition used in the test is actually revoked before continuing. + if (partitions.contains(tp)) { + execute(consumer, partitions) + partitionsRevoked.set(true) + } + } + }) + TestUtils.pollUntilTrue(consumer, () => partitionsAssigned.get(), "Timed out before expected rebalance completed") + consumer.close() + assertTrue(partitionsRevoked.get()) + } +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala new file mode 100644 index 0000000000000..8267401a4671f --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerCommitTest.scala @@ -0,0 +1,371 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.test.MockConsumerInterceptor +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import java.time.Duration +import java.util +import java.util.Optional +import scala.jdk.CollectionConverters._ + +/** + * Integration tests for the consumer that covers the logic related to committing offsets. + */ +@Timeout(600) +class PlaintextConsumerCommitTest extends AbstractConsumerTest { + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCommitOnClose(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") + val consumer = createConsumer() + + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords, tp) + + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, Set(tp, tp2)) + + // should auto-commit sought positions before closing + consumer.seek(tp, 300) + consumer.seek(tp2, 500) + consumer.close() + + // now we should see the committed positions from another consumer + val anotherConsumer = createConsumer() + assertEquals(300, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(500, anotherConsumer.committed(Set(tp2).asJava).get(tp2).offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCommitOnCloseAfterWakeup(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") + val consumer = createConsumer() + + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords, tp) + + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, Set(tp, tp2)) + + // should auto-commit sought positions before closing + consumer.seek(tp, 300) + consumer.seek(tp2, 500) + + // wakeup the consumer before closing to simulate trying to break a poll + // loop from another thread + consumer.wakeup() + consumer.close() + + // now we should see the committed positions from another consumer + val anotherConsumer = createConsumer() + assertEquals(300, anotherConsumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(500, anotherConsumer.committed(Set(tp2).asJava).get(tp2).offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitMetadata(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + + // sync commit + val syncMetadata = new OffsetAndMetadata(5, Optional.of(15), "foo") + consumer.commitSync(Map((tp, syncMetadata)).asJava) + assertEquals(syncMetadata, consumer.committed(Set(tp).asJava).get(tp)) + + // async commit + val asyncMetadata = new OffsetAndMetadata(10, "bar") + sendAndAwaitAsyncCommit(consumer, Some(Map(tp -> asyncMetadata))) + assertEquals(asyncMetadata, consumer.committed(Set(tp).asJava).get(tp)) + + // handle null metadata + val nullMetadata = new OffsetAndMetadata(5, null) + consumer.commitSync(Map(tp -> nullMetadata).asJava) + assertEquals(nullMetadata, consumer.committed(Set(tp).asJava).get(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAsyncCommit(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + + val callback = new CountConsumerCommitCallback + val count = 5 + + for (i <- 1 to count) + consumer.commitAsync(Map(tp -> new OffsetAndMetadata(i)).asJava, callback) + + TestUtils.pollUntilTrue(consumer, () => callback.successCount >= count || callback.lastError.isDefined, + "Failed to observe commit callback before timeout", waitTimeMs = 10000) + + assertEquals(None, callback.lastError) + assertEquals(count, callback.successCount) + assertEquals(new OffsetAndMetadata(count), consumer.committed(Set(tp).asJava).get(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCommitIntercept(quorum: String, groupProtocol: String): Unit = { + val topic2 = "topic2" + createTopic(topic2, 2, brokerCount) + + // produce records + val numRecords = 100 + val testProducer = createProducer(keySerializer = new StringSerializer, valueSerializer = new StringSerializer) + (0 until numRecords).map { i => + testProducer.send(new ProducerRecord(tp.topic(), tp.partition(), s"key $i", s"value $i")) + }.foreach(_.get) + + // create consumer with interceptor + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") + this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") + val testConsumer = createConsumer(keyDeserializer = new StringDeserializer, valueDeserializer = new StringDeserializer) + val rebalanceListener = new ConsumerRebalanceListener { + override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + // keep partitions paused in this test so that we can verify the commits based on specific seeks + testConsumer.pause(partitions) + } + + override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {} + } + changeConsumerSubscriptionAndValidateAssignment(testConsumer, List(topic), Set(tp, tp2), rebalanceListener) + testConsumer.seek(tp, 10) + testConsumer.seek(tp2, 20) + + // change subscription to trigger rebalance + val commitCountBeforeRebalance = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() + changeConsumerSubscriptionAndValidateAssignment(testConsumer, + List(topic, topic2), + Set(tp, tp2, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)), + rebalanceListener) + + // after rebalancing, we should have reset to the committed positions + assertEquals(10, testConsumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(20, testConsumer.committed(Set(tp2).asJava).get(tp2).offset) + + // In both CLASSIC and CONSUMER protocols, interceptors are executed in poll and close. + // However, in the CONSUMER protocol, the assignment may be changed outside of a poll, so + // we need to poll once to ensure the interceptor is called. + if (groupProtocol.toUpperCase == GroupProtocol.CONSUMER.name) { + testConsumer.poll(Duration.ZERO) + } + + assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeRebalance) + + // verify commits are intercepted on close + val commitCountBeforeClose = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() + testConsumer.close() + assertTrue(MockConsumerInterceptor.ON_COMMIT_COUNT.intValue() > commitCountBeforeClose) + testProducer.close() + + // cleanup + MockConsumerInterceptor.resetCounters() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitSpecifiedOffsets(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + sendRecords(producer, numRecords = 5, tp) + sendRecords(producer, numRecords = 7, tp2) + + val consumer = createConsumer() + consumer.assign(List(tp, tp2).asJava) + + val pos1 = consumer.position(tp) + val pos2 = consumer.position(tp2) + consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(3L))).asJava) + assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) + assertNull(consumer.committed(Set(tp2).asJava).get(tp2)) + + // Positions should not change + assertEquals(pos1, consumer.position(tp)) + assertEquals(pos2, consumer.position(tp2)) + consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(5L))).asJava) + assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(5, consumer.committed(Set(tp2).asJava).get(tp2).offset) + + // Using async should pick up the committed changes after commit completes + sendAndAwaitAsyncCommit(consumer, Some(Map(tp2 -> new OffsetAndMetadata(7L)))) + assertEquals(7, consumer.committed(Set(tp2).asJava).get(tp2).offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCommitOnRebalance(quorum: String, groupProtocol: String): Unit = { + val topic2 = "topic2" + createTopic(topic2, 2, brokerCount) + + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "true") + val consumer = createConsumer() + + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords, tp) + + val rebalanceListener = new ConsumerRebalanceListener { + override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + // keep partitions paused in this test so that we can verify the commits based on specific seeks + consumer.pause(partitions) + } + + override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = {} + } + + consumer.subscribe(List(topic).asJava, rebalanceListener) + + awaitAssignment(consumer, Set(tp, tp2)) + + consumer.seek(tp, 300) + consumer.seek(tp2, 500) + + // change subscription to trigger rebalance + consumer.subscribe(List(topic, topic2).asJava, rebalanceListener) + + val newAssignment = Set(tp, tp2, new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)) + awaitAssignment(consumer, newAssignment) + + // after rebalancing, we should have reset to the committed positions + assertEquals(300, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(500, consumer.committed(Set(tp2).asJava).get(tp2).offset) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSubscribeAndCommitSync(quorum: String, groupProtocol: String): Unit = { + // This test ensure that the member ID is propagated from the group coordinator when the + // assignment is received into a subsequent offset commit + val consumer = createConsumer() + assertEquals(0, consumer.assignment.size) + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, Set(tp, tp2)) + + consumer.seek(tp, 0) + + consumer.commitSync() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPositionAndCommit(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + var startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) + + val topicPartition = new TopicPartition(topic, 15) + val consumer = createConsumer() + assertNull(consumer.committed(Set(topicPartition).asJava).get(topicPartition)) + + // position() on a partition that we aren't subscribed to throws an exception + assertThrows(classOf[IllegalStateException], () => consumer.position(topicPartition)) + + consumer.assign(List(tp).asJava) + + assertEquals(0L, consumer.position(tp), "position() on a partition that we are subscribed to should reset the offset") + consumer.commitSync() + assertEquals(0L, consumer.committed(Set(tp).asJava).get(tp).offset) + consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) + assertEquals(5L, consumer.position(tp), "After consuming 5 records, position should be 5") + consumer.commitSync() + assertEquals(5L, consumer.committed(Set(tp).asJava).get(tp).offset, "Committed offset should be returned") + + startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 1, tp, startingTimestamp = startingTimestamp) + + // another consumer in the same group should get the same position + val otherConsumer = createConsumer() + otherConsumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = otherConsumer, numRecords = 1, startingOffset = 5, startingTimestamp = startingTimestamp) + } + + // TODO: This only works in the new consumer, but should be fixed for the old consumer as well + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testCommitAsyncCompletedBeforeConsumerCloses(quorum: String, groupProtocol: String): Unit = { + // This is testing the contract that asynchronous offset commit are completed before the consumer + // is closed, even when no commit sync is performed as part of the close (due to auto-commit + // disabled, or simply because there are no consumed offsets). + val producer = createProducer() + sendRecords(producer, numRecords = 3, tp) + sendRecords(producer, numRecords = 3, tp2) + + val consumer = createConsumer() + consumer.assign(List(tp, tp2).asJava) + + // Try without looking up the coordinator first + val cb = new CountConsumerCommitCallback + consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(1L))).asJava, cb) + consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(1L))).asJava, cb) + consumer.close() + assertEquals(2, cb.successCount) + } + + // TODO: This only works in the new consumer, but should be fixed for the old consumer as well + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testCommitAsyncCompletedBeforeCommitSyncReturns(quorum: String, groupProtocol: String): Unit = { + // This is testing the contract that asynchronous offset commits sent previously with the + // `commitAsync` are guaranteed to have their callbacks invoked prior to completion of + // `commitSync` (given that it does not time out). + val producer = createProducer() + sendRecords(producer, numRecords = 3, tp) + sendRecords(producer, numRecords = 3, tp2) + + val consumer = createConsumer() + consumer.assign(List(tp, tp2).asJava) + + // Try without looking up the coordinator first + val cb = new CountConsumerCommitCallback + consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(1L))).asJava, cb) + consumer.commitSync(Map.empty[TopicPartition, OffsetAndMetadata].asJava) + assertEquals(1, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(1, cb.successCount) + + // Try with coordinator known + consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(2L))).asJava, cb) + consumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp2, new OffsetAndMetadata(2L))).asJava) + assertEquals(2, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(2, consumer.committed(Set(tp2).asJava).get(tp2).offset) + assertEquals(2, cb.successCount) + + // Try with empty sync commit + consumer.commitAsync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(3L))).asJava, cb) + consumer.commitSync(Map.empty[TopicPartition, OffsetAndMetadata].asJava) + assertEquals(3, consumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(2, consumer.committed(Set(tp2).asJava).get(tp2).offset) + assertEquals(3, cb.successCount) + } + + def changeConsumerSubscriptionAndValidateAssignment[K, V](consumer: Consumer[K, V], + topicsToSubscribe: List[String], + expectedAssignment: Set[TopicPartition], + rebalanceListener: ConsumerRebalanceListener): Unit = { + consumer.subscribe(topicsToSubscribe.asJava, rebalanceListener) + awaitAssignment(consumer, expectedAssignment) + } +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala new file mode 100644 index 0000000000000..4b50bddd9fc01 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerFetchTest.scala @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.utils.TestInfoUtils +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.clients.producer.ProducerRecord +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource +import org.apache.kafka.common.TopicPartition + +import java.time.{Duration, Instant} +import scala.jdk.CollectionConverters._ + +/** + * Integration tests for the consumer that covers fetching logic + */ +@Timeout(600) +class PlaintextConsumerFetchTest extends AbstractConsumerTest { + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchInvalidOffset(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none") + val consumer = createConsumer(configOverrides = this.consumerConfig) + + // produce one record + val totalRecords = 2 + val producer = createProducer() + sendRecords(producer, totalRecords, tp) + consumer.assign(List(tp).asJava) + + // poll should fail because there is no offset reset strategy set. + // we fail only when resetting positions after coordinator is known, so using a long timeout. + assertThrows(classOf[NoOffsetForPartitionException], () => consumer.poll(Duration.ofMillis(15000))) + + // seek to out of range position + val outOfRangePos = totalRecords + 1 + consumer.seek(tp, outOfRangePos) + val e = assertThrows(classOf[OffsetOutOfRangeException], () => consumer.poll(Duration.ofMillis(20000))) + val outOfRangePartitions = e.offsetOutOfRangePartitions() + assertNotNull(outOfRangePartitions) + assertEquals(1, outOfRangePartitions.size) + assertEquals(outOfRangePos.toLong, outOfRangePartitions.get(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchOutOfRangeOffsetResetConfigEarliest(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + // ensure no in-flight fetch request so that the offset can be reset immediately + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") + val consumer = createConsumer(configOverrides = this.consumerConfig) + val totalRecords = 10L + + val producer = createProducer() + val startingTimestamp = 0 + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt, startingOffset = 0) + // seek to out of range position + val outOfRangePos = totalRecords + 1 + consumer.seek(tp, outOfRangePos) + // assert that poll resets to the beginning position + consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0) + } + + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchOutOfRangeOffsetResetConfigLatest(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest") + // ensure no in-flight fetch request so that the offset can be reset immediately + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") + val consumer = createConsumer(configOverrides = this.consumerConfig) + val totalRecords = 10L + + val producer = createProducer() + val startingTimestamp = 0 + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumer.assign(List(tp).asJava) + consumer.seek(tp, 0) + // consume some, but not all of the records + consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt / 2, startingOffset = 0) + // seek to out of range position + val outOfRangePos = totalRecords + 17 // arbitrary, much higher offset + consumer.seek(tp, outOfRangePos) + // assert that poll resets to the ending position + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = totalRecords) + val nextRecord = consumer.poll(Duration.ofMillis(50)).iterator().next() + // ensure the seek went to the last known record at the time of the previous poll + assertEquals(totalRecords, nextRecord.offset()) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchOutOfRangeOffsetResetConfigByDuration(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "by_duration:PT1H") + // ensure no in-flight fetch request so that the offset can be reset immediately + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "0") + + // Test the scenario where the requested duration much earlier than the starting offset + val consumer1 = createConsumer(configOverrides = this.consumerConfig) + val producer1 = createProducer() + val totalRecords = 10L + var startingTimestamp = System.currentTimeMillis() + sendRecords(producer1, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumer1.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer1, numRecords = totalRecords.toInt, startingOffset = 0, startingTimestamp = startingTimestamp) + + // seek to out of range position + var outOfRangePos = totalRecords + 1 + consumer1.seek(tp, outOfRangePos) + // assert that poll resets to the beginning position + consumeAndVerifyRecords(consumer = consumer1, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) + + // Test the scenario where starting offset is earlier than the requested duration + val consumer2 = createConsumer(configOverrides = this.consumerConfig) + val producer2 = createProducer() + val totalRecords2 = 25L + startingTimestamp = Instant.now().minus(Duration.ofHours(24)).toEpochMilli + //generate records with 1 hour interval for 1 day + sendRecords(producer2, totalRecords2.toInt, tp2, startingTimestamp = startingTimestamp, Duration.ofHours(1).toMillis) + consumer2.assign(List(tp2).asJava) + //consumer should read one record from last one hour + consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, + startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, + tp = tp2, + timestampIncrement = Duration.ofHours(1).toMillis) + + // seek to out of range position + outOfRangePos = totalRecords2 + 1 + consumer2.seek(tp2, outOfRangePos) + // assert that poll resets to the duration offset. consumer should read one record from last one hour + consumeAndVerifyRecords(consumer = consumer2, numRecords = 1, startingOffset = 24, startingKeyAndValueIndex = 24, + startingTimestamp = startingTimestamp + 24 * Duration.ofHours(1).toMillis, + tp = tp2, + timestampIncrement = Duration.ofHours(1).toMillis) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchRecordLargerThanFetchMaxBytes(quorum: String, groupProtocol: String): Unit = { + val maxFetchBytes = 10 * 1024 + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, maxFetchBytes.toString) + checkLargeRecord(maxFetchBytes + 1) + } + + private def checkLargeRecord(producerRecordSize: Int): Unit = { + val consumer = createConsumer() + + // produce a record that is larger than the configured fetch size + val record = new ProducerRecord(tp.topic(), tp.partition(), "key".getBytes, + new Array[Byte](producerRecordSize)) + val producer = createProducer() + producer.send(record) + + // consuming a record that is too large should succeed since KIP-74 + consumer.assign(List(tp).asJava) + val records = consumer.poll(Duration.ofMillis(20000)) + assertEquals(1, records.count) + val consumerRecord = records.iterator().next() + assertEquals(0L, consumerRecord.offset) + assertEquals(tp.topic(), consumerRecord.topic()) + assertEquals(tp.partition(), consumerRecord.partition()) + assertArrayEquals(record.key(), consumerRecord.key()) + assertArrayEquals(record.value(), consumerRecord.value()) + } + + /** We should only return a large record if it's the first record in the first non-empty partition of the fetch request */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchHonoursFetchSizeIfLargeRecordNotFirst(quorum: String, groupProtocol: String): Unit = { + val maxFetchBytes = 10 * 1024 + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, maxFetchBytes.toString) + checkFetchHonoursSizeIfLargeRecordNotFirst(maxFetchBytes) + } + + private def checkFetchHonoursSizeIfLargeRecordNotFirst(largeProducerRecordSize: Int): Unit = { + val consumer = createConsumer() + + val smallRecord = new ProducerRecord(tp.topic(), tp.partition(), "small".getBytes, + "value".getBytes) + val largeRecord = new ProducerRecord(tp.topic(), tp.partition(), "large".getBytes, + new Array[Byte](largeProducerRecordSize)) + + val producer = createProducer() + producer.send(smallRecord).get + producer.send(largeRecord).get + + // we should only get the small record in the first `poll` + consumer.assign(List(tp).asJava) + val records = consumer.poll(Duration.ofMillis(20000)) + assertEquals(1, records.count) + val consumerRecord = records.iterator().next() + assertEquals(0L, consumerRecord.offset) + assertEquals(tp.topic(), consumerRecord.topic()) + assertEquals(tp.partition(), consumerRecord.partition()) + assertArrayEquals(smallRecord.key(), consumerRecord.key()) + assertArrayEquals(smallRecord.value(), consumerRecord.value()) + } + + /** We should only return a large record if it's the first record in the first partition of the fetch request */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchHonoursMaxPartitionFetchBytesIfLargeRecordNotFirst(quorum: String, groupProtocol: String): Unit = { + val maxPartitionFetchBytes = 10 * 1024 + this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes.toString) + checkFetchHonoursSizeIfLargeRecordNotFirst(maxPartitionFetchBytes) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchRecordLargerThanMaxPartitionFetchBytes(quorum: String, groupProtocol: String): Unit = { + val maxPartitionFetchBytes = 10 * 1024 + this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, maxPartitionFetchBytes.toString) + checkLargeRecord(maxPartitionFetchBytes + 1) + } + + /** Test that we consume all partitions if fetch max bytes and max.partition.fetch.bytes are low */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testLowMaxFetchSizeForRequestAndPartition(quorum: String, groupProtocol: String): Unit = { + // one of the effects of this is that there will be some log reads where `0 > remaining limit bytes < message size` + // and we don't return the message because it's not the first message in the first non-empty partition of the fetch + // this behaves a little different than when remaining limit bytes is 0 and it's important to test it + this.consumerConfig.setProperty(ConsumerConfig.FETCH_MAX_BYTES_CONFIG, "500") + this.consumerConfig.setProperty(ConsumerConfig.MAX_PARTITION_FETCH_BYTES_CONFIG, "100") + + // Avoid a rebalance while the records are being sent (the default is 6 seconds) + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 20000.toString) + val consumer = createConsumer() + + val topic1 = "topic1" + val topic2 = "topic2" + val topic3 = "topic3" + val partitionCount = 30 + val topics = Seq(topic1, topic2, topic3) + topics.foreach { topicName => + createTopic(topicName, partitionCount, brokerCount) + } + + val partitions = topics.flatMap { topic => + (0 until partitionCount).map(new TopicPartition(topic, _)) + } + + assertEquals(0, consumer.assignment().size) + + consumer.subscribe(List(topic1, topic2, topic3).asJava) + + awaitAssignment(consumer, partitions.toSet) + + val producer = createProducer() + + val producerRecords = partitions.flatMap(sendRecords(producer, numRecords = partitionCount, _)) + + val consumerRecords = consumeRecords(consumer, producerRecords.size) + + val expected = producerRecords.map { record => + (record.topic, record.partition, new String(record.key), new String(record.value), record.timestamp) + }.toSet + + val actual = consumerRecords.map { record => + (record.topic, record.partition, new String(record.key), new String(record.value), record.timestamp) + }.toSet + + assertEquals(expected, actual) + } + +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala new file mode 100644 index 0000000000000..c52228acbca32 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerPollTest.scala @@ -0,0 +1,307 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.common.{MetricName, TopicPartition} +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import java.time.Duration +import java.util +import java.util.Properties +import scala.collection.mutable +import scala.jdk.CollectionConverters._ + +/** + * Integration tests for the consumer that covers the poll logic + */ +@Timeout(600) +class PlaintextConsumerPollTest extends AbstractConsumerTest { + + override protected def brokerPropertyOverrides(properties: Properties): Unit = { + super.brokerPropertyOverrides(properties) + properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + properties.setProperty(GroupCoordinatorConfig.CONSUMER_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMaxPollRecords(quorum: String, groupProtocol: String): Unit = { + val maxPollRecords = 2 + val numRecords = 10000 + + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer, numRecords = numRecords, startingOffset = 0, maxPollRecords = maxPollRecords, + startingTimestamp = startingTimestamp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMaxPollIntervalMs(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 2000.toString) + } + + val consumer = createConsumer() + + val listener = new TestConsumerReassignmentListener() + consumer.subscribe(List(topic).asJava, listener) + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener) + assertEquals(1, listener.callsToAssigned) + assertEquals(0, listener.callsToRevoked) + + // after we extend longer than max.poll a rebalance should be triggered + // NOTE we need to have a relatively much larger value than max.poll to let heartbeat expired for sure + Thread.sleep(3000) + + awaitRebalance(consumer, listener) + assertEquals(2, listener.callsToAssigned) + assertEquals(1, listener.callsToRevoked) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMaxPollIntervalMsDelayInRevocation(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + } + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) + + val consumer = createConsumer() + var commitCompleted = false + var committedPosition: Long = -1 + + val listener = new TestConsumerReassignmentListener { + override def onPartitionsLost(partitions: util.Collection[TopicPartition]): Unit = {} + + override def onPartitionsRevoked(partitions: util.Collection[TopicPartition]): Unit = { + if (!partitions.isEmpty && partitions.contains(tp)) { + // on the second rebalance (after we have joined the group initially), sleep longer + // than session timeout and then try a commit. We should still be in the group, + // so the commit should succeed + Utils.sleep(1500) + committedPosition = consumer.position(tp) + consumer.commitSync(Map(tp -> new OffsetAndMetadata(committedPosition)).asJava) + commitCompleted = true + } + super.onPartitionsRevoked(partitions) + } + } + + consumer.subscribe(List(topic).asJava, listener) + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener) + + // force a rebalance to trigger an invocation of the revocation callback while in the group + consumer.subscribe(List("otherTopic").asJava, listener) + awaitRebalance(consumer, listener) + + assertEquals(0, committedPosition) + assertTrue(commitCompleted) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMaxPollIntervalMsDelayInAssignment(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 5000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 1000.toString) + } + this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) + + val consumer = createConsumer() + val listener = new TestConsumerReassignmentListener { + override def onPartitionsAssigned(partitions: util.Collection[TopicPartition]): Unit = { + // sleep longer than the session timeout, we should still be in the group after invocation + Utils.sleep(1500) + super.onPartitionsAssigned(partitions) + } + } + consumer.subscribe(List(topic).asJava, listener) + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener) + + // We should still be in the group after this invocation + ensureNoRebalance(consumer, listener) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMaxPollIntervalMsShorterThanPollTimeout(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 1000.toString) + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 500.toString) + } + + val consumer = createConsumer() + val listener = new TestConsumerReassignmentListener + consumer.subscribe(List(topic).asJava, listener) + + // rebalance to get the initial assignment + awaitRebalance(consumer, listener) + + val callsToAssignedAfterFirstRebalance = listener.callsToAssigned + + consumer.poll(Duration.ofMillis(2000)) + + // If the poll poll above times out, it would trigger a rebalance. + // Leave some time for the rebalance to happen and check for the rebalance event. + consumer.poll(Duration.ofMillis(500)) + consumer.poll(Duration.ofMillis(500)) + + assertEquals(callsToAssignedAfterFirstRebalance, listener.callsToAssigned) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLeadWithMaxPollRecords(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + val maxPollRecords = 10 + val producer = createProducer() + sendRecords(producer, numMessages, tp) + + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadWithMaxPollRecords") + consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + awaitNonEmptyRecords(consumer, tp) + + val tags = new util.HashMap[String, String]() + tags.put("client-id", "testPerPartitionLeadWithMaxPollRecords") + tags.put("topic", tp.topic()) + tags.put("partition", String.valueOf(tp.partition())) + val lead = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)) + assertEquals(maxPollRecords, lead.metricValue().asInstanceOf[Double], s"The lead should be $maxPollRecords") + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLagWithMaxPollRecords(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + val maxPollRecords = 10 + val producer = createProducer() + sendRecords(producer, numMessages, tp) + + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagWithMaxPollRecords") + consumerConfig.setProperty(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords.toString) + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + val records = awaitNonEmptyRecords(consumer, tp) + + val tags = new util.HashMap[String, String]() + tags.put("client-id", "testPerPartitionLagWithMaxPollRecords") + tags.put("topic", tp.topic()) + tags.put("partition", String.valueOf(tp.partition())) + val lag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) + + assertEquals(numMessages - records.count, lag.metricValue.asInstanceOf[Double], epsilon, s"The lag should be ${numMessages - records.count}") + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMultiConsumerSessionTimeoutOnStopPolling(quorum: String, groupProtocol: String): Unit = { + runMultiConsumerSessionTimeoutTest(false) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMultiConsumerSessionTimeoutOnClose(quorum: String, groupProtocol: String): Unit = { + runMultiConsumerSessionTimeoutTest(true) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPollEventuallyReturnsRecordsWithZeroTimeout(quorum: String, groupProtocol: String): Unit = { + val numMessages = 100 + val producer = createProducer() + sendRecords(producer, numMessages, tp) + + val consumer = createConsumer() + consumer.subscribe(Set(topic).asJava) + val records = awaitNonEmptyRecords(consumer, tp, 0L) + assertEquals(numMessages, records.count()) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNoOffsetForPartitionExceptionOnPollZero(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none") + val consumer = createConsumer(configOverrides = this.consumerConfig) + + consumer.assign(List(tp).asJava) + + // continuous poll should eventually fail because there is no offset reset strategy set (fail only when resetting positions after coordinator is known) + TestUtils.tryUntilNoAssertionError() { + assertThrows(classOf[NoOffsetForPartitionException], () => consumer.poll(Duration.ZERO)) + } + } + + def runMultiConsumerSessionTimeoutTest(closeConsumer: Boolean): Unit = { + // use consumers defined in this class plus one additional consumer + // Use topic defined in this class + one additional topic + val producer = createProducer() + sendRecords(producer, numRecords = 100, tp) + sendRecords(producer, numRecords = 100, tp2) + val topic1 = "topic1" + val subscriptions = Set(tp, tp2) ++ createTopicAndSendRecords(producer, topic1, 6, 100) + + // first subscribe consumers that are defined in this class + val consumerPollers = mutable.Buffer[ConsumerAssignmentPoller]() + consumerPollers += subscribeConsumerAndStartPolling(createConsumer(), List(topic, topic1)) + consumerPollers += subscribeConsumerAndStartPolling(createConsumer(), List(topic, topic1)) + + // create one more consumer and add it to the group; we will timeout this consumer + val timeoutConsumer = createConsumer() + val timeoutPoller = subscribeConsumerAndStartPolling(timeoutConsumer, List(topic, topic1)) + consumerPollers += timeoutPoller + + // validate the initial assignment + validateGroupAssignment(consumerPollers, subscriptions) + + // stop polling and close one of the consumers, should trigger partition re-assignment among alive consumers + timeoutPoller.shutdown() + consumerPollers -= timeoutPoller + if (closeConsumer) + timeoutConsumer.close() + + validateGroupAssignment(consumerPollers, subscriptions, + Some(s"Did not get valid assignment for partitions ${subscriptions.asJava} after one consumer left"), 3 * groupMaxSessionTimeoutMs) + + // done with pollers and consumers + for (poller <- consumerPollers) + poller.shutdown() + } +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala new file mode 100644 index 0000000000000..70abc3f8412dc --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerSubscriptionTest.scala @@ -0,0 +1,423 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.consumer._ +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.{InvalidRegularExpression, InvalidTopicException} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.api.function.Executable +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import java.time.Duration +import java.util.regex.Pattern +import scala.jdk.CollectionConverters._ + +/** + * Integration tests for the consumer that covers the subscribe and unsubscribe logic. + */ +@Timeout(600) +class PlaintextConsumerSubscriptionTest extends AbstractConsumerTest { + + /** + * Verifies that pattern subscription performs as expected. + * The pattern matches the topics 'topic' and 'tblablac', but not 'tblablak' or 'tblab1'. + * It is expected that the consumer is subscribed to all partitions of 'topic' and + * 'tblablac' after the subscription when metadata is refreshed. + * When a new topic 'tsomec' is added afterwards, it is expected that upon the next + * metadata refresh the consumer becomes subscribed to this new topic and all partitions + * of that topic are assigned to it. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternSubscription(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords, tp) + + val topic1 = "tblablac" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 0)) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 1)) + + val topic2 = "tblablak" // does not match subscribed pattern + createTopic(topic2, 2, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic2, 0)) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic2, 1)) + + val topic3 = "tblab1" // does not match subscribed pattern + createTopic(topic3, 2, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic3, 0)) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic3, 1)) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + val pattern = Pattern.compile("t.*c") + consumer.subscribe(pattern, new TestConsumerReassignmentListener) + + var assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + + val topic4 = "tsomec" // matches subscribed pattern + createTopic(topic4, 2, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic4, 0)) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic4, 1)) + + assignment ++= Set( + new TopicPartition(topic4, 0), + new TopicPartition(topic4, 1)) + awaitAssignment(consumer, assignment) + + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + } + + /** + * Verifies that a second call to pattern subscription succeeds and performs as expected. + * The initial subscription is to a pattern that matches two topics 'topic' and 'foo'. + * The second subscription is to a pattern that matches 'foo' and a new topic 'bar'. + * It is expected that the consumer is subscribed to all partitions of 'topic' and 'foo' after + * the first subscription, and to all partitions of 'foo' and 'bar' after the second. + * The metadata refresh interval is intentionally increased to a large enough value to guarantee + * that it is the subscription call that triggers a metadata refresh, and not the timeout. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSubsequentPatternSubscription(quorum: String, groupProtocol: String): Unit = { + this.consumerConfig.setProperty(ConsumerConfig.METADATA_MAX_AGE_CONFIG, "30000") + val consumer = createConsumer() + + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords = numRecords, tp) + + // the first topic ('topic') matches first subscription pattern only + + val fooTopic = "foo" // matches both subscription patterns + createTopic(fooTopic, 1, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(fooTopic, 0)) + + assertEquals(0, consumer.assignment().size) + + val pattern1 = Pattern.compile(".*o.*") // only 'topic' and 'foo' match this + consumer.subscribe(pattern1, new TestConsumerReassignmentListener) + + var assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(fooTopic, 0)) + awaitAssignment(consumer, assignment) + + val barTopic = "bar" // matches the next subscription pattern + createTopic(barTopic, 1, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(barTopic, 0)) + + val pattern2 = Pattern.compile("...") // only 'foo' and 'bar' match this + consumer.subscribe(pattern2, new TestConsumerReassignmentListener) + assignment --= Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1)) + assignment ++= Set( + new TopicPartition(barTopic, 0)) + awaitAssignment(consumer, assignment) + + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + } + + /** + * Verifies that pattern unsubscription performs as expected. + * The pattern matches the topics 'topic' and 'tblablac'. + * It is expected that the consumer is subscribed to all partitions of 'topic' and + * 'tblablac' after the subscription when metadata is refreshed. + * When consumer unsubscribes from all its subscriptions, it is expected that its + * assignments are cleared right away. + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPatternUnsubscription(quorum: String, groupProtocol: String): Unit = { + val numRecords = 10000 + val producer = createProducer() + sendRecords(producer, numRecords, tp) + + val topic1 = "tblablac" // matches the subscription pattern + createTopic(topic1, 2, brokerCount) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 0)) + sendRecords(producer, numRecords = 1000, new TopicPartition(topic1, 1)) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + consumer.subscribe(Pattern.compile("t.*c"), new TestConsumerReassignmentListener) + val assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "tblablac" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + + val topic2 = "tblablak" // does not match subscribed pattern + createTopic(topic2, 2, brokerCount) + + val topic3 = "tblab1" // does not match subscribed pattern + createTopic(topic3, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + var pattern = new SubscriptionPattern("t.*c") + consumer.subscribe(pattern) + + var assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to a different pattern to match topic2 (that did not match before) + pattern = new SubscriptionPattern(topic2 + ".*") + consumer.subscribe(pattern) + + assignment = Set( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1)) + awaitAssignment(consumer, assignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscriptionFetch(quorum: String, groupProtocol: String): Unit = { + val topic1 = "topic1" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + val pattern = new SubscriptionPattern("topic.*") + consumer.subscribe(pattern) + + val assignment = Set( + new TopicPartition(topic, 0), + new TopicPartition(topic, 1), + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + + val producer = createProducer() + val totalRecords = 10L + val startingTimestamp = System.currentTimeMillis() + val tp = new TopicPartition(topic1, 0) + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumeAndVerifyRecords(consumer = consumer, numRecords = totalRecords.toInt, startingOffset = 0, startingTimestamp = startingTimestamp, tp = tp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternExpandSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "topic1" // matches first pattern + createTopic(topic1, 2, brokerCount) + + val topic2 = "topic2" // does not match first pattern + createTopic(topic2, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + var pattern = new SubscriptionPattern("topic1.*") + consumer.subscribe(pattern) + val assignment = Set( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1)) + awaitAssignment(consumer, assignment) + + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to a different pattern that should match + // the same topics the member already had plus new ones + pattern = new SubscriptionPattern("topic1|topic2") + consumer.subscribe(pattern) + + val expandedAssignment = assignment ++ Set(new TopicPartition(topic2, 0), new TopicPartition(topic2, 1)) + awaitAssignment(consumer, expandedAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscriptionAndTopicSubscription(quorum: String, groupProtocol: String): Unit = { + val topic1 = "topic1" // matches subscribed pattern + createTopic(topic1, 2, brokerCount) + + val topic11 = "topic11" // matches subscribed pattern + createTopic(topic11, 2, brokerCount) + + val topic2 = "topic2" // does not match subscribed pattern + createTopic(topic2, 2, brokerCount) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + // Subscribe to pattern + val pattern = new SubscriptionPattern("topic1.*") + consumer.subscribe(pattern) + val patternAssignment = Set( + new TopicPartition(topic1, 0), + new TopicPartition(topic1, 1), + new TopicPartition(topic11, 0), + new TopicPartition(topic11, 1)) + awaitAssignment(consumer, patternAssignment) + consumer.unsubscribe() + assertEquals(0, consumer.assignment().size) + + // Subscribe to explicit topic names + consumer.subscribe(List(topic2).asJava) + val assignment = Set( + new TopicPartition(topic2, 0), + new TopicPartition(topic2, 1)) + awaitAssignment(consumer, assignment) + consumer.unsubscribe() + + // Subscribe to pattern again + consumer.subscribe(pattern) + awaitAssignment(consumer, patternAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly")) + def testRe2JPatternSubscriptionInvalidRegex(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + assertEquals(0, consumer.assignment().size) + + val pattern = new SubscriptionPattern("(t.*c") + consumer.subscribe(pattern) + + TestUtils.tryUntilNoAssertionError() { + assertThrows(classOf[InvalidRegularExpression], () => consumer.poll(Duration.ZERO)) + } + consumer.unsubscribe() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testExpandingTopicSubscriptions(quorum: String, groupProtocol: String): Unit = { + val otherTopic = "other" + val initialAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1)) + val consumer = createConsumer() + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, initialAssignment) + + createTopic(otherTopic, 2, brokerCount) + val expandedAssignment = initialAssignment ++ Set(new TopicPartition(otherTopic, 0), new TopicPartition(otherTopic, 1)) + consumer.subscribe(List(topic, otherTopic).asJava) + awaitAssignment(consumer, expandedAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testShrinkingTopicSubscriptions(quorum: String, groupProtocol: String): Unit = { + val otherTopic = "other" + createTopic(otherTopic, 2, brokerCount) + val initialAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1), new TopicPartition(otherTopic, 0), new TopicPartition(otherTopic, 1)) + val consumer = createConsumer() + consumer.subscribe(List(topic, otherTopic).asJava) + awaitAssignment(consumer, initialAssignment) + + val shrunkenAssignment = Set(new TopicPartition(topic, 0), new TopicPartition(topic, 1)) + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, shrunkenAssignment) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUnsubscribeTopic(quorum: String, groupProtocol: String): Unit = { + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + } + val consumer = createConsumer() + + val listener = new TestConsumerReassignmentListener() + consumer.subscribe(List(topic).asJava, listener) + + // the initial subscription should cause a callback execution + awaitRebalance(consumer, listener) + + consumer.subscribe(List[String]().asJava) + assertEquals(0, consumer.assignment.size()) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSubscribeInvalidTopicCanUnsubscribe(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + + setupSubscribeInvalidTopic(consumer) + if(groupProtocol == "consumer") { + // Must ensure memberId is not empty before sending leave group heartbeat. This is a temporary solution before KIP-1082. + TestUtils.waitUntilTrue(() => consumer.groupMetadata().memberId().nonEmpty, + waitTimeMs = 30000, msg = "Timeout waiting for first consumer group heartbeat response") + } + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.unsubscribe() + }) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSubscribeInvalidTopicCanClose(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + + setupSubscribeInvalidTopic(consumer) + assertDoesNotThrow(new Executable { + override def execute(): Unit = consumer.close() + }) + } + + def setupSubscribeInvalidTopic(consumer: Consumer[Array[Byte], Array[Byte]]): Unit = { + // Invalid topic name due to space + val invalidTopicName = "topic abc" + consumer.subscribe(List(invalidTopicName).asJava) + + var exception : InvalidTopicException = null + TestUtils.waitUntilTrue(() => { + try consumer.poll(Duration.ofMillis(500)) catch { + case e : InvalidTopicException => exception = e + case e : Throwable => fail(s"An InvalidTopicException should be thrown. But ${e.getClass} is thrown") + } + exception != null + }, waitTimeMs = 5000, msg = "An InvalidTopicException should be thrown.") + + assertEquals(s"Invalid topics: [${invalidTopicName}]", exception.getMessage) + } +} diff --git a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala index bbc4e6c350cbb..2469a482ab557 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextConsumerTest.scala @@ -12,29 +12,830 @@ */ package kafka.api +import kafka.api.BaseConsumerTest.{DeserializerImpl, SerializerImpl} + +import java.time.Duration import java.util +import java.util.Arrays.asList +import java.util.{Collections, Locale, Optional, Properties} +import kafka.server.KafkaBroker import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.admin.{NewPartitions, NewTopic} import org.apache.kafka.clients.consumer._ -import org.apache.kafka.common.errors.InterruptException +import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.errors.{InterruptException, InvalidGroupIdException, InvalidTopicException, TimeoutException, WakeupException} +import org.apache.kafka.common.record.{CompressionType, TimestampType} +import org.apache.kafka.common.serialization._ import org.apache.kafka.common.test.api.Flaky +import org.apache.kafka.common.{MetricName, TopicPartition} +import org.apache.kafka.server.quota.QuotaType +import org.apache.kafka.test.{MockConsumerInterceptor, MockProducerInterceptor} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource -import java.util.concurrent.ExecutionException +import java.util.concurrent.{CompletableFuture, ExecutionException, TimeUnit} +import scala.jdk.CollectionConverters._ + +@Timeout(600) +class PlaintextConsumerTest extends BaseConsumerTest { + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testHeaders(quorum: String, groupProtocol: String): Unit = { + val numRecords = 1 + val record = new ProducerRecord(tp.topic, tp.partition, null, "key".getBytes, "value".getBytes) + + record.headers().add("headerKey", "headerValue".getBytes) + + val producer = createProducer() + producer.send(record) + + val consumer = createConsumer() + assertEquals(0, consumer.assignment.size) + consumer.assign(List(tp).asJava) + assertEquals(1, consumer.assignment.size) + + consumer.seek(tp, 0) + val records = consumeRecords(consumer = consumer, numRecords = numRecords) + + assertEquals(numRecords, records.size) + + for (i <- 0 until numRecords) { + val record = records(i) + val header = record.headers().lastHeader("headerKey") + assertEquals("headerValue", if (header == null) null else new String(header.value())) + } + } + + private def testHeadersSerializeDeserialize(serializer: Serializer[Array[Byte]], deserializer: Deserializer[Array[Byte]]): Unit = { + val numRecords = 1 + val record = new ProducerRecord(tp.topic, tp.partition, null, "key".getBytes, "value".getBytes) + + val producer = createProducer( + keySerializer = new ByteArraySerializer, + valueSerializer = serializer) + producer.send(record) + + val consumer = createConsumer( + keyDeserializer = new ByteArrayDeserializer, + valueDeserializer = deserializer) + assertEquals(0, consumer.assignment.size) + consumer.assign(List(tp).asJava) + assertEquals(1, consumer.assignment.size) + + consumer.seek(tp, 0) + val records = consumeRecords(consumer = consumer, numRecords = numRecords) + + assertEquals(numRecords, records.size) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testHeadersSerializerDeserializer(quorum: String, groupProtocol: String): Unit = { + val extendedSerializer = new SerializerImpl + + val extendedDeserializer = new DeserializerImpl + + testHeadersSerializeDeserialize(extendedSerializer, extendedDeserializer) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoOffsetReset(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 1, tp, startingTimestamp = startingTimestamp) + + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testGroupConsumption(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 10, tp, startingTimestamp = startingTimestamp) + + val consumer = createConsumer() + consumer.subscribe(List(topic).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPartitionsFor(quorum: String, groupProtocol: String): Unit = { + val numParts = 2 + createTopic("part-test", numParts) + val consumer = createConsumer() + val parts = consumer.partitionsFor("part-test") + assertNotNull(parts) + assertEquals(2, parts.size) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPartitionsForAutoCreate(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + // First call would create the topic + consumer.partitionsFor("non-exist-topic") + TestUtils.waitUntilTrue(() => { + !consumer.partitionsFor("non-exist-topic").isEmpty + }, s"Timed out while awaiting non empty partitions.") + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPartitionsForInvalidTopic(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + assertThrows(classOf[InvalidTopicException], () => consumer.partitionsFor(";3# ads,{234")) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSeek(quorum: String, groupProtocol: String): Unit = { + val consumer = createConsumer() + val totalRecords = 50L + val mid = totalRecords / 2 + + // Test seek non-compressed message + val producer = createProducer() + val startingTimestamp = 0 + sendRecords(producer, totalRecords.toInt, tp, startingTimestamp = startingTimestamp) + consumer.assign(List(tp).asJava) + + consumer.seekToEnd(List(tp).asJava) + assertEquals(totalRecords, consumer.position(tp)) + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) + + consumer.seekToBeginning(List(tp).asJava) + assertEquals(0L, consumer.position(tp)) + consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = 0, startingTimestamp = startingTimestamp) + + consumer.seek(tp, mid) + assertEquals(mid, consumer.position(tp)) + + consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = mid.toInt, startingKeyAndValueIndex = mid.toInt, + startingTimestamp = mid) + + // Test seek compressed message + sendCompressedMessages(totalRecords.toInt, tp2) + consumer.assign(List(tp2).asJava) + + consumer.seekToEnd(List(tp2).asJava) + assertEquals(totalRecords, consumer.position(tp2)) + assertTrue(consumer.poll(Duration.ofMillis(50)).isEmpty) + + consumer.seekToBeginning(List(tp2).asJava) + assertEquals(0L, consumer.position(tp2)) + consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = 0, tp = tp2) + + consumer.seek(tp2, mid) + assertEquals(mid, consumer.position(tp2)) + consumeAndVerifyRecords(consumer, numRecords = 1, startingOffset = mid.toInt, startingKeyAndValueIndex = mid.toInt, + startingTimestamp = mid, tp = tp2) + } + + private def sendCompressedMessages(numRecords: Int, tp: TopicPartition): Unit = { + val producerProps = new Properties() + producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, CompressionType.GZIP.name) + producerProps.setProperty(ProducerConfig.LINGER_MS_CONFIG, Int.MaxValue.toString) + val producer = createProducer(configOverrides = producerProps) + (0 until numRecords).foreach { i => + producer.send(new ProducerRecord(tp.topic, tp.partition, i.toLong, s"key $i".getBytes, s"value $i".getBytes)) + } + producer.close() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPartitionPauseAndResume(quorum: String, groupProtocol: String): Unit = { + val partitions = List(tp).asJava + val producer = createProducer() + var startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) + + val consumer = createConsumer() + consumer.assign(partitions) + consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) + consumer.pause(partitions) + startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) + assertTrue(consumer.poll(Duration.ofMillis(100)).isEmpty) + consumer.resume(partitions) + consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 5, startingTimestamp = startingTimestamp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testInterceptors(quorum: String, groupProtocol: String): Unit = { + val appendStr = "mock" + MockConsumerInterceptor.resetCounters() + MockProducerInterceptor.resetCounters() + + // create producer with interceptor + val producerProps = new Properties() + producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, classOf[MockProducerInterceptor].getName) + producerProps.put("mock.interceptor.append", appendStr) + val testProducer = createProducer(keySerializer = new StringSerializer, + valueSerializer = new StringSerializer, + configOverrides = producerProps) + + // produce records + val numRecords = 10 + (0 until numRecords).map { i => + testProducer.send(new ProducerRecord(tp.topic, tp.partition, s"key $i", s"value $i")) + }.foreach(_.get) + assertEquals(numRecords, MockProducerInterceptor.ONSEND_COUNT.intValue) + assertEquals(numRecords, MockProducerInterceptor.ON_SUCCESS_COUNT.intValue) + // send invalid record + assertThrows(classOf[Throwable], () => testProducer.send(null), () => "Should not allow sending a null record") + assertEquals(1, MockProducerInterceptor.ON_ERROR_COUNT.intValue, "Interceptor should be notified about exception") + assertEquals(0, MockProducerInterceptor.ON_ERROR_WITH_METADATA_COUNT.intValue(), "Interceptor should not receive metadata with an exception when record is null") + + // create consumer with interceptor + this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") + val testConsumer = createConsumer(keyDeserializer = new StringDeserializer, valueDeserializer = new StringDeserializer) + testConsumer.assign(List(tp).asJava) + testConsumer.seek(tp, 0) + + // consume and verify that values are modified by interceptors + val records = consumeRecords(testConsumer, numRecords) + for (i <- 0 until numRecords) { + val record = records(i) + assertEquals(s"key $i", new String(record.key)) + assertEquals(s"value $i$appendStr".toUpperCase(Locale.ROOT), new String(record.value)) + } + + // commit sync and verify onCommit is called + val commitCountBefore = MockConsumerInterceptor.ON_COMMIT_COUNT.intValue + testConsumer.commitSync(Map[TopicPartition, OffsetAndMetadata]((tp, new OffsetAndMetadata(2L))).asJava) + assertEquals(2, testConsumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(commitCountBefore + 1, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue) + + // commit async and verify onCommit is called + sendAndAwaitAsyncCommit(testConsumer, Some(Map(tp -> new OffsetAndMetadata(5L)))) + assertEquals(5, testConsumer.committed(Set(tp).asJava).get(tp).offset) + assertEquals(commitCountBefore + 2, MockConsumerInterceptor.ON_COMMIT_COUNT.intValue) + + testConsumer.close() + testProducer.close() + + // cleanup + MockConsumerInterceptor.resetCounters() + MockProducerInterceptor.resetCounters() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testInterceptorsWithWrongKeyValue(quorum: String, groupProtocol: String): Unit = { + val appendStr = "mock" + // create producer with interceptor that has different key and value types from the producer + val producerProps = new Properties() + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) + producerProps.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockProducerInterceptor") + producerProps.put("mock.interceptor.append", appendStr) + val testProducer = createProducer() + + // producing records should succeed + testProducer.send(new ProducerRecord(tp.topic(), tp.partition(), s"key".getBytes, s"value will not be modified".getBytes)) + + // create consumer with interceptor that has different key and value types from the consumer + this.consumerConfig.setProperty(ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG, "org.apache.kafka.test.MockConsumerInterceptor") + val testConsumer = createConsumer() + + testConsumer.assign(List(tp).asJava) + testConsumer.seek(tp, 0) + + // consume and verify that values are not modified by interceptors -- their exceptions are caught and logged, but not propagated + val records = consumeRecords(testConsumer, 1) + val record = records.head + assertEquals(s"value will not be modified", new String(record.value())) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeMessagesWithCreateTime(quorum: String, groupProtocol: String): Unit = { + val numRecords = 50 + // Test non-compressed messages + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) + + // Test compressed messages + sendCompressedMessages(numRecords, tp2) + consumer.assign(List(tp2).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp2, startingOffset = 0) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumeMessagesWithLogAppendTime(quorum: String, groupProtocol: String): Unit = { + val topicName = "testConsumeMessagesWithLogAppendTime" + val topicProps = new Properties() + topicProps.setProperty(TopicConfig.MESSAGE_TIMESTAMP_TYPE_CONFIG, "LogAppendTime") + createTopic(topicName, 2, 2, topicProps) + + val startTime = System.currentTimeMillis() + val numRecords = 50 + + // Test non-compressed messages + val tp1 = new TopicPartition(topicName, 0) + val producer = createProducer() + sendRecords(producer, numRecords, tp1) + + val consumer = createConsumer() + consumer.assign(List(tp1).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp1, startingOffset = 0, + startingTimestamp = startTime, timestampType = TimestampType.LOG_APPEND_TIME) + + // Test compressed messages + val tp2 = new TopicPartition(topicName, 1) + sendCompressedMessages(numRecords, tp2) + consumer.assign(List(tp2).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, tp = tp2, startingOffset = 0, + startingTimestamp = startTime, timestampType = TimestampType.LOG_APPEND_TIME) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testListTopics(quorum: String, groupProtocol: String): Unit = { + val numParts = 2 + val topic1 = "part-test-topic-1" + val topic2 = "part-test-topic-2" + val topic3 = "part-test-topic-3" + createTopic(topic1, numParts) + createTopic(topic2, numParts) + createTopic(topic3, numParts) + + val consumer = createConsumer() + val topics = consumer.listTopics() + assertNotNull(topics) + assertEquals(5, topics.size()) + assertEquals(5, topics.keySet().size()) + assertEquals(2, topics.get(topic1).size) + assertEquals(2, topics.get(topic2).size) + assertEquals(2, topics.get(topic3).size) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPauseStateNotPreservedByRebalance(quorum: String, groupProtocol: String): Unit = { + if (groupProtocol.equals(GroupProtocol.CLASSIC.name)) { + this.consumerConfig.setProperty(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, "100") // timeout quickly to avoid slow test + this.consumerConfig.setProperty(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, "30") + } + val consumer = createConsumer() + + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords = 5, tp, startingTimestamp = startingTimestamp) + consumer.subscribe(List(topic).asJava) + consumeAndVerifyRecords(consumer = consumer, numRecords = 5, startingOffset = 0, startingTimestamp = startingTimestamp) + consumer.pause(List(tp).asJava) + + // subscribe to a new topic to trigger a rebalance + consumer.subscribe(List("topic2").asJava) + + // after rebalance, our position should be reset and our pause state lost, + // so we should be able to consume from the beginning + consumeAndVerifyRecords(consumer = consumer, numRecords = 0, startingOffset = 5, startingTimestamp = startingTimestamp) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLeadMetricsCleanUpWithSubscribe(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + val topic2 = "topic2" + createTopic(topic2, 2, brokerCount) + // send some messages. + val producer = createProducer() + sendRecords(producer, numMessages, tp) + // Test subscribe + // Create a consumer and consumer some messages. + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithSubscribe") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithSubscribe") + val consumer = createConsumer() + val listener = new TestConsumerReassignmentListener + consumer.subscribe(List(topic, topic2).asJava, listener) + val records = awaitNonEmptyRecords(consumer, tp) + assertEquals(1, listener.callsToAssigned, "should be assigned once") + // Verify the metric exist. + val tags1 = new util.HashMap[String, String]() + tags1.put("client-id", "testPerPartitionLeadMetricsCleanUpWithSubscribe") + tags1.put("topic", tp.topic()) + tags1.put("partition", String.valueOf(tp.partition())) + + val tags2 = new util.HashMap[String, String]() + tags2.put("client-id", "testPerPartitionLeadMetricsCleanUpWithSubscribe") + tags2.put("topic", tp2.topic()) + tags2.put("partition", String.valueOf(tp2.partition())) + val fetchLead0 = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1)) + assertNotNull(fetchLead0) + assertEquals(records.count.toDouble, fetchLead0.metricValue(), s"The lead should be ${records.count}") + + // Remove topic from subscription + consumer.subscribe(List(topic2).asJava, listener) + awaitRebalance(consumer, listener) + // Verify the metric has gone + assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags1))) + assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags2))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLagMetricsCleanUpWithSubscribe(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + val topic2 = "topic2" + createTopic(topic2, 2, brokerCount) + // send some messages. + val producer = createProducer() + sendRecords(producer, numMessages, tp) + // Test subscribe + // Create a consumer and consumer some messages. + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithSubscribe") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithSubscribe") + val consumer = createConsumer() + val listener = new TestConsumerReassignmentListener + consumer.subscribe(List(topic, topic2).asJava, listener) + val records = awaitNonEmptyRecords(consumer, tp) + assertEquals(1, listener.callsToAssigned, "should be assigned once") + // Verify the metric exist. + val tags1 = new util.HashMap[String, String]() + tags1.put("client-id", "testPerPartitionLagMetricsCleanUpWithSubscribe") + tags1.put("topic", tp.topic()) + tags1.put("partition", String.valueOf(tp.partition())) + + val tags2 = new util.HashMap[String, String]() + tags2.put("client-id", "testPerPartitionLagMetricsCleanUpWithSubscribe") + tags2.put("topic", tp2.topic()) + tags2.put("partition", String.valueOf(tp2.partition())) + val fetchLag0 = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1)) + assertNotNull(fetchLag0) + val expectedLag = numMessages - records.count + assertEquals(expectedLag, fetchLag0.metricValue.asInstanceOf[Double], epsilon, s"The lag should be $expectedLag") + + // Remove topic from subscription + consumer.subscribe(List(topic2).asJava, listener) + awaitRebalance(consumer, listener) + // Verify the metric has gone + assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags1))) + assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags2))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLeadMetricsCleanUpWithAssign(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + // Test assign + // send some messages. + val producer = createProducer() + sendRecords(producer, numMessages, tp) + sendRecords(producer, numMessages, tp2) + + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithAssign") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLeadMetricsCleanUpWithAssign") + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + val records = awaitNonEmptyRecords(consumer, tp) + // Verify the metric exist. + val tags = new util.HashMap[String, String]() + tags.put("client-id", "testPerPartitionLeadMetricsCleanUpWithAssign") + tags.put("topic", tp.topic()) + tags.put("partition", String.valueOf(tp.partition())) + val fetchLead = consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags)) + assertNotNull(fetchLead) + + assertEquals(records.count.toDouble, fetchLead.metricValue(), s"The lead should be ${records.count}") + + consumer.assign(List(tp2).asJava) + awaitNonEmptyRecords(consumer ,tp2) + assertNull(consumer.metrics.get(new MetricName("records-lead", "consumer-fetch-manager-metrics", "", tags))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLagMetricsCleanUpWithAssign(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + // Test assign + // send some messages. + val producer = createProducer() + sendRecords(producer, numMessages, tp) + sendRecords(producer, numMessages, tp2) + + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + val records = awaitNonEmptyRecords(consumer, tp) + // Verify the metric exist. + val tags = new util.HashMap[String, String]() + tags.put("client-id", "testPerPartitionLagMetricsCleanUpWithAssign") + tags.put("topic", tp.topic()) + tags.put("partition", String.valueOf(tp.partition())) + val fetchLag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) + assertNotNull(fetchLag) + + val expectedLag = numMessages - records.count + assertEquals(expectedLag, fetchLag.metricValue.asInstanceOf[Double], epsilon, s"The lag should be $expectedLag") + + consumer.assign(List(tp2).asJava) + awaitNonEmptyRecords(consumer, tp2) + assertNull(consumer.metrics.get(new MetricName(tp.toString + ".records-lag", "consumer-fetch-manager-metrics", "", tags))) + assertNull(consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testPerPartitionLagMetricsWhenReadCommitted(quorum: String, groupProtocol: String): Unit = { + val numMessages = 1000 + // send some messages. + val producer = createProducer() + sendRecords(producer, numMessages, tp) + sendRecords(producer, numMessages, tp2) + + consumerConfig.setProperty(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed") + consumerConfig.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") + consumerConfig.setProperty(ConsumerConfig.CLIENT_ID_CONFIG, "testPerPartitionLagMetricsCleanUpWithAssign") + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + awaitNonEmptyRecords(consumer, tp) + // Verify the metric exist. + val tags = new util.HashMap[String, String]() + tags.put("client-id", "testPerPartitionLagMetricsCleanUpWithAssign") + tags.put("topic", tp.topic()) + tags.put("partition", String.valueOf(tp.partition())) + val fetchLag = consumer.metrics.get(new MetricName("records-lag", "consumer-fetch-manager-metrics", "", tags)) + assertNotNull(fetchLag) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testQuotaMetricsNotCreatedIfNoQuotasConfigured(quorum: String, groupProtocol: String): Unit = { + val numRecords = 1000 + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + sendRecords(producer, numRecords, tp, startingTimestamp = startingTimestamp) + + val consumer = createConsumer() + consumer.assign(List(tp).asJava) + consumer.seek(tp, 0) + consumeAndVerifyRecords(consumer = consumer, numRecords = numRecords, startingOffset = 0, startingTimestamp = startingTimestamp) + + def assertNoMetric(broker: KafkaBroker, name: String, quotaType: QuotaType, clientId: String): Unit = { + val metricName = broker.metrics.metricName("throttle-time", + quotaType.toString, + "", + "user", "", + "client-id", clientId) + assertNull(broker.metrics.metric(metricName), "Metric should not have been created " + metricName) + } + brokers.foreach(assertNoMetric(_, "byte-rate", QuotaType.PRODUCE, producerClientId)) + brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.PRODUCE, producerClientId)) + brokers.foreach(assertNoMetric(_, "byte-rate", QuotaType.FETCH, consumerClientId)) + brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.FETCH, consumerClientId)) + + brokers.foreach(assertNoMetric(_, "request-time", QuotaType.REQUEST, producerClientId)) + brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.REQUEST, producerClientId)) + brokers.foreach(assertNoMetric(_, "request-time", QuotaType.REQUEST, consumerClientId)) + brokers.foreach(assertNoMetric(_, "throttle-time", QuotaType.REQUEST, consumerClientId)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumingWithNullGroupId(quorum: String, groupProtocol: String): Unit = { + val topic = "test_topic" + val partition = 0 + val tp = new TopicPartition(topic, partition) + createTopic(topic) + + val producer = createProducer() + producer.send(new ProducerRecord(topic, partition, "k1".getBytes, "v1".getBytes)).get() + producer.send(new ProducerRecord(topic, partition, "k2".getBytes, "v2".getBytes)).get() + producer.send(new ProducerRecord(topic, partition, "k3".getBytes, "v3".getBytes)).get() + producer.close() + + // consumer 1 uses the default group id and consumes from earliest offset + val consumer1Config = new Properties(consumerConfig) + consumer1Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + consumer1Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer1") + val consumer1 = createConsumer( + configOverrides = consumer1Config, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + + // consumer 2 uses the default group id and consumes from latest offset + val consumer2Config = new Properties(consumerConfig) + consumer2Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest") + consumer2Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer2") + val consumer2 = createConsumer( + configOverrides = consumer2Config, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + + // consumer 3 uses the default group id and starts from an explicit offset + val consumer3Config = new Properties(consumerConfig) + consumer3Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer3") + val consumer3 = createConsumer( + configOverrides = consumer3Config, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + + consumer1.assign(asList(tp)) + consumer2.assign(asList(tp)) + consumer3.assign(asList(tp)) + consumer3.seek(tp, 1) + + val numRecords1 = consumer1.poll(Duration.ofMillis(5000)).count() + assertThrows(classOf[InvalidGroupIdException], () => consumer1.commitSync()) + assertThrows(classOf[InvalidGroupIdException], () => consumer2.committed(Set(tp).asJava)) + + val numRecords2 = consumer2.poll(Duration.ofMillis(5000)).count() + val numRecords3 = consumer3.poll(Duration.ofMillis(5000)).count() -@Timeout(60) -class PlaintextConsumerTest extends AbstractConsumerTest { + consumer1.unsubscribe() + consumer2.unsubscribe() + consumer3.unsubscribe() + + assertTrue(consumer1.assignment().isEmpty) + assertTrue(consumer2.assignment().isEmpty) + assertTrue(consumer3.assignment().isEmpty) + + consumer1.close() + consumer2.close() + consumer3.close() + + assertEquals(3, numRecords1, "Expected consumer1 to consume from earliest offset") + assertEquals(0, numRecords2, "Expected consumer2 to consume from latest offset") + assertEquals(2, numRecords3, "Expected consumer3 to consume from offset 1") + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNullGroupIdNotSupportedIfCommitting(quorum: String, groupProtocol: String): Unit = { + val consumer1Config = new Properties(consumerConfig) + consumer1Config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + consumer1Config.put(ConsumerConfig.CLIENT_ID_CONFIG, "consumer1") + val consumer1 = createConsumer( + configOverrides = consumer1Config, + configsToRemove = List(ConsumerConfig.GROUP_ID_CONFIG)) + + consumer1.assign(List(tp).asJava) + assertThrows(classOf[InvalidGroupIdException], () => consumer1.commitSync()) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testStaticConsumerDetectsNewPartitionCreatedAfterRestart(quorum:String, groupProtocol: String): Unit = { + val foo = "foo" + val foo0 = new TopicPartition(foo, 0) + val foo1 = new TopicPartition(foo, 1) + + val admin = createAdminClient() + admin.createTopics(Seq(new NewTopic(foo, 1, 1.toShort)).asJava).all.get + + val consumerConfig = new Properties + consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, "my-group-id") + consumerConfig.put(ConsumerConfig.GROUP_INSTANCE_ID_CONFIG, "my-instance-id") + + val consumer1 = createConsumer(configOverrides = consumerConfig) + consumer1.subscribe(Seq(foo).asJava) + awaitAssignment(consumer1, Set(foo0)) + consumer1.close() + + val consumer2 = createConsumer(configOverrides = consumerConfig) + consumer2.subscribe(Seq(foo).asJava) + awaitAssignment(consumer2, Set(foo0)) + + admin.createPartitions(Map(foo -> NewPartitions.increaseTo(2)).asJava).all.get + + awaitAssignment(consumer2, Set(foo0, foo1)) + + consumer2.close() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testEndOffsets(quorum: String, groupProtocol: String): Unit = { + val producer = createProducer() + val startingTimestamp = System.currentTimeMillis() + val numRecords = 10000 + (0 until numRecords).map { i => + val timestamp = startingTimestamp + i.toLong + val record = new ProducerRecord(tp.topic(), tp.partition(), timestamp, s"key $i".getBytes, s"value $i".getBytes) + producer.send(record) + record + } + producer.flush() + + val consumer = createConsumer() + consumer.subscribe(List(topic).asJava) + awaitAssignment(consumer, Set(tp, tp2)) + + val endOffsets = consumer.endOffsets(Set(tp).asJava) + assertEquals(numRecords, endOffsets.get(tp)) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSeekThrowsIllegalStateIfPartitionsNotAssigned(quorum: String, groupProtocol: String): Unit = { + val tp = new TopicPartition(topic, 0) + val consumer = createConsumer(configOverrides = consumerConfig) + val e: Exception = assertThrows(classOf[IllegalStateException], () => consumer.seekToEnd(Collections.singletonList(tp))) + assertEquals("No current assignment for partition " + tp, e.getMessage) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchOffsetsForTime(quorum: String, groupProtocol: String): Unit = { + val numPartitions = 2 + val producer = createProducer() + val timestampsToSearch = new util.HashMap[TopicPartition, java.lang.Long]() + var i = 0 + for (part <- 0 until numPartitions) { + val tp = new TopicPartition(topic, part) + // key, val, and timestamp equal to the sequence number. + sendRecords(producer, numRecords = 100, tp, startingTimestamp = 0) + timestampsToSearch.put(tp, (i * 20).toLong) + i += 1 + } + + val consumer = createConsumer() + // Test negative target time + assertThrows(classOf[IllegalArgumentException], + () => consumer.offsetsForTimes(Collections.singletonMap(new TopicPartition(topic, 0), -1))) + val timestampOffsets = consumer.offsetsForTimes(timestampsToSearch) + + val timestampTp0 = timestampOffsets.get(new TopicPartition(topic, 0)) + assertEquals(0, timestampTp0.offset) + assertEquals(0, timestampTp0.timestamp) + assertEquals(Optional.of(0), timestampTp0.leaderEpoch) + + val timestampTp1 = timestampOffsets.get(new TopicPartition(topic, 1)) + assertEquals(20, timestampTp1.offset) + assertEquals(20, timestampTp1.timestamp) + assertEquals(Optional.of(0), timestampTp1.leaderEpoch) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + @Timeout(15) + def testPositionRespectsTimeout(quorum: String, groupProtocol: String): Unit = { + val topicPartition = new TopicPartition(topic, 15) + val consumer = createConsumer() + consumer.assign(List(topicPartition).asJava) + + // When position() is called for a topic/partition that doesn't exist, the consumer will repeatedly update the + // local metadata. However, it should give up after the user-supplied timeout has past. + assertThrows(classOf[TimeoutException], () => consumer.position(topicPartition, Duration.ofSeconds(3))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + @Timeout(15) + def testPositionRespectsWakeup(quorum: String, groupProtocol: String): Unit = { + val topicPartition = new TopicPartition(topic, 15) + val consumer = createConsumer() + consumer.assign(List(topicPartition).asJava) + + CompletableFuture.runAsync { () => + TimeUnit.SECONDS.sleep(1) + consumer.wakeup() + } + + assertThrows(classOf[WakeupException], () => consumer.position(topicPartition, Duration.ofSeconds(3))) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + @Timeout(15) + def testPositionWithErrorConnectionRespectsWakeup(quorum: String, groupProtocol: String): Unit = { + val topicPartition = new TopicPartition(topic, 15) + val properties = new Properties() + properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:12345") // make sure the connection fails + val consumer = createConsumer(configOverrides = properties) + consumer.assign(List(topicPartition).asJava) + + CompletableFuture.runAsync { () => + TimeUnit.SECONDS.sleep(1) + consumer.wakeup() + } + + assertThrows(classOf[WakeupException], () => consumer.position(topicPartition, Duration.ofSeconds(100))) + } @Flaky("KAFKA-18031") - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCloseLeavesGroupOnInterrupt(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCloseLeavesGroupOnInterrupt(quorum: String, groupProtocol: String): Unit = { val adminClient = createAdminClient() val consumer = createConsumer() val listener = new TestConsumerReassignmentListener() - consumer.subscribe(java.util.List.of(topic), listener) + consumer.subscribe(List(topic).asJava, listener) awaitRebalance(consumer, listener) assertEquals(1, listener.callsToAssigned) @@ -61,7 +862,7 @@ class PlaintextConsumerTest extends AbstractConsumerTest { () => { try { val groupId = config.getString(ConsumerConfig.GROUP_ID_CONFIG) - val groupDescription = adminClient.describeConsumerGroups(util.List.of(groupId)).describedGroups.get(groupId).get + val groupDescription = adminClient.describeConsumerGroups (Collections.singletonList (groupId) ).describedGroups.get (groupId).get groupDescription.members.isEmpty } catch { case _: ExecutionException | _: InterruptedException => diff --git a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala index 18d34ad05e9da..1acd22dc3fa4b 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextEndToEndAuthorizationTest.scala @@ -21,8 +21,10 @@ import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth._ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder import org.apache.kafka.clients.admin.AdminClientConfig -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import org.apache.kafka.common.errors.TopicAuthorizationException // This test case uses a separate listener for client and inter-broker communication, from @@ -86,8 +88,9 @@ class PlaintextEndToEndAuthorizationTest extends EndToEndAuthorizationTest { superuserClientConfig.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers(interBrokerListenerName)) } - @Test - def testListenerName(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListenerName(quorum: String): Unit = { // To check the client listener name, establish a session on the server by sending any request eg sendRecords val producer = createProducer() assertThrows(classOf[TopicAuthorizationException], () => sendRecords(producer, numRecords = 1, tp)) diff --git a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala index dc8b9423304ef..65eedf96e3a59 100644 --- a/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala +++ b/core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala @@ -26,9 +26,9 @@ import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.{InvalidTimestampException, RecordTooLargeException, SerializationException, TimeoutException} import org.apache.kafka.common.record.{DefaultRecord, DefaultRecordBatch, Records, TimestampType} import org.apache.kafka.common.serialization.ByteArraySerializer -import org.apache.kafka.server.config.ServerLogConfigs +import org.apache.kafka.storage.internals.log.LogConfig import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} +import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} @@ -37,25 +37,9 @@ import java.nio.charset.StandardCharsets class PlaintextProducerSendTest extends BaseProducerSendTest { - // topic auto creation is enabled by default, only some tests disable it - var disableAutoTopicCreation = false - - override def brokerOverrides: Properties = { - val props = super.brokerOverrides - if (disableAutoTopicCreation) { - props.put("auto.create.topics.enable", "false") - } - props - } - @BeforeEach - override def setUp(testInfo: TestInfo): Unit = { - disableAutoTopicCreation = testInfo.getDisplayName.contains("autoCreateTopicsEnabled=false") - super.setUp(testInfo) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testWrongSerializer(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testWrongSerializer(quorum: String, groupProtocol: String): Unit = { val producerProps = new Properties() producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) producerProps.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringSerializer") @@ -65,9 +49,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { assertThrows(classOf[SerializationException], () => producer.send(record)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testBatchSizeZero(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testBatchSizeZero(quorum: String, groupProtocol: String): Unit = { val producer = createProducer( lingerMs = Int.MaxValue, deliveryTimeoutMs = Int.MaxValue, @@ -76,9 +60,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { } @Timeout(value = 15, unit = TimeUnit.SECONDS, threadMode = Timeout.ThreadMode.SEPARATE_THREAD) - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testBatchSizeZeroNoPartitionNoRecordKey(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testBatchSizeZeroNoPartitionNoRecordKey(quorum: String, groupProtocol: String): Unit = { val producer = createProducer(batchSize = 0) val numRecords = 10 try { @@ -99,9 +83,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendCompressedMessageWithLogAppendTime(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendCompressedMessageWithLogAppendTime(quorum: String, groupProtocol: String): Unit = { val producer = createProducer( compressionType = "gzip", lingerMs = Int.MaxValue, @@ -109,9 +93,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendNonCompressedMessageWithLogAppendTime(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendNonCompressedMessageWithLogAppendTime(quorum: String, groupProtocol: String): Unit = { val producer = createProducer(lingerMs = Int.MaxValue, deliveryTimeoutMs = Int.MaxValue) sendAndVerifyTimestamp(producer, TimestampType.LOG_APPEND_TIME) } @@ -121,9 +105,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { * * The topic should be created upon sending the first message */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAutoCreateTopic(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCreateTopic(quorum: String, groupProtocol: String): Unit = { val producer = createProducer() try { // Send a message to auto-create the topic @@ -137,42 +121,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { } } - /** - * Test error message received when send fails waiting on metadata for a topic that does not exist. - * No need to run this for both rebalance protocols. - */ - @ParameterizedTest(name = "groupProtocol={0}.autoCreateTopicsEnabled={1}") - @MethodSource(Array("protocolAndAutoCreateTopicProviders")) - def testSendTimeoutErrorMessageWhenTopicDoesNotExist(groupProtocol: String, autoCreateTopicsEnabled: String): Unit = { - val producer = createProducer(maxBlockMs = 500) - val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes) - val exception = assertThrows(classOf[ExecutionException], () => producer.send(record).get) - assertInstanceOf(classOf[TimeoutException], exception.getCause) - assertEquals("Topic topic not present in metadata after 500 ms.", exception.getCause.getMessage) - } - - /** - * Test error message received when send fails waiting on metadata for a partition that does not exist (topic exists). - * No need to run this for both rebalance protocols. - */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersClassicGroupProtocolOnly")) - def testSendTimeoutErrorWhenPartitionDoesNotExist(groupProtocol: String): Unit = { - val producer = createProducer(maxBlockMs = 500) - // Send a message to auto-create the topic - var record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes) - assertEquals(0L, producer.send(record).get.offset, "Should have offset 0") - - // Send another message to the topic that exists but to a partition that does not - record = new ProducerRecord(topic, 10, "key".getBytes, "value".getBytes) - val exception = assertThrows(classOf[ExecutionException], () => producer.send(record).get) - assertInstanceOf(classOf[TimeoutException], exception.getCause) - assertEquals("Partition 10 of topic topic with partition count 4 is not present in metadata after 500 ms.", exception.getCause.getMessage) - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("timestampConfigProvider")) - def testSendWithInvalidBeforeAndAfterTimestamp(groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("quorumAndTimestampConfigProvider")) + def testSendWithInvalidBeforeAndAfterTimestamp(quorum: String, groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { val topicProps = new Properties() // set the TopicConfig for timestamp validation to have 1 minute threshold. Note that recordTimestamp has 5 minutes diff val oneMinuteInMs: Long = 1 * 60 * 60 * 1000L @@ -199,9 +150,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("timestampConfigProvider")) - def testValidBeforeAndAfterTimestampsAtThreshold(groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("quorumAndTimestampConfigProvider")) + def testValidBeforeAndAfterTimestampsAtThreshold(quorum: String, groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { val topicProps = new Properties() // set the TopicConfig for timestamp validation to be the same as the record timestamp @@ -219,9 +170,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { compressedProducer.close() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("timestampConfigProvider")) - def testValidBeforeAndAfterTimestampsWithinThreshold(groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("quorumAndTimestampConfigProvider")) + def testValidBeforeAndAfterTimestampsWithinThreshold(quorum: String, groupProtocol: String, messageTimeStampConfig: String, recordTimestamp: Long): Unit = { val topicProps = new Properties() // set the TopicConfig for timestamp validation to have 10 minute threshold. Note that recordTimestamp has 5 minutes diff @@ -243,9 +194,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { // Test that producer with max.block.ms=0 can be used to send in non-blocking mode // where requests are failed immediately without blocking if metadata is not available // or buffer is full. - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testNonBlockingProducer(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNonBlockingProducer(quorum: String, groupProtocol: String): Unit = { def send(producer: KafkaProducer[Array[Byte],Array[Byte]]): Future[RecordMetadata] = { producer.send(new ProducerRecord(topic, 0, "key".getBytes, new Array[Byte](1000))) @@ -299,9 +250,9 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { verifySendSuccess(future2) // previous batch should be completed and sent now } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendRecordBatchWithMaxRequestSizeAndHigher(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendRecordBatchWithMaxRequestSizeAndHigher(quorum: String, groupProtocol: String): Unit = { val producerProps = new Properties() producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) val producer = registerProducer(new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer)) @@ -311,7 +262,7 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { val valueLengthSize = 3 val overhead = Records.LOG_OVERHEAD + DefaultRecordBatch.RECORD_BATCH_OVERHEAD + DefaultRecord.MAX_RECORD_OVERHEAD + keyLengthSize + headerLengthSize + valueLengthSize - val valueSize = ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT - overhead + val valueSize = LogConfig.DEFAULT_MAX_MESSAGE_BYTES - overhead val record0 = new ProducerRecord(topic, new Array[Byte](0), new Array[Byte](valueSize)) assertEquals(record0.value.length, producer.send(record0).get.serializedValueSize) @@ -324,20 +275,14 @@ class PlaintextProducerSendTest extends BaseProducerSendTest { object PlaintextProducerSendTest { - def timestampConfigProvider: java.util.stream.Stream[Arguments] = { + def quorumAndTimestampConfigProvider: java.util.stream.Stream[Arguments] = { val now: Long = System.currentTimeMillis() val fiveMinutesInMs: Long = 5 * 60 * 60 * 1000L val data = new java.util.ArrayList[Arguments]() for (groupProtocol <- GroupProtocol.values().map(gp => gp.name.toLowerCase(Locale.ROOT))) { - data.add(Arguments.of(groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, Long.box(now - fiveMinutesInMs))) - data.add(Arguments.of(groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.box(now + fiveMinutesInMs))) + data.add(Arguments.of("kraft", groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_BEFORE_MAX_MS_CONFIG, Long.box(now - fiveMinutesInMs))) + data.add(Arguments.of("kraft", groupProtocol, TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.box(now + fiveMinutesInMs))) } data.stream() } - - def protocolAndAutoCreateTopicProviders: java.util.stream.Stream[Arguments] = { - val data = new java.util.ArrayList[Arguments]() - data.add(Arguments.of("classic", "false")) - data.stream() - } } \ No newline at end of file diff --git a/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala b/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala new file mode 100644 index 0000000000000..2782a46f18abf --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ProducerCompressionTest.scala @@ -0,0 +1,167 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api.test + +import kafka.server.{KafkaBroker, KafkaConfig, QuorumTestHarness} +import kafka.utils.TestUtils +import org.apache.kafka.clients.consumer.GroupProtocol +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord, RecordMetadata} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.header.Header +import org.apache.kafka.common.header.internals.{RecordHeader, RecordHeaders} +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.serialization.ByteArraySerializer +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.CsvSource + +import java.util.concurrent.Future +import java.util.{Collections, Properties} +import scala.collection.mutable.ListBuffer +import scala.util.Random + +class ProducerCompressionTest extends QuorumTestHarness { + + private val brokerId = 0 + private val topic = "topic" + private val numRecords = 2000 + + private var broker: KafkaBroker = _ + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + val props = TestUtils.createBrokerConfig(brokerId) + broker = createBroker(new KafkaConfig(props)) + } + + @AfterEach + override def tearDown(): Unit = { + TestUtils.shutdownServers(Seq(broker)) + super.tearDown() + } + + /** + * testCompression + * + * Compressed messages should be able to sent and consumed correctly + */ + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.compression={2}") + @CsvSource(value = Array( + "kraft,classic,none", + "kraft,consumer,none", + "kraft,classic,gzip", + "kraft,consumer,gzip", + "kraft,classic,snappy", + "kraft,consumer,snappy", + "kraft,classic,lz4", + "kraft,consumer,lz4", + "kraft,classic,zstd", + "kraft,consumer,zstd" + )) + def testCompression(quorum: String, groupProtocol: String, compression: String): Unit = { + val producerProps = new Properties() + val bootstrapServers = TestUtils.plaintextBootstrapServers(Seq(broker)) + producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers) + producerProps.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, compression) + producerProps.put(ProducerConfig.BATCH_SIZE_CONFIG, "66000") + producerProps.put(ProducerConfig.LINGER_MS_CONFIG, "200") + val producer = new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer) + val consumer = TestUtils.createConsumer(bootstrapServers, GroupProtocol.of(groupProtocol)) + + try { + // create topic + val admin = TestUtils.createAdminClient(Seq(broker), + ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) + try { + TestUtils.createTopicWithAdmin(admin, topic, Seq(broker), controllerServers) + } finally { + admin.close() + } + val partition = 0 + + def messageValue(length: Int): String = { + val random = new Random(0) + new String(random.alphanumeric.take(length).toArray) + } + + // prepare the messages + val messageValues = (0 until numRecords).map(i => messageValue(i)) + val headerArr = Array[Header](new RecordHeader("key", "value".getBytes)) + val headers = new RecordHeaders(headerArr) + + // make sure the returned messages are correct + val now = System.currentTimeMillis() + val responses: ListBuffer[Future[RecordMetadata]] = new ListBuffer[Future[RecordMetadata]]() + + for (message <- messageValues) { + // 1. send message without key and header + responses += producer.send(new ProducerRecord(topic, null, now, null, message.getBytes)) + // 2. send message with key, without header + responses += producer.send(new ProducerRecord(topic, null, now, message.length.toString.getBytes, message.getBytes)) + // 3. send message with key and header + responses += producer.send(new ProducerRecord(topic, null, now, message.length.toString.getBytes, message.getBytes, headers)) + } + for ((future, offset) <- responses.zipWithIndex) { + assertEquals(offset.toLong, future.get.offset) + } + + val tp = new TopicPartition(topic, partition) + // make sure the fetched message count match + consumer.assign(Collections.singleton(tp)) + consumer.seek(tp, 0) + val records = TestUtils.consumeRecords(consumer, numRecords*3) + + for (i <- 0 until numRecords) { + val messageValue = messageValues(i) + // 1. verify message without key and header + var offset = i * 3 + var record = records(offset) + assertNull(record.key()) + assertEquals(messageValue, new String(record.value)) + assertEquals(0, record.headers().toArray.length) + assertEquals(now, record.timestamp) + assertEquals(offset.toLong, record.offset) + + // 2. verify message with key, without header + offset = i * 3 + 1 + record = records(offset) + assertEquals(messageValue.length.toString, new String(record.key())) + assertEquals(messageValue, new String(record.value)) + assertEquals(0, record.headers().toArray.length) + assertEquals(now, record.timestamp) + assertEquals(offset.toLong, record.offset) + + // 3. verify message with key and header + offset = i * 3 + 2 + record = records(offset) + assertEquals(messageValue.length.toString, new String(record.key())) + assertEquals(messageValue, new String(record.value)) + assertEquals(1, record.headers().toArray.length) + assertEquals(headerArr.apply(0), record.headers().toArray.apply(0)) + assertEquals(now, record.timestamp) + assertEquals(offset.toLong, record.offset) + } + } finally { + producer.close() + consumer.close() + } + } +} diff --git a/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala new file mode 100644 index 0000000000000..1826df1c6dc7b --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ProducerFailureHandlingTest.scala @@ -0,0 +1,261 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util.concurrent.ExecutionException +import java.util.Properties +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.producer._ +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.errors._ +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.record.{DefaultRecord, DefaultRecordBatch} +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.server.config.{ServerConfigs, ReplicationConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} + +class ProducerFailureHandlingTest extends KafkaServerTestHarness { + private val producerBufferSize = 30000 + private val serverMessageMaxBytes = producerBufferSize/2 + private val replicaFetchMaxPartitionBytes = serverMessageMaxBytes + 200 + private val replicaFetchMaxResponseBytes = replicaFetchMaxPartitionBytes + 200 + + val numServers = 2 + + val overridingProps = new Properties() + overridingProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, false.toString) + overridingProps.put(ServerConfigs.MESSAGE_MAX_BYTES_CONFIG, serverMessageMaxBytes.toString) + overridingProps.put(ReplicationConfigs.REPLICA_FETCH_MAX_BYTES_CONFIG, replicaFetchMaxPartitionBytes.toString) + overridingProps.put(ReplicationConfigs.REPLICA_FETCH_RESPONSE_MAX_BYTES_DOC, replicaFetchMaxResponseBytes.toString) + // Set a smaller value for the number of partitions for the offset commit topic (__consumer_offset topic) + // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long + overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, 1.toString) + + def generateConfigs = + TestUtils.createBrokerConfigs(numServers, enableControlledShutdown = false).map(KafkaConfig.fromProps(_, overridingProps)) + + private var producer1: KafkaProducer[Array[Byte], Array[Byte]] = _ + private var producer2: KafkaProducer[Array[Byte], Array[Byte]] = _ + private var producer3: KafkaProducer[Array[Byte], Array[Byte]] = _ + private var producer4: KafkaProducer[Array[Byte], Array[Byte]] = _ + + private val topic1 = "topic-1" + private val topic2 = "topic-2" + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + + producer1 = TestUtils.createProducer(bootstrapServers(), acks = 0, retries = 0, requestTimeoutMs = 30000, maxBlockMs = 10000L, + bufferSize = producerBufferSize) + producer2 = TestUtils.createProducer(bootstrapServers(), acks = 1, retries = 0, requestTimeoutMs = 30000, maxBlockMs = 10000L, + bufferSize = producerBufferSize) + producer3 = TestUtils.createProducer(bootstrapServers(), acks = -1, retries = 0, requestTimeoutMs = 30000, maxBlockMs = 10000L, + bufferSize = producerBufferSize) + } + + @AfterEach + override def tearDown(): Unit = { + if (producer1 != null) producer1.close() + if (producer2 != null) producer2.close() + if (producer3 != null) producer3.close() + if (producer4 != null) producer4.close() + + super.tearDown() + } + + /** + * With ack == 0 the future metadata will have no exceptions with offset -1 + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTooLargeRecordWithAckZero(quorum: String): Unit = { + // create topic + createTopic(topic1, replicationFactor = numServers) + + // send a too-large record + val record = new ProducerRecord(topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1)) + + val recordMetadata = producer1.send(record).get() + assertNotNull(recordMetadata) + assertFalse(recordMetadata.hasOffset) + assertEquals(-1L, recordMetadata.offset) + } + + /** + * With ack == 1 the future metadata will throw ExecutionException caused by RecordTooLargeException + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTooLargeRecordWithAckOne(quorum: String): Unit = { + // create topic + createTopic(topic1, replicationFactor = numServers) + + // send a too-large record + val record = new ProducerRecord(topic1, null, "key".getBytes, new Array[Byte](serverMessageMaxBytes + 1)) + assertThrows(classOf[ExecutionException], () => producer2.send(record).get) + } + + private def checkTooLargeRecordForReplicationWithAckAll(maxFetchSize: Int): Unit = { + val maxMessageSize = maxFetchSize + 100 + val topicConfig = new Properties + topicConfig.setProperty(TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, numServers.toString) + topicConfig.setProperty(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageSize.toString) + + // create topic + val topic10 = "topic10" + createTopic(topic10, numPartitions = brokers.size, replicationFactor = numServers, topicConfig) + + // send a record that is too large for replication, but within the broker max message limit + val value = new Array[Byte](maxMessageSize - DefaultRecordBatch.RECORD_BATCH_OVERHEAD - DefaultRecord.MAX_RECORD_OVERHEAD) + val record = new ProducerRecord[Array[Byte], Array[Byte]](topic10, null, value) + val recordMetadata = producer3.send(record).get + + assertEquals(topic10, recordMetadata.topic) + } + + /** This should succeed as the replica fetcher thread can handle oversized messages since KIP-74 */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPartitionTooLargeForReplicationWithAckAll(quorum: String): Unit = { + checkTooLargeRecordForReplicationWithAckAll(replicaFetchMaxPartitionBytes) + } + + /** This should succeed as the replica fetcher thread can handle oversized messages since KIP-74 */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testResponseTooLargeForReplicationWithAckAll(quorum: String): Unit = { + checkTooLargeRecordForReplicationWithAckAll(replicaFetchMaxResponseBytes) + } + + /** + * With non-exist-topic the future metadata should return ExecutionException caused by TimeoutException + */ + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testNonExistentTopic(quorum: String): Unit = { + // send a record with non-exist topic + val record = new ProducerRecord(topic2, null, "key".getBytes, "value".getBytes) + assertThrows(classOf[ExecutionException], () => producer1.send(record).get) + } + + /** + * With incorrect broker-list the future metadata should return ExecutionException caused by TimeoutException + * + * TODO: other exceptions that can be thrown in ExecutionException: + * UnknownTopicOrPartitionException + * NotLeaderOrFollowerException + * LeaderNotAvailableException + * CorruptRecordException + * TimeoutException + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testWrongBrokerList(quorum: String): Unit = { + // create topic + createTopic(topic1, replicationFactor = numServers) + + // producer with incorrect broker list + producer4 = TestUtils.createProducer("localhost:8686,localhost:4242", acks = 1, maxBlockMs = 10000L, bufferSize = producerBufferSize) + + // send a record with incorrect broker list + val record = new ProducerRecord(topic1, null, "key".getBytes, "value".getBytes) + assertThrows(classOf[ExecutionException], () => producer4.send(record).get) + } + + /** + * Send with invalid partition id should return ExecutionException caused by TimeoutException + * when partition is higher than the upper bound of partitions. + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidPartition(quorum: String): Unit = { + // create topic with a single partition + createTopic(topic1, replicationFactor = numServers) + + // create a record with incorrect partition id (higher than the number of partitions), send should fail + val higherRecord = new ProducerRecord(topic1, 1, "key".getBytes, "value".getBytes) + val e = assertThrows(classOf[ExecutionException], () => producer1.send(higherRecord).get) + assertEquals(classOf[TimeoutException], e.getCause.getClass) + } + + /** + * The send call after producer closed should throw IllegalStateException + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSendAfterClosed(quorum: String): Unit = { + // create topic + createTopic(topic1, replicationFactor = numServers) + + val record = new ProducerRecord[Array[Byte], Array[Byte]](topic1, null, "key".getBytes, "value".getBytes) + + // first send a message to make sure the metadata is refreshed + producer1.send(record).get + producer2.send(record).get + producer3.send(record).get + + producer1.close() + assertThrows(classOf[IllegalStateException], () => producer1.send(record)) + producer2.close() + assertThrows(classOf[IllegalStateException], () => producer2.send(record)) + producer3.close() + assertThrows(classOf[IllegalStateException], () => producer3.send(record)) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCannotSendToInternalTopic(quorum: String): Unit = { + + createOffsetsTopic() + val thrown = assertThrows(classOf[ExecutionException], + () => producer2.send(new ProducerRecord(Topic.GROUP_METADATA_TOPIC_NAME, "test".getBytes, "test".getBytes)).get) + assertTrue(thrown.getCause.isInstanceOf[InvalidTopicException], "Unexpected exception while sending to an invalid topic " + thrown.getCause) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testNotEnoughReplicasAfterBrokerShutdown(quorum: String): Unit = { + val topicName = "minisrtest2" + val topicProps = new Properties() + topicProps.put("min.insync.replicas", numServers.toString) + + createTopic(topicName, replicationFactor = numServers, topicConfig = topicProps) + + val record = new ProducerRecord(topicName, null, "key".getBytes, "value".getBytes) + // this should work with all brokers up and running + producer3.send(record).get + + // shut down one broker + brokers.head.shutdown() + brokers.head.awaitShutdown() + val e = assertThrows(classOf[ExecutionException], () => producer3.send(record).get) + assertTrue(e.getCause.isInstanceOf[NotEnoughReplicasException] || + e.getCause.isInstanceOf[NotEnoughReplicasAfterAppendException] || + e.getCause.isInstanceOf[TimeoutException]) + + // restart the server + brokers.head.startup() + } + +} diff --git a/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala b/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala new file mode 100644 index 0000000000000..6f50b60aa15d9 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ProducerIdExpirationTest.scala @@ -0,0 +1,254 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util +import java.util.{Collections, Properties} +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.{TestInfoUtils, TestUtils} +import kafka.utils.TestUtils.{consumeRecords, createAdminClient} +import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, ProducerState} +import org.apache.kafka.clients.consumer.Consumer +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} +import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.common.config.ConfigResource +import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException} +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource +import org.opentest4j.AssertionFailedError + +import scala.collection.Seq + +class ProducerIdExpirationTest extends KafkaServerTestHarness { + val topic1 = "topic1" + val numPartitions = 1 + val replicationFactor = 3 + val tp0 = new TopicPartition(topic1, 0) + val configResource = new ConfigResource(ConfigResource.Type.BROKER, "") + + var producer: KafkaProducer[Array[Byte], Array[Byte]] = _ + var consumer: Consumer[Array[Byte], Array[Byte]] = _ + var admin: Admin = _ + + override def generateConfigs: Seq[KafkaConfig] = { + TestUtils.createBrokerConfigs(3).map(KafkaConfig.fromProps(_, serverProps())) + } + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + consumer = TestUtils.createConsumer(bootstrapServers(), + groupProtocolFromTestParameters(), + enableAutoCommit = false, + readCommitted = true) + admin = createAdminClient(brokers, listenerName) + + createTopic(topic1, numPartitions, 3) + } + + @AfterEach + override def tearDown(): Unit = { + if (producer != null) + producer.close() + if (consumer != null) + consumer.close() + if (admin != null) + admin.close() + + super.tearDown() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProducerIdExpirationWithNoTransactions(quorum: String, groupProtocol: String): Unit = { + producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true) + + // Send records to populate producer state cache. + producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes)) + producer.flush() + + // Ensure producer IDs are added. + ensureConsistentKRaftMetadata() + assertEquals(1, producerState.size) + + // Wait for the producer ID to expire. + TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.") + + // Send more records to send producer ID back to brokers. + producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes)) + producer.flush() + + // Producer IDs should repopulate. + assertEquals(1, producerState.size) + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTransactionAfterTransactionIdExpiresButProducerIdRemains(quorum: String, groupProtocol: String): Unit = { + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() + + // Start and then abort a transaction to allow the producer ID to expire. + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false)) + producer.flush() + + // Ensure producer IDs are added. + TestUtils.waitUntilTrue(() => producerState.size == 1, "Producer IDs were not added.") + + producer.abortTransaction() + + // Wait for the transactional ID to expire. + waitUntilTransactionalStateExpires() + + // Producer IDs should be retained. + assertEquals(1, producerState.size) + + // Start a new transaction and attempt to send, triggering an AddPartitionsToTxnRequest that will fail + // due to the expired transactional ID, resulting in a fatal error. + producer.beginTransaction() + val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "1", "1", willBeCommitted = false)) + TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.") + org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException]) + + // Assert that aborting the transaction throws a KafkaException due to the fatal error. + assertThrows(classOf[KafkaException], () => producer.abortTransaction()) + + // Close the producer and reinitialize to recover from the fatal error. + producer.close() + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() + + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "4", "4", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "3", "3", willBeCommitted = true)) + + // Producer IDs should be retained. + assertTrue(producerState.size() > 0) + + producer.commitTransaction() + + // Check we can still consume the transaction. + consumer.subscribe(Collections.singletonList(topic1)) + + val records = consumeRecords(consumer, 2) + records.foreach { record => + TestUtils.assertCommittedAndGetValue(record) + } + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDynamicProducerIdExpirationMs(quorum: String, groupProtocol: String): Unit = { + producer = TestUtils.createProducer(bootstrapServers(), enableIdempotence = true) + + // Send records to populate producer state cache. + producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes)) + producer.flush() + + // Ensure producer IDs are added. + ensureConsistentKRaftMetadata() + assertEquals(1, producerState.size) + + // Wait for the producer ID to expire. + TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.") + + // Update the producer ID expiration ms to a very high value. + admin.incrementalAlterConfigs(producerIdExpirationConfig("100000")) + + brokers.foreach(broker => TestUtils.waitUntilTrue(() => broker.logManager.producerStateManagerConfig.producerIdExpirationMs == 100000, "Configuration was not updated.")) + + // Send more records to send producer ID back to brokers. + producer.send(new ProducerRecord(topic1, 0, null, "key".getBytes, "value".getBytes)) + producer.flush() + + // Producer IDs should repopulate. + assertEquals(1, producerState.size) + + // Ensure producer ID does not expire within 4 seconds. + assertThrows(classOf[AssertionFailedError], () => + TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.", 4000) + ) + + // Update the expiration time to a low value again. + admin.incrementalAlterConfigs(producerIdExpirationConfig("100")).all().get() + + // restart a broker to ensure that dynamic config changes are picked up on restart + killBroker(0) + restartDeadBrokers() + + brokers.foreach(broker => TestUtils.waitUntilTrue(() => broker.logManager.producerStateManagerConfig.producerIdExpirationMs == 100, "Configuration was not updated.")) + + // Ensure producer ID expires quickly again. + TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer ID did not expire.") + } + + private def producerState: util.List[ProducerState] = { + val describeResult = admin.describeProducers(Collections.singletonList(tp0)) + val activeProducers = describeResult.partitionResult(tp0).get().activeProducers + activeProducers + } + + private def producerIdExpirationConfig(configValue: String): util.Map[ConfigResource, util.Collection[AlterConfigOp]] = { + val producerIdCfg = new ConfigEntry(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG, configValue) + val configs = Collections.singletonList(new AlterConfigOp(producerIdCfg, AlterConfigOp.OpType.SET)) + Collections.singletonMap(configResource, configs) + } + + private def waitUntilTransactionalStateExpires(): Unit = { + TestUtils.waitUntilTrue(() => { + var removedTransactionState = false + val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer") + try { + txnDescribeResult.get() + } catch { + case e: Exception => { + removedTransactionState = e.getCause.isInstanceOf[TransactionalIdNotFoundException] + } + } + removedTransactionState + }, "Transaction state never expired.") + } + + private def serverProps(): Properties = { + val serverProps = new Properties() + serverProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, false.toString) + // Set a smaller value for the number of partitions for the __consumer_offsets topic + // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long. + serverProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, 1.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, 3.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, 2.toString) + serverProps.put(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, true.toString) + serverProps.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, false.toString) + serverProps.put(ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG, false.toString) + serverProps.put(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, "200") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONAL_ID_EXPIRATION_MS_CONFIG, "5000") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONS_REMOVE_EXPIRED_TRANSACTIONAL_ID_CLEANUP_INTERVAL_MS_CONFIG, "500") + serverProps.put(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG, "10000") + serverProps.put(TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_CONFIG, "500") + serverProps + } +} diff --git a/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala new file mode 100644 index 0000000000000..f32c4433b45bb --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ProducerRebootstrapTest.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import org.apache.kafka.clients.producer.ProducerRecord +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Disabled +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +class ProducerRebootstrapTest extends RebootstrapTest { + @Disabled("KAFKA-17986") + @ParameterizedTest(name = "{displayName}.quorum=kraft.useRebootstrapTriggerMs={0}") + @ValueSource(booleans = Array(false, true)) + def testRebootstrap(useRebootstrapTriggerMs: Boolean): Unit = { + server1.shutdown() + server1.awaitShutdown() + + val producer = createProducer(configOverrides = clientOverrides(useRebootstrapTriggerMs)) + + // Only the server 0 is available for the producer during the bootstrap. + val recordMetadata0 = producer.send(new ProducerRecord(topic, part, "key 0".getBytes, "value 0".getBytes)).get() + assertEquals(0, recordMetadata0.offset()) + + server0.shutdown() + server0.awaitShutdown() + server1.startup() + + // The server 0, originally cached during the bootstrap, is offline. + // However, the server 1 from the bootstrap list is online. + // Should be able to produce records. + val recordMetadata1 = producer.send(new ProducerRecord(topic, part, "key 1".getBytes, "value 1".getBytes)).get() + assertEquals(0, recordMetadata1.offset()) + + server1.shutdown() + server1.awaitShutdown() + server0.startup() + + // The same situation, but the server 1 has gone and server 0 is back. + val recordMetadata2 = producer.send(new ProducerRecord(topic, part, "key 1".getBytes, "value 1".getBytes)).get() + assertEquals(1, recordMetadata2.offset()) + } +} diff --git a/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala b/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala new file mode 100644 index 0000000000000..0ee52530e57ff --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/ProducerSendWhileDeletionTest.scala @@ -0,0 +1,88 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import kafka.utils.TestUtils +import org.apache.kafka.clients.admin.NewPartitionReassignment +import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +import java.nio.charset.StandardCharsets +import java.util +import java.util.Optional +import scala.jdk.CollectionConverters._ + + +class ProducerSendWhileDeletionTest extends IntegrationTestHarness { + val producerCount: Int = 1 + val brokerCount: Int = 2 + val defaultLingerMs: Int = 5; + + serverConfig.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, 2.toString) + serverConfig.put(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, 2.toString) + serverConfig.put(ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG, false.toString) + + producerConfig.put(ProducerConfig.MAX_BLOCK_MS_CONFIG, 5000L.toString) + producerConfig.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 10000.toString) + producerConfig.put(ProducerConfig.DELIVERY_TIMEOUT_MS_CONFIG, (10000 + defaultLingerMs).toString) + + /** + * Tests that Producer gets self-recovered when a topic is deleted mid-way of produce. + * + * Producer will attempt to send messages to the partition specified in each record, and should + * succeed as long as the partition is included in the metadata. + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSendWithTopicDeletionMidWay(quorum: String): Unit = { + val numRecords = 10 + val topic = "topic" + + // Create topic with leader as 0 for the 2 partitions. + createTopicWithAssignment(topic, Map(0 -> Seq(0, 1), 1 -> Seq(0, 1))) + + val reassignment = Map( + new TopicPartition(topic, 0) -> Optional.of(new NewPartitionReassignment(util.Arrays.asList(1, 0))), + new TopicPartition(topic, 1) -> Optional.of(new NewPartitionReassignment(util.Arrays.asList(1, 0))) + ) + + // Change leader to 1 for both the partitions to increase leader epoch from 0 -> 1 + val admin = createAdminClient() + admin.alterPartitionReassignments(reassignment.asJava).all().get() + + val producer = createProducer() + + (1 to numRecords).foreach { i => + val resp = producer.send(new ProducerRecord(topic, null, ("value" + i).getBytes(StandardCharsets.UTF_8))).get + assertEquals(topic, resp.topic()) + } + + // Start topic deletion + deleteTopic(topic, listenerName) + + // Verify that the topic is deleted when no metadata request comes in + TestUtils.verifyTopicDeletion(topic, 2, brokers) + + // Producer should be able to send messages even after topic gets deleted and auto-created + assertEquals(topic, producer.send(new ProducerRecord(topic, null, "value".getBytes(StandardCharsets.UTF_8))).get.topic()) + } + +} diff --git a/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala b/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala new file mode 100644 index 0000000000000..03a312d5f077a --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/RackAwareAutoTopicCreationTest.scala @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import java.util.Properties +import kafka.admin.RackAwareTest +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.{TestInfoUtils, TestUtils} +import org.apache.kafka.clients.admin.Admin +import org.apache.kafka.clients.producer.ProducerRecord +import org.apache.kafka.server.config.{ReplicationConfigs, ServerLogConfigs} +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import scala.collection.Map +import scala.jdk.CollectionConverters.ListHasAsScala + +class RackAwareAutoTopicCreationTest extends KafkaServerTestHarness with RackAwareTest { + val numServers = 4 + val numPartitions = 8 + val replicationFactor = 2 + val overridingProps = new Properties() + var admin: Admin = _ + overridingProps.put(ServerLogConfigs.NUM_PARTITIONS_CONFIG, numPartitions.toString) + overridingProps.put(ReplicationConfigs.DEFAULT_REPLICATION_FACTOR_CONFIG, replicationFactor.toString) + + def generateConfigs = + (0 until numServers) map { node => + TestUtils.createBrokerConfig(node, enableControlledShutdown = false, rack = Some((node / 2).toString)) + } map (KafkaConfig.fromProps(_, overridingProps)) + + private val topic = "topic" + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + admin = TestUtils.createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) + } + + @AfterEach + override def tearDown(): Unit = { + if (admin != null) admin.close() + super.tearDown() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAutoCreateTopic(quorum: String, groupProtocol: String): Unit = { + val producer = TestUtils.createProducer(bootstrapServers()) + try { + // Send a message to auto-create the topic + val record = new ProducerRecord(topic, null, "key".getBytes, "value".getBytes) + assertEquals(0L, producer.send(record).get.offset, "Should have offset 0") + + // double check that the topic is created with leader elected + TestUtils.waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic, 0) + val assignment = getReplicaAssignment(topic) + val brokerMetadatas = brokers.head.metadataCache.getAliveBrokers() + val expectedMap = Map(0 -> "0", 1 -> "0", 2 -> "1", 3 -> "1") + assertEquals(expectedMap, brokerMetadatas.map(b => b.id -> b.rack.get).toMap) + checkReplicaDistribution(assignment, expectedMap, numServers, numPartitions, replicationFactor, + verifyLeaderDistribution = false) + } finally producer.close() + } + + private def getReplicaAssignment(topic: String): Map[Int, Seq[Int]] = { + TestUtils.describeTopic(admin, topic).partitions.asScala.map { partition => + partition.partition -> partition.replicas.asScala.map(_.id).toSeq + }.toMap + } +} + diff --git a/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala new file mode 100644 index 0000000000000..68982405370b4 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/RebootstrapTest.scala @@ -0,0 +1,57 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.api + +import kafka.server.{KafkaBroker, KafkaConfig} +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig + +import java.util.Properties + +abstract class RebootstrapTest extends AbstractConsumerTest { + override def brokerCount: Int = 2 + + def server0: KafkaBroker = serverForId(0).get + def server1: KafkaBroker = serverForId(1).get + + override def generateConfigs: Seq[KafkaConfig] = { + val overridingProps = new Properties() + overridingProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, brokerCount.toString) + overridingProps.put(TopicConfig.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") + + // In this test, fixed ports are necessary, because brokers must have the + // same port after the restart. + FixedPortTestUtils.createBrokerConfigs(brokerCount, enableControlledShutdown = false) + .map(KafkaConfig.fromProps(_, overridingProps)) + } + + def clientOverrides(useRebootstrapTriggerMs: Boolean): Properties = { + val overrides = new Properties() + if (useRebootstrapTriggerMs) { + overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "5000") + } else { + overrides.put(CommonClientConfigs.METADATA_RECOVERY_REBOOTSTRAP_TRIGGER_MS_CONFIG, "3600000") + overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MS_CONFIG, "5000") + overrides.put(CommonClientConfigs.SOCKET_CONNECTION_SETUP_TIMEOUT_MAX_MS_CONFIG, "5000") + overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MS_CONFIG, "1000") + overrides.put(CommonClientConfigs.RECONNECT_BACKOFF_MAX_MS_CONFIG, "1000") + } + overrides.put(CommonClientConfigs.METADATA_RECOVERY_STRATEGY_CONFIG, "rebootstrap") + overrides + } +} diff --git a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala index c08c43081e6a2..03a987c54b44e 100644 --- a/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslClientsWithInvalidCredentialsTest.scala @@ -15,14 +15,15 @@ package kafka.api import kafka.security.JaasTestUtils import java.time.Duration -import java.util.Properties +import java.util.{Collections, Properties} import java.util.concurrent.{ExecutionException, TimeUnit} +import scala.jdk.CollectionConverters._ import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig} import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} import org.apache.kafka.common.{KafkaException, TopicPartition} import org.apache.kafka.common.errors.SaslAuthenticationException -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.common.config.SaslConfigs @@ -36,6 +37,7 @@ import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import scala.jdk.javaapi.OptionConverters import scala.util.Using + class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { private val kafkaClientSaslMechanism = "SCRAM-SHA-256" private val kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) @@ -61,7 +63,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setScramArguments( - java.util.List.of(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) + List(s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) } override def createPrivilegedAdminClient() = { @@ -89,7 +91,7 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { closeSasl() } - @ParameterizedTest(name="{displayName}.isIdempotenceEnabled={0}") + @ParameterizedTest(name="{displayName}.quorum=kraft.isIdempotenceEnabled={0}") @ValueSource(booleans = Array(true, false)) def testProducerWithAuthenticationFailure(isIdempotenceEnabled: Boolean): Unit = { val prop = new Properties() @@ -109,8 +111,9 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(sendOneRecord(producer2))() } - @Test - def testTransactionalProducerWithAuthenticationFailure(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTransactionalProducerWithAuthenticationFailure(quorum: String): Unit = { val txProducer = createTransactionalProducer() verifyAuthenticationException(txProducer.initTransactions()) @@ -118,28 +121,28 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { assertThrows(classOf[KafkaException], () => txProducer.initTransactions()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsumerWithAuthenticationFailure(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsumerWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.subscribe(java.util.List.of(topic)) + consumer.subscribe(List(topic).asJava) verifyConsumerWithAuthenticationFailure(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testManualAssignmentConsumerWithAuthenticationFailure(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testManualAssignmentConsumerWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) verifyConsumerWithAuthenticationFailure(consumer) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testManualAssignmentConsumerWithAutoCommitDisabledWithAuthenticationFailure(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testManualAssignmentConsumerWithAutoCommitDisabledWithAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { this.consumerConfig.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false.toString) val consumer = createConsumer() - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) consumer.seek(tp, 0) verifyConsumerWithAuthenticationFailure(consumer) } @@ -154,15 +157,16 @@ class SaslClientsWithInvalidCredentialsTest extends AbstractSaslTest { verifyWithRetry(consumer.poll(Duration.ofMillis(1000)))(_.count == 1) } - @Test - def testKafkaAdminClientWithAuthenticationFailure(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testKafkaAdminClientWithAuthenticationFailure(quorum: String): Unit = { val props = JaasTestUtils.adminClientSecurityConfigs(securityProtocol, OptionConverters.toJava(trustStoreFile), OptionConverters.toJava(clientSaslProperties)) props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers()) val adminClient = Admin.create(props) def describeTopic(): Unit = { try { - val response = adminClient.describeTopics(java.util.Set.of(topic)).allTopicNames.get + val response = adminClient.describeTopics(Collections.singleton(topic)).allTopicNames.get assertEquals(1, response.size) response.forEach { (_, description) => assertEquals(numPartitions, description.partitions.size) diff --git a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala index ceff3d4d6b0a7..ec81a98d725b7 100644 --- a/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslEndToEndAuthorizationTest.scala @@ -26,6 +26,8 @@ import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue, fail} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource +import scala.jdk.CollectionConverters._ + abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { override protected def securityProtocol = SecurityProtocol.SASL_SSL override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) @@ -56,9 +58,9 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { * the second one connects ok, but fails to consume messages due to the ACL. */ @Timeout(15) - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testTwoConsumersWithDifferentSaslCredentials(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTwoConsumersWithDifferentSaslCredentials(quorum: String, groupProtocol: String): Unit = { setAclsAndProduce(tp) consumerConfig.putIfAbsent(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer1 = createConsumer() @@ -68,8 +70,8 @@ abstract class SaslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { consumerConfig.remove(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS) val consumer2 = createConsumer() - consumer1.assign(java.util.List.of(tp)) - consumer2.assign(java.util.List.of(tp)) + consumer1.assign(List(tp).asJava) + consumer2.assign(List(tp).asJava) consumeRecords(consumer1, numRecords) diff --git a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala index 920dc109ea99c..b41ccb6316caf 100644 --- a/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslMultiMechanismConsumerTest.scala @@ -19,6 +19,8 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource +import scala.jdk.CollectionConverters._ + @Timeout(600) class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { private val kafkaClientSaslMechanism = "PLAIN" @@ -41,9 +43,9 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { closeSasl() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMultipleBrokerMechanisms(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMultipleBrokerMechanisms(quorum: String, groupProtocol: String): Unit = { val plainSaslProducer = createProducer() val plainSaslConsumer = createConsumer() @@ -56,7 +58,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/PLAIN producer and consumer var startingTimestamp = System.currentTimeMillis() sendRecords(plainSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - plainSaslConsumer.assign(java.util.List.of(tp)) + plainSaslConsumer.assign(List(tp).asJava) plainSaslConsumer.seek(tp, 0) consumeAndVerifyRecords(consumer = plainSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -66,7 +68,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/GSSAPI producer and consumer startingTimestamp = System.currentTimeMillis() sendRecords(gssapiSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - gssapiSaslConsumer.assign(java.util.List.of(tp)) + gssapiSaslConsumer.assign(List(tp).asJava) gssapiSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = gssapiSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -76,7 +78,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/PLAIN producer and SASL/GSSAPI consumer startingTimestamp = System.currentTimeMillis() sendRecords(plainSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - gssapiSaslConsumer.assign(java.util.List.of(tp)) + gssapiSaslConsumer.assign(List(tp).asJava) gssapiSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = gssapiSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) @@ -85,7 +87,7 @@ class SaslMultiMechanismConsumerTest extends BaseConsumerTest with SaslSetup { // Test SASL/GSSAPI producer and SASL/PLAIN consumer startingTimestamp = System.currentTimeMillis() sendRecords(gssapiSaslProducer, numRecords, tp, startingTimestamp = startingTimestamp) - plainSaslConsumer.assign(java.util.List.of(tp)) + plainSaslConsumer.assign(List(tp).asJava) plainSaslConsumer.seek(tp, startingOffset) consumeAndVerifyRecords(consumer = plainSaslConsumer, numRecords = numRecords, startingOffset = startingOffset, startingTimestamp = startingTimestamp) diff --git a/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala b/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala new file mode 100644 index 0000000000000..09f1f5119b134 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/SaslPlainPlaintextConsumerTest.scala @@ -0,0 +1,46 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE + * file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file + * to You under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the + * License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ +package kafka.api + +import kafka.security.JaasTestUtils +import kafka.utils.TestUtils +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.junit.jupiter.api._ + +import java.util.Locale + +@Timeout(600) +class SaslPlainPlaintextConsumerTest extends BaseConsumerTest with SaslSetup { + override protected def listenerName = new ListenerName("CLIENT") + private val kafkaClientSaslMechanism = "PLAIN" + private val kafkaServerSaslMechanisms = List(kafkaClientSaslMechanism) + private val kafkaServerJaasEntryName = + s"${listenerName.value.toLowerCase(Locale.ROOT)}.${JaasTestUtils.KAFKA_SERVER_CONTEXT_NAME}" + override protected def securityProtocol = SecurityProtocol.SASL_PLAINTEXT + override protected lazy val trustStoreFile = Some(TestUtils.tempFile("truststore", ".jks")) + override protected val serverSaslProperties = Some(kafkaServerSaslProperties(kafkaServerSaslMechanisms, kafkaClientSaslMechanism)) + override protected val clientSaslProperties = Some(kafkaClientSaslProperties(kafkaClientSaslMechanism)) + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + startSasl(jaasSections(kafkaServerSaslMechanisms, Some(kafkaClientSaslMechanism), kafkaServerJaasEntryName)) + super.setUp(testInfo) + } + + @AfterEach + override def tearDown(): Unit = { + super.tearDown() + closeSasl() + } +} diff --git a/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala index 8bff9b25e1734..9d72f2b060cd7 100644 --- a/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslScramSslEndToEndAuthorizationTest.scala @@ -27,7 +27,9 @@ import org.apache.kafka.test.TestSslUtils import scala.jdk.CollectionConverters._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource class SaslScramSslEndToEndAuthorizationTest extends SaslEndToEndAuthorizationTest { override protected def kafkaClientSaslMechanism = "SCRAM-SHA-256" @@ -47,8 +49,8 @@ class SaslScramSslEndToEndAuthorizationTest extends SaslEndToEndAuthorizationTes // Create the admin credentials for KRaft as part of controller initialization override def addFormatterSettings(formatter: Formatter): Unit = { formatter.setClusterId("XcZZOzUqS4yHOjhMQB6JLQ") - formatter.setScramArguments(java.util.List.of( - s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]")) + formatter.setScramArguments(List( + s"SCRAM-SHA-256=[name=${JaasTestUtils.KAFKA_SCRAM_ADMIN},password=${JaasTestUtils.KAFKA_SCRAM_ADMIN_PASSWORD}]").asJava) } override def configureListeners(props: collection.Seq[Properties]): Unit = { @@ -66,8 +68,9 @@ class SaslScramSslEndToEndAuthorizationTest extends SaslEndToEndAuthorizationTes createScramCredentialsViaPrivilegedAdminClient(JaasTestUtils.KAFKA_SCRAM_USER_2, JaasTestUtils.KAFKA_SCRAM_PASSWORD_2) } - @Test - def testAuthentications(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAuthentications(quorum: String): Unit = { val successfulAuths = TestUtils.totalMetricValue(brokers.head, "successful-authentication-total") assertTrue(successfulAuths > 0, "No successful authentications") val failedAuths = TestUtils.totalMetricValue(brokers.head, "failed-authentication-total") diff --git a/core/src/test/scala/integration/kafka/api/SaslSetup.scala b/core/src/test/scala/integration/kafka/api/SaslSetup.scala index caef826127c96..b7d2d920fd931 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSetup.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSetup.scala @@ -64,9 +64,9 @@ trait SaslSetup { val (serverKeytabFile, clientKeytabFile) = maybeCreateEmptyKeytabFiles() kdc = new MiniKdc(kdcConf, workDir) kdc.start() - kdc.createPrincipal(serverKeytabFile, java.util.List.of(JaasTestUtils.KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME + "/localhost")) + kdc.createPrincipal(serverKeytabFile, List(JaasTestUtils.KAFKA_SERVER_PRINCIPAL_UNQUALIFIED_NAME + "/localhost").asJava) kdc.createPrincipal(clientKeytabFile, - java.util.List.of(JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME, JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME_2)) + List(JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME, JaasTestUtils.KAFKA_CLIENT_PRINCIPAL_UNQUALIFIED_NAME_2).asJava) } /** Return a tuple with the path to the server keytab file and client keytab file */ @@ -166,7 +166,7 @@ trait SaslSetup { def createScramCredentials(adminClient: Admin, userName: String, password: String): Unit = { PublicScramMechanism.values().filter(_ != PublicScramMechanism.UNKNOWN).map(mechanism => { - val results = adminClient.alterUserScramCredentials(util.List.of( + val results = adminClient.alterUserScramCredentials(util.Arrays.asList( new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(mechanism, 4096), password))) results.all.get }) diff --git a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala index fe5f1643d39c6..06592b9c3777a 100644 --- a/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SaslSslAdminIntegrationTest.scala @@ -23,7 +23,6 @@ import org.apache.kafka.common.acl.AclOperation.{ALL, ALTER, ALTER_CONFIGS, CLUS import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} import org.apache.kafka.common.config.{ConfigResource, SaslConfigs, TopicConfig} import org.apache.kafka.common.errors.{ClusterAuthorizationException, DelegationTokenExpiredException, DelegationTokenNotFoundException, InvalidRequestException, TimeoutException, TopicAuthorizationException, UnknownTopicOrPartitionException} -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.resource.PatternType.LITERAL import org.apache.kafka.common.resource.ResourceType.{GROUP, TOPIC} import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, ResourcePatternFilter, ResourceType} @@ -31,18 +30,22 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.utils.SecurityUtils import org.apache.kafka.common.security.token.delegation.DelegationToken import org.apache.kafka.security.authorizer.AclEntry.{WILDCARD_HOST, WILDCARD_PRINCIPAL_STRING} -import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, ServerConfigs} import org.apache.kafka.metadata.authorizer.StandardAuthorizer import org.apache.kafka.server.authorizer.{Authorizer => JAuthorizer} +import org.apache.kafka.storage.internals.log.LogConfig import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo, Timeout} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo, Timeout} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util -import java.util.Optional +import java.util.Collections import scala.collection.Seq import scala.concurrent.ExecutionException import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.RichOption import scala.util.{Failure, Success, Try} @Timeout(120) @@ -83,8 +86,8 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def setInitialAcls(): Unit = { superUserAdmin = createSuperuserAdminClient() val ace = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, ALL, ALLOW) - superUserAdmin.createAcls(java.util.List.of(new AclBinding(new ResourcePattern(TOPIC, "*", LITERAL), ace))) - superUserAdmin.createAcls(java.util.List.of(new AclBinding(new ResourcePattern(GROUP, "*", LITERAL), ace))) + superUserAdmin.createAcls(List(new AclBinding(new ResourcePattern(TOPIC, "*", LITERAL), ace)).asJava) + superUserAdmin.createAcls(List(new AclBinding(new ResourcePattern(GROUP, "*", LITERAL), ace)).asJava) val clusterAcls = List(clusterAcl(ALLOW, CREATE), clusterAcl(ALLOW, DELETE), @@ -96,9 +99,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu superUserAdmin.createAcls(clusterAcls.map(ace => new AclBinding(clusterResourcePattern, ace)).asJava) brokers.foreach { b => - TestUtils.waitAndVerifyAcls(Set(ace), b.dataPlaneRequestProcessor.authorizerPlugin.get, new ResourcePattern(TOPIC, "*", LITERAL)) - TestUtils.waitAndVerifyAcls(Set(ace), b.dataPlaneRequestProcessor.authorizerPlugin.get, new ResourcePattern(GROUP, "*", LITERAL)) - TestUtils.waitAndVerifyAcls(clusterAcls.toSet, b.dataPlaneRequestProcessor.authorizerPlugin.get, clusterResourcePattern) + TestUtils.waitAndVerifyAcls(Set(ace), b.dataPlaneRequestProcessor.authorizer.get, new ResourcePattern(TOPIC, "*", LITERAL)) + TestUtils.waitAndVerifyAcls(Set(ace), b.dataPlaneRequestProcessor.authorizer.get, new ResourcePattern(GROUP, "*", LITERAL)) + TestUtils.waitAndVerifyAcls(clusterAcls.toSet, b.dataPlaneRequestProcessor.authorizer.get, clusterResourcePattern) } } @@ -128,9 +131,10 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val groupAcl = new AclBinding(new ResourcePattern(ResourceType.GROUP, "*", PatternType.LITERAL), new AccessControlEntry("User:*", "*", AclOperation.ALL, AclPermissionType.ALLOW)) - @Test + @ParameterizedTest @Timeout(30) - def testAclOperationsWithOptionTimeoutMs(): Unit = { + @ValueSource(strings = Array("kraft")) + def testAclOperationsWithOptionTimeoutMs(quorum: String): Unit = { val config = createConfig // this will cause timeout connecting to broker config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") @@ -140,15 +144,16 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.createAcls(util.Set.of(acl), new CreateAclsOptions().timeoutMs(0)).all().get() + brokenClient.createAcls(Collections.singleton(acl), new CreateAclsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) } - @Test + @ParameterizedTest @Timeout(30) - def testDeleteAclsWithOptionTimeoutMs(): Unit = { + @ValueSource(strings = Array("kraft")) + def testDeleteAclsWithOptionTimeoutMs(quorum: String): Unit = { val config = createConfig // this will cause timeout connecting to broker config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, s"localhost:${TestUtils.IncorrectBrokerPort}") @@ -156,19 +161,20 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu try { val exception = assertThrows(classOf[ExecutionException], () => { - brokenClient.deleteAcls(util.Set.of(AclBindingFilter.ANY), new DeleteAclsOptions().timeoutMs(0)).all().get() + brokenClient.deleteAcls(Collections.singleton(AclBindingFilter.ANY), new DeleteAclsOptions().timeoutMs(0)).all().get() }) assertInstanceOf(classOf[TimeoutException], exception.getCause) } finally brokenClient.close(time.Duration.ZERO) } - @Test - def testExpireDelegationTokenWithOptionExpireTimePeriodMs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testExpireDelegationTokenWithOptionExpireTimePeriodMs(quorum: String): Unit = { client = createAdminClient - val renewer = java.util.List.of(SecurityUtils.parseKafkaPrincipal("User:renewer")) + val renewer = List(SecurityUtils.parseKafkaPrincipal("User:renewer")) def generateTokenResult(maxLifeTimeMs: Int, expiryTimePeriodMs: Int, expectedTokenNum: Int): (CreateDelegationTokenResult, ExpireDelegationTokenResult) = { - val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer).maxLifetimeMs(maxLifeTimeMs)) + val createResult = client.createDelegationToken(new CreateDelegationTokenOptions().renewers(renewer.asJava).maxLifetimeMs(maxLifeTimeMs)) val tokenCreated = createResult.delegationToken.get TestUtils.waitUntilTrue(() => brokers.forall(server => server.tokenCache.tokens().size() == expectedTokenNum), "Timed out waiting for token to propagate to all servers") @@ -197,31 +203,33 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } finally client.close(time.Duration.ZERO) } - @Test - def testAclOperations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclOperations(quorum: String): Unit = { client = createAdminClient val acl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.DESCRIBE, AclPermissionType.ALLOW)) assertEquals(8, getAcls(AclBindingFilter.ANY).size) - val results = client.createAcls(java.util.List.of(acl2, acl3)) + val results = client.createAcls(List(acl2, acl3).asJava) assertEquals(Set(acl2, acl3), results.values.keySet().asScala) results.values.values.forEach(value => value.get) val aclUnknown = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "mytopic3", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.UNKNOWN, AclPermissionType.ALLOW)) - val results2 = client.createAcls(java.util.List.of(aclUnknown)) + val results2 = client.createAcls(List(aclUnknown).asJava) assertEquals(Set(aclUnknown), results2.values.keySet().asScala) - assertFutureThrows(classOf[InvalidRequestException], results2.all) - val results3 = client.deleteAcls(java.util.List.of(acl.toFilter, acl2.toFilter, acl3.toFilter)).values + assertFutureThrows(results2.all, classOf[InvalidRequestException]) + val results3 = client.deleteAcls(List(acl.toFilter, acl2.toFilter, acl3.toFilter).asJava).values assertEquals(Set(acl.toFilter, acl2.toFilter, acl3.toFilter), results3.keySet.asScala) assertEquals(0, results3.get(acl.toFilter).get.values.size()) assertEquals(Set(acl2), results3.get(acl2.toFilter).get.values.asScala.map(_.binding).toSet) assertEquals(Set(acl3), results3.get(acl3.toFilter).get.values.asScala.map(_.binding).toSet) } - @Test - def testAclOperations2(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclOperations2(quorum: String): Unit = { client = createAdminClient - val results = client.createAcls(java.util.List.of(acl2, acl2, transactionalIdAcl)) + val results = client.createAcls(List(acl2, acl2, transactionalIdAcl).asJava) assertEquals(Set(acl2, acl2, transactionalIdAcl), results.values.keySet.asScala) results.all.get() waitForDescribeAcls(client, acl2.toFilter, Set(acl2)) @@ -234,7 +242,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu waitForDescribeAcls(client, filterA, Set(groupAcl)) waitForDescribeAcls(client, filterC, Set(transactionalIdAcl)) - val results2 = client.deleteAcls(java.util.List.of(filterA, filterB, filterC), new DeleteAclsOptions()) + val results2 = client.deleteAcls(List(filterA, filterB, filterC).asJava, new DeleteAclsOptions()) assertEquals(Set(filterA, filterB, filterC), results2.values.keySet.asScala) assertEquals(Set(groupAcl), results2.values.get(filterA).get.values.asScala.map(_.binding).toSet) assertEquals(Set(transactionalIdAcl), results2.values.get(filterC).get.values.asScala.map(_.binding).toSet) @@ -244,8 +252,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu waitForDescribeAcls(client, filterC, Set()) } - @Test - def testAclDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclDescribe(quorum: String): Unit = { client = createAdminClient ensureAcls(Set(anyAcl, acl2, fooAcl, prefixAcl)) @@ -271,8 +280,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu assertEquals(Set(anyAcl, acl2, fooAcl, prefixAcl), getAcls(allTopicAcls)) } - @Test - def testAclDelete(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclDelete(quorum: String): Unit = { client = createAdminClient ensureAcls(Set(anyAcl, acl2, fooAcl, prefixAcl)) @@ -281,37 +291,37 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val allPrefixedTopicAcls = new AclBindingFilter(new ResourcePatternFilter(ResourceType.TOPIC, null, PatternType.PREFIXED), AccessControlEntryFilter.ANY) // Delete only ACLs on literal 'mytopic2' topic - var deleted = client.deleteAcls(java.util.List.of(acl2.toFilter)).all().get().asScala.toSet + var deleted = client.deleteAcls(List(acl2.toFilter).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl2.pattern()) + waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizer.get, acl2.pattern()) } assertEquals(Set(anyAcl, fooAcl, prefixAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete only ACLs on literal '*' topic - deleted = client.deleteAcls(java.util.List.of(anyAcl.toFilter)).all().get().asScala.toSet + deleted = client.deleteAcls(List(anyAcl.toFilter).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, anyAcl.pattern()) + waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizer.get, anyAcl.pattern()) } assertEquals(Set(acl2, fooAcl, prefixAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete only ACLs on specific prefixed 'mytopic' topics: - deleted = client.deleteAcls(java.util.List.of(prefixAcl.toFilter)).all().get().asScala.toSet + deleted = client.deleteAcls(List(prefixAcl.toFilter).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, prefixAcl.pattern()) + waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizer.get, prefixAcl.pattern()) } assertEquals(Set(anyAcl, acl2, fooAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete all literal ACLs: - deleted = client.deleteAcls(java.util.List.of(allLiteralTopicAcls)).all().get().asScala.toSet + deleted = client.deleteAcls(List(allLiteralTopicAcls).asJava).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl).foreach(acl => - waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) + waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizer.get, acl.pattern()) ) } assertEquals(Set(prefixAcl), getAcls(allTopicAcls)) @@ -319,27 +329,28 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu ensureAcls(deleted) // Delete all prefixed ACLs: - deleted = client.deleteAcls(java.util.List.of(allPrefixedTopicAcls)).all().get().asScala.toSet + deleted = client.deleteAcls(List(allPrefixedTopicAcls).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, prefixAcl.pattern()) + waitAndVerifyRemovedAcl(prefixAcl.entry(), b.dataPlaneRequestProcessor.authorizer.get, prefixAcl.pattern()) } assertEquals(Set(anyAcl, acl2, fooAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete all topic ACLs: - deleted = client.deleteAcls(java.util.List.of(allTopicAcls)).all().get().asScala.toSet + deleted = client.deleteAcls(List(allTopicAcls).asJava).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl, prefixAcl).foreach(acl => - waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) + waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizer.get, acl.pattern()) ) } assertEquals(Set(), getAcls(allTopicAcls)) } //noinspection ScalaDeprecation - test explicitly covers clients using legacy / deprecated constructors - @Test - def testLegacyAclOpsNeverAffectOrReturnPrefixed(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLegacyAclOpsNeverAffectOrReturnPrefixed(quorum: String): Unit = { client = createAdminClient ensureAcls(Set(anyAcl, acl2, fooAcl, prefixAcl)) // <-- prefixed exists, but should never be returned. @@ -355,49 +366,51 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu assertEquals(Set(fooAcl), getAcls(legacyFooTopicAcls)) // Delete only (legacy) ACLs on 'mytopic2' topic - var deleted = client.deleteAcls(java.util.List.of(legacyMyTopic2Acls)).all().get().asScala.toSet + var deleted = client.deleteAcls(List(legacyMyTopic2Acls).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl2.pattern()) + waitAndVerifyRemovedAcl(acl2.entry(), b.dataPlaneRequestProcessor.authorizer.get, acl2.pattern()) } assertEquals(Set(anyAcl, fooAcl, prefixAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete only (legacy) ACLs on '*' topic - deleted = client.deleteAcls(java.util.List.of(legacyAnyTopicAcls)).all().get().asScala.toSet + deleted = client.deleteAcls(List(legacyAnyTopicAcls).asJava).all().get().asScala.toSet brokers.foreach { b => - waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, anyAcl.pattern()) + waitAndVerifyRemovedAcl(anyAcl.entry(), b.dataPlaneRequestProcessor.authorizer.get, anyAcl.pattern()) } assertEquals(Set(acl2, fooAcl, prefixAcl), getAcls(allTopicAcls)) ensureAcls(deleted) // Delete all (legacy) topic ACLs: - deleted = client.deleteAcls(java.util.List.of(legacyAllTopicAcls)).all().get().asScala.toSet + deleted = client.deleteAcls(List(legacyAllTopicAcls).asJava).all().get().asScala.toSet brokers.foreach { b => Set(anyAcl, acl2, fooAcl).foreach(acl => - waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizerPlugin.get, acl.pattern()) + waitAndVerifyRemovedAcl(acl.entry(), b.dataPlaneRequestProcessor.authorizer.get, acl.pattern()) ) } assertEquals(Set(), getAcls(legacyAllTopicAcls)) assertEquals(Set(prefixAcl), getAcls(allTopicAcls)) } - @Test - def testAttemptToCreateInvalidAcls(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAttemptToCreateInvalidAcls(quorum: String): Unit = { client = createAdminClient val clusterAcl = new AclBinding(new ResourcePattern(ResourceType.CLUSTER, "foobar", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.READ, AclPermissionType.ALLOW)) val emptyResourceNameAcl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, "", PatternType.LITERAL), new AccessControlEntry("User:ANONYMOUS", "*", AclOperation.READ, AclPermissionType.ALLOW)) - val results = client.createAcls(java.util.List.of(clusterAcl, emptyResourceNameAcl), new CreateAclsOptions()) + val results = client.createAcls(List(clusterAcl, emptyResourceNameAcl).asJava, new CreateAclsOptions()) assertEquals(Set(clusterAcl, emptyResourceNameAcl), results.values.keySet().asScala) - assertFutureThrows(classOf[InvalidRequestException], results.values.get(clusterAcl)) - assertFutureThrows(classOf[InvalidRequestException], results.values.get(emptyResourceNameAcl)) + assertFutureThrows(results.values.get(clusterAcl), classOf[InvalidRequestException]) + assertFutureThrows(results.values.get(emptyResourceNameAcl), classOf[InvalidRequestException]) } - @Test - def testCreateDelegationTokenWithSmallerTimeout(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateDelegationTokenWithSmallerTimeout(quorum: String): Unit = { client = createAdminClient val timeout = 5000 @@ -408,8 +421,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu assertTrue(tokenInfo.maxTimestamp >= tokenInfo.expiryTimestamp) } - @Test - def testExpiredTimeStampLargerThanMaxLifeStamp(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testExpiredTimeStampLargerThanMaxLifeStamp(quorum: String): Unit = { client = createAdminClient val timeout = 5000 @@ -433,7 +447,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def testAclCreateGetDelete(expectAuth: Boolean): Unit = { TestUtils.waitUntilTrue(() => { - val result = client.createAcls(java.util.List.of(fooAcl, transactionalIdAcl), new CreateAclsOptions) + val result = client.createAcls(List(fooAcl, transactionalIdAcl).asJava, new CreateAclsOptions) if (expectAuth) { Try(result.all.get) match { case Failure(e) => @@ -455,7 +469,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu waitForDescribeAcls(client, transactionalIdAcl.toFilter, Set(transactionalIdAcl)) } TestUtils.waitUntilTrue(() => { - val result = client.deleteAcls(java.util.List.of(fooAcl.toFilter, transactionalIdAcl.toFilter), new DeleteAclsOptions) + val result = client.deleteAcls(List(fooAcl.toFilter, transactionalIdAcl.toFilter).asJava, new DeleteAclsOptions) if (expectAuth) { Try(result.all.get) match { case Failure(e) => @@ -506,8 +520,9 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu }, "timed out waiting for describeAcls to " + (if (expectAuth) "succeed" else "fail")) } - @Test - def testAclAuthorizationDenied(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclAuthorizationDenied(quorum: String): Unit = { client = createAdminClient // Test that we cannot create or delete ACLs when ALTER is denied. @@ -540,37 +555,38 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu private def addClusterAcl(permissionType: AclPermissionType, operation: AclOperation): Unit = { val ace = clusterAcl(permissionType, operation) - superUserAdmin.createAcls(java.util.List.of(new AclBinding(clusterResourcePattern, ace))) + superUserAdmin.createAcls(List(new AclBinding(clusterResourcePattern, ace)).asJava) brokers.foreach { b => - waitAndVerifyAcl(ace, b.dataPlaneRequestProcessor.authorizerPlugin.get, clusterResourcePattern) + waitAndVerifyAcl(ace, b.dataPlaneRequestProcessor.authorizer.get, clusterResourcePattern) } } private def removeClusterAcl(permissionType: AclPermissionType, operation: AclOperation): Unit = { val ace = clusterAcl(permissionType, operation) - superUserAdmin.deleteAcls(java.util.List.of(new AclBinding(clusterResourcePattern, ace).toFilter)).values + superUserAdmin.deleteAcls(List(new AclBinding(clusterResourcePattern, ace).toFilter).asJava).values brokers.foreach { b => - waitAndVerifyRemovedAcl(ace, b.dataPlaneRequestProcessor.authorizerPlugin.get, clusterResourcePattern) + waitAndVerifyRemovedAcl(ace, b.dataPlaneRequestProcessor.authorizer.get, clusterResourcePattern) } } - @Test - def testCreateTopicsResponseMetadataAndConfig(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicsResponseMetadataAndConfig(quorum: String): Unit = { val topic1 = "mytopic1" val topic2 = "mytopic2" val denyAcl = new AclBinding(new ResourcePattern(ResourceType.TOPIC, topic2, PatternType.LITERAL), new AccessControlEntry("User:*", "*", AclOperation.DESCRIBE_CONFIGS, AclPermissionType.DENY)) client = createAdminClient - client.createAcls(java.util.List.of(denyAcl), new CreateAclsOptions()).all().get() + client.createAcls(List(denyAcl).asJava, new CreateAclsOptions()).all().get() val topics = Seq(topic1, topic2) - val configsOverride = java.util.Map.of(TopicConfig.SEGMENT_BYTES_CONFIG, "3000000") - val newTopics = java.util.List.of( + val configsOverride = Map(TopicConfig.SEGMENT_BYTES_CONFIG -> "100000").asJava + val newTopics = Seq( new NewTopic(topic1, 2, 3.toShort).configs(configsOverride), - new NewTopic(topic2, Optional.empty[Integer], Optional.empty[java.lang.Short]).configs(configsOverride)) - val validateResult = client.createTopics(newTopics, new CreateTopicsOptions().validateOnly(true)) + new NewTopic(topic2, Option.empty[Integer].toJava, Option.empty[java.lang.Short].toJava).configs(configsOverride)) + val validateResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions().validateOnly(true)) validateResult.all.get() waitForTopics(client, List(), topics) @@ -580,26 +596,26 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu val topicConfigs = result.config(topic1).get().entries.asScala assertTrue(topicConfigs.nonEmpty) val segmentBytesConfig = topicConfigs.find(_.name == TopicConfig.SEGMENT_BYTES_CONFIG).get - assertEquals(3000000, segmentBytesConfig.value.toLong) + assertEquals(100000, segmentBytesConfig.value.toLong) assertEquals(ConfigEntry.ConfigSource.DYNAMIC_TOPIC_CONFIG, segmentBytesConfig.source) val compressionConfig = topicConfigs.find(_.name == TopicConfig.COMPRESSION_TYPE_CONFIG).get - assertEquals(ServerLogConfigs.COMPRESSION_TYPE_DEFAULT, compressionConfig.value) + assertEquals(LogConfig.DEFAULT_COMPRESSION_TYPE, compressionConfig.value) assertEquals(ConfigEntry.ConfigSource.DEFAULT_CONFIG, compressionConfig.source) - assertFutureThrows(classOf[TopicAuthorizationException], result.numPartitions(topic2)) - assertFutureThrows(classOf[TopicAuthorizationException], result.replicationFactor(topic2)) - assertFutureThrows(classOf[TopicAuthorizationException], result.config(topic2)) + assertFutureThrows(result.numPartitions(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(result.replicationFactor(topic2), classOf[TopicAuthorizationException]) + assertFutureThrows(result.config(topic2), classOf[TopicAuthorizationException]) } validateMetadataAndConfigs(validateResult) - val createResult = client.createTopics(newTopics, new CreateTopicsOptions()) + val createResult = client.createTopics(newTopics.asJava, new CreateTopicsOptions()) createResult.all.get() waitForTopics(client, topics, List()) validateMetadataAndConfigs(createResult) val topicIds = getTopicIds() assertNotEquals(Uuid.ZERO_UUID, createResult.topicId(topic1).get()) assertEquals(topicIds(topic1), createResult.topicId(topic1).get()) - assertFutureThrows(classOf[TopicAuthorizationException], createResult.topicId(topic2)) + assertFutureThrows(createResult.topicId(topic2), classOf[TopicAuthorizationException]) val createResponseConfig = createResult.config(topic1).get().entries.asScala @@ -615,15 +631,16 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } } - @Test - def testExpireDelegationToken(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testExpireDelegationToken(quorum: String): Unit = { client = createAdminClient val createDelegationTokenOptions = new CreateDelegationTokenOptions().maxLifetimeMs(5000) // Test expiration for non-exists token assertFutureThrows( - classOf[DelegationTokenNotFoundException], - client.expireDelegationToken("".getBytes()).expiryTimestamp() + client.expireDelegationToken("".getBytes()).expiryTimestamp(), + classOf[DelegationTokenNotFoundException] ) // Test expiring the token immediately @@ -635,9 +652,8 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu // Ensure current time > maxLifeTimeMs of token Thread.sleep(1000) assertFutureThrows( - classOf[DelegationTokenExpiredException], - client.expireDelegationToken(token2.hmac(), - new ExpireDelegationTokenOptions().expiryTimePeriodMs(1)).expiryTimestamp() + client.expireDelegationToken(token2.hmac(), new ExpireDelegationTokenOptions().expiryTimePeriodMs(1)).expiryTimestamp(), + classOf[DelegationTokenExpiredException] ) // Ensure expiring the expired token with negative expiryTimePeriodMs will not throw exception @@ -648,15 +664,17 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu TestUtils.retry(1000) { assertTrue(expireTokenOrFailWithAssert(token3, 200) < token3.tokenInfo().expiryTimestamp()) } } - @Test - def testCreateTokenWithOverflowTimestamp(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTokenWithOverflowTimestamp(quorum: String): Unit = { client = createAdminClient val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxLifetimeMs(Long.MaxValue)).delegationToken().get() assertEquals(Long.MaxValue, token.tokenInfo().expiryTimestamp()) } - @Test - def testExpireTokenWithOverflowTimestamp(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testExpireTokenWithOverflowTimestamp(quorum: String): Unit = { client = createAdminClient val token = client.createDelegationToken(new CreateDelegationTokenOptions().maxLifetimeMs(Long.MaxValue)).delegationToken().get() TestUtils.retry(1000) { assertTrue(expireTokenOrFailWithAssert(token, Long.MaxValue) == Long.MaxValue) } @@ -679,7 +697,7 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu TestUtils.waitUntilTrue(() => { try { - val topicResponse = client.describeConfigs(java.util.List.of(topicResource)).all.get.get(topicResource) + val topicResponse = client.describeConfigs(List(topicResource).asJava).all.get.get(topicResource) configEntries = topicResponse.entries.asScala true } catch { @@ -709,11 +727,11 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } def waitAndVerifyRemovedAcl(expectedToRemoved: AccessControlEntry, - authorizerPlugin: Plugin[JAuthorizer], + authorizer: JAuthorizer, resource: ResourcePattern, accessControlEntryFilter: AccessControlEntryFilter = AccessControlEntryFilter.ANY): Unit = { val newLine = scala.util.Properties.lineSeparator - val authorizer = authorizerPlugin.get + val filter = new AclBindingFilter(resource.toFilter, accessControlEntryFilter) waitUntilTrue(() => !authorizer.acls(filter).asScala.map(_.entry).toSet.contains(expectedToRemoved), s"expected acl to be removed : $expectedToRemoved" + @@ -722,11 +740,11 @@ class SaslSslAdminIntegrationTest extends BaseAdminIntegrationTest with SaslSetu } def waitAndVerifyAcl(expected: AccessControlEntry, - authorizerPlugin: Plugin[JAuthorizer], + authorizer: JAuthorizer, resource: ResourcePattern, accessControlEntryFilter: AccessControlEntryFilter = AccessControlEntryFilter.ANY): Unit = { val newLine = scala.util.Properties.lineSeparator - val authorizer = authorizerPlugin.get + val filter = new AclBindingFilter(resource.toFilter, accessControlEntryFilter) waitUntilTrue(() => authorizer.acls(filter).asScala.map(_.entry).toSet.contains(expected), s"expected to contain acl: $expected" + diff --git a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala index 2150db202e1e9..9e5930d978af4 100644 --- a/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslAdminIntegrationTest.scala @@ -33,7 +33,9 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.authorizer.{ClusterMetadataAuthorizer, StandardAuthorizer} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertNotNull, assertThrows, assertTrue} -import org.junit.jupiter.api.{AfterEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ import scala.collection.{Seq, mutable} @@ -156,8 +158,9 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { super.tearDown() } - @Test - def testListNodesFromControllersIncludingFencedBrokers(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllersIncludingFencedBrokers(quorum: String): Unit = { useBoostrapControllers() client = createAdminClient val result = client.describeCluster(new DescribeClusterOptions().includeFencedBrokers(true)) @@ -165,21 +168,24 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { assertTrue(exception.getCause.getCause.getMessage.contains("Cannot request fenced brokers from controller endpoint")) } - @Test - def testListNodesFromControllers(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListNodesFromControllers(quorum: String): Unit = { useBoostrapControllers() client = createAdminClient val result = client.describeCluster(new DescribeClusterOptions()) assertTrue(result.nodes().get().size().equals(controllerServers.size)) } - @Test - def testAclUpdatesUsingSynchronousAuthorizer(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclUpdatesUsingSynchronousAuthorizer(quorum: String): Unit = { verifyAclUpdates() } - @Test - def testAclUpdatesUsingAsynchronousAuthorizer(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAclUpdatesUsingAsynchronousAuthorizer(quorum: String): Unit = { SslAdminIntegrationTest.executor = Some(Executors.newSingleThreadExecutor) verifyAclUpdates() } @@ -188,8 +194,9 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { * Verify that ACL updates using synchronous authorizer are performed synchronously * on request threads without any performance overhead introduced by a purgatory. */ - @Test - def testSynchronousAuthorizerAclUpdatesBlockRequestThreads(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSynchronousAuthorizerAclUpdatesBlockRequestThreads(quorum: String): Unit = { val testSemaphore = new Semaphore(0) SslAdminIntegrationTest.semaphore = Some(testSemaphore) waitForNoBlockedRequestThreads() @@ -202,7 +209,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { // Therefore, only the number of controller I/O threads is relevant in this context. val numReqThreads = controllerServers.head.config.numIoThreads * controllerServers.size while (blockedRequestThreads.size < numReqThreads) { - aclFutures += createAdminClient.createAcls(java.util.List.of(acl2)) + aclFutures += createAdminClient.createAcls(List(acl2).asJava) assertTrue(aclFutures.size < numReqThreads * 10, s"Request threads not blocked numRequestThreads=$numReqThreads blocked=$blockedRequestThreads aclFutures=${aclFutures.size}") } @@ -231,7 +238,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { } } (0 until numTimedOut) - .map(_ => createAdminClient.createAcls(java.util.List.of(acl2))) + .map(_ => createAdminClient.createAcls(List(acl2).asJava)) .foreach(_.all().get(30, TimeUnit.SECONDS)) } @@ -239,8 +246,9 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { * Verify that ACL updates using an asynchronous authorizer are completed asynchronously * using a purgatory, enabling other requests to be processed even when ACL updates are blocked. */ - @Test - def testAsynchronousAuthorizerAclUpdatesDontBlockRequestThreads(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAsynchronousAuthorizerAclUpdatesDontBlockRequestThreads(quorum: String): Unit = { SslAdminIntegrationTest.executor = Some(Executors.newSingleThreadExecutor) val testSemaphore = new Semaphore(0) SslAdminIntegrationTest.semaphore = Some(testSemaphore) @@ -251,7 +259,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { // In KRaft mode, ACL creation is handled exclusively by controller servers, not brokers. // Therefore, only the number of controller I/O threads is relevant in this context. val numReqThreads = controllerServers.head.config.numIoThreads * controllerServers.size - val aclFutures = (0 until numReqThreads).map(_ => createAdminClient.createAcls(java.util.List.of(acl2))) + val aclFutures = (0 until numReqThreads).map(_ => createAdminClient.createAcls(List(acl2).asJava)) waitForNoBlockedRequestThreads() assertTrue(aclFutures.forall(future => !future.all.isDone)) @@ -287,7 +295,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { useBoostrapControllers() client = createAdminClient - val results = client.createAcls(java.util.List.of(acl2, acl3)).values + val results = client.createAcls(List(acl2, acl3).asJava).values assertEquals(Set(acl2, acl3), results.keySet().asScala) assertFalse(results.values.asScala.exists(_.isDone)) TestUtils.waitUntilTrue(() => testSemaphore.hasQueuedThreads, "Authorizer not blocked in createAcls") @@ -296,7 +304,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { validateRequestContext(SslAdminIntegrationTest.lastUpdateRequestContext.get, ApiKeys.CREATE_ACLS) testSemaphore.acquire() - val results2 = client.deleteAcls(java.util.List.of(acl.toFilter, acl2.toFilter, acl3.toFilter)).values + val results2 = client.deleteAcls(List(acl.toFilter, acl2.toFilter, acl3.toFilter).asJava).values assertEquals(Set(acl.toFilter, acl2.toFilter, acl3.toFilter), results2.keySet.asScala) assertFalse(results2.values.asScala.exists(_.isDone)) TestUtils.waitUntilTrue(() => testSemaphore.hasQueuedThreads, "Authorizer not blocked in deleteAcls") @@ -353,7 +361,7 @@ class SslAdminIntegrationTest extends SaslSslAdminIntegrationTest { val controllerListenerName = ListenerName.forSecurityProtocol(extraControllerSecurityProtocol) val config = controllerServers.map { s => val listener = s.config.effectiveAdvertisedControllerListeners - .find(_.listener == controllerListenerName.value) + .find(_.listenerName == controllerListenerName) .getOrElse(throw new IllegalArgumentException(s"Could not find listener with name $controllerListenerName")) Utils.formatAddress(listener.host, s.socketServer.boundPort(controllerListenerName)) }.mkString(",") diff --git a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala index 49ff3d7acacaf..3e0ba00d3f924 100644 --- a/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala +++ b/core/src/test/scala/integration/kafka/api/SslEndToEndAuthorizationTest.scala @@ -68,7 +68,7 @@ class SslEndToEndAuthorizationTest extends EndToEndAuthorizationTest { // - a space character occurring at the end of the string // - one of the characters ",", "+", """, "\", "<", ">" or ";" // - // Leading and trailing spaces in Kafka principal don't work with ACLs, but we can workaround by using + // Leading and trailing spaces in Kafka principal dont work with ACLs, but we can workaround by using // a PrincipalBuilder that removes/replaces them. private val clientCn = """\#A client with special chars in CN : (\, \+ \" \\ \< \> \; ')""" override val clientPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, s"O=A client,CN=$clientCn") diff --git a/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala index 8c95aaf49bcef..a6cd0d905decd 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsBounceTest.scala @@ -75,9 +75,9 @@ class TransactionsBounceTest extends IntegrationTestHarness { override protected def brokerCount: Int = 4 - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testWithGroupMetadata(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testWithGroupMetadata(quorum: String, groupProtocol: String): Unit = { testBrokerFailure((producer, _, consumer) => producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, consumer.groupMetadata())) } diff --git a/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala new file mode 100644 index 0000000000000..2449bcc986bc2 --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/TransactionsExpirationTest.scala @@ -0,0 +1,247 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util.{Collections, Properties} +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.TestUtils +import kafka.utils.TestUtils.{consumeRecords, createAdminClient} +import org.apache.kafka.clients.admin.{Admin, ProducerState} +import org.apache.kafka.clients.consumer.Consumer +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.common.errors.{InvalidPidMappingException, TransactionalIdNotFoundException} +import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.CsvSource + +import scala.jdk.CollectionConverters._ +import scala.collection.Seq + +// Test class that uses a very small transaction timeout to trigger InvalidPidMapping errors +class TransactionsExpirationTest extends KafkaServerTestHarness { + val topic1 = "topic1" + val topic2 = "topic2" + val numPartitions = 4 + val replicationFactor = 3 + val tp0 = new TopicPartition(topic1, 0) + + var producer: KafkaProducer[Array[Byte], Array[Byte]] = _ + var consumer: Consumer[Array[Byte], Array[Byte]] = _ + var admin: Admin = _ + + override def generateConfigs: Seq[KafkaConfig] = { + TestUtils.createBrokerConfigs(3).map(KafkaConfig.fromProps(_, serverProps())) + } + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + consumer = TestUtils.createConsumer(bootstrapServers(), + groupProtocolFromTestParameters(), + enableAutoCommit = false, + readCommitted = true) + admin = createAdminClient(brokers, listenerName) + + createTopic(topic1, numPartitions, 3) + createTopic(topic2, numPartitions, 3) + } + + @AfterEach + override def tearDown(): Unit = { + if (producer != null) + producer.close() + if (consumer != null) + consumer.close() + if (admin != null) + admin.close() + + super.tearDown() + } + + @ParameterizedTest + @CsvSource(Array( + "kraft,classic,false", + "kraft,consumer,false", + "kraft,classic,true", + "kraft,consumer,true", + )) + def testFatalErrorAfterInvalidProducerIdMapping(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + producer.initTransactions() + + // Start and then abort a transaction to allow the transactional ID to expire. + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, 0, "4", "4", willBeCommitted = false)) + producer.abortTransaction() + + // Check the transactional state exists and then wait for it to expire. + waitUntilTransactionalStateExists() + waitUntilTransactionalStateExpires() + + // Start a new transaction and attempt to send, triggering an AddPartitionsToTxnRequest that will fail + // due to the expired transactional ID, resulting in a fatal error. + producer.beginTransaction() + val failedFuture = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "1", "1", willBeCommitted = false)) + TestUtils.waitUntilTrue(() => failedFuture.isDone, "Producer future never completed.") + org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[InvalidPidMappingException]) + + // Assert that aborting the transaction throws a KafkaException due to the fatal error. + assertThrows(classOf[KafkaException], () => producer.abortTransaction()) + + // Close the producer and reinitialize to recover from the fatal error. + producer.close() + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() + + // Proceed with a new transaction after reinitializing. + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "2", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 2, "4", "4", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "1", "1", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true)) + producer.commitTransaction() + + waitUntilTransactionalStateExists() + + consumer.subscribe(List(topic1, topic2).asJava) + + val records = consumeRecords(consumer, 4) + records.foreach { record => + TestUtils.assertCommittedAndGetValue(record) + } + } + + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.isTV2Enabled={2}") + @CsvSource(Array( + "kraft,classic,false", + "kraft,consumer,false", + "kraft,classic,true", + "kraft,consumer,true", + )) + def testTransactionAfterProducerIdExpires(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + producer.initTransactions() + + // Start and then abort a transaction to allow the producer ID to expire. + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "2", "2", willBeCommitted = false)) + producer.flush() + + // Ensure producer IDs are added. + var pState : List[ProducerState] = null + TestUtils.waitUntilTrue(() => { pState = producerState; pState.nonEmpty}, "Producer IDs for topic1 did not propagate quickly") + assertEquals(1, pState.size, "Unexpected producer to topic1") + val oldProducerId = pState.head.producerId + val oldProducerEpoch = pState.head.producerEpoch + + producer.abortTransaction() + + // Wait for the producer ID to expire. + TestUtils.waitUntilTrue(() => producerState.isEmpty, "Producer IDs for topic1 did not expire.") + + // Create a new producer to check that we retain the producer ID in transactional state. + producer.close() + producer = TestUtils.createTransactionalProducer("transactionalProducer", brokers) + producer.initTransactions() + + // Start a new transaction and attempt to send. This should work since only the producer ID was removed from its mapping in ProducerStateManager. + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 0, "4", "4", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, 3, "3", "3", willBeCommitted = true)) + producer.commitTransaction() + + // Producer IDs should repopulate. + var pState2 : List[ProducerState] = null + TestUtils.waitUntilTrue(() => {pState2 = producerState; pState2.nonEmpty}, "Producer IDs for topic1 did not propagate quickly") + assertEquals(1, pState2.size, "Unexpected producer to topic1") + val newProducerId = pState2.head.producerId + val newProducerEpoch = pState2.head.producerEpoch + + // Because the transaction IDs outlive the producer IDs, creating a producer with the same transactional id + // soon after the first will re-use the same producerId, while bumping the epoch to indicate that they are distinct. + assertEquals(oldProducerId, newProducerId) + if (isTV2Enabled) { + // TV2 bumps epoch on EndTxn, and the final commit may or may not have bumped the epoch in the producer state. + // The epoch should be at least oldProducerEpoch + 2 for the first commit and the restarted producer. + assertTrue(oldProducerEpoch + 2 <= newProducerEpoch) + } else { + assertEquals(oldProducerEpoch + 1, newProducerEpoch) + } + + consumer.subscribe(List(topic1).asJava) + + val records = consumeRecords(consumer, 2) + records.foreach { record => + TestUtils.assertCommittedAndGetValue(record) + } + } + + private def producerState: List[ProducerState] = { + val describeResult = admin.describeProducers(Collections.singletonList(tp0)) + val activeProducers = describeResult.partitionResult(tp0).get().activeProducers + activeProducers.asScala.toList + } + + private def waitUntilTransactionalStateExpires(): Unit = { + TestUtils.waitUntilTrue(() => { + var removedTransactionState = false + val txnDescribeResult = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer") + try { + txnDescribeResult.get() + } catch { + case e: Exception => { + removedTransactionState = e.getCause.isInstanceOf[TransactionalIdNotFoundException] + } + } + removedTransactionState + }, "Transaction state never expired.") + } + + private def waitUntilTransactionalStateExists(): Unit = { + val describeState = admin.describeTransactions(Collections.singletonList("transactionalProducer")).description("transactionalProducer") + TestUtils.waitUntilTrue(() => describeState.isDone, "Transactional state was never added.") + } + + private def serverProps(): Properties = { + val serverProps = new Properties() + serverProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, false.toString) + // Set a smaller value for the number of partitions for the __consumer_offsets topic + // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take relatively long. + serverProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, 1.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, 3.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, 2.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, 2.toString) + serverProps.put(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, true.toString) + serverProps.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, false.toString) + serverProps.put(ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG, false.toString) + serverProps.put(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, "200") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONAL_ID_EXPIRATION_MS_CONFIG, "10000") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONS_REMOVE_EXPIRED_TRANSACTIONAL_ID_CLEANUP_INTERVAL_MS_CONFIG, "500") + serverProps.put(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_CONFIG, "5000") + serverProps.put(TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_CONFIG, "500") + serverProps + } +} diff --git a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala index e37af5441c275..747376681270a 100644 --- a/core/src/test/scala/integration/kafka/api/TransactionsTest.scala +++ b/core/src/test/scala/integration/kafka/api/TransactionsTest.scala @@ -21,8 +21,9 @@ import kafka.utils.TestUtils.{consumeRecords, waitUntilTrue} import kafka.utils.{TestInfoUtils, TestUtils} import org.apache.kafka.clients.consumer._ import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} -import org.apache.kafka.common.{KafkaException, TopicPartition} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.{ConcurrentTransactionsException, InvalidProducerEpochException, ProducerFencedException, TimeoutException} +import org.apache.kafka.common.test.api.Flaky import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} @@ -110,9 +111,9 @@ class TransactionsTest extends IntegrationTestHarness { super.tearDown() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testBasicTransactions(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testBasicTransactions(quorum: String, groupProtocol: String): Unit = { val producer = transactionalProducers.head val consumer = transactionalConsumers.head val unCommittedConsumer = nonTransactionalConsumers.head @@ -156,8 +157,8 @@ class TransactionsTest extends IntegrationTestHarness { verifyLogStartOffsets(Map((tp11, 0), (tp22, 0))) maybeVerifyLocalLogStartOffsets(Map((tp11, 3L), (tp22, 3L))) - consumer.subscribe(java.util.List.of(topic1, topic2)) - unCommittedConsumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) + unCommittedConsumer.subscribe(List(topic1, topic2).asJava) val records = consumeRecords(consumer, 2) records.foreach { record => @@ -171,9 +172,10 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testReadCommittedConsumerShouldNotSeeUndecidedData(groupProtocol: String): Unit = { + @Flaky("KAFKA-18036") + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testReadCommittedConsumerShouldNotSeeUndecidedData(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers.head val producer2 = createTransactionalProducer("other") val readCommittedConsumer = transactionalConsumers.head @@ -204,19 +206,19 @@ class TransactionsTest extends IntegrationTestHarness { // ensure the records are visible to the read uncommitted consumer val tp1 = new TopicPartition(topic1, 0) val tp2 = new TopicPartition(topic2, 0) - readUncommittedConsumer.assign(java.util.Set.of(tp1, tp2)) + readUncommittedConsumer.assign(Set(tp1, tp2).asJava) consumeRecords(readUncommittedConsumer, 8) - val readUncommittedOffsetsForTimes = readUncommittedConsumer.offsetsForTimes(java.util.Map.of( - tp1, latestWrittenTimestamp: JLong, - tp2, latestWrittenTimestamp: JLong - )) + val readUncommittedOffsetsForTimes = readUncommittedConsumer.offsetsForTimes(Map( + tp1 -> (latestWrittenTimestamp: JLong), + tp2 -> (latestWrittenTimestamp: JLong) + ).asJava) assertEquals(2, readUncommittedOffsetsForTimes.size) assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp1).timestamp) assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp2).timestamp) readUncommittedConsumer.unsubscribe() // we should only see the first two records which come before the undecided second transaction - readCommittedConsumer.assign(java.util.Set.of(tp1, tp2)) + readCommittedConsumer.assign(Set(tp1, tp2).asJava) val records = consumeRecords(readCommittedConsumer, 2) records.foreach { record => assertEquals("x", new String(record.key)) @@ -231,17 +233,17 @@ class TransactionsTest extends IntegrationTestHarness { } // undecided timestamps should not be searchable either - val readCommittedOffsetsForTimes = readCommittedConsumer.offsetsForTimes(java.util.Map.of( - tp1, latestWrittenTimestamp: JLong, - tp2, latestWrittenTimestamp: JLong - )) + val readCommittedOffsetsForTimes = readCommittedConsumer.offsetsForTimes(Map( + tp1 -> (latestWrittenTimestamp: JLong), + tp2 -> (latestWrittenTimestamp: JLong) + ).asJava) assertNull(readCommittedOffsetsForTimes.get(tp1)) assertNull(readCommittedOffsetsForTimes.get(tp2)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDelayedFetchIncludesAbortedTransaction(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDelayedFetchIncludesAbortedTransaction(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers.head val producer2 = createTransactionalProducer("other") val tp10 = new TopicPartition(topic1, 0) @@ -282,7 +284,7 @@ class TransactionsTest extends IntegrationTestHarness { consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100") val readCommittedConsumer = createReadCommittedConsumer(props = consumerProps) - readCommittedConsumer.assign(java.util.Set.of(tp10)) + readCommittedConsumer.assign(Set(tp10).asJava) val records = consumeRecords(readCommittedConsumer, numRecords = 2) assertEquals(2, records.size) @@ -297,9 +299,9 @@ class TransactionsTest extends IntegrationTestHarness { assertEquals(3L, second.offset) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendOffsetsWithGroupMetadata(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendOffsetsWithGroupMetadata(quorum: String, groupProtocol: String): Unit = { sendOffset((producer, _, consumer) => producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, consumer.groupMetadata())) } @@ -324,7 +326,7 @@ class TransactionsTest extends IntegrationTestHarness { val producer = transactionalProducers.head val consumer = createReadCommittedConsumer(consumerGroupId, maxPollRecords = numSeedMessages / 4) - consumer.subscribe(java.util.List.of(topic1)) + consumer.subscribe(List(topic1).asJava) producer.initTransactions() var shouldCommit = false @@ -368,7 +370,7 @@ class TransactionsTest extends IntegrationTestHarness { // In spite of random aborts, we should still have exactly 500 messages in topic2. I.e. we should not // re-copy or miss any messages from topic1, since the consumed offsets were committed transactionally. val verifyingConsumer = transactionalConsumers(0) - verifyingConsumer.subscribe(java.util.List.of(topic2)) + verifyingConsumer.subscribe(List(topic2).asJava) val valueSeq = TestUtils.pollUntilAtLeastNumRecords(verifyingConsumer, numSeedMessages).map { record => TestUtils.assertCommittedAndGetValue(record).toInt } @@ -377,14 +379,14 @@ class TransactionsTest extends IntegrationTestHarness { assertEquals(valueSeq.size, valueSet.size, s"Expected ${valueSeq.size} unique messages in $topic2.") } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFencingOnCommit(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFencingOnCommit(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers(0) val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) producer1.initTransactions() @@ -408,14 +410,14 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFencingOnSendOffsets(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFencingOnSendOffsets(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers(0) val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) producer1.initTransactions() @@ -429,8 +431,8 @@ class TransactionsTest extends IntegrationTestHarness { producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "2", "4", willBeCommitted = true)) producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "4", willBeCommitted = true)) - assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(java.util.Map.of(new TopicPartition(topic1, 0), - new OffsetAndMetadata(110L)), new ConsumerGroupMetadata("foobarGroup"))) + assertThrows(classOf[ProducerFencedException], () => producer1.sendOffsetsToTransaction(Map(new TopicPartition(topic1, 0) + -> new OffsetAndMetadata(110L)).asJava, new ConsumerGroupMetadata("foobarGroup"))) producer2.commitTransaction() // ok @@ -440,22 +442,22 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testOffsetMetadataInSendOffsetsToTransaction(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testOffsetMetadataInSendOffsetsToTransaction(quorum: String, groupProtocol: String): Unit = { val tp = new TopicPartition(topic1, 0) val groupId = "group" val producer = transactionalProducers.head val consumer = createReadCommittedConsumer(groupId) - consumer.subscribe(java.util.List.of(topic1)) + consumer.subscribe(List(topic1).asJava) producer.initTransactions() producer.beginTransaction() val offsetAndMetadata = new OffsetAndMetadata(110L, Optional.of(15), "some metadata") - producer.sendOffsetsToTransaction(java.util.Map.of(tp, offsetAndMetadata), new ConsumerGroupMetadata(groupId)) + producer.sendOffsetsToTransaction(Map(tp -> offsetAndMetadata).asJava, new ConsumerGroupMetadata(groupId)) producer.commitTransaction() // ok // The call to commit the transaction may return before all markers are visible, so we initialize a second @@ -463,31 +465,31 @@ class TransactionsTest extends IntegrationTestHarness { val producer2 = transactionalProducers(1) producer2.initTransactions() - TestUtils.waitUntilTrue(() => offsetAndMetadata.equals(consumer.committed(java.util.Set.of(tp)).get(tp)), "cannot read committed offset") + TestUtils.waitUntilTrue(() => offsetAndMetadata.equals(consumer.committed(Set(tp).asJava).get(tp)), "cannot read committed offset") } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testInitTransactionsTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testInitTransactionsTimeout(quorum: String, groupProtocol: String): Unit = { testTimeout(needInitAndSendMsg = false, producer => producer.initTransactions()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSendOffsetsToTransactionTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testSendOffsetsToTransactionTimeout(quorum: String, groupProtocol: String): Unit = { testTimeout(needInitAndSendMsg = true, producer => producer.sendOffsetsToTransaction( - java.util.Map.of(new TopicPartition(topic1, 0), new OffsetAndMetadata(0)), new ConsumerGroupMetadata("test-group"))) + Map(new TopicPartition(topic1, 0) -> new OffsetAndMetadata(0)).asJava, new ConsumerGroupMetadata("test-group"))) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCommitTransactionTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCommitTransactionTimeout(quorum: String, groupProtocol: String): Unit = { testTimeout(needInitAndSendMsg = true, producer => producer.commitTransaction()) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testAbortTransactionTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testAbortTransactionTimeout(quorum: String, groupProtocol: String): Unit = { testTimeout(needInitAndSendMsg = true, producer => producer.abortTransaction()) } @@ -506,14 +508,14 @@ class TransactionsTest extends IntegrationTestHarness { producer.close(Duration.ZERO) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFencingOnSend(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFencingOnSend(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers(0) val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) producer1.initTransactions() @@ -551,14 +553,14 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFencingOnAddPartitions(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFencingOnAddPartitions(quorum: String, groupProtocol: String): Unit = { val producer1 = transactionalProducers(0) val producer2 = transactionalProducers(1) val consumer = transactionalConsumers(0) - consumer.subscribe(java.util.List.of(topic1, topic2)) + consumer.subscribe(List(topic1, topic2).asJava) TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic1, 0)) TestUtils.waitUntilLeaderIsKnown(brokers, new TopicPartition(topic2, 0)) @@ -586,9 +588,14 @@ class TransactionsTest extends IntegrationTestHarness { fail("Should not be able to send messages from a fenced producer.") } catch { case _: InvalidProducerEpochException => - case e: ExecutionException => - // In kraft mode, transactionV2 is used. - assertTrue(e.getCause.isInstanceOf[InvalidProducerEpochException]) + case e: ExecutionException => { + if (quorum == "zk") { + assertTrue(e.getCause.isInstanceOf[ProducerFencedException]) + } else { + // In kraft mode, transactionV2 is used. + assertTrue(e.getCause.isInstanceOf[InvalidProducerEpochException]) + } + } case e: Exception => throw new AssertionError("Got an unexpected exception from a fenced producer.", e) } @@ -601,9 +608,9 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFencingOnTransactionExpiration(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFencingOnTransactionExpiration(quorum: String, groupProtocol: String): Unit = { val producer = createTransactionalProducer("expiringProducer", transactionTimeoutMs = 300) producer.initTransactions() @@ -629,7 +636,7 @@ class TransactionsTest extends IntegrationTestHarness { // Verify that the first message was aborted and the second one was never written at all. val nonTransactionalConsumer = nonTransactionalConsumers.head - nonTransactionalConsumer.subscribe(java.util.List.of(topic1)) + nonTransactionalConsumer.subscribe(List(topic1).asJava) // Attempt to consume the one written record. We should not see the second. The // assertion does not strictly guarantee that the record wasn't written, but the @@ -639,15 +646,15 @@ class TransactionsTest extends IntegrationTestHarness { assertEquals("1", TestUtils.recordValueAsString(records.head)) val transactionalConsumer = transactionalConsumers.head - transactionalConsumer.subscribe(java.util.List.of(topic1)) + transactionalConsumer.subscribe(List(topic1).asJava) val transactionalRecords = consumeRecordsFor(transactionalConsumer) assertTrue(transactionalRecords.isEmpty) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMultipleMarkersOneLeader(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMultipleMarkersOneLeader(quorum: String, groupProtocol: String): Unit = { val firstProducer = transactionalProducers.head val consumer = transactionalConsumers.head val unCommittedConsumer = nonTransactionalConsumers.head @@ -668,8 +675,8 @@ class TransactionsTest extends IntegrationTestHarness { sendTransactionalMessagesWithValueRange(firstProducer, topicWith10Partitions, 10000, 11000, willBeCommitted = true) firstProducer.commitTransaction() - consumer.subscribe(java.util.List.of(topicWith10PartitionsAndOneReplica, topicWith10Partitions)) - unCommittedConsumer.subscribe(java.util.List.of(topicWith10PartitionsAndOneReplica, topicWith10Partitions)) + consumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava) + unCommittedConsumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava) val records = consumeRecords(consumer, 1000) records.foreach { record => @@ -683,9 +690,9 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsecutivelyRunInitTransactions(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsecutivelyRunInitTransactions(quorum: String, groupProtocol: String): Unit = { val producer = createTransactionalProducer(transactionalId = "normalProducer") producer.initTransactions() @@ -694,11 +701,11 @@ class TransactionsTest extends IntegrationTestHarness { @ParameterizedTest @CsvSource(Array( - "classic,false", - "consumer,false", + "kraft,classic,false", + "kraft,consumer,false", )) - def testBumpTransactionalEpochWithTV2Disabled(groupProtocol: String, isTV2Enabled: Boolean): Unit = { - val defaultLinger = 5 + def testBumpTransactionalEpochWithTV2Disabled(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + val defaultLinger = 5; val producer = createTransactionalProducer("transactionalProducer", deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000) val consumer = transactionalConsumers.head @@ -730,20 +737,7 @@ class TransactionsTest extends IntegrationTestHarness { Thread.sleep(6000) // Wait for the record to time out restartDeadBrokers() - org.apache.kafka.test.TestUtils.assertFutureThrows(classOf[TimeoutException], failedFuture) - // Ensure the producer transitions to abortable_error state. - TestUtils.waitUntilTrue(() => { - var failed = false - try { - producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = false)) - } catch { - case e: Exception => - if (e.isInstanceOf[KafkaException]) - failed = true - } - failed - }, "The send request never failed as expected.") - assertThrows(classOf[KafkaException], () => producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = false))) + org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[TimeoutException]) producer.abortTransaction() producer.beginTransaction() @@ -753,7 +747,7 @@ class TransactionsTest extends IntegrationTestHarness { producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = true)) producer.commitTransaction() - consumer.subscribe(java.util.List.of(topic1, topic2, testTopic)) + consumer.subscribe(List(topic1, topic2, testTopic).asJava) val records = consumeRecords(consumer, 5) records.foreach { record => @@ -766,7 +760,7 @@ class TransactionsTest extends IntegrationTestHarness { producerStateEntry = brokers(partitionLeader).logManager.getLog(new TopicPartition(testTopic, 0)).get.producerStateManager.activeProducers.get(producerId) assertNotNull(producerStateEntry) - assertTrue(producerStateEntry.producerEpoch > initialProducerEpoch, "InitialProduceEpoch: " + initialProducerEpoch + " ProducerStateEntry: " + producerStateEntry) + assertTrue(producerStateEntry.producerEpoch > initialProducerEpoch) } finally { producer.close(Duration.ZERO) } @@ -774,11 +768,11 @@ class TransactionsTest extends IntegrationTestHarness { @ParameterizedTest @CsvSource(Array( - "classic, true", - "consumer, true" + "kraft, classic, true", + "kraft, consumer, true" )) - def testBumpTransactionalEpochWithTV2Enabled(groupProtocol: String, isTV2Enabled: Boolean): Unit = { - val defaultLinger = 5 + def testBumpTransactionalEpochWithTV2Enabled(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { + val defaultLinger = 5; val producer = createTransactionalProducer("transactionalProducer", deliveryTimeoutMs = 5000 + defaultLinger, requestTimeoutMs = 5000) val consumer = transactionalConsumers.head @@ -815,7 +809,7 @@ class TransactionsTest extends IntegrationTestHarness { Thread.sleep(6000) // Wait for the record to time out restartDeadBrokers() - org.apache.kafka.test.TestUtils.assertFutureThrows(classOf[TimeoutException], failedFuture) + org.apache.kafka.test.TestUtils.assertFutureThrows(failedFuture, classOf[TimeoutException]) producer.abortTransaction() // Third transaction: commit @@ -834,7 +828,7 @@ class TransactionsTest extends IntegrationTestHarness { producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(testTopic, 0, "3", "3", willBeCommitted = true)) producer.commitTransaction() - consumer.subscribe(java.util.List.of(topic1, topic2, testTopic)) + consumer.subscribe(List(topic1, topic2, testTopic).asJava) val records = consumeRecords(consumer, 5) records.foreach { record => @@ -846,14 +840,14 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}.isTV2Enabled={1}") + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.isTV2Enabled={2}") @CsvSource(Array( - "classic, false", - "consumer, false", - "classic, true", - "consumer, true", + "kraft, classic, false", + "kraft, consumer, false", + "kraft, classic, true", + "kraft, consumer, true", )) - def testFailureToFenceEpoch(groupProtocol: String, isTV2Enabled: Boolean): Unit = { + def testFailureToFenceEpoch(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { val producer1 = transactionalProducers.head val producer2 = createTransactionalProducer("transactional-producer", maxBlockMs = 1000) val initialProducerEpoch = 0 @@ -920,11 +914,11 @@ class TransactionsTest extends IntegrationTestHarness { } } - @ParameterizedTest(name = "{displayName}.groupProtocol={0}.isTV2Enabled={1}") + @ParameterizedTest(name = "{displayName}.quorum={0}.groupProtocol={1}.isTV2Enabled={2}") @CsvSource(Array( - "consumer, true", + "kraft, consumer, true", )) - def testEmptyAbortAfterCommit(groupProtocol: String, isTV2Enabled: Boolean): Unit = { + def testEmptyAbortAfterCommit(quorum: String, groupProtocol: String, isTV2Enabled: Boolean): Unit = { val producer = transactionalProducers.head producer.initTransactions() @@ -991,10 +985,11 @@ class TransactionsTest extends IntegrationTestHarness { waitUntilTrue(() => { brokers.forall(broker => { partitionStartOffsets.forall { - case (partition, offset) => + case (partition, offset) => { val lso = broker.replicaManager.localLog(partition).get.logStartOffset offsets.put(broker.config.brokerId, lso) offset == lso + } } }) }, s"log start offset doesn't change to the expected position: $partitionStartOffsets, current position: $offsets") diff --git a/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala b/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala new file mode 100644 index 0000000000000..c59997bd37c8a --- /dev/null +++ b/core/src/test/scala/integration/kafka/api/TransactionsWithMaxInFlightOneTest.scala @@ -0,0 +1,136 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.api + +import java.util.Properties +import kafka.integration.KafkaServerTestHarness +import kafka.server.KafkaConfig +import kafka.utils.{TestInfoUtils, TestUtils} +import kafka.utils.TestUtils.consumeRecords +import org.apache.kafka.clients.consumer.Consumer +import org.apache.kafka.clients.producer.KafkaProducer +import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.MethodSource + +import scala.collection.{Seq, mutable} +import scala.jdk.CollectionConverters._ + +/** + * This is used to test transactions with one broker and `max.in.flight.requests.per.connection=1`. + * A single broker is used to verify edge cases where different requests are queued on the same connection. + */ +class TransactionsWithMaxInFlightOneTest extends KafkaServerTestHarness { + val numBrokers = 1 + + val topic1 = "topic1" + val topic2 = "topic2" + val numPartitions = 4 + + val transactionalProducers = mutable.Buffer[KafkaProducer[Array[Byte], Array[Byte]]]() + val transactionalConsumers = mutable.Buffer[Consumer[Array[Byte], Array[Byte]]]() + + override def generateConfigs: Seq[KafkaConfig] = { + TestUtils.createBrokerConfigs(numBrokers).map(KafkaConfig.fromProps(_, serverProps())) + } + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + super.setUp(testInfo) + val topicConfig = new Properties() + topicConfig.put(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, 1.toString) + createTopic(topic1, numPartitions, numBrokers, topicConfig) + createTopic(topic2, numPartitions, numBrokers, topicConfig) + + createTransactionalProducer("transactional-producer") + createReadCommittedConsumer("transactional-group") + } + + @AfterEach + override def tearDown(): Unit = { + transactionalProducers.foreach(_.close()) + transactionalConsumers.foreach(_.close()) + super.tearDown() + } + + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTransactionalProducerSingleBrokerMaxInFlightOne(quorum: String, groupProtocol: String): Unit = { + // We want to test with one broker to verify multiple requests queued on a connection + assertEquals(1, brokers.size) + + val producer = transactionalProducers.head + val consumer = transactionalConsumers.head + + producer.initTransactions() + + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "2", "2", willBeCommitted = false)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "4", "4", willBeCommitted = false)) + producer.flush() + producer.abortTransaction() + + producer.beginTransaction() + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, null, "1", "1", willBeCommitted = true)) + producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, null, "3", "3", willBeCommitted = true)) + producer.commitTransaction() + + consumer.subscribe(List(topic1, topic2).asJava) + + val records = consumeRecords(consumer, 2) + records.foreach { record => + TestUtils.assertCommittedAndGetValue(record) + } + } + + private def serverProps() = { + val serverProps = new Properties() + serverProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, false.toString) + serverProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, 1.toString) + serverProps.put(GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, 1.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, 1.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_REPLICATION_FACTOR_CONFIG, 1.toString) + serverProps.put(TransactionLogConfig.TRANSACTIONS_TOPIC_MIN_ISR_CONFIG, 1.toString) + serverProps.put(ServerConfigs.CONTROLLED_SHUTDOWN_ENABLE_CONFIG, true.toString) + serverProps.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, false.toString) + serverProps.put(ReplicationConfigs.AUTO_LEADER_REBALANCE_ENABLE_CONFIG, false.toString) + serverProps.put(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, "0") + serverProps.put(TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_CONFIG, "200") + serverProps + } + + private def createReadCommittedConsumer(group: String) = { + val consumer = TestUtils.createConsumer(bootstrapServers(), + groupProtocolFromTestParameters(), + groupId = group, + enableAutoCommit = false, + readCommitted = true) + transactionalConsumers += consumer + consumer + } + + private def createTransactionalProducer(transactionalId: String): KafkaProducer[Array[Byte], Array[Byte]] = { + val producer = TestUtils.createTransactionalProducer(transactionalId, brokers, maxInFlight = 1) + transactionalProducers += producer + producer + } +} diff --git a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala index 772af45733324..674a379cfeaac 100644 --- a/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/coordinator/transaction/ProducerIntegrationTest.scala @@ -17,7 +17,8 @@ package kafka.coordinator.transaction -import org.apache.kafka.server.IntegrationTestUtils +import kafka.network.SocketServer +import kafka.server.IntegrationTestUtils import org.apache.kafka.clients.admin.{Admin, NewTopic, TransactionState} import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, ConsumerRecords, OffsetAndMetadata} import org.apache.kafka.clients.producer.{Producer, ProducerConfig, ProducerRecord} @@ -26,18 +27,19 @@ import org.apache.kafka.common.errors.RecordTooLargeException import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, ClusterTests, Type} import org.apache.kafka.common.message.InitProducerIdRequestData +import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{InitProducerIdRequest, InitProducerIdResponse} -import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.common.test.{ClusterInstance, TestUtils} import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{Feature, MetadataVersion} -import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions.{assertEquals, assertInstanceOf, assertThrows, assertTrue} import java.time.Duration import java.util +import java.util.Collections import java.util.concurrent.ExecutionException import java.util.stream.{Collectors, IntStream, StreamSupport} import scala.concurrent.duration.DurationInt @@ -93,7 +95,7 @@ class ProducerIntegrationTest { new ClusterFeature(feature = Feature.TRANSACTION_VERSION, version = 2))), )) def testTransactionWithInvalidSendAndEndTxnRequestSent(cluster: ClusterInstance): Unit = { - val topic = new NewTopic("foobar", 1, 1.toShort).configs(util.Map.of(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "100")) + val topic = new NewTopic("foobar", 1, 1.toShort).configs(Collections.singletonMap(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "100")) val txnId = "test-txn" val properties = new util.HashMap[String, Object] properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, txnId) @@ -103,7 +105,7 @@ class ProducerIntegrationTest { val admin = cluster.admin() val producer: Producer[Array[Byte], Array[Byte]] = cluster.producer(properties) try { - admin.createTopics(util.List.of(topic)) + admin.createTopics(List(topic).asJava) producer.initTransactions() producer.beginTransaction() @@ -160,7 +162,7 @@ class ProducerIntegrationTest { records.count == 5 }, "poll records size not match") val lastRecord = StreamSupport.stream(records.spliterator, false).reduce((_, second) => second).orElse(null) - val offsets = util.Map.of( + val offsets = Collections.singletonMap( new TopicPartition(lastRecord.topic, lastRecord.partition), new OffsetAndMetadata(lastRecord.offset + 1)) producer.sendOffsetsToTransaction(offsets, consumer.groupMetadata) producer.commitTransaction() @@ -181,9 +183,9 @@ class ProducerIntegrationTest { private def verifyUniqueIds(clusterInstance: ClusterInstance): Unit = { // Request enough PIDs from each broker to ensure each broker generates two blocks - val ids = clusterInstance.brokers().values().stream().flatMap(broker => { - IntStream.range(0, 1001).parallel().mapToObj(_ => - nextProducerId(broker.boundPort(clusterInstance.clientListener())) + val ids = clusterInstance.brokerSocketServers().stream().flatMap( broker => { + IntStream.range(0, 1001).parallel().mapToObj( _ => + nextProducerId(broker, clusterInstance.clientListener()) )}).collect(Collectors.toList[Long]).asScala.toSeq val brokerCount = clusterInstance.brokerIds.size @@ -192,7 +194,7 @@ class ProducerIntegrationTest { assertEquals(expectedTotalCount, ids.distinct.size, "Found duplicate producer IDs") } - private def nextProducerId(port: Int): Long = { + private def nextProducerId(broker: SocketServer, listener: ListenerName): Long = { // Generating producer ids may fail while waiting for the initial block and also // when the current block is full and waiting for the prefetched block. val deadline = 5.seconds.fromNow @@ -205,7 +207,11 @@ class ProducerIntegrationTest { .setTransactionalId(null) .setTransactionTimeoutMs(10) val request = new InitProducerIdRequest.Builder(data).build() - response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request, port) + + response = IntegrationTestUtils.connectAndReceive[InitProducerIdResponse](request, + destination = broker, + listenerName = listener) + shouldRetry = response.data.errorCode == Errors.COORDINATOR_LOAD_IN_PROGRESS.code } assertTrue(deadline.hasTimeLeft()) diff --git a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala index e2db135124459..4f5cd7f4a2803 100644 --- a/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala +++ b/core/src/test/scala/integration/kafka/network/DynamicConnectionQuotaTest.scala @@ -30,16 +30,18 @@ import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.{ProduceRequest, ProduceResponse} import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.test.api.Flaky -import org.apache.kafka.common.{KafkaException, Uuid, requests} +import org.apache.kafka.common.{KafkaException, requests} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.config.QuotaConfig import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.io.IOException import java.net.{InetAddress, Socket} import java.util.concurrent.{ExecutorService, Executors, TimeUnit} -import java.util.Properties +import java.util.{Collections, Properties} import scala.collection.Map import scala.jdk.CollectionConverters._ @@ -54,7 +56,7 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { val plaintextListenerDefaultQuota = 30 var executor: ExecutorService = _ var admin: Admin = _ - var topicId: Uuid = _ + override def brokerPropertyOverrides(properties: Properties): Unit = { properties.put(QuotaConfig.NUM_QUOTA_SAMPLES_CONFIG, "2") properties.put("listener.name.plaintext.max.connection.creation.rate", plaintextListenerDefaultQuota.toString) @@ -65,7 +67,6 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { super.setUp(testInfo) admin = createAdminClient(listener) TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers) - topicId = TestUtils.describeTopic(admin, topic).topicId() } @AfterEach @@ -82,8 +83,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } @Flaky("KAFKA-17999") - @Test - def testDynamicConnectionQuota(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicConnectionQuota(quorum: String): Unit = { val maxConnectionsPerIP = 5 def connectAndVerify(): Unit = { @@ -109,8 +111,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { verifyMaxConnections(maxConnectionsPerIPOverride, connectAndVerify) } - @Test - def testDynamicListenerConnectionQuota(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicListenerConnectionQuota(quorum: String): Unit = { val initialConnectionCount = connectionCount def connectAndVerify(): Unit = { @@ -181,8 +184,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { } - @Test - def testDynamicListenerConnectionCreationRateQuota(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicListenerConnectionCreationRateQuota(quorum: String): Unit = { // Create another listener. PLAINTEXT is an inter-broker listener // keep default limits val newListenerNames = Seq("PLAINTEXT", "EXTERNAL") @@ -242,8 +246,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { waitForConnectionCount(initialConnectionCount) } - @Test - def testDynamicIpConnectionRateQuota(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicIpConnectionRateQuota(quorum: String): Unit = { val connRateLimit = 10 val initialConnectionCount = connectionCount // before setting connection rate to 10, verify we can do at least double that by default (no limit) @@ -302,9 +307,9 @@ class DynamicConnectionQuotaTest extends BaseRequestTest { private def produceRequest: ProduceRequest = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - java.util.List.of(new ProduceRequestData.TopicProduceData() - .setTopicId(topicId) - .setPartitionData(java.util.List.of(new ProduceRequestData.PartitionProduceData() + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(topic) + .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(0) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord(System.currentTimeMillis(), "key".getBytes, "value".getBytes)))))) diff --git a/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala b/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala index 2273a69cf99ed..91bf8c0378364 100644 --- a/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala +++ b/core/src/test/scala/integration/kafka/network/DynamicNumNetworkThreadsTest.scala @@ -24,7 +24,9 @@ import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.network.SocketServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.Properties import scala.jdk.CollectionConverters._ @@ -64,8 +66,9 @@ class DynamicNumNetworkThreadsTest extends BaseRequestTest { .count(listener == _.tags().get("listener")) } - @Test - def testDynamicNumNetworkThreads(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicNumNetworkThreads(quorum: String): Unit = { // Increase the base network thread count val newBaseNetworkThreadsCount = SocketServerConfigs.NUM_NETWORK_THREADS_DEFAULT + 1 var props = new Properties diff --git a/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala b/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala index f10beb0086fa8..34000b4417370 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedFetchTest.scala @@ -16,16 +16,15 @@ */ package kafka.server -import java.util.{Optional, OptionalLong} +import java.util.Optional import scala.collection.Seq import kafka.cluster.Partition import org.apache.kafka.common.{TopicIdPartition, Uuid} import org.apache.kafka.common.errors.{FencedLeaderEpochException, NotLeaderOrFollowerException} import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest -import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogOffsetMetadata, LogOffsetSnapshot} import org.junit.jupiter.api.Test @@ -231,6 +230,7 @@ class DelayedFetchTest { minBytes: Int = 1, ): FetchParams = { new FetchParams( + ApiKeys.FETCH.latestVersion, replicaId, 1, maxWaitMs, @@ -256,16 +256,16 @@ class DelayedFetchTest { } private def buildReadResult(error: Errors): LogReadResult = { - new LogReadResult( - new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), - Optional.empty(), - -1L, - -1L, - -1L, - -1L, - -1L, - OptionalLong.empty(), - if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) + LogReadResult( + exception = if (error != Errors.NONE) Some(error.exception) else None, + info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, + highWatermark = -1L, + leaderLogStartOffset = -1L, + leaderLogEndOffset = -1L, + followerLogStartOffset = -1L, + fetchTimeMs = -1L, + lastStableOffset = None) } } diff --git a/core/src/test/scala/integration/kafka/server/DelayedFutureTest.scala b/core/src/test/scala/integration/kafka/server/DelayedFutureTest.scala new file mode 100644 index 0000000000000..e9313159493d0 --- /dev/null +++ b/core/src/test/scala/integration/kafka/server/DelayedFutureTest.scala @@ -0,0 +1,96 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package integration.kafka.server + +import kafka.server.DelayedFuturePurgatory +import kafka.utils.TestUtils +import org.apache.kafka.common.utils.Time +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} +import org.junit.jupiter.api.Test + +import java.util.concurrent.{CompletableFuture, ExecutionException} +import java.util.concurrent.atomic.AtomicInteger +import scala.jdk.CollectionConverters.CollectionHasAsScala + +class DelayedFutureTest { + + @Test + def testDelayedFuture(): Unit = { + val purgatoryName = "testDelayedFuture" + val purgatory = new DelayedFuturePurgatory(purgatoryName, brokerId = 0) + try { + val result = new AtomicInteger() + + def hasExecutorThread: Boolean = Thread.getAllStackTraces.keySet.asScala.map(_.getName) + .exists(_.contains(s"DelayedExecutor-$purgatoryName")) + + def updateResult(futures: List[CompletableFuture[Integer]]): Unit = + result.set(futures.filterNot(_.isCompletedExceptionally).map(_.get.intValue).sum) + + assertFalse(hasExecutorThread, "Unnecessary thread created") + + // Two completed futures: callback should be executed immediately on the same thread + val futures1 = List(CompletableFuture.completedFuture(10.asInstanceOf[Integer]), + CompletableFuture.completedFuture(11.asInstanceOf[Integer])) + val r1 = purgatory.tryCompleteElseWatch[Integer](100000L, futures1, () => updateResult(futures1)) + assertTrue(r1.isCompleted, "r1 not completed") + assertEquals(21, result.get()) + assertFalse(hasExecutorThread, "Unnecessary thread created") + + // Two delayed futures: callback should wait for both to complete + result.set(-1) + val futures2 = List(new CompletableFuture[Integer], new CompletableFuture[Integer]) + val r2 = purgatory.tryCompleteElseWatch[Integer](100000L, futures2, () => updateResult(futures2)) + assertFalse(r2.isCompleted, "r2 should be incomplete") + futures2.head.complete(20) + assertFalse(r2.isCompleted) + assertEquals(-1, result.get()) + futures2(1).complete(21) + TestUtils.waitUntilTrue(() => r2.isCompleted, "r2 not completed") + TestUtils.waitUntilTrue(() => result.get == 41, "callback not invoked") + assertTrue(hasExecutorThread, "Thread not created for executing delayed task") + + // One immediate and one delayed future: callback should wait for delayed task to complete + result.set(-1) + val futures3 = List(new CompletableFuture[Integer], CompletableFuture.completedFuture(31.asInstanceOf[Integer])) + val r3 = purgatory.tryCompleteElseWatch[Integer](100000L, futures3, () => updateResult(futures3)) + assertFalse(r3.isCompleted, "r3 should be incomplete") + assertEquals(-1, result.get()) + futures3.head.complete(30) + TestUtils.waitUntilTrue(() => r3.isCompleted, "r3 not completed") + TestUtils.waitUntilTrue(() => result.get == 61, "callback not invoked") + + // One future doesn't complete within timeout. Should expire and invoke callback after timeout. + result.set(-1) + val start = Time.SYSTEM.hiResClockMs + val expirationMs = 2000L + val futures4 = List(new CompletableFuture[Integer], new CompletableFuture[Integer]) + val r4 = purgatory.tryCompleteElseWatch[Integer](expirationMs, futures4, () => updateResult(futures4)) + futures4.head.complete(40) + TestUtils.waitUntilTrue(() => futures4(1).isDone, "r4 futures not expired") + assertTrue(r4.isCompleted, "r4 not completed after timeout") + val elapsed = Time.SYSTEM.hiResClockMs - start + assertTrue(elapsed >= expirationMs, s"Time for expiration $elapsed should at least $expirationMs") + assertEquals(40, futures4.head.get) + assertEquals(classOf[org.apache.kafka.common.errors.TimeoutException], + assertThrows(classOf[ExecutionException], () => futures4(1).get).getCause.getClass) + assertEquals(40, result.get()) + } finally { + purgatory.shutdown() + } + } +} diff --git a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala index 23b4b32b0d744..264f5310c2d62 100644 --- a/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala +++ b/core/src/test/scala/integration/kafka/server/DelayedRemoteFetchTest.scala @@ -16,23 +16,20 @@ */ package kafka.server -import com.yammer.metrics.core.Meter import kafka.cluster.Partition import org.apache.kafka.common.errors.NotLeaderOrFollowerException -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.{TopicIdPartition, Uuid} -import org.apache.kafka.server.LogReadResult import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} import org.apache.kafka.storage.internals.log._ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test -import org.mockito.ArgumentMatchers.anyBoolean -import org.mockito.Mockito.{mock, never, verify, when} +import org.mockito.Mockito.{mock, verify, when} -import java.util.{Collections, Optional, OptionalLong} +import java.util.Optional import java.util.concurrent.{CompletableFuture, Future} import scala.collection._ import scala.jdk.CollectionConverters._ @@ -41,7 +38,6 @@ class DelayedRemoteFetchTest { private val maxBytes = 1024 private val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) private val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") - private val topicIdPartition2 = new TopicIdPartition(Uuid.randomUuid(), 0, "topic2") private val fetchOffset = 500L private val logStartOffset = 0L private val currentLeaderEpoch = Optional.of[Integer](10) @@ -64,22 +60,14 @@ class DelayedRemoteFetchTest { } val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(buildRemoteReadResult(Errors.NONE)) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) + future.complete(null) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) val highWatermark = 100 val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) - val delayedRemoteFetch = new DelayedRemoteFetch( - java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), - java.util.Collections.singletonMap(topicIdPartition, future), - java.util.Collections.singletonMap(topicIdPartition, fetchInfo), - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), - fetchParams, - Seq(topicIdPartition -> logReadInfo), - replicaManager, - callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) .thenReturn(mock(classOf[Partition])) @@ -108,23 +96,14 @@ class DelayedRemoteFetchTest { } val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(buildRemoteReadResult(Errors.NONE)) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) + future.complete(null) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) val highWatermark = 100 val leaderLogStartOffset = 10 val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) val fetchParams = buildFetchParams(replicaId = 1, maxWaitMs = 500) - - assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch( - java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), - java.util.Collections.singletonMap(topicIdPartition, future), - java.util.Collections.singletonMap(topicIdPartition, fetchInfo), - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), - fetchParams, - Seq(topicIdPartition -> logReadInfo), - replicaManager, - callback)) + assertThrows(classOf[IllegalStateException], () => new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback)) } @Test @@ -143,20 +122,12 @@ class DelayedRemoteFetchTest { .thenThrow(new NotLeaderOrFollowerException(s"Replica for $topicIdPartition not available")) val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) val logReadInfo = buildReadResult(Errors.NONE) - val delayedRemoteFetch = new DelayedRemoteFetch( - java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), - java.util.Collections.singletonMap(topicIdPartition, future), - java.util.Collections.singletonMap(topicIdPartition, fetchInfo), - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), - fetchParams, - Seq(topicIdPartition -> logReadInfo), - replicaManager, - callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) // delayed remote fetch should still be able to complete assertTrue(delayedRemoteFetch.tryComplete()) @@ -180,22 +151,14 @@ class DelayedRemoteFetchTest { .thenReturn(mock(classOf[Partition])) val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - future.complete(buildRemoteReadResult(Errors.NONE)) - val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) + future.complete(null) + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) // build a read result with error val logReadInfo = buildReadResult(Errors.FENCED_LEADER_EPOCH) - val delayedRemoteFetch = new DelayedRemoteFetch( - java.util.Collections.emptyMap[TopicIdPartition, Future[Void]](), - java.util.Collections.singletonMap(topicIdPartition, future), - java.util.Collections.singletonMap(topicIdPartition, fetchInfo), - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus), - fetchParams, - Seq(topicIdPartition -> logReadInfo), - replicaManager, - callback) + val delayedRemoteFetch = new DelayedRemoteFetch(null, future, fetchInfo, remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) assertTrue(delayedRemoteFetch.tryComplete()) assertTrue(delayedRemoteFetch.isCompleted) @@ -206,267 +169,58 @@ class DelayedRemoteFetchTest { @Test def testRequestExpiry(): Unit = { - val responses = mutable.Map[TopicIdPartition, FetchPartitionData]() - - def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { - responseSeq.foreach { case (tp, data) => - responses.put(tp, data) - } - } - - def expiresPerSecValue(): Double = { - val allMetrics = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala - val metric = allMetrics.find { case (n, _) => n.getMBeanName.endsWith("kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec") } + var actualTopicPartition: Option[TopicIdPartition] = None + var fetchResultOpt: Option[FetchPartitionData] = None - if (metric.isEmpty) - 0 - else - metric.get._2.asInstanceOf[Meter].count + def callback(responses: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { + assertEquals(1, responses.size) + actualTopicPartition = Some(responses.head._1) + fetchResultOpt = Some(responses.head._2) } - val remoteFetchTaskExpired = mock(classOf[Future[Void]]) - val remoteFetchTask2 = mock(classOf[Future[Void]]) - // complete the 2nd task, and keep the 1st one expired - when(remoteFetchTask2.isDone).thenReturn(true) - - // Create futures - one completed, one not - val future1: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - // Only complete one remote fetch - future2.complete(buildRemoteReadResult(Errors.NONE)) - - val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) - val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null) - val highWatermark = 100 val leaderLogStartOffset = 10 - val logReadInfo1 = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) - val logReadInfo2 = buildReadResult(Errors.NONE) - - val fetchStatus1 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch)) - val fetchStatus2 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch)) - - // Set up maps for multiple partitions - val remoteFetchTasks = new java.util.HashMap[TopicIdPartition, Future[Void]]() - val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]() - val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() - - remoteFetchTasks.put(topicIdPartition, remoteFetchTaskExpired) - remoteFetchTasks.put(topicIdPartition2, remoteFetchTask2) - remoteFetchResults.put(topicIdPartition, future1) - remoteFetchResults.put(topicIdPartition2, future2) - remoteFetchInfos.put(topicIdPartition, fetchInfo1) - remoteFetchInfos.put(topicIdPartition2, fetchInfo2) - - val delayedRemoteFetch = new DelayedRemoteFetch( - remoteFetchTasks, - remoteFetchResults, - remoteFetchInfos, - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2), - fetchParams, - Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2), - replicaManager, - callback) + val remoteFetchTask = mock(classOf[Future[Void]]) + val future: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() + val fetchInfo: RemoteStorageFetchInfo = new RemoteStorageFetchInfo(0, false, topicIdPartition.topicPartition(), null, null, false) + val logReadInfo = buildReadResult(Errors.NONE, highWatermark, leaderLogStartOffset) + + val delayedRemoteFetch = new DelayedRemoteFetch(remoteFetchTask, future, fetchInfo, remoteFetchMaxWaitMs, + Seq(topicIdPartition -> fetchStatus), fetchParams, Seq(topicIdPartition -> logReadInfo), replicaManager, callback) when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) .thenReturn(mock(classOf[Partition])) - when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition)) - .thenReturn(mock(classOf[Partition])) // Verify that the ExpiresPerSec metric is zero before fetching - val existingMetricVal = expiresPerSecValue() - // Verify the delayedRemoteFetch is not completed yet - assertFalse(delayedRemoteFetch.isCompleted) + val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics + assertEquals(0, metrics.keySet.asScala.count(_.getMBeanName == "kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec")) // Force the delayed remote fetch to expire delayedRemoteFetch.run() - // Check that the expired task was cancelled and force-completed - verify(remoteFetchTaskExpired).cancel(anyBoolean()) - verify(remoteFetchTask2, never()).cancel(anyBoolean()) + // Check that the task was cancelled and force-completed + verify(remoteFetchTask).cancel(true) assertTrue(delayedRemoteFetch.isCompleted) // Check that the ExpiresPerSec metric was incremented - assertTrue(expiresPerSecValue() > existingMetricVal) - - // Fetch results should include 2 results and the expired one should return local read results - assertEquals(2, responses.size) - assertTrue(responses.contains(topicIdPartition)) - assertTrue(responses.contains(topicIdPartition2)) - - assertEquals(Errors.NONE, responses(topicIdPartition).error) - assertEquals(highWatermark, responses(topicIdPartition).highWatermark) - assertEquals(leaderLogStartOffset, responses(topicIdPartition).logStartOffset) - - assertEquals(Errors.NONE, responses(topicIdPartition2).error) - } - - @Test - def testMultiplePartitions(): Unit = { - val responses = mutable.Map[TopicIdPartition, FetchPartitionData]() - - def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { - responseSeq.foreach { case (tp, data) => - responses.put(tp, data) - } - } - - // Create futures - one completed, one not - val future1: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - // Only complete one remote fetch - future1.complete(buildRemoteReadResult(Errors.NONE)) - - val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) - val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null) - - val highWatermark1 = 100 - val leaderLogStartOffset1 = 10 - val highWatermark2 = 200 - val leaderLogStartOffset2 = 20 - - val logReadInfo1 = buildReadResult(Errors.NONE, 100, 10) - val logReadInfo2 = buildReadResult(Errors.NONE, 200, 20) - - val fetchStatus1 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch)) - val fetchStatus2 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch)) - - // Set up maps for multiple partitions - val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]() - val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() - - remoteFetchResults.put(topicIdPartition, future1) - remoteFetchResults.put(topicIdPartition2, future2) - remoteFetchInfos.put(topicIdPartition, fetchInfo1) - remoteFetchInfos.put(topicIdPartition2, fetchInfo2) - - val delayedRemoteFetch = new DelayedRemoteFetch( - Collections.emptyMap[TopicIdPartition, Future[Void]](), - remoteFetchResults, - remoteFetchInfos, - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2), - fetchParams, - Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2), - replicaManager, - callback) - - when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) - .thenReturn(mock(classOf[Partition])) - when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition)) - .thenReturn(mock(classOf[Partition])) + assertEquals(1, metrics.keySet.asScala.count(_.getMBeanName == "kafka.server:type=DelayedRemoteFetchMetrics,name=ExpiresPerSec")) - // Should not complete since future2 is not done - assertFalse(delayedRemoteFetch.tryComplete()) - assertFalse(delayedRemoteFetch.isCompleted) - - // Complete future2 - future2.complete(buildRemoteReadResult(Errors.NONE)) - - // Now it should complete - assertTrue(delayedRemoteFetch.tryComplete()) - assertTrue(delayedRemoteFetch.isCompleted) - - // Verify both partitions were processed without error - assertEquals(2, responses.size) - assertTrue(responses.contains(topicIdPartition)) - assertTrue(responses.contains(topicIdPartition2)) - - assertEquals(Errors.NONE, responses(topicIdPartition).error) - assertEquals(highWatermark1, responses(topicIdPartition).highWatermark) - assertEquals(leaderLogStartOffset1, responses(topicIdPartition).logStartOffset) - - assertEquals(Errors.NONE, responses(topicIdPartition2).error) - assertEquals(highWatermark2, responses(topicIdPartition2).highWatermark) - assertEquals(leaderLogStartOffset2, responses(topicIdPartition2).logStartOffset) - } - - @Test - def testMultiplePartitionsWithFailedResults(): Unit = { - val responses = mutable.Map[TopicIdPartition, FetchPartitionData]() - - def callback(responseSeq: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { - responseSeq.foreach { case (tp, data) => - responses.put(tp, data) - } - } - - // Create futures - one successful, one with error - val future1: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - val future2: CompletableFuture[RemoteLogReadResult] = new CompletableFuture[RemoteLogReadResult]() - - // Created 1 successful result and 1 failed result - future1.complete(buildRemoteReadResult(Errors.NONE)) - future2.complete(buildRemoteReadResult(Errors.UNKNOWN_SERVER_ERROR)) - - val fetchInfo1 = new RemoteStorageFetchInfo(0, false, topicIdPartition, null, null) - val fetchInfo2 = new RemoteStorageFetchInfo(0, false, topicIdPartition2, null, null) - - val logReadInfo1 = buildReadResult(Errors.NONE, 100, 10) - val logReadInfo2 = buildReadResult(Errors.NONE, 200, 20) - - val fetchStatus1 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset, logStartOffset, maxBytes, currentLeaderEpoch)) - val fetchStatus2 = FetchPartitionStatus( - startOffsetMetadata = new LogOffsetMetadata(fetchOffset + 100), - fetchInfo = new FetchRequest.PartitionData(Uuid.ZERO_UUID, fetchOffset + 100, logStartOffset, maxBytes, currentLeaderEpoch)) - - // Set up maps for multiple partitions - val remoteFetchResults = new java.util.HashMap[TopicIdPartition, CompletableFuture[RemoteLogReadResult]]() - val remoteFetchInfos = new java.util.HashMap[TopicIdPartition, RemoteStorageFetchInfo]() - - remoteFetchResults.put(topicIdPartition, future1) - remoteFetchResults.put(topicIdPartition2, future2) - remoteFetchInfos.put(topicIdPartition, fetchInfo1) - remoteFetchInfos.put(topicIdPartition2, fetchInfo2) - - val delayedRemoteFetch = new DelayedRemoteFetch( - Collections.emptyMap[TopicIdPartition, Future[Void]](), - remoteFetchResults, - remoteFetchInfos, - remoteFetchMaxWaitMs, - Seq(topicIdPartition -> fetchStatus1, topicIdPartition2 -> fetchStatus2), - fetchParams, - Seq(topicIdPartition -> logReadInfo1, topicIdPartition2 -> logReadInfo2), - replicaManager, - callback) - - when(replicaManager.getPartitionOrException(topicIdPartition.topicPartition)) - .thenReturn(mock(classOf[Partition])) - when(replicaManager.getPartitionOrException(topicIdPartition2.topicPartition)) - .thenReturn(mock(classOf[Partition])) - - assertTrue(delayedRemoteFetch.tryComplete()) - assertTrue(delayedRemoteFetch.isCompleted) - - // Verify both partitions were processed - assertEquals(2, responses.size) - assertTrue(responses.contains(topicIdPartition)) - assertTrue(responses.contains(topicIdPartition2)) - - // First partition should be successful - val fetchResult1 = responses(topicIdPartition) - assertEquals(Errors.NONE, fetchResult1.error) + // Fetch results should still include local read results + assertTrue(actualTopicPartition.isDefined) + assertEquals(topicIdPartition, actualTopicPartition.get) + assertTrue(fetchResultOpt.isDefined) - // Second partition should have an error due to remote fetch failure - val fetchResult2 = responses(topicIdPartition2) - assertEquals(Errors.UNKNOWN_SERVER_ERROR, fetchResult2.error) + val fetchResult = fetchResultOpt.get + assertEquals(Errors.NONE, fetchResult.error) + assertEquals(highWatermark, fetchResult.highWatermark) + assertEquals(leaderLogStartOffset, fetchResult.logStartOffset) } private def buildFetchParams(replicaId: Int, maxWaitMs: Int): FetchParams = { new FetchParams( + ApiKeys.FETCH.latestVersion, replicaId, 1, maxWaitMs, @@ -480,22 +234,16 @@ class DelayedRemoteFetchTest { private def buildReadResult(error: Errors, highWatermark: Int = 0, leaderLogStartOffset: Int = 0): LogReadResult = { - new LogReadResult( - new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY, false, Optional.empty(), - Optional.of(mock(classOf[RemoteStorageFetchInfo]))), - Optional.empty(), - highWatermark, - leaderLogStartOffset, - -1L, - -1L, - -1L, - OptionalLong.empty(), - if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) + LogReadResult( + exception = if (error != Errors.NONE) Some(error.exception) else None, + info = new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY), + divergingEpoch = None, + highWatermark = highWatermark, + leaderLogStartOffset = leaderLogStartOffset, + leaderLogEndOffset = -1L, + followerLogStartOffset = -1L, + fetchTimeMs = -1L, + lastStableOffset = None) } - private def buildRemoteReadResult(error: Errors): RemoteLogReadResult = { - new RemoteLogReadResult( - Optional.of(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY)), - if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) - } } diff --git a/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala b/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala new file mode 100644 index 0000000000000..96664d41a809c --- /dev/null +++ b/core/src/test/scala/integration/kafka/server/DelayedRemoteListOffsetsTest.scala @@ -0,0 +1,257 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsTopicResponse +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.record.FileRecords.TimestampAndOffset +import org.apache.kafka.common.requests.ListOffsetsResponse +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} +import org.apache.kafka.server.util.timer.MockTimer +import org.apache.kafka.storage.internals.log.{AsyncOffsetReadFutureHolder, OffsetResultHolder} +import org.junit.jupiter.api.{AfterEach, Test} +import org.junit.jupiter.api.Assertions.assertEquals +import org.mockito.ArgumentMatchers.anyBoolean +import org.mockito.Mockito.{mock, when} + +import java.util.Optional +import java.util.concurrent.CompletableFuture +import scala.collection.mutable +import scala.concurrent.TimeoutException +import scala.jdk.CollectionConverters._ + +class DelayedRemoteListOffsetsTest { + + val delayMs = 10 + val timer = new MockTimer() + val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) + type T = OffsetResultHolder.FileRecordsOrError + val purgatory = + new DelayedOperationPurgatory[DelayedRemoteListOffsets]("test-purgatory", timer, 0, 10, true, true) + + @AfterEach + def afterEach(): Unit = { + purgatory.shutdown() + } + + @Test + def testResponseOnRequestExpiration(): Unit = { + var numResponse = 0 + val responseCallback = (response: List[ListOffsetsTopicResponse]) => { + response.foreach { topic => + topic.partitions().forEach { partition => + assertEquals(Errors.REQUEST_TIMED_OUT.code(), partition.errorCode()) + assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, partition.timestamp()) + assertEquals(ListOffsetsResponse.UNKNOWN_OFFSET, partition.offset()) + assertEquals(-1, partition.leaderEpoch()) + numResponse += 1 + } + } + } + + var cancelledCount = 0 + val jobFuture = mock(classOf[CompletableFuture[Void]]) + val holder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + when(holder.taskFuture).thenAnswer(_ => new CompletableFuture[T]()) + when(holder.jobFuture).thenReturn(jobFuture) + when(jobFuture.cancel(anyBoolean())).thenAnswer(_ => { + cancelledCount += 1 + true + }) + + val statusByPartition = mutable.Map( + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) + ) + + val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) + val listOffsetsRequestKeys = statusByPartition.keys.map(new TopicPartitionOperationKey(_)).toList.asJava + assertEquals(0, DelayedRemoteListOffsetsMetrics.aggregateExpirationMeter.count()) + assertEquals(0, DelayedRemoteListOffsetsMetrics.partitionExpirationMeters.size) + purgatory.tryCompleteElseWatch(delayedRemoteListOffsets, listOffsetsRequestKeys) + + Thread.sleep(100) + assertEquals(3, listOffsetsRequestKeys.size) + assertEquals(listOffsetsRequestKeys.size, cancelledCount) + assertEquals(listOffsetsRequestKeys.size, numResponse) + assertEquals(listOffsetsRequestKeys.size, DelayedRemoteListOffsetsMetrics.aggregateExpirationMeter.count()) + listOffsetsRequestKeys.forEach(key => { + val tp = new TopicPartition(key.topic, key.partition) + assertEquals(1, DelayedRemoteListOffsetsMetrics.partitionExpirationMeters.get(tp).count()) + }) + } + + @Test + def testResponseOnSuccess(): Unit = { + var numResponse = 0 + val responseCallback = (response: List[ListOffsetsTopicResponse]) => { + response.foreach { topic => + topic.partitions().forEach { partition => + assertEquals(Errors.NONE.code(), partition.errorCode()) + assertEquals(100L, partition.timestamp()) + assertEquals(100L, partition.offset()) + assertEquals(50, partition.leaderEpoch()) + numResponse += 1 + } + } + } + + val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) + val taskFuture = new CompletableFuture[T]() + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) + + var cancelledCount = 0 + val jobFuture = mock(classOf[CompletableFuture[Void]]) + val holder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + when(holder.taskFuture).thenAnswer(_ => taskFuture) + when(holder.jobFuture).thenReturn(jobFuture) + when(jobFuture.cancel(anyBoolean())).thenAnswer(_ => { + cancelledCount += 1 + true + }) + + val statusByPartition = mutable.Map( + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) + ) + + val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) + val listOffsetsRequestKeys = statusByPartition.keys.map(new TopicPartitionOperationKey(_)).toList.asJava + purgatory.tryCompleteElseWatch(delayedRemoteListOffsets, listOffsetsRequestKeys) + + assertEquals(0, cancelledCount) + assertEquals(listOffsetsRequestKeys.size, numResponse) + } + + @Test + def testResponseOnPartialError(): Unit = { + var numResponse = 0 + val responseCallback = (response: List[ListOffsetsTopicResponse]) => { + response.foreach { topic => + topic.partitions().forEach { partition => + if (topic.name().equals("test1")) { + assertEquals(Errors.UNKNOWN_SERVER_ERROR.code(), partition.errorCode()) + assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, partition.timestamp()) + assertEquals(ListOffsetsResponse.UNKNOWN_OFFSET, partition.offset()) + assertEquals(-1, partition.leaderEpoch()) + } else { + assertEquals(Errors.NONE.code(), partition.errorCode()) + assertEquals(100L, partition.timestamp()) + assertEquals(100L, partition.offset()) + assertEquals(50, partition.leaderEpoch()) + } + numResponse += 1 + } + } + } + + val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) + val taskFuture = new CompletableFuture[T]() + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) + + var cancelledCount = 0 + val jobFuture = mock(classOf[CompletableFuture[Void]]) + val holder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + when(holder.taskFuture).thenAnswer(_ => taskFuture) + when(holder.jobFuture).thenReturn(jobFuture) + when(jobFuture.cancel(anyBoolean())).thenAnswer(_ => { + cancelledCount += 1 + true + }) + + val errorFutureHolder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + val errorTaskFuture = new CompletableFuture[T]() + errorTaskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.of(new TimeoutException("Timed out!")), Optional.empty())) + when(errorFutureHolder.taskFuture).thenAnswer(_ => errorTaskFuture) + when(errorFutureHolder.jobFuture).thenReturn(jobFuture) + + val statusByPartition = mutable.Map( + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(errorFutureHolder)) + ) + + val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) + val listOffsetsRequestKeys = statusByPartition.keys.map(new TopicPartitionOperationKey(_)).toList.asJava + purgatory.tryCompleteElseWatch(delayedRemoteListOffsets, listOffsetsRequestKeys) + + assertEquals(0, cancelledCount) + assertEquals(listOffsetsRequestKeys.size, numResponse) + } + + @Test + def testPartialResponseWhenNotLeaderOrFollowerExceptionOnOnePartition(): Unit = { + var numResponse = 0 + val responseCallback = (response: List[ListOffsetsTopicResponse]) => { + response.foreach { topic => + topic.partitions().forEach { partition => + if (topic.name().equals("test1") && partition.partitionIndex() == 0) { + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code(), partition.errorCode()) + assertEquals(ListOffsetsResponse.UNKNOWN_TIMESTAMP, partition.timestamp()) + assertEquals(ListOffsetsResponse.UNKNOWN_OFFSET, partition.offset()) + assertEquals(-1, partition.leaderEpoch()) + } else { + assertEquals(Errors.NONE.code(), partition.errorCode()) + assertEquals(100L, partition.timestamp()) + assertEquals(100L, partition.offset()) + assertEquals(50, partition.leaderEpoch()) + } + numResponse += 1 + } + } + } + + val timestampAndOffset = new TimestampAndOffset(100L, 100L, Optional.of(50)) + val taskFuture = new CompletableFuture[T]() + taskFuture.complete(new OffsetResultHolder.FileRecordsOrError(Optional.empty(), Optional.of(timestampAndOffset))) + + var cancelledCount = 0 + val jobFuture = mock(classOf[CompletableFuture[Void]]) + val holder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + when(holder.taskFuture).thenAnswer(_ => taskFuture) + when(holder.jobFuture).thenReturn(jobFuture) + when(jobFuture.cancel(anyBoolean())).thenAnswer(_ => { + cancelledCount += 1 + true + }) + + when(replicaManager.getPartitionOrException(new TopicPartition("test1", 0))) + .thenThrow(new NotLeaderOrFollowerException("Not leader or follower!")) + val errorFutureHolder: AsyncOffsetReadFutureHolder[T] = mock(classOf[AsyncOffsetReadFutureHolder[T]]) + val errorTaskFuture = new CompletableFuture[T]() + when(errorFutureHolder.taskFuture).thenAnswer(_ => errorTaskFuture) + when(errorFutureHolder.jobFuture).thenReturn(jobFuture) + + val statusByPartition = mutable.Map( + new TopicPartition("test", 0) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)), + new TopicPartition("test1", 0) -> ListOffsetsPartitionStatus(None, Optional.of(errorFutureHolder)), + new TopicPartition("test1", 1) -> ListOffsetsPartitionStatus(None, Optional.of(holder)) + ) + + val delayedRemoteListOffsets = new DelayedRemoteListOffsets(delayMs, version = 5, statusByPartition, replicaManager, responseCallback) + val listOffsetsRequestKeys = statusByPartition.keys.map(new TopicPartitionOperationKey(_)).toList.asJava + purgatory.tryCompleteElseWatch(delayedRemoteListOffsets, listOffsetsRequestKeys) + + assertEquals(1, cancelledCount) + assertEquals(listOffsetsRequestKeys.size, numResponse) + } +} diff --git a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala index 170ee3679f47b..f4de50d7cd8a3 100644 --- a/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala +++ b/core/src/test/scala/integration/kafka/server/DynamicBrokerReconfigurationTest.scala @@ -24,12 +24,13 @@ import java.lang.management.ManagementFactory import java.security.KeyStore import java.time.Duration import java.util -import java.util.{Optional, Properties} +import java.util.{Collections, Optional, Properties} import java.util.concurrent._ import javax.management.ObjectName import com.yammer.metrics.core.MetricName import kafka.admin.ConfigCommand import kafka.api.SaslSetup +import kafka.log.UnifiedLog import kafka.network.{DataPlaneAcceptor, Processor, RequestChannel} import kafka.security.JaasTestUtils import kafka.utils._ @@ -56,14 +57,11 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.serialization.{StringDeserializer, StringSerializer} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.MetadataLogConfig import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.metrics.{KafkaYammerMetrics, MetricConfigs} -import org.apache.kafka.server.ReplicaState import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.server.util.ShutdownableThread -import org.apache.kafka.server.quota.{ClientQuotaEntity, ClientQuotaManager} -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} import org.apache.kafka.test.TestSslUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} @@ -119,7 +117,30 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup clearLeftOverProcessorMetrics() // clear metrics left over from other tests so that new ones can be tested (0 until numServers).foreach { brokerId => - val props = defaultStaticConfig(brokerId) + + val props = TestUtils.createBrokerConfig(brokerId) + props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") + props ++= securityProps(sslProperties1, TRUSTSTORE_PROPS) + // Ensure that we can support multiple listeners per security protocol and multiple security protocols + props.put(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") + props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"PLAINTEXT:PLAINTEXT, $SecureInternal:SSL, $SecureExternal:SASL_SSL, CONTROLLER:$controllerListenerSecurityProtocol") + props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, SecureInternal) + props.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "requested") + props.put(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, "PLAIN") + props.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, kafkaServerSaslMechanisms.mkString(",")) + props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "1048576") // low value to test log rolling on config update + props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2") // greater than one to test reducing threads + props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, 1680000000.toString) + props.put(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, 168.toString) + + props ++= sslProperties1 + props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(SecureInternal)) + + // Set invalid top-level properties to ensure that listener config is used + // Don't set any dynamic configs here since they get overridden in tests + props ++= invalidSslProperties + props ++= securityProps(invalidSslProperties, KEYSTORE_PROPS) + props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(SecureExternal)) val kafkaConfig = KafkaConfig.fromProps(props) @@ -137,33 +158,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup TestMetricsReporter.testReporters.clear() } - def defaultStaticConfig(brokerId: Int): Properties = { - val props = TestUtils.createBrokerConfig(brokerId) - props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") - props ++= securityProps(sslProperties1, TRUSTSTORE_PROPS) - // Ensure that we can support multiple listeners per security protocol and multiple security protocols - props.put(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0") - props.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"PLAINTEXT:PLAINTEXT, $SecureInternal:SSL, $SecureExternal:SASL_SSL, CONTROLLER:$controllerListenerSecurityProtocol") - props.put(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, SecureInternal) - props.put(BrokerSecurityConfigs.SSL_CLIENT_AUTH_CONFIG, "requested") - props.put(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG, "PLAIN") - props.put(BrokerSecurityConfigs.SASL_ENABLED_MECHANISMS_CONFIG, kafkaServerSaslMechanisms.mkString(",")) - props.put(ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG, "1048576") // low value to test log rolling on config update - props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2") // greater than one to test reducing threads - props.put(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG, 1680000000.toString) - props.put(ServerLogConfigs.LOG_RETENTION_TIME_HOURS_CONFIG, 168.toString) - - props ++= sslProperties1 - props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(SecureInternal)) - - // Set invalid top-level properties to ensure that listener config is used - // Don't set any dynamic configs here since they get overridden in tests - props ++= invalidSslProperties - props ++= securityProps(invalidSslProperties, KEYSTORE_PROPS) - props ++= securityProps(sslProperties1, KEYSTORE_PROPS, listenerPrefix(SecureExternal)) - props - } - @AfterEach override def tearDown(): Unit = { clientThreads.foreach(_.interrupt()) @@ -178,9 +172,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup closeSasl() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConfigDescribeUsingAdminClient(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConfigDescribeUsingAdminClient(quorum: String, groupProtocol: String): Unit = { def verifyConfig(configName: String, configEntry: ConfigEntry, isSensitive: Boolean, isReadOnly: Boolean, expectedProps: Properties): Unit = { @@ -278,9 +272,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertEquals(List((CleanerConfig.LOG_CLEANER_THREADS_PROP, ConfigSource.DEFAULT_CONFIG)), synonymsList(logCleanerThreads)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUpdatesUsingConfigProvider(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUpdatesUsingConfigProvider(quorum: String, groupProtocol: String): Unit = { val PollingIntervalVal = f"$${file:polling.interval:interval}" val PollingIntervalUpdateVal = f"$${file:polling.interval:updinterval}" val SslTruststoreTypeVal = f"$${file:ssl.truststore.type:storetype}" @@ -343,9 +337,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testKeyStoreAlter(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testKeyStoreAlter(quorum: String, groupProtocol: String): Unit = { val topic2 = "testtopic2" TestUtils.createTopicWithAdmin(adminClients.head, topic2, servers, controllerServers, numPartitions, replicationFactor = numServers) @@ -412,9 +406,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup stopAndVerifyProduceConsume(producerThread, consumerThread) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testTrustStoreAlter(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTrustStoreAlter(quorum: String, groupProtocol: String): Unit = { val producerBuilder = ProducerBuilder().listenerName(SecureInternal).securityProtocol(SecurityProtocol.SSL) // Producer with new keystore should fail to connect before truststore update @@ -488,33 +482,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup waitForAuthenticationFailure(producerBuilder.keyStoreProps(sslProperties1)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testSocketServerConfigTest(groupProtocol: String): Unit = { - val updatedMaxConnections = "20" - val connectionsIpsOverride = "1.2.3.4:1234,1.2.4.5:2345" - val properties = new Properties() - properties.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG, updatedMaxConnections) - properties.setProperty(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG, connectionsIpsOverride) - - TestUtils.incrementalAlterConfigs(servers, adminClients.head, properties, perBrokerConfig = true) - - servers.foreach(_.shutdown()) - servers.foreach(_.awaitShutdown()) - servers.foreach(_.startup()) - - servers.foreach { broker => - assertEquals(updatedMaxConnections, broker.config.originals() - .get(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_CONFIG).toString) - - assertEquals(connectionsIpsOverride, broker.config.originals() - .get(SocketServerConfigs.MAX_CONNECTIONS_PER_IP_OVERRIDES_CONFIG).toString) - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testLogCleanerConfig(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testLogCleanerConfig(quorum: String, groupProtocol: String): Unit = { val (producerThread, consumerThread) = startProduceConsume(retries = 0, groupProtocol) verifyThreads("kafka-log-cleaner-thread-", countPerBroker = 1) @@ -565,9 +535,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup verifyThreads("kafka-log-cleaner-thread-", countPerBroker = 2) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testConsecutiveConfigChange(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testConsecutiveConfigChange(quorum: String, groupProtocol: String): Unit = { val topic2 = "testtopic2" val topicProps = new Properties topicProps.put(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, "2") @@ -609,9 +579,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertEquals("2", log.config.originals().get(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG).toString) // Verify topic-level config still survives } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testDefaultTopicConfig(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testDefaultTopicConfig(quorum: String, groupProtocol: String): Unit = { val (producerThread, consumerThread) = startProduceConsume(retries = 0, groupProtocol) val props = new Properties @@ -655,7 +625,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup "Config not updated in LogManager") val log = servers.head.logManager.getLog(new TopicPartition(topic, 0)).getOrElse(throw new IllegalStateException("Log not found")) - TestUtils.waitUntilTrue(() => log.config.segmentSize() == 1048576, "Existing topic config using defaults not updated") + TestUtils.waitUntilTrue(() => log.config.segmentSize == 1048576, "Existing topic config using defaults not updated") val KafkaConfigToLogConfigName: Map[String, String] = ServerTopicConfigSynonyms.TOPIC_CONFIG_SYNONYMS.asScala.map { case (k, v) => (v, k) } props.asScala.foreach { case (k, v) => @@ -730,9 +700,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionEnable(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionEnable(quorum: String, groupProtocol: String): Unit = { // Create a topic with two replicas on brokers other than the controller val topic = "testtopic2" TestUtils.createTopicWithAdmin(adminClients.head, topic, servers, controllerServers, replicaAssignment = Map(0 -> Seq(0, 1))) @@ -743,7 +713,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup consumer.commitSync() def partitionInfo: TopicPartitionInfo = - adminClients.head.describeTopics(util.Set.of(topic)).topicNameValues().get(topic).get().partitions().get(0) + adminClients.head.describeTopics(Collections.singleton(topic)).topicNameValues().get(topic).get().partitions().get(0) val partitionInfo0 = partitionInfo assertEquals(partitionInfo0.replicas.get(0), partitionInfo0.leader) @@ -788,9 +758,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup consumer.commitSync() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testThreadPoolResize(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testThreadPoolResize(quorum: String, groupProtocol: String): Unit = { // In kraft mode, the StripedReplicaPlacer#initialize includes some randomization, // so the replica assignment is not deterministic. @@ -920,7 +890,7 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup private def verifyMarkPartitionsForTruncation(): Unit = { val leaderId = 0 val topicDescription = adminClients.head. - describeTopics(java.util.List.of(topic)). + describeTopics(java.util.Arrays.asList(topic)). allTopicNames(). get(3, TimeUnit.MINUTES).get(topic) val partitions = topicDescription.partitions().asScala. @@ -937,14 +907,14 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup assertEquals(replicaFetcherManager.getFetcherId(tp), fetcherThreads.head._1.fetcherId) val thread = fetcherThreads.head._2 assertEquals(Some(truncationOffset), thread.fetchState(tp).map(_.fetchOffset)) - assertEquals(Some(ReplicaState.TRUNCATING), thread.fetchState(tp).map(_.state)) + assertEquals(Some(Truncating), thread.fetchState(tp).map(_.state)) } } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testMetricsReporterUpdate(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testMetricsReporterUpdate(quorum: String, groupProtocol: String): Unit = { // Add a new metrics reporter val newProps = new Properties newProps.put(TestMetricsReporter.PollingIntervalProp, "100") @@ -965,9 +935,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup val clientId = "test-client-1" servers.foreach { server => server.quotaManagers.produce.updateQuota( - Optional.empty, - Optional.of(new ClientQuotaManager.ClientIdEntity(clientId): ClientQuotaEntity.ConfigEntity), - Optional.of(Quota.upperBound(10000000)) + None, + Some(ClientQuotaManager.ClientIdEntity(clientId)), + Some(Quota.upperBound(10000000)) ) } val (producerThread, consumerThread) = startProduceConsume(retries = 0, groupProtocol, clientId) @@ -1036,9 +1006,9 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup stopAndVerifyProduceConsume(producerThread, consumerThread) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testReconfigureRemovedListener(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testReconfigureRemovedListener(quorum: String, groupProtocol: String): Unit = { val client = adminClients.head val broker = servers.head assertEquals(2, broker.config.dynamicConfig.reconfigurables.asScala.count(r => r.isInstanceOf[DataPlaneAcceptor])) @@ -1048,26 +1018,26 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup .map(_.asInstanceOf[DataPlaneAcceptor]).toSeq // add new PLAINTEXT listener - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, s"PLAINTEXT://localhost:0, $SecureInternal://localhost:0, $SecureExternal://localhost:0"), AlterConfigOp.OpType.SET) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => acceptors.size == 3, s"failed to add new DataPlaneAcceptor") // remove PLAINTEXT listener - client.incrementalAlterConfigs(util.Map.of(broker0Resource, - util.List.of(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, + client.incrementalAlterConfigs(Map(broker0Resource -> + Seq(new AlterConfigOp(new ConfigEntry(SocketServerConfigs.LISTENERS_CONFIG, s"$SecureInternal://localhost:0, $SecureExternal://localhost:0"), AlterConfigOp.OpType.SET) - ))).all().get() + ).asJavaCollection).asJava).all().get() TestUtils.waitUntilTrue(() => acceptors.size == 2, s"failed to remove DataPlaneAcceptor. current: ${acceptors.map(_.endPoint.toString).mkString(",")}") } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testTransactionVerificationEnable(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTransactionVerificationEnable(quorum: String, groupProtocol: String): Unit = { def verifyConfiguration(enabled: Boolean): Unit = { servers.foreach { server => TestUtils.waitUntilTrue(() => server.logManager.producerStateManagerConfig.transactionVerificationEnabled == enabled, "Configuration was not updated.") @@ -1097,59 +1067,6 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup verifyConfiguration(true) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testServersCanStartWithInvalidStaticConfigsAndValidDynamicConfigs(groupProtocol: String): Unit = { - TestNumReplicaFetcherMetricsReporter.testReporters.clear() - - // modify snapshot interval config to explicitly take snapshot on a broker with valid dynamic configs - val props = defaultStaticConfig(numServers) - props.put(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, "10000") - props.put(MetricConfigs.METRIC_REPORTER_CLASSES_CONFIG, classOf[TestNumReplicaFetcherMetricsReporter].getName) - props.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "1") - - val kafkaConfig = KafkaConfig.fromProps(props) - val newBroker = createBroker(kafkaConfig).asInstanceOf[BrokerServer] - servers += newBroker - - alterSslKeystoreUsingConfigCommand(sslProperties1, listenerPrefix(SecureExternal)) - - // Add num.replica.fetchers to the cluster-level config. - val clusterLevelProps = new Properties - clusterLevelProps.put(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2") - reconfigureServers(clusterLevelProps, perBrokerConfig = false, (ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG, "2")) - - // Wait for the metrics reporter to be configured - val initialReporter = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head - initialReporter.verifyState(reconfigureCount = 1, numFetcher = 2) - - TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) - - TestUtils.waitUntilTrue( - () => newBroker.raftManager.replicatedLog.latestSnapshotId().isPresent, - "metadata snapshot not present on broker", - 30000L - ) - - // shutdown broker and attempt to restart it after invalidating its static configurations - newBroker.shutdown() - newBroker.awaitShutdown() - - // Clean up the test reporter - TestNumReplicaFetcherMetricsReporter.testReporters.clear() - - val invalidStaticConfigs = defaultStaticConfig(newBroker.config.brokerId) - invalidStaticConfigs.putAll(securityProps(invalidSslConfigs, KEYSTORE_PROPS, listenerPrefix(SecureExternal))) - newBroker.config.updateCurrentConfig(KafkaConfig.fromProps(invalidStaticConfigs)) - - newBroker.startup() - - // Verify that the custom MetricsReporter is not reconfigured after restart. - // If readDynamicBrokerConfigsFromSnapshot works correctly, the reporter should maintain its state. - val reporterAfterRestart = TestNumReplicaFetcherMetricsReporter.waitForReporters(1).head - reporterAfterRestart.verifyState(reconfigureCount = 0, numFetcher = 2) - } - private def awaitInitialPositions(consumer: Consumer[_, _]): Unit = { TestUtils.pollUntilTrue(consumer, () => !consumer.assignment.isEmpty, "Timed out while waiting for assignment") consumer.assignment.forEach(tp => consumer.position(tp)) @@ -1235,11 +1152,11 @@ class DynamicBrokerReconfigurationTest extends QuorumTestHarness with SaslSetup } val cert1 = load(trustStore1Props).getCertificate("kafka") val cert2 = load(trustStore2Props).getCertificate("kafka") - val certs = util.Map.of("kafka1", cert1, "kafka2", cert2) + val certs = Map("kafka1" -> cert1, "kafka2" -> cert2) val combinedStorePath = TestUtils.tempFile("truststore", ".jks").getAbsolutePath val password = trustStore1Props.get(SSL_TRUSTSTORE_PASSWORD_CONFIG).asInstanceOf[Password] - TestSslUtils.createTrustStore(combinedStorePath, password, certs) + TestSslUtils.createTrustStore(combinedStorePath, password, certs.asJava) val newStoreProps = new Properties newStoreProps.put(SSL_TRUSTSTORE_LOCATION_CONFIG, combinedStorePath) newStoreProps.put(SSL_TRUSTSTORE_PASSWORD_CONFIG, password) @@ -1403,7 +1320,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con private def tempPropertiesFile(properties: Properties): File = TestUtils.tempPropertiesFile(properties.asScala) - private abstract class ClientBuilder[T] { + private abstract class ClientBuilder[T]() { protected var _bootstrapServers: Option[String] = None protected var _listenerName: String = SecureExternal protected var _securityProtocol = SecurityProtocol.SASL_SSL @@ -1436,7 +1353,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con private var _retries = Int.MaxValue private var _acks = -1 private var _requestTimeoutMs = 30000 - private val defaultLingerMs = 5 + private val defaultLingerMs = 5; private var _deliveryTimeoutMs = 30000 + defaultLingerMs def maxRetries(retries: Int): ProducerBuilder = { _retries = retries; this } @@ -1480,7 +1397,7 @@ val configEntries = props.asScala.map { case (k, v) => new AlterConfigOp(new Con val consumer = new KafkaConsumer[String, String](consumerProps, new StringDeserializer, new StringDeserializer) consumers += consumer - consumer.subscribe(util.Set.of(_topic)) + consumer.subscribe(Collections.singleton(_topic)) if (_autoOffsetReset == "latest") awaitInitialPositions(consumer) consumer @@ -1621,7 +1538,7 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close } override def reconfigurableConfigs(): util.Set[String] = { - util.Set.of(PollingIntervalProp) + Set(PollingIntervalProp).asJava } override def validateReconfiguration(configs: util.Map[String, _]): Unit = { @@ -1656,64 +1573,6 @@ class TestMetricsReporter extends MetricsReporter with Reconfigurable with Close } } -object TestNumReplicaFetcherMetricsReporter { - val testReporters = new ConcurrentLinkedQueue[TestNumReplicaFetcherMetricsReporter]() - - def waitForReporters(count: Int): List[TestNumReplicaFetcherMetricsReporter] = { - TestUtils.waitUntilTrue(() => testReporters.size == count, msg = "Metrics reporters size not matched. Expected: " + count + ", actual: " + testReporters.size()) - - val reporters = testReporters.asScala.toList - TestUtils.waitUntilTrue(() => reporters.forall(_.configureCount == 1), msg = "Metrics reporters not configured") - reporters - } -} - - -class TestNumReplicaFetcherMetricsReporter extends MetricsReporter { - import TestNumReplicaFetcherMetricsReporter._ - @volatile var configureCount = 0 - @volatile var reconfigureCount = 0 - @volatile var numFetchers: Int = 1 - testReporters.add(this) - - override def init(metrics: util.List[KafkaMetric]): Unit = { - } - - override def configure(configs: util.Map[String, _]): Unit = { - configureCount += 1 - numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt - } - - override def metricChange(metric: KafkaMetric): Unit = { - } - - override def metricRemoval(metric: KafkaMetric): Unit = { - } - - override def reconfigurableConfigs(): util.Set[String] = { - util.Set.of(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG) - } - - override def validateReconfiguration(configs: util.Map[String, _]): Unit = { - val numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt - if (numFetchers <= 0) - throw new ConfigException(s"Invalid num.replica.fetchers $numFetchers") - } - - override def reconfigure(configs: util.Map[String, _]): Unit = { - reconfigureCount += 1 - numFetchers = configs.get(ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG).toString.toInt - } - - override def close(): Unit = { - } - - def verifyState(reconfigureCount: Int, numFetcher: Int = 1): Unit = { - assertEquals(reconfigureCount, this.reconfigureCount) - assertEquals(numFetcher, this.numFetchers) - } -} - class MockFileConfigProvider extends FileConfigProvider { @throws(classOf[IOException]) diff --git a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala index e50a6a96bc56a..a9961c7c48225 100644 --- a/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala +++ b/core/src/test/scala/integration/kafka/server/FetchFromFollowerIntegrationTest.scala @@ -32,7 +32,7 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import java.util -import java.util.Properties +import java.util.{Collections, Properties} import java.util.concurrent.{Executors, TimeUnit} import scala.jdk.CollectionConverters._ @@ -56,10 +56,10 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { .map(KafkaConfig.fromProps(_, overridingProps)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) @Timeout(15) - def testFollowerCompleteDelayedFetchesOnReplication(groupProtocol: String): Unit = { + def testFollowerCompleteDelayedFetchesOnReplication(quorum: String, groupProtocol: String): Unit = { // Create a topic with 2 replicas where broker 0 is the leader and 1 is the follower. val admin = createAdminClient() val partitionLeaders = TestUtils.createTopicWithAdmin( @@ -95,15 +95,15 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { TestUtils.generateAndProduceMessages(brokers, topic, numMessages = 1) val response = receive[FetchResponse](socket, ApiKeys.FETCH, version) assertEquals(Errors.NONE, response.error) - assertEquals(util.Map.of(Errors.NONE, 2), response.errorCounts) + assertEquals(Map(Errors.NONE -> 2).asJava, response.errorCounts) } finally { socket.close() } } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchFromLeaderWhilePreferredReadReplicaIsUnavailable(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchFromLeaderWhilePreferredReadReplicaIsUnavailable(quorum: String, groupProtocol: String): Unit = { // Create a topic with 2 replicas where broker 0 is the leader and 1 is the follower. val admin = createAdminClient() TestUtils.createTopicWithAdmin( @@ -123,15 +123,15 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { val topicPartition = new TopicPartition(topic, 0) TestUtils.waitUntilTrue(() => { val endpoints = brokers(leaderBrokerId).metadataCache.getPartitionReplicaEndpoints(topicPartition, listenerName) - !endpoints.containsKey(followerBrokerId) + !endpoints.contains(followerBrokerId) }, "follower is still reachable.") assertEquals(-1, getPreferredReplica) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testFetchFromFollowerWithRoll(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testFetchFromFollowerWithRoll(quorum: String, groupProtocol: String): Unit = { // Create a topic with 2 replicas where broker 0 is the leader and 1 is the follower. val admin = createAdminClient() TestUtils.createTopicWithAdmin( @@ -151,7 +151,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { consumerProps.put(ConsumerConfig.GROUP_PROTOCOL_CONFIG, groupProtocol) val consumer = new KafkaConsumer(consumerProps, new ByteArrayDeserializer, new ByteArrayDeserializer) try { - consumer.subscribe(util.List.of(topic)) + consumer.subscribe(List(topic).asJava) // Wait until preferred replica is set to follower. TestUtils.waitUntilTrue(() => { @@ -182,9 +182,9 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { } @Disabled - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testRackAwareRangeAssignor(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testRackAwareRangeAssignor(quorum: String, groupProtocol: String): Unit = { val partitionList = brokers.indices.toList val topicWithAllPartitionsOnAllRacks = "topicWithAllPartitionsOnAllRacks" @@ -240,15 +240,15 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { try { // Rack-based assignment results in partitions assigned in reverse order since partition racks are in the reverse order. - consumers.foreach(_.subscribe(util.Set.of(topicWithSingleRackPartitions))) + consumers.foreach(_.subscribe(Collections.singleton(topicWithSingleRackPartitions))) verifyAssignments(partitionList.reverse, topicWithSingleRackPartitions) // Non-rack-aware assignment results in ordered partitions. - consumers.foreach(_.subscribe(util.Set.of(topicWithAllPartitionsOnAllRacks))) + consumers.foreach(_.subscribe(Collections.singleton(topicWithAllPartitionsOnAllRacks))) verifyAssignments(partitionList, topicWithAllPartitionsOnAllRacks) // Rack-aware assignment with co-partitioning results in reverse assignment for both topics. - consumers.foreach(_.subscribe(util.Set.of(topicWithSingleRackPartitions, topicWithAllPartitionsOnAllRacks))) + consumers.foreach(_.subscribe(Set(topicWithSingleRackPartitions, topicWithAllPartitionsOnAllRacks).asJava)) verifyAssignments(partitionList.reverse, topicWithAllPartitionsOnAllRacks, topicWithSingleRackPartitions) // Perform reassignment for topicWithSingleRackPartitions to reverse the replica racks and @@ -256,7 +256,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { val admin = createAdminClient() val reassignments = new util.HashMap[TopicPartition, util.Optional[NewPartitionReassignment]]() partitionList.foreach { p => - val newAssignment = new NewPartitionReassignment(util.List.of(p)) + val newAssignment = new NewPartitionReassignment(Collections.singletonList(p)) reassignments.put(new TopicPartition(topicWithSingleRackPartitions, p), util.Optional.of(newAssignment)) } admin.alterPartitionReassignments(reassignments).all().get(30, TimeUnit.SECONDS) @@ -283,7 +283,7 @@ class FetchFromFollowerIntegrationTest extends BaseFetchRequestTest { ) val response = connectAndReceive[FetchResponse](request, brokers(leaderBrokerId).socketServer) assertEquals(Errors.NONE, response.error) - assertEquals(util.Map.of(Errors.NONE, 2), response.errorCounts) + assertEquals(Map(Errors.NONE -> 2).asJava, response.errorCounts) assertEquals(1, response.data.responses.size) val topicResponse = response.data.responses.get(0) assertEquals(1, topicResponse.partitions.size) diff --git a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala index 575c612bf26a1..9868dd1d7d183 100644 --- a/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala +++ b/core/src/test/scala/integration/kafka/server/GssapiAuthenticationTest.scala @@ -20,7 +20,7 @@ package kafka.server import java.net.InetSocketAddress import java.time.Duration -import java.util.Properties +import java.util.{Collections, Properties} import java.util.concurrent.{CountDownLatch, Executors, TimeUnit} import javax.security.auth.login.LoginContext import kafka.api.{IntegrationTestHarness, SaslSetup} @@ -38,9 +38,11 @@ import org.apache.kafka.common.security.kerberos.KerberosLogin import org.apache.kafka.common.utils.{LogContext, MockTime} import org.apache.kafka.network.SocketServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} + +import scala.jdk.CollectionConverters._ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { override val brokerCount = 1 @@ -90,8 +92,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * Tests that Kerberos replay error `Request is a replay (34)` is not handled as an authentication exception * since replay detection used to detect DoS attacks may occasionally reject valid concurrent requests. */ - @Test - def testRequestIsAReplay(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testRequestIsAReplay(quorum: String): Unit = { val successfulAuthsPerThread = 10 val futures = (0 until numThreads).map(_ => executor.submit(new Runnable { override def run(): Unit = verifyRetriableFailuresDuringAuthentication(successfulAuthsPerThread) @@ -107,8 +110,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * are able to connect after the second re-login. Verifies that logout is performed only once * since duplicate logouts without successful login results in NPE from Java 9 onwards. */ - @Test - def testLoginFailure(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLoginFailure(quorum: String): Unit = { val selector = createSelectorWithRelogin() try { val login = TestableKerberosLogin.instance @@ -130,8 +134,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * is performed when credentials are unavailable between logout and login, we handle it as a * transient error and not an authentication failure so that clients may retry. */ - @Test - def testReLogin(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReLogin(quorum: String): Unit = { val selector = createSelectorWithRelogin() try { val login = TestableKerberosLogin.instance @@ -161,8 +166,9 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * Tests that Kerberos error `Server not found in Kerberos database (7)` is handled * as a fatal authentication failure. */ - @Test - def testServerNotFoundInKerberosDatabase(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testServerNotFoundInKerberosDatabase(quorum: String): Unit = { val jaasConfig = clientConfig.getProperty(SaslConfigs.SASL_JAAS_CONFIG) val invalidServiceConfig = jaasConfig.replace("serviceName=\"kafka\"", "serviceName=\"invalid-service\"") clientConfig.put(SaslConfigs.SASL_JAAS_CONFIG, invalidServiceConfig) @@ -174,15 +180,15 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { * Test that when client fails to verify authenticity of the server, the resulting failed authentication exception * is thrown immediately, and is not affected by connection.failed.authentication.delay.ms. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testServerAuthenticationFailure(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testServerAuthenticationFailure(quorum: String, groupProtocol: String): Unit = { // Setup client with a non-existent service principal, so that server authentication fails on the client val clientLoginContext = jaasClientLoginModule(kafkaClientSaslMechanism, Some("another-kafka-service")) val configOverrides = new Properties() configOverrides.setProperty(SaslConfigs.SASL_JAAS_CONFIG, clientLoginContext) val consumer = createConsumer(configOverrides = configOverrides) - consumer.assign(java.util.List.of(tp)) + consumer.assign(List(tp).asJava) val startMs = System.currentTimeMillis() assertThrows(classOf[SaslAuthenticationException], () => consumer.poll(Duration.ofMillis(50))) @@ -261,7 +267,7 @@ class GssapiAuthenticationTest extends IntegrationTestHarness with SaslSetup { private def createSelectorWithRelogin(): Selector = { clientConfig.setProperty(SaslConfigs.SASL_KERBEROS_MIN_TIME_BEFORE_RELOGIN, "0") val config = new TestSecurityConfig(clientConfig) - val jaasContexts = java.util.Map.of("GSSAPI", JaasContext.loadClientContext(config.values())) + val jaasContexts = Collections.singletonMap("GSSAPI", JaasContext.loadClientContext(config.values())) val channelBuilder = new SaslChannelBuilder(ConnectionMode.CLIENT, jaasContexts, securityProtocol, null, false, kafkaClientSaslMechanism, null, null, null, time, new LogContext(), _ => org.apache.kafka.test.TestUtils.defaultApiVersionsResponse(ListenerType.BROKER)) { diff --git a/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala b/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala new file mode 100644 index 0000000000000..165f95e3a62a9 --- /dev/null +++ b/core/src/test/scala/integration/kafka/server/IntegrationTestUtils.scala @@ -0,0 +1,106 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import kafka.network.SocketServer +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, RequestHeader, ResponseHeader} +import org.apache.kafka.common.utils.Utils + +import java.io.{DataInputStream, DataOutputStream} +import java.net.Socket +import java.nio.ByteBuffer +import scala.reflect.ClassTag + +object IntegrationTestUtils { + + def sendRequest(socket: Socket, request: Array[Byte]): Unit = { + val outgoing = new DataOutputStream(socket.getOutputStream) + outgoing.writeInt(request.length) + outgoing.write(request) + outgoing.flush() + } + + private def sendWithHeader(request: AbstractRequest, header: RequestHeader, socket: Socket): Unit = { + val serializedBytes = Utils.toArray(request.serializeWithHeader(header)) + sendRequest(socket, serializedBytes) + } + + def nextRequestHeader[T <: AbstractResponse](apiKey: ApiKeys, + apiVersion: Short, + clientId: String = "client-id", + correlationIdOpt: Option[Int] = None): RequestHeader = { + val correlationId = correlationIdOpt.getOrElse { + this.correlationId += 1 + this.correlationId + } + new RequestHeader(apiKey, apiVersion, clientId, correlationId) + } + + def send(request: AbstractRequest, + socket: Socket, + clientId: String = "client-id", + correlationId: Option[Int] = None): Unit = { + val header = nextRequestHeader(request.apiKey, request.version, clientId, correlationId) + sendWithHeader(request, header, socket) + } + + def receive[T <: AbstractResponse](socket: Socket, apiKey: ApiKeys, version: Short) + (implicit classTag: ClassTag[T]): T = { + val incoming = new DataInputStream(socket.getInputStream) + val len = incoming.readInt() + + val responseBytes = new Array[Byte](len) + incoming.readFully(responseBytes) + + val responseBuffer = ByteBuffer.wrap(responseBytes) + ResponseHeader.parse(responseBuffer, apiKey.responseHeaderVersion(version)) + + AbstractResponse.parseResponse(apiKey, responseBuffer, version) match { + case response: T => response + case response => + throw new ClassCastException(s"Expected response with type ${classTag.runtimeClass}, but found ${response.getClass}") + } + } + + def sendAndReceive[T <: AbstractResponse](request: AbstractRequest, + socket: Socket, + clientId: String = "client-id", + correlationId: Option[Int] = None) + (implicit classTag: ClassTag[T]): T = { + send(request, socket, clientId, correlationId) + receive[T](socket, request.apiKey, request.version) + } + + def connectAndReceive[T <: AbstractResponse](request: AbstractRequest, + destination: SocketServer, + listenerName: ListenerName) + (implicit classTag: ClassTag[T]): T = { + val socket = connect(destination, listenerName) + try sendAndReceive[T](request, socket) + finally socket.close() + } + + private var correlationId = 0 + + def connect(socketServer: SocketServer, + listenerName: ListenerName): Socket = { + new Socket("localhost", socketServer.boundPort(listenerName)) + } +} diff --git a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala index 6f552a8ebe96c..17a75080ba167 100644 --- a/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala +++ b/core/src/test/scala/integration/kafka/server/KRaftClusterTest.scala @@ -17,9 +17,10 @@ package kafka.server +import kafka.log.UnifiedLog import kafka.network.SocketServer +import kafka.server.IntegrationTestUtils.connectAndReceive import kafka.utils.TestUtils -import org.apache.kafka.server.IntegrationTestUtils.connectAndReceive import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin._ import org.apache.kafka.common.acl.{AclBinding, AclBindingFilter} @@ -28,6 +29,7 @@ import org.apache.kafka.common.config.ConfigResource.Type import org.apache.kafka.common.errors.{InvalidPartitionsException, PolicyViolationException, UnsupportedVersionException} import org.apache.kafka.common.message.DescribeClusterRequestData import org.apache.kafka.common.metadata.{ConfigRecord, FeatureLevelRecord} +import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.Errors._ import org.apache.kafka.common.quota.ClientQuotaAlteration.Op @@ -43,11 +45,10 @@ import org.apache.kafka.metadata.bootstrap.BootstrapMetadata import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.authorizer._ import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, MetadataVersion} -import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs} +import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.quota import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaType} -import org.apache.kafka.storage.internals.log.UnifiedLog import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{Tag, Test, Timeout} import org.junit.jupiter.params.ParameterizedTest @@ -60,11 +61,10 @@ import java.nio.file.{FileSystems, Files, Path, Paths} import java.{lang, util} import java.util.concurrent.{CompletableFuture, CompletionStage, ExecutionException, TimeUnit} import java.util.concurrent.atomic.AtomicInteger -import java.util.{Optional, OptionalLong, Properties} +import java.util.{Collections, Optional, OptionalLong, Properties} import scala.collection.{Seq, mutable} import scala.concurrent.duration.{FiniteDuration, MILLISECONDS, SECONDS} import scala.jdk.CollectionConverters._ -import scala.util.Using @Timeout(120) @Tag("integration") @@ -113,7 +113,7 @@ class KRaftClusterTest { cluster.format() cluster.startup() val controller = cluster.controllers().values().iterator().asScala.filter(_.controller.isActive).next() - val port = controller.socketServer.boundPort(ListenerName.normalised(controller.config.controllerListeners.head.listener)) + val port = controller.socketServer.boundPort(controller.config.controllerListeners.head.listenerName) // shutdown active controller controller.shutdown() @@ -121,6 +121,8 @@ class KRaftClusterTest { val config = controller.sharedServer.controllerConfig.props config.asInstanceOf[util.HashMap[String,String]].put(SocketServerConfigs.LISTENERS_CONFIG, s"CONTROLLER://localhost:$port") controller.sharedServer.controllerConfig.updateCurrentConfig(new KafkaConfig(config)) + // metrics will be set to null when closing a controller, so we should recreate it for testing + controller.sharedServer.metrics = new Metrics() // restart controller controller.startup() @@ -130,32 +132,6 @@ class KRaftClusterTest { } } - @Test - def testClusterWithLowerCaseListeners(): Unit = { - Using.resource(new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setBrokerListenerName(new ListenerName("external")). - setNumControllerNodes(3). - build()).build() - ) { cluster => - cluster.format() - cluster.startup() - cluster.brokers().forEach((_, broker) => { - assertEquals(util.List.of("external://localhost:0"), broker.config.get(SocketServerConfigs.LISTENERS_CONFIG)) - assertEquals("external", broker.config.get(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG)) - assertEquals("external:PLAINTEXT,CONTROLLER:PLAINTEXT", broker.config.get(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG)) - }) - TestUtils.waitUntilTrue(() => cluster.brokers().get(0).brokerState == BrokerState.RUNNING, - "Broker never made it to RUNNING state.") - TestUtils.waitUntilTrue(() => cluster.raftManagers().get(0).client.leaderAndEpoch().leaderId.isPresent, - "RaftManager was not initialized.") - Using.resource(Admin.create(cluster.clientProperties())) { admin => - assertEquals(cluster.nodes().clusterId(), admin.describeCluster().clusterId().get()) - } - } - } - @Test def testCreateClusterAndWaitForBrokerInRunningState(): Unit = { val cluster = new KafkaClusterTestKit.Builder( @@ -199,13 +175,13 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - val newTopic = util.List.of(new NewTopic("test-topic", 1, 3.toShort)) + val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 3.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) // Delete topic - val deleteResult = admin.deleteTopics(util.List.of("test-topic")) + val deleteResult = admin.deleteTopics(Collections.singletonList("test-topic")) deleteResult.all().get() // List again @@ -265,7 +241,7 @@ class KRaftClusterTest { "Broker never made it to RUNNING state.") val admin = Admin.create(cluster.clientProperties()) try { - val entity = new ClientQuotaEntity(util.Map.of("user", "testkit")) + val entity = new ClientQuotaEntity(Map("user" -> "testkit").asJava) var filter = ClientQuotaFilter.containsOnly( List(ClientQuotaFilterComponent.ofEntity("user", "testkit")).asJava) @@ -273,7 +249,7 @@ class KRaftClusterTest { quotas: Seq[ClientQuotaAlteration.Op], filter: ClientQuotaFilter, expectCount: Int): util.Map[ClientQuotaEntity, util.Map[String, lang.Double]] = { - val alterResult = admin.alterClientQuotas(util.List.of(new ClientQuotaAlteration(entity, quotas.asJava))) + val alterResult = admin.alterClientQuotas(Seq(new ClientQuotaAlteration(entity, quotas.asJava)).asJava) try { alterResult.all().get() } catch { @@ -289,7 +265,7 @@ class KRaftClusterTest { } val (describeResult, ok) = TestUtils.computeUntilTrue(describeOrFail(filter)) { - results => results.getOrDefault(entity, util.Map.of[String, lang.Double]()).size() == expectCount + results => results.getOrDefault(entity, util.Collections.emptyMap[String, lang.Double]()).size() == expectCount } assertTrue(ok, "Broker never saw new client quotas") describeResult @@ -323,19 +299,19 @@ class KRaftClusterTest { assertEquals(9999.0, describeResult.get(entity).get("producer_byte_rate"), 1e-6) // Add another quota for a different entity with same user part - val entity2 = new ClientQuotaEntity(util.Map.of("user", "testkit", "client-id", "some-client")) + val entity2 = new ClientQuotaEntity(Map("user" -> "testkit", "client-id" -> "some-client").asJava) filter = ClientQuotaFilter.containsOnly( - util.List.of( + List( ClientQuotaFilterComponent.ofEntity("user", "testkit"), ClientQuotaFilterComponent.ofEntity("client-id", "some-client"), - )) + ).asJava) describeResult = alterThenDescribe(entity2, Seq(new ClientQuotaAlteration.Op("producer_byte_rate", 9998)), filter, 1) assertEquals(9998.0, describeResult.get(entity2).get("producer_byte_rate"), 1e-6) // non-strict match filter = ClientQuotaFilter.contains( - util.List.of(ClientQuotaFilterComponent.ofEntity("user", "testkit"))) + List(ClientQuotaFilterComponent.ofEntity("user", "testkit")).asJava) TestUtils.tryUntilNoAssertionError() { val results = admin.describeClientQuotas(filter).entities().get() @@ -356,14 +332,14 @@ class KRaftClusterTest { entity: ClientQuotaEntity, value: Long ): Unit = { - admin.alterClientQuotas(util.List.of( - new ClientQuotaAlteration(entity, util.List.of( + admin.alterClientQuotas(Collections.singletonList( + new ClientQuotaAlteration(entity, Collections.singletonList( new Op("consumer_byte_rate", value.doubleValue()))))). all().get() } def getConsumerByteRates(admin: Admin): Map[ClientQuotaEntity, Long] = { - val allFilter = ClientQuotaFilter.contains(util.List.of) + val allFilter = ClientQuotaFilter.contains(Collections.emptyList()) val results = new util.HashMap[ClientQuotaEntity, Long] admin.describeClientQuotas(allFilter).entities().get().forEach { case (entity, entityMap) => @@ -385,8 +361,8 @@ class KRaftClusterTest { "Broker never made it to RUNNING state.") val admin = Admin.create(cluster.clientProperties()) try { - val defaultUser = new ClientQuotaEntity(util.Collections.singletonMap[String, String]("user", null)) - val bobUser = new ClientQuotaEntity(util.Map.of[String, String]("user", "bob")) + val defaultUser = new ClientQuotaEntity(Collections.singletonMap[String, String]("user", null)) + val bobUser = new ClientQuotaEntity(Collections.singletonMap[String, String]("user", "bob")) TestUtils.retry(30000) { assertEquals(Map(), getConsumerByteRates(admin)) } @@ -518,10 +494,12 @@ class KRaftClusterTest { } private def sendDescribeClusterRequestToBoundPort(destination: SocketServer, - listenerName: ListenerName): DescribeClusterResponse = { - connectAndReceive[DescribeClusterResponse](new DescribeClusterRequest.Builder(new DescribeClusterRequestData()).build(), - destination.boundPort(listenerName)) - } + listenerName: ListenerName): DescribeClusterResponse = + connectAndReceive[DescribeClusterResponse]( + request = new DescribeClusterRequest.Builder(new DescribeClusterRequestData()).build(), + destination = destination, + listenerName = listenerName + ) @Test def testCreateClusterAndPerformReassignment(): Unit = { @@ -537,26 +515,26 @@ class KRaftClusterTest { try { // Create the topic. val assignments = new util.HashMap[Integer, util.List[Integer]] - assignments.put(0, util.List.of(0, 1, 2)) - assignments.put(1, util.List.of(1, 2, 3)) - assignments.put(2, util.List.of(2, 3, 0)) - assignments.put(3, util.List.of(3, 2, 1)) - val createTopicResult = admin.createTopics(util.List.of( + assignments.put(0, util.Arrays.asList(0, 1, 2)) + assignments.put(1, util.Arrays.asList(1, 2, 3)) + assignments.put(2, util.Arrays.asList(2, 3, 0)) + assignments.put(3, util.Arrays.asList(3, 2, 1)) + val createTopicResult = admin.createTopics(Collections.singletonList( new NewTopic("foo", assignments))) createTopicResult.all().get() waitForTopicListing(admin, Seq("foo"), Seq()) // Start some reassignments. - assertEquals(util.Map.of, admin.listPartitionReassignments().reassignments().get()) + assertEquals(Collections.emptyMap(), admin.listPartitionReassignments().reassignments().get()) val reassignments = new util.HashMap[TopicPartition, Optional[NewPartitionReassignment]] reassignments.put(new TopicPartition("foo", 0), - Optional.of(new NewPartitionReassignment(util.List.of(2, 1, 0)))) + Optional.of(new NewPartitionReassignment(util.Arrays.asList(2, 1, 0)))) reassignments.put(new TopicPartition("foo", 1), - Optional.of(new NewPartitionReassignment(util.List.of(0, 1, 2)))) + Optional.of(new NewPartitionReassignment(util.Arrays.asList(0, 1, 2)))) reassignments.put(new TopicPartition("foo", 2), - Optional.of(new NewPartitionReassignment(util.List.of(2, 3)))) + Optional.of(new NewPartitionReassignment(util.Arrays.asList(2, 3)))) reassignments.put(new TopicPartition("foo", 3), - Optional.of(new NewPartitionReassignment(util.List.of(3, 2, 0, 1)))) + Optional.of(new NewPartitionReassignment(util.Arrays.asList(3, 2, 0, 1)))) admin.alterPartitionReassignments(reassignments).all().get() TestUtils.waitUntilTrue( () => admin.listPartitionReassignments().reassignments().get().isEmpty, @@ -564,7 +542,7 @@ class KRaftClusterTest { var currentMapping: Seq[Seq[Int]] = Seq() val expectedMapping = Seq(Seq(2, 1, 0), Seq(0, 1, 2), Seq(2, 3), Seq(3, 2, 0, 1)) TestUtils.waitUntilTrue( () => { - val topicInfoMap = admin.describeTopics(util.Set.of("foo")).allTopicNames().get() + val topicInfoMap = admin.describeTopics(Collections.singleton("foo")).allTopicNames().get() if (topicInfoMap.containsKey("foo")) { currentMapping = translatePartitionInfoToSeq(topicInfoMap.get("foo").partitions()) expectedMapping.equals(currentMapping) @@ -712,7 +690,7 @@ class KRaftClusterTest { ("max.connections.per.ip", "60"), ("min.insync.replicas", "1"))), exhaustive = true) - admin.createTopics(util.List.of( + admin.createTopics(util.Arrays.asList( new NewTopic("foo", 2, 3.toShort), new NewTopic("bar", 2, 3.toShort))).all().get() TestUtils.waitForAllPartitionsMetadata(cluster.brokers().values().asScala.toSeq, "foo", 2) @@ -807,7 +785,7 @@ class KRaftClusterTest { val cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). setNumBrokerNodes(4). - setBootstrapMetadataVersion(MetadataVersion.fromVersionString(metadataVersionString, true)). + setBootstrapMetadataVersion(MetadataVersion.fromVersionString(metadataVersionString)). setNumControllerNodes(3).build()). build() try { @@ -816,14 +794,14 @@ class KRaftClusterTest { cluster.waitForReadyBrokers() val admin = Admin.create(cluster.clientProperties()) try { - val createResults = admin.createTopics(util.List.of( + val createResults = admin.createTopics(util.Arrays.asList( new NewTopic("foo", 1, 3.toShort), new NewTopic("bar", 2, 3.toShort))).values() createResults.get("foo").get() createResults.get("bar").get() - val increaseResults = admin.createPartitions(util.Map.of( - "foo", NewPartitions.increaseTo(3), - "bar", NewPartitions.increaseTo(2))).values() + val increaseResults = admin.createPartitions(Map( + "foo" -> NewPartitions.increaseTo(3), + "bar" -> NewPartitions.increaseTo(2)).asJava).values() increaseResults.get("foo").get() assertEquals(classOf[InvalidPartitionsException], assertThrows( classOf[ExecutionException], () => increaseResults.get("bar").get()).getCause.getClass) @@ -858,9 +836,8 @@ class KRaftClusterTest { Option(image.brokers().get(brokerId)).isEmpty } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testUnregisterBroker(usingBootstrapController: Boolean): Unit = { + @Test + def testUnregisterBroker(): Unit = { val cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). setNumBrokerNodes(4). @@ -874,7 +851,7 @@ class KRaftClusterTest { cluster.brokers().get(0).shutdown() TestUtils.waitUntilTrue(() => !brokerIsUnfenced(clusterImage(cluster, 1), 0), "Timed out waiting for broker 0 to be fenced.") - val admin = createAdminClient(cluster, bootstrapController = usingBootstrapController) + val admin = Admin.create(cluster.clientProperties()) try { admin.unregisterBroker(0) } finally { @@ -1014,8 +991,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.updateFeatures( - util.Map.of(MetadataVersion.FEATURE_NAME, - new FeatureUpdate(MetadataVersion.latestTesting().featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)), new UpdateFeaturesOptions + Map(MetadataVersion.FEATURE_NAME -> + new FeatureUpdate(MetadataVersion.latestTesting().featureLevel(), FeatureUpdate.UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions ) assertEquals(new SupportedVersionRange(0, 1), admin.describeFeatures().featureMetadata().get(). supportedFeatures().get(KRaftVersion.FEATURE_NAME)) @@ -1035,7 +1012,8 @@ class KRaftClusterTest { val cluster = new KafkaClusterTestKit.Builder( new TestKitNodes.Builder(). setNumBrokerNodes(1). - setNumControllerNodes(1).build()).setStandalone(true).build() + setNumControllerNodes(1). + setFeature(KRaftVersion.FEATURE_NAME, 1.toShort).build()).build() try { cluster.format() cluster.startup() @@ -1107,13 +1085,13 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - val newTopic = util.List.of(new NewTopic("test-topic", 1, 1.toShort)) + val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 1.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) // Delete topic - val deleteResult = admin.deleteTopics(util.List.of("test-topic")) + val deleteResult = admin.deleteTopics(Collections.singletonList("test-topic")) deleteResult.all().get() // List again @@ -1212,9 +1190,9 @@ class KRaftClusterTest { def assertConfigValue(expected: Int): Unit = { TestUtils.retry(60000) { assertEquals(expected, cluster.controllers().values().iterator().next(). - quotaManagers.clientQuotaCallbackPlugin.get.get.asInstanceOf[DummyClientQuotaCallback].value) + quotaManagers.clientQuotaCallback.get.asInstanceOf[DummyClientQuotaCallback].value) assertEquals(expected, cluster.brokers().values().iterator().next(). - quotaManagers.clientQuotaCallbackPlugin.get.get.asInstanceOf[DummyClientQuotaCallback].value) + quotaManagers.clientQuotaCallback.get.asInstanceOf[DummyClientQuotaCallback].value) } } @@ -1226,8 +1204,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - util.Map.of(new ConfigResource(Type.BROKER, ""), - util.List.of(new AlterConfigOp( + Collections.singletonMap(new ConfigResource(Type.BROKER, ""), + Collections.singletonList(new AlterConfigOp( new ConfigEntry(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey, "1"), OpType.SET)))). all().get() } finally { @@ -1253,9 +1231,9 @@ class KRaftClusterTest { def assertFoobarValue(expected: Int): Unit = { TestUtils.retry(60000) { assertEquals(expected, cluster.controllers().values().iterator().next(). - authorizerPlugin.get.get.asInstanceOf[FakeConfigurableAuthorizer].foobar.get()) + authorizer.get.asInstanceOf[FakeConfigurableAuthorizer].foobar.get()) assertEquals(expected, cluster.brokers().values().iterator().next(). - authorizerPlugin.get.get.asInstanceOf[FakeConfigurableAuthorizer].foobar.get()) + authorizer.get.asInstanceOf[FakeConfigurableAuthorizer].foobar.get()) } } @@ -1267,8 +1245,8 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - util.Map.of(new ConfigResource(Type.BROKER, ""), - util.List.of(new AlterConfigOp( + Collections.singletonMap(new ConfigResource(Type.BROKER, ""), + Collections.singletonList(new AlterConfigOp( new ConfigEntry(FakeConfigurableAuthorizer.foobarConfigKey, "123"), OpType.SET)))). all().get() } finally { @@ -1389,7 +1367,7 @@ class KRaftClusterTest { @Test def testStartupWithNonDefaultKControllerDynamicConfiguration(): Unit = { - val bootstrapRecords = util.List.of( + val bootstrapRecords = util.Arrays.asList( new ApiMessageAndVersion(new FeatureLevelRecord(). setName(MetadataVersion.FEATURE_NAME). setFeatureLevel(MetadataVersion.IBP_3_7_IV0.featureLevel), 0.toShort), @@ -1433,7 +1411,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.List.of( + admin.createTopics(util.Arrays.asList( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. @@ -1445,7 +1423,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1459,7 +1437,7 @@ class KRaftClusterTest { broker0.startup() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(0, 1, 2), info.get.isr().asScala.toSet) } } finally { @@ -1488,7 +1466,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.List.of( + admin.createTopics(util.Arrays.asList( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. @@ -1500,7 +1478,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1508,13 +1486,13 @@ class KRaftClusterTest { // This is equivalent to a failure during the promotion of the future replica and a restart with directory for // the main replica being offline val log = broker0.logManager.getLog(foo0).get - log.renameDir(UnifiedLog.logFutureDirName(foo0), false) + log.renameDir(UnifiedLog.logFutureDirName(foo0), shouldReinitialize = false) // Start up broker0 and wait until the ISR of foo-0 is set to [0, 1, 2] broker0.startup() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(0, 1, 2), info.get.isr().asScala.toSet) assertTrue(broker0.logManager.getLog(foo0, isFuture = true).isEmpty) } @@ -1528,11 +1506,11 @@ class KRaftClusterTest { def copyDirectory(src: String, dest: String): Unit = { Files.walk(Paths.get(src)).forEach(p => { - val out = Paths.get(dest, p.toString.substring(src.length())) - if (!p.toString.equals(src)) { - Files.copy(p, out) + val out = Paths.get(dest, p.toString().substring(src.length())) + if (!p.toString().equals(src)) { + Files.copy(p, out); } - }) + }); } @Test @@ -1553,7 +1531,7 @@ class KRaftClusterTest { val broker1 = cluster.brokers().get(1) val foo0 = new TopicPartition("foo", 0) - admin.createTopics(util.List.of( + admin.createTopics(util.Arrays.asList( new NewTopic("foo", 3, 3.toShort))).all().get() // Wait until foo-0 is created on broker0. @@ -1565,7 +1543,7 @@ class KRaftClusterTest { broker0.shutdown() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(1, 2), info.get.isr().asScala.toSet) } @@ -1574,24 +1552,24 @@ class KRaftClusterTest { // Copy foo-0 to targetParentDir // This is so that we can rename the main replica to a future down below val parentDir = log.parentDir - val targetParentDir = broker0.config.logDirs.stream().filter(l => !l.equals(parentDir)).findFirst().get() + val targetParentDir = broker0.config.logDirs.filter(_ != parentDir).head val targetDirFile = new File(targetParentDir, log.dir.getName) targetDirFile.mkdir() - copyDirectory(log.dir.toString, targetDirFile.toString) + copyDirectory(log.dir.toString(), targetDirFile.toString()) assertTrue(targetDirFile.exists()) // Rename original log to a future // This is equivalent to a failure during the promotion of the future replica and a restart with directory for // the main replica being online val originalLogFile = log.dir - log.renameDir(UnifiedLog.logFutureDirName(foo0), false) + log.renameDir(UnifiedLog.logFutureDirName(foo0), shouldReinitialize = false) assertFalse(originalLogFile.exists()) // Start up broker0 and wait until the ISR of foo-0 is set to [0, 1, 2] broker0.startup() TestUtils.retry(60000) { val info = broker1.metadataCache.getLeaderAndIsr("foo", 0) - assertTrue(info.isPresent) + assertTrue(info.isDefined) assertEquals(Set(0, 1, 2), info.get.isr().asScala.toSet) assertTrue(broker0.logManager.getLog(foo0, isFuture = true).isEmpty) assertFalse(targetDirFile.exists()) @@ -1623,7 +1601,7 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { // Create a test topic - admin.createTopics(util.List.of( + admin.createTopics(Collections.singletonList( new NewTopic("test-topic", 1, 1.toShort))).all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) @@ -1632,7 +1610,7 @@ class KRaftClusterTest { cluster.raftManagers().get(active.asInstanceOf[QuorumController].nodeId()).shutdown() // Create a test topic on the new active controller - admin.createTopics(util.List.of( + admin.createTopics(Collections.singletonList( new NewTopic("test-topic2", 1, 1.toShort))).all().get() waitForTopicListing(admin, Seq("test-topic2"), Seq()) } finally { @@ -1643,51 +1621,6 @@ class KRaftClusterTest { } } - /** - * Test that once a cluster is formatted, a bootstrap.metadata file that contains an unsupported - * MetadataVersion is not a problem. This is a regression test for KAFKA-19192. - */ - @Test - def testOldBootstrapMetadataFile(): Unit = { - val baseDirectory = TestUtils.tempDir().toPath() - Using.resource(new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(1). - setBaseDirectory(baseDirectory). - build()). - setDeleteOnClose(false). - build() - ) { cluster => - cluster.format() - cluster.startup() - cluster.waitForReadyBrokers() - } - val oldBootstrapMetadata = BootstrapMetadata.fromRecords( - util.List.of( - new ApiMessageAndVersion( - new FeatureLevelRecord(). - setName(MetadataVersion.FEATURE_NAME). - setFeatureLevel(1), - 0.toShort) - ), - "oldBootstrapMetadata") - // Re-create the cluster using the same directory structure as above. - // Since we do not need to use the bootstrap metadata, the fact that - // it specifies an obsolete metadata.version should not be a problem. - Using.resource(new KafkaClusterTestKit.Builder( - new TestKitNodes.Builder(). - setNumBrokerNodes(1). - setNumControllerNodes(1). - setBaseDirectory(baseDirectory). - setBootstrapMetadata(oldBootstrapMetadata). - build()).build() - ) { cluster => - cluster.startup() - cluster.waitForReadyBrokers() - } - } - @Test def testIncreaseNumIoThreads(): Unit = { val cluster = new KafkaClusterTestKit.Builder( @@ -1703,10 +1636,10 @@ class KRaftClusterTest { val admin = Admin.create(cluster.clientProperties()) try { admin.incrementalAlterConfigs( - util.Map.of(new ConfigResource(Type.BROKER, ""), - util.List.of(new AlterConfigOp( + Collections.singletonMap(new ConfigResource(Type.BROKER, ""), + Collections.singletonList(new AlterConfigOp( new ConfigEntry(ServerConfigs.NUM_IO_THREADS_CONFIG, "8"), OpType.SET)))).all().get() - val newTopic = util.List.of(new NewTopic("test-topic", 1, 1.toShort)) + val newTopic = Collections.singletonList(new NewTopic("test-topic", 1, 1.toShort)) val createTopicResult = admin.createTopics(newTopic) createTopicResult.all().get() waitForTopicListing(admin, Seq("test-topic"), Seq()) @@ -1744,7 +1677,7 @@ object DummyClientQuotaCallback { class DummyClientQuotaCallback extends ClientQuotaCallback with Reconfigurable { var value = 0 - override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = util.Map.of + override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): util.Map[String, String] = Collections.emptyMap() override def quotaLimit(quotaType: ClientQuotaType, metricTags: util.Map[String, String]): lang.Double = 1.0 @@ -1765,7 +1698,7 @@ class DummyClientQuotaCallback extends ClientQuotaCallback with Reconfigurable { } } - override def reconfigurableConfigs(): util.Set[String] = util.Set.of(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey) + override def reconfigurableConfigs(): util.Set[String] = Set(DummyClientQuotaCallback.dummyClientQuotaCallbackValueConfigKey).asJava override def validateReconfiguration(configs: util.Map[String, _]): Unit = { } @@ -1804,7 +1737,7 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { }).toMap.asJava } - override def reconfigurableConfigs(): util.Set[String] = util.Set.of(foobarConfigKey) + override def reconfigurableConfigs(): util.Set[String] = Set(foobarConfigKey).asJava override def validateReconfiguration(configs: util.Map[String, _]): Unit = { fakeConfigurableAuthorizerConfigToInt(configs) @@ -1818,7 +1751,7 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { actions.asScala.map(_ => AuthorizationResult.ALLOWED).toList.asJava } - override def acls(filter: AclBindingFilter): lang.Iterable[AclBinding] = util.List.of[AclBinding]() + override def acls(filter: AclBindingFilter): lang.Iterable[AclBinding] = List[AclBinding]().asJava override def close(): Unit = {} @@ -1830,13 +1763,13 @@ class FakeConfigurableAuthorizer extends Authorizer with Reconfigurable { requestContext: AuthorizableRequestContext, aclBindings: util.List[AclBinding] ): util.List[_ <: CompletionStage[AclCreateResult]] = { - util.List.of + Collections.emptyList() } override def deleteAcls( requestContext: AuthorizableRequestContext, aclBindingFilters: util.List[AclBindingFilter] ): util.List[_ <: CompletionStage[AclDeleteResult]] = { - util.List.of + Collections.emptyList() } } diff --git a/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala b/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala new file mode 100644 index 0000000000000..490ebc48c1648 --- /dev/null +++ b/core/src/test/scala/integration/kafka/server/MetadataVersionIntegrationTest.scala @@ -0,0 +1,89 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import org.apache.kafka.common.test.api.{ClusterTest, ClusterTests, Type} +import kafka.utils.TestUtils +import org.apache.kafka.clients.admin.FeatureUpdate.UpgradeType +import org.apache.kafka.clients.admin.{FeatureUpdate, UpdateFeaturesOptions} +import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.common.MetadataVersion +import org.junit.jupiter.api.Assertions.assertEquals + +import scala.jdk.CollectionConverters._ + +class MetadataVersionIntegrationTest { + @ClusterTests(value = Array( + new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_3_IV3), + new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_4_IV0), + new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_5_IV0), + new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_6_IV0), + new ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_6_IV1) + )) + def testBasicMetadataVersionUpgrade(clusterInstance: ClusterInstance): Unit = { + val admin = clusterInstance.admin() + try { + val describeResult = admin.describeFeatures() + val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + assertEquals(ff.minVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) + assertEquals(ff.maxVersionLevel(), clusterInstance.config().metadataVersion().featureLevel()) + + // Update to new version + val updateVersion = MetadataVersion.IBP_3_7_IV1.featureLevel.shortValue + val updateResult = admin.updateFeatures( + Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) + updateResult.all().get() + + // Verify that new version is visible on broker + TestUtils.waitUntilTrue(() => { + val describeResult2 = admin.describeFeatures() + val ff2 = describeResult2.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + ff2.minVersionLevel() == updateVersion && ff2.maxVersionLevel() == updateVersion + }, "Never saw metadata.version increase on broker") + } finally { + admin.close() + } + } + + @ClusterTest(types = Array(Type.KRAFT), metadataVersion = MetadataVersion.IBP_3_9_IV0) + def testUpgradeSameVersion(clusterInstance: ClusterInstance): Unit = { + val admin = clusterInstance.admin() + try { + val updateVersion = MetadataVersion.IBP_3_9_IV0.featureLevel.shortValue + val updateResult = admin.updateFeatures( + Map("metadata.version" -> new FeatureUpdate(updateVersion, UpgradeType.UPGRADE)).asJava, new UpdateFeaturesOptions()) + updateResult.all().get() + } finally { + admin.close() + } + } + + @ClusterTest(types = Array(Type.KRAFT)) + def testDefaultIsLatestVersion(clusterInstance: ClusterInstance): Unit = { + val admin = clusterInstance.admin() + try { + val describeResult = admin.describeFeatures() + val ff = describeResult.featureMetadata().get().finalizedFeatures().get(MetadataVersion.FEATURE_NAME) + assertEquals(ff.minVersionLevel(), MetadataVersion.latestTesting().featureLevel(), + "If this test fails, check the default MetadataVersion in the @ClusterTest annotation") + assertEquals(ff.maxVersionLevel(), MetadataVersion.latestTesting().featureLevel()) + } finally { + admin.close() + } + } +} diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala index 969b069fc5d25..8bf7860d151a0 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithAdditionalJaasContextTest.scala @@ -39,7 +39,7 @@ class MultipleListenersWithAdditionalJaasContextTest extends MultipleListenersWi val props = new Properties kafkaServerSaslMechanisms(SecureInternal).foreach { mechanism => addDynamicJaasSection(props, SecureInternal, mechanism, - JaasTestUtils.kafkaServerSection("secure_internal.KafkaServer", java.util.List.of(mechanism), None.toJava)) + JaasTestUtils.kafkaServerSection("secure_internal.KafkaServer", Seq(mechanism).asJava, None.toJava)) } props } diff --git a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala index 8b6256663b6de..b7cdbc757a1a7 100644 --- a/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala +++ b/core/src/test/scala/integration/kafka/server/MultipleListenersWithSameSecurityProtocolBaseTest.scala @@ -18,7 +18,7 @@ package kafka.server -import java.util.{Objects, Optional, Properties} +import java.util.{Collections, Objects, Optional, Properties} import java.util.concurrent.TimeUnit import kafka.api.SaslSetup import kafka.security.JaasTestUtils @@ -126,13 +126,13 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT servers.head.groupCoordinator.groupMetadataTopicConfigs.entrySet(). forEach(e => newTopicConfigs.put(e.getKey.toString, e.getValue.toString)) newTopic.configs(newTopicConfigs) - admin.createTopics(java.util.List.of(newTopic)).all().get(5, TimeUnit.MINUTES) + admin.createTopics(java.util.Arrays.asList(newTopic)).all().get(5, TimeUnit.MINUTES) createScramCredentials(admin, JaasTestUtils.KAFKA_SCRAM_USER, JaasTestUtils.KAFKA_SCRAM_PASSWORD) TestUtils.ensureConsistentKRaftMetadata(servers, controllerServer) servers.head.config.listeners.foreach { endPoint => - val listenerName = ListenerName.normalised(endPoint.listener) + val listenerName = endPoint.listenerName val trustStoreFile = if (JaasTestUtils.usesSslTransportLayer(endPoint.securityProtocol)) Some(this.trustStoreFile) @@ -143,7 +143,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT def addProducerConsumer(listenerName: ListenerName, mechanism: String, saslProps: Option[Properties]): Unit = { val topic = s"${listenerName.value}${producers.size}" - admin.createTopics(java.util.List.of(new NewTopic(topic, 2, 2.toShort))).all().get(5, TimeUnit.MINUTES) + admin.createTopics(java.util.Arrays.asList(new NewTopic(topic, 2, 2.toShort))).all().get(5, TimeUnit.MINUTES) val clientMetadata = ClientMetadata(listenerName, mechanism, topic) producers(clientMetadata) = TestUtils.createProducer(bootstrapServers, acks = -1, @@ -155,7 +155,7 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT } if (JaasTestUtils.usesSaslAuthentication(endPoint.securityProtocol)) { - kafkaServerSaslMechanisms(endPoint.listener).foreach { mechanism => + kafkaServerSaslMechanisms(endPoint.listenerName.value).foreach { mechanism => addProducerConsumer(listenerName, mechanism, Some(kafkaClientSaslProperties(mechanism, dynamicJaasConfig = true))) } } else { @@ -179,16 +179,16 @@ abstract class MultipleListenersWithSameSecurityProtocolBaseTest extends QuorumT * Tests that we can produce and consume to/from all broker-defined listeners and security protocols. We produce * with acks=-1 to ensure that replication is also working. */ - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceConsume(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceConsume(quorum: String, groupProtocol: String): Unit = { producers.foreach { case (clientMetadata, producer) => val producerRecords = (1 to 10).map(i => new ProducerRecord(clientMetadata.topic, s"key$i".getBytes, s"value$i".getBytes)) producerRecords.map(producer.send).map(_.get(10, TimeUnit.SECONDS)) val consumer = consumers(clientMetadata) - consumer.subscribe(java.util.Set.of(clientMetadata.topic)) + consumer.subscribe(Collections.singleton(clientMetadata.topic)) TestUtils.consumeRecords(consumer, producerRecords.size) } } diff --git a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala index 6af0932690f92..dac38f2de2666 100755 --- a/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala +++ b/core/src/test/scala/integration/kafka/server/QuorumTestHarness.scala @@ -20,7 +20,7 @@ package kafka.server import java.io.File import java.net.InetSocketAddress import java.util -import java.util.{Locale, Optional, OptionalInt, Properties, stream} +import java.util.{Collections, Locale, Optional, OptionalInt, Properties, stream} import java.util.concurrent.{CompletableFuture, TimeUnit} import javax.security.auth.login.Configuration import kafka.utils.{CoreUtils, Logging, TestInfoUtils, TestUtils} @@ -38,9 +38,9 @@ import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsem import org.apache.kafka.metadata.storage.Formatter import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.queue.KafkaEventQueue -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.{ClientMetricsManager, ServerSocketFactory} -import org.apache.kafka.server.common.{MetadataVersion, TransactionVersion} +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, MetadataVersion, TransactionVersion} import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.{FaultHandler, MockFaultHandler} import org.apache.kafka.server.util.timer.SystemTimer @@ -81,7 +81,7 @@ class KRaftQuorumImplementation( ): KafkaBroker = { val metaPropertiesEnsemble = { val loader = new MetaPropertiesEnsemble.Loader() - loader.addLogDirs(config.logDirs) + loader.addLogDirs(config.logDirs.asJava) loader.addMetadataLogDir(config.metadataLogDir) val ensemble = loader.load() val copier = new MetaPropertiesEnsemble.Copier(ensemble) @@ -159,6 +159,10 @@ abstract class QuorumTestHarness extends Logging { private var testInfo: TestInfo = _ protected var implementation: QuorumImplementation = _ + def isShareGroupTest(): Boolean = { + TestInfoUtils.isShareGroupTest(testInfo) + } + def maybeGroupProtocolSpecified(): Option[GroupProtocol] = { TestInfoUtils.maybeGroupProtocolSpecified(testInfo) } @@ -257,17 +261,16 @@ abstract class QuorumTestHarness extends Logging { } val nodeId = Integer.parseInt(props.getProperty(KRaftConfigs.NODE_ID_CONFIG)) val metadataDir = TestUtils.tempDir() - props.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, metadataDir.getAbsolutePath) + props.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, metadataDir.getAbsolutePath) val proto = controllerListenerSecurityProtocol.toString val securityProtocolMaps = extraControllerSecurityProtocols().map(sc => sc + ":" + sc).mkString(",") val listeners = extraControllerSecurityProtocols().map(sc => sc + "://localhost:0").mkString(",") val listenerNames = extraControllerSecurityProtocols().mkString(",") props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"CONTROLLER:$proto,$securityProtocolMaps") - props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, - if (listeners.isEmpty) "CONTROLLER://localhost:0" else s"CONTROLLER://localhost:0,$listeners") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, - if (listeners.isEmpty) "CONTROLLER" else s"CONTROLLER,$listenerNames") + props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, s"CONTROLLER://localhost:0,$listeners") + props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, s"CONTROLLER,$listenerNames") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, s"$nodeId@localhost:0") + // Setting the configuration to the same value set on the brokers via TestUtils to keep KRaft based and Zk based controller configs are consistent. props.setProperty(ServerLogConfigs.LOG_DELETE_DELAY_MS_CONFIG, "1000") val config = new KafkaConfig(props) @@ -277,7 +280,7 @@ abstract class QuorumTestHarness extends Logging { formatter.addDirectory(metadataDir.getAbsolutePath) formatter.setReleaseVersion(metadataVersion) formatter.setUnstableFeatureVersionsEnabled(true) - formatter.setControllerListenerName(config.controllerListenerNames.get(0)) + formatter.setControllerListenerName(config.controllerListenerNames.head) formatter.setMetadataLogDirectory(config.metadataLogDir) val transactionVersion = @@ -286,6 +289,12 @@ abstract class QuorumTestHarness extends Logging { } else TransactionVersion.TV_1.featureLevel() formatter.setFeatureLevel(TransactionVersion.FEATURE_NAME, transactionVersion) + val elrVersion = + if (TestInfoUtils.isEligibleLeaderReplicasV1Enabled(testInfo)) { + EligibleLeaderReplicasVersion.ELRV_1.featureLevel() + } else EligibleLeaderReplicasVersion.ELRV_0.featureLevel() + formatter.setFeatureLevel(EligibleLeaderReplicasVersion.FEATURE_NAME, elrVersion) + addFormatterSettings(formatter) formatter.run() val bootstrapMetadata = formatter.bootstrapMetadata() @@ -303,7 +312,7 @@ abstract class QuorumTestHarness extends Logging { Time.SYSTEM, new Metrics(), controllerQuorumVotersFuture, - util.List.of, + Collections.emptyList(), faultHandlerFactory, ServerSocketFactory.INSTANCE, ) @@ -320,7 +329,7 @@ abstract class QuorumTestHarness extends Logging { controllerQuorumVotersFuture.completeExceptionally(e) } else { controllerQuorumVotersFuture.complete( - util.Map.of(nodeId, new InetSocketAddress("localhost", port)) + Collections.singletonMap(nodeId, new InetSocketAddress("localhost", port)) ) } }) @@ -360,7 +369,7 @@ object QuorumTestHarness { /** * Verify that a previous test that doesn't use QuorumTestHarness hasn't left behind an unexpected thread. - * This assumes that brokers, admin clients, producers and consumers are not created in another @BeforeClass, + * This assumes that brokers, ZooKeeper clients, producers and consumers are not created in another @BeforeClass, * which is true for core tests where this harness is used. */ @BeforeAll @@ -402,24 +411,32 @@ object QuorumTestHarness { s"${unexpected.mkString("`", ",", "`")}") } - def getTestGroupProtocolParametersAll: java.util.stream.Stream[Arguments] = { + // We want to test the following combinations: + // * KRaft and the classic group protocol + // * KRaft and the consumer group protocol + def getTestQuorumAndGroupProtocolParametersAll: java.util.stream.Stream[Arguments] = { stream.Stream.of( - Arguments.of(GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)), - Arguments.of(GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) + Arguments.of("kraft", GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)), + Arguments.of("kraft", GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) ) } - // For tests that only work with the classic group protocol - def getTestGroupProtocolParametersClassicGroupProtocolOnly: java.util.stream.Stream[Arguments] = { + // For tests that only work with the classic group protocol, we want to test the following combinations: + // * KRaft and the classic group protocol + def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly: java.util.stream.Stream[Arguments] = { stream.Stream.of( - Arguments.of(GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) + Arguments.of("kraft", GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT)) ) } - // For tests that only work with the consumer group protocol - def getTestGroupProtocolParametersConsumerGroupProtocolOnly: java.util.stream.Stream[Arguments] = { + // For tests that only work with the consumer group protocol, we want to test the following combination: + // * KRaft and the consumer group protocol + def getTestQuorumAndGroupProtocolParametersConsumerGroupProtocolOnly: stream.Stream[Arguments] = { stream.Stream.of( - Arguments.of(GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) + Arguments.of("kraft", GroupProtocol.CONSUMER.name.toLowerCase(Locale.ROOT)) ) } + + // The following is for tests that only work with the classic group protocol because of relying on Zookeeper + def getTestQuorumAndGroupProtocolParametersClassicGroupProtocolOnly_ZK_implicit: java.util.stream.Stream[Arguments] = stream.Stream.of(Arguments.of("zk", GroupProtocol.CLASSIC.name.toLowerCase(Locale.ROOT))) } diff --git a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala index 14d679f25d3a1..7196f6ed7eea5 100644 --- a/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala +++ b/core/src/test/scala/integration/kafka/server/RaftClusterSnapshotTest.scala @@ -20,9 +20,8 @@ package kafka.server import kafka.utils.TestUtils import org.apache.kafka.common.test.{KafkaClusterTestKit, TestKitNodes} import org.apache.kafka.common.utils.BufferSupplier -import org.apache.kafka.common.utils.LogContext import org.apache.kafka.metadata.MetadataRecordSerde -import org.apache.kafka.raft.MetadataLogConfig +import org.apache.kafka.server.config.KRaftConfigs import org.apache.kafka.snapshot.RecordsSnapshotReader import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertNotEquals @@ -49,8 +48,8 @@ class RaftClusterSnapshotTest { .setNumControllerNodes(numberOfControllers) .build() ) - .setConfigProp(MetadataLogConfig.METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG, "10") - .setConfigProp(MetadataLogConfig.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG, "0") + .setConfigProp(KRaftConfigs.METADATA_SNAPSHOT_MAX_NEW_RECORD_BYTES_CONFIG, "10") + .setConfigProp(KRaftConfigs.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG, "0") .build() ) { cluster => cluster.format() @@ -80,8 +79,7 @@ class RaftClusterSnapshotTest { new MetadataRecordSerde(), BufferSupplier.create(), 1, - true, - new LogContext() + true ) ) { snapshot => // Check that the snapshot is non-empty diff --git a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala index f83c545c7b298..285560d382686 100644 --- a/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala +++ b/core/src/test/scala/kafka/raft/KafkaMetadataLogTest.scala @@ -16,41 +16,31 @@ */ package kafka.raft +import kafka.log.UnifiedLog import kafka.server.{KafkaConfig, KafkaRaftServer} import kafka.utils.TestUtils import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.errors.CorruptRecordException -import org.apache.kafka.common.errors.RecordTooLargeException +import org.apache.kafka.common.errors.{InvalidConfigurationException, RecordTooLargeException} import org.apache.kafka.common.protocol import org.apache.kafka.common.protocol.{ObjectSerializationCache, Writable} -import org.apache.kafka.common.record.ArbitraryMemoryRecords -import org.apache.kafka.common.record.InvalidMemoryRecordsProvider import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.utils.Utils -import org.apache.kafka.raft.{KafkaRaftClient, LogAppendInfo, LogOffsetMetadata, MetadataLogConfig, QuorumConfig, ReplicatedLog, SegmentPosition, ValidOffsetAndEpoch} +import org.apache.kafka.raft._ import org.apache.kafka.raft.internals.BatchBuilder import org.apache.kafka.server.common.serialization.RecordSerde import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.apache.kafka.server.util.MockTime import org.apache.kafka.snapshot.{FileRawSnapshotWriter, RawSnapshotReader, RawSnapshotWriter, SnapshotPath, Snapshots} -import org.apache.kafka.storage.internals.log.{LogConfig, LogStartOffsetIncrementReason, UnifiedLog} +import org.apache.kafka.storage.internals.log.{LogConfig, LogStartOffsetIncrementReason} import org.apache.kafka.test.TestUtils.assertOptional import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.function.Executable import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ArgumentsSource -import net.jqwik.api.AfterFailureMode -import net.jqwik.api.ForAll -import net.jqwik.api.Property -import org.apache.kafka.common.config.{AbstractConfig, ConfigException} -import org.apache.kafka.server.common.OffsetAndEpoch import java.io.File import java.nio.ByteBuffer import java.nio.file.{Files, Path} import java.util -import java.util.{Optional, Properties} +import java.util.{Collections, Optional, Properties} import scala.jdk.CollectionConverters._ import scala.util.Using @@ -73,21 +63,21 @@ final class KafkaMetadataLogTest { @Test def testConfig(): Unit = { val props = new Properties() - props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, util.List.of("broker")) + props.put(KRaftConfigs.PROCESS_ROLES_CONFIG, util.Arrays.asList("broker")) props.put(QuorumConfig.QUORUM_VOTERS_CONFIG, "1@localhost:9093") props.put(KRaftConfigs.NODE_ID_CONFIG, Int.box(2)) props.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG, Int.box(10240)) - props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, Int.box(10 * 1024)) - assertThrows(classOf[ConfigException], () => { + props.put(KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG, Int.box(10240)) + props.put(KRaftConfigs.METADATA_LOG_SEGMENT_MILLIS_CONFIG, Int.box(10 * 1024)) + assertThrows(classOf[InvalidConfigurationException], () => { val kafkaConfig = KafkaConfig.fromProps(props) - val metadataConfig = new MetadataLogConfig(kafkaConfig) + val metadataConfig = MetadataLogConfig(kafkaConfig, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, KafkaRaftClient.MAX_FETCH_SIZE_BYTES) buildMetadataLog(tempDir, mockTime, metadataConfig) }) - props.put(MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG, Int.box(10 * 1024 * 1024)) + props.put(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG, Int.box(10240)) val kafkaConfig = KafkaConfig.fromProps(props) - val metadataConfig = new MetadataLogConfig(kafkaConfig) + val metadataConfig = MetadataLogConfig(kafkaConfig, KafkaRaftClient.MAX_BATCH_SIZE_BYTES, KafkaRaftClient.MAX_FETCH_SIZE_BYTES) buildMetadataLog(tempDir, mockTime, metadataConfig) } @@ -119,93 +109,12 @@ final class KafkaMetadataLogTest { classOf[RuntimeException], () => { log.appendAsFollower( - MemoryRecords.withRecords(initialOffset, Compression.NONE, currentEpoch, recordFoo), - currentEpoch + MemoryRecords.withRecords(initialOffset, Compression.NONE, currentEpoch, recordFoo) ) } ) } - @Test - def testEmptyAppendNotAllowed(): Unit = { - val log = buildMetadataLog(tempDir, mockTime) - - assertThrows(classOf[IllegalArgumentException], () => log.appendAsFollower(MemoryRecords.EMPTY, 1)) - assertThrows(classOf[IllegalArgumentException], () => log.appendAsLeader(MemoryRecords.EMPTY, 1)) - } - - @ParameterizedTest - @ArgumentsSource(classOf[InvalidMemoryRecordsProvider]) - def testInvalidMemoryRecords(records: MemoryRecords, expectedException: Optional[Class[Exception]]): Unit = { - val log = buildMetadataLog(tempDir, mockTime) - val previousEndOffset = log.endOffset().offset() - - val action: Executable = () => log.appendAsFollower(records, Int.MaxValue) - if (expectedException.isPresent) { - assertThrows(expectedException.get, action) - } else { - assertThrows(classOf[CorruptRecordException], action) - } - - assertEquals(previousEndOffset, log.endOffset().offset()) - } - - @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY) - def testRandomRecords( - @ForAll(supplier = classOf[ArbitraryMemoryRecords]) records: MemoryRecords - ): Unit = { - val tempDir = TestUtils.tempDir() - try { - val log = buildMetadataLog(tempDir, mockTime) - val previousEndOffset = log.endOffset().offset() - - assertThrows( - classOf[CorruptRecordException], - () => log.appendAsFollower(records, Int.MaxValue) - ) - - assertEquals(previousEndOffset, log.endOffset().offset()) - } finally { - Utils.delete(tempDir) - } - } - - @Test - def testInvalidLeaderEpoch(): Unit = { - val log = buildMetadataLog(tempDir, mockTime) - val previousEndOffset = log.endOffset().offset() - val epoch = log.lastFetchedEpoch() + 1 - val numberOfRecords = 10 - - val batchWithValidEpoch = MemoryRecords.withRecords( - previousEndOffset, - Compression.NONE, - epoch, - (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _* - ) - - val batchWithInvalidEpoch = MemoryRecords.withRecords( - previousEndOffset + numberOfRecords, - Compression.NONE, - epoch + 1, - (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _* - ) - - val buffer = ByteBuffer.allocate(batchWithValidEpoch.sizeInBytes() + batchWithInvalidEpoch.sizeInBytes()) - buffer.put(batchWithValidEpoch.buffer()) - buffer.put(batchWithInvalidEpoch.buffer()) - buffer.flip() - - val records = MemoryRecords.readableRecords(buffer) - - log.appendAsFollower(records, epoch) - - // Check that only the first batch was appended - assertEquals(previousEndOffset + numberOfRecords, log.endOffset().offset()) - // Check that the last fetched epoch matches the first batch - assertEquals(epoch, log.lastFetchedEpoch()) - } - @Test def testCreateSnapshot(): Unit = { val numberOfRecords = 10 @@ -479,7 +388,7 @@ final class KafkaMetadataLogTest { assertEquals(log.earliestSnapshotId(), log.latestSnapshotId()) log.close() - mockTime.sleep(config.internalDeleteDelayMillis) + mockTime.sleep(config.fileDeleteDelayMs) // Assert that the log dir doesn't contain any older snapshots Files .walk(logDir, 1) @@ -494,7 +403,7 @@ final class KafkaMetadataLogTest { def testStartupWithInvalidSnapshotState(): Unit = { // Initialize an empty log at offset 100. var log = buildMetadataLog(tempDir, mockTime) - log.log.truncateFullyAndStartAt(100, Optional.empty) + log.log.truncateFullyAndStartAt(newOffset = 100) log.close() val metadataDir = metadataLogDir(tempDir) @@ -514,7 +423,7 @@ final class KafkaMetadataLogTest { // Snapshot at offset 100 should be fine. writeEmptySnapshot(metadataDir, new OffsetAndEpoch(100, 1)) log = buildMetadataLog(tempDir, mockTime) - log.log.truncateFullyAndStartAt(200, Optional.empty) + log.log.truncateFullyAndStartAt(newOffset = 200) log.close() // Snapshots at higher offsets are also fine. In this case, the @@ -528,7 +437,7 @@ final class KafkaMetadataLogTest { def testSnapshotDeletionWithInvalidSnapshotState(): Unit = { // Initialize an empty log at offset 100. val log = buildMetadataLog(tempDir, mockTime) - log.log.truncateFullyAndStartAt(100, Optional.empty) + log.log.truncateFullyAndStartAt(newOffset = 100) log.close() val metadataDir = metadataLogDir(tempDir) @@ -650,7 +559,7 @@ final class KafkaMetadataLogTest { assertEquals(greaterSnapshotId, secondLog.latestSnapshotId().get) assertEquals(3 * numberOfRecords, secondLog.startOffset) assertEquals(epoch, secondLog.lastFetchedEpoch) - mockTime.sleep(config.internalDeleteDelayMillis) + mockTime.sleep(config.fileDeleteDelayMs) // Assert that the log dir doesn't contain any older snapshots Files @@ -688,14 +597,7 @@ final class KafkaMetadataLogTest { val leaderEpoch = 5 val maxBatchSizeInBytes = 16384 val recordSize = 64 - val config = createMetadataLogConfig( - DefaultMetadataLogConfig.logSegmentBytes, - DefaultMetadataLogConfig.logSegmentMillis, - DefaultMetadataLogConfig.retentionMaxBytes, - DefaultMetadataLogConfig.retentionMillis, - maxBatchSizeInBytes - ) - val log = buildMetadataLog(tempDir, mockTime, config) + val log = buildMetadataLog(tempDir, mockTime, DefaultMetadataLogConfig.copy(maxBatchSizeInBytes = maxBatchSizeInBytes)) val oversizeBatch = buildFullBatch(leaderEpoch, recordSize, maxBatchSizeInBytes + recordSize) assertThrows(classOf[RecordTooLargeException], () => { @@ -740,7 +642,7 @@ final class KafkaMetadataLogTest { ) val serializationCache = new ObjectSerializationCache - val records = util.List.of(new Array[Byte](recordSize)) + val records = Collections.singletonList(new Array[Byte](recordSize)) while (!batchBuilder.bytesNeeded(records, serializationCache).isPresent) { batchBuilder.appendRecord(records.get(0), serializationCache) } @@ -905,14 +807,18 @@ final class KafkaMetadataLogTest { @Test def testAdvanceLogStartOffsetAfterCleaning(): Unit = { - val config = createMetadataLogConfig( - 512, - 10 * 1000, - 256, - 60 * 1000, - 512, - DefaultMetadataLogConfig.internalMaxFetchSizeInBytes, + val config = MetadataLogConfig( + logSegmentBytes = 512, + logSegmentMinBytes = 512, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 256, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = 512, + maxFetchSizeInBytes = DefaultMetadataLogConfig.maxFetchSizeInBytes, + fileDeleteDelayMs = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, + nodeId = 1 ) + config.copy() val log = buildMetadataLog(tempDir, mockTime, config) // Generate some segments @@ -940,12 +846,13 @@ final class KafkaMetadataLogTest { @Test def testDeleteSnapshots(): Unit = { // Generate some logs and a few snapshots, set retention low and verify that cleaning occurs - val config = createMetadataLogConfig( - 1024, - 10 * 1000, - 1024, - 60 * 1000, - 100, + val config = DefaultMetadataLogConfig.copy( + logSegmentBytes = 1024, + logSegmentMinBytes = 1024, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 1024, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = 100 ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -971,12 +878,13 @@ final class KafkaMetadataLogTest { @Test def testSoftRetentionLimit(): Unit = { // Set retention equal to the segment size and generate slightly more than one segment of logs - val config = createMetadataLogConfig( - 10240, - 10 * 1000, - 10240, - 60 * 1000, - 100, + val config = DefaultMetadataLogConfig.copy( + logSegmentBytes = 10240, + logSegmentMinBytes = 10240, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 10240, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = 100 ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -1010,21 +918,15 @@ final class KafkaMetadataLogTest { }) } - @Test - def testSegmentMsConfigIsSetInMetadataLog(): Unit = { - val log = buildMetadataLog(tempDir, mockTime) - - assertEquals(DefaultMetadataLogConfig.logSegmentMillis, log.log.config().segmentMs) - } - @Test def testSegmentsLessThanLatestSnapshot(): Unit = { - val config = createMetadataLogConfig( - 10240, - 10 * 1000, - 10240, - 60 * 1000, - 200, + val config = DefaultMetadataLogConfig.copy( + logSegmentBytes = 10240, + logSegmentMinBytes = 10240, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 10240, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = 200 ) val log = buildMetadataLog(tempDir, mockTime, config) @@ -1075,11 +977,16 @@ object KafkaMetadataLogTest { override def read(input: protocol.Readable, size: Int): Array[Byte] = input.readArray(size) } - val DefaultMetadataLogConfig = createMetadataLogConfig( - 100 * 1024, - 10 * 1000, - 100 * 1024, - 60 * 1000, + val DefaultMetadataLogConfig = MetadataLogConfig( + logSegmentBytes = 100 * 1024, + logSegmentMinBytes = 100 * 1024, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 100 * 1024, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = KafkaRaftClient.MAX_BATCH_SIZE_BYTES, + maxFetchSizeInBytes = KafkaRaftClient.MAX_FETCH_SIZE_BYTES, + fileDeleteDelayMs = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, + nodeId = 1 ) def buildMetadataLogAndDir( @@ -1099,8 +1006,7 @@ object KafkaMetadataLogTest { logDir, time, time.scheduler, - metadataLogConfig, - 1 + metadataLogConfig ) (logDir.toPath, metadataLog, metadataLogConfig) @@ -1156,25 +1062,4 @@ object KafkaMetadataLogTest { } dir } - - private def createMetadataLogConfig( - internalLogSegmentBytes: Int, - logSegmentMillis: Long, - retentionMaxBytes: Long, - retentionMillis: Long, - internalMaxBatchSizeInBytes: Int = KafkaRaftClient.MAX_BATCH_SIZE_BYTES, - internalMaxFetchSizeInBytes: Int = KafkaRaftClient.MAX_FETCH_SIZE_BYTES, - internalDeleteDelayMillis: Long = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT - ): MetadataLogConfig = { - val config: util.Map[String, Any] = util.Map.of( - MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG, internalLogSegmentBytes, - MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, logSegmentMillis, - MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG, retentionMaxBytes, - MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG, retentionMillis, - MetadataLogConfig.INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG, internalMaxBatchSizeInBytes, - MetadataLogConfig.INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG, internalMaxFetchSizeInBytes, - MetadataLogConfig.INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG, internalDeleteDelayMillis, - ) - new MetadataLogConfig(new AbstractConfig(MetadataLogConfig.CONFIG_DEF, config, false)) - } -} +} \ No newline at end of file diff --git a/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala b/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala index e67e041e1f59d..495ad0b1c00f1 100644 --- a/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala +++ b/core/src/test/scala/kafka/server/KafkaRequestHandlerTest.scala @@ -40,6 +40,7 @@ import org.mockito.Mockito.{mock, times, verify, when} import java.net.InetAddress import java.nio.ByteBuffer +import java.util.Collections import java.util.concurrent.CompletableFuture import java.util.concurrent.atomic.AtomicInteger import java.util.stream.Collectors @@ -55,8 +56,8 @@ class KafkaRequestHandlerTest { def testCallbackTiming(): Unit = { val time = new MockTime() val startTime = time.nanoseconds() - val metrics = new RequestChannelMetrics(java.util.Set.of[ApiKeys]) - val requestChannel = new RequestChannel(10, time, metrics) + val metrics = new RequestChannelMetrics(Collections.emptySet[ApiKeys]) + val requestChannel = new RequestChannel(10, "", time, metrics) val apiHandler = mock(classOf[ApiRequestHandler]) try { val handler = new KafkaRequestHandler(0, 0, mock(classOf[Meter]), new AtomicInteger(1), requestChannel, apiHandler, time) @@ -94,7 +95,7 @@ class KafkaRequestHandlerTest { val time = new MockTime() val metrics = mock(classOf[RequestChannelMetrics]) val apiHandler = mock(classOf[ApiRequestHandler]) - val requestChannel = new RequestChannel(10, time, metrics) + val requestChannel = new RequestChannel(10, "", time, metrics) val handler = new KafkaRequestHandler(0, 0, mock(classOf[Meter]), new AtomicInteger(1), requestChannel, apiHandler, time) var handledCount = 0 @@ -130,7 +131,7 @@ class KafkaRequestHandlerTest { val time = new MockTime() val metrics = mock(classOf[RequestChannelMetrics]) val apiHandler = mock(classOf[ApiRequestHandler]) - val requestChannel = new RequestChannel(10, time, metrics) + val requestChannel = new RequestChannel(10, "", time, metrics) val handler = new KafkaRequestHandler(0, 0, mock(classOf[Meter]), new AtomicInteger(1), requestChannel, apiHandler, time) val originalRequestLocal = mock(classOf[RequestLocal]) @@ -164,7 +165,7 @@ class KafkaRequestHandlerTest { val time = new MockTime() val metrics = mock(classOf[RequestChannelMetrics]) val apiHandler = mock(classOf[ApiRequestHandler]) - val requestChannel = new RequestChannel(10, time, metrics) + val requestChannel = new RequestChannel(10, "", time, metrics) val handler = new KafkaRequestHandler(0, 0, mock(classOf[Meter]), new AtomicInteger(1), requestChannel, apiHandler, time) val originalRequestLocal = mock(classOf[RequestLocal]) diff --git a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala index c0ce96dd67244..4b9e7569b6ee7 100644 --- a/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/LocalLeaderEndPointTest.scala @@ -18,10 +18,9 @@ package kafka.server import kafka.server.QuotaFactory.QuotaManagers -import kafka.server.metadata.KRaftMetadataCache import kafka.utils.{CoreUtils, Logging, TestUtils} import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.{TopicIdPartition, Uuid} +import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionChangeRecord, PartitionRecord, TopicRecord} @@ -32,7 +31,6 @@ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.server.util.{MockScheduler, MockTime} import org.apache.kafka.storage.internals.log.{AppendOrigin, LogDirFailureChannel} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -40,7 +38,6 @@ import org.junit.jupiter.api.Assertions._ import org.mockito.Mockito.mock import java.io.File -import java.util.{Map => JMap} import scala.collection.Map import scala.jdk.CollectionConverters._ @@ -50,8 +47,7 @@ class LocalLeaderEndPointTest extends Logging { val topicId = Uuid.randomUuid() val topic = "test" val partition = 5 - val topicIdPartition = new TopicIdPartition(topicId, partition, topic) - val topicPartition = topicIdPartition.topicPartition() + val topicPartition = new TopicPartition(topic, partition) val sourceBroker: BrokerEndPoint = new BrokerEndPoint(0, "localhost", 9092) var replicaManager: ReplicaManager = _ var endPoint: LeaderEndPoint = _ @@ -62,10 +58,10 @@ class LocalLeaderEndPointTest extends Logging { def setUp(): Unit = { val props = TestUtils.createBrokerConfig(sourceBroker.id, port = sourceBroker.port) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val alterPartitionManager = mock(classOf[AlterPartitionManager]) val metrics = new Metrics - quotaManager = QuotaFactory.instantiate(config, metrics, time, "", "") + quotaManager = QuotaFactory.instantiate(config, metrics, time, "") replicaManager = new ReplicaManager( metrics = metrics, config = config, @@ -73,7 +69,7 @@ class LocalLeaderEndPointTest extends Logging { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager ) @@ -90,8 +86,8 @@ class LocalLeaderEndPointTest extends Logging { delta.replay(new PartitionRecord() .setPartitionId(partition) .setTopicId(topicId) - .setReplicas(java.util.List.of[Integer](sourceBroker.id)) - .setIsr(java.util.List.of[Integer](sourceBroker.id)) + .setReplicas(List[Integer](sourceBroker.id).asJava) + .setIsr(List[Integer](sourceBroker.id).asJava) .setLeader(sourceBroker.id) .setLeaderEpoch(0) .setPartitionEpoch(0) @@ -118,52 +114,52 @@ class LocalLeaderEndPointTest extends Logging { @Test def testFetchLatestOffset(): Unit = { - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(3L, 0), endPoint.fetchLatestOffset(topicPartition, 0)) + assertEquals(new OffsetAndEpoch(3L, 0), endPoint.fetchLatestOffset(topicPartition, currentLeaderEpoch = 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(6L, 1), endPoint.fetchLatestOffset(topicPartition, 7)) + assertEquals(new OffsetAndEpoch(6L, 1), endPoint.fetchLatestOffset(topicPartition, currentLeaderEpoch = 7)) } @Test def testFetchEarliestOffset(): Unit = { - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, 0)) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) replicaManager.deleteRecords(timeout = 1000L, Map(topicPartition -> 3), _ => ()) - assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestOffset(topicPartition, 7)) + assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 7)) } @Test def testFetchEarliestLocalOffset(): Unit = { - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestLocalOffset(topicPartition, 0)) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestLocalOffset(topicPartition, currentLeaderEpoch = 0)) bumpLeaderEpoch() - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - replicaManager.logManager.getLog(topicPartition).foreach(log => log.updateLocalLogStartOffset(3)) - assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, 7)) - assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestLocalOffset(topicPartition, 7)) + replicaManager.logManager.getLog(topicPartition).foreach(log => log._localLogStartOffset = 3) + assertEquals(new OffsetAndEpoch(0L, 0), endPoint.fetchEarliestOffset(topicPartition, currentLeaderEpoch = 7)) + assertEquals(new OffsetAndEpoch(3L, 1), endPoint.fetchEarliestLocalOffset(topicPartition, currentLeaderEpoch = 7)) } @Test def testFetchEpochEndOffsets(): Unit = { - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - var result = endPoint.fetchEpochEndOffsets(JMap.of( - topicPartition, new OffsetForLeaderPartition() + var result = endPoint.fetchEpochEndOffsets(Map( + topicPartition -> new OffsetForLeaderPartition() .setPartition(topicPartition.partition) - .setLeaderEpoch(0)) - ).asScala + .setLeaderEpoch(0) + )) var expected = Map( topicPartition -> new EpochEndOffset() @@ -180,14 +176,14 @@ class LocalLeaderEndPointTest extends Logging { bumpLeaderEpoch() assertEquals(2, replicaManager.getPartitionOrException(topicPartition).getLeaderEpoch) - appendRecords(replicaManager, topicIdPartition, records) + appendRecords(replicaManager, topicPartition, records) .onFire(response => assertEquals(Errors.NONE, response.error)) - result = endPoint.fetchEpochEndOffsets(JMap.of( - topicPartition, new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(Map( + topicPartition -> new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(2) - )).asScala + )) expected = Map( topicPartition -> new EpochEndOffset() @@ -200,11 +196,11 @@ class LocalLeaderEndPointTest extends Logging { assertEquals(expected, result) // Check missing epoch: 1, we expect the API to return (leader_epoch=0, end_offset=3). - result = endPoint.fetchEpochEndOffsets(JMap.of( - topicPartition, new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(Map( + topicPartition -> new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(1) - )).asScala + )) expected = Map( topicPartition -> new EpochEndOffset() @@ -216,11 +212,11 @@ class LocalLeaderEndPointTest extends Logging { assertEquals(expected, result) // Check missing epoch: 5, we expect the API to return (leader_epoch=-1, end_offset=-1) - result = endPoint.fetchEpochEndOffsets(JMap.of( - topicPartition, new OffsetForLeaderPartition() + result = endPoint.fetchEpochEndOffsets(Map( + topicPartition -> new OffsetForLeaderPartition() .setPartition(topicPartition.partition) .setLeaderEpoch(5) - )).asScala + )) expected = Map( topicPartition -> new EpochEndOffset() @@ -266,12 +262,12 @@ class LocalLeaderEndPointTest extends Logging { } private def appendRecords(replicaManager: ReplicaManager, - partition: TopicIdPartition, + partition: TopicPartition, records: MemoryRecords, origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - def appendCallback(responses: scala.collection.Map[TopicIdPartition, PartitionResponse]): Unit = { + def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { val response = responses.get(partition) assertTrue(response.isDefined) result.fire(response.get) diff --git a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala index 46c7237dafbc1..cb08a021e2c6d 100644 --- a/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala +++ b/core/src/test/scala/kafka/server/NodeToControllerRequestThreadTest.scala @@ -18,6 +18,7 @@ package kafka.server import java.nio.ByteBuffer +import java.util.Collections import java.util.concurrent.atomic.AtomicReference import kafka.utils.TestUtils import kafka.utils.TestUtils.TestControllerRequestCompletionHandler @@ -94,7 +95,7 @@ class NodeToControllerRequestThreadTest { when(controllerNodeProvider.getControllerInfo()).thenReturn(controllerInfo(Some(activeController))) - val expectedResponse = RequestTestUtils.metadataUpdateWith(2, java.util.Map.of("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(2, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -138,7 +139,7 @@ class NodeToControllerRequestThreadTest { when(controllerNodeProvider.getControllerInfo()).thenReturn( controllerInfo(Some(oldController)), controllerInfo(Some(newController))) - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -188,9 +189,9 @@ class NodeToControllerRequestThreadTest { controllerInfo(Some(oldController)), controllerInfo(Some(newController))) val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2, - java.util.Map.of("a", Errors.NOT_CONTROLLER), - java.util.Map.of("a", 2)) - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) + Collections.singletonMap("a", Errors.NOT_CONTROLLER), + Collections.singletonMap("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs = Long.MaxValue) @@ -256,7 +257,7 @@ class NodeToControllerRequestThreadTest { new EnvelopeResponseData().setErrorCode(Errors.NOT_CONTROLLER.code())) // response for retry request after receiving NOT_CONTROLLER error - val expectedResponse = RequestTestUtils.metadataUpdateWith(3, java.util.Map.of("a", 2)) + val expectedResponse = RequestTestUtils.metadataUpdateWith(3, Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), @@ -319,8 +320,8 @@ class NodeToControllerRequestThreadTest { val retryTimeoutMs = 30000 val responseWithNotControllerError = RequestTestUtils.metadataUpdateWith("cluster1", 2, - java.util.Map.of("a", Errors.NOT_CONTROLLER), - java.util.Map.of("a", 2)) + Collections.singletonMap("a", Errors.NOT_CONTROLLER), + Collections.singletonMap("a", 2)) val testRequestThread = new NodeToControllerRequestThread( mockClient, new ManualMetadataUpdater(), controllerNodeProvider, config, time, "", retryTimeoutMs) diff --git a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala index 739f8968bd29f..90edb4e306bc1 100644 --- a/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala +++ b/core/src/test/scala/kafka/server/RemoteLeaderEndPointTest.scala @@ -17,6 +17,8 @@ package kafka.server +import kafka.log.UnifiedLog +import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.epoch.util.MockBlockingSender import kafka.utils.TestUtils import org.apache.kafka.clients.FetchSessionHandler @@ -31,16 +33,12 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.server.common.{MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.util.MockTime -import org.apache.kafka.server.{LeaderEndPoint, PartitionFetchState, ReplicaState} -import org.apache.kafka.storage.internals.log.UnifiedLog import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} import org.junit.jupiter.params.ParameterizedTest import org.mockito.Mockito.{mock, when} import java.util -import java.util.Optional -import java.util.{Map => JMap} import scala.collection.Map import scala.jdk.CollectionConverters._ @@ -93,19 +91,19 @@ class RemoteLeaderEndPointTest { @Test def testFetchEpochEndOffsets(): Unit = { - val expected = util.Map.of( - topicPartition, new EpochEndOffset() + val expected = Map( + topicPartition -> new EpochEndOffset() .setPartition(topicPartition.partition) .setErrorCode(Errors.NONE.code) .setLeaderEpoch(0) .setEndOffset(logEndOffset)) - blockingSend.setOffsetsForNextResponse(expected) - val result = endPoint.fetchEpochEndOffsets(JMap.of( - topicPartition, new OffsetForLeaderPartition() + blockingSend.setOffsetsForNextResponse(expected.asJava) + val result = endPoint.fetchEpochEndOffsets(Map( + topicPartition -> new OffsetForLeaderPartition() .setPartition(topicPartition.partition) - .setLeaderEpoch(currentLeaderEpoch))).asScala + .setLeaderEpoch(currentLeaderEpoch))) - assertEquals(expected, result.asJava) + assertEquals(expected, result) } @Test @@ -132,18 +130,18 @@ class RemoteLeaderEndPointTest { val tp = new TopicPartition("topic1", 0) val topicId1 = Uuid.randomUuid() val log = mock(classOf[UnifiedLog]) - val partitionMap = java.util.Map.of( - tp, new PartitionFetchState(Optional.of(topicId1), 150, Optional.empty(), 0, Optional.empty(), ReplicaState.FETCHING, Optional.empty)) + val partitionMap = Map( + tp -> PartitionFetchState(Some(topicId1), 150, None, 0, None, state = Fetching, lastFetchedEpoch = None)) when(replicaManager.localLogOrException(tp)).thenReturn(log) when(log.logStartOffset).thenReturn(1) - val result1 = endPoint.buildFetch(partitionMap) - assertTrue(result1.partitionsWithError.isEmpty) - assertEquals(if (version < 15) -1L else 1L, result1.result.get.fetchRequest.build(version).replicaEpoch) + val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = endPoint.buildFetch(partitionMap) + assertTrue(partitionsWithError.isEmpty) + assertEquals(if (version < 15) -1L else 1L, fetchRequestOpt.get.fetchRequest.build(version).replicaEpoch) currentBrokerEpoch = 2L - val result2 = endPoint.buildFetch(partitionMap) - assertTrue(result2.partitionsWithError.isEmpty) - assertEquals(if (version < 15) -1L else 2L, result2.result.get.fetchRequest.build(version).replicaEpoch) + val ResultWithPartitions(newFetchRequestOpt, newPartitionsWithError) = endPoint.buildFetch(partitionMap) + assertTrue(newPartitionsWithError.isEmpty) + assertEquals(if (version < 15) -1L else 2L, newFetchRequestOpt.get.fetchRequest.build(version).replicaEpoch) } } diff --git a/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala b/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala index 0b197d467213a..9d36cae25c239 100644 --- a/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala +++ b/core/src/test/scala/kafka/server/metadata/ClientQuotaMetadataManagerTest.scala @@ -16,12 +16,12 @@ */ package kafka.server.metadata +import kafka.server.ClientQuotaManager import org.apache.kafka.image.ClientQuotaDelta -import org.apache.kafka.server.quota.ClientQuotaManager import org.junit.jupiter.api.Assertions.{assertDoesNotThrow, assertEquals, assertThrows} import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable -import java.util.Optional + class ClientQuotaMetadataManagerTest { @@ -41,35 +41,35 @@ class ClientQuotaMetadataManagerTest { assertThrows(classOf[IllegalStateException],() => ClientQuotaMetadataManager.transferToClientQuotaEntity(IpEntity("a"))) assertThrows(classOf[IllegalStateException],() => ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultIpEntity)) assertEquals( - (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.empty()), + (Some(ClientQuotaManager.UserEntity("user")), None), ClientQuotaMetadataManager.transferToClientQuotaEntity(UserEntity("user")) ) assertEquals( - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty()), + (Some(ClientQuotaManager.DefaultUserEntity), None), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserEntity) ) assertEquals( - (Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), + (None, Some(ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(ClientIdEntity("client")) ) assertEquals( - (Optional.empty(), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), + (None, Some(ClientQuotaManager.DefaultClientIdEntity)), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultClientIdEntity) ) assertEquals( - (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), + (Some(ClientQuotaManager.UserEntity("user")), Some(ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(ExplicitUserExplicitClientIdEntity("user", "client")) ) assertEquals( - (Optional.of(new ClientQuotaManager.UserEntity("user")), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), + (Some(ClientQuotaManager.UserEntity("user")), Some(ClientQuotaManager.DefaultClientIdEntity)), ClientQuotaMetadataManager.transferToClientQuotaEntity(ExplicitUserDefaultClientIdEntity("user")) ) assertEquals( - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(new ClientQuotaManager.ClientIdEntity("client"))), + (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.ClientIdEntity("client"))), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserExplicitClientIdEntity("client")) ) assertEquals( - (Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID)), + (Some(ClientQuotaManager.DefaultUserEntity), Some(ClientQuotaManager.DefaultClientIdEntity)), ClientQuotaMetadataManager.transferToClientQuotaEntity(DefaultUserDefaultClientIdEntity) ) } diff --git a/core/src/test/scala/kafka/server/metadata/MockConfigRepository.scala b/core/src/test/scala/kafka/server/metadata/MockConfigRepository.scala new file mode 100644 index 0000000000000..27e4c1d886916 --- /dev/null +++ b/core/src/test/scala/kafka/server/metadata/MockConfigRepository.scala @@ -0,0 +1,62 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import java.util +import java.util.Properties + +import org.apache.kafka.common.config.ConfigResource +import org.apache.kafka.common.config.ConfigResource.Type.TOPIC + +object MockConfigRepository { + def forTopic(topic: String, key: String, value: String): MockConfigRepository = { + val properties = new Properties() + properties.put(key, value) + forTopic(topic, properties) + } + + def forTopic(topic: String, properties: Properties): MockConfigRepository = { + val repository = new MockConfigRepository() + repository.configs.put(new ConfigResource(TOPIC, topic), properties) + repository + } +} + +class MockConfigRepository extends ConfigRepository { + val configs = new util.HashMap[ConfigResource, Properties]() + + override def config(configResource: ConfigResource): Properties = configs.synchronized { + configs.getOrDefault(configResource, new Properties()) + } + + def setConfig(configResource: ConfigResource, key: String, value: String): Unit = configs.synchronized { + val properties = configs.getOrDefault(configResource, new Properties()) + val newProperties = new Properties() + newProperties.putAll(properties) + if (value == null) { + newProperties.remove(key) + } else { + newProperties.put(key, value) + } + configs.put(configResource, newProperties) + } + + def setTopicConfig(topicName: String, key: String, value: String): Unit = configs.synchronized { + setConfig(new ConfigResource(TOPIC, topicName), key, value) + } +} diff --git a/core/src/test/scala/kafka/tools/LogCompactionTester.scala b/core/src/test/scala/kafka/tools/LogCompactionTester.scala new file mode 100644 index 0000000000000..2ea6c3aae6ca9 --- /dev/null +++ b/core/src/test/scala/kafka/tools/LogCompactionTester.scala @@ -0,0 +1,349 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.tools + +import java.io._ +import java.nio.ByteBuffer +import java.nio.charset.StandardCharsets.UTF_8 +import java.nio.file.{Files, Path} +import java.time.Duration +import java.util.{Properties, Random} + +import joptsimple.OptionParser +import kafka.utils._ +import org.apache.kafka.clients.admin.{Admin, NewTopic} +import org.apache.kafka.clients.CommonClientConfigs +import org.apache.kafka.clients.consumer.{Consumer, ConsumerConfig, KafkaConsumer} +import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord} +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.serialization.{ByteArraySerializer, StringDeserializer} +import org.apache.kafka.common.utils.{Exit, AbstractIterator, Utils} +import org.apache.kafka.server.util.CommandLineUtils + +import scala.jdk.CollectionConverters._ + +/** + * This is a torture test that runs against an existing broker + * + * Here is how it works: + * + * It produces a series of specially formatted messages to one or more partitions. Each message it produces + * it logs out to a text file. The messages have a limited set of keys, so there is duplication in the key space. + * + * The broker will clean its log as the test runs. + * + * When the specified number of messages have been produced we create a consumer and consume all the messages in the topic + * and write that out to another text file. + * + * Using a stable unix sort we sort both the producer log of what was sent and the consumer log of what was retrieved by the message key. + * Then we compare the final message in both logs for each key. If this final message is not the same for all keys we + * print an error and exit with exit code 1, otherwise we print the size reduction and exit with exit code 0. + */ +object LogCompactionTester { + + //maximum line size while reading produced/consumed record text file + private val ReadAheadLimit = 4906 + + def main(args: Array[String]): Unit = { + val parser = new OptionParser(false) + val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume.") + .withRequiredArg + .describedAs("count") + .ofType(classOf[java.lang.Long]) + .defaultsTo(Long.MaxValue) + val messageCompressionOpt = parser.accepts("compression-type", "message compression type") + .withOptionalArg + .describedAs("compressionType") + .ofType(classOf[java.lang.String]) + .defaultsTo("none") + val numDupsOpt = parser.accepts("duplicates", "The number of duplicates for each key.") + .withRequiredArg + .describedAs("count") + .ofType(classOf[java.lang.Integer]) + .defaultsTo(5) + val brokerOpt = parser.accepts("bootstrap-server", "The server(s) to connect to.") + .withRequiredArg + .describedAs("url") + .ofType(classOf[String]) + val topicsOpt = parser.accepts("topics", "The number of topics to test.") + .withRequiredArg + .describedAs("count") + .ofType(classOf[java.lang.Integer]) + .defaultsTo(1) + val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage of updates that are deletes.") + .withRequiredArg + .describedAs("percent") + .ofType(classOf[java.lang.Integer]) + .defaultsTo(0) + val sleepSecsOpt = parser.accepts("sleep", "Time in milliseconds to sleep between production and consumption.") + .withRequiredArg + .describedAs("ms") + .ofType(classOf[java.lang.Integer]) + .defaultsTo(0) + + val options = parser.parse(args: _*) + + if (args.isEmpty) + CommandLineUtils.printUsageAndExit(parser, "A tool to test log compaction. Valid options are: ") + + CommandLineUtils.checkRequiredArgs(parser, options, brokerOpt, numMessagesOpt) + + // parse options + val messages = options.valueOf(numMessagesOpt).longValue + val compressionType = options.valueOf(messageCompressionOpt) + val percentDeletes = options.valueOf(percentDeletesOpt).intValue + val dups = options.valueOf(numDupsOpt).intValue + val brokerUrl = options.valueOf(brokerOpt) + val topicCount = options.valueOf(topicsOpt).intValue + val sleepSecs = options.valueOf(sleepSecsOpt).intValue + + val testId = new Random().nextLong + val topics = (0 until topicCount).map("log-cleaner-test-" + testId + "-" + _).toArray + createTopics(brokerUrl, topics.toSeq) + + println(s"Producing $messages messages..to topics ${topics.mkString(",")}") + val producedDataFilePath = produceMessages(brokerUrl, topics, messages, compressionType, dups, percentDeletes) + println(s"Sleeping for $sleepSecs seconds...") + Thread.sleep(sleepSecs * 1000) + println("Consuming messages...") + val consumedDataFilePath = consumeMessages(brokerUrl, topics) + + val producedLines = lineCount(producedDataFilePath) + val consumedLines = lineCount(consumedDataFilePath) + val reduction = 100 * (1.0 - consumedLines.toDouble / producedLines.toDouble) + println(f"$producedLines%d rows of data produced, $consumedLines%d rows of data consumed ($reduction%.1f%% reduction).") + + println("De-duplicating and validating output files...") + validateOutput(producedDataFilePath.toFile, consumedDataFilePath.toFile) + Utils.delete(producedDataFilePath.toFile) + Utils.delete(consumedDataFilePath.toFile) + //if you change this line, we need to update test_log_compaction_tool.py system test + println("Data verification is completed") + } + + def createTopics(brokerUrl: String, topics: Seq[String]): Unit = { + val adminConfig = new Properties + adminConfig.put(CommonClientConfigs.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) + val adminClient = Admin.create(adminConfig) + + try { + val topicConfigs = Map(TopicConfig.CLEANUP_POLICY_CONFIG -> TopicConfig.CLEANUP_POLICY_COMPACT) + val newTopics = topics.map(name => new NewTopic(name, 1, 1.toShort).configs(topicConfigs.asJava)).asJava + adminClient.createTopics(newTopics).all.get + + var pendingTopics: Seq[String] = Seq() + TestUtils.waitUntilTrue(() => { + val allTopics = adminClient.listTopics.names.get.asScala.toSeq + pendingTopics = topics.filter(topicName => !allTopics.contains(topicName)) + pendingTopics.isEmpty + }, s"timed out waiting for topics : $pendingTopics") + + } finally adminClient.close() + } + + def lineCount(filPath: Path): Int = Files.readAllLines(filPath).size + + def validateOutput(producedDataFile: File, consumedDataFile: File): Unit = { + val producedReader = externalSort(producedDataFile) + val consumedReader = externalSort(consumedDataFile) + val produced = valuesIterator(producedReader) + val consumed = valuesIterator(consumedReader) + + val producedDedupedFile = new File(producedDataFile.getAbsolutePath + ".deduped") + val producedDeduped : BufferedWriter = Files.newBufferedWriter(producedDedupedFile.toPath, UTF_8) + + val consumedDedupedFile = new File(consumedDataFile.getAbsolutePath + ".deduped") + val consumedDeduped : BufferedWriter = Files.newBufferedWriter(consumedDedupedFile.toPath, UTF_8) + var total = 0 + var mismatched = 0 + while (produced.hasNext && consumed.hasNext) { + val p = produced.next() + producedDeduped.write(p.toString) + producedDeduped.newLine() + val c = consumed.next() + consumedDeduped.write(c.toString) + consumedDeduped.newLine() + if (p != c) + mismatched += 1 + total += 1 + } + producedDeduped.close() + consumedDeduped.close() + println(s"Validated $total values, $mismatched mismatches.") + require(!produced.hasNext, "Additional values produced not found in consumer log.") + require(!consumed.hasNext, "Additional values consumed not found in producer log.") + require(mismatched == 0, "Non-zero number of row mismatches.") + // if all the checks worked out we can delete the deduped files + Utils.delete(producedDedupedFile) + Utils.delete(consumedDedupedFile) + } + + def require(requirement: Boolean, message: => Any): Unit = { + if (!requirement) { + System.err.println(s"Data validation failed : $message") + Exit.exit(1) + } + } + + def valuesIterator(reader: BufferedReader): Iterator[TestRecord] = { + new AbstractIterator[TestRecord] { + def makeNext(): TestRecord = { + var next = readNext(reader) + while (next != null && next.delete) + next = readNext(reader) + if (next == null) + allDone() + else + next + } + }.asScala + } + + def readNext(reader: BufferedReader): TestRecord = { + var line = reader.readLine() + if (line == null) + return null + var curr = TestRecord.parse(line) + while (true) { + line = peekLine(reader) + if (line == null) + return curr + val next = TestRecord.parse(line) + if (next == null || next.topicAndKey != curr.topicAndKey) + return curr + curr = next + reader.readLine() + } + null + } + + def peekLine(reader: BufferedReader) = { + reader.mark(ReadAheadLimit) + val line = reader.readLine + reader.reset() + line + } + + def externalSort(file: File): BufferedReader = { + val builder = new ProcessBuilder("sort", "--key=1,2", "--stable", "--buffer-size=20%", "--temporary-directory=" + Files.createTempDirectory("log_compaction_test"), file.getAbsolutePath) + val process = builder.start + new Thread() { + override def run(): Unit = { + val exitCode = process.waitFor() + if (exitCode != 0) { + System.err.println("Process exited abnormally.") + while (process.getErrorStream.available > 0) { + System.err.write(process.getErrorStream.read()) + } + } + } + }.start() + new BufferedReader(new InputStreamReader(process.getInputStream, UTF_8), 10 * 1024 * 1024) + } + + def produceMessages(brokerUrl: String, + topics: Array[String], + messages: Long, + compressionType: String, + dups: Int, + percentDeletes: Int): Path = { + val producerProps = new Properties + producerProps.setProperty(ProducerConfig.MAX_BLOCK_MS_CONFIG, Long.MaxValue.toString) + producerProps.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) + producerProps.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, compressionType) + val producer = new KafkaProducer(producerProps, new ByteArraySerializer, new ByteArraySerializer) + try { + val rand = new Random(1) + val keyCount = (messages / dups).toInt + val producedFilePath = Files.createTempFile("kafka-log-cleaner-produced-", ".txt") + println(s"Logging produce requests to $producedFilePath") + val producedWriter: BufferedWriter = Files.newBufferedWriter(producedFilePath, UTF_8) + for (i <- 0L until (messages * topics.length)) { + val topic = topics((i % topics.length).toInt) + val key = rand.nextInt(keyCount) + val delete = (i % 100) < percentDeletes + val msg = + if (delete) + new ProducerRecord[Array[Byte], Array[Byte]](topic, key.toString.getBytes(UTF_8), null) + else + new ProducerRecord(topic, key.toString.getBytes(UTF_8), i.toString.getBytes(UTF_8)) + producer.send(msg) + producedWriter.write(TestRecord(topic, key, i, delete).toString) + producedWriter.newLine() + } + producedWriter.close() + producedFilePath + } finally { + producer.close() + } + } + + def createConsumer(brokerUrl: String): Consumer[String, String] = { + val consumerProps = new Properties + consumerProps.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "log-cleaner-test-" + new Random().nextInt(Int.MaxValue)) + consumerProps.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerUrl) + consumerProps.setProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest") + new KafkaConsumer(consumerProps, new StringDeserializer, new StringDeserializer) + } + + def consumeMessages(brokerUrl: String, topics: Array[String]): Path = { + val consumer = createConsumer(brokerUrl) + consumer.subscribe(topics.toSeq.asJava) + val consumedFilePath = Files.createTempFile("kafka-log-cleaner-consumed-", ".txt") + println(s"Logging consumed messages to $consumedFilePath") + val consumedWriter: BufferedWriter = Files.newBufferedWriter(consumedFilePath, UTF_8) + + try { + var done = false + while (!done) { + val consumerRecords = consumer.poll(Duration.ofSeconds(20)) + if (!consumerRecords.isEmpty) { + for (record <- consumerRecords.asScala) { + val delete = record.value == null + val value = if (delete) -1L else record.value.toLong + consumedWriter.write(TestRecord(record.topic, record.key.toInt, value, delete).toString) + consumedWriter.newLine() + } + } else { + done = true + } + } + consumedFilePath + } finally { + consumedWriter.close() + consumer.close() + } + } + + def readString(buffer: ByteBuffer): String = { + Utils.utf8(buffer) + } + +} + +case class TestRecord(topic: String, key: Int, value: Long, delete: Boolean) { + override def toString = topic + "\t" + key + "\t" + value + "\t" + (if (delete) "d" else "u") + def topicAndKey = topic + key +} + +object TestRecord { + def parse(line: String): TestRecord = { + val components = line.split("\t") + new TestRecord(components(0), components(1).toInt, components(2).toLong, components(3) == "d") + } +} diff --git a/core/src/test/scala/kafka/utils/LoggingTest.scala b/core/src/test/scala/kafka/utils/LoggingTest.scala index 761b276c400bd..d1b389b4a8cf3 100644 --- a/core/src/test/scala/kafka/utils/LoggingTest.scala +++ b/core/src/test/scala/kafka/utils/LoggingTest.scala @@ -17,7 +17,6 @@ package kafka.utils -import org.apache.kafka.server.logger.LoggingController import java.lang.management.ManagementFactory import javax.management.ObjectName @@ -25,16 +24,24 @@ import org.junit.jupiter.api.Test import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} import org.slf4j.LoggerFactory + class LoggingTest extends Logging { + @Test + def testTypeOfGetLoggers(): Unit = { + val log4jController = new Log4jController + // the return object of getLoggers must be a collection instance from java standard library. + // That enables mbean client to deserialize it without extra libraries. + assertEquals(classOf[java.util.ArrayList[String]], log4jController.getLoggers.getClass) + } + @Test def testLog4jControllerIsRegistered(): Unit = { val mbs = ManagementFactory.getPlatformMBeanServer - val log4jControllerName = ObjectName.getInstance("kafka:type=kafka.Log4jController") assertTrue(mbs.isRegistered(log4jControllerName), "kafka.utils.Log4jController is not registered") - val log4jInstance = mbs.getObjectInstance(log4jControllerName) - assertEquals("org.apache.kafka.server.logger.LoggingController", log4jInstance.getClassName) + val instance = mbs.getObjectInstance(log4jControllerName) + assertEquals("kafka.utils.Log4jController", instance.getClassName) } @Test @@ -63,7 +70,7 @@ class LoggingTest extends Logging { @Test def testLoggerLevelIsResolved(): Unit = { - val controller = new LoggingController() + val controller = new Log4jController() val previousLevel = controller.getLogLevel("kafka") try { controller.setLogLevel("kafka", "TRACE") diff --git a/core/src/test/scala/kafka/utils/TestInfoUtils.scala b/core/src/test/scala/kafka/utils/TestInfoUtils.scala index e6c70b6e8fe49..a74d1ca1612ff 100644 --- a/core/src/test/scala/kafka/utils/TestInfoUtils.scala +++ b/core/src/test/scala/kafka/utils/TestInfoUtils.scala @@ -18,21 +18,25 @@ package kafka.utils import java.lang.reflect.Method import java.util -import java.util.Optional +import java.util.{Collections, Optional} import org.junit.jupiter.api.TestInfo import org.apache.kafka.clients.consumer.GroupProtocol class EmptyTestInfo extends TestInfo { override def getDisplayName: String = "" - override def getTags: util.Set[String] = java.util.Set.of() + override def getTags: util.Set[String] = Collections.emptySet() override def getTestClass: Optional[Class[_]] = Optional.empty() override def getTestMethod: Optional[Method] = Optional.empty() } object TestInfoUtils { + + final val TestWithParameterizedQuorumAndGroupProtocolNames = "{displayName}.quorum={0}.groupProtocol={1}" - final val TestWithParameterizedGroupProtocolNames = "{displayName}.groupProtocol={0}" + def isShareGroupTest(testInfo: TestInfo): Boolean = { + testInfo.getDisplayName.contains("kraft+kip932") + } def maybeGroupProtocolSpecified(testInfo: TestInfo): Option[GroupProtocol] = { if (testInfo.getDisplayName.contains("groupProtocol=classic")) @@ -50,4 +54,12 @@ object TestInfoUtils { def isTransactionV2Enabled(testInfo: TestInfo): Boolean = { !testInfo.getDisplayName.contains("isTV2Enabled=false") } + + /** + * Returns whether eligible leader replicas version 1 is enabled. + * When no parameter is provided, the default returned is false. + */ + def isEligibleLeaderReplicasV1Enabled(testInfo: TestInfo): Boolean = { + testInfo.getDisplayName.contains("isELRV1Enabled=true") + } } diff --git a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala index 9ca8b42cd14be..8834f6f36083c 100644 --- a/core/src/test/scala/unit/kafka/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/KafkaConfigTest.scala @@ -56,14 +56,14 @@ class KafkaConfigTest { "Invalid value -1 for configuration node.id: Value must be at least 0") properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) - assertBadConfigContainingMessage(properties, - "Missing required configuration \"controller.listener.names\" which has no default value.") - - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertBadConfigContainingMessage(properties, "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") + assertBadConfigContainingMessage(properties, + "requirement failed: controller.listener.names must contain at least one value when running KRaft with just the broker role") + + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") KafkaConfig.fromProps(properties) } @@ -82,10 +82,6 @@ class KafkaConfigTest { "Invalid value -1 for configuration node.id: Value must be at least 0") properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) - assertBadConfigContainingMessage(properties, - "Missing required configuration \"controller.listener.names\" which has no default value.") - - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertBadConfigContainingMessage(properties, "If using process.roles, either controller.quorum.bootstrap.servers must contain the set of bootstrap controllers or controller.quorum.voters must contain a parseable set of controllers.") @@ -94,34 +90,15 @@ class KafkaConfigTest { "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") - properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT") - KafkaConfig.fromProps(properties) - } + assertBadConfigContainingMessage(properties, + "No security protocol defined for listener CONTROLLER") - @Test - def testControllerListenerNamesMismatch(): Unit = { - val properties = new Properties() - properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") - properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "OTHER") - properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") - properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") properties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "CONTROLLER:PLAINTEXT") - assertBadConfigContainingMessage(properties, "requirement failed: The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") - } - @Test - def testControllerSecurityProtocolMapMissing(): Unit = { - val properties = new Properties() - properties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "controller") - properties.put(KRaftConfigs.NODE_ID_CONFIG, 0) - properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "OTHER") - properties.put(QuorumConfig.QUORUM_BOOTSTRAP_SERVERS_CONFIG, "localhost:9092") - properties.put(SocketServerConfigs.LISTENERS_CONFIG, "CONTROLLER://:9092") - - assertBadConfigContainingMessage(properties, "No security protocol defined for listener CONTROLLER") + properties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") + KafkaConfig.fromProps(properties) } @Test @@ -139,12 +116,12 @@ class KafkaConfigTest { // We should be also able to set completely new property val config3 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact"))) assertEquals(1, config3.nodeId) - assertEquals(util.List.of("compact"), config3.logCleanupPolicy) + assertEquals(util.Arrays.asList("compact"), config3.logCleanupPolicy) // We should be also able to set several properties val config4 = KafkaConfig.fromProps(Kafka.getPropsFromArgs(Array(propertiesFile, "--override", "log.cleanup.policy=compact,delete", "--override", "node.id=2"))) assertEquals(2, config4.nodeId) - assertEquals(util.List.of("compact","delete"), config4.logCleanupPolicy) + assertEquals(util.Arrays.asList("compact","delete"), config4.logCleanupPolicy) } @Test diff --git a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala index bba5278d7a6f3..a9901ba65e760 100755 --- a/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala +++ b/core/src/test/scala/unit/kafka/admin/AddPartitionsTest.scala @@ -17,6 +17,8 @@ package kafka.admin +import java.util.Collections +import kafka.controller.ReplicaAssignment import kafka.server.{BaseRequestTest, BrokerServer} import kafka.utils.TestUtils import kafka.utils.TestUtils._ @@ -24,9 +26,13 @@ import org.apache.kafka.clients.admin.{Admin, NewPartitions, NewTopic} import org.apache.kafka.common.errors.InvalidReplicaAssignmentException import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util +import java.util.Arrays.asList +import java.util.Collections.singletonList import java.util.concurrent.ExecutionException import scala.jdk.CollectionConverters._ @@ -37,34 +43,34 @@ class AddPartitionsTest extends BaseRequestTest { val partitionId = 0 val topic1 = "new-topic1" - val topic1Assignment = Map(0 -> Seq(0,1)) + val topic1Assignment = Map(0 -> ReplicaAssignment(Seq(0,1), List(), List())) val topic2 = "new-topic2" - val topic2Assignment = Map(0 -> Seq(1,2)) + val topic2Assignment = Map(0 -> ReplicaAssignment(Seq(1,2), List(), List())) val topic3 = "new-topic3" - val topic3Assignment = Map(0 -> Seq(2,3,0,1)) + val topic3Assignment = Map(0 -> ReplicaAssignment(Seq(2,3,0,1), List(), List())) val topic4 = "new-topic4" - val topic4Assignment = Map(0 -> Seq(0,3)) + val topic4Assignment = Map(0 -> ReplicaAssignment(Seq(0,3), List(), List())) val topic5 = "new-topic5" - val topic5Assignment = Map(1 -> Seq(0,1)) + val topic5Assignment = Map(1 -> ReplicaAssignment(Seq(0,1), List(), List())) var admin: Admin = _ - @BeforeEach override def setUp(testInfo: TestInfo): Unit = { super.setUp(testInfo) brokers.foreach(broker => broker.asInstanceOf[BrokerServer].lifecycleManager.initialUnfenceFuture.get()) - createTopicWithAssignment(topic1, partitionReplicaAssignment = topic1Assignment) - createTopicWithAssignment(topic2, partitionReplicaAssignment = topic2Assignment) - createTopicWithAssignment(topic3, partitionReplicaAssignment = topic3Assignment) - createTopicWithAssignment(topic4, partitionReplicaAssignment = topic4Assignment) + createTopicWithAssignment(topic1, partitionReplicaAssignment = topic1Assignment.map { case (k, v) => k -> v.replicas }) + createTopicWithAssignment(topic2, partitionReplicaAssignment = topic2Assignment.map { case (k, v) => k -> v.replicas }) + createTopicWithAssignment(topic3, partitionReplicaAssignment = topic3Assignment.map { case (k, v) => k -> v.replicas }) + createTopicWithAssignment(topic4, partitionReplicaAssignment = topic4Assignment.map { case (k, v) => k -> v.replicas }) admin = createAdminClient() } - @Test - def testWrongReplicaCount(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testWrongReplicaCount(quorum: String): Unit = { assertEquals(classOf[InvalidReplicaAssignmentException], assertThrows(classOf[ExecutionException], () => { - admin.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(2, util.List.of(util.List.of[Integer](0, 1, 2))))).all().get() + admin.createPartitions(Collections.singletonMap(topic1, + NewPartitions.increaseTo(2, singletonList(asList(0, 1, 2))))).all().get() }).getCause.getClass) } @@ -72,15 +78,16 @@ class AddPartitionsTest extends BaseRequestTest { * Test that when we supply a manual partition assignment to createTopics, it must be 0-based * and consecutive. */ - @Test - def testMissingPartitionsInCreateTopics(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMissingPartitionsInCreateTopics(quorum: String): Unit = { val topic6Placements = new util.HashMap[Integer, util.List[Integer]] - topic6Placements.put(1, util.List.of(0, 1)) - topic6Placements.put(2, util.List.of(1, 0)) + topic6Placements.put(1, asList(0, 1)) + topic6Placements.put(2, asList(1, 0)) val topic7Placements = new util.HashMap[Integer, util.List[Integer]] - topic7Placements.put(2, util.List.of(0, 1)) - topic7Placements.put(3, util.List.of(1, 0)) - val futures = admin.createTopics(util.List.of( + topic7Placements.put(2, asList(0, 1)) + topic7Placements.put(3, asList(1, 0)) + val futures = admin.createTopics(asList( new NewTopic("new-topic6", topic6Placements), new NewTopic("new-topic7", topic7Placements))).values() val topic6Cause = assertThrows(classOf[ExecutionException], () => futures.get("new-topic6").get()).getCause @@ -97,19 +104,21 @@ class AddPartitionsTest extends BaseRequestTest { * Test that when we supply a manual partition assignment to createPartitions, it must contain * enough partitions. */ - @Test - def testMissingPartitionsInCreatePartitions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMissingPartitionsInCreatePartitions(quorum: String): Unit = { val cause = assertThrows(classOf[ExecutionException], () => - admin.createPartitions(util.Map.of(topic1, - NewPartitions.increaseTo(3, util.List.of(util.List.of[Integer](0, 1, 2))))).all().get()).getCause + admin.createPartitions(Collections.singletonMap(topic1, + NewPartitions.increaseTo(3, singletonList(asList(0, 1, 2))))).all().get()).getCause assertEquals(classOf[InvalidReplicaAssignmentException], cause.getClass) assertTrue(cause.getMessage.contains("Attempted to add 2 additional partition(s), but only 1 assignment(s) " + "were specified."), "Unexpected error message: " + cause.getMessage) } - @Test - def testIncrementPartitions(): Unit = { - admin.createPartitions(util.Map.of(topic1, NewPartitions.increaseTo(3))).all().get() + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementPartitions(quorum: String): Unit = { + admin.createPartitions(Collections.singletonMap(topic1, NewPartitions.increaseTo(3))).all().get() // wait until leader is elected waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic1, 1) @@ -119,7 +128,7 @@ class AddPartitionsTest extends BaseRequestTest { TestUtils.waitForPartitionMetadata(brokers, topic1, 1) TestUtils.waitForPartitionMetadata(brokers, topic1, 2) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(util.List.of(topic1), false).build) + new MetadataRequest.Builder(Seq(topic1).asJava, false).build) assertEquals(1, response.topicMetadata.size) val partitions = response.topicMetadata.asScala.head.partitionMetadata.asScala.sortBy(_.partition) assertEquals(partitions.size, 3) @@ -135,11 +144,12 @@ class AddPartitionsTest extends BaseRequestTest { } } - @Test - def testManualAssignmentOfReplicas(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testManualAssignmentOfReplicas(quorum: String): Unit = { // Add 2 partitions - admin.createPartitions(util.Map.of(topic2, NewPartitions.increaseTo(3, - util.List.of(util.List.of[Integer](0, 1), util.List.of[Integer](2, 3))))).all().get() + admin.createPartitions(Collections.singletonMap(topic2, NewPartitions.increaseTo(3, + asList(asList(0, 1), asList(2, 3))))).all().get() // wait until leader is elected val leader1 = waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic2, 1) val leader2 = waitUntilLeaderIsElectedOrChangedWithAdmin(admin, topic2, 2) @@ -150,7 +160,7 @@ class AddPartitionsTest extends BaseRequestTest { val partition2Metadata = TestUtils.waitForPartitionMetadata(brokers, topic2, 2) assertEquals(leader2, partition2Metadata.leader()) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(util.List.of(topic2), false).build) + new MetadataRequest.Builder(Seq(topic2).asJava, false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head val partitionMetadata = topicMetadata.partitionMetadata.asScala.sortBy(_.partition) @@ -163,9 +173,10 @@ class AddPartitionsTest extends BaseRequestTest { assertEquals(Set(0, 1), replicas.asScala.toSet) } - @Test - def testReplicaPlacementAllServers(): Unit = { - admin.createPartitions(util.Map.of(topic3, NewPartitions.increaseTo(7))).all().get() + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReplicaPlacementAllServers(quorum: String): Unit = { + admin.createPartitions(Collections.singletonMap(topic3, NewPartitions.increaseTo(7))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic3, 1) @@ -176,7 +187,7 @@ class AddPartitionsTest extends BaseRequestTest { TestUtils.waitForPartitionMetadata(brokers, topic3, 6) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(util.List.of(topic3), false).build) + new MetadataRequest.Builder(Seq(topic3).asJava, false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head @@ -190,16 +201,17 @@ class AddPartitionsTest extends BaseRequestTest { } } - @Test - def testReplicaPlacementPartialServers(): Unit = { - admin.createPartitions(util.Map.of(topic2, NewPartitions.increaseTo(3))).all().get() + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReplicaPlacementPartialServers(quorum: String): Unit = { + admin.createPartitions(Collections.singletonMap(topic2, NewPartitions.increaseTo(3))).all().get() // read metadata from a broker and verify the new topic partitions exist TestUtils.waitForPartitionMetadata(brokers, topic2, 1) TestUtils.waitForPartitionMetadata(brokers, topic2, 2) val response = connectAndReceive[MetadataResponse]( - new MetadataRequest.Builder(util.List.of(topic2), false).build) + new MetadataRequest.Builder(Seq(topic2).asJava, false).build) assertEquals(1, response.topicMetadata.size) val topicMetadata = response.topicMetadata.asScala.head diff --git a/core/src/test/scala/unit/kafka/admin/AdminRackAwareTest.scala b/core/src/test/scala/unit/kafka/admin/AdminRackAwareTest.scala new file mode 100644 index 0000000000000..015331c1d8e19 --- /dev/null +++ b/core/src/test/scala/unit/kafka/admin/AdminRackAwareTest.scala @@ -0,0 +1,251 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin + +import kafka.utils.{CoreUtils, Logging} +import org.apache.kafka.admin.{AdminUtils, BrokerMetadata} +import org.apache.kafka.common.errors.{InvalidPartitionsException, InvalidReplicationFactorException} +import org.apache.kafka.server.common.AdminOperationException +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Test + +import java.util.Optional +import scala.collection.Map +import scala.jdk.CollectionConverters._ + +class AdminRackAwareTest extends RackAwareTest with Logging { + + @Test + def testGetRackAlternatedBrokerListAndAssignReplicasToBrokers(): Unit = { + val rackMap = Map(0 -> "rack1", 1 -> "rack3", 2 -> "rack3", 3 -> "rack2", 4 -> "rack2", 5 -> "rack1") + val newList = AdminUtils.getRackAlternatedBrokerList(rackMap.map(e => (e._1.asInstanceOf[Integer], e._2)).asJava) + assertEquals(List(0, 3, 1, 5, 4, 2), newList.asScala.toList) + val anotherList = AdminUtils.getRackAlternatedBrokerList((rackMap.toMap - 5).map(e => (e._1.asInstanceOf[Integer], e._2)).asJava) + assertEquals(List(0, 3, 1, 4, 2), anotherList.asScala.toList) + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(rackMap), 7, 3, 0, 0)) + val expected = Map(0 -> List(0, 3, 1), + 1 -> List(3, 1, 5), + 2 -> List(1, 5, 4), + 3 -> List(5, 4, 2), + 4 -> List(4, 2, 0), + 5 -> List(2, 0, 3), + 6 -> List(0, 4, 2)) + assertEquals(expected, assignment) + } + + @Test + def testAssignmentWithRackAware(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 6 + val replicationFactor = 3 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor, 2, 0)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testAssignmentWithRackAwareWithRandomStartIndex(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 6 + val replicationFactor = 3 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testAssignmentWithRackAwareWithUnevenReplicas(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 13 + val replicationFactor = 3 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor, 0, 0)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor, verifyLeaderDistribution = false, verifyReplicasDistribution = false) + } + + @Test + def testAssignmentWithRackAwareWithUnevenRacks(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack1", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 12 + val replicationFactor = 3 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor, verifyReplicasDistribution = false) + } + + @Test + def testAssignmentWith2ReplicasRackAware(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 12 + val replicationFactor = 2 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testRackAwareExpansion(): Unit = { + val brokerRackMapping = Map(6 -> "rack1", 7 -> "rack2", 8 -> "rack2", 9 -> "rack3", 10 -> "rack3", 11 -> "rack1") + val numPartitions = 12 + val replicationFactor = 2 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor, -1, 12)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testAssignmentWith2ReplicasRackAwareWith6Partitions(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1") + val numPartitions = 6 + val replicationFactor = 2 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testAssignmentWith2ReplicasRackAwareWith6PartitionsAnd3Brokers(): Unit = { + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 4 -> "rack3") + val numPartitions = 3 + val replicationFactor = 2 + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, replicationFactor) + } + + @Test + def testLargeNumberPartitionsAssignment(): Unit = { + val numPartitions = 96 + val replicationFactor = 3 + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack1", + 6 -> "rack1", 7 -> "rack2", 8 -> "rack2", 9 -> "rack3", 10 -> "rack1", 11 -> "rack3") + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + checkReplicaDistribution(assignment, brokerRackMapping, brokerRackMapping.size, numPartitions, + replicationFactor) + } + + @Test + def testMoreReplicasThanRacks(): Unit = { + val numPartitions = 6 + val replicationFactor = 5 + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack2") + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, replicationFactor)) + assertEquals(List.fill(assignment.size)(replicationFactor), assignment.values.toIndexedSeq.map(_.size)) + val distribution = getReplicaDistribution(assignment, brokerRackMapping) + for (partition <- 0 until numPartitions) + assertEquals(3, distribution.partitionRacks(partition).toSet.size) + } + + @Test + def testLessReplicasThanRacks(): Unit = { + val numPartitions = 6 + val replicationFactor = 2 + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack3", 4 -> "rack3", 5 -> "rack2") + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, + replicationFactor)) + assertEquals(List.fill(assignment.size)(replicationFactor), assignment.values.toIndexedSeq.map(_.size)) + val distribution = getReplicaDistribution(assignment, brokerRackMapping) + for (partition <- 0 to 5) + assertEquals(2, distribution.partitionRacks(partition).toSet.size) + } + + @Test + def testSingleRack(): Unit = { + val numPartitions = 6 + val replicationFactor = 3 + val brokerRackMapping = Map(0 -> "rack1", 1 -> "rack1", 2 -> "rack1", 3 -> "rack1", 4 -> "rack1", 5 -> "rack1") + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(toBrokerMetadata(brokerRackMapping), numPartitions, replicationFactor)) + assertEquals(List.fill(assignment.size)(replicationFactor), assignment.values.toIndexedSeq.map(_.size)) + val distribution = getReplicaDistribution(assignment, brokerRackMapping) + for (partition <- 0 until numPartitions) + assertEquals(1, distribution.partitionRacks(partition).toSet.size) + for (broker <- brokerRackMapping.keys) + assertEquals(1, distribution.brokerLeaderCount(broker)) + } + + @Test + def testSkipBrokerWithReplicaAlreadyAssigned(): Unit = { + val rackInfo = Map(0 -> "a", 1 -> "b", 2 -> "c", 3 -> "a", 4 -> "a") + val brokerList = 0 to 4 + val numPartitions = 6 + val replicationFactor = 4 + val brokerMetadatas = toBrokerMetadata(rackInfo) + assertEquals(brokerList, brokerMetadatas.asScala.map(_.id)) + val assignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(brokerMetadatas, numPartitions, replicationFactor, + 2, -1)) + checkReplicaDistribution(assignment, rackInfo, 5, 6, 4, + verifyRackAware = false, verifyLeaderDistribution = false, verifyReplicasDistribution = false) + } + + @Test + def testReplicaAssignment(): Unit = { + val brokerMetadatas = (0 to 4).map(new BrokerMetadata(_, Optional.empty())).asJava + + // test 0 replication factor + assertThrows(classOf[InvalidReplicationFactorException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 0)) + + // test wrong replication factor + assertThrows(classOf[InvalidReplicationFactorException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 6)) + + // correct assignment + val expectedAssignment = Map( + 0 -> List(0, 1, 2), + 1 -> List(1, 2, 3), + 2 -> List(2, 3, 4), + 3 -> List(3, 4, 0), + 4 -> List(4, 0, 1), + 5 -> List(0, 2, 3), + 6 -> List(1, 3, 4), + 7 -> List(2, 4, 0), + 8 -> List(3, 0, 1), + 9 -> List(4, 1, 2)) + + val actualAssignment = CoreUtils.replicaToBrokerAssignmentAsScala(AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 3, 0, -1)) + assertEquals(expectedAssignment, actualAssignment) + } + + @Test + def testAssignReplicasToBrokersWithInvalidParameters(): Unit = { + val rackMap = Map(0 -> "rack1", 1 -> "rack3", 2 -> "rack3", 3 -> "rack2", 4 -> null) + val brokerMetadatas = toBrokerMetadata(rackMap) + + // test 0 partition + assertThrows(classOf[InvalidPartitionsException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 0, 0, -1, -1)) + + // test 0 replication factor + assertThrows(classOf[InvalidReplicationFactorException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 0, -1, -1)) + + // test wrong replication factor + assertThrows(classOf[InvalidReplicationFactorException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, brokerMetadatas.size() + 1, -1, -1)) + + // test wrong brokerMetadatas + assertThrows(classOf[AdminOperationException], + () => AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, brokerMetadatas.size(), -1, -1)) + } +} diff --git a/core/src/test/scala/unit/kafka/admin/RackAwareTest.scala b/core/src/test/scala/unit/kafka/admin/RackAwareTest.scala new file mode 100644 index 0000000000000..62df52273741d --- /dev/null +++ b/core/src/test/scala/unit/kafka/admin/RackAwareTest.scala @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.admin + +import org.apache.kafka.admin.BrokerMetadata + +import scala.collection.{Map, Seq, mutable} +import org.junit.jupiter.api.Assertions._ + +import java.util +import java.util.Optional +import scala.jdk.CollectionConverters._ + +trait RackAwareTest { + + def checkReplicaDistribution(assignment: Map[Int, Seq[Int]], + brokerRackMapping: Map[Int, String], + numBrokers: Int, + numPartitions: Int, + replicationFactor: Int, + verifyRackAware: Boolean = true, + verifyLeaderDistribution: Boolean = true, + verifyReplicasDistribution: Boolean = true): Unit = { + // always verify that no broker will be assigned for more than one replica + for ((_, brokerList) <- assignment) { + assertEquals(brokerList.toSet.size, brokerList.size, + "More than one replica is assigned to same broker for the same partition") + } + val distribution = getReplicaDistribution(assignment, brokerRackMapping) + + if (verifyRackAware) { + val partitionRackMap = distribution.partitionRacks + assertEquals(List.fill(numPartitions)(replicationFactor), partitionRackMap.values.toList.map(_.distinct.size), + "More than one replica of the same partition is assigned to the same rack") + } + + if (verifyLeaderDistribution) { + val leaderCount = distribution.brokerLeaderCount + val leaderCountPerBroker = numPartitions / numBrokers + assertEquals(List.fill(numBrokers)(leaderCountPerBroker), leaderCount.values.toList, + "Preferred leader count is not even for brokers") + } + + if (verifyReplicasDistribution) { + val replicasCount = distribution.brokerReplicasCount + val numReplicasPerBroker = numPartitions * replicationFactor / numBrokers + assertEquals(List.fill(numBrokers)(numReplicasPerBroker), replicasCount.values.toList, + "Replica count is not even for broker") + } + } + + def getReplicaDistribution(assignment: Map[Int, Seq[Int]], brokerRackMapping: Map[Int, String]): ReplicaDistributions = { + val leaderCount = mutable.Map[Int, Int]() + val partitionCount = mutable.Map[Int, Int]() + val partitionRackMap = mutable.Map[Int, List[String]]() + assignment.foreach { case (partitionId, replicaList) => + val leader = replicaList.head + leaderCount(leader) = leaderCount.getOrElse(leader, 0) + 1 + for (brokerId <- replicaList) { + partitionCount(brokerId) = partitionCount.getOrElse(brokerId, 0) + 1 + val rack = brokerRackMapping.getOrElse(brokerId, sys.error(s"No mapping found for $brokerId in `brokerRackMapping`")) + partitionRackMap(partitionId) = rack :: partitionRackMap.getOrElse(partitionId, List()) + } + } + ReplicaDistributions(partitionRackMap, leaderCount, partitionCount) + } + + def toBrokerMetadata(rackMap: Map[Int, String], brokersWithoutRack: Seq[Int] = Seq.empty): util.Collection[BrokerMetadata] = { + val res = rackMap.toSeq.map { case (brokerId, rack) => + new BrokerMetadata(brokerId, Optional.ofNullable(rack)) + } ++ brokersWithoutRack.map { brokerId => + new BrokerMetadata(brokerId, Optional.empty()) + }.sortBy(_.id) + + res.asJavaCollection + } + +} + +case class ReplicaDistributions(partitionRacks: Map[Int, Seq[String]], brokerLeaderCount: Map[Int, Int], brokerReplicasCount: Map[Int, Int]) diff --git a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala index d475c6e42918d..9d60328d8684d 100644 --- a/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AbstractPartitionTest.scala @@ -17,12 +17,14 @@ package kafka.cluster import kafka.log.LogManager +import kafka.server.MetadataCache +import kafka.server.metadata.MockConfigRepository import kafka.utils.TestUtils import kafka.utils.TestUtils.MockAlterPartitionManager -import org.apache.kafka.common.{DirectoryId, TopicPartition, Uuid} +import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.requests.LeaderAndIsrRequest import org.apache.kafka.common.utils.Utils -import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, MockConfigRepository, PartitionRegistration} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.util.MockTime @@ -37,6 +39,7 @@ import java.io.File import java.lang.{Long => JLong} import java.util.{Optional, Properties} import java.util.concurrent.atomic.AtomicInteger +import scala.jdk.CollectionConverters._ object AbstractPartitionTest { val brokerId = 101 @@ -98,7 +101,7 @@ class AbstractPartitionTest { def createLogProperties(overrides: Map[String, String]): Properties = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1000: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 999: java.lang.Integer) overrides.foreach { case (k, v) => logProps.put(k, v) } @@ -118,25 +121,31 @@ class AbstractPartitionTest { isLeader: Boolean): Partition = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val replicas = Array(brokerId, remoteReplicaId) + val controllerEpoch = 0 + val replicas = List[Integer](brokerId, remoteReplicaId).asJava val isr = replicas - val partitionRegistrationBuilder = new PartitionRegistration.Builder() - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) if (isLeader) { - val partitionRegistration = partitionRegistrationBuilder.setLeader(brokerId).build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) } else { - val partitionRegistration = partitionRegistrationBuilder.setLeader(remoteReplicaId).build() - assertTrue(partition.makeFollower(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become follower transition to succeed") + assertTrue(partition.makeFollower(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(remoteReplicaId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setIsNew(true), offsetCheckpoints, None), "Expected become follower transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) - assertTrue(partition.leaderLogIfLocal.isEmpty) + assertEquals(None, partition.leaderLogIfLocal) } partition diff --git a/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala b/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala index 6172afd286df1..c34a7ac7536ba 100644 --- a/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/AssignmentStateTest.scala @@ -16,99 +16,102 @@ */ package kafka.cluster -import org.apache.kafka.common.DirectoryId -import org.apache.kafka.metadata.{LeaderRecoveryState, PartitionRegistration} +import org.apache.kafka.common.requests.LeaderAndIsrRequest import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} -import java.util import scala.jdk.CollectionConverters._ object AssignmentStateTest { import AbstractPartitionTest._ - def parameters: util.stream.Stream[Arguments] = util.List.of[Arguments]( + def parameters: java.util.stream.Stream[Arguments] = Seq[Arguments]( Arguments.of( - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId, brokerId + 1, brokerId + 2), - Array.emptyIntArray, Array.emptyIntArray, util.List.of[Int], Boolean.box(false)), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List.empty[Integer], List.empty[Integer], Seq.empty[Int], Boolean.box(false)), Arguments.of( - Array(brokerId, brokerId + 1), - Array(brokerId, brokerId + 1, brokerId + 2), - Array.emptyIntArray, Array.emptyIntArray, util.List.of[Int], Boolean.box(true)), + List[Integer](brokerId, brokerId + 1), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List.empty[Integer], List.empty[Integer], Seq.empty[Int], Boolean.box(true)), Arguments.of( - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId + 3, brokerId + 4), - Array(brokerId + 1), - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId + 3, brokerId + 4), + List[Integer](brokerId + 1), + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId + 3, brokerId + 4), - Array.emptyIntArray, - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId + 3, brokerId + 4), + List.empty[Integer], + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId, brokerId + 1, brokerId + 2), - Array.emptyIntArray, - Array(brokerId + 1), - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List.empty[Integer], + List[Integer](brokerId + 1), + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId + 1, brokerId + 2), - Array(brokerId + 1, brokerId + 2), - Array(brokerId), - Array.emptyIntArray, - util.List.of(brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId + 1, brokerId + 2), + List[Integer](brokerId + 1, brokerId + 2), + List[Integer](brokerId), + List.empty[Integer], + Seq(brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId + 2, brokerId + 3, brokerId + 4), - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId + 3, brokerId + 4, brokerId + 5), - Array.emptyIntArray, - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId + 2, brokerId + 3, brokerId + 4), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), + List.empty[Integer], + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId + 2, brokerId + 3, brokerId + 4), - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId + 3, brokerId + 4, brokerId + 5), - Array.emptyIntArray, - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), + List[Integer](brokerId + 2, brokerId + 3, brokerId + 4), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), + List.empty[Integer], + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(false)), Arguments.of( - Array(brokerId + 2, brokerId + 3), - Array(brokerId, brokerId + 1, brokerId + 2), - Array(brokerId + 3, brokerId + 4, brokerId + 5), - Array.emptyIntArray, - util.List.of(brokerId, brokerId + 1, brokerId + 2), Boolean.box(true)) - ).stream() + List[Integer](brokerId + 2, brokerId + 3), + List[Integer](brokerId, brokerId + 1, brokerId + 2), + List[Integer](brokerId + 3, brokerId + 4, brokerId + 5), + List.empty[Integer], + Seq(brokerId, brokerId + 1, brokerId + 2), Boolean.box(true)) + ).asJava.stream() } class AssignmentStateTest extends AbstractPartitionTest { @ParameterizedTest @MethodSource(Array("parameters")) - def testPartitionAssignmentStatus(isr: Array[Int], replicas: Array[Int], - adding: Array[Int], removing: Array[Int], - original: util.List[Int], isUnderReplicated: Boolean): Unit = { - val partitionRegistration = new PartitionRegistration.Builder() + def testPartitionAssignmentStatus(isr: List[Integer], replicas: List[Integer], + adding: List[Integer], removing: List[Integer], + original: Seq[Int], isUnderReplicated: Boolean): Unit = { + val controllerEpoch = 3 + + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) - .setIsr(isr) + .setIsr(isr.asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setAddingReplicas(adding) - .setRemovingReplicas(removing) - .build() + .setReplicas(replicas.asJava) + .setIsNew(false) + if (adding.nonEmpty) + leaderState.setAddingReplicas(adding.asJava) + if (removing.nonEmpty) + leaderState.setRemovingReplicas(removing.asJava) + + val isReassigning = adding.nonEmpty || removing.nonEmpty // set the original replicas as the URP calculation will need them - if (!original.isEmpty) - partition.assignmentState = SimpleAssignmentState(original.asScala) + if (original.nonEmpty) + partition.assignmentState = SimpleAssignmentState(original) // do the test - partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None) - val isReassigning = !adding.isEmpty || !removing.isEmpty + partition.makeLeader(leaderState, offsetCheckpoints, None) assertEquals(isReassigning, partition.isReassigning) - adding.foreach(r => assertTrue(partition.isAddingReplica(r))) + if (adding.nonEmpty) + adding.foreach(r => assertTrue(partition.isAddingReplica(r))) if (adding.contains(brokerId)) assertTrue(partition.isAddingLocalReplica) else diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala index fe262360a32a8..035df56ca59e4 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionLockTest.scala @@ -21,23 +21,25 @@ import java.lang.{Long => JLong} import java.util.{Optional, Properties} import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean -import kafka.log.LogManager +import kafka.log._ import kafka.server._ +import kafka.server.metadata.MockConfigRepository import kafka.utils._ import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} -import org.apache.kafka.common.requests.FetchRequest +import org.apache.kafka.common.requests.{FetchRequest, LeaderAndIsrRequest} import org.apache.kafka.common.utils.Utils -import org.apache.kafka.common.{DirectoryId, TopicPartition, Uuid} +import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, MockConfigRepository, PartitionRegistration} +import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.server.common.{RequestLocal, TopicIdPartition} import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetsListener, LogSegments, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, LogSegments, ProducerStateManager, ProducerStateManagerConfig, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -139,20 +141,19 @@ class PartitionLockTest extends Logging { def testGetReplicaWithUpdateAssignmentAndIsr(): Unit = { val active = new AtomicBoolean(true) val replicaToCheck = 3 - val firstReplicaSet = Array(3, 4, 5) - val secondReplicaSet = Array(1, 2, 3) - def partitionRegistration(replicas: Array[Int]) = new PartitionRegistration.Builder() - .setLeader(replicas(0)) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + val firstReplicaSet = Seq[Integer](3, 4, 5).asJava + val secondReplicaSet = Seq[Integer](1, 2, 3).asJava + def partitionState(replicas: java.util.List[Integer]) = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(1) + .setLeader(replicas.get(0)) .setLeaderEpoch(1) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(true) val offsetCheckpoints: OffsetCheckpoints = mock(classOf[OffsetCheckpoints]) // Update replica set synchronously first to avoid race conditions - partition.makeLeader(partitionRegistration(secondReplicaSet), isNew = true, offsetCheckpoints, None) + partition.makeLeader(partitionState(secondReplicaSet), offsetCheckpoints, None) assertTrue(partition.getReplica(replicaToCheck).isDefined, s"Expected replica $replicaToCheck to be defined") val future = executorService.submit((() => { @@ -165,7 +166,7 @@ class PartitionLockTest extends Logging { secondReplicaSet } - partition.makeLeader(partitionRegistration(replicas), isNew = true, offsetCheckpoints, None) + partition.makeLeader(partitionState(replicas), offsetCheckpoints, None) i += 1 Thread.sleep(1) // just to avoid tight loop @@ -299,7 +300,7 @@ class PartitionLockTest extends Logging { val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) + log.dir, log.topicPartition, logDirFailureChannel, None, mockTime.scheduler) val maxTransactionTimeout = 5 * 60 * 1000 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false) val producerStateManager = new ProducerStateManager( @@ -344,27 +345,25 @@ class PartitionLockTest extends Logging { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - val replicas = (0 to numReplicaFetchers).map(i => brokerId + i).toArray + val controllerEpoch = 0 + val replicas = (0 to numReplicaFetchers).map(i => Integer.valueOf(brokerId + i)).toList.asJava val isr = replicas - replicas.foreach(replicaId => when(metadataCache.getAliveBrokerEpoch(replicaId)).thenReturn(Optional.of(1L))) - val partitionRegistration = new PartitionRegistration.Builder() + assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, Some(topicId)), "Expected become leader transition to succeed") + .setIsNew(true), offsetCheckpoints, Some(topicId)), "Expected become leader transition to succeed") partition } private def createLogProperties(overrides: Map[String, String]): Properties = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 512: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1000: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 999: java.lang.Integer) overrides.foreach { case (k, v) => logProps.put(k, v) } @@ -396,6 +395,7 @@ class PartitionLockTest extends Logging { while (fetchOffset < numRecords) { val fetchParams = new FetchParams( + ApiKeys.FETCH.latestVersion, followerId, 1, 0L, @@ -450,9 +450,8 @@ class PartitionLockTest extends Logging { log.producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) { + _topicId = None, + keepPartitionMetadataFile = true) { override def appendAsLeader(records: MemoryRecords, leaderEpoch: Int, origin: AppendOrigin, requestLocal: RequestLocal, verificationGuard: VerificationGuard): LogAppendInfo = { diff --git a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala index 8e512ad4d0128..b559189f394fd 100644 --- a/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala +++ b/core/src/test/scala/unit/kafka/cluster/PartitionTest.scala @@ -18,7 +18,7 @@ package kafka.cluster import java.net.InetAddress import com.yammer.metrics.core.Metric -import kafka.log.LogManager +import kafka.log._ import kafka.server._ import kafka.utils._ import org.apache.kafka.common.errors.{ApiException, FencedLeaderEpochException, InconsistentTopicIdException, InvalidTxnStateException, NotLeaderOrFollowerException, OffsetNotAvailableException, OffsetOutOfRangeException, UnknownLeaderEpochException} @@ -26,12 +26,11 @@ import org.apache.kafka.common.message.{AlterPartitionResponseData, FetchRespons import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.FileRecords.TimestampAndOffset import org.apache.kafka.common.record._ -import org.apache.kafka.common.requests.{AlterPartitionResponse, FetchRequest, ListOffsetsRequest, RequestHeader} +import org.apache.kafka.common.requests.{AlterPartitionResponse, FetchRequest, LeaderAndIsrRequest, ListOffsetsRequest, RequestHeader} import org.apache.kafka.common.utils.Time import org.apache.kafka.common.{DirectoryId, IsolationLevel, TopicPartition, Uuid} -import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, PartitionRegistration} import org.apache.kafka.server.config.ReplicationConfigs -import org.apache.kafka.server.replica.Replica +import org.apache.kafka.metadata.LeaderRecoveryState import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.ArgumentMatchers @@ -55,19 +54,17 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, NodeToControllerChannelManager, RequestLocal} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, TopicPartitionOperationKey} +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.share.fetch.DelayedShareFetchPartitionKey import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, UnexpectedAppendOffsetException} import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, EpochEntry, LocalLog, LogAppendInfo, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetsListener, LogReadInfo, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, EpochEntry, LocalLog, LogAppendInfo, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogReadInfo, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import java.lang -import java.util import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOption @@ -103,7 +100,7 @@ object PartitionTest { /** * Verifies the callbacks that have been triggered since the last - * verification. Values different from `-1` are the ones that have + * verification. Values different than `-1` are the ones that have * been updated. */ def verify( @@ -132,6 +129,7 @@ object PartitionTest { maxBytes: Int = Int.MaxValue ): FetchParams = { new FetchParams( + ApiKeys.FETCH.latestVersion, replicaId, replicaEpoch, maxWaitMs, @@ -150,6 +148,7 @@ object PartitionTest { isolation: FetchIsolation = FetchIsolation.HIGH_WATERMARK ): FetchParams = { new FetchParams( + ApiKeys.FETCH.latestVersion, FetchRequest.CONSUMER_REPLICA_ID, -1, maxWaitMs, @@ -166,14 +165,14 @@ class PartitionTest extends AbstractPartitionTest { @Test def testLastFetchedOffsetValidation(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) def append(leaderEpoch: Int, count: Int): Unit = { val recordArray = (1 to count).map { i => new SimpleRecord(s"$i".getBytes) } val records = MemoryRecords.withRecords(0L, Compression.NONE, leaderEpoch, recordArray: _*) - log.appendAsLeader(records, leaderEpoch) + log.appendAsLeader(records, leaderEpoch = leaderEpoch) } append(leaderEpoch = 0, count = 2) // 0 @@ -187,7 +186,6 @@ class PartitionTest extends AbstractPartitionTest { val leaderEpoch = 10 val logStartOffset = 0L val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true) - addBrokerEpochToMockMetadataCache(metadataCache, Array(remoteReplicaId)) def epochEndOffset(epoch: Int, endOffset: Long): FetchResponseData.EpochEndOffset = { new FetchResponseData.EpochEndOffset() @@ -233,7 +231,7 @@ class PartitionTest extends AbstractPartitionTest { // Move log start offset to the middle of epoch 3 log.updateHighWatermark(log.logEndOffset) - log.maybeIncrementLogStartOffset(5L, LogStartOffsetIncrementReason.ClientRecordDeletion) + log.maybeIncrementLogStartOffset(newLogStartOffset = 5L, LogStartOffsetIncrementReason.ClientRecordDeletion) assertDivergence(epochEndOffset(epoch = 2, endOffset = 5), read(lastFetchedEpoch = 2, fetchOffset = 8)) assertNoDivergence(read(lastFetchedEpoch = 0, fetchOffset = 5)) @@ -242,7 +240,7 @@ class PartitionTest extends AbstractPartitionTest { assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 0, fetchOffset = 0)) // Fetch offset lower than start offset should throw OffsetOutOfRangeException - log.maybeIncrementLogStartOffset(10, LogStartOffsetIncrementReason.ClientRecordDeletion) + log.maybeIncrementLogStartOffset(newLogStartOffset = 10, LogStartOffsetIncrementReason.ClientRecordDeletion) assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 5, fetchOffset = 6)) // diverging assertThrows(classOf[OffsetOutOfRangeException], () => read(lastFetchedEpoch = 3, fetchOffset = 6)) // not diverging } @@ -251,15 +249,15 @@ class PartitionTest extends AbstractPartitionTest { def testMakeLeaderUpdatesEpochCache(): Unit = { val leaderEpoch = 8 - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) log.appendAsLeader(MemoryRecords.withRecords(0L, Compression.NONE, 0, new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes) - ), 0) + ), leaderEpoch = 0) log.appendAsLeader(MemoryRecords.withRecords(0L, Compression.NONE, 5, new SimpleRecord("k3".getBytes, "v3".getBytes), new SimpleRecord("k4".getBytes, "v4".getBytes) - ), 5) + ), leaderEpoch = 5) assertEquals(4, log.logEndOffset) val partition = setupPartitionWithMocks(leaderEpoch = leaderEpoch, isLeader = true) @@ -306,23 +304,24 @@ class PartitionTest extends AbstractPartitionTest { @Test def testReplicaFetchToFollower(): Unit = { + val controllerEpoch = 3 val followerId = brokerId + 1 val leaderId = brokerId + 2 - val replicas = Array(brokerId, followerId, leaderId) - val isr = Array(brokerId, followerId, leaderId) + val replicas = List[Integer](brokerId, followerId, leaderId).asJava + val isr = List[Integer](brokerId, followerId, leaderId).asJava val leaderEpoch = 8 val partitionEpoch = 1 - val partitionRegistration = new PartitionRegistration.Builder() + assertTrue(partition.makeFollower(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leaderId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(partitionEpoch) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeFollower(partitionRegistration, isNew = true, offsetCheckpoints, None)) + .setIsNew(true), + offsetCheckpoints, None + )) def assertFetchFromReplicaFails[T <: ApiException]( expectedExceptionClass: Class[T], @@ -346,26 +345,26 @@ class PartitionTest extends AbstractPartitionTest { @Test def testFetchFromUnrecognizedFollower(): Unit = { + val controllerEpoch = 3 val leader = brokerId val validReplica = brokerId + 1 val addingReplica1 = brokerId + 2 val addingReplica2 = brokerId + 3 - val replicas = Array(leader, validReplica) - val isr = Array(leader, validReplica) + val replicas = List(leader, validReplica) + val isr = List[Integer](leader, validReplica).asJava val leaderEpoch = 8 val partitionEpoch = 1 - addBrokerEpochToMockMetadataCache(metadataCache, Array(leader, addingReplica1, addingReplica2)) - var partitionRegistration = new PartitionRegistration.Builder() + assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(partitionEpoch) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, topicId)) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, topicId + )) assertThrows(classOf[UnknownLeaderEpochException], () => { fetchFollower( @@ -388,21 +387,21 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(None, partition.getReplica(addingReplica2).map(_.stateSnapshot.logEndOffset)) // The replicas are added as part of a reassignment - val newReplicas = Array(leader, validReplica, addingReplica1, addingReplica2) + val newReplicas = List(leader, validReplica, addingReplica1, addingReplica2) val newPartitionEpoch = partitionEpoch + 1 - val addingReplicas = Array(addingReplica1, addingReplica2) + val addingReplicas = List(addingReplica1, addingReplica2) - partitionRegistration = new PartitionRegistration.Builder() + assertFalse(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(newPartitionEpoch) - .setReplicas(newReplicas) - .setAddingReplicas(addingReplicas) - .setDirectories(DirectoryId.unassignedArray(newReplicas.length)) - .build() - assertFalse(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None)) + .setReplicas(newReplicas.map(Int.box).asJava) + .setAddingReplicas(addingReplicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None + )) // Now the fetches are allowed assertEquals(0L, fetchFollower( @@ -427,8 +426,6 @@ class PartitionTest extends AbstractPartitionTest { def testMakeFollowerWithWithFollowerAppendRecords(): Unit = { val appendSemaphore = new Semaphore(0) val mockTime = new MockTime() - val prevLeaderEpoch = 0 - val replicas = Array(0, 1, 2, brokerId) partition = new Partition( topicPartition, @@ -447,7 +444,7 @@ class PartitionTest extends AbstractPartitionTest { val logDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(log.topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - log.dir, log.topicPartition, logDirFailureChannel, Optional.empty, time.scheduler) + log.dir, log.topicPartition, logDirFailureChannel, None, time.scheduler) val maxTransactionTimeoutMs = 5 * 60 * 1000 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, true) val producerStateManager = new ProducerStateManager( @@ -476,49 +473,32 @@ class PartitionTest extends AbstractPartitionTest { val localLog = new LocalLog(log.dir, log.config, segments, offsets.recoveryPoint, offsets.nextOffsetMetadata, mockTime.scheduler, mockTime, log.topicPartition, logDirFailureChannel) - new SlowLog(log, topicId.toJava, offsets.logStartOffset, localLog, leaderEpochCache, producerStateManager, appendSemaphore) + new SlowLog(log, topicId, offsets.logStartOffset, localLog, leaderEpochCache, producerStateManager, appendSemaphore) } } partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, None) - var partitionRegistration = new PartitionRegistration.Builder() - .setLeader(2) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(prevLeaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None)) - val appendThread = new Thread { override def run(): Unit = { - val records = createRecords( - util.List.of( - new SimpleRecord("k1".getBytes, "v1".getBytes), - new SimpleRecord("k2".getBytes, "v2".getBytes) - ), - baseOffset = 0, - partitionLeaderEpoch = prevLeaderEpoch - ) - partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false, prevLeaderEpoch) + val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes)), + baseOffset = 0) + partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false) } } appendThread.start() TestUtils.waitUntilTrue(() => appendSemaphore.hasQueuedThreads, "follower log append is not called.") - partitionRegistration = new PartitionRegistration.Builder() + val partitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) .setLeader(2) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(prevLeaderEpoch + 1) - .setIsr(replicas) - .setPartitionEpoch(2) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) + .setLeaderEpoch(1) + .setIsr(List[Integer](0, 1, 2, brokerId).asJava) + .setPartitionEpoch(1) + .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) + .setIsNew(false) + assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None)) appendSemaphore.release() appendThread.join() @@ -545,32 +525,25 @@ class PartitionTest extends AbstractPartitionTest { new SimpleRecord("k2".getBytes, "v4".getBytes), new SimpleRecord("k2".getBytes, "v5".getBytes), new SimpleRecord("k2".getBytes, "v6".getBytes) - ), 0) + ), leaderEpoch = 0) currentLog.roll() currentLog.appendAsLeader(MemoryRecords.withRecords(0L, Compression.NONE, 0, new SimpleRecord("k3".getBytes, "v7".getBytes), new SimpleRecord("k4".getBytes, "v8".getBytes) - ), 0) + ), leaderEpoch = 0) // Write to the future replica as if the log had been compacted, and do not roll the segment val buffer = ByteBuffer.allocate(1024) - val builder = MemoryRecords.builder( - buffer, - RecordBatch.CURRENT_MAGIC_VALUE, - Compression.NONE, - TimestampType.CREATE_TIME, - 0L, // baseOffset - RecordBatch.NO_TIMESTAMP, - 0 // partitionLeaderEpoch - ) + val builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, + TimestampType.CREATE_TIME, 0L, RecordBatch.NO_TIMESTAMP, 0) builder.appendWithOffset(2L, new SimpleRecord("k1".getBytes, "v3".getBytes)) builder.appendWithOffset(5L, new SimpleRecord("k2".getBytes, "v6".getBytes)) builder.appendWithOffset(6L, new SimpleRecord("k3".getBytes, "v7".getBytes)) builder.appendWithOffset(7L, new SimpleRecord("k4".getBytes, "v8".getBytes)) val futureLog = partition.futureLocalLogOrException - futureLog.appendAsFollower(builder.build(), 0) + futureLog.appendAsFollower(builder.build()) assertTrue(partition.maybeReplaceCurrentWithFutureReplica()) } @@ -668,8 +641,6 @@ class PartitionTest extends AbstractPartitionTest { val leaderEpoch = 5 val partition = setupPartitionWithMocks(leaderEpoch, isLeader = true) - addBrokerEpochToMockMetadataCache(metadataCache, Array(remoteReplicaId)) - def sendFetch(leaderEpoch: Option[Int]): LogReadInfo = { fetchFollower( partition, @@ -804,11 +775,12 @@ class PartitionTest extends AbstractPartitionTest { */ @Test def testMonotonicOffsetsAfterLeaderChange(): Unit = { + val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = Array(leader, follower1, follower2) - val isr = Array(leader, follower2) + val replicas = List(leader, follower1, follower2) + val isr = List[Integer](leader, follower2).asJava val leaderEpoch = 8 val batch1 = TestUtils.records(records = List( new SimpleRecord(10, "k1".getBytes, "v1".getBytes), @@ -816,19 +788,17 @@ class PartitionTest extends AbstractPartitionTest { val batch2 = TestUtils.records(records = List(new SimpleRecord("k3".getBytes, "v1".getBytes), new SimpleRecord(20,"k4".getBytes, "v2".getBytes), new SimpleRecord(21,"k5".getBytes, "v3".getBytes))) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val leaderRegistration = new PartitionRegistration.Builder() + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch") assertEquals(Set[Integer](leader, follower2), partition.partitionState.isr, "ISR") @@ -890,28 +860,28 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(Right(None), fetchOffsetsForTimestamp(30, Some(IsolationLevel.READ_UNCOMMITTED))) // Make into a follower - val followerRegistration = new PartitionRegistration.Builder() + val followerState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(follower2) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 1) .setIsr(isr) .setPartitionEpoch(4) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeFollower(followerRegistration, isNew = false, offsetCheckpoints, None)) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertTrue(partition.makeFollower(followerState, offsetCheckpoints, None)) // Back to leader, this resets the startLogOffset for this epoch (to 2), we're now in the fault condition - val newLeaderRegistration = new PartitionRegistration.Builder() + val newLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 2) .setIsr(isr) .setPartitionEpoch(5) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(newLeaderRegistration, isNew = false, offsetCheckpoints, None)) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, None)) // Try to get offsets as a client fetchOffsetsForTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(IsolationLevel.READ_UNCOMMITTED)) match { @@ -981,20 +951,6 @@ class PartitionTest extends AbstractPartitionTest { def testAppendRecordsAsFollowerBelowLogStartOffset(): Unit = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val log = partition.localLogOrException - val epoch = 1 - val replicas = Array(0, 1, 2, brokerId) - - // Start off as follower - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(1) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(epoch) - .setIsr(replicas) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None) val initialLogStartOffset = 5L partition.truncateFullyAndStartAt(initialLogStartOffset, isFuture = false) @@ -1004,14 +960,9 @@ class PartitionTest extends AbstractPartitionTest { s"Log start offset after truncate fully and start at $initialLogStartOffset:") // verify that we cannot append records that do not contain log start offset even if the log is empty - assertThrows( - classOf[UnexpectedAppendOffsetException], + assertThrows(classOf[UnexpectedAppendOffsetException], () => // append one record with offset = 3 - () => partition.appendRecordsToFollowerOrFutureReplica( - createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), - isFuture = false, - partitionLeaderEpoch = epoch - ) + partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 3L), isFuture = false) ) assertEquals(initialLogStartOffset, log.logEndOffset, s"Log end offset should not change after failure to append") @@ -1019,71 +970,58 @@ class PartitionTest extends AbstractPartitionTest { // verify that we can append records that contain log start offset, even when first // offset < log start offset if the log is empty val newLogStartOffset = 4L - val records = createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes), + val records = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), baseOffset = newLogStartOffset) - partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false, partitionLeaderEpoch = epoch) + partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false) assertEquals(7L, log.logEndOffset, s"Log end offset after append of 3 records with base offset $newLogStartOffset:") assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset after append of 3 records with base offset $newLogStartOffset:") // and we can append more records after that - partition.appendRecordsToFollowerOrFutureReplica( - createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), - isFuture = false, - partitionLeaderEpoch = epoch - ) + partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 7L), isFuture = false) assertEquals(8L, log.logEndOffset, s"Log end offset after append of 1 record at offset 7:") assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset not expected to change:") // but we cannot append to offset < log start if the log is not empty - val records2 = createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes), + val records2 = createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes)), baseOffset = 3L) - assertThrows( - classOf[UnexpectedAppendOffsetException], - () => partition.appendRecordsToFollowerOrFutureReplica(records2, isFuture = false, partitionLeaderEpoch = epoch) - ) + assertThrows(classOf[UnexpectedAppendOffsetException], () => partition.appendRecordsToFollowerOrFutureReplica(records2, isFuture = false)) assertEquals(8L, log.logEndOffset, s"Log end offset should not change after failure to append") // we still can append to next offset - partition.appendRecordsToFollowerOrFutureReplica( - createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), - isFuture = false, - partitionLeaderEpoch = epoch - ) + partition.appendRecordsToFollowerOrFutureReplica(createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 8L), isFuture = false) assertEquals(9L, log.logEndOffset, s"Log end offset after append of 1 record at offset 8:") assertEquals(newLogStartOffset, log.logStartOffset, s"Log start offset not expected to change:") } @Test def testListOffsetIsolationLevels(): Unit = { + val controllerEpoch = 0 val leaderEpoch = 5 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() + assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") - + .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) - val records = createTransactionalRecords(util.List.of( + val records = createTransactionalRecords(List( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), baseOffset = 0L, producerId = 2L) - val verificationGuard = partition.maybeStartTransactionVerification(2L, 0, 0, supportsEpochBump = true) + val verificationGuard = partition.maybeStartTransactionVerification(2L, 0, 0, true) partition.appendRecordsToLeader(records, origin = AppendOrigin.CLIENT, requiredAcks = 0, RequestLocal.withThreadConfinedCaching, verificationGuard) def fetchOffset(isolationLevel: Option[IsolationLevel], timestamp: Long): TimestampAndOffset = { @@ -1136,63 +1074,55 @@ class PartitionTest extends AbstractPartitionTest { @Test def testAppendRecordsToFollowerWithNoReplicaThrowsException(): Unit = { - assertThrows( - classOf[NotLeaderOrFollowerException], - () => partition.appendRecordsToFollowerOrFutureReplica( - createRecords(util.List.of(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), - isFuture = false, - partitionLeaderEpoch = 0 - ) + assertThrows(classOf[NotLeaderOrFollowerException], () => + partition.appendRecordsToFollowerOrFutureReplica( + createRecords(List(new SimpleRecord("k1".getBytes, "v1".getBytes)), baseOffset = 0L), isFuture = false) ) } @Test def testMakeFollowerWithNoLeaderIdChange(): Unit = { - val replicas = Array(0, 1, 2, brokerId) // Start off as follower - var partitionRegistration = new PartitionRegistration.Builder() + var partitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) .setLeader(1) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(replicas) + .setIsr(List[Integer](0, 1, 2, brokerId).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None) + .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) + .setIsNew(false) + partition.makeFollower(partitionState, offsetCheckpoints, None) // Request with same leader and epoch increases by only 1, do become-follower steps - partitionRegistration = new PartitionRegistration.Builder() + partitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) .setLeader(1) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(4) - .setIsr(replicas) + .setIsr(List[Integer](0, 1, 2, brokerId).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) + .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) + .setIsNew(false) + assertTrue(partition.makeFollower(partitionState, offsetCheckpoints, None)) // Request with same leader and same epoch, skip become-follower steps - partitionRegistration = new PartitionRegistration.Builder() + partitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) .setLeader(1) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(4) - .setIsr(replicas) + .setIsr(List[Integer](0, 1, 2, brokerId).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertFalse(partition.makeFollower(partitionRegistration, isNew = false, offsetCheckpoints, None)) + .setReplicas(List[Integer](0, 1, 2, brokerId).asJava) + assertFalse(partition.makeFollower(partitionState, offsetCheckpoints, None)) } @Test def testFollowerDoesNotJoinISRUntilCaughtUpToOffsetWithinCurrentLeaderEpoch(): Unit = { + val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = Array(leader, follower1, follower2) - val isr = Array(leader, follower2) + val replicas = List[Integer](leader, follower1, follower2).asJava + val isr = List[Integer](leader, follower2).asJava val leaderEpoch = 8 val batch1 = TestUtils.records(records = List(new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes))) @@ -1201,18 +1131,16 @@ class PartitionTest extends AbstractPartitionTest { new SimpleRecord("k5".getBytes, "v3".getBytes))) val batch3 = TestUtils.records(records = List(new SimpleRecord("k6".getBytes, "v1".getBytes), new SimpleRecord("k7".getBytes, "v2".getBytes))) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val leaderRegistration = new PartitionRegistration.Builder() + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") + .setIsNew(true) + assertTrue(partition.makeLeader(leaderState, offsetCheckpoints, topicId), "Expected first makeLeader() to return 'leader changed'") assertEquals(leaderEpoch, partition.getLeaderEpoch, "Current leader epoch") assertEquals(Set[Integer](leader, follower2), partition.partitionState.isr, "ISR") @@ -1231,27 +1159,25 @@ class PartitionTest extends AbstractPartitionTest { assertEquals(lastOffsetOfFirstBatch + 1, partition.log.get.highWatermark, "Expected leader's HW") // current leader becomes follower and then leader again (without any new records appended) - val followerRegistration = new PartitionRegistration.Builder() + val followerState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(follower2) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 1) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeFollower(followerRegistration, isNew = false, offsetCheckpoints, None) + .setIsNew(false) + partition.makeFollower(followerState, offsetCheckpoints, None) - val newLeaderRegistration = new PartitionRegistration.Builder() + val newLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch + 2) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(newLeaderRegistration, isNew = false, offsetCheckpoints, topicId), + .setIsNew(false) + assertTrue(partition.makeLeader(newLeaderState, offsetCheckpoints, topicId), "Expected makeLeader() to return 'leader changed' after makeFollower()") val currentLeaderEpochStartOffset = partition.localLogOrException.logEndOffset @@ -1274,38 +1200,38 @@ class PartitionTest extends AbstractPartitionTest { Set(leader, follower1, follower2), "AlterIsr") } - def createRecords(records: lang.Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = { - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) + def createRecords(records: Iterable[SimpleRecord], baseOffset: Long, partitionLeaderEpoch: Int = 0): MemoryRecords = { + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) val builder = MemoryRecords.builder( buf, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, TimestampType.LOG_APPEND_TIME, baseOffset, time.milliseconds, partitionLeaderEpoch) - records.forEach(builder.append) + records.foreach(builder.append) builder.build() } - def createIdempotentRecords(records: lang.Iterable[SimpleRecord], + def createIdempotentRecords(records: Iterable[SimpleRecord], baseOffset: Long, baseSequence: Int = 0, producerId: Long = 1L): MemoryRecords = { val producerEpoch = 0.toShort val isTransactional = false - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) val builder = MemoryRecords.builder(buf, Compression.NONE, baseOffset, producerId, producerEpoch, baseSequence, isTransactional) - records.forEach(builder.append) + records.foreach(builder.append) builder.build() } - def createTransactionalRecords(records: lang.Iterable[SimpleRecord], + def createTransactionalRecords(records: Iterable[SimpleRecord], baseOffset: Long, baseSequence: Int = 0, producerId: Long = 1L): MemoryRecords = { val producerEpoch = 0.toShort val isTransactional = true - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) val builder = MemoryRecords.builder(buf, Compression.NONE, baseOffset, producerId, producerEpoch, baseSequence, isTransactional) - records.forEach(builder.append) + records.foreach(builder.append) builder.build() } @@ -1315,31 +1241,30 @@ class PartitionTest extends AbstractPartitionTest { */ @Test def testAtMinIsr(): Unit = { + val controllerEpoch = 3 val leader = brokerId val follower1 = brokerId + 1 val follower2 = brokerId + 2 - val replicas = Array(leader, follower1, follower2) - val isr = Array(leader) + val replicas = List[Integer](leader, follower1, follower2).asJava + val isr = List[Integer](leader).asJava val leaderEpoch = 8 assertFalse(partition.isAtMinIsr) // Make isr set to only have leader to trigger AtMinIsr (default min isr config is 1) - val leaderRegistration = new PartitionRegistration.Builder() + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, None) + .setIsNew(true) + partition.makeLeader(leaderState, offsetCheckpoints, None) assertTrue(partition.isAtMinIsr) } @Test def testIsUnderMinIsr(): Unit = { - val replicas = Array(brokerId, brokerId + 1) configRepository.setTopicConfig(topicPartition.topic, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "2") partition = new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, @@ -1352,61 +1277,61 @@ class PartitionTest extends AbstractPartitionTest { logManager, alterPartitionManager) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = None) - - var leaderRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(replicas) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(leaderRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) assertFalse(partition.isUnderMinIsr) - leaderRegistration = new PartitionRegistration.Builder() + val LeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(Array(brokerId)) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) + .setIsr(List(brokerId).map(Int.box).asJava) .setPartitionEpoch(2) - .build() - partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setIsNew(false) + + partition.makeLeader(LeaderState, offsetCheckpoints, None) assertTrue(partition.isUnderMinIsr) } @Test def testUpdateFollowerFetchState(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 6, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) + val replicas = List[Integer](brokerId, remoteBrokerId).asJava val isr = replicas - addBrokerEpochToMockMetadataCache(metadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val initializeTimeMs = time.milliseconds() - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) time.sleep(500) @@ -1430,6 +1355,7 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsReplicaIsrEligibleWithEmptyReplicaMap(): Unit = { + val mockMetadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) val partition = spy(new Partition(topicPartition, replicaLagTimeMaxMs = ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_DEFAULT, localBrokerId = brokerId, @@ -1437,37 +1363,39 @@ class PartitionTest extends AbstractPartitionTest { time, alterPartitionListener, delayedOperations, - metadataCache, + mockMetadataCache, logManager, alterPartitionManager)) when(offsetCheckpoints.fetch(ArgumentMatchers.anyString, ArgumentMatchers.eq(topicPartition))) .thenReturn(Optional.empty[JLong]) - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 6, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = List(brokerId, remoteBrokerId) + addBrokerEpochToMockMetadataCache(mockMetadataCache, replicas) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val initializeTimeMs = time.milliseconds() - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(Array(brokerId)) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(List[Integer](brokerId).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") + doAnswer(_ => { // simulate topic is deleted at the moment partition.delete() - val replica = new Replica(remoteBrokerId, topicPartition, metadataCache) + val replica = new Replica(remoteBrokerId, topicPartition, mockMetadataCache) partition.updateFollowerFetchState(replica, mock(classOf[LogOffsetMetadata]), 0, initializeTimeMs, 0, defaultBrokerEpoch(remoteBrokerId)) mock(classOf[LogReadInfo]) }).when(partition).fetchRecords(any(), any(), anyLong(), anyInt(), anyBoolean(), anyBoolean()) @@ -1477,32 +1405,32 @@ class PartitionTest extends AbstractPartitionTest { @Test def testInvalidAlterPartitionRequestsAreNotRetried(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = List[Integer](brokerId, remoteBrokerId).asJava + val isr = List[Integer](brokerId).asJava partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = 0L, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = 10L) @@ -1528,32 +1456,32 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsrExpansion(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = List(brokerId, remoteBrokerId) + val isr = List[Integer](brokerId).asJava partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = 0L, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = 3L) @@ -1567,10 +1495,10 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = 10L) assertEquals(alterPartitionManager.isrUpdates.size, 1) val isrItem = alterPartitionManager.isrUpdates.head - assertEquals(isrItem.leaderAndIsr.isr, util.List.of[Integer](brokerId, remoteBrokerId)) + assertEquals(isrItem.leaderAndIsr.isr, List(brokerId, remoteBrokerId).map(Int.box).asJava) isrItem.leaderAndIsr.isrWithBrokerEpoch.asScala.foreach { brokerState => - // the broker epochs should be equal to broker epoch of the leader - assertEquals(defaultBrokerEpoch(brokerState.brokerId()), brokerState.brokerEpoch()) + // the broker epochs in the leaderAndIsr should be -1. + assertEquals(-1, brokerState.brokerEpoch()) } assertEquals(Set(brokerId), partition.partitionState.isr) assertEquals(Set(brokerId, remoteBrokerId), partition.partitionState.maximalIsr) @@ -1591,32 +1519,32 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsrNotExpandedIfUpdateFails(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = List[Integer](brokerId, remoteBrokerId).asJava + val isr = List[Integer](brokerId).asJava partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr) + .setPartitionEpoch(1) + .setReplicas(replicas) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(Set(brokerId), partition.partitionState.isr) assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = 0L, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = 10L) @@ -1646,14 +1574,16 @@ class PartitionTest extends AbstractPartitionTest { @ParameterizedTest @ValueSource(strings = Array("fenced", "shutdown", "unfenced")) def testHighWatermarkIncreasesWithFencedOrShutdownFollower(brokerState: String): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val shrinkedIsr = Array(brokerId) + val replicas = List(brokerId, remoteBrokerId) + val shrinkedIsr = Set(brokerId) + val metadataCache = mock(classOf[KRaftMetadataCache]) addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( @@ -1670,17 +1600,21 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - var partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(replicas) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None), "Expected become leader transition to succeed") - + assertTrue( + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false), + offsetCheckpoints, + None + ), + "Expected become leader transition to succeed" + ) assertEquals(replicas.toSet, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) @@ -1708,23 +1642,28 @@ class PartitionTest extends AbstractPartitionTest { seedLogData(log, numRecords = 10, leaderEpoch) // Controller shrinks the ISR after - partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(shrinkedIsr) - .setPartitionEpoch(2) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertFalse(partition.makeLeader(partitionRegistration, isNew = false, offsetCheckpoints, None), "Expected to stay leader") + assertFalse( + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(shrinkedIsr.toList.map(Int.box).asJava) + .setPartitionEpoch(2) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false), + offsetCheckpoints, + None + ), + "Expected to stay leader" + ) assertTrue(partition.isLeader) - assertEquals(shrinkedIsr.toSet, partition.partitionState.isr) - assertEquals(shrinkedIsr.toSet, partition.partitionState.maximalIsr) + assertEquals(shrinkedIsr, partition.partitionState.isr) + assertEquals(shrinkedIsr, partition.partitionState.maximalIsr) assertEquals(Set.empty, partition.getOutOfSyncReplicas(partition.replicaLagTimeMaxMs)) - // In the case of unfenced, the HWM doesn't increase, otherwise the HWM increases because the + // In the case of unfenced, the HWM doesn't increase, otherwise the the HWM increases because the // fenced and shutdown replica is not considered during HWM calculation. if (brokerState == "unfenced") { assertEquals(10, partition.localLogOrException.highWatermark) @@ -1733,16 +1672,19 @@ class PartitionTest extends AbstractPartitionTest { } } - @Test - def testIsrNotExpandedIfReplicaIsFencedOrShutdown(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIsrNotExpandedIfReplicaIsFencedOrShutdown(quorum: String): Unit = { + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) + val replicas = List(brokerId, remoteBrokerId) + val isr = Set(brokerId) + val metadataCache = mock(classOf[KRaftMetadataCache]) addBrokerEpochToMockMetadataCache(metadataCache, replicas) // Mark the remote broker as eligible or ineligible in the metadata cache of the leader. @@ -1765,18 +1707,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) markRemoteReplicaEligible(true) @@ -1792,7 +1734,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -1800,8 +1742,8 @@ class PartitionTest extends AbstractPartitionTest { alterPartitionManager.failIsrUpdate(Errors.INELIGIBLE_REPLICA) // The leader reverts back to the previous ISR. - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -1812,8 +1754,8 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is not triggered because the follower is fenced. - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -1824,7 +1766,7 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is triggered. - assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertTrue(partition.partitionState.isInflight) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -1841,14 +1783,15 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsrCanExpandedIfBrokerEpochsMatchWithKraftMetadataCache(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Array(brokerId, remoteBrokerId2) + val replicas = List(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Set(brokerId, remoteBrokerId2) val metadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) @@ -1870,24 +1813,24 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) // Fetch to let the follower catch up to the log end offset, but using a wrong broker epoch. The expansion should fail. - addBrokerEpochToMockMetadataCache(metadataCache, Array(brokerId, remoteBrokerId2)) + addBrokerEpochToMockMetadataCache(metadataCache, List(brokerId, remoteBrokerId2)) // Create a race case where the replica epoch get bumped right after the previous fetch succeeded. val wrongReplicaEpoch = defaultBrokerEpoch(remoteBrokerId1) - 1 - when(metadataCache.getAliveBrokerEpoch(remoteBrokerId1)).thenReturn(Optional.of(wrongReplicaEpoch), Optional.of(defaultBrokerEpoch(remoteBrokerId1))) + when(metadataCache.getAliveBrokerEpoch(remoteBrokerId1)).thenReturn(Option(wrongReplicaEpoch), Option(defaultBrokerEpoch(remoteBrokerId1))) fetchFollower(partition, replicaId = remoteBrokerId1, fetchOffset = log.logEndOffset, @@ -1902,8 +1845,8 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is not triggered. - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) assertEquals(0, alterPartitionManager.isrUpdates.size) // Fetch again, this time with correct default broker epoch. @@ -1920,7 +1863,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) val isrUpdate = alterPartitionManager.isrUpdates.head @@ -1936,13 +1879,15 @@ class PartitionTest extends AbstractPartitionTest { @Test def testFenceFollowerFetchWithStaleBrokerEpoch(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId1) - val isr = Array(brokerId, remoteBrokerId1) + val replicas = List(brokerId, remoteBrokerId1) + val isr = Set(brokerId, remoteBrokerId1) + val metadataCache = mock(classOf[KRaftMetadataCache]) addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( @@ -1959,18 +1904,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) val expectedReplicaEpoch = defaultBrokerEpoch(remoteBrokerId1) fetchFollower(partition, @@ -1999,14 +1944,16 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsrNotExpandedIfReplicaIsInControlledShutdown(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) + val replicas = List(brokerId, remoteBrokerId) + val isr = Set(brokerId) + val metadataCache = mock(classOf[KRaftMetadataCache]) addBrokerEpochToMockMetadataCache(metadataCache, replicas) val partition = new Partition( topicPartition, @@ -2022,18 +1969,18 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) // Fetch to let the follower catch up to the log end offset and // to check if an expansion is possible. @@ -2047,7 +1994,7 @@ class PartitionTest extends AbstractPartitionTest { ) // Expansion is triggered. - assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -2055,8 +2002,8 @@ class PartitionTest extends AbstractPartitionTest { alterPartitionManager.failIsrUpdate(Errors.INELIGIBLE_REPLICA) // The leader reverts back to the previous ISR. - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -2067,8 +2014,8 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is not triggered because the follower is fenced. - assertEquals(isr.toSet, partition.partitionState.isr) - assertEquals(isr.toSet, partition.partitionState.maximalIsr) + assertEquals(isr, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.maximalIsr) assertFalse(partition.partitionState.isInflight) assertEquals(0, alterPartitionManager.isrUpdates.size) @@ -2079,7 +2026,7 @@ class PartitionTest extends AbstractPartitionTest { fetchFollower(partition, replicaId = remoteBrokerId, fetchOffset = log.logEndOffset) // Expansion is triggered. - assertEquals(isr.toSet, partition.partitionState.isr) + assertEquals(isr, partition.partitionState.isr) assertEquals(replicas.toSet, partition.partitionState.maximalIsr) assertTrue(partition.partitionState.isInflight) assertEquals(1, alterPartitionManager.isrUpdates.size) @@ -2096,17 +2043,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testRetryShrinkIsr(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId, remoteBrokerId) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId, remoteBrokerId) val topicId = Uuid.randomUuid() assertTrue(makeLeader( topicId = Some(topicId), + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2121,7 +2070,7 @@ class PartitionTest extends AbstractPartitionTest { // Try to shrink the ISR partition.maybeShrinkIsr() assertEquals(alterPartitionManager.isrUpdates.size, 1) - assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, util.List.of[Integer](brokerId)) + assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId).map(Int.box).asJava) assertEquals(Set(brokerId, remoteBrokerId), partition.partitionState.isr) assertEquals(Set(brokerId, remoteBrokerId), partition.partitionState.maximalIsr) @@ -2147,18 +2096,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testMaybeShrinkIsr(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Array(brokerId, remoteBrokerId1, remoteBrokerId2) + val replicas = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) val initializeTimeMs = time.milliseconds() val metadataCache = mock(classOf[KRaftMetadataCache]) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) val partition = new Partition( topicPartition, @@ -2174,24 +2124,24 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(0L, partition.localLogOrException.highWatermark) fetchFollower(partition, replicaId = remoteBrokerId1, fetchOffset = log.logEndOffset) assertReplicaState(partition, remoteBrokerId2, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // On initialization, the replica is considered caught up and should not be removed @@ -2206,7 +2156,7 @@ class PartitionTest extends AbstractPartitionTest { partition.maybeShrinkIsr() assertEquals(0, alterPartitionListener.shrinks.get) assertEquals(alterPartitionManager.isrUpdates.size, 1) - assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, util.List.of[Integer](brokerId, remoteBrokerId1)) + assertEquals(alterPartitionManager.isrUpdates.head.leaderAndIsr.isr, List(brokerId, remoteBrokerId1).map(Int.box).asJava) val isrUpdate = alterPartitionManager.isrUpdates.head isrUpdate.leaderAndIsr.isrWithBrokerEpoch.asScala.foreach { brokerState => assertEquals(defaultBrokerEpoch(brokerState.brokerId()), brokerState.brokerEpoch()) @@ -2229,16 +2179,18 @@ class PartitionTest extends AbstractPartitionTest { @Test def testHighWatermarkAdvanceShouldNotAdvanceWhenUnderMinISR(): Unit = { configRepository.setTopicConfig(topicPartition.topic, TopicConfig.MIN_IN_SYNC_REPLICAS_CONFIG, "3") - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId1 = brokerId + 1 val remoteBrokerId2 = brokerId + 2 - val replicas = Array(brokerId, remoteBrokerId1, remoteBrokerId2) - val isr = Array(brokerId, remoteBrokerId1) + val replicas = Seq(brokerId, remoteBrokerId1, remoteBrokerId2) + val isr = Seq(brokerId, remoteBrokerId1) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val metadataCache = mock(classOf[KRaftMetadataCache]) + addBrokerEpochToMockMetadataCache(metadataCache, replicas.toList) val partition = new Partition( topicPartition, @@ -2254,16 +2206,16 @@ class PartitionTest extends AbstractPartitionTest { ) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + assertTrue(partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.toList.map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true), + offsetCheckpoints, None), "Expected become leader transition to succeed") assertTrue(partition.isUnderMinIsr) assertEquals(0L, partition.localLogOrException.highWatermark) @@ -2285,17 +2237,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testAlterIsrLeaderAndIsrRace(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId, remoteBrokerId) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2306,8 +2260,8 @@ class PartitionTest extends AbstractPartitionTest { assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // Shrink the ISR @@ -2318,6 +2272,7 @@ class PartitionTest extends AbstractPartitionTest { // Become leader again, reset the ISR state assertFalse(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2340,18 +2295,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testShouldNotShrinkIsrIfPreviousFetchIsCaughtUp(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId, remoteBrokerId) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() - addBrokerEpochToMockMetadataCache(metadataCache, replicas) assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2362,8 +2318,8 @@ class PartitionTest extends AbstractPartitionTest { assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // There is a short delay before the first fetch. The follower is not yet caught up to the log end. @@ -2398,18 +2354,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testShouldNotShrinkIsrIfFollowerCaughtUpToLogEnd(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId, remoteBrokerId) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() - addBrokerEpochToMockMetadataCache(metadataCache, replicas) assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2420,8 +2377,8 @@ class PartitionTest extends AbstractPartitionTest { assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // The follower catches up to the log end immediately. @@ -2444,17 +2401,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIsrNotShrunkIfUpdateFails(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId, remoteBrokerId) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId, remoteBrokerId) val initializeTimeMs = time.milliseconds() assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2465,8 +2424,8 @@ class PartitionTest extends AbstractPartitionTest { assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = initializeTimeMs, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) time.sleep(30001) @@ -2529,17 +2488,18 @@ class PartitionTest extends AbstractPartitionTest { } def handleAlterIsrFailure(error: Errors, callback: (Int, Int, Partition) => Unit): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val remoteBrokerId = brokerId + 1 - val replicas = Array(brokerId, remoteBrokerId) - val isr = Array(brokerId) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = Seq(brokerId, remoteBrokerId) + val isr = Seq(brokerId) assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2550,8 +2510,8 @@ class PartitionTest extends AbstractPartitionTest { assertReplicaState(partition, remoteBrokerId, lastCaughtUpTimeMs = 0L, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // This will attempt to expand the ISR @@ -2576,7 +2536,7 @@ class PartitionTest extends AbstractPartitionTest { private def createClientResponseWithAlterPartitionResponse( topicPartition: TopicPartition, partitionErrorCode: Short, - isr: util.List[Integer] = util.List.of[Integer], + isr: List[Int] = List.empty, leaderEpoch: Int = 0, partitionEpoch: Int = 0 ): ClientResponse = { @@ -2585,7 +2545,7 @@ class PartitionTest extends AbstractPartitionTest { topicResponse.partitions.add(new AlterPartitionResponseData.PartitionData() .setPartitionIndex(topicPartition.partition) - .setIsr(isr) + .setIsr(isr.map(Integer.valueOf).asJava) .setLeaderEpoch(leaderEpoch) .setPartitionEpoch(partitionEpoch) .setErrorCode(partitionErrorCode)) @@ -2619,17 +2579,17 @@ class PartitionTest extends AbstractPartitionTest { logManager, alterPartitionManager) - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val follower1 = brokerId + 1 val follower2 = brokerId + 2 val follower3 = brokerId + 3 - val replicas = Array(brokerId, follower1, follower2, follower3) - val isr = Array(brokerId, follower1, follower2) + val replicas = Seq(brokerId, follower1, follower2, follower3) + val isr = Seq(brokerId, follower1, follower2) val partitionEpoch = 1 - addBrokerEpochToMockMetadataCache(metadataCache, replicas) doNothing().when(delayedOperations).checkAndCompleteAll() @@ -2639,7 +2599,7 @@ class PartitionTest extends AbstractPartitionTest { // Complete the ISR expansion val alterPartitionResponseWithoutError = - createClientResponseWithAlterPartitionResponse(topicPartition, Errors.NONE.code, util.List.of[Integer](brokerId, follower1, follower2, follower3), leaderEpoch, partitionEpoch + 1) + createClientResponseWithAlterPartitionResponse(topicPartition, Errors.NONE.code, List(brokerId, follower1, follower2, follower3), leaderEpoch, partitionEpoch + 1) when(mockChannelManager.sendRequest(any(), any())) .thenAnswer { invocation => @@ -2653,6 +2613,7 @@ class PartitionTest extends AbstractPartitionTest { assertTrue(makeLeader( topicId = topicId, + controllerEpoch, leaderEpoch, isr, replicas, @@ -2674,21 +2635,22 @@ class PartitionTest extends AbstractPartitionTest { @Test def testSingleInFlightAlterIsr(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 10, leaderEpoch = 4) + val controllerEpoch = 0 val leaderEpoch = 5 val follower1 = brokerId + 1 val follower2 = brokerId + 2 val follower3 = brokerId + 3 - val replicas = Array(brokerId, follower1, follower2, follower3) - val isr = Array(brokerId, follower1, follower2) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) + val replicas = Seq(brokerId, follower1, follower2, follower3) + val isr = Seq(brokerId, follower1, follower2) doNothing().when(delayedOperations).checkAndCompleteAll() assertTrue(makeLeader( topicId = topicId, + controllerEpoch = controllerEpoch, leaderEpoch = leaderEpoch, isr = isr, replicas = replicas, @@ -2713,41 +2675,41 @@ class PartitionTest extends AbstractPartitionTest { @Test def testUseCheckpointToInitializeHighWatermark(): Unit = { - val log = logManager.getOrCreateLog(topicPartition, topicId = topicId.toJava) + val log = logManager.getOrCreateLog(topicPartition, topicId = topicId) seedLogData(log, numRecords = 6, leaderEpoch = 5) when(offsetCheckpoints.fetch(logDir1.getAbsolutePath, topicPartition)) .thenReturn(Optional.of(long2Long(4L))) - val replicas = Array(brokerId, brokerId + 1) - val leaderRegistration = new PartitionRegistration.Builder() + val controllerEpoch = 3 + val replicas = List[Integer](brokerId, brokerId + 1).asJava + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) + .setIsNew(false) + partition.makeLeader(leaderState, offsetCheckpoints, None) assertEquals(4, partition.localLogOrException.highWatermark) } @Test def testTopicIdAndPartitionMetadataFileForLeader(): Unit = { + val controllerEpoch = 3 val leaderEpoch = 5 val topicId = Uuid.randomUuid() - val replicas = Array(brokerId, brokerId + 1) - val leaderRegistration = new PartitionRegistration.Builder() + val replicas = List[Integer](brokerId, brokerId + 1).asJava + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, Some(topicId)) + .setIsNew(false) + partition.makeLeader(leaderState, offsetCheckpoints, Some(topicId)) checkTopicId(topicId, partition) @@ -2770,28 +2732,28 @@ class PartitionTest extends AbstractPartitionTest { // Calling makeLeader with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException. // This scenario should not occur, since the topic ID check will fail. - assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, Some(Uuid.randomUuid()))) + assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeLeader(leaderState, offsetCheckpoints, Some(Uuid.randomUuid()))) // Calling makeLeader with no topic ID should not overwrite the old topic ID. We should get the original log. - partition2.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, None) + partition2.makeLeader(leaderState, offsetCheckpoints, None) checkTopicId(topicId, partition2) } @Test def testTopicIdAndPartitionMetadataFileForFollower(): Unit = { + val controllerEpoch = 3 val leaderEpoch = 5 val topicId = Uuid.randomUuid() - val replicas = Array(brokerId, brokerId + 1) - val leaderRegistration = new PartitionRegistration.Builder() + val replicas = List[Integer](brokerId, brokerId + 1).asJava + val leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(replicas) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - partition.makeLeader(leaderRegistration, isNew = false, offsetCheckpoints, Some(topicId)) + .setIsNew(false) + partition.makeFollower(leaderState, offsetCheckpoints, Some(topicId)) checkTopicId(topicId, partition) @@ -2814,10 +2776,10 @@ class PartitionTest extends AbstractPartitionTest { // Calling makeFollower with a new topic ID should not overwrite the old topic ID. We should get an InconsistentTopicIdException. // This scenario should not occur, since the topic ID check will fail. - assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeFollower(leaderRegistration, isNew = false, offsetCheckpoints, Some(Uuid.randomUuid()))) + assertThrows(classOf[InconsistentTopicIdException], () => partition2.makeFollower(leaderState, offsetCheckpoints, Some(Uuid.randomUuid()))) // Calling makeFollower with no topic ID should not overwrite the old topic ID. We should get the original log. - partition2.makeFollower(leaderRegistration, isNew = false, offsetCheckpoints, None) + partition2.makeFollower(leaderState, offsetCheckpoints, None) checkTopicId(topicId, partition2) } @@ -2856,20 +2818,23 @@ class PartitionTest extends AbstractPartitionTest { @Test def testUnderReplicatedPartitionsCorrectSemantics(): Unit = { - val replicas = Array(brokerId, brokerId + 1, brokerId + 2) - val isr = Array(brokerId, brokerId + 1) - val leaderRegistrationBuilder = new PartitionRegistration.Builder() + val controllerEpoch = 3 + val replicas = List[Integer](brokerId, brokerId + 1, brokerId + 2).asJava + val isr = List[Integer](brokerId, brokerId + 1).asJava + + var leaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(6) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - partition.makeLeader(leaderRegistrationBuilder.build(), isNew = false, offsetCheckpoints, None) + .setIsNew(false) + partition.makeLeader(leaderState, offsetCheckpoints, None) assertTrue(partition.isUnderReplicated) - partition.makeLeader(leaderRegistrationBuilder.setIsr(replicas).build(), isNew = false, offsetCheckpoints, None) + leaderState = leaderState.setIsr(replicas) + partition.makeLeader(leaderState, offsetCheckpoints, None) assertFalse(partition.isUnderReplicated) } @@ -3053,22 +3018,23 @@ class PartitionTest extends AbstractPartitionTest { @Test def testDoNotResetReplicaStateIfLeaderEpochIsNotBumped(): Unit = { + val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = Array(leaderId, followerId) + val replicas = List(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val LeaderRegistrationBuilder = new PartitionRegistration.Builder() + val initialLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leaderId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(Array(leaderId)) + .setIsr(List(leaderId).map(Int.box).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3077,8 +3043,8 @@ class PartitionTest extends AbstractPartitionTest { // in the ISR. assertReplicaState(partition, followerId, lastCaughtUpTimeMs = 0L, - logStartOffset = UnifiedLog.UNKNOWN_OFFSET, - logEndOffset = UnifiedLog.UNKNOWN_OFFSET + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset ) // Follower fetches and updates its replica state. @@ -3092,7 +3058,16 @@ class PartitionTest extends AbstractPartitionTest { // makeLeader is called again with the same leader epoch but with // a newer partition epoch. This can happen in KRaft when a partition // is reassigned. The leader epoch is not bumped when we add replicas. - assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(2).build(), isNew = false, offsetCheckpoints, Some(topicId))) + val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(leaderId) + .setLeaderEpoch(leaderEpoch) + .setIsr(List(leaderId).map(Int.box).asJava) + .setPartitionEpoch(2) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3107,21 +3082,23 @@ class PartitionTest extends AbstractPartitionTest { @Test def testDoNotUpdateEpochStartOffsetIfLeaderEpochIsNotBumped(): Unit = { + val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = Array(leaderId, followerId) + val replicas = List(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val LeaderRegistrationBuilder = new PartitionRegistration.Builder() + val initialLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leaderId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(Array(leaderId)) + .setIsr(List(leaderId).map(Int.box).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3134,11 +3111,20 @@ class PartitionTest extends AbstractPartitionTest { leaderLog.appendAsLeader(MemoryRecords.withRecords(0L, Compression.NONE, 0, new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k1".getBytes, "v1".getBytes) - ), leaderEpoch) + ), leaderEpoch = leaderEpoch) // makeLeader is called again with the same leader epoch but with // a newer partition epoch. - assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(2).build(), isNew = false, offsetCheckpoints, Some(topicId))) + val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(leaderId) + .setLeaderEpoch(leaderEpoch) + .setIsr(List(leaderId).map(Int.box).asJava) + .setPartitionEpoch(2) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) assertEquals(Set(leaderId), partition.partitionState.isr) @@ -3148,114 +3134,141 @@ class PartitionTest extends AbstractPartitionTest { @Test def testIgnoreLeaderPartitionStateChangeWithOlderPartitionEpoch(): Unit = { + val controllerEpoch = 3 val leaderId = brokerId - val replicas = Array(leaderId) + val replicas = List(leaderId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val LeaderRegistrationBuilder = new PartitionRegistration.Builder() + val initialLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(leaderId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(Array(leaderId)) + .setIsr(List(leaderId).map(Int.box).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) // makeLeader is called again with the same leader epoch but with // a older partition epoch. - assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setPartitionEpoch(0).build(), isNew = false, offsetCheckpoints, Some(topicId))) + val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(leaderId) + .setLeaderEpoch(leaderEpoch) + .setIsr(List(leaderId).map(Int.box).asJava) + .setPartitionEpoch(0) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertFalse(partition.makeLeader(updatedLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) } @Test def testIgnoreFollowerPartitionStateChangeWithOlderPartitionEpoch(): Unit = { + val controllerEpoch = 3 val leaderId = brokerId val followerId = brokerId + 1 - val replicas = Array(leaderId, followerId) + val replicas = List(leaderId, followerId) val leaderEpoch = 8 val topicId = Uuid.randomUuid() - val LeaderRegistrationBuilder = new PartitionRegistration.Builder() + val initialFollowerState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(followerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) - .setIsr(Array(leaderId)) + .setIsr(List(leaderId, followerId).map(Int.box).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - assertTrue(partition.makeLeader(LeaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertTrue(partition.makeFollower(initialFollowerState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) // makeLeader is called again with the same leader epoch but with // a older partition epoch. - assertFalse(partition.makeLeader(LeaderRegistrationBuilder.setIsr(Array(leaderId, followerId)).build(), isNew = true, offsetCheckpoints, Some(topicId))) + val updatedFollowerState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(followerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(List(leaderId, followerId).map(Int.box).asJava) + .setPartitionEpoch(1) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertFalse(partition.makeFollower(updatedFollowerState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(leaderEpoch, partition.getLeaderEpoch) } @Test def testFollowerShouldNotHaveAnyRemoteReplicaStates(): Unit = { + val controllerEpoch = 3 val localReplica = brokerId val remoteReplica1 = brokerId + 1 val remoteReplica2 = brokerId + 2 - val replicas = Array(localReplica, remoteReplica1, remoteReplica2) + val replicas = List(localReplica, remoteReplica1, remoteReplica2) val topicId = Uuid.randomUuid() // The local replica is the leader. - val leaderRegistrationBuilder = new PartitionRegistration.Builder() + val initialLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(localReplica) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(1) - .setIsr(replicas) + .setIsr(replicas.map(Int.box).asJava) .setPartitionEpoch(1) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - assertTrue(partition.makeLeader(leaderRegistrationBuilder.build(), isNew = true, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(true) + + assertTrue(partition.makeLeader(initialLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(1, partition.getPartitionEpoch) assertEquals(1, partition.getLeaderEpoch) assertEquals(Some(localReplica), partition.leaderReplicaIdOpt) assertEquals(replicas.toSet, partition.partitionState.isr) assertEquals(Seq(remoteReplica1, remoteReplica2), partition.remoteReplicas.map(_.brokerId).toSeq) - assertEquals(replicas.toSeq, partition.assignmentState.replicas) + assertEquals(replicas, partition.assignmentState.replicas) // The local replica becomes a follower. - val updatedLeaderRegistration = leaderRegistrationBuilder + val updatedLeaderState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(remoteReplica1) .setLeaderEpoch(2) + .setIsr(replicas.map(Int.box).asJava) .setPartitionEpoch(2) - .build() - assertTrue(partition.makeFollower(updatedLeaderRegistration, isNew = false, offsetCheckpoints, Some(topicId))) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(false) + + assertTrue(partition.makeFollower(updatedLeaderState, offsetCheckpoints, Some(topicId))) assertEquals(2, partition.getPartitionEpoch) assertEquals(2, partition.getLeaderEpoch) assertEquals(Some(remoteReplica1), partition.leaderReplicaIdOpt) assertEquals(Set.empty, partition.partitionState.isr) assertEquals(Seq.empty, partition.remoteReplicas.map(_.brokerId).toSeq) - assertEquals(replicas.toSeq, partition.assignmentState.replicas) + assertEquals(replicas, partition.assignmentState.replicas) } @Test def testAddAndRemoveListeners(): Unit = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val replicas = Array(brokerId, brokerId + 1) - val isr = replicas - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(isr) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) + val listener1 = new MockPartitionListener() val listener2 = new MockPartitionListener() @@ -3312,19 +3325,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testAddListenerFailsWhenPartitionIsDeleted(): Unit = { - val replicas = Array(brokerId, brokerId + 1) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(replicas) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) partition.delete() @@ -3335,19 +3348,17 @@ class PartitionTest extends AbstractPartitionTest { def testPartitionListenerWhenLogOffsetsChanged(): Unit = { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val replicas = Array(brokerId, brokerId + 1) - val isr = Array(brokerId, brokerId + 1) - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(isr) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3370,26 +3381,26 @@ class PartitionTest extends AbstractPartitionTest { listener.verify(expectedHighWatermark = partition.localLogOrException.logEndOffset) - partition.truncateFullyAndStartAt(0L, isFuture = false) + partition.truncateFullyAndStartAt(0L, false) listener.verify(expectedHighWatermark = 0L) } @Test def testPartitionListenerWhenPartitionFailed(): Unit = { - val replicas = Array(brokerId, brokerId + 1) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(replicas) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3401,19 +3412,19 @@ class PartitionTest extends AbstractPartitionTest { @Test def testPartitionListenerWhenPartitionIsDeleted(): Unit = { - val replicas = Array(brokerId, brokerId + 1) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, topicId = topicId) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(0) - .setIsr(replicas) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3429,20 +3440,17 @@ class PartitionTest extends AbstractPartitionTest { partition.createLogIfNotExists(isNew = true, isFutureReplica = false, offsetCheckpoints, topicId = topicId) assertTrue(partition.log.isDefined) - val replicas = Array(brokerId, brokerId + 1) - val isr = replicas - val epoch = 0 - addBrokerEpochToMockMetadataCache(metadataCache, replicas) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(epoch) - .setIsr(isr) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .setPartitionEpoch(1) - .build() - partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None) + partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(0) + .setLeader(brokerId) + .setLeaderEpoch(0) + .setIsr(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setReplicas(List(brokerId, brokerId + 1).map(Int.box).asJava) + .setPartitionEpoch(1) + .setIsNew(true), + offsetCheckpoints, + topicId = None) val listener = new MockPartitionListener() assertTrue(partition.maybeAddListener(listener)) @@ -3469,8 +3477,7 @@ class PartitionTest extends AbstractPartitionTest { partition.appendRecordsToFollowerOrFutureReplica( records = records, - isFuture = true, - partitionLeaderEpoch = epoch + isFuture = true ) listener.verify() @@ -3496,26 +3503,25 @@ class PartitionTest extends AbstractPartitionTest { @Test def testMaybeStartTransactionVerification(): Unit = { + val controllerEpoch = 0 val leaderEpoch = 5 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas val producerId = 22L partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val partitionRegistration = new PartitionRegistration.Builder() + assertTrue(partition.makeLeader(new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - assertTrue(partition.makeLeader(partitionRegistration, isNew = true, offsetCheckpoints, None), "Expected become leader transition to succeed") + .setIsNew(true), offsetCheckpoints, None), "Expected become leader transition to succeed") assertEquals(leaderEpoch, partition.getLeaderEpoch) - val idempotentRecords = createIdempotentRecords(util.List.of( + val idempotentRecords = createIdempotentRecords(List( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), @@ -3523,7 +3529,7 @@ class PartitionTest extends AbstractPartitionTest { producerId = producerId) partition.appendRecordsToLeader(idempotentRecords, origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching) - def transactionRecords() = createTransactionalRecords(util.List.of( + def transactionRecords() = createTransactionalRecords(List( new SimpleRecord("k1".getBytes, "v1".getBytes), new SimpleRecord("k2".getBytes, "v2".getBytes), new SimpleRecord("k3".getBytes, "v3".getBytes)), @@ -3535,7 +3541,7 @@ class PartitionTest extends AbstractPartitionTest { assertThrows(classOf[InvalidTxnStateException], () => partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching)) // Before appendRecordsToLeader is called, ReplicaManager will call maybeStartTransactionVerification. We should get a non-sentinel VerificationGuard. - val verificationGuard = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) + val verificationGuard = partition.maybeStartTransactionVerification(producerId, 3, 0, true) assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) // With the wrong VerificationGuard, append should fail. @@ -3543,21 +3549,22 @@ class PartitionTest extends AbstractPartitionTest { origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching, new VerificationGuard())) // We should return the same VerificationGuard when we still need to verify. Append should proceed. - val verificationGuard2 = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) + val verificationGuard2 = partition.maybeStartTransactionVerification(producerId, 3, 0, true) assertEquals(verificationGuard, verificationGuard2) partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching, verificationGuard) // We should no longer need a VerificationGuard. Future appends without VerificationGuard will also succeed. - val verificationGuard3 = partition.maybeStartTransactionVerification(producerId, 3, 0, supportsEpochBump = true) + val verificationGuard3 = partition.maybeStartTransactionVerification(producerId, 3, 0, true) assertEquals(VerificationGuard.SENTINEL, verificationGuard3) partition.appendRecordsToLeader(transactionRecords(), origin = AppendOrigin.CLIENT, requiredAcks = 1, RequestLocal.withThreadConfinedCaching) } private def makeLeader( topicId: Option[Uuid], + controllerEpoch: Int, leaderEpoch: Int, - isr: Array[Int], - replicas: Array[Int], + isr: Seq[Int], + replicas: Seq[Int], partitionEpoch: Int, isNew: Boolean, partition: Partition = partition @@ -3568,16 +3575,18 @@ class PartitionTest extends AbstractPartitionTest { offsetCheckpoints, topicId ) - val partitionRegistration = new PartitionRegistration.Builder() - .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) - .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(partitionEpoch) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - val newLeader = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, topicId) + val newLeader = partition.makeLeader( + new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) + .setLeader(brokerId) + .setLeaderEpoch(leaderEpoch) + .setIsr(isr.map(Int.box).asJava) + .setPartitionEpoch(partitionEpoch) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(isNew), + offsetCheckpoints, + topicId + ) assertTrue(partition.isLeader) assertFalse(partition.partitionState.isInflight) assertEquals(topicId, partition.topicId) @@ -3598,7 +3607,7 @@ class PartitionTest extends AbstractPartitionTest { private class SlowLog( log: UnifiedLog, - topicId: Optional[Uuid], + topicId: Option[Uuid], logStartOffset: Long, localLog: LocalLog, leaderEpochCache: LeaderEpochFileCache, @@ -3611,13 +3620,12 @@ class PartitionTest extends AbstractPartitionTest { log.producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - topicId, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) { + _topicId = topicId, + keepPartitionMetadataFile = true) { - override def appendAsFollower(records: MemoryRecords, epoch: Int): LogAppendInfo = { + override def appendAsFollower(records: MemoryRecords): LogAppendInfo = { appendSemaphore.acquire() - val appendInfo = super.appendAsFollower(records, epoch) + val appendInfo = super.appendAsFollower(records) appendInfo } } @@ -3722,9 +3730,9 @@ class PartitionTest extends AbstractPartitionTest { ) } - private def addBrokerEpochToMockMetadataCache(metadataCache: MetadataCache, brokers: Array[Int]): Unit = { + private def addBrokerEpochToMockMetadataCache(kRaftMetadataCache: KRaftMetadataCache, brokers: List[Int]): Unit = { brokers.foreach { broker => - when(metadataCache.getAliveBrokerEpoch(broker)).thenReturn(Optional.of(defaultBrokerEpoch(broker))) + when(kRaftMetadataCache.getAliveBrokerEpoch(broker)).thenReturn(Option(defaultBrokerEpoch(broker))) } } @@ -3748,29 +3756,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(true) // When - val res = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @ParameterizedTest @@ -3793,29 +3801,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(true) // When - val res = partition.makeFollower(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @ParameterizedTest @@ -3838,29 +3846,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(false) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeLeader(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @ParameterizedTest @@ -3883,29 +3891,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.random() when(spyLogManager.hasOfflineLogDirs()).thenReturn(false) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeFollower(partitionRegistration, isNew = isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @ParameterizedTest @@ -3928,29 +3936,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.UNASSIGNED when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeLeader(partitionRegistration, isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeLeader(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @@ -3974,29 +3982,29 @@ class PartitionTest extends AbstractPartitionTest { metadataCache, spyLogManager, alterPartitionManager) + val controllerEpoch = 0 val leaderEpoch = 1 - val replicas = Array(brokerId, brokerId + 1) + val replicas = List[Integer](brokerId, brokerId + 1).asJava val isr = replicas - val partitionRegistration = new PartitionRegistration.Builder() + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setControllerEpoch(controllerEpoch) .setLeader(brokerId) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) .setLeaderEpoch(leaderEpoch) .setIsr(isr) .setPartitionEpoch(1) .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() + .setIsNew(isNew) val topicId = Uuid.randomUuid() val targetDirectory = DirectoryId.UNASSIGNED when(spyLogManager.hasOfflineLogDirs()).thenReturn(true) when(spyLogManager.onlineLogDirId(targetDirectory)).thenReturn(false) // When - val res = partition.makeFollower(partitionRegistration, isNew, offsetCheckpoints, Some(topicId), Some(targetDirectory)) + val res = partition.makeFollower(leaderAndIsrPartitionState, offsetCheckpoints, Some(topicId), Some(targetDirectory)) // Then assertTrue(res) - verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Optional.of(topicId), Some(targetDirectory)) + verify(spyLogManager, times(1)).getOrCreateLog(topicPartition, isNew, isFuture = false, Some(topicId), Some(targetDirectory)) } @Test diff --git a/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala b/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala new file mode 100644 index 0000000000000..55a49f31cbf7e --- /dev/null +++ b/core/src/test/scala/unit/kafka/cluster/ReplicaTest.scala @@ -0,0 +1,349 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.cluster + +import kafka.log.UnifiedLog +import kafka.server.metadata.KRaftMetadataCache +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.server.util.MockTime +import org.apache.kafka.storage.internals.log.LogOffsetMetadata +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} +import org.junit.jupiter.api.{BeforeEach, Test} +import org.mockito.Mockito.{mock, when} + +object ReplicaTest { + val BrokerId: Int = 0 + val Partition: TopicPartition = new TopicPartition("foo", 0) + val ReplicaLagTimeMaxMs: Long = 30000 +} + +class ReplicaTest { + import ReplicaTest._ + + val time = new MockTime() + var replica: Replica = _ + + @BeforeEach + def setup(): Unit = { + val metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Option(1L)) + replica = new Replica(BrokerId, Partition, metadataCache) + } + + private def assertReplicaState( + logStartOffset: Long, + logEndOffset: Long, + lastCaughtUpTimeMs: Long, + lastFetchLeaderLogEndOffset: Long, + lastFetchTimeMs: Long, + brokerEpoch: Option[Long] = Option[Long](1L) + ): Unit = { + val replicaState = replica.stateSnapshot + assertEquals(logStartOffset, replicaState.logStartOffset, + "Unexpected Log Start Offset") + assertEquals(logEndOffset, replicaState.logEndOffset, + "Unexpected Log End Offset") + assertEquals(lastCaughtUpTimeMs, replicaState.lastCaughtUpTimeMs, + "Unexpected Last Caught Up Time") + assertEquals(lastFetchLeaderLogEndOffset, replicaState.lastFetchLeaderLogEndOffset, + "Unexpected Last Fetch Leader Log End Offset") + assertEquals(lastFetchTimeMs, replicaState.lastFetchTimeMs, + "Unexpected Last Fetch Time") + assertEquals(brokerEpoch, replicaState.brokerEpoch, + "Broker Epoch Mismatch") + } + + def assertReplicaStateDoesNotChange( + op: => Unit + ): Unit = { + val previousState = replica.stateSnapshot + + op + + assertReplicaState( + logStartOffset = previousState.logStartOffset, + logEndOffset = previousState.logEndOffset, + lastCaughtUpTimeMs = previousState.lastCaughtUpTimeMs, + lastFetchLeaderLogEndOffset = previousState.lastFetchLeaderLogEndOffset, + lastFetchTimeMs = previousState.lastFetchTimeMs + ) + } + + private def updateFetchState( + followerFetchOffset: Long, + followerStartOffset: Long, + leaderEndOffset: Long + ): Long = { + val currentTimeMs = time.milliseconds() + replica.updateFetchStateOrThrow( + followerFetchOffsetMetadata = new LogOffsetMetadata(followerFetchOffset), + followerStartOffset = followerStartOffset, + followerFetchTimeMs = currentTimeMs, + leaderEndOffset = leaderEndOffset, + brokerEpoch = 1L + ) + currentTimeMs + } + + private def resetReplicaState( + leaderEndOffset: Long, + isNewLeader: Boolean, + isFollowerInSync: Boolean + ): Long = { + val currentTimeMs = time.milliseconds() + replica.resetReplicaState( + currentTimeMs = currentTimeMs, + leaderEndOffset = leaderEndOffset, + isNewLeader = isNewLeader, + isFollowerInSync = isFollowerInSync + ) + currentTimeMs + } + + private def isCaughtUp( + leaderEndOffset: Long + ): Boolean = { + replica.stateSnapshot.isCaughtUp( + leaderEndOffset = leaderEndOffset, + currentTimeMs = time.milliseconds(), + replicaMaxLagMs = ReplicaLagTimeMaxMs + ) + } + + @Test + def testInitialState(): Unit = { + assertReplicaState( + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset, + lastCaughtUpTimeMs = 0L, + lastFetchLeaderLogEndOffset = 0L, + lastFetchTimeMs = 0L, + brokerEpoch = Option.empty + ) + } + + @Test + def testUpdateFetchState(): Unit = { + val fetchTimeMs1 = updateFetchState( + followerFetchOffset = 5L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + assertReplicaState( + logStartOffset = 1L, + logEndOffset = 5L, + lastCaughtUpTimeMs = 0L, + lastFetchLeaderLogEndOffset = 10L, + lastFetchTimeMs = fetchTimeMs1 + ) + + val fetchTimeMs2 = updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 2L, + leaderEndOffset = 15L + ) + + assertReplicaState( + logStartOffset = 2L, + logEndOffset = 10L, + lastCaughtUpTimeMs = fetchTimeMs1, + lastFetchLeaderLogEndOffset = 15L, + lastFetchTimeMs = fetchTimeMs2 + ) + + val fetchTimeMs3 = updateFetchState( + followerFetchOffset = 15L, + followerStartOffset = 3L, + leaderEndOffset = 15L + ) + + assertReplicaState( + logStartOffset = 3L, + logEndOffset = 15L, + lastCaughtUpTimeMs = fetchTimeMs3, + lastFetchLeaderLogEndOffset = 15L, + lastFetchTimeMs = fetchTimeMs3 + ) + } + + @Test + def testResetReplicaStateWhenLeaderIsReelectedAndReplicaIsInSync(): Unit = { + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + val resetTimeMs1 = resetReplicaState( + leaderEndOffset = 11L, + isNewLeader = false, + isFollowerInSync = true + ) + + assertReplicaState( + logStartOffset = 1L, + logEndOffset = 10L, + lastCaughtUpTimeMs = resetTimeMs1, + lastFetchLeaderLogEndOffset = 11L, + lastFetchTimeMs = resetTimeMs1 + ) + } + + @Test + def testResetReplicaStateWhenLeaderIsReelectedAndReplicaIsNotInSync(): Unit = { + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + resetReplicaState( + leaderEndOffset = 11L, + isNewLeader = false, + isFollowerInSync = false + ) + + assertReplicaState( + logStartOffset = 1L, + logEndOffset = 10L, + lastCaughtUpTimeMs = 0L, + lastFetchLeaderLogEndOffset = 11L, + lastFetchTimeMs = 0L + ) + } + + @Test + def testResetReplicaStateWhenNewLeaderIsElectedAndReplicaIsInSync(): Unit = { + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + val resetTimeMs1 = resetReplicaState( + leaderEndOffset = 11L, + isNewLeader = true, + isFollowerInSync = true + ) + + assertReplicaState( + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset, + lastCaughtUpTimeMs = resetTimeMs1, + lastFetchLeaderLogEndOffset = UnifiedLog.UnknownOffset, + lastFetchTimeMs = 0L, + brokerEpoch = Option.empty + ) + } + + @Test + def testResetReplicaStateWhenNewLeaderIsElectedAndReplicaIsNotInSync(): Unit = { + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + resetReplicaState( + leaderEndOffset = 11L, + isNewLeader = true, + isFollowerInSync = false + ) + + assertReplicaState( + logStartOffset = UnifiedLog.UnknownOffset, + logEndOffset = UnifiedLog.UnknownOffset, + lastCaughtUpTimeMs = 0L, + lastFetchLeaderLogEndOffset = UnifiedLog.UnknownOffset, + lastFetchTimeMs = 0L, + brokerEpoch = Option.empty + ) + } + + @Test + def testIsCaughtUpWhenReplicaIsCaughtUpToLogEnd(): Unit = { + assertFalse(isCaughtUp(leaderEndOffset = 10L)) + + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + assertTrue(isCaughtUp(leaderEndOffset = 10L)) + + time.sleep(ReplicaLagTimeMaxMs + 1) + + assertTrue(isCaughtUp(leaderEndOffset = 10L)) + } + + @Test + def testIsCaughtUpWhenReplicaIsNotCaughtUpToLogEnd(): Unit = { + assertFalse(isCaughtUp(leaderEndOffset = 10L)) + + updateFetchState( + followerFetchOffset = 5L, + followerStartOffset = 1L, + leaderEndOffset = 10L + ) + + assertFalse(isCaughtUp(leaderEndOffset = 10L)) + + updateFetchState( + followerFetchOffset = 10L, + followerStartOffset = 1L, + leaderEndOffset = 15L + ) + + assertTrue(isCaughtUp(leaderEndOffset = 16L)) + + time.sleep(ReplicaLagTimeMaxMs + 1) + + assertFalse(isCaughtUp(leaderEndOffset = 16L)) + } + + @Test + def testFenceStaleUpdates(): Unit = { + val metadataCache = mock(classOf[KRaftMetadataCache]) + when(metadataCache.getAliveBrokerEpoch(BrokerId)).thenReturn(Option(2L)) + + val replica = new Replica(BrokerId, Partition, metadataCache) + replica.updateFetchStateOrThrow( + followerFetchOffsetMetadata = new LogOffsetMetadata(5L), + followerStartOffset = 1L, + followerFetchTimeMs = 1, + leaderEndOffset = 10L, + brokerEpoch = 2L + ) + assertThrows(classOf[NotLeaderOrFollowerException], () => replica.updateFetchStateOrThrow( + followerFetchOffsetMetadata = new LogOffsetMetadata(5L), + followerStartOffset = 2L, + followerFetchTimeMs = 3, + leaderEndOffset = 10L, + brokerEpoch = 1L + )) + replica.updateFetchStateOrThrow( + followerFetchOffsetMetadata = new LogOffsetMetadata(5L), + followerStartOffset = 2L, + followerFetchTimeMs = 4, + leaderEndOffset = 10L, + brokerEpoch = -1L + ) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala index 8f10811091d70..b574b3dbb0cfa 100644 --- a/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/AbstractCoordinatorConcurrencyTest.scala @@ -20,24 +20,24 @@ package kafka.coordinator import java.util.concurrent.{ConcurrentHashMap, ExecutorService, Executors} import java.util.{Collections, Random} import java.util.concurrent.atomic.AtomicInteger +import java.util.concurrent.locks.Lock import kafka.coordinator.AbstractCoordinatorConcurrencyTest._ import kafka.cluster.Partition -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} import kafka.server.QuotaFactory.QuotaManagers -import kafka.server._ +import kafka.server.{KafkaConfig, _} import kafka.utils._ -import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, RecordValidationStats} import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.server.ActionQueue import org.apache.kafka.server.common.RequestLocal -import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets, TopicPartitionOperationKey} -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation +import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, TopicPartitionOperationKey} import org.apache.kafka.server.util.timer.{MockTimer, Timer} import org.apache.kafka.server.util.{MockScheduler, MockTime, Scheduler} -import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import org.junit.jupiter.api.{AfterEach, BeforeEach} import org.mockito.Mockito.{mock, when, withSettings} @@ -173,6 +173,7 @@ object AbstractCoordinatorConcurrencyTest { val producePurgatory: DelayedOperationPurgatory[DelayedProduce], val delayedFetchPurgatoryParam: DelayedOperationPurgatory[DelayedFetch], val delayedDeleteRecordsPurgatoryParam: DelayedOperationPurgatory[DelayedDeleteRecords], + val delayedElectLeaderPurgatoryParam: DelayedOperationPurgatory[DelayedElectLeader], val delayedRemoteFetchPurgatoryParam: DelayedOperationPurgatory[DelayedRemoteFetch], val delayedRemoteListOffsetsPurgatoryParam: DelayedOperationPurgatory[DelayedRemoteListOffsets]) extends ReplicaManager( @@ -183,14 +184,15 @@ object AbstractCoordinatorConcurrencyTest { logManager, None, quotaManagers, - mock(classOf[MetadataCache]), + null, null, null, delayedProducePurgatoryParam = Some(producePurgatory), delayedFetchPurgatoryParam = Some(delayedFetchPurgatoryParam), delayedDeleteRecordsPurgatoryParam = Some(delayedDeleteRecordsPurgatoryParam), delayedRemoteFetchPurgatoryParam = Some(delayedRemoteFetchPurgatoryParam), - delayedRemoteListOffsetsPurgatoryParam = Some(delayedRemoteListOffsetsPurgatoryParam)) { + delayedRemoteListOffsetsPurgatoryParam = Some(delayedRemoteListOffsetsPurgatoryParam), + threadNamePrefix = Option(this.getClass.getName)) { @volatile var logs: mutable.Map[TopicPartition, (UnifiedLog, Long)] = _ @@ -213,10 +215,12 @@ object AbstractCoordinatorConcurrencyTest { requiredAcks: Short, internalTopicsAllowed: Boolean, origin: AppendOrigin, - entriesPerPartition: Map[TopicIdPartition, MemoryRecords], - responseCallback: Map[TopicIdPartition, PartitionResponse] => Unit, - processingStatsCallback: Map[TopicIdPartition, RecordValidationStats] => Unit = _ => (), + entriesPerPartition: Map[TopicPartition, MemoryRecords], + responseCallback: Map[TopicPartition, PartitionResponse] => Unit, + delayedProduceLock: Option[Lock] = None, + processingStatsCallback: Map[TopicPartition, RecordValidationStats] => Unit = _ => (), requestLocal: RequestLocal = RequestLocal.noCaching, + actionQueue: ActionQueue = null, verificationGuards: Map[TopicPartition, VerificationGuard] = Map.empty): Unit = { if (entriesPerPartition.isEmpty) @@ -225,7 +229,7 @@ object AbstractCoordinatorConcurrencyTest { case (tp, _) => (tp, ProducePartitionStatus(0L, new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) }) - val delayedProduce = new DelayedProduce(5, produceMetadata, this, responseCallback) { + val delayedProduce = new DelayedProduce(5, produceMetadata, this, responseCallback, delayedProduceLock) { // Complete produce requests after a few attempts to trigger delayed produce from different threads val completeAttempts = new AtomicInteger override def tryComplete(): Boolean = { @@ -288,8 +292,10 @@ object AbstractCoordinatorConcurrencyTest { "Fetch", timer, 0, 1000, false, true) val mockDeleteRecordsPurgatory = new DelayedOperationPurgatory[DelayedDeleteRecords]( "DeleteRecords", timer, 0, 1000, false, true) + val mockElectLeaderPurgatory = new DelayedOperationPurgatory[DelayedElectLeader]( + "ElectLeader", timer, 0, 1000, false, true) new TestReplicaManager(config, time, scheduler, logManager, quotaManagers, watchKeys, producePurgatory, - mockFetchPurgatory, mockDeleteRecordsPurgatory, mockRemoteFetchPurgatory, + mockFetchPurgatory, mockDeleteRecordsPurgatory, mockElectLeaderPurgatory, mockRemoteFetchPurgatory, mockRemoteListOffsetsPurgatory) } } diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala new file mode 100644 index 0000000000000..dc4bbc830cd27 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorLoaderImplTest.scala @@ -0,0 +1,705 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import kafka.log.UnifiedLog +import kafka.server.ReplicaManager +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.errors.NotLeaderOrFollowerException +import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, FileRecords, MemoryRecords, RecordBatch, SimpleRecord} +import org.apache.kafka.common.requests.TransactionResult +import org.apache.kafka.common.utils.{MockTime, Time} +import org.apache.kafka.coordinator.common.runtime.Deserializer.UnknownRecordTypeException +import org.apache.kafka.coordinator.common.runtime.{CoordinatorPlayback, Deserializer} +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogOffsetMetadata} +import org.apache.kafka.test.TestUtils.assertFutureThrows +import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull} +import org.junit.jupiter.api.{Test, Timeout} +import org.mockito.ArgumentMatchers.anyLong +import org.mockito.{ArgumentCaptor, ArgumentMatchers} +import org.mockito.Mockito.{mock, times, verify, when} +import org.mockito.invocation.InvocationOnMock + +import java.nio.ByteBuffer +import java.nio.charset.Charset +import java.util.concurrent.{CountDownLatch, TimeUnit} +import scala.util.Using + +class StringKeyValueDeserializer extends Deserializer[(String, String)] { + override def deserialize(key: ByteBuffer, value: ByteBuffer): (String, String) = { + ( + Charset.defaultCharset().decode(key).toString, + Charset.defaultCharset().decode(value).toString + ) + } +} + +@Timeout(60) +class CoordinatorLoaderImplTest { + @Test + def testNonexistentPartition(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = mock(classOf[Deserializer[(String, String)]]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(None) + + val result = loader.load(tp, coordinator) + assertFutureThrows(result, classOf[NotLeaderOrFollowerException]) + } + } + + @Test + def testLoadingIsRejectedWhenClosed(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = mock(classOf[Deserializer[(String, String)]]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + loader.close() + + val result = loader.load(tp, coordinator) + assertFutureThrows(result, classOf[RuntimeException]) + } + } + + @Test + def testLoading(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(9L)) + when(log.highWatermark).thenReturn(0L) + + val readResult1 = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult1) + + val readResult2 = logReadResult(startOffset = 2, records = Seq( + new SimpleRecord("k3".getBytes, "v3".getBytes), + new SimpleRecord("k4".getBytes, "v4".getBytes), + new SimpleRecord("k5".getBytes, "v5".getBytes) + )) + + when(log.read( + startOffset = 2L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult2) + + val readResult3 = logReadResult(startOffset = 5, producerId = 100L, producerEpoch = 5, records = Seq( + new SimpleRecord("k6".getBytes, "v6".getBytes), + new SimpleRecord("k7".getBytes, "v7".getBytes) + )) + + when(log.read( + startOffset = 5L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult3) + + val readResult4 = logReadResult( + startOffset = 7, + producerId = 100L, + producerEpoch = 5, + controlRecordType = ControlRecordType.COMMIT + ) + + when(log.read( + startOffset = 7L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult4) + + val readResult5 = logReadResult( + startOffset = 8, + producerId = 500L, + producerEpoch = 10, + controlRecordType = ControlRecordType.ABORT + ) + + when(log.read( + startOffset = 8L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult5) + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) + verify(coordinator).replay(5L, 100L, 5.toShort, ("k6", "v6")) + verify(coordinator).replay(6L, 100L, 5.toShort, ("k7", "v7")) + verify(coordinator).replayEndTransactionMarker(100L, 5, TransactionResult.COMMIT) + verify(coordinator).replayEndTransactionMarker(500L, 10, TransactionResult.ABORT) + verify(coordinator).updateLastWrittenOffset(2) + verify(coordinator).updateLastWrittenOffset(5) + verify(coordinator).updateLastWrittenOffset(7) + verify(coordinator).updateLastWrittenOffset(8) + verify(coordinator).updateLastCommittedOffset(0) + } + } + + @Test + def testLoadingStoppedWhenClosed(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(100L)) + + val readResult = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + val latch = new CountDownLatch(1) + when(log.read( + startOffset = ArgumentMatchers.anyLong(), + maxLength = ArgumentMatchers.eq(1000), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true) + )).thenAnswer { _ => + latch.countDown() + readResult + } + + val result = loader.load(tp, coordinator) + latch.await(10, TimeUnit.SECONDS) + loader.close() + + val ex = assertFutureThrows(result, classOf[RuntimeException]) + assertEquals("Coordinator loader is closed.", ex.getMessage) + } + } + + @Test + def testUnknownRecordTypeAreIgnored(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = mock(classOf[StringKeyValueDeserializer]) + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(2L)) + + val readResult = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult) + + when(serde.deserialize(ArgumentMatchers.any(), ArgumentMatchers.any())) + .thenThrow(new UnknownRecordTypeException(1)) + .thenReturn(("k2", "v2")) + + loader.load(tp, coordinator).get(10, TimeUnit.SECONDS) + + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) + } + } + + @Test + def testDeserializationErrorFailsTheLoading(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = mock(classOf[StringKeyValueDeserializer]) + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(2L)) + + val readResult = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult) + + when(serde.deserialize(ArgumentMatchers.any(), ArgumentMatchers.any())) + .thenThrow(new RuntimeException("Error!")) + + val ex = assertFutureThrows(loader.load(tp, coordinator), classOf[RuntimeException]) + + assertEquals(s"Deserializing record DefaultRecord(offset=0, timestamp=-1, key=2 bytes, value=2 bytes) from $tp failed due to: Error!", ex.getMessage) + } + } + + @Test + def testLoadGroupAndOffsetsWithCorruptedLog(): Unit = { + // Simulate a case where startOffset < endOffset but log is empty. This could theoretically happen + // when all the records are expired and the active segment is truncated or when the partition + // is accidentally corrupted. + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = mock(classOf[StringKeyValueDeserializer]) + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(10L)) + + val readResult = logReadResult(startOffset = 0, records = Seq()) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult) + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) + } + } + + @Test + def testLoadSummary(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + val time = new MockTime() + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + val startTimeMs = time.milliseconds() + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(5L)) + + val readResult1 = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenAnswer((_: InvocationOnMock) => { + time.sleep(1000) + readResult1 + }) + + val readResult2 = logReadResult(startOffset = 2, records = Seq( + new SimpleRecord("k3".getBytes, "v3".getBytes), + new SimpleRecord("k4".getBytes, "v4".getBytes), + new SimpleRecord("k5".getBytes, "v5".getBytes) + )) + + when(log.read( + startOffset = 2L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult2) + + val summary = loader.load(tp, coordinator).get(10, TimeUnit.SECONDS) + assertEquals(startTimeMs, summary.startTimeMs()) + assertEquals(startTimeMs + 1000, summary.endTimeMs()) + assertEquals(5, summary.numRecords()) + assertEquals(readResult1.records.sizeInBytes() + readResult2.records.sizeInBytes(), summary.numBytes()) + } + } + + @Test + def testUpdateLastWrittenOffsetOnBatchLoaded(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(log.highWatermark).thenReturn(0L).thenReturn(0L).thenReturn(2L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(7L)) + + val readResult1 = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult1) + + val readResult2 = logReadResult(startOffset = 2, records = Seq( + new SimpleRecord("k3".getBytes, "v3".getBytes), + new SimpleRecord("k4".getBytes, "v4".getBytes), + new SimpleRecord("k5".getBytes, "v5".getBytes) + )) + + when(log.read( + startOffset = 2L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult2) + + val readResult3 = logReadResult(startOffset = 5, records = Seq( + new SimpleRecord("k6".getBytes, "v6".getBytes), + new SimpleRecord("k7".getBytes, "v7".getBytes) + )) + + when(log.read( + startOffset = 5L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult3) + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) + verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k6", "v6")) + verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k7", "v7")) + verify(coordinator, times(0)).updateLastWrittenOffset(0) + verify(coordinator, times(1)).updateLastWrittenOffset(2) + verify(coordinator, times(1)).updateLastWrittenOffset(5) + verify(coordinator, times(1)).updateLastWrittenOffset(7) + verify(coordinator, times(1)).updateLastCommittedOffset(0) + verify(coordinator, times(1)).updateLastCommittedOffset(2) + verify(coordinator, times(0)).updateLastCommittedOffset(5) + } + } + + @Test + def testUpdateLastWrittenOffsetAndUpdateLastCommittedOffsetNoRecordsRead(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(log.highWatermark).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(0L)) + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) + + verify(coordinator, times(0)).updateLastWrittenOffset(anyLong()) + verify(coordinator, times(0)).updateLastCommittedOffset(anyLong()) + } + } + + @Test + def testUpdateLastWrittenOffsetOnBatchLoadedWhileHighWatermarkAhead(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(log.highWatermark).thenReturn(5L).thenReturn(7L).thenReturn(7L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(7L)) + + val readResult1 = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult1) + + val readResult2 = logReadResult(startOffset = 2, records = Seq( + new SimpleRecord("k3".getBytes, "v3".getBytes), + new SimpleRecord("k4".getBytes, "v4".getBytes), + new SimpleRecord("k5".getBytes, "v5".getBytes) + )) + + when(log.read( + startOffset = 2L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult2) + + val readResult3 = logReadResult(startOffset = 5, records = Seq( + new SimpleRecord("k6".getBytes, "v6".getBytes), + new SimpleRecord("k7".getBytes, "v7".getBytes) + )) + + when(log.read( + startOffset = 5L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult3) + + assertNotNull(loader.load(tp, coordinator).get(10, TimeUnit.SECONDS)) + + verify(coordinator).replay(0L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k1", "v1")) + verify(coordinator).replay(1L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k2", "v2")) + verify(coordinator).replay(2L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k3", "v3")) + verify(coordinator).replay(3L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k4", "v4")) + verify(coordinator).replay(4L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k5", "v5")) + verify(coordinator).replay(5L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k6", "v6")) + verify(coordinator).replay(6L, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, ("k7", "v7")) + verify(coordinator, times(0)).updateLastWrittenOffset(0) + verify(coordinator, times(0)).updateLastWrittenOffset(2) + verify(coordinator, times(0)).updateLastWrittenOffset(5) + verify(coordinator, times(1)).updateLastWrittenOffset(7) + verify(coordinator, times(0)).updateLastCommittedOffset(0) + verify(coordinator, times(0)).updateLastCommittedOffset(2) + verify(coordinator, times(0)).updateLastCommittedOffset(5) + verify(coordinator, times(1)).updateLastCommittedOffset(7) + } + } + + @Test + def testPartitionGoesOfflineDuringLoad(): Unit = { + val tp = new TopicPartition("foo", 0) + val replicaManager = mock(classOf[ReplicaManager]) + val serde = new StringKeyValueDeserializer + val log = mock(classOf[UnifiedLog]) + val coordinator = mock(classOf[CoordinatorPlayback[(String, String)]]) + + Using.resource(new CoordinatorLoaderImpl[(String, String)]( + time = Time.SYSTEM, + replicaManager = replicaManager, + deserializer = serde, + loadBufferSize = 1000 + )) { loader => + when(replicaManager.getLog(tp)).thenReturn(Some(log)) + when(log.logStartOffset).thenReturn(0L) + when(log.highWatermark).thenReturn(0L) + when(replicaManager.getLogEndOffset(tp)).thenReturn(Some(5L)).thenReturn(Some(-1L)) + + val readResult1 = logReadResult(startOffset = 0, records = Seq( + new SimpleRecord("k1".getBytes, "v1".getBytes), + new SimpleRecord("k2".getBytes, "v2".getBytes) + )) + + when(log.read( + startOffset = 0L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult1) + + val readResult2 = logReadResult(startOffset = 2, records = Seq( + new SimpleRecord("k3".getBytes, "v3".getBytes), + new SimpleRecord("k4".getBytes, "v4".getBytes), + new SimpleRecord("k5".getBytes, "v5".getBytes) + )) + + when(log.read( + startOffset = 2L, + maxLength = 1000, + isolation = FetchIsolation.LOG_END, + minOneMessage = true + )).thenReturn(readResult2) + + assertFutureThrows(loader.load(tp, coordinator), classOf[NotLeaderOrFollowerException]) + } + } + + private def logReadResult( + startOffset: Long, + producerId: Long = RecordBatch.NO_PRODUCER_ID, + producerEpoch: Short = RecordBatch.NO_PRODUCER_EPOCH, + records: Seq[SimpleRecord] + ): FetchDataInfo = { + val fileRecords = mock(classOf[FileRecords]) + val memoryRecords = if (producerId == RecordBatch.NO_PRODUCER_ID) { + MemoryRecords.withRecords( + startOffset, + Compression.NONE, + records: _* + ) + } else { + MemoryRecords.withTransactionalRecords( + startOffset, + Compression.NONE, + producerId, + producerEpoch, + 0, + RecordBatch.NO_PARTITION_LEADER_EPOCH, + records: _* + ) + } + + when(fileRecords.sizeInBytes).thenReturn(memoryRecords.sizeInBytes) + + val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) + when(fileRecords.readInto( + bufferCapture.capture(), + ArgumentMatchers.anyInt()) + ).thenAnswer { _ => + val buffer = bufferCapture.getValue + buffer.put(memoryRecords.buffer.duplicate) + buffer.flip() + } + + new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecords) + } + + private def logReadResult( + startOffset: Long, + producerId: Long, + producerEpoch: Short, + controlRecordType: ControlRecordType + ): FetchDataInfo = { + val fileRecords = mock(classOf[FileRecords]) + val memoryRecords = MemoryRecords.withEndTransactionMarker( + startOffset, + 0L, + RecordBatch.NO_PARTITION_LEADER_EPOCH, + producerId, + producerEpoch, + new EndTransactionMarker(controlRecordType, 0) + ) + + when(fileRecords.sizeInBytes).thenReturn(memoryRecords.sizeInBytes) + + val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) + when(fileRecords.readInto( + bufferCapture.capture(), + ArgumentMatchers.anyInt()) + ).thenAnswer { _ => + val buffer = bufferCapture.getValue + buffer.put(memoryRecords.buffer.duplicate) + buffer.flip() + } + + new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecords) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala index c55d2c0da3dc1..67f5deeaaeea8 100644 --- a/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/group/CoordinatorPartitionWriterTest.scala @@ -16,15 +16,16 @@ */ package kafka.coordinator.group -import kafka.server.{LogAppendResult, ReplicaManager} -import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import kafka.server.ReplicaManager +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.errors.NotLeaderOrFollowerException import org.apache.kafka.common.message.DeleteRecordsResponseData.DeleteRecordsPartitionResult import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, RecordValidationStats, SimpleRecord} +import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} +import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.coordinator.common.runtime.PartitionWriter -import org.apache.kafka.storage.internals.log.{AppendOrigin, LogAppendInfo, LogConfig, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, VerificationGuard} import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions.{assertEquals, assertNull, assertThrows, assertTrue} import org.junit.jupiter.api.Test @@ -34,9 +35,9 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{mock, verify, when} import java.nio.charset.Charset -import java.util -import java.util.Optional +import java.util.Collections import scala.collection.Map +import scala.jdk.CollectionConverters._ class CoordinatorPartitionWriterTest { @Test @@ -75,8 +76,8 @@ class CoordinatorPartitionWriterTest { replicaManager ) - when(replicaManager.getLogConfig(tp)).thenReturn(Some(new LogConfig(util.Map.of))) - assertEquals(new LogConfig(util.Map.of), partitionRecordWriter.config(tp)) + when(replicaManager.getLogConfig(tp)).thenReturn(Some(new LogConfig(Map.empty.asJava))) + assertEquals(new LogConfig(Map.empty.asJava), partitionRecordWriter.config(tp)) when(replicaManager.getLogConfig(tp)).thenReturn(None) assertThrows(classOf[NotLeaderOrFollowerException], () => partitionRecordWriter.config(tp)) @@ -86,41 +87,41 @@ class CoordinatorPartitionWriterTest { @Test def testWriteRecords(): Unit = { val tp = new TopicPartition("foo", 0) - val topicId = Uuid.fromString("TbEp6-A4s3VPT1TwiI5COw") val replicaManager = mock(classOf[ReplicaManager]) - when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicId, tp)) - val partitionRecordWriter = new CoordinatorPartitionWriter( - replicaManager + replicaManager ) - val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) + val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val callbackCapture: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - when(replicaManager.appendRecordsToLeader( + when(replicaManager.appendRecords( + ArgumentMatchers.eq(0L), ArgumentMatchers.eq(1.toShort), ArgumentMatchers.eq(true), ArgumentMatchers.eq(AppendOrigin.COORDINATOR), recordsCapture.capture(), + callbackCapture.capture(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.eq(Map(tp -> VerificationGuard.SENTINEL)), - )).thenReturn(Map(new TopicIdPartition(topicId, tp) -> LogAppendResult( - new LogAppendInfo( - 5L, - 10L, - Optional.empty, - RecordBatch.NO_TIMESTAMP, - 0L, - 0L, - RecordValidationStats.EMPTY, - CompressionType.NONE, - 100, - 10L - ), - Option.empty, - hasCustomErrorMessage = false - ))) + )).thenAnswer( _ => { + callbackCapture.getValue.apply(Map( + tp -> new PartitionResponse( + Errors.NONE, + 5, + 10, + RecordBatch.NO_TIMESTAMP, + -1, + Collections.emptyList(), + "" + ) + )) + }) val batch = MemoryRecords.withRecords( Compression.NONE, @@ -136,9 +137,11 @@ class CoordinatorPartitionWriterTest { VerificationGuard.SENTINEL, batch )) + assertEquals( batch, - recordsCapture.getValue.getOrElse(new TopicIdPartition(topicId, tp), throw new AssertionError(s"No records for $tp")) + recordsCapture.getValue.getOrElse(tp, + throw new AssertionError(s"No records for $tp")) ) } @@ -180,43 +183,46 @@ class CoordinatorPartitionWriterTest { "transactional-id", 10L, 5.toShort, - ApiKeys.TXN_OFFSET_COMMIT.latestVersion().toInt + ApiKeys.TXN_OFFSET_COMMIT.latestVersion() ) if (error == Errors.NONE) { assertEquals(verificationGuard, future.get) } else { - assertFutureThrows(error.exception.getClass, future) + assertFutureThrows(future, error.exception.getClass) } } @Test def testWriteRecordsWithFailure(): Unit = { val tp = new TopicPartition("foo", 0) - val topicId = Uuid.fromString("TbEp6-A4s3VPT1TwiI5COw") val replicaManager = mock(classOf[ReplicaManager]) - when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicId, tp)) - val partitionRecordWriter = new CoordinatorPartitionWriter( replicaManager ) - val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) + val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val callbackCapture: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - when(replicaManager.appendRecordsToLeader( + when(replicaManager.appendRecords( + ArgumentMatchers.eq(0L), ArgumentMatchers.eq(1.toShort), ArgumentMatchers.eq(true), ArgumentMatchers.eq(AppendOrigin.COORDINATOR), recordsCapture.capture(), + callbackCapture.capture(), + ArgumentMatchers.any(), + ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.eq(Map(tp -> VerificationGuard.SENTINEL)), - )).thenReturn(Map(new TopicIdPartition(topicId, tp) -> LogAppendResult( - LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, - Some(Errors.NOT_LEADER_OR_FOLLOWER.exception), - hasCustomErrorMessage = false - ))) + )).thenAnswer(_ => { + callbackCapture.getValue.apply(Map( + tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER) + )) + }) val batch = MemoryRecords.withRecords( Compression.NONE, diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala new file mode 100644 index 0000000000000..4a1e33705cfe7 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorAdapterTest.scala @@ -0,0 +1,955 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import kafka.coordinator.group.GroupCoordinatorConcurrencyTest.{JoinGroupCallback, SyncGroupCallback} +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.errors.{InvalidGroupIdException, UnsupportedVersionException} +import org.apache.kafka.common.message.{ConsumerGroupHeartbeatRequestData, DeleteGroupsResponseData, DescribeGroupsResponseData, HeartbeatRequestData, HeartbeatResponseData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupRequestData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchRequestData, OffsetFetchResponseData, ShareGroupHeartbeatRequestData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData} +import org.apache.kafka.common.message.JoinGroupRequestData.JoinGroupRequestProtocol +import org.apache.kafka.common.message.JoinGroupResponseData.JoinGroupResponseMember +import org.apache.kafka.common.message.OffsetDeleteRequestData.{OffsetDeleteRequestPartition, OffsetDeleteRequestTopic, OffsetDeleteRequestTopicCollection} +import org.apache.kafka.common.message.OffsetDeleteResponseData.{OffsetDeleteResponsePartition, OffsetDeleteResponsePartitionCollection, OffsetDeleteResponseTopic, OffsetDeleteResponseTopicCollection} +import org.apache.kafka.common.network.{ClientInformation, ListenerName} +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.{OffsetFetchResponse, RequestContext, RequestHeader, TransactionResult} +import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} +import org.apache.kafka.common.utils.{BufferSupplier, Time} +import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource +import org.apache.kafka.coordinator.group.OffsetAndMetadata +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.util.MockTime +import org.apache.kafka.test.TestUtils.assertFutureThrows +import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} +import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.mockito.ArgumentMatchers.any +import org.mockito.{ArgumentCaptor, ArgumentMatchers} +import org.mockito.Mockito.{mock, verify, when} + +import java.net.InetAddress +import java.util.{Optional, OptionalInt, OptionalLong} +import scala.jdk.CollectionConverters._ + +class GroupCoordinatorAdapterTest { + + private def makeContext( + apiKey: ApiKeys, + apiVersion: Short + ): RequestContext = { + new RequestContext( + new RequestHeader(apiKey, apiVersion, "client", 0), + "1", + InetAddress.getLocalHost, + KafkaPrincipal.ANONYMOUS, + ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), + SecurityProtocol.PLAINTEXT, + ClientInformation.EMPTY, + false + ) + } + + @Test + def testJoinConsumerGroup(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.CONSUMER_GROUP_HEARTBEAT, ApiKeys.CONSUMER_GROUP_HEARTBEAT.latestVersion) + val request = new ConsumerGroupHeartbeatRequestData() + .setGroupId("group") + + val future = adapter.consumerGroupHeartbeat(ctx, request) + + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[UnsupportedVersionException]) + } + + @Test + def testJoinShareGroup(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.SHARE_GROUP_HEARTBEAT, ApiKeys.SHARE_GROUP_HEARTBEAT.latestVersion) + val request = new ShareGroupHeartbeatRequestData() + .setGroupId("group") + + val future = adapter.shareGroupHeartbeat(ctx, request) + + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[UnsupportedVersionException]) + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.JOIN_GROUP) + def testJoinGroup(version: Short): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.JOIN_GROUP, version) + val request = new JoinGroupRequestData() + .setGroupId("group") + .setMemberId("member") + .setProtocolType("consumer") + .setRebalanceTimeoutMs(1000) + .setSessionTimeoutMs(2000) + .setReason("reason") + .setProtocols(new JoinGroupRequestData.JoinGroupRequestProtocolCollection(List( + new JoinGroupRequestProtocol() + .setName("first") + .setMetadata("first".getBytes()), + new JoinGroupRequestProtocol() + .setName("second") + .setMetadata("second".getBytes())).iterator.asJava)) + val bufferSupplier = BufferSupplier.create() + + val future = adapter.joinGroup(ctx, request, bufferSupplier) + assertFalse(future.isDone) + + val capturedProtocols: ArgumentCaptor[List[(String, Array[Byte])]] = + ArgumentCaptor.forClass(classOf[List[(String, Array[Byte])]]) + val capturedCallback: ArgumentCaptor[JoinGroupCallback] = + ArgumentCaptor.forClass(classOf[JoinGroupCallback]) + + verify(groupCoordinator).handleJoinGroup( + ArgumentMatchers.eq(request.groupId), + ArgumentMatchers.eq(request.memberId), + ArgumentMatchers.eq(None), + ArgumentMatchers.eq(if (version >= 4) true else false), + ArgumentMatchers.eq(if (version >= 9) true else false), + ArgumentMatchers.eq(ctx.clientId), + ArgumentMatchers.eq(InetAddress.getLocalHost.toString), + ArgumentMatchers.eq(request.rebalanceTimeoutMs), + ArgumentMatchers.eq(request.sessionTimeoutMs), + ArgumentMatchers.eq(request.protocolType), + capturedProtocols.capture(), + capturedCallback.capture(), + ArgumentMatchers.eq(Some("reason")), + ArgumentMatchers.eq(new RequestLocal(bufferSupplier)) + ) + + assertEquals(List( + ("first", "first"), + ("second", "second") + ), capturedProtocols.getValue.map { case (name, metadata) => + (name, new String(metadata)) + }) + + capturedCallback.getValue.apply(JoinGroupResult( + members = List( + new JoinGroupResponseMember() + .setMemberId("member") + .setMetadata("member".getBytes()) + .setGroupInstanceId("instance") + ), + memberId = "member", + generationId = 10, + protocolType = Some("consumer"), + protocolName = Some("range"), + leaderId = "leader", + skipAssignment = true, + error = Errors.UNKNOWN_MEMBER_ID + )) + + val expectedData = new JoinGroupResponseData() + .setMembers(List(new JoinGroupResponseMember() + .setMemberId("member") + .setMetadata("member".getBytes()) + .setGroupInstanceId("instance")).asJava) + .setMemberId("member") + .setGenerationId(10) + .setProtocolType("consumer") + .setProtocolName("range") + .setLeader("leader") + .setSkipAssignment(true) + .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code) + + assertTrue(future.isDone) + assertEquals(expectedData, future.get()) + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.SYNC_GROUP) + def testSyncGroup(version: Short): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.SYNC_GROUP, version) + val data = new SyncGroupRequestData() + .setGroupId("group") + .setMemberId("member1") + .setGroupInstanceId("instance") + .setProtocolType("consumer") + .setProtocolName("range") + .setGenerationId(10) + .setAssignments(List( + new SyncGroupRequestData.SyncGroupRequestAssignment() + .setMemberId("member1") + .setAssignment("member1".getBytes()), + new SyncGroupRequestData.SyncGroupRequestAssignment() + .setMemberId("member2") + .setAssignment("member2".getBytes()) + ).asJava) + val bufferSupplier = BufferSupplier.create() + + val future = adapter.syncGroup(ctx, data, bufferSupplier) + assertFalse(future.isDone) + + val capturedAssignment: ArgumentCaptor[Map[String, Array[Byte]]] = + ArgumentCaptor.forClass(classOf[Map[String, Array[Byte]]]) + val capturedCallback: ArgumentCaptor[SyncGroupCallback] = + ArgumentCaptor.forClass(classOf[SyncGroupCallback]) + + verify(groupCoordinator).handleSyncGroup( + ArgumentMatchers.eq(data.groupId), + ArgumentMatchers.eq(data.generationId), + ArgumentMatchers.eq(data.memberId), + ArgumentMatchers.eq(Some(data.protocolType)), + ArgumentMatchers.eq(Some(data.protocolName)), + ArgumentMatchers.eq(Some(data.groupInstanceId)), + capturedAssignment.capture(), + capturedCallback.capture(), + ArgumentMatchers.eq(new RequestLocal(bufferSupplier)) + ) + + assertEquals(Map( + "member1" -> "member1", + "member2" -> "member2", + ), capturedAssignment.getValue.map { case (member, metadata) => + (member, new String(metadata)) + }) + + capturedCallback.getValue.apply(SyncGroupResult( + error = Errors.NONE, + protocolType = Some("consumer"), + protocolName = Some("range"), + memberAssignment = "member1".getBytes() + )) + + val expectedResponseData = new SyncGroupResponseData() + .setErrorCode(Errors.NONE.code) + .setProtocolType("consumer") + .setProtocolName("range") + .setAssignment("member1".getBytes()) + + assertTrue(future.isDone) + assertEquals(expectedResponseData, future.get()) + } + + @Test + def testHeartbeat(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.HEARTBEAT, ApiKeys.HEARTBEAT.latestVersion) + val data = new HeartbeatRequestData() + .setGroupId("group") + .setMemberId("member1") + .setGenerationId(0) + + val future = adapter.heartbeat(ctx, data) + + val capturedCallback: ArgumentCaptor[Errors => Unit] = + ArgumentCaptor.forClass(classOf[Errors => Unit]) + + verify(groupCoordinator).handleHeartbeat( + ArgumentMatchers.eq(data.groupId), + ArgumentMatchers.eq(data.memberId), + ArgumentMatchers.eq(None), + ArgumentMatchers.eq(data.generationId), + capturedCallback.capture(), + ) + + assertFalse(future.isDone) + + capturedCallback.getValue.apply(Errors.NONE) + + assertTrue(future.isDone) + assertEquals(new HeartbeatResponseData(), future.get()) + } + + def testLeaveGroup(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.LEAVE_GROUP, ApiKeys.LEAVE_GROUP.latestVersion) + val data = new LeaveGroupRequestData() + .setGroupId("group") + .setMembers(List( + new LeaveGroupRequestData.MemberIdentity() + .setMemberId("member-1") + .setGroupInstanceId("instance-1"), + new LeaveGroupRequestData.MemberIdentity() + .setMemberId("member-2") + .setGroupInstanceId("instance-2") + ).asJava) + + val future = adapter.leaveGroup(ctx, data) + + val capturedCallback: ArgumentCaptor[LeaveGroupResult => Unit] = + ArgumentCaptor.forClass(classOf[LeaveGroupResult => Unit]) + + verify(groupCoordinator).handleLeaveGroup( + ArgumentMatchers.eq(data.groupId), + ArgumentMatchers.eq(data.members.asScala.toList), + capturedCallback.capture(), + ) + + assertFalse(future.isDone) + + capturedCallback.getValue.apply(LeaveGroupResult( + topLevelError = Errors.NONE, + memberResponses = List( + LeaveMemberResponse( + memberId = "member-1", + groupInstanceId = Some("instance-1"), + error = Errors.NONE + ), + LeaveMemberResponse( + memberId = "member-2", + groupInstanceId = Some("instance-2"), + error = Errors.NONE + ) + ) + )) + + val expectedData = new LeaveGroupResponseData() + .setMembers(List( + new LeaveGroupResponseData.MemberResponse() + .setMemberId("member-1") + .setGroupInstanceId("instance-1"), + new LeaveGroupResponseData.MemberResponse() + .setMemberId("member-2") + .setGroupInstanceId("instance-2") + ).asJava) + + assertTrue(future.isDone) + assertEquals(expectedData, future.get()) + } + + @Test + def testListGroups(): Unit = { + testListGroups(null, null, Set.empty, Set.empty) + testListGroups(List(), List(), Set.empty, Set.empty) + testListGroups(List("Stable, Empty"), List(), Set("Stable, Empty"), Set.empty) + testListGroups(List(), List("classic"), Set.empty, Set("classic")) + } + + def testListGroups( + statesFilter: List[String], + typesFilter: List[String], + expectedStatesFilter: Set[String], + expectedTypesFilter: Set[String] + ): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.LIST_GROUPS, ApiKeys.LIST_GROUPS.latestVersion) + val data = new ListGroupsRequestData() + .setStatesFilter(statesFilter.asJava) + .setTypesFilter(typesFilter.asJava) + + when(groupCoordinator.handleListGroups(expectedStatesFilter, expectedTypesFilter)).thenReturn { + (Errors.NOT_COORDINATOR, List( + GroupOverview("group1", "protocol1", "Stable", "classic"), + GroupOverview("group2", "qwerty", "Empty", "classic") + )) + } + + val future = adapter.listGroups(ctx, data) + assertTrue(future.isDone) + + val expectedData = new ListGroupsResponseData() + .setErrorCode(Errors.NOT_COORDINATOR.code) + .setGroups(List( + new ListGroupsResponseData.ListedGroup() + .setGroupId("group1") + .setProtocolType("protocol1") + .setGroupState("Stable") + .setGroupType("classic"), + new ListGroupsResponseData.ListedGroup() + .setGroupId("group2") + .setProtocolType("qwerty") + .setGroupState("Empty") + .setGroupType("classic") + ).asJava) + + assertTrue(future.isDone) + assertEquals(expectedData, future.get()) + } + + @Test + def testDescribeGroup(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val groupId1 = "group-1" + val groupId2 = "group-2" + + val groupSummary1 = GroupSummary( + "Stable", + "consumer", + "roundrobin", + List(MemberSummary( + "memberid", + Some("instanceid"), + "clientid", + "clienthost", + "metadata".getBytes(), + "assignment".getBytes() + )) + ) + + when(groupCoordinator.handleDescribeGroup(groupId1, ApiKeys.DESCRIBE_GROUPS.latestVersion)).thenReturn { + (Errors.NONE, None, groupSummary1) + } + + when(groupCoordinator.handleDescribeGroup(groupId2, ApiKeys.DESCRIBE_GROUPS.latestVersion)).thenReturn { + (Errors.NOT_COORDINATOR, None, GroupCoordinator.EmptyGroup) + } + + val ctx = makeContext(ApiKeys.DESCRIBE_GROUPS, ApiKeys.DESCRIBE_GROUPS.latestVersion) + val future = adapter.describeGroups(ctx, List(groupId1, groupId2).asJava) + assertTrue(future.isDone) + + val expectedDescribedGroups = List( + new DescribeGroupsResponseData.DescribedGroup() + .setGroupId(groupId1) + .setErrorCode(Errors.NONE.code) + .setProtocolType(groupSummary1.protocolType) + .setProtocolData(groupSummary1.protocol) + .setGroupState(groupSummary1.state) + .setMembers(List(new DescribeGroupsResponseData.DescribedGroupMember() + .setMemberId(groupSummary1.members.head.memberId) + .setGroupInstanceId(groupSummary1.members.head.groupInstanceId.orNull) + .setClientId(groupSummary1.members.head.clientId) + .setClientHost(groupSummary1.members.head.clientHost) + .setMemberMetadata(groupSummary1.members.head.metadata) + .setMemberAssignment(groupSummary1.members.head.assignment) + ).asJava), + new DescribeGroupsResponseData.DescribedGroup() + .setGroupId(groupId2) + .setErrorCode(Errors.NOT_COORDINATOR.code) + ).asJava + + assertEquals(expectedDescribedGroups, future.get()) + } + + @Test + def testDeleteGroups(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val ctx = makeContext(ApiKeys.DELETE_GROUPS, ApiKeys.DELETE_GROUPS.latestVersion) + val groupIds = List("group-1", "group-2", "group-3") + val bufferSupplier = BufferSupplier.create() + + when(groupCoordinator.handleDeleteGroups( + groupIds.toSet, + new RequestLocal(bufferSupplier) + )).thenReturn(Map( + "group-1" -> Errors.NONE, + "group-2" -> Errors.NOT_COORDINATOR, + "group-3" -> Errors.INVALID_GROUP_ID, + )) + + val future = adapter.deleteGroups(ctx, groupIds.asJava, bufferSupplier) + assertTrue(future.isDone) + + val expectedResults = new DeleteGroupsResponseData.DeletableGroupResultCollection() + expectedResults.add(new DeleteGroupsResponseData.DeletableGroupResult() + .setGroupId("group-1") + .setErrorCode(Errors.NONE.code)) + expectedResults.add(new DeleteGroupsResponseData.DeletableGroupResult() + .setGroupId("group-2") + .setErrorCode(Errors.NOT_COORDINATOR.code)) + expectedResults.add(new DeleteGroupsResponseData.DeletableGroupResult() + .setGroupId("group-3") + .setErrorCode(Errors.INVALID_GROUP_ID.code)) + + assertEquals(expectedResults, future.get()) + } + + @Test + def testFetchAllOffsets(): Unit = { + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + val bar1 = new TopicPartition("bar", 1) + + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + when(groupCoordinator.handleFetchOffsets( + "group", + requireStable = true, + None + )).thenReturn(( + Errors.NONE, + Map( + foo0 -> new OffsetFetchResponse.PartitionData( + 100, + Optional.of(1), + "foo", + Errors.NONE + ), + bar1 -> new OffsetFetchResponse.PartitionData( + -1, + Optional.empty[Integer], + "", + Errors.UNKNOWN_TOPIC_OR_PARTITION + ), + foo1 -> new OffsetFetchResponse.PartitionData( + 200, + Optional.empty[Integer], + "", + Errors.NONE + ), + ) + )) + + val ctx = makeContext(ApiKeys.OFFSET_FETCH, ApiKeys.OFFSET_FETCH.latestVersion) + val future = adapter.fetchAllOffsets( + ctx, + new OffsetFetchRequestData.OffsetFetchRequestGroup().setGroupId("group"), + requireStable = true + ) + + assertTrue(future.isDone) + + val expectedResponse = List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(foo0.topic) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(foo0.partition) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + .setMetadata("foo") + .setErrorCode(Errors.NONE.code), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(foo1.partition) + .setCommittedOffset(200) + .setCommittedLeaderEpoch(-1) + .setMetadata("") + .setErrorCode(Errors.NONE.code), + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(bar1.topic) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(bar1.partition) + .setCommittedOffset(-1) + .setCommittedLeaderEpoch(-1) + .setMetadata("") + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) + ).asJava) + ) + + assertEquals("group", future.get().groupId) + assertEquals( + expectedResponse.sortWith(_.name > _.name), + future.get().topics.asScala.toList.sortWith(_.name > _.name) + ) + } + + @Test + def testFetchOffsets(): Unit = { + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + val bar1 = new TopicPartition("bar", 1) + + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + when(groupCoordinator.handleFetchOffsets( + "group", + requireStable = true, + Some(Seq(foo0, foo1, bar1)) + )).thenReturn(( + Errors.NONE, + Map( + foo0 -> new OffsetFetchResponse.PartitionData( + 100, + Optional.of(1), + "foo", + Errors.NONE + ), + bar1 -> new OffsetFetchResponse.PartitionData( + -1, + Optional.empty[Integer], + "", + Errors.UNKNOWN_TOPIC_OR_PARTITION + ), + foo1 -> new OffsetFetchResponse.PartitionData( + 200, + Optional.empty[Integer], + "", + Errors.NONE + ), + ) + )) + + val ctx = makeContext(ApiKeys.OFFSET_FETCH, ApiKeys.OFFSET_FETCH.latestVersion) + val future = adapter.fetchOffsets( + ctx, + new OffsetFetchRequestData.OffsetFetchRequestGroup() + .setGroupId("group") + .setTopics(List( + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(foo0.topic) + .setPartitionIndexes(List[Integer](foo0.partition, foo1.partition).asJava), + new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName(bar1.topic) + .setPartitionIndexes(List[Integer](bar1.partition).asJava)).asJava), + requireStable = true + ) + + assertTrue(future.isDone) + + val expectedResponse = List( + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(foo0.topic) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(foo0.partition) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + .setMetadata("foo") + .setErrorCode(Errors.NONE.code), + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(foo1.partition) + .setCommittedOffset(200) + .setCommittedLeaderEpoch(-1) + .setMetadata("") + .setErrorCode(Errors.NONE.code), + ).asJava), + new OffsetFetchResponseData.OffsetFetchResponseTopics() + .setName(bar1.topic) + .setPartitions(List( + new OffsetFetchResponseData.OffsetFetchResponsePartitions() + .setPartitionIndex(bar1.partition) + .setCommittedOffset(-1) + .setCommittedLeaderEpoch(-1) + .setMetadata("") + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) + ).asJava) + ) + + assertEquals("group", future.get().groupId) + assertEquals( + expectedResponse.sortWith(_.name > _.name), + future.get().topics.asScala.toList.sortWith(_.name > _.name) + ) + } + + @ParameterizedTest + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) + def testCommitOffsets(version: Short): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val time = new MockTime() + val adapter = new GroupCoordinatorAdapter(groupCoordinator, time) + val now = time.milliseconds() + + val ctx = makeContext(ApiKeys.OFFSET_COMMIT, version) + val data = new OffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setGenerationIdOrMemberEpoch(10) + .setRetentionTimeMs(1000) + .setTopics(List( + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setName("foo") + .setPartitions(List( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + ).asJava) + ).asJava) + val bufferSupplier = BufferSupplier.create() + + val future = adapter.commitOffsets(ctx, data, bufferSupplier) + assertFalse(future.isDone) + + val capturedCallback: ArgumentCaptor[Map[TopicIdPartition, Errors] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, Errors] => Unit]) + + verify(groupCoordinator).handleCommitOffsets( + ArgumentMatchers.eq(data.groupId), + ArgumentMatchers.eq(data.memberId), + ArgumentMatchers.eq(None), + ArgumentMatchers.eq(data.generationIdOrMemberEpoch), + ArgumentMatchers.eq(Map( + new TopicIdPartition(Uuid.ZERO_UUID, 0 , "foo") -> new OffsetAndMetadata( + 100, + OptionalInt.of(1), + "", + now, + OptionalLong.of(now + 1000L) + ) + )), + capturedCallback.capture(), + ArgumentMatchers.eq(new RequestLocal(bufferSupplier)) + ) + + capturedCallback.getValue.apply(Map( + new TopicIdPartition(Uuid.ZERO_UUID, 0 , "foo") -> Errors.NONE + )) + + val expectedResponseData = new OffsetCommitResponseData() + .setTopics(List( + new OffsetCommitResponseData.OffsetCommitResponseTopic() + .setName("foo") + .setPartitions(List( + new OffsetCommitResponseData.OffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava) + + assertTrue(future.isDone) + assertEquals(expectedResponseData, future.get()) + } + + @Test + def testCommitTransactionalOffsets(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val time = new MockTime() + val adapter = new GroupCoordinatorAdapter(groupCoordinator, time) + val now = time.milliseconds() + + val ctx = makeContext(ApiKeys.TXN_OFFSET_COMMIT, ApiKeys.TXN_OFFSET_COMMIT.latestVersion) + val data = new TxnOffsetCommitRequestData() + .setGroupId("group") + .setMemberId("member") + .setGenerationId(10) + .setProducerEpoch(1) + .setProducerId(2) + .setTransactionalId("transaction-id") + .setTopics(List( + new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() + .setName("foo") + .setPartitions(List( + new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() + .setPartitionIndex(0) + .setCommittedOffset(100) + .setCommittedLeaderEpoch(1) + ).asJava) + ).asJava) + val bufferSupplier = BufferSupplier.create() + + val future = adapter.commitTransactionalOffsets(ctx, data, bufferSupplier) + assertFalse(future.isDone) + + val capturedCallback: ArgumentCaptor[Map[TopicIdPartition, Errors] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, Errors] => Unit]) + + verify(groupCoordinator).handleTxnCommitOffsets( + ArgumentMatchers.eq(data.groupId), + ArgumentMatchers.eq(data.transactionalId), + ArgumentMatchers.eq(data.producerId), + ArgumentMatchers.eq(data.producerEpoch), + ArgumentMatchers.eq(data.memberId), + ArgumentMatchers.eq(None), + ArgumentMatchers.eq(data.generationId), + ArgumentMatchers.eq(Map( + new TopicIdPartition(Uuid.ZERO_UUID, 0 , "foo") -> new OffsetAndMetadata( + 100, + OptionalInt.of(1), + "", + now, + OptionalLong.empty() + ) + )), + capturedCallback.capture(), + ArgumentMatchers.eq(new RequestLocal(bufferSupplier)), + ArgumentMatchers.any() + ) + + capturedCallback.getValue.apply(Map( + new TopicIdPartition(Uuid.ZERO_UUID, 0 , "foo") -> Errors.NONE + )) + + val expectedData = new TxnOffsetCommitResponseData() + .setTopics(List( + new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() + .setName("foo") + .setPartitions(List( + new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava) + + assertTrue(future.isDone) + assertEquals(expectedData, future.get()) + } + + def testDeleteOffsets(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + val bar0 = new TopicPartition("bar", 0) + val bar1 = new TopicPartition("bar", 1) + + val ctx = makeContext(ApiKeys.OFFSET_DELETE, ApiKeys.OFFSET_DELETE.latestVersion) + val data = new OffsetDeleteRequestData() + .setGroupId("group") + .setTopics(new OffsetDeleteRequestTopicCollection(List( + new OffsetDeleteRequestTopic() + .setName("foo") + .setPartitions(List( + new OffsetDeleteRequestPartition().setPartitionIndex(0), + new OffsetDeleteRequestPartition().setPartitionIndex(1) + ).asJava), + new OffsetDeleteRequestTopic() + .setName("bar") + .setPartitions(List( + new OffsetDeleteRequestPartition().setPartitionIndex(0), + new OffsetDeleteRequestPartition().setPartitionIndex(1) + ).asJava) + ).asJava.iterator)) + val bufferSupplier = BufferSupplier.create() + + when(groupCoordinator.handleDeleteOffsets( + data.groupId, + Seq(foo0, foo1, bar0, bar1), + new RequestLocal(bufferSupplier) + )).thenReturn(( + Errors.NONE, + Map( + foo0 -> Errors.NONE, + foo1 -> Errors.NONE, + bar0 -> Errors.GROUP_SUBSCRIBED_TO_TOPIC, + bar1 -> Errors.GROUP_SUBSCRIBED_TO_TOPIC, + ) + )) + + val future = adapter.deleteOffsets(ctx, data, bufferSupplier) + + val expectedData = new OffsetDeleteResponseData() + .setTopics(new OffsetDeleteResponseTopicCollection(List( + new OffsetDeleteResponseTopic() + .setName("foo") + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + new OffsetDeleteResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code), + new OffsetDeleteResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code) + ).asJava.iterator)), + new OffsetDeleteResponseTopic() + .setName("bar") + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( + new OffsetDeleteResponsePartition() + .setPartitionIndex(0) + .setErrorCode(Errors.GROUP_SUBSCRIBED_TO_TOPIC.code), + new OffsetDeleteResponsePartition() + .setPartitionIndex(1) + .setErrorCode(Errors.GROUP_SUBSCRIBED_TO_TOPIC.code) + ).asJava.iterator)), + ).asJava.iterator)) + + assertTrue(future.isDone) + assertEquals(expectedData, future.get()) + } + + @Test + def testDeleteOffsetsWithGroupLevelError(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + + val ctx = makeContext(ApiKeys.OFFSET_DELETE, ApiKeys.OFFSET_DELETE.latestVersion) + val data = new OffsetDeleteRequestData() + .setGroupId("group") + .setTopics(new OffsetDeleteRequestTopicCollection(List( + new OffsetDeleteRequestTopic() + .setName("foo") + .setPartitions(List( + new OffsetDeleteRequestPartition().setPartitionIndex(0), + new OffsetDeleteRequestPartition().setPartitionIndex(1) + ).asJava) + ).asJava.iterator)) + val bufferSupplier = BufferSupplier.create() + + when(groupCoordinator.handleDeleteOffsets( + data.groupId, + Seq(foo0, foo1), + new RequestLocal(bufferSupplier) + )).thenReturn((Errors.INVALID_GROUP_ID, Map.empty[TopicPartition, Errors])) + + val future = adapter.deleteOffsets(ctx, data, bufferSupplier) + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[InvalidGroupIdException]) + } + + @Test + def testConsumerGroupDescribe(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + val context = makeContext(ApiKeys.CONSUMER_GROUP_DESCRIBE, ApiKeys.CONSUMER_GROUP_DESCRIBE.latestVersion) + val groupIds = List("group-id-1", "group-id-2").asJava + + val future = adapter.consumerGroupDescribe(context, groupIds) + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[UnsupportedVersionException]) + } + + @Test + def testShareGroupDescribe(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + val context = makeContext(ApiKeys.SHARE_GROUP_DESCRIBE, ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion) + val groupIds = List("group-id-1", "group-id-2").asJava + + val future = adapter.shareGroupDescribe(context, groupIds) + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[UnsupportedVersionException]) + } + + @Test + def testOnTransactionCompletedWithUnexpectedException(): Unit = { + val groupCoordinator = mock(classOf[GroupCoordinator]) + val adapter = new GroupCoordinatorAdapter(groupCoordinator, Time.SYSTEM) + + when(groupCoordinator.scheduleHandleTxnCompletion( + any(), + any(), + any() + )).thenThrow(new IllegalStateException("Oh no!")) + + val future = adapter.onTransactionCompleted( + 10, + Seq.empty[TopicPartition].asJava, + TransactionResult.COMMIT + ) + + assertTrue(future.isDone) + assertTrue(future.isCompletedExceptionally) + assertFutureThrows(future, classOf[Exception]) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala new file mode 100644 index 0000000000000..3eecdfe65e190 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorConcurrencyTest.scala @@ -0,0 +1,406 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import java.util.{OptionalInt, OptionalLong, Properties} +import java.util.concurrent.locks.{Lock, ReentrantLock} +import java.util.concurrent.{ConcurrentHashMap, TimeUnit} +import kafka.coordinator.AbstractCoordinatorConcurrencyTest +import kafka.coordinator.AbstractCoordinatorConcurrencyTest._ +import kafka.coordinator.group.GroupCoordinatorConcurrencyTest._ +import kafka.server.{KafkaConfig, KafkaRequestHandler} +import kafka.utils.CoreUtils +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.requests.{JoinGroupRequest, OffsetFetchResponse} +import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.coordinator.group.{GroupCoordinatorConfig, OffsetAndMetadata} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.purgatory.DelayedOperationPurgatory +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} + +import scala.collection._ +import scala.concurrent.duration.Duration +import scala.concurrent.{Await, Future, Promise, TimeoutException} + +class GroupCoordinatorConcurrencyTest extends AbstractCoordinatorConcurrencyTest[GroupMember] { + + private val protocolType = "consumer" + private val protocolName = "range" + private val metadata = Array[Byte]() + private val protocols = List((protocolName, metadata)) + private val nGroups = nThreads * 10 + private val nMembersPerGroup = nThreads * 5 + private val numPartitions = 2 + private var metrics: Metrics = _ + + private val allOperations = Seq( + new JoinGroupOperation, + new SyncGroupOperation, + new OffsetFetchOperation, + new CommitOffsetsOperation, + new HeartbeatOperation, + new LeaveGroupOperation + ) + + var heartbeatPurgatory: DelayedOperationPurgatory[DelayedHeartbeat] = _ + var rebalancePurgatory: DelayedOperationPurgatory[DelayedRebalance] = _ + var groupCoordinator: GroupCoordinator = _ + + @BeforeEach + override def setUp(): Unit = { + super.setUp() + + serverProps.setProperty(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, ConsumerMinSessionTimeout.toString) + serverProps.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, ConsumerMaxSessionTimeout.toString) + serverProps.setProperty(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, GroupInitialRebalanceDelay.toString) + + val config = KafkaConfig.fromProps(serverProps) + + heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", timer, config.brokerId, 1000, false, true) + rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", timer, config.brokerId, 1000, false, true) + + metrics = new Metrics + groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, metrics) + groupCoordinator.startup(() => numPartitions, enableMetadataExpiration = false) + + // Transactional appends attempt to schedule to the request handler thread using + // a non request handler thread. Set this to avoid error. + KafkaRequestHandler.setBypassThreadCheck(true) + } + + @AfterEach + override def tearDown(): Unit = { + try { + CoreUtils.swallow(groupCoordinator.shutdown(), this) + Utils.closeQuietly(metrics, "metrics") + } finally { + super.tearDown() + } + } + + def createGroupMembers(groupPrefix: String): Set[GroupMember] = { + (0 until nGroups).flatMap { i => + new Group(s"$groupPrefix$i", nMembersPerGroup, groupCoordinator).members + }.toSet + } + + @Test + def testConcurrentGoodPathSequence(): Unit = { + verifyConcurrentOperations(createGroupMembers, allOperations) + } + + @Test + def testConcurrentTxnGoodPathSequence(): Unit = { + verifyConcurrentOperations(createGroupMembers, Seq( + new JoinGroupOperation, + new SyncGroupOperation, + new OffsetFetchOperation, + new CommitTxnOffsetsOperation, + new CompleteTxnOperation, + new HeartbeatOperation, + new LeaveGroupOperation + )) + } + + @Test + def testConcurrentRandomSequence(): Unit = { + /** + * handleTxnCommitOffsets does not complete delayed requests now so it causes error if handleTxnCompletion is executed + * before completing delayed request. In random mode, we use this global lock to prevent such an error. + */ + val lock = new ReentrantLock() + verifyConcurrentRandomSequences(createGroupMembers, Seq( + new JoinGroupOperation, + new SyncGroupOperation, + new OffsetFetchOperation, + new CommitTxnOffsetsOperation(lock = Some(lock)), + new CompleteTxnOperation(lock = Some(lock)), + new HeartbeatOperation, + new LeaveGroupOperation + )) + } + + @Test + def testConcurrentJoinGroupEnforceGroupMaxSize(): Unit = { + val groupMaxSize = 1 + val newProperties = new Properties + newProperties.put(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, groupMaxSize.toString) + val config = KafkaConfig.fromProps(serverProps, newProperties) + + if (groupCoordinator != null) + groupCoordinator.shutdown() + groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, + rebalancePurgatory, timer.time, new Metrics()) + groupCoordinator.startup(() => numPartitions, enableMetadataExpiration = false) + + val members = new Group(s"group", nMembersPerGroup, groupCoordinator) + .members + val joinOp = new JoinGroupOperation() + + verifyConcurrentActions(members.toSet.map(joinOp.actionNoVerify)) + + val errors = members.map { member => + val joinGroupResult = joinOp.await(member, DefaultRebalanceTimeout) + joinGroupResult.error + } + + assertEquals(groupMaxSize, errors.count(_ == Errors.NONE)) + assertEquals(members.size-groupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED)) + } + + abstract class GroupOperation[R, C] extends Operation { + val responseFutures = new ConcurrentHashMap[GroupMember, Future[R]]() + + def setUpCallback(member: GroupMember): C = { + val responsePromise = Promise[R]() + val responseFuture = responsePromise.future + responseFutures.put(member, responseFuture) + responseCallback(responsePromise) + } + def responseCallback(responsePromise: Promise[R]): C + + override def run(member: GroupMember): Unit = { + val responseCallback = setUpCallback(member) + runWithCallback(member, responseCallback) + } + + def runWithCallback(member: GroupMember, responseCallback: C): Unit + + def await(member: GroupMember, timeoutMs: Long): R = { + var retries = (timeoutMs + 10) / 10 + val responseFuture = responseFutures.get(member) + while (retries > 0) { + timer.advanceClock(10) + try { + return Await.result(responseFuture, Duration(10, TimeUnit.MILLISECONDS)) + } catch { + case _: TimeoutException => + } + retries -= 1 + } + throw new TimeoutException(s"Operation did not complete within $timeoutMs millis") + } + } + + class JoinGroupOperation extends GroupOperation[JoinGroupCallbackParams, JoinGroupCallback] { + override def responseCallback(responsePromise: Promise[JoinGroupCallbackParams]): JoinGroupCallback = { + val callback: JoinGroupCallback = responsePromise.success + callback + } + override def runWithCallback(member: GroupMember, responseCallback: JoinGroupCallback): Unit = { + groupCoordinator.handleJoinGroup(member.groupId, member.memberId, None, requireKnownMemberId = false, + supportSkippingAssignment = false, "clientId", "clientHost", DefaultRebalanceTimeout, + DefaultSessionTimeout, protocolType, protocols, responseCallback) + replicaManager.tryCompleteActions() + } + override def awaitAndVerify(member: GroupMember): Unit = { + val joinGroupResult = await(member, DefaultRebalanceTimeout) + assertEquals(Errors.NONE, joinGroupResult.error) + member.memberId = joinGroupResult.memberId + member.generationId = joinGroupResult.generationId + } + } + + class SyncGroupOperation extends GroupOperation[SyncGroupCallbackParams, SyncGroupCallback] { + override def responseCallback(responsePromise: Promise[SyncGroupCallbackParams]): SyncGroupCallback = { + val callback: SyncGroupCallback = syncGroupResult => + responsePromise.success(syncGroupResult.error, syncGroupResult.memberAssignment) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: SyncGroupCallback): Unit = { + if (member.leader) { + groupCoordinator.handleSyncGroup(member.groupId, member.generationId, member.memberId, + Some(protocolType), Some(protocolName), member.groupInstanceId, member.group.assignment, responseCallback) + } else { + groupCoordinator.handleSyncGroup(member.groupId, member.generationId, member.memberId, + Some(protocolType), Some(protocolName), member.groupInstanceId, Map.empty[String, Array[Byte]], responseCallback) + } + replicaManager.tryCompleteActions() + } + override def awaitAndVerify(member: GroupMember): Unit = { + val result = await(member, DefaultSessionTimeout) + assertEquals(Errors.NONE, result._1) + assertNotNull(result._2) + assertEquals(0, result._2.length) + } + } + + class HeartbeatOperation extends GroupOperation[HeartbeatCallbackParams, HeartbeatCallback] { + override def responseCallback(responsePromise: Promise[HeartbeatCallbackParams]): HeartbeatCallback = { + val callback: HeartbeatCallback = error => responsePromise.success(error) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: HeartbeatCallback): Unit = { + groupCoordinator.handleHeartbeat(member.groupId, member.memberId, + member.groupInstanceId, member.generationId, responseCallback) + replicaManager.tryCompleteActions() + } + override def awaitAndVerify(member: GroupMember): Unit = { + val error = await(member, DefaultSessionTimeout) + assertEquals(Errors.NONE, error) + } + } + + class OffsetFetchOperation extends GroupOperation[OffsetFetchCallbackParams, OffsetFetchCallback] { + override def responseCallback(responsePromise: Promise[OffsetFetchCallbackParams]): OffsetFetchCallback = { + val callback: OffsetFetchCallback = (error, offsets) => responsePromise.success(error, offsets) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: OffsetFetchCallback): Unit = { + val (error, partitionData) = groupCoordinator.handleFetchOffsets(member.groupId, requireStable = true, None) + replicaManager.tryCompleteActions() + responseCallback(error, partitionData) + } + override def awaitAndVerify(member: GroupMember): Unit = { + val result = await(member, 500) + assertEquals(Errors.NONE, result._1) + assertEquals(Map.empty, result._2) + } + } + + class CommitOffsetsOperation extends GroupOperation[CommitOffsetCallbackParams, CommitOffsetCallback] { + override def responseCallback(responsePromise: Promise[CommitOffsetCallbackParams]): CommitOffsetCallback = { + val callback: CommitOffsetCallback = offsets => responsePromise.success(offsets) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: CommitOffsetCallback): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offsets = immutable.Map(tip -> new OffsetAndMetadata(1, OptionalInt.empty(), "", Time.SYSTEM.milliseconds(), OptionalLong.empty())) + groupCoordinator.handleCommitOffsets(member.groupId, member.memberId, + member.groupInstanceId, member.generationId, offsets, responseCallback) + replicaManager.tryCompleteActions() + } + override def awaitAndVerify(member: GroupMember): Unit = { + val offsets = await(member, 500) + offsets.foreach { case (_, error) => assertEquals(Errors.NONE, error) } + } + } + + class CommitTxnOffsetsOperation(lock: Option[Lock] = None) extends CommitOffsetsOperation { + override def runWithCallback(member: GroupMember, responseCallback: CommitOffsetCallback): Unit = { + val offsets = immutable.Map(new TopicIdPartition(Uuid.randomUuid(), 0, "topic") -> new OffsetAndMetadata(1, OptionalInt.empty(), "", Time.SYSTEM.milliseconds(), OptionalLong.empty())) + val producerId = 1000L + val producerEpoch : Short = 2 + // When transaction offsets are appended to the log, transactions may be scheduled for + // completion. Since group metadata locks are acquired for transaction completion, include + // this in the callback to test that there are no deadlocks. + def callbackWithTxnCompletion(errors: Map[TopicIdPartition, Errors]): Unit = { + val offsetsPartitions = (0 to numPartitions).map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, _)) + groupCoordinator.groupManager.scheduleHandleTxnCompletion(producerId, + offsetsPartitions.map(_.partition).toSet, isCommit = random.nextBoolean) + responseCallback(errors) + } + lock.foreach(_.lock()) + try { + // Since the replica manager is mocked we can use a dummy value for transactionalId. + groupCoordinator.handleTxnCommitOffsets(member.group.groupId, "dummy-txn-id", producerId, producerEpoch, + JoinGroupRequest.UNKNOWN_MEMBER_ID, Option.empty, JoinGroupRequest.UNKNOWN_GENERATION_ID, + offsets, callbackWithTxnCompletion, RequestLocal.noCaching, ApiKeys.TXN_OFFSET_COMMIT.latestVersion()) + replicaManager.tryCompleteActions() + } finally lock.foreach(_.unlock()) + } + } + + class CompleteTxnOperation(lock: Option[Lock] = None) extends GroupOperation[CompleteTxnCallbackParams, CompleteTxnCallback] { + override def responseCallback(responsePromise: Promise[CompleteTxnCallbackParams]): CompleteTxnCallback = { + val callback: CompleteTxnCallback = error => responsePromise.success(error) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: CompleteTxnCallback): Unit = { + val producerId = 1000L + val offsetsPartitions = (0 to numPartitions).map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, _)) + lock.foreach(_.lock()) + try { + groupCoordinator.groupManager.handleTxnCompletion(producerId, + offsetsPartitions.map(_.partition).toSet, isCommit = random.nextBoolean) + responseCallback(Errors.NONE) + } finally lock.foreach(_.unlock()) + + } + override def awaitAndVerify(member: GroupMember): Unit = { + val error = await(member, 500) + assertEquals(Errors.NONE, error) + } + } + + class LeaveGroupOperation extends GroupOperation[LeaveGroupCallbackParams, LeaveGroupCallback] { + override def responseCallback(responsePromise: Promise[LeaveGroupCallbackParams]): LeaveGroupCallback = { + val callback: LeaveGroupCallback = result => responsePromise.success(result) + callback + } + override def runWithCallback(member: GroupMember, responseCallback: LeaveGroupCallback): Unit = { + val memberIdentity = new MemberIdentity() + .setMemberId(member.memberId) + groupCoordinator.handleLeaveGroup(member.group.groupId, List(memberIdentity), responseCallback) + } + override def awaitAndVerify(member: GroupMember): Unit = { + val leaveGroupResult = await(member, DefaultSessionTimeout) + + val memberResponses = leaveGroupResult.memberResponses + GroupCoordinatorTest.verifyLeaveGroupResult(leaveGroupResult, Errors.NONE, List(Errors.NONE)) + assertEquals(member.memberId, memberResponses.head.memberId) + assertEquals(None, memberResponses.head.groupInstanceId) + } + } +} + +object GroupCoordinatorConcurrencyTest { + + type JoinGroupCallbackParams = JoinGroupResult + type JoinGroupCallback = JoinGroupResult => Unit + type SyncGroupCallbackParams = (Errors, Array[Byte]) + type SyncGroupCallback = SyncGroupResult => Unit + type HeartbeatCallbackParams = Errors + type HeartbeatCallback = Errors => Unit + type OffsetFetchCallbackParams = (Errors, Map[TopicPartition, OffsetFetchResponse.PartitionData]) + type OffsetFetchCallback = (Errors, Map[TopicPartition, OffsetFetchResponse.PartitionData]) => Unit + type CommitOffsetCallbackParams = Map[TopicIdPartition, Errors] + type CommitOffsetCallback = Map[TopicIdPartition, Errors] => Unit + type LeaveGroupCallbackParams = LeaveGroupResult + type LeaveGroupCallback = LeaveGroupResult => Unit + type CompleteTxnCallbackParams = Errors + type CompleteTxnCallback = Errors => Unit + + private val ConsumerMinSessionTimeout = 10 + private val ConsumerMaxSessionTimeout = 120 * 1000 + private val DefaultRebalanceTimeout = 60 * 1000 + private val DefaultSessionTimeout = 60 * 1000 + private val GroupInitialRebalanceDelay = 50 + + class Group(val groupId: String, nMembers: Int, groupCoordinator: GroupCoordinator) { + val groupPartitionId: Int = groupCoordinator.partitionFor(groupId) + groupCoordinator.groupManager.addOwnedPartition(groupPartitionId) + val members: Seq[GroupMember] = (0 until nMembers).map { i => + new GroupMember(this, groupPartitionId, i == 0) + } + def assignment: Map[String, Array[Byte]] = members.map { m => (m.memberId, Array[Byte]()) }.toMap + } + + class GroupMember(val group: Group, val groupPartitionId: Int, val leader: Boolean) extends CoordinatorMember { + @volatile var memberId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID + @volatile var groupInstanceId: Option[String] = None + @volatile var generationId: Int = -1 + def groupId: String = group.groupId + } + +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala new file mode 100644 index 0000000000000..19cbb382f7c91 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupCoordinatorTest.scala @@ -0,0 +1,4249 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import java.util.{OptionalInt, OptionalLong} +import kafka.server.{HostedPartition, KafkaConfig, KafkaRequestHandler, ReplicaManager} +import kafka.utils._ +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.protocol.{ApiKeys, Errors} +import org.apache.kafka.common.record.{MemoryRecords, RecordBatch} +import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse +import org.apache.kafka.common.requests.{JoinGroupRequest, OffsetCommitRequest, OffsetFetchResponse, TransactionResult} + +import java.util.concurrent.TimeUnit +import java.util.concurrent.locks.ReentrantLock +import kafka.cluster.Partition +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity +import org.apache.kafka.coordinator.group.{GroupCoordinatorConfig, OffsetAndMetadata} +import org.apache.kafka.server.ActionQueue +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.purgatory.DelayedOperationPurgatory +import org.apache.kafka.server.util.timer.MockTimer +import org.apache.kafka.server.util.{KafkaScheduler, MockTime} +import org.apache.kafka.storage.internals.log.{AppendOrigin, VerificationGuard} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource +import org.mockito.{ArgumentCaptor, ArgumentMatchers} +import org.mockito.ArgumentMatchers.{any, anyLong, anyShort} +import org.mockito.Mockito.{mock, when} + +import scala.jdk.CollectionConverters._ +import scala.collection.{Seq, mutable} +import scala.collection.mutable.ArrayBuffer +import scala.concurrent.duration.Duration +import scala.concurrent.{Await, Future, Promise, TimeoutException} + +class GroupCoordinatorTest { + import GroupCoordinatorTest._ + + type JoinGroupCallback = JoinGroupResult => Unit + type SyncGroupCallback = SyncGroupResult => Unit + type HeartbeatCallbackParams = Errors + type HeartbeatCallback = Errors => Unit + type CommitOffsetCallbackParams = Map[TopicIdPartition, Errors] + type CommitOffsetCallback = Map[TopicIdPartition, Errors] => Unit + type LeaveGroupCallback = LeaveGroupResult => Unit + + val ClientId = "consumer-test" + val ClientHost = "localhost" + val GroupMinSessionTimeout = 10 + val GroupMaxSessionTimeout = 10 * 60 * 1000 + val GroupMaxSize = 4 + val DefaultRebalanceTimeout = 500 + val DefaultSessionTimeout = 500 + val GroupInitialRebalanceDelay = 50 + var timer: MockTimer = _ + var groupCoordinator: GroupCoordinator = _ + var replicaManager: ReplicaManager = _ + var scheduler: KafkaScheduler = _ + + private val groupId = "groupId" + private val protocolType = "consumer" + private val protocolName = "range" + private val memberId = "memberId" + private val groupInstanceId = "groupInstanceId" + private val leaderInstanceId = "leader" + private val followerInstanceId = "follower" + private val invalidMemberId = "invalidMember" + private val metadata = Array[Byte]() + private val protocols = List((protocolName, metadata)) + private val protocolSuperset = List((protocolName, metadata), ("roundrobin", metadata)) + private val requireStable = true + private var groupPartitionId: Int = -1 + + // we use this string value since its hashcode % #.partitions is different + private val otherGroupId = "otherGroup" + + @BeforeEach + def setUp(): Unit = { + val props = TestUtils.createBrokerConfig(0) + props.setProperty(GroupCoordinatorConfig.GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG, GroupMinSessionTimeout.toString) + props.setProperty(GroupCoordinatorConfig.GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG, GroupMaxSessionTimeout.toString) + props.setProperty(GroupCoordinatorConfig.GROUP_MAX_SIZE_CONFIG, GroupMaxSize.toString) + props.setProperty(GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, GroupInitialRebalanceDelay.toString) + // make two partitions of the group topic to make sure some partitions are not owned by the coordinator + val ret = mutable.Map[String, Map[Int, Seq[Int]]]() + ret += (Topic.GROUP_METADATA_TOPIC_NAME -> Map(0 -> Seq(1), 1 -> Seq(1))) + + replicaManager = mock(classOf[ReplicaManager]) + + timer = new MockTimer + + val config = KafkaConfig.fromProps(props) + + val heartbeatPurgatory = new DelayedOperationPurgatory[DelayedHeartbeat]("Heartbeat", timer, 1000, config.brokerId, false, true) + val rebalancePurgatory = new DelayedOperationPurgatory[DelayedRebalance]("Rebalance", timer, 1000, config.brokerId, false, true) + + groupCoordinator = GroupCoordinator(config, replicaManager, heartbeatPurgatory, rebalancePurgatory, timer.time, new Metrics()) + // make two partitions of the group topic to make sure some partitions are not owned by the coordinator + groupCoordinator.startup(() => 2, enableMetadataExpiration = false) + + // add the partition into the owned partition list + groupPartitionId = groupCoordinator.partitionFor(groupId) + groupCoordinator.groupManager.addOwnedPartition(groupPartitionId) + } + + @AfterEach + def tearDown(): Unit = { + if (groupCoordinator != null) + groupCoordinator.shutdown() + } + + @Test + def testRequestHandlingWhileLoadingInProgress(): Unit = { + val otherGroupPartitionId = groupCoordinator.groupManager.partitionFor(otherGroupId) + assertTrue(otherGroupPartitionId != groupPartitionId) + + groupCoordinator.groupManager.addLoadingPartition(otherGroupPartitionId) + assertTrue(groupCoordinator.groupManager.isGroupLoading(otherGroupId)) + + // Dynamic Member JoinGroup + var joinGroupResponse: Option[JoinGroupResult] = None + groupCoordinator.handleJoinGroup(otherGroupId, memberId, None, requireKnownMemberId = true, supportSkippingAssignment = true, "clientId", "clientHost", 60000, 10000, "consumer", + List("range" -> new Array[Byte](0)), result => { joinGroupResponse = Some(result)}) + assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), joinGroupResponse.map(_.error)) + + // Static Member JoinGroup + groupCoordinator.handleJoinGroup(otherGroupId, memberId, Some("groupInstanceId"), requireKnownMemberId = false, supportSkippingAssignment = true, "clientId", "clientHost", 60000, 10000, "consumer", + List("range" -> new Array[Byte](0)), result => { joinGroupResponse = Some(result)}) + assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), joinGroupResponse.map(_.error)) + + // SyncGroup + var syncGroupResponse: Option[Errors] = None + groupCoordinator.handleSyncGroup(otherGroupId, 1, memberId, Some("consumer"), Some("range"), None, Map.empty[String, Array[Byte]], + syncGroupResult => syncGroupResponse = Some(syncGroupResult.error)) + assertEquals(Some(Errors.REBALANCE_IN_PROGRESS), syncGroupResponse) + + // OffsetCommit + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0 , "foo") + var offsetCommitErrors = Map.empty[TopicIdPartition, Errors] + groupCoordinator.handleCommitOffsets(otherGroupId, memberId, None, 1, + Map(topicIdPartition -> offsetAndMetadata(15L)), result => { offsetCommitErrors = result }) + assertEquals(Map(topicIdPartition -> Errors.COORDINATOR_LOAD_IN_PROGRESS), offsetCommitErrors) + + // Heartbeat + var heartbeatError: Option[Errors] = None + groupCoordinator.handleHeartbeat(otherGroupId, memberId, None, 1, error => { heartbeatError = Some(error) }) + assertEquals(Some(Errors.NONE), heartbeatError) + + // DescribeGroups + val (describeGroupError, _, _) = groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, describeGroupError) + + // ListGroups + val (listGroupsError, _) = groupCoordinator.handleListGroups(Set(), Set()) + assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, listGroupsError) + + // DeleteGroups + val deleteGroupsErrors = groupCoordinator.handleDeleteGroups(Set(otherGroupId)) + assertEquals(Some(Errors.COORDINATOR_LOAD_IN_PROGRESS), deleteGroupsErrors.get(otherGroupId)) + + // Check that non-loading groups are still accessible + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._1) + + // After loading, we should be able to access the group + val otherGroupMetadataTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, otherGroupPartitionId) + when(replicaManager.getLog(otherGroupMetadataTopicPartition)).thenReturn(None) + + // Call removeGroupsAndOffsets so that partition removed from loadingPartitions + groupCoordinator.groupManager.removeGroupsAndOffsets(otherGroupMetadataTopicPartition, OptionalInt.of(1), group => {}) + groupCoordinator.groupManager.loadGroupsAndOffsets(otherGroupMetadataTopicPartition, 1, group => {}, 0L) + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._1) + } + + @Test + def testOffsetsRetentionMsIntegerOverflow(): Unit = { + val props = TestUtils.createBrokerConfig(0) + props.setProperty(GroupCoordinatorConfig.OFFSETS_RETENTION_MINUTES_CONFIG, Integer.MAX_VALUE.toString) + val config = KafkaConfig.fromProps(props) + val offsetConfig = GroupCoordinator.offsetConfig(config) + assertEquals(offsetConfig.offsetsRetentionMs, Integer.MAX_VALUE * 60L * 1000L) + } + + @Test + def testJoinGroupWrongCoordinator(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + var joinGroupResult = dynamicJoinGroup(otherGroupId, memberId, protocolType, protocols) + assertEquals(Errors.NOT_COORDINATOR, joinGroupResult.error) + + joinGroupResult = staticJoinGroup(otherGroupId, memberId, groupInstanceId, protocolType, protocols) + assertEquals(Errors.NOT_COORDINATOR, joinGroupResult.error) + } + + @Test + def testJoinGroupShouldReceiveErrorIfGroupOverMaxSize(): Unit = { + val futures = ArrayBuffer[Future[JoinGroupResult]]() + val rebalanceTimeout = GroupInitialRebalanceDelay * 2 + + for (i <- 1.to(GroupMaxSize)) { + futures += sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + if (i != 1) + timer.advanceClock(GroupInitialRebalanceDelay) + } + // advance clock beyond rebalanceTimeout + timer.advanceClock(GroupInitialRebalanceDelay + 1) + for (future <- futures) { + assertEquals(Errors.NONE, await(future, 1).error) + } + + // Should receive an error since the group is full + val errorFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + assertEquals(Errors.GROUP_MAX_SIZE_REACHED, await(errorFuture, 1).error) + } + + @Test + def testDynamicMembersJoinGroupWithMaxSizeAndRequiredKnownMember(): Unit = { + val requiredKnownMemberId = true + val nbMembers = GroupMaxSize + 1 + + // First JoinRequests + var futures = 1.to(nbMembers).map { _ => + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Get back the assigned member ids + val memberIds = futures.map(await(_, 1).memberId) + + // Second JoinRequests + futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + // advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + // Awaiting results + val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error) + + assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE)) + assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED)) + + // Members which were accepted can rejoin, others are rejected, while + // completing rebalance + futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Awaiting results + val rejoinErrors = futures.map(await(_, 1).error) + + assertEquals(errors, rejoinErrors) + } + + @Test + def testDynamicMembersJoinGroupWithMaxSize(): Unit = { + val requiredKnownMemberId = false + val nbMembers = GroupMaxSize + 1 + + // JoinRequests + var futures = 1.to(nbMembers).map { _ => + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + // advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + // Awaiting results + val joinGroupResults = futures.map(await(_, DefaultRebalanceTimeout + 1)) + val errors = joinGroupResults.map(_.error) + + assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE)) + assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED)) + + // Members which were accepted can rejoin, others are rejected, while + // completing rebalance + val memberIds = joinGroupResults.map(_.memberId) + futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Awaiting results + val rejoinErrors = futures.map(await(_, 1).error) + + assertEquals(errors, rejoinErrors) + } + + @Test + def testStaticMembersJoinGroupWithMaxSize(): Unit = { + val nbMembers = GroupMaxSize + 1 + val instanceIds = 1.to(nbMembers).map(i => Some(s"instance-id-$i")) + + // JoinRequests + var futures = instanceIds.map { instanceId => + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + instanceId, DefaultSessionTimeout, DefaultRebalanceTimeout) + } + + // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + // advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + // Awaiting results + val joinGroupResults = futures.map(await(_, DefaultRebalanceTimeout + 1)) + val errors = joinGroupResults.map(_.error) + + assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE)) + assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED)) + + // Members which were accepted can rejoin, others are rejected, while + // completing rebalance + val memberIds = joinGroupResults.map(_.memberId) + futures = instanceIds.zip(memberIds).map { case (instanceId, memberId) => + sendJoinGroup(groupId, memberId, protocolType, protocols, + instanceId, DefaultSessionTimeout, DefaultRebalanceTimeout) + } + + // Awaiting results + val rejoinErrors = futures.map(await(_, 1).error) + + assertEquals(errors, rejoinErrors) + } + + @Test + def testDynamicMembersCanReJoinGroupWithMaxSizeWhileRebalancing(): Unit = { + val requiredKnownMemberId = true + val nbMembers = GroupMaxSize + 1 + + // First JoinRequests + var futures = 1.to(nbMembers).map { _ => + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Get back the assigned member ids + val memberIds = futures.map(await(_, 1).memberId) + + // Second JoinRequests + memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Members can rejoin while rebalancing + futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + // advance clock by GroupInitialRebalanceDelay to complete second InitialDelayedJoin + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + // Awaiting results + val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error) + + assertEquals(GroupMaxSize, errors.count(_ == Errors.NONE)) + assertEquals(nbMembers-GroupMaxSize, errors.count(_ == Errors.GROUP_MAX_SIZE_REACHED)) + } + + @Test + def testLastJoiningMembersAreKickedOutWhenReJoiningGroupWithMaxSize(): Unit = { + val nbMembers = GroupMaxSize + 2 + val group = new GroupMetadata(groupId, Stable, new MockTime()) + val memberIds = 1.to(nbMembers).map(_ => group.generateMemberId(ClientId, None)) + + memberIds.foreach { memberId => + group.add(new MemberMetadata(memberId, None, ClientId, ClientHost, + DefaultRebalanceTimeout, GroupMaxSessionTimeout, protocolType, protocols)) + } + groupCoordinator.groupManager.addGroup(group) + + groupCoordinator.prepareRebalance(group, "") + + val futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, GroupMaxSessionTimeout, DefaultRebalanceTimeout) + } + + // advance clock by GroupInitialRebalanceDelay to complete first InitialDelayedJoin + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + // Awaiting results + val errors = futures.map(await(_, DefaultRebalanceTimeout + 1).error) + + assertEquals(Set(Errors.NONE), errors.take(GroupMaxSize).toSet) + assertEquals(Set(Errors.GROUP_MAX_SIZE_REACHED), errors.drop(GroupMaxSize).toSet) + + memberIds.drop(GroupMaxSize).foreach { memberId => + assertFalse(group.has(memberId)) + } + } + + @Test + def testJoinGroupSessionTimeoutTooSmall(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, sessionTimeout = GroupMinSessionTimeout - 1) + assertEquals(Errors.INVALID_SESSION_TIMEOUT, joinGroupResult.error) + } + + @Test + def testJoinGroupSessionTimeoutTooLarge(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, sessionTimeout = GroupMaxSessionTimeout + 1) + assertEquals(Errors.INVALID_SESSION_TIMEOUT, joinGroupResult.error) + } + + @Test + def testJoinGroupUnknownConsumerNewGroup(): Unit = { + var joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error) + + joinGroupResult = staticJoinGroup(groupId, memberId, groupInstanceId, protocolType, protocols) + assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error) + } + + @Test + def testInvalidGroupId(): Unit = { + val groupId = "" + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.INVALID_GROUP_ID, joinGroupResult.error) + } + + @Test + def testValidJoinGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + } + + @Test + def testJoinGroupInconsistentProtocolType(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val otherJoinGroupResult = await(sendJoinGroup(groupId, otherMemberId, "connect", protocols), 1) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, otherJoinGroupResult.error) + } + + @Test + def testJoinGroupWithEmptyProtocolType(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + var joinGroupResult = dynamicJoinGroup(groupId, memberId, "", protocols) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error) + + joinGroupResult = staticJoinGroup(groupId, memberId, groupInstanceId, "", protocols) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error) + } + + @Test + def testJoinGroupWithEmptyGroupProtocol(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, List()) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error) + } + + @Test + def testNewMemberTimeoutCompletion(): Unit = { + val sessionTimeout = GroupCoordinator.NewMemberJoinTimeoutMs + 5000 + val responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, None, sessionTimeout, DefaultRebalanceTimeout) + + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + val joinResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + val group = groupCoordinator.groupManager.getGroup(groupId).get + val memberId = joinResult.memberId + + assertEquals(Errors.NONE, joinResult.error) + assertEquals(0, group.allMemberMetadata.count(_.isNew)) + + val syncGroupResult = syncGroupLeader(groupId, joinResult.generationId, memberId, Map(memberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + assertEquals(1, group.size) + + timer.advanceClock(GroupCoordinator.NewMemberJoinTimeoutMs + 100) + + // Make sure the NewMemberTimeout is not still in effect, and the member is not kicked + assertEquals(1, group.size) + + timer.advanceClock(sessionTimeout + 100) + assertEquals(0, group.size) + } + + @Test + def testNewMemberJoinExpiration(): Unit = { + // This tests new member expiration during a protracted rebalance. We first create a + // group with one member which uses a large value for session timeout and rebalance timeout. + // We then join with one new member and let the rebalance hang while we await the first member. + // The new member join timeout expires and its JoinGroup request is failed. + + val sessionTimeout = GroupCoordinator.NewMemberJoinTimeoutMs + 5000 + val rebalanceTimeout = GroupCoordinator.NewMemberJoinTimeoutMs * 2 + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + sessionTimeout, rebalanceTimeout) + val firstMemberId = firstJoinResult.memberId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val groupOpt = groupCoordinator.groupManager.getGroup(groupId) + assertTrue(groupOpt.isDefined) + val group = groupOpt.get + assertEquals(0, group.allMemberMetadata.count(_.isNew)) + + val responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, None, sessionTimeout, rebalanceTimeout) + assertFalse(responseFuture.isCompleted) + + assertEquals(2, group.allMembers.size) + assertEquals(1, group.allMemberMetadata.count(_.isNew)) + + val newMember = group.allMemberMetadata.find(_.isNew).get + assertNotEquals(firstMemberId, newMember.memberId) + + timer.advanceClock(GroupCoordinator.NewMemberJoinTimeoutMs + 1) + assertTrue(responseFuture.isCompleted) + + val response = Await.result(responseFuture, Duration(0, TimeUnit.MILLISECONDS)) + assertEquals(Errors.UNKNOWN_MEMBER_ID, response.error) + assertEquals(1, group.allMembers.size) + assertEquals(0, group.allMemberMetadata.count(_.isNew)) + assertEquals(firstMemberId, group.allMembers.head) + } + + @Test + def testNewMemberFailureAfterJoinGroupCompletion(): Unit = { + // For old versions of the JoinGroup protocol, new members were subject + // to expiration if the rebalance took long enough. This test case ensures + // that following completion of the JoinGroup phase, new members follow + // normal heartbeat expiration logic. + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, + Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + + verifySessionExpiration(groupId) + } + + @Test + def testNewMemberFailureAfterSyncGroupCompletion(): Unit = { + // For old versions of the JoinGroup protocol, new members were subject + // to expiration if the rebalance took long enough. This test case ensures + // that following completion of the SyncGroup phase, new members follow + // normal heartbeat expiration logic. + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, + Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + val secondGenerationId = joinResult.generationId + val secondMemberId = otherJoinResult.memberId + + sendSyncGroupFollower(groupId, secondGenerationId, secondMemberId) + + val syncGroupResult = syncGroupLeader(groupId, secondGenerationId, firstMemberId, + Map(firstMemberId -> Array.emptyByteArray, secondMemberId -> Array.emptyByteArray)) + assertEquals(Errors.NONE, syncGroupResult.error) + + verifySessionExpiration(groupId) + } + + private def verifySessionExpiration(groupId: String): Unit = { + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) + + timer.advanceClock(DefaultSessionTimeout + 1) + + val groupMetadata = group(groupId) + assertEquals(Empty, groupMetadata.currentState) + assertTrue(groupMetadata.allMembers.isEmpty) + } + + @Test + def testJoinGroupInconsistentGroupProtocol(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val otherMemberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupFuture = sendJoinGroup(groupId, memberId, protocolType, List(("range", metadata))) + + val otherJoinGroupResult = dynamicJoinGroup(groupId, otherMemberId, protocolType, List(("roundrobin", metadata))) + timer.advanceClock(GroupInitialRebalanceDelay + 1) + + val joinGroupResult = await(joinGroupFuture, 1) + assertEquals(Errors.NONE, joinGroupResult.error) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, otherJoinGroupResult.error) + } + + @Test + def testJoinGroupUnknownConsumerExistingGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val otherMemberId = "memberId" + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val otherJoinGroupResult = await(sendJoinGroup(groupId, otherMemberId, protocolType, protocols), 1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, otherJoinGroupResult.error) + } + + @Test + def testJoinGroupUnknownConsumerNewDeadGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val deadGroupId = "deadGroupId" + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val joinGroupResult = dynamicJoinGroup(deadGroupId, memberId, protocolType, protocols) + assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, joinGroupResult.error) + } + + @Test + def testSyncDeadGroup(): Unit = { + val memberId = "memberId" + val deadGroupId = "deadGroupId" + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val syncGroupResult = syncGroupFollower(deadGroupId, 1, memberId) + assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, syncGroupResult.error) + } + + @Test + def testJoinGroupSecondJoinInconsistentProtocol(): Unit = { + var responseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, requireKnownMemberId = true) + var joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.MEMBER_ID_REQUIRED, joinGroupResult.error) + val memberId = joinGroupResult.memberId + + // Sending an inconsistent protocol shall be refused + responseFuture = sendJoinGroup(groupId, memberId, protocolType, List(), requireKnownMemberId = true) + joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.INCONSISTENT_GROUP_PROTOCOL, joinGroupResult.error) + + // Sending consistent protocol shall be accepted + responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, requireKnownMemberId = true) + timer.advanceClock(GroupInitialRebalanceDelay + 1) + joinGroupResult = Await.result(responseFuture, Duration(DefaultRebalanceTimeout + 1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.NONE, joinGroupResult.error) + } + + @Test + def staticMemberJoinAsFirstMember(): Unit = { + val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + } + + @Test + def staticMemberReJoinWithExplicitUnknownMemberId(): Unit = { + var joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val unknownMemberId = "unknown_member" + joinGroupResult = staticJoinGroup(groupId, unknownMemberId, groupInstanceId, protocolType, protocols) + assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error) + } + + @Test + def staticMemberFenceDuplicateRejoinedFollower(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A third member joins will trigger rebalance. + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + timer.advanceClock(1) + assertTrue(getGroup(groupId).is(PreparingRebalance)) + + timer.advanceClock(1) + // Old follower rejoins group will be matching current member.id. + val oldFollowerJoinGroupFuture = + sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + + timer.advanceClock(1) + // Duplicate follower joins group with unknown member id will trigger member.id replacement. + val duplicateFollowerJoinFuture = + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + + timer.advanceClock(1) + // Old member shall be fenced immediately upon duplicate follower joins. + val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(oldFollowerJoinGroupResult, + Errors.FENCED_INSTANCE_ID, + -1, + Set.empty, + PreparingRebalance, + None) + verifyDelayedTaskNotCompleted(duplicateFollowerJoinFuture) + } + + @Test + def staticMemberFenceDuplicateSyncingFollowerAfterMemberIdChanged(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // Known leader rejoins will trigger rebalance. + val leaderJoinGroupFuture = + sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType, protocols, groupInstanceId = Some(leaderInstanceId)) + timer.advanceClock(1) + assertTrue(getGroup(groupId).is(PreparingRebalance)) + + timer.advanceClock(1) + // Old follower rejoins group will match current member.id. + val oldFollowerJoinGroupFuture = + sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + + timer.advanceClock(1) + val leaderJoinGroupResult = Await.result(leaderJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(leaderJoinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, + Set(leaderInstanceId, followerInstanceId), + CompletingRebalance, + Some(protocolType)) + assertEquals(rebalanceResult.leaderId, leaderJoinGroupResult.memberId) + assertEquals(rebalanceResult.leaderId, leaderJoinGroupResult.leaderId) + + // Old follower shall be getting a successful join group response. + val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(oldFollowerJoinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, + Set.empty, + CompletingRebalance, + Some(protocolType), + expectedLeaderId = leaderJoinGroupResult.memberId) + assertEquals(rebalanceResult.followerId, oldFollowerJoinGroupResult.memberId) + assertEquals(rebalanceResult.leaderId, oldFollowerJoinGroupResult.leaderId) + assertTrue(getGroup(groupId).is(CompletingRebalance)) + + // Duplicate follower joins group with unknown member id will trigger member.id replacement, + // and will also trigger a rebalance under CompletingRebalance state; the old follower sync callback + // will return fenced exception while broker replaces the member identity with the duplicate follower joins. + val oldFollowerSyncGroupFuture = sendSyncGroupFollower(groupId, oldFollowerJoinGroupResult.generationId, + oldFollowerJoinGroupResult.memberId, Some(protocolType), Some(protocolName), Some(followerInstanceId)) + + val duplicateFollowerJoinFuture = + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + timer.advanceClock(1) + + val oldFollowerSyncGroupResult = Await.result(oldFollowerSyncGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.FENCED_INSTANCE_ID, oldFollowerSyncGroupResult.error) + assertTrue(getGroup(groupId).is(PreparingRebalance)) + + timer.advanceClock(GroupInitialRebalanceDelay + 1) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + val duplicateFollowerJoinGroupResult = Await.result(duplicateFollowerJoinFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(duplicateFollowerJoinGroupResult, + Errors.NONE, + rebalanceResult.generation + 2, + Set(followerInstanceId), // this follower will become the new leader, and hence it would have the member list + CompletingRebalance, + Some(protocolType), + expectedLeaderId = duplicateFollowerJoinGroupResult.memberId) + assertTrue(getGroup(groupId).is(CompletingRebalance)) + } + + @Test + def staticMemberFenceDuplicateRejoiningFollowerAfterMemberIdChanged(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // Known leader rejoins will trigger rebalance. + val leaderJoinGroupFuture = + sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType, protocols, groupInstanceId = Some(leaderInstanceId)) + timer.advanceClock(1) + assertTrue(getGroup(groupId).is(PreparingRebalance)) + + // Duplicate follower joins group will trigger member.id replacement. + val duplicateFollowerJoinGroupFuture = + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + + timer.advanceClock(1) + // Old follower rejoins group will fail because member.id already updated. + val oldFollowerJoinGroupFuture = + sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, protocols, groupInstanceId = Some(followerInstanceId)) + + val leaderRejoinGroupResult = Await.result(leaderJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(leaderRejoinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, + Set(leaderInstanceId, followerInstanceId), + CompletingRebalance, + Some(protocolType)) + + val duplicateFollowerJoinGroupResult = Await.result(duplicateFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(duplicateFollowerJoinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, + Set.empty, + CompletingRebalance, + Some(protocolType)) + assertNotEquals(rebalanceResult.followerId, duplicateFollowerJoinGroupResult.memberId) + + val oldFollowerJoinGroupResult = Await.result(oldFollowerJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + checkJoinGroupResult(oldFollowerJoinGroupResult, + Errors.FENCED_INSTANCE_ID, + -1, + Set.empty, + CompletingRebalance, + None) + } + + @Test + def staticMemberRejoinWithKnownMemberId(): Unit = { + var joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val assignedMemberId = joinGroupResult.memberId + // The second join group should return immediately since we are using the same metadata during CompletingRebalance. + val rejoinResponseFuture = sendJoinGroup(groupId, assignedMemberId, protocolType, protocols, Some(groupInstanceId)) + timer.advanceClock(1) + joinGroupResult = Await.result(rejoinResponseFuture, Duration(1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.NONE, joinGroupResult.error) + assertTrue(getGroup(groupId).is(CompletingRebalance)) + + val syncGroupFuture = sendSyncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, + Some(protocolType), Some(protocolName), Some(groupInstanceId), Map(assignedMemberId -> Array[Byte]())) + timer.advanceClock(1) + val syncGroupResult = Await.result(syncGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.NONE, syncGroupResult.error) + assertTrue(getGroup(groupId).is(Stable)) + } + + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def staticMemberRejoinWithLeaderIdAndUnknownMemberId(supportSkippingAssignment: Boolean): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static leader rejoin with unknown id will not trigger rebalance, and no assignment will be returned. + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, + leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1, supportSkippingAssignment = supportSkippingAssignment) + + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation, // The group should be at the same generation + if (supportSkippingAssignment) Set(leaderInstanceId, followerInstanceId) else Set.empty, + Stable, + Some(protocolType), + if (supportSkippingAssignment) joinGroupResult.memberId else rebalanceResult.leaderId, + expectedSkipAssignment = supportSkippingAssignment + ) + + val oldLeaderJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1) + assertEquals(Errors.FENCED_INSTANCE_ID, oldLeaderJoinGroupResult.error) + + // Old leader will get fenced. + val oldLeaderSyncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, + Map.empty, None, None, Some(leaderInstanceId)) + assertEquals(Errors.FENCED_INSTANCE_ID, oldLeaderSyncGroupResult.error) + + // Calling sync on old leader.id will fail because that leader.id is no longer valid and replaced. + val newLeaderSyncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty) + assertEquals(Errors.UNKNOWN_MEMBER_ID, newLeaderSyncGroupResult.error) + } + + @Test + def staticMemberRejoinWithLeaderIdAndKnownMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, + sessionTimeout = DefaultRebalanceTimeout / 2) + + // A static leader with known id rejoin will trigger rebalance. + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, + protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1) + // Timeout follower in the meantime. + assertFalse(getGroup(groupId).hasStaticMember(followerInstanceId)) + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, // The group has promoted to the new generation. + Set(leaderInstanceId), + CompletingRebalance, + Some(protocolType), + rebalanceResult.leaderId, + rebalanceResult.leaderId) + } + + @Test + def staticMemberRejoinWithLeaderIdAndUnexpectedDeadGroup(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + getGroup(groupId).transitionTo(Dead) + + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocols, clockAdvance = 1) + assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, joinGroupResult.error) + } + + @Test + def staticMemberRejoinWithLeaderIdAndUnexpectedEmptyGroup(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + getGroup(groupId).transitionTo(PreparingRebalance) + getGroup(groupId).transitionTo(Empty) + + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, leaderInstanceId, protocolType, protocols, clockAdvance = 1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error) + } + + @Test + def staticMemberRejoinWithFollowerIdAndChangeOfProtocol(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultSessionTimeout * 2) + + // A static follower rejoin with changed protocol will trigger rebalance. + val newProtocols = List(("roundrobin", metadata)) + // Old leader hasn't joined in the meantime, triggering a re-election. + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = DefaultSessionTimeout + 1) + + assertEquals(rebalanceResult.followerId, joinGroupResult.memberId) + assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId)) + assertTrue(getGroup(groupId).isLeader(rebalanceResult.followerId)) + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, // The group has promoted to the new generation, and leader has changed because old one times out. + Set(leaderInstanceId, followerInstanceId), + CompletingRebalance, + Some(protocolType), + rebalanceResult.followerId, + rebalanceResult.followerId) + } + + @Test + def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWithSelectedProtocolChanged(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static follower rejoin with protocol changed and also cause updated group's selectedProtocol changed + // should trigger rebalance. + val selectedProtocols = getGroup(groupId).selectProtocol + val newProtocols = List(("roundrobin", metadata)) + assert(!newProtocols.map(_._1).contains(selectedProtocols)) + // Old leader hasn't joined in the meantime, triggering a re-election. + val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, newProtocols, clockAdvance = DefaultSessionTimeout + 1) + + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation + 1, + Set(leaderInstanceId, followerInstanceId), + CompletingRebalance, + Some(protocolType)) + + assertTrue(getGroup(groupId).isLeader(joinGroupResult.memberId)) + assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId) + assertEquals(joinGroupResult.protocolName, Some("roundrobin")) + } + + @Test + def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSelectProtocolUnchangedPersistenceFailure(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val selectedProtocol = getGroup(groupId).selectProtocol + val newProtocols = List((selectedProtocol, metadata)) + // Timeout old leader in the meantime. + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, + followerInstanceId, protocolType, newProtocols, clockAdvance = 1, appendRecordError = Errors.MESSAGE_TOO_LARGE) + + checkJoinGroupResult(joinGroupResult, + Errors.UNKNOWN_SERVER_ERROR, + rebalanceResult.generation, + Set.empty, + Stable, + Some(protocolType)) + + // Join with old member id will not fail because the member id is not updated because of persistence failure + assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId) + val oldFollowerJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = 1) + assertEquals(Errors.NONE, oldFollowerJoinGroupResult.error) + + // Sync with old member id will also not fail because the member id is not updated because of persistence failure + val syncGroupWithOldMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation, + rebalanceResult.followerId, None, None, Some(followerInstanceId)) + assertEquals(Errors.NONE, syncGroupWithOldMemberIdResult.error) + } + + @Test + def staticMemberRejoinWithUpdatedSessionAndRebalanceTimeoutsButCannotPersistChange(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1, 2 * DefaultSessionTimeout, 2 * DefaultRebalanceTimeout, appendRecordError = Errors.MESSAGE_TOO_LARGE) + checkJoinGroupResult(joinGroupResult, + Errors.UNKNOWN_SERVER_ERROR, + rebalanceResult.generation, + Set.empty, + Stable, + Some(protocolType)) + assertTrue(groupCoordinator.groupManager.getGroup(groupId).isDefined) + val group = groupCoordinator.groupManager.getGroup(groupId).get + group.allMemberMetadata.foreach { member => + assertEquals(member.sessionTimeoutMs, DefaultSessionTimeout) + assertEquals(member.rebalanceTimeoutMs, DefaultRebalanceTimeout) + } + } + + + @Test + def staticMemberRejoinWithUpdatedSessionAndRebalanceTimeoutsAndPersistChange(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + val followerJoinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1, 2 * DefaultSessionTimeout, 2 * DefaultRebalanceTimeout) + checkJoinGroupResult(followerJoinGroupResult, + Errors.NONE, + rebalanceResult.generation, + Set.empty, + Stable, + Some(protocolType)) + val leaderJoinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1, 2 * DefaultSessionTimeout, 2 * DefaultRebalanceTimeout) + checkJoinGroupResult(leaderJoinGroupResult, + Errors.NONE, + rebalanceResult.generation, + Set(leaderInstanceId, followerInstanceId), + Stable, + Some(protocolType), + leaderJoinGroupResult.leaderId, + leaderJoinGroupResult.memberId, + expectedSkipAssignment = true) + assertTrue(groupCoordinator.groupManager.getGroup(groupId).isDefined) + val group = groupCoordinator.groupManager.getGroup(groupId).get + group.allMemberMetadata.foreach { member => + assertEquals(member.sessionTimeoutMs, 2 * DefaultSessionTimeout) + assertEquals(member.rebalanceTimeoutMs, 2 * DefaultRebalanceTimeout) + } + } + @Test + def staticMemberRejoinWithUnknownMemberIdAndChangeOfProtocolWhileSelectProtocolUnchanged(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static follower rejoin with protocol changing to leader protocol subset won't trigger rebalance if updated + // group's selectProtocol remain unchanged. + val selectedProtocol = getGroup(groupId).selectProtocol + val newProtocols = List((selectedProtocol, metadata)) + // Timeout old leader in the meantime. + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, + followerInstanceId, protocolType, newProtocols, clockAdvance = 1) + + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation, + Set.empty, + Stable, + Some(protocolType)) + + // Join with old member id will fail because the member id is updated + assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId) + val oldFollowerJoinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, newProtocols, clockAdvance = 1) + assertEquals(Errors.FENCED_INSTANCE_ID, oldFollowerJoinGroupResult.error) + + // Sync with old member id will fail because the member id is updated + val syncGroupWithOldMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation, + rebalanceResult.followerId, None, None, Some(followerInstanceId)) + assertEquals(Errors.FENCED_INSTANCE_ID, syncGroupWithOldMemberIdResult.error) + + val syncGroupWithNewMemberIdResult = syncGroupFollower(groupId, rebalanceResult.generation, + joinGroupResult.memberId, None, None, Some(followerInstanceId)) + assertEquals(Errors.NONE, syncGroupWithNewMemberIdResult.error) + assertEquals(rebalanceResult.followerAssignment, syncGroupWithNewMemberIdResult.memberAssignment) + } + + @Test + def staticMemberRejoinWithKnownLeaderIdToTriggerRebalanceAndFollowerWithChangeOfProtocol(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static leader rejoin with known member id will trigger rebalance. + val leaderRejoinGroupFuture = sendJoinGroup(groupId, rebalanceResult.leaderId, protocolType, + protocolSuperset, Some(leaderInstanceId)) + // Rebalance complete immediately after follower rejoin. + val followerRejoinWithFuture = sendJoinGroup(groupId, rebalanceResult.followerId, protocolType, + protocolSuperset, Some(followerInstanceId)) + + timer.advanceClock(1) + + // Leader should get the same assignment as last round. + checkJoinGroupResult(await(leaderRejoinGroupFuture, 1), + Errors.NONE, + rebalanceResult.generation + 1, // The group has promoted to the new generation. + Set(leaderInstanceId, followerInstanceId), + CompletingRebalance, + Some(protocolType), + rebalanceResult.leaderId, + rebalanceResult.leaderId) + + checkJoinGroupResult(await(followerRejoinWithFuture, 1), + Errors.NONE, + rebalanceResult.generation + 1, // The group has promoted to the new generation. + Set.empty, + CompletingRebalance, + Some(protocolType), + rebalanceResult.leaderId, + rebalanceResult.followerId) + + // The follower protocol changed from protocolSuperset to general protocols. + val followerRejoinWithProtocolChangeFuture = sendJoinGroup(groupId, rebalanceResult.followerId, + protocolType, protocols, Some(followerInstanceId)) + // The group will transit to PreparingRebalance due to protocol change from follower. + assertTrue(getGroup(groupId).is(PreparingRebalance)) + + timer.advanceClock(DefaultRebalanceTimeout + 1) + checkJoinGroupResult(await(followerRejoinWithProtocolChangeFuture, 1), + Errors.NONE, + rebalanceResult.generation + 2, // The group has promoted to the new generation. + Set(followerInstanceId), + CompletingRebalance, + Some(protocolType), + rebalanceResult.followerId, + rebalanceResult.followerId) + } + + @Test + def staticMemberRejoinAsFollowerWithUnknownMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static follower rejoin with no protocol change will not trigger rebalance. + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1) + + // Old leader shouldn't be timed out. + assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId)) + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation, // The group has no change. + Set.empty, + Stable, + Some(protocolType)) + + assertNotEquals(rebalanceResult.followerId, joinGroupResult.memberId) + + val syncGroupResult = syncGroupFollower(groupId, rebalanceResult.generation, joinGroupResult.memberId) + assertEquals(Errors.NONE, syncGroupResult.error) + assertEquals(rebalanceResult.followerAssignment, syncGroupResult.memberAssignment) + } + + @Test + def staticMemberRejoinAsFollowerWithKnownMemberIdAndNoProtocolChange(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + // A static follower rejoin with no protocol change will not trigger rebalance. + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1) + + // Old leader shouldn't be timed out. + assertTrue(getGroup(groupId).hasStaticMember(leaderInstanceId)) + checkJoinGroupResult(joinGroupResult, + Errors.NONE, + rebalanceResult.generation, // The group has no change. + Set.empty, + Stable, + Some(protocolType), + rebalanceResult.leaderId, + rebalanceResult.followerId) + } + + @Test + def staticMemberRejoinAsFollowerWithMismatchedMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.followerId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = 1) + assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error) + } + + @Test + def staticMemberRejoinAsLeaderWithMismatchedMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1) + assertEquals(Errors.FENCED_INSTANCE_ID, joinGroupResult.error) + } + + @Test + def staticMemberSyncAsLeaderWithInvalidMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, "invalid", + Map.empty, None, None, Some(leaderInstanceId)) + assertEquals(Errors.FENCED_INSTANCE_ID, syncGroupResult.error) + } + + @Test + def staticMemberHeartbeatLeaderWithInvalidMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val validHeartbeatResult = heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation) + assertEquals(Errors.NONE, validHeartbeatResult) + + val invalidHeartbeatResult = heartbeat(groupId, invalidMemberId, rebalanceResult.generation, Some(leaderInstanceId)) + assertEquals(Errors.FENCED_INSTANCE_ID, invalidHeartbeatResult) + } + + @Test + def shouldGetDifferentStaticMemberIdAfterEachRejoin(): Unit = { + val initialResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val timeAdvance = 1 + var lastMemberId = initialResult.leaderId + for (_ <- 1 to 5) { + val joinGroupResult = staticJoinGroupWithPersistence(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, + leaderInstanceId, protocolType, protocols, clockAdvance = timeAdvance) + assertTrue(joinGroupResult.memberId.startsWith(leaderInstanceId)) + assertNotEquals(lastMemberId, joinGroupResult.memberId) + lastMemberId = joinGroupResult.memberId + } + } + + @Test + def testOffsetCommitDeadGroup(): Unit = { + val memberId = "memberId" + + val deadGroupId = "deadGroupId" + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val offsetCommitResult = commitOffsets(deadGroupId, memberId, 1, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.COORDINATOR_NOT_AVAILABLE), offsetCommitResult) + } + + @Test + def staticMemberCommitOffsetWithInvalidMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val syncGroupResult = syncGroupLeader(groupId, rebalanceResult.generation, rebalanceResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val validOffsetCommitResult = commitOffsets(groupId, rebalanceResult.leaderId, rebalanceResult.generation, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), validOffsetCommitResult) + + val invalidOffsetCommitResult = commitOffsets(groupId, invalidMemberId, rebalanceResult.generation, + Map(tip -> offset), Some(leaderInstanceId)) + assertEquals(Map(tip -> Errors.FENCED_INSTANCE_ID), invalidOffsetCommitResult) + } + + @Test + def staticMemberJoinWithUnknownInstanceIdAndKnownMemberId(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val joinGroupResult = staticJoinGroup(groupId, rebalanceResult.leaderId, "unknown_instance", + protocolType, protocolSuperset, clockAdvance = 1) + + assertEquals(Errors.UNKNOWN_MEMBER_ID, joinGroupResult.error) + } + + @Test + def staticMemberReJoinWithIllegalStateAsUnknownMember(): Unit = { + staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + val group = groupCoordinator.groupManager.getGroup(groupId).get + group.transitionTo(PreparingRebalance) + group.transitionTo(Empty) + + // Illegal state exception shall trigger since follower id resides in pending member bucket. + val expectedException = assertThrows(classOf[IllegalStateException], + () => staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, followerInstanceId, protocolType, protocolSuperset, clockAdvance = 1)) + + val message = expectedException.getMessage + assertTrue(message.contains(group.groupId)) + assertTrue(message.contains(followerInstanceId)) + } + + @Test + def testLeaderFailToRejoinBeforeFinalRebalanceTimeoutWithLongSessionTimeout(): Unit = { + groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember() + + timer.advanceClock(DefaultRebalanceTimeout + 1) + // The static leader should already session timeout, moving group towards Empty + assertEquals(Set.empty, getGroup(groupId).allMembers) + assertNull(getGroup(groupId).leaderOrNull) + assertEquals(3, getGroup(groupId).generationId) + assertGroupState(groupState = Empty) + } + + @Test + def testLeaderRejoinBeforeFinalRebalanceTimeoutWithLongSessionTimeout(): Unit = { + groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember() + + // The static leader should be back now, moving group towards CompletingRebalance + val leaderRejoinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols) + checkJoinGroupResult(leaderRejoinGroupResult, + Errors.NONE, + 3, + Set(leaderInstanceId), + CompletingRebalance, + Some(protocolType) + ) + assertEquals(Set(leaderRejoinGroupResult.memberId), getGroup(groupId).allMembers) + assertNotNull(getGroup(groupId).leaderOrNull) + assertEquals(3, getGroup(groupId).generationId) + } + + def groupStuckInRebalanceTimeoutDueToNonjoinedStaticMember(): Unit = { + val longSessionTimeout = DefaultSessionTimeout * 2 + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = longSessionTimeout) + + val dynamicJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocolSuperset, sessionTimeout = longSessionTimeout) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + val dynamicJoinResult = await(dynamicJoinFuture, 100) + // The new dynamic member has been elected as leader + assertEquals(dynamicJoinResult.leaderId, dynamicJoinResult.memberId) + assertEquals(Errors.NONE, dynamicJoinResult.error) + assertEquals(3, dynamicJoinResult.members.size) + assertEquals(2, dynamicJoinResult.generationId) + assertGroupState(groupState = CompletingRebalance) + + assertEquals(Set(rebalanceResult.leaderId, rebalanceResult.followerId, + dynamicJoinResult.memberId), getGroup(groupId).allMembers) + assertEquals(Set(leaderInstanceId, followerInstanceId), + getGroup(groupId).allStaticMembers) + assertEquals(Set(dynamicJoinResult.memberId), getGroup(groupId).allDynamicMembers) + + // Send a special leave group request from static follower, moving group towards PreparingRebalance + val followerLeaveGroupResults = singleLeaveGroup(groupId, rebalanceResult.followerId) + verifyLeaveGroupResult(followerLeaveGroupResults) + assertGroupState(groupState = PreparingRebalance) + + timer.advanceClock(DefaultRebalanceTimeout + 1) + // Only static leader is maintained, and group is stuck at PreparingRebalance stage + assertTrue(getGroup(groupId).allDynamicMembers.isEmpty) + assertEquals(Set(rebalanceResult.leaderId), getGroup(groupId).allMembers) + assertTrue(getGroup(groupId).allDynamicMembers.isEmpty) + assertEquals(2, getGroup(groupId).generationId) + assertGroupState(groupState = PreparingRebalance) + } + + @Test + def testStaticMemberFollowerFailToRejoinBeforeRebalanceTimeout(): Unit = { + // Increase session timeout so that the follower won't be evicted when rebalance timeout is reached. + val initialRebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultRebalanceTimeout * 2) + + val newMemberInstanceId = "newMember" + + val leaderId = initialRebalanceResult.leaderId + + val newMemberJoinGroupFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(newMemberInstanceId)) + assertGroupState(groupState = PreparingRebalance) + + val leaderRejoinGroupResult = staticJoinGroup(groupId, leaderId, leaderInstanceId, protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1) + checkJoinGroupResult(leaderRejoinGroupResult, + Errors.NONE, + initialRebalanceResult.generation + 1, + Set(leaderInstanceId, followerInstanceId, newMemberInstanceId), + CompletingRebalance, + Some(protocolType), + expectedLeaderId = leaderId, + expectedMemberId = leaderId) + + val newMemberJoinGroupResult = Await.result(newMemberJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + assertEquals(Errors.NONE, newMemberJoinGroupResult.error) + checkJoinGroupResult(newMemberJoinGroupResult, + Errors.NONE, + initialRebalanceResult.generation + 1, + Set.empty, + CompletingRebalance, + Some(protocolType), + expectedLeaderId = leaderId) + } + + @Test + def testStaticMemberLeaderFailToRejoinBeforeRebalanceTimeout(): Unit = { + // Increase session timeout so that the leader won't be evicted when rebalance timeout is reached. + val initialRebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId, sessionTimeout = DefaultRebalanceTimeout * 2) + + val newMemberInstanceId = "newMember" + + val newMemberJoinGroupFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(newMemberInstanceId)) + timer.advanceClock(1) + assertGroupState(groupState = PreparingRebalance) + + val oldFollowerRejoinGroupResult = staticJoinGroup(groupId, initialRebalanceResult.followerId, followerInstanceId, protocolType, protocolSuperset, clockAdvance = DefaultRebalanceTimeout + 1) + val newMemberJoinGroupResult = Await.result(newMemberJoinGroupFuture, Duration(1, TimeUnit.MILLISECONDS)) + + val (newLeaderResult, newFollowerResult) = if (oldFollowerRejoinGroupResult.leaderId == oldFollowerRejoinGroupResult.memberId) + (oldFollowerRejoinGroupResult, newMemberJoinGroupResult) + else + (newMemberJoinGroupResult, oldFollowerRejoinGroupResult) + + checkJoinGroupResult(newLeaderResult, + Errors.NONE, + initialRebalanceResult.generation + 1, + Set(leaderInstanceId, followerInstanceId, newMemberInstanceId), + CompletingRebalance, + Some(protocolType)) + + checkJoinGroupResult(newFollowerResult, + Errors.NONE, + initialRebalanceResult.generation + 1, + Set.empty, + CompletingRebalance, + Some(protocolType), + expectedLeaderId = newLeaderResult.memberId) + } + + @Test + def testJoinGroupProtocolTypeIsNotProvidedWhenAnErrorOccurs(): Unit = { + // JoinGroup(leader) + val leaderResponseFuture = sendJoinGroup(groupId, "fake-id", protocolType, + protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout) + + // The Protocol Type is None when there is an error + val leaderJoinGroupResult = await(leaderResponseFuture, 1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, leaderJoinGroupResult.error) + assertEquals(None, leaderJoinGroupResult.protocolType) + } + + @Test + def testJoinGroupReturnsTheProtocolType(): Unit = { + // JoinGroup(leader) + val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout) + + // JoinGroup(follower) + val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(followerInstanceId), DefaultSessionTimeout) + + timer.advanceClock(GroupInitialRebalanceDelay + 1) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + // The Protocol Type is Defined when there is not error + val leaderJoinGroupResult = await(leaderResponseFuture, 1) + assertEquals(Errors.NONE, leaderJoinGroupResult.error) + assertEquals(protocolType, leaderJoinGroupResult.protocolType.orNull) + + // The Protocol Type is Defined when there is not error + val followerJoinGroupResult = await(followerResponseFuture, 1) + assertEquals(Errors.NONE, followerJoinGroupResult.error) + assertEquals(protocolType, followerJoinGroupResult.protocolType.orNull) + } + + @Test + def testSyncGroupReturnsAnErrorWhenProtocolTypeIsInconsistent(): Unit = { + testSyncGroupProtocolTypeAndNameWith(Some("whatever"), None, Errors.INCONSISTENT_GROUP_PROTOCOL, + None, None) + } + + @Test + def testSyncGroupReturnsAnErrorWhenProtocolNameIsInconsistent(): Unit = { + testSyncGroupProtocolTypeAndNameWith(None, Some("whatever"), Errors.INCONSISTENT_GROUP_PROTOCOL, + None, None) + } + + @Test + def testSyncGroupSucceedWhenProtocolTypeAndNameAreNotProvided(): Unit = { + testSyncGroupProtocolTypeAndNameWith(None, None, Errors.NONE, + Some(protocolType), Some(protocolName)) + } + + @Test + def testSyncGroupSucceedWhenProtocolTypeAndNameAreConsistent(): Unit = { + testSyncGroupProtocolTypeAndNameWith(Some(protocolType), Some(protocolName), + Errors.NONE, Some(protocolType), Some(protocolName)) + } + + private def testSyncGroupProtocolTypeAndNameWith(protocolType: Option[String], + protocolName: Option[String], + expectedError: Errors, + expectedProtocolType: Option[String], + expectedProtocolName: Option[String]): Unit = { + // JoinGroup(leader) with the Protocol Type of the group + val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, this.protocolType, + protocolSuperset, Some(leaderInstanceId), DefaultSessionTimeout) + + // JoinGroup(follower) with the Protocol Type of the group + val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, this.protocolType, + protocolSuperset, Some(followerInstanceId), DefaultSessionTimeout) + + timer.advanceClock(GroupInitialRebalanceDelay + 1) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + val leaderJoinGroupResult = await(leaderResponseFuture, 1) + val leaderId = leaderJoinGroupResult.memberId + val generationId = leaderJoinGroupResult.generationId + val followerJoinGroupResult = await(followerResponseFuture, 1) + val followerId = followerJoinGroupResult.memberId + + // SyncGroup with the provided Protocol Type and Name + val leaderSyncGroupResult = syncGroupLeader(groupId, generationId, leaderId, + Map(leaderId -> Array.empty), protocolType, protocolName) + assertEquals(expectedError, leaderSyncGroupResult.error) + assertEquals(expectedProtocolType, leaderSyncGroupResult.protocolType) + assertEquals(expectedProtocolName, leaderSyncGroupResult.protocolName) + + // SyncGroup with the provided Protocol Type and Name + val followerSyncGroupResult = syncGroupFollower(groupId, generationId, followerId, + protocolType, protocolName) + assertEquals(expectedError, followerSyncGroupResult.error) + assertEquals(expectedProtocolType, followerSyncGroupResult.protocolType) + assertEquals(expectedProtocolName, followerSyncGroupResult.protocolName) + } + + private class RebalanceResult(val generation: Int, + val leaderId: String, + val leaderAssignment: Array[Byte], + val followerId: String, + val followerAssignment: Array[Byte]) + /** + * Generate static member rebalance results, including: + * - generation + * - leader id + * - leader assignment + * - follower id + * - follower assignment + */ + private def staticMembersJoinAndRebalance(leaderInstanceId: String, + followerInstanceId: String, + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout): RebalanceResult = { + val leaderResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(leaderInstanceId), sessionTimeout, rebalanceTimeout) + + val followerResponseFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, + protocolSuperset, Some(followerInstanceId), sessionTimeout, rebalanceTimeout) + // The goal for two timer advance is to let first group initial join complete and set newMemberAdded flag to false. Next advance is + // to trigger the rebalance as needed for follower delayed join. One large time advance won't help because we could only populate one + // delayed join from purgatory and the new delayed op is created at that time and never be triggered. + timer.advanceClock(GroupInitialRebalanceDelay + 1) + timer.advanceClock(DefaultRebalanceTimeout + 1) + val newGeneration = 1 + + val leaderJoinGroupResult = await(leaderResponseFuture, 1) + assertEquals(Errors.NONE, leaderJoinGroupResult.error) + assertEquals(newGeneration, leaderJoinGroupResult.generationId) + + val followerJoinGroupResult = await(followerResponseFuture, 1) + assertEquals(Errors.NONE, followerJoinGroupResult.error) + assertEquals(newGeneration, followerJoinGroupResult.generationId) + + val leaderId = leaderJoinGroupResult.memberId + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) + val leaderSyncGroupResult = syncGroupLeader(groupId, leaderJoinGroupResult.generationId, leaderId, Map(leaderId -> Array[Byte]())) + assertEquals(Errors.NONE, leaderSyncGroupResult.error) + assertTrue(getGroup(groupId).is(Stable)) + + val followerId = followerJoinGroupResult.memberId + val followerSyncGroupResult = syncGroupFollower(groupId, leaderJoinGroupResult.generationId, followerId) + assertEquals(Errors.NONE, followerSyncGroupResult.error) + assertTrue(getGroup(groupId).is(Stable)) + + new RebalanceResult(newGeneration, + leaderId, + leaderSyncGroupResult.memberAssignment, + followerId, + followerSyncGroupResult.memberAssignment) + } + + private def checkJoinGroupResult(joinGroupResult: JoinGroupResult, + expectedError: Errors, + expectedGeneration: Int, + expectedGroupInstanceIds: Set[String], + expectedGroupState: GroupState, + expectedProtocolType: Option[String], + expectedLeaderId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID, + expectedMemberId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID, + expectedSkipAssignment: Boolean = false): Unit = { + assertEquals(expectedError, joinGroupResult.error) + assertEquals(expectedGeneration, joinGroupResult.generationId) + assertEquals(expectedGroupInstanceIds.size, joinGroupResult.members.size) + val resultedGroupInstanceIds = joinGroupResult.members.map(member => member.groupInstanceId).toSet + assertEquals(expectedGroupInstanceIds, resultedGroupInstanceIds) + assertGroupState(groupState = expectedGroupState) + assertEquals(expectedProtocolType, joinGroupResult.protocolType) + assertEquals(expectedSkipAssignment, joinGroupResult.skipAssignment) + + if (!expectedLeaderId.equals(JoinGroupRequest.UNKNOWN_MEMBER_ID)) { + assertEquals(expectedLeaderId, joinGroupResult.leaderId) + } + if (!expectedMemberId.equals(JoinGroupRequest.UNKNOWN_MEMBER_ID)) { + assertEquals(expectedMemberId, joinGroupResult.memberId) + } + } + + @Test + def testHeartbeatWrongCoordinator(): Unit = { + val heartbeatResult = heartbeat(otherGroupId, memberId, -1) + assertEquals(Errors.NOT_COORDINATOR, heartbeatResult) + } + + @Test + def testHeartbeatUnknownGroup(): Unit = { + val heartbeatResult = heartbeat(groupId, memberId, -1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult) + } + + @Test + def testHeartbeatDeadGroup(): Unit = { + val memberId = "memberId" + + val deadGroupId = "deadGroupId" + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val heartbeatResult = heartbeat(deadGroupId, memberId, 1) + assertEquals(Errors.COORDINATOR_NOT_AVAILABLE, heartbeatResult) + } + + @Test + def testHeartbeatEmptyGroup(): Unit = { + val memberId = "memberId" + + val group = new GroupMetadata(groupId, Empty, new MockTime()) + val member = new MemberMetadata(memberId, Some(groupInstanceId), + ClientId, ClientHost, DefaultRebalanceTimeout, DefaultSessionTimeout, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + group.add(member) + groupCoordinator.groupManager.addGroup(group) + val heartbeatResult = heartbeat(groupId, memberId, 0) + assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult) + } + + @Test + def testHeartbeatUnknownConsumerExistingGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val otherMemberId = "memberId" + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val heartbeatResult = heartbeat(groupId, otherMemberId, 1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult) + } + + @Test + def testHeartbeatRebalanceInProgress(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val heartbeatResult = heartbeat(groupId, assignedMemberId, 1) + assertEquals(Errors.NONE, heartbeatResult) + } + + @Test + def testHeartbeatIllegalGeneration(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val heartbeatResult = heartbeat(groupId, assignedMemberId, 2) + assertEquals(Errors.ILLEGAL_GENERATION, heartbeatResult) + } + + @Test + def testValidHeartbeat(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1) + assertEquals(Errors.NONE, heartbeatResult) + } + + @Test + def testSessionTimeout(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))) + .thenReturn(HostedPartition.None) + + timer.advanceClock(DefaultSessionTimeout + 100) + + val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1) + assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult) + } + + @Test + def testHeartbeatMaintainsSession(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val sessionTimeout = 1000 + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, + rebalanceTimeout = sessionTimeout, sessionTimeout = sessionTimeout) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + timer.advanceClock(sessionTimeout / 2) + + var heartbeatResult = heartbeat(groupId, assignedConsumerId, 1) + assertEquals(Errors.NONE, heartbeatResult) + + timer.advanceClock(sessionTimeout / 2 + 100) + + heartbeatResult = heartbeat(groupId, assignedConsumerId, 1) + assertEquals(Errors.NONE, heartbeatResult) + } + + @Test + def testCommitMaintainsSession(): Unit = { + val sessionTimeout = 1000 + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols, + rebalanceTimeout = sessionTimeout, sessionTimeout = sessionTimeout) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + timer.advanceClock(sessionTimeout / 2) + + val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + timer.advanceClock(sessionTimeout / 2 + 100) + + val heartbeatResult = heartbeat(groupId, assignedMemberId, 1) + assertEquals(Errors.NONE, heartbeatResult) + } + + @Test + def testSessionTimeoutDuringRebalance(): Unit = { + // create a group with a single member + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + rebalanceTimeout = 2000, sessionTimeout = 1000) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + // now have a new member join to trigger a rebalance + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + timer.advanceClock(500) + + var heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId) + assertEquals(Errors.REBALANCE_IN_PROGRESS, heartbeatResult) + + // letting the session expire should make the member fall out of the group + timer.advanceClock(1100) + + heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId) + assertEquals(Errors.UNKNOWN_MEMBER_ID, heartbeatResult) + + // and the rebalance should complete with only the new member + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, otherJoinResult.error) + } + + @Test + def testRebalanceCompletesBeforeMemberJoins(): Unit = { + // create a group with a single member + val firstJoinResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols, + rebalanceTimeout = 1200, sessionTimeout = 1000) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + // now have a new member join to trigger a rebalance + val otherMemberSessionTimeout = DefaultSessionTimeout + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + // send a couple heartbeats to keep the member alive while the rebalance finishes + var expectedResultList = List(Errors.REBALANCE_IN_PROGRESS, Errors.REBALANCE_IN_PROGRESS) + for (expectedResult <- expectedResultList) { + timer.advanceClock(otherMemberSessionTimeout) + val heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId) + assertEquals(expectedResult, heartbeatResult) + } + + // now timeout the rebalance + timer.advanceClock(otherMemberSessionTimeout) + val otherJoinResult = await(otherJoinFuture, otherMemberSessionTimeout+100) + val otherMemberId = otherJoinResult.memberId + val otherGenerationId = otherJoinResult.generationId + val syncResult = syncGroupLeader(groupId, otherGenerationId, otherMemberId, Map(otherMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncResult.error) + + // the unjoined static member should be remained in the group before session timeout. + assertEquals(Errors.NONE, otherJoinResult.error) + var heartbeatResult = heartbeat(groupId, firstMemberId, firstGenerationId) + assertEquals(Errors.ILLEGAL_GENERATION, heartbeatResult) + + expectedResultList = List(Errors.NONE, Errors.NONE, Errors.REBALANCE_IN_PROGRESS) + + // now session timeout the unjoined member. Still keeping the new member. + for (expectedResult <- expectedResultList) { + timer.advanceClock(otherMemberSessionTimeout) + heartbeatResult = heartbeat(groupId, otherMemberId, otherGenerationId) + assertEquals(expectedResult, heartbeatResult) + } + + val otherRejoinGroupFuture = sendJoinGroup(groupId, otherMemberId, protocolType, protocols) + val otherReJoinResult = await(otherRejoinGroupFuture, otherMemberSessionTimeout+100) + assertEquals(Errors.NONE, otherReJoinResult.error) + + val otherRejoinGenerationId = otherReJoinResult.generationId + val reSyncResult = syncGroupLeader(groupId, otherRejoinGenerationId, otherMemberId, Map(otherMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, reSyncResult.error) + + // the joined member should get heart beat response with no error. Let the new member keep heartbeating for a while + // to verify that no new rebalance is triggered unexpectedly + for ( _ <- 1 to 20) { + timer.advanceClock(500) + heartbeatResult = heartbeat(groupId, otherMemberId, otherRejoinGenerationId) + assertEquals(Errors.NONE, heartbeatResult) + } + } + + @Test + def testSyncGroupEmptyAssignment(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map()) + assertEquals(Errors.NONE, syncGroupResult.error) + assertTrue(syncGroupResult.memberAssignment.isEmpty) + + val heartbeatResult = heartbeat(groupId, assignedConsumerId, 1) + assertEquals(Errors.NONE, heartbeatResult) + } + + @Test + def testSyncGroupNotCoordinator(): Unit = { + val generation = 1 + + val syncGroupResult = syncGroupFollower(otherGroupId, generation, memberId) + assertEquals(Errors.NOT_COORDINATOR, syncGroupResult.error) + } + + @Test + def testSyncGroupFromUnknownGroup(): Unit = { + val syncGroupResult = syncGroupFollower(groupId, 1, memberId) + assertEquals(Errors.UNKNOWN_MEMBER_ID, syncGroupResult.error) + } + + @Test + def testSyncGroupFromUnknownMember(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]())) + val syncGroupError = syncGroupResult.error + assertEquals(Errors.NONE, syncGroupError) + + val unknownMemberId = "blah" + val unknownMemberSyncResult = syncGroupFollower(groupId, generationId, unknownMemberId) + assertEquals(Errors.UNKNOWN_MEMBER_ID, unknownMemberSyncResult.error) + } + + @Test + def testSyncGroupFromIllegalGeneration(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + assertEquals(Errors.NONE, joinGroupResult.error) + + // send the sync group with an invalid generation + val syncGroupResult = syncGroupLeader(groupId, generationId+1, assignedConsumerId, Map(assignedConsumerId -> Array[Byte]())) + assertEquals(Errors.ILLEGAL_GENERATION, syncGroupResult.error) + } + + @Test + def testJoinGroupFromUnchangedFollowerDoesNotRebalance(): Unit = { + // to get a group of two members: + // 1. join and sync with a single member (because we can't immediately join with two members) + // 2. join and sync with the first member and a new member + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + assertTrue(joinResult.generationId == otherJoinResult.generationId) + + assertEquals(firstMemberId, joinResult.leaderId) + assertEquals(firstMemberId, otherJoinResult.leaderId) + + val nextGenerationId = joinResult.generationId + + // this shouldn't cause a rebalance since protocol information hasn't changed + val followerJoinResult = await(sendJoinGroup(groupId, otherJoinResult.memberId, protocolType, protocols), 1) + + assertEquals(Errors.NONE, followerJoinResult.error) + assertEquals(nextGenerationId, followerJoinResult.generationId) + } + + @Test + def testJoinGroupFromUnchangedLeaderShouldRebalance(): Unit = { + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + // join groups from the leader should force the group to rebalance, which allows the + // leader to push new assignments when local metadata changes + + val secondJoinResult = await(sendJoinGroup(groupId, firstMemberId, protocolType, protocols), 1) + + assertEquals(Errors.NONE, secondJoinResult.error) + assertNotEquals(firstGenerationId, secondJoinResult.generationId) + } + + /** + * Test if the following scenario completes a rebalance correctly: A new member starts a JoinGroup request with + * an UNKNOWN_MEMBER_ID, attempting to join a stable group. But never initiates the second JoinGroup request with + * the provided member ID and times out. The test checks if original member remains the sole member in this group, + * which should remain stable throughout this test. + */ + @Test + def testSecondMemberPartiallyJoinAndTimeout(): Unit = { + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + // Starting sync group leader + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + timer.advanceClock(100) + assertEquals(Set(firstMemberId), groupCoordinator.groupManager.getGroup(groupId).get.allMembers) + assertEquals(groupCoordinator.groupManager.getGroup(groupId).get.allMembers, + groupCoordinator.groupManager.getGroup(groupId).get.allDynamicMembers) + assertEquals(0, groupCoordinator.groupManager.getGroup(groupId).get.numPending) + val group = groupCoordinator.groupManager.getGroup(groupId).get + + // ensure the group is stable before a new member initiates join request + assertEquals(Stable, group.currentState) + + // new member initiates join group + val secondJoinResult = joinGroupPartial(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + assertEquals(Errors.MEMBER_ID_REQUIRED, secondJoinResult.error) + assertEquals(1, group.numPending) + assertEquals(Stable, group.currentState) + + // advance clock to timeout the pending member + assertEquals(Set(firstMemberId), group.allMembers) + assertEquals(1, group.numPending) + timer.advanceClock(300) + + // original (firstMember) member sends heartbeats to prevent session timeouts. + val heartbeatResult = heartbeat(groupId, firstMemberId, 1) + assertEquals(Errors.NONE, heartbeatResult) + + // timeout the pending member + timer.advanceClock(300) + + // at this point the second member should have been removed from pending list (session timeout), + // and the group should be in Stable state with only the first member in it. + assertEquals(Set(firstMemberId), group.allMembers) + assertEquals(0, group.numPending) + assertEquals(Stable, group.currentState) + assertTrue(group.has(firstMemberId)) + } + + /** + * Create a group with two members in Stable state. Create a third pending member by completing it's first JoinGroup + * request without a member id. + */ + private def setupGroupWithPendingMember(): JoinGroupResult = { + // add the first member + val joinResult1 = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + assertGroupState(groupState = CompletingRebalance) + + // now the group is stable, with the one member that joined above + val firstSyncResult = syncGroupLeader(groupId, joinResult1.generationId, joinResult1.memberId, Map(joinResult1.memberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + assertGroupState(groupState = Stable) + + // start the join for the second member + val secondJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + // rejoin the first member back into the group + val firstJoinFuture = sendJoinGroup(groupId, joinResult1.memberId, protocolType, protocols) + val firstMemberJoinResult = await(firstJoinFuture, DefaultSessionTimeout+100) + val secondMemberJoinResult = await(secondJoinFuture, DefaultSessionTimeout+100) + assertGroupState(groupState = CompletingRebalance) + + // stabilize the group + val secondSyncResult = syncGroupLeader(groupId, firstMemberJoinResult.generationId, joinResult1.memberId, Map(joinResult1.memberId -> Array[Byte]())) + assertEquals(Errors.NONE, secondSyncResult.error) + assertGroupState(groupState = Stable) + + // re-join an existing member, to transition the group to PreparingRebalance state. + sendJoinGroup(groupId, firstMemberJoinResult.memberId, protocolType, protocols) + assertGroupState(groupState = PreparingRebalance) + + // create a pending member in the group + val pendingMember = joinGroupPartial(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, sessionTimeout=100) + assertEquals(1, groupCoordinator.groupManager.getGroup(groupId).get.numPending) + + // re-join the second existing member + sendJoinGroup(groupId, secondMemberJoinResult.memberId, protocolType, protocols) + assertGroupState(groupState = PreparingRebalance) + assertEquals(1, groupCoordinator.groupManager.getGroup(groupId).get.numPending) + + pendingMember + } + + /** + * Setup a group in with a pending member. The test checks if the a pending member joining completes the rebalancing + * operation + */ + @Test + def testJoinGroupCompletionWhenPendingMemberJoins(): Unit = { + val pendingMember = setupGroupWithPendingMember() + + // compete join group for the pending member + val pendingMemberJoinFuture = sendJoinGroup(groupId, pendingMember.memberId, protocolType, protocols) + await(pendingMemberJoinFuture, DefaultSessionTimeout+100) + + assertGroupState(groupState = CompletingRebalance) + assertEquals(3, group().allMembers.size) + assertEquals(0, group().numPending) + } + + /** + * Setup a group in with a pending member. The test checks if the timeout of the pending member will + * cause the group to return to a CompletingRebalance state. + */ + @Test + def testJoinGroupCompletionWhenPendingMemberTimesOut(): Unit = { + setupGroupWithPendingMember() + + // Advancing Clock by > 100 (session timeout for third and fourth member) + // and < 500 (for first and second members). This will force the coordinator to attempt join + // completion on heartbeat expiration (since we are in PendingRebalance stage). + timer.advanceClock(120) + + assertGroupState(groupState = CompletingRebalance) + assertEquals(2, group().allMembers.size) + assertEquals(0, group().numPending) + } + + @Test + def testPendingMembersLeavesGroup(): Unit = { + val pending = setupGroupWithPendingMember() + + val leaveGroupResults = singleLeaveGroup(groupId, pending.memberId) + verifyLeaveGroupResult(leaveGroupResults) + + assertGroupState(groupState = CompletingRebalance) + assertEquals(2, group().allMembers.size) + assertEquals(2, group().allDynamicMembers.size) + assertEquals(0, group().numPending) + } + + private def verifyHeartbeat( + joinGroupResult: JoinGroupResult, + expectedError: Errors + ): Unit = { + val heartbeatResult = heartbeat( + groupId, + joinGroupResult.memberId, + joinGroupResult.generationId + ) + assertEquals(expectedError, heartbeatResult) + } + + private def joinWithNMembers(nbMembers: Int): Seq[JoinGroupResult] = { + val requiredKnownMemberId = true + + // First JoinRequests + var futures = 1.to(nbMembers).map { _ => + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + // Get back the assigned member ids + val memberIds = futures.map(await(_, 1).memberId) + + // Second JoinRequests + futures = memberIds.map { memberId => + sendJoinGroup(groupId, memberId, protocolType, protocols, + None, DefaultSessionTimeout, DefaultRebalanceTimeout, requiredKnownMemberId) + } + + timer.advanceClock(GroupInitialRebalanceDelay + 1) + timer.advanceClock(DefaultRebalanceTimeout + 1) + + futures.map(await(_, 1)) + } + + @Test + def testRebalanceTimesOutWhenSyncRequestIsNotReceived(): Unit = { + // This test case ensure that the DelayedSync does kick out all members + // if they don't sent a sync request before the rebalance timeout. The + // group is in the Stable state in this case. + val results = joinWithNMembers(nbMembers = 3) + assertEquals(Set(Errors.NONE), results.map(_.error).toSet) + + // Advance time + timer.advanceClock(DefaultRebalanceTimeout / 2) + + // Heartbeats to ensure that heartbeating does not interfere with the + // delayed sync operation. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + + // Advance part the rebalance timeout to trigger the delayed operation. + when(replicaManager.onlinePartition(any[TopicPartition])) + .thenReturn(Some(mock(classOf[Partition]))) + + timer.advanceClock(DefaultRebalanceTimeout / 2 + 1) + + // Heartbeats fail because none of the members have sent the sync request + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.UNKNOWN_MEMBER_ID) + } + } + + @Test + def testRebalanceTimesOutWhenSyncRequestIsNotReceivedFromFollowers(): Unit = { + // This test case ensure that the DelayedSync does kick out the followers + // if they don't sent a sync request before the rebalance timeout. The + // group is in the Stable state in this case. + val results = joinWithNMembers(nbMembers = 3) + assertEquals(Set(Errors.NONE), results.map(_.error).toSet) + + // Advance time + timer.advanceClock(DefaultRebalanceTimeout / 2) + + // Heartbeats to ensure that heartbeating does not interfere with the + // delayed sync operation. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + + // Leader sends Sync + val assignments = results.map(result => result.memberId -> Array.empty[Byte]).toMap + val leaderResult = sendSyncGroupLeader(groupId, results.head.generationId, results.head.memberId, + Some(protocolType), Some(protocolName), None, assignments) + + assertEquals(Errors.NONE, await(leaderResult, 1).error) + + // Leader should be able to heartbeat + verifyHeartbeat(results.head, Errors.NONE) + + // Advance part the rebalance timeout to trigger the delayed operation. + timer.advanceClock(DefaultRebalanceTimeout / 2 + 1) + + // Leader should be able to heartbeat + verifyHeartbeat(results.head, Errors.REBALANCE_IN_PROGRESS) + + // Followers should have been removed. + results.tail.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.UNKNOWN_MEMBER_ID) + } + } + + @Test + def testRebalanceTimesOutWhenSyncRequestIsNotReceivedFromLeaders(): Unit = { + // This test case ensure that the DelayedSync does kick out the leader + // if it does not sent a sync request before the rebalance timeout. The + // group is in the CompletingRebalance state in this case. + val results = joinWithNMembers(nbMembers = 3) + assertEquals(Set(Errors.NONE), results.map(_.error).toSet) + + // Advance time + timer.advanceClock(DefaultRebalanceTimeout / 2) + + // Heartbeats to ensure that heartbeating does not interfere with the + // delayed sync operation. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + + // Followers send Sync + val followerResults = results.tail.map { joinGroupResult => + sendSyncGroupFollower(groupId, joinGroupResult.generationId, joinGroupResult.memberId, + Some(protocolType), Some(protocolName), None) + } + + // Advance part the rebalance timeout to trigger the delayed operation. + timer.advanceClock(DefaultRebalanceTimeout / 2 + 1) + + val followerErrors = followerResults.map(await(_, 1).error) + assertEquals(Set(Errors.REBALANCE_IN_PROGRESS), followerErrors.toSet) + + // Leader should have been removed. + verifyHeartbeat(results.head, Errors.UNKNOWN_MEMBER_ID) + + // Followers should be able to heartbeat. + results.tail.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.REBALANCE_IN_PROGRESS) + } + } + + @Test + def testRebalanceDoesNotTimeOutWhenAllSyncAreReceived(): Unit = { + // This test case ensure that the DelayedSync does not kick any + // members out when they have all sent their sync requests. + val results = joinWithNMembers(nbMembers = 3) + assertEquals(Set(Errors.NONE), results.map(_.error).toSet) + + // Advance time + timer.advanceClock(DefaultRebalanceTimeout / 2) + + // Heartbeats to ensure that heartbeating does not interfere with the + // delayed sync operation. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + + val assignments = results.map(result => result.memberId -> Array.empty[Byte]).toMap + val leaderResult = sendSyncGroupLeader(groupId, results.head.generationId, results.head.memberId, + Some(protocolType), Some(protocolName), None, assignments) + + assertEquals(Errors.NONE, await(leaderResult, 1).error) + + // Followers send Sync + val followerResults = results.tail.map { joinGroupResult => + sendSyncGroupFollower(groupId, joinGroupResult.generationId, joinGroupResult.memberId, + Some(protocolType), Some(protocolName), None) + } + + val followerErrors = followerResults.map(await(_, 1).error) + assertEquals(Set(Errors.NONE), followerErrors.toSet) + + // Advance past the rebalance timeout to expire the Sync timeout. All + // members should remain and the group should not rebalance. + timer.advanceClock(DefaultRebalanceTimeout / 2 + 1) + + // Followers should be able to heartbeat. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + + // Advance a bit more. + timer.advanceClock(DefaultRebalanceTimeout / 2) + + // Followers should be able to heartbeat. + results.foreach { joinGroupResult => + verifyHeartbeat(joinGroupResult, Errors.NONE) + } + } + + private def group(groupId: String = groupId) = { + groupCoordinator.groupManager.getGroup(groupId) match { + case Some(g) => g + case None => null + } + } + + private def assertGroupState(groupId: String = groupId, + groupState: GroupState): Unit = { + groupCoordinator.groupManager.getGroup(groupId) match { + case Some(group) => assertEquals(groupState, group.currentState) + case None => fail(s"Group $groupId not found in coordinator") + } + } + + private def joinGroupPartial(groupId: String, + memberId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout): JoinGroupResult = { + val requireKnownMemberId = true + val responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId) + Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + } + + @Test + def testLeaderFailureInSyncGroup(): Unit = { + // to get a group of two members: + // 1. join and sync with a single member (because we can't immediately join with two members) + // 2. join and sync with the first member and a new member + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + assertTrue(joinResult.generationId == otherJoinResult.generationId) + + assertEquals(firstMemberId, joinResult.leaderId) + assertEquals(firstMemberId, otherJoinResult.leaderId) + + val nextGenerationId = joinResult.generationId + + // with no leader SyncGroup, the follower's request should fail with an error indicating + // that it should rejoin + val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId, None, None, None) + + timer.advanceClock(DefaultSessionTimeout + 100) + + val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100) + assertEquals(Errors.REBALANCE_IN_PROGRESS, followerSyncResult.error) + } + + @Test + def testSyncGroupFollowerAfterLeader(): Unit = { + // to get a group of two members: + // 1. join and sync with a single member (because we can't immediately join with two members) + // 2. join and sync with the first member and a new member + + val firstJoinResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = firstJoinResult.memberId + val firstGenerationId = firstJoinResult.generationId + assertEquals(firstMemberId, firstJoinResult.leaderId) + assertEquals(Errors.NONE, firstJoinResult.error) + + val firstSyncResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, firstSyncResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + assertTrue(joinResult.generationId == otherJoinResult.generationId) + + assertEquals(firstMemberId, joinResult.leaderId) + assertEquals(firstMemberId, otherJoinResult.leaderId) + + val nextGenerationId = joinResult.generationId + val leaderId = firstMemberId + val leaderAssignment = Array[Byte](0) + val followerId = otherJoinResult.memberId + val followerAssignment = Array[Byte](1) + + val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId, + Map(leaderId -> leaderAssignment, followerId -> followerAssignment)) + assertEquals(Errors.NONE, leaderSyncResult.error) + assertEquals(leaderAssignment, leaderSyncResult.memberAssignment) + + val followerSyncResult = syncGroupFollower(groupId, nextGenerationId, otherJoinResult.memberId) + assertEquals(Errors.NONE, followerSyncResult.error) + assertEquals(followerAssignment, followerSyncResult.memberAssignment) + } + + @Test + def testSyncGroupLeaderAfterFollower(): Unit = { + // to get a group of two members: + // 1. join and sync with a single member (because we can't immediately join with two members) + // 2. join and sync with the first member and a new member + + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val firstMemberId = joinGroupResult.memberId + val firstGenerationId = joinGroupResult.generationId + assertEquals(firstMemberId, joinGroupResult.leaderId) + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, firstGenerationId, firstMemberId, Map(firstMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val otherJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + val joinFuture = sendJoinGroup(groupId, firstMemberId, protocolType, protocols) + + val joinResult = await(joinFuture, DefaultSessionTimeout+100) + val otherJoinResult = await(otherJoinFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, joinResult.error) + assertEquals(Errors.NONE, otherJoinResult.error) + assertTrue(joinResult.generationId == otherJoinResult.generationId) + + val nextGenerationId = joinResult.generationId + val leaderId = joinResult.leaderId + val leaderAssignment = Array[Byte](0) + val followerId = otherJoinResult.memberId + val followerAssignment = Array[Byte](1) + + assertEquals(firstMemberId, joinResult.leaderId) + assertEquals(firstMemberId, otherJoinResult.leaderId) + + val followerSyncFuture = sendSyncGroupFollower(groupId, nextGenerationId, followerId, None, None, None) + + val leaderSyncResult = syncGroupLeader(groupId, nextGenerationId, leaderId, + Map(leaderId -> leaderAssignment, followerId -> followerAssignment)) + assertEquals(Errors.NONE, leaderSyncResult.error) + assertEquals(leaderAssignment, leaderSyncResult.memberAssignment) + + val followerSyncResult = await(followerSyncFuture, DefaultSessionTimeout+100) + assertEquals(Errors.NONE, followerSyncResult.error) + assertEquals(followerAssignment, followerSyncResult.memberAssignment) + } + + @Test + def testCommitOffsetFromUnknownGroup(): Unit = { + val generationId = 1 + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + val commitOffsetResult = commitOffsets(groupId, memberId, generationId, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.ILLEGAL_GENERATION), commitOffsetResult) + } + + @Test + def testCommitOffsetWithDefaultGeneration(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, + OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + } + + @Test + def testCommitOffsetsAfterGroupIsEmpty(): Unit = { + // Tests the scenario where the reset offset tool modifies the offsets + // of a group after it becomes empty + + // A group member joins + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + // and leaves. + val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId) + verifyLeaveGroupResult(leaveGroupResults) + + // The simple offset commit should now fail + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, + OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, error) + assertEquals(Some(0), partitionData.get(tip.topicPartition).map(_.offset)) + } + + @Test + def testFetchOffsets(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = 97L + val metadata = "some metadata" + val leaderEpoch = OptionalInt.of(15) + val offsetAndMetadata = new OffsetAndMetadata(offset, leaderEpoch, metadata, timer.time.milliseconds(), OptionalLong.empty) + + val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, + OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tip -> offsetAndMetadata)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, error) + + val maybePartitionData = partitionData.get(tip.topicPartition) + assertTrue(maybePartitionData.isDefined) + assertEquals(offset, maybePartitionData.get.offset) + assertEquals(metadata, maybePartitionData.get.metadata) + assertEquals(leaderEpoch.getAsInt, maybePartitionData.get.leaderEpoch.get) + } + + @Test + def testCommitAndFetchOffsetsWithEmptyGroup(): Unit = { + // For backwards compatibility, the coordinator supports committing/fetching offsets with an empty groupId. + // To allow inspection and removal of the empty group, we must also support DescribeGroups and DeleteGroups + + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val groupId = "" + + val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, + OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (fetchError, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, fetchError) + assertEquals(Some(0), partitionData.get(tip.topicPartition).map(_.offset)) + + var (describeError, describeErrorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.NONE, describeError) + assertTrue(describeErrorMessage.isEmpty) + assertEquals(Empty.toString, summary.state) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val deleteErrors = groupCoordinator.handleDeleteGroups(Set(groupId)) + assertEquals(Errors.NONE, deleteErrors(groupId)) + + val (err, data) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, err) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), data.get(tip.topicPartition).map(_.offset)) + } + + @Test + def testBasicFetchTxnOffsets(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + + // Validate that the offset isn't materialized yet. + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tip.topicPartition).map(_.offset)) + + val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + + // Send commit marker. + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT) + + // Validate that committed offset is materialized. + val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, secondReqError) + assertEquals(Some(0), secondReqPartitionData.get(tip.topicPartition).map(_.offset)) + } + + @Test + def testFetchTxnOffsetsWithAbort(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tip.topicPartition).map(_.offset)) + + val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + + // Validate that the pending commit is discarded. + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT) + + val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, secondReqError) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tip.topicPartition).map(_.offset)) + } + + @Test + def testFetchPendingTxnOffsetsWithAbort(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val nonExistTp = new TopicPartition("non-exist-topic", 0) + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition, nonExistTp))) + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tip.topicPartition).map(_.offset)) + assertEquals(Some(Errors.UNSTABLE_OFFSET_COMMIT), partitionData.get(tip.topicPartition).map(_.error)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(nonExistTp).map(_.offset)) + assertEquals(Some(Errors.NONE), partitionData.get(nonExistTp).map(_.error)) + + val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + + // Validate that the pending commit is discarded. + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT) + + val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, secondReqError) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tip.topicPartition).map(_.offset)) + assertEquals(Some(Errors.NONE), secondReqPartitionData.get(tip.topicPartition).map(_.error)) + } + + @Test + def testFetchPendingTxnOffsetsWithCommit(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "offset") + val offset = offsetAndMetadata(25) + val producerId = 1000L + val producerEpoch : Short = 2 + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tip.topicPartition).map(_.offset)) + assertEquals(Some(Errors.UNSTABLE_OFFSET_COMMIT), partitionData.get(tip.topicPartition).map(_.error)) + + val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + + // Validate that the pending commit is committed + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT) + + val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, secondReqError) + assertEquals(Some(25), secondReqPartitionData.get(tip.topicPartition).map(_.offset)) + assertEquals(Some(Errors.NONE), secondReqPartitionData.get(tip.topicPartition).map(_.error)) + } + + @Test + def testFetchTxnOffsetsIgnoreSpuriousCommit(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tip.topicPartition).map(_.offset)) + + val offsetsTopic = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.ABORT) + + val (secondReqError, secondReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, secondReqError) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), secondReqPartitionData.get(tip.topicPartition).map(_.offset)) + + // Ignore spurious commit. + handleTxnCompletion(producerId, List(offsetsTopic), TransactionResult.COMMIT) + + val (thirdReqError, thirdReqPartitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tip.topicPartition))) + assertEquals(Errors.NONE, thirdReqError) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), thirdReqPartitionData.get(tip.topicPartition).map(_.offset)) + } + + @Test + def testFetchTxnOffsetsOneProducerMultipleGroups(): Unit = { + // One producer, two groups located on separate offsets topic partitions. + // Both group have pending offset commits. + // Marker for only one partition is received. That commit should be materialized while the other should not. + + val topicIdPartitions = List( + new TopicIdPartition(Uuid.randomUuid(), 0, "topic1"), + new TopicIdPartition(Uuid.randomUuid(), 0, "topic2") + ) + val offsets = List(offsetAndMetadata(10), offsetAndMetadata(15)) + val producerId = 1000L + val producerEpoch: Short = 3 + + val groupIds = List(groupId, otherGroupId) + val offsetTopicPartitions = List(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId)), + new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(otherGroupId))) + + groupCoordinator.groupManager.addOwnedPartition(offsetTopicPartitions(1).partition) + val errors = mutable.ArrayBuffer[Errors]() + val partitionData = mutable.ArrayBuffer[scala.collection.Map[TopicPartition, OffsetFetchResponse.PartitionData]]() + + val commitOffsetResults = mutable.ArrayBuffer[CommitOffsetCallbackParams]() + + // Ensure that the two groups map to different partitions. + assertNotEquals(offsetTopicPartitions(0), offsetTopicPartitions(1)) + + commitOffsetResults.append(commitTransactionalOffsets(groupId, producerId, producerEpoch, Map(topicIdPartitions(0) -> offsets(0)))) + assertEquals(Errors.NONE, commitOffsetResults(0)(topicIdPartitions(0))) + commitOffsetResults.append(commitTransactionalOffsets(otherGroupId, producerId, producerEpoch, Map(topicIdPartitions(1) -> offsets(1)))) + assertEquals(Errors.NONE, commitOffsetResults(1)(topicIdPartitions(1))) + + // We got a commit for only one __consumer_offsets partition. We should only materialize it's group offsets. + val topicPartitions = topicIdPartitions.map(_.topicPartition) + handleTxnCompletion(producerId, List(offsetTopicPartitions(0)), TransactionResult.COMMIT) + groupCoordinator.handleFetchOffsets(groupIds(0), requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + + groupCoordinator.handleFetchOffsets(groupIds(1), requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + + assertEquals(2, errors.size) + assertEquals(Errors.NONE, errors(0)) + assertEquals(Errors.NONE, errors(1)) + + // Exactly one offset commit should have been materialized. + assertEquals(Some(offsets(0).committedOffset), partitionData(0).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(topicPartitions(1)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(topicPartitions(1)).map(_.offset)) + + // Now we receive the other marker. + handleTxnCompletion(producerId, List(offsetTopicPartitions(1)), TransactionResult.COMMIT) + errors.clear() + partitionData.clear() + groupCoordinator.handleFetchOffsets(groupIds(0), requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + + groupCoordinator.handleFetchOffsets(groupIds(1), requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + // Two offsets should have been materialized + assertEquals(Some(offsets(0).committedOffset), partitionData(0).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(topicPartitions(1)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(1).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(offsets(1).committedOffset), partitionData(1).get(topicPartitions(1)).map(_.offset)) + } + + @Test + def testFetchTxnOffsetsMultipleProducersOneGroup(): Unit = { + // One group, two producers + // Different producers will commit offsets for different partitions. + // Each partition's offsets should be materialized when the corresponding producer's marker is received. + + val topicIdPartitions = List( + new TopicIdPartition(Uuid.randomUuid(), 0, "topic1"), + new TopicIdPartition(Uuid.randomUuid(), 0, "topic2") + ) + val offsets = List(offsetAndMetadata(10), offsetAndMetadata(15)) + val producerIds = List(1000L, 1005L) + val producerEpochs: Seq[Short] = List(3, 4) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId)) + + val errors = mutable.ArrayBuffer[Errors]() + val partitionData = mutable.ArrayBuffer[scala.collection.Map[TopicPartition, OffsetFetchResponse.PartitionData]]() + + val commitOffsetResults = mutable.ArrayBuffer[CommitOffsetCallbackParams]() + + // producer0 commits the offsets for partition0 + commitOffsetResults.append(commitTransactionalOffsets(groupId, producerIds(0), producerEpochs(0), Map(topicIdPartitions(0) -> offsets(0)))) + assertEquals(Errors.NONE, commitOffsetResults(0)(topicIdPartitions(0))) + + // producer1 commits the offsets for partition1 + commitOffsetResults.append(commitTransactionalOffsets(groupId, producerIds(1), producerEpochs(1), Map(topicIdPartitions(1) -> offsets(1)))) + assertEquals(Errors.NONE, commitOffsetResults(1)(topicIdPartitions(1))) + + // producer0 commits its transaction. + val topicPartitions = topicIdPartitions.map(_.topicPartition) + handleTxnCompletion(producerIds(0), List(offsetTopicPartition), TransactionResult.COMMIT) + groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + + assertEquals(Errors.NONE, errors(0)) + + // We should only see the offset commit for producer0 + assertEquals(Some(offsets(0).committedOffset), partitionData(0).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData(0).get(topicPartitions(1)).map(_.offset)) + + // producer1 now commits its transaction. + handleTxnCompletion(producerIds(1), List(offsetTopicPartition), TransactionResult.COMMIT) + + groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(topicPartitions)) match { + case (error, partData) => + errors.append(error) + partitionData.append(partData) + case _ => + } + + assertEquals(Errors.NONE, errors(1)) + + // We should now see the offset commits for both producers. + assertEquals(Some(offsets(0).committedOffset), partitionData(1).get(topicPartitions(0)).map(_.offset)) + assertEquals(Some(offsets(1).committedOffset), partitionData(1).get(topicPartitions(1)).map(_.offset)) + } + + @Test + def testFetchOffsetForUnknownPartition(): Unit = { + val tp = new TopicPartition("topic", 0) + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable, Some(Seq(tp))) + assertEquals(Errors.NONE, error) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), partitionData.get(tp).map(_.offset)) + } + + @Test + def testFetchOffsetNotCoordinatorForGroup(): Unit = { + val tp = new TopicPartition("topic", 0) + val (error, partitionData) = groupCoordinator.handleFetchOffsets(otherGroupId, requireStable, Some(Seq(tp))) + assertEquals(Errors.NOT_COORDINATOR, error) + assertTrue(partitionData.isEmpty) + } + + @Test + def testFetchAllOffsets(): Unit = { + val tip1 = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val tip2 = new TopicIdPartition(tip1.topicId, 1, "topic") + val tip3 = new TopicIdPartition(Uuid.randomUuid(), 0, "other-topic") + val offset1 = offsetAndMetadata(15) + val offset2 = offsetAndMetadata(16) + val offset3 = offsetAndMetadata(17) + + assertEquals((Errors.NONE, Map.empty), groupCoordinator.handleFetchOffsets(groupId, requireStable)) + + val commitOffsetResult = commitOffsets(groupId, OffsetCommitRequest.DEFAULT_MEMBER_ID, + OffsetCommitRequest.DEFAULT_GENERATION_ID, Map(tip1 -> offset1, tip2 -> offset2, tip3 -> offset3)) + assertEquals(Map(tip1 -> Errors.NONE, tip2 -> Errors.NONE, tip3 -> Errors.NONE), commitOffsetResult) + + val (error, partitionData) = groupCoordinator.handleFetchOffsets(groupId, requireStable) + assertEquals(Errors.NONE, error) + assertEquals(3, partitionData.size) + assertTrue(partitionData.forall(_._2.error == Errors.NONE)) + assertEquals(Some(offset1.committedOffset), partitionData.get(tip1.topicPartition).map(_.offset)) + assertEquals(Some(offset2.committedOffset), partitionData.get(tip2.topicPartition).map(_.offset)) + assertEquals(Some(offset3.committedOffset), partitionData.get(tip3.topicPartition).map(_.offset)) + } + + @Test + def testCommitOffsetInCompletingRebalance(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.REBALANCE_IN_PROGRESS), commitOffsetResult) + } + + @Test + def testCommitOffsetInCompletingRebalanceFromUnknownMemberId(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val tip = new TopicIdPartition(Uuid.randomUuid(), 0 , "topic") + val offset = offsetAndMetadata(0) + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val commitOffsetResult = commitOffsets(groupId, memberId, generationId, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.UNKNOWN_MEMBER_ID), commitOffsetResult) + } + + @Test + def testCommitOffsetInCompletingRebalanceFromIllegalGeneration(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val commitOffsetResult = commitOffsets(groupId, assignedMemberId, generationId + 1, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.ILLEGAL_GENERATION), commitOffsetResult) + } + + @Test + def testManualCommitOffsetShouldNotValidateMemberIdAndInstanceId(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + + var commitOffsetResult = commitOffsets( + groupId, + JoinGroupRequest.UNKNOWN_MEMBER_ID, + -1, + Map(tip -> offsetAndMetadata(0)), + Some("instance-id") + ) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + commitOffsetResult = commitOffsets( + groupId, + "unknown", + -1, + Map(tip -> offsetAndMetadata(0)), + None + ) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + } + + @Test + def testTxnCommitOffsetWithFencedInstanceId(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val leaderNoMemberIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID, groupInstanceId = Some(leaderInstanceId)) + assertEquals(Map(tip -> Errors.FENCED_INSTANCE_ID), leaderNoMemberIdCommitOffsetResult) + + val leaderInvalidMemberIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), memberId = "invalid-member", groupInstanceId = Some(leaderInstanceId)) + assertEquals(Map(tip -> Errors.FENCED_INSTANCE_ID), leaderInvalidMemberIdCommitOffsetResult) + + val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), rebalanceResult.leaderId, Some(leaderInstanceId), rebalanceResult.generation) + assertEquals(Map(tip -> Errors.NONE), leaderCommitOffsetResult) + } + + @Test + def testTxnCommitOffsetWithInvalidMemberId(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val invalidIdCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), "invalid-member") + assertEquals(Map(tip -> Errors.UNKNOWN_MEMBER_ID), invalidIdCommitOffsetResult) + } + + @Test + def testTxnCommitOffsetWithKnownMemberId(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + + val assignedConsumerId = joinGroupResult.memberId + val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), assignedConsumerId, generationId = joinGroupResult.generationId) + assertEquals(Map(tip -> Errors.NONE), leaderCommitOffsetResult) + } + + @Test + def testTxnCommitOffsetWithIllegalGeneration(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + + val assignedConsumerId = joinGroupResult.memberId + val initialGenerationId = joinGroupResult.generationId + val illegalGenerationCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), memberId = assignedConsumerId, generationId = initialGenerationId + 5) + assertEquals(Map(tip -> Errors.ILLEGAL_GENERATION), illegalGenerationCommitOffsetResult) + } + + @Test + def testTxnCommitOffsetWithLegalGeneration(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch : Short = 2 + + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + + val assignedConsumerId = joinGroupResult.memberId + val initialGenerationId = joinGroupResult.generationId + val leaderCommitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, + Map(tip -> offset), memberId = assignedConsumerId, generationId = initialGenerationId) + assertEquals(Map(tip -> Errors.NONE), leaderCommitOffsetResult) + } + + @Test + def testHeartbeatDuringRebalanceCausesRebalanceInProgress(): Unit = { + // First start up a group (with a slightly larger timeout to give us time to heartbeat when the rebalance starts) + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val assignedConsumerId = joinGroupResult.memberId + val initialGenerationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + // Then join with a new consumer to trigger a rebalance + sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + + // We should be in the middle of a rebalance, so the heartbeat should return rebalance in progress + val heartbeatResult = heartbeat(groupId, assignedConsumerId, initialGenerationId) + assertEquals(Errors.REBALANCE_IN_PROGRESS, heartbeatResult) + } + + @Test + def testGenerationIdIncrementsOnRebalance(): Unit = { + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val initialGenerationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + val memberId = joinGroupResult.memberId + assertEquals(1, initialGenerationId) + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, initialGenerationId, memberId, Map(memberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val joinGroupFuture = sendJoinGroup(groupId, memberId, protocolType, protocols) + val otherJoinGroupResult = await(joinGroupFuture, 1) + + val nextGenerationId = otherJoinGroupResult.generationId + val otherJoinGroupError = otherJoinGroupResult.error + assertEquals(2, nextGenerationId) + assertEquals(Errors.NONE, otherJoinGroupError) + } + + @Test + def testLeaveGroupWrongCoordinator(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val leaveGroupResults = singleLeaveGroup(otherGroupId, memberId) + verifyLeaveGroupResult(leaveGroupResults, Errors.NOT_COORDINATOR) + } + + @Test + def testLeaveGroupUnknownGroup(): Unit = { + val leaveGroupResults = singleLeaveGroup(groupId, memberId) + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID)) + } + + @Test + def testLeaveGroupUnknownConsumerExistingGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val otherMemberId = "consumerId" + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val leaveGroupResults = singleLeaveGroup(groupId, otherMemberId) + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID)) + } + + @Test + def testSingleLeaveDeadGroup(): Unit = { + val deadGroupId = "deadGroupId" + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val leaveGroupResults = singleLeaveGroup(deadGroupId, memberId) + verifyLeaveGroupResult(leaveGroupResults, Errors.COORDINATOR_NOT_AVAILABLE) + } + + @Test + def testBatchLeaveDeadGroup(): Unit = { + val deadGroupId = "deadGroupId" + + groupCoordinator.groupManager.addGroup(new GroupMetadata(deadGroupId, Dead, new MockTime())) + val leaveGroupResults = batchLeaveGroup(deadGroupId, + List(new MemberIdentity().setMemberId(memberId), new MemberIdentity().setMemberId(memberId))) + verifyLeaveGroupResult(leaveGroupResults, Errors.COORDINATOR_NOT_AVAILABLE) + } + + @Test + def testValidLeaveGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId) + verifyLeaveGroupResult(leaveGroupResults) + } + + @Test + def testLeaveGroupWithFencedInstanceId(): Unit = { + val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocolSuperset) + assertEquals(Errors.NONE, joinGroupResult.error) + + val leaveGroupResults = singleLeaveGroup(groupId, "some_member", Some(leaderInstanceId)) + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.FENCED_INSTANCE_ID)) + } + + @Test + def testLeaveGroupStaticMemberWithUnknownMemberId(): Unit = { + val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocolSuperset) + assertEquals(Errors.NONE, joinGroupResult.error) + + // Having unknown member id will not affect the request processing. + val leaveGroupResults = singleLeaveGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, Some(leaderInstanceId)) + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE)) + } + + @Test + def testStaticMembersValidBatchLeaveGroup(): Unit = { + staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity() + .setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE, Errors.NONE)) + } + + @Test + def testStaticMembersWrongCoordinatorBatchLeaveGroup(): Unit = { + staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val leaveGroupResults = batchLeaveGroup("invalid-group", List(new MemberIdentity() + .setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NOT_COORDINATOR) + } + + @Test + def testStaticMembersUnknownGroupBatchLeaveGroup(): Unit = { + val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity() + .setGroupInstanceId(leaderInstanceId), new MemberIdentity().setGroupInstanceId(followerInstanceId))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.UNKNOWN_MEMBER_ID)) + } + + @Test + def testStaticMembersFencedInstanceBatchLeaveGroup(): Unit = { + staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity() + .setGroupInstanceId(leaderInstanceId), new MemberIdentity() + .setGroupInstanceId(followerInstanceId) + .setMemberId("invalid-member"))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.NONE, Errors.FENCED_INSTANCE_ID)) + } + + @Test + def testStaticMembersUnknownInstanceBatchLeaveGroup(): Unit = { + staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + + val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity() + .setGroupInstanceId("unknown-instance"), new MemberIdentity() + .setGroupInstanceId(followerInstanceId))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.NONE)) + } + + @Test + def testPendingMemberBatchLeaveGroup(): Unit = { + val pendingMember = setupGroupWithPendingMember() + + val leaveGroupResults = batchLeaveGroup(groupId, List(new MemberIdentity() + .setGroupInstanceId("unknown-instance"), new MemberIdentity() + .setMemberId(pendingMember.memberId))) + + verifyLeaveGroupResult(leaveGroupResults, Errors.NONE, List(Errors.UNKNOWN_MEMBER_ID, Errors.NONE)) + } + + @Test + def testListGroupsIncludesStableGroups(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val (error, groups) = groupCoordinator.handleListGroups(Set(), Set()) + assertEquals(Errors.NONE, error) + assertEquals(1, groups.size) + assertEquals(GroupOverview("groupId", "consumer", Stable.toString, "classic"), groups.head) + } + + @Test + def testListGroupsIncludesRebalancingGroups(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val (error, groups) = groupCoordinator.handleListGroups(Set(), Set()) + assertEquals(Errors.NONE, error) + assertEquals(1, groups.size) + assertEquals(GroupOverview("groupId", "consumer", CompletingRebalance.toString, "classic"), groups.head) + } + + @Test + def testListGroupsWithStates(): Unit = { + val allStates = Set(PreparingRebalance, CompletingRebalance, Stable, Dead, Empty).map(s => s.toString) + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + // Member joins the group + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + assertEquals(Errors.NONE, joinGroupResult.error) + + // The group should be in CompletingRebalance + val (error, groups) = groupCoordinator.handleListGroups(Set(CompletingRebalance.toString), Set()) + assertEquals(Errors.NONE, error) + assertEquals(1, groups.size) + val (error2, groups2) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == CompletingRebalance.toString), Set()) + assertEquals(Errors.NONE, error2) + assertEquals(0, groups2.size) + + // Member syncs + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + // The group is now stable + val (error3, groups3) = groupCoordinator.handleListGroups(Set(Stable.toString), Set()) + assertEquals(Errors.NONE, error3) + assertEquals(1, groups3.size) + val (error4, groups4) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == Stable.toString), Set()) + assertEquals(Errors.NONE, error4) + assertEquals(0, groups4.size) + + // Member leaves + val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId) + verifyLeaveGroupResult(leaveGroupResults) + + // The group is now empty + val (error5, groups5) = groupCoordinator.handleListGroups(Set(Empty.toString), Set()) + assertEquals(Errors.NONE, error5) + assertEquals(1, groups5.size) + val (error6, groups6) = groupCoordinator.handleListGroups(allStates.filterNot(s => s == Empty.toString), Set()) + assertEquals(Errors.NONE, error6) + assertEquals(0, groups6.size) + } + + @Test + def testListGroupsWithTypes(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + // When a group type filter is specified: + // All groups are returned if the type is classic, else nothing is returned. + val (error1, groups1) = groupCoordinator.handleListGroups(Set(), Set("classic")) + assertEquals(Errors.NONE, error1) + assertEquals(1, groups1.size) + assertEquals(GroupOverview("groupId", "consumer", Stable.toString, "classic"), groups1.head) + + val (error2, groups2) = groupCoordinator.handleListGroups(Set(), Set("consumer")) + assertEquals(Errors.NONE, error2) + assertEquals(0, groups2.size) + + // No groups are returned when an incorrect group type is passed. + val (error3, groups3) = groupCoordinator.handleListGroups(Set(), Set("Invalid")) + assertEquals(Errors.NONE, error3) + assertEquals(0, groups3.size) + + // When no group type filter is specified, all groups are returned with classic group type. + val (error4, groups4) = groupCoordinator.handleListGroups(Set(), Set()) + assertEquals(Errors.NONE, error4) + assertEquals(1, groups4.size) + assertEquals(GroupOverview("groupId", "consumer", Stable.toString, "classic"), groups4.head) + + // Check that group type is case-insensitive. + val (error5, groups5) = groupCoordinator.handleListGroups(Set(), Set("Classic")) + assertEquals(Errors.NONE, error5) + assertEquals(1, groups5.size) + assertEquals(GroupOverview("groupId", "consumer", Stable.toString, "classic"), groups5.head) + } + + @Test + def testDescribeGroupWrongCoordinator(): Unit = { + val (error, _, _) = groupCoordinator.handleDescribeGroup(otherGroupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.NOT_COORDINATOR, error) + } + + @Test + def testDescribeGroupInactiveGroup(): Unit = { + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, 5) + assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) + assertEquals(GroupCoordinator.DeadGroup, summary) + + val (errorV6, errorMessageV6, summaryV6) = groupCoordinator.handleDescribeGroup(groupId, 6) + assertEquals(Errors.GROUP_ID_NOT_FOUND, errorV6) + assertEquals(s"Group $groupId not found.", errorMessageV6.get) + assertEquals(GroupCoordinator.DeadGroup, summaryV6) + } + + @Test + def testDescribeGroupStableForDynamicMember(): Unit = { + val joinGroupResult = dynamicJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) + assertEquals(protocolType, summary.protocolType) + assertEquals("range", summary.protocol) + assertEquals(List(assignedMemberId), summary.members.map(_.memberId)) + } + + @Test + def testDescribeGroupStableForStaticMember(): Unit = { + val joinGroupResult = staticJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, leaderInstanceId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val generationId = joinGroupResult.generationId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) + assertEquals(protocolType, summary.protocolType) + assertEquals("range", summary.protocol) + assertEquals(List(assignedMemberId), summary.members.map(_.memberId)) + assertEquals(List(leaderInstanceId), summary.members.flatMap(_.groupInstanceId)) + } + + @Test + def testDescribeGroupRebalancing(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val (error, errorMessage, summary) = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Errors.NONE, error) + assertTrue(errorMessage.isEmpty) + assertEquals(protocolType, summary.protocolType) + assertEquals(GroupCoordinator.NoProtocol, summary.protocol) + assertEquals(CompletingRebalance.toString, summary.state) + assertTrue(summary.members.map(_.memberId).contains(joinGroupResult.memberId)) + assertTrue(summary.members.forall(_.metadata.isEmpty)) + assertTrue(summary.members.forall(_.assignment.isEmpty)) + } + + @Test + def testDeleteNonEmptyGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + dynamicJoinGroup(groupId, memberId, protocolType, protocols) + + val result = groupCoordinator.handleDeleteGroups(Set(groupId)) + assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NON_EMPTY_GROUP)) + } + + @Test + def testDeleteGroupWithInvalidGroupId(): Unit = { + val invalidGroupId = null + val result = groupCoordinator.handleDeleteGroups(Set(invalidGroupId)) + assert(result.size == 1 && result.contains(invalidGroupId) && result.get(invalidGroupId).contains(Errors.INVALID_GROUP_ID)) + } + + @Test + def testDeleteGroupWithWrongCoordinator(): Unit = { + val result = groupCoordinator.handleDeleteGroups(Set(otherGroupId)) + assert(result.size == 1 && result.contains(otherGroupId) && result.get(otherGroupId).contains(Errors.NOT_COORDINATOR)) + } + + @Test + def testDeleteEmptyGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + + val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId) + verifyLeaveGroupResult(leaveGroupResults) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val result = groupCoordinator.handleDeleteGroups(Set(groupId)) + assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NONE)) + } + + @Test + def testDeleteEmptyGroupWithStoredOffsets(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + val assignedMemberId = joinGroupResult.memberId + val joinGroupError = joinGroupResult.error + assertEquals(Errors.NONE, joinGroupError) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, assignedMemberId, Map(assignedMemberId -> Array[Byte]())) + assertEquals(Errors.NONE, syncGroupResult.error) + + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "topic") + val offset = offsetAndMetadata(0) + val commitOffsetResult = commitOffsets(groupId, assignedMemberId, joinGroupResult.generationId, Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), commitOffsetResult) + + val describeGroupResult = groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion) + assertEquals(Stable.toString, describeGroupResult._3.state) + assertEquals(assignedMemberId, describeGroupResult._3.members.head.memberId) + + val leaveGroupResults = singleLeaveGroup(groupId, assignedMemberId) + verifyLeaveGroupResult(leaveGroupResults) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val result = groupCoordinator.handleDeleteGroups(Set(groupId)) + assert(result.size == 1 && result.contains(groupId) && result.get(groupId).contains(Errors.NONE)) + + assertEquals(Dead.toString, groupCoordinator.handleDescribeGroup(groupId, ApiKeys.DESCRIBE_GROUPS.latestVersion)._3.state) + } + + @Test + def testDeleteOffsetOfNonExistingGroup(): Unit = { + val tp = new TopicPartition("foo", 0) + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp), + RequestLocal.noCaching) + + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupError) + assertTrue(topics.isEmpty) + } + + @Test + def testDeleteOffsetOfNonEmptyNonConsumerGroup(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + dynamicJoinGroup(groupId, memberId, "My Protocol", protocols) + val tp = new TopicPartition("foo", 0) + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp), + RequestLocal.noCaching) + + assertEquals(Errors.NON_EMPTY_GROUP, groupError) + assertTrue(topics.isEmpty) + } + + @Test + def testDeleteOffsetOfEmptyNonConsumerGroup(): Unit = { + // join the group + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, "My Protocol", protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val ti1p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val ti2p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "bar") + val offset = offsetAndMetadata(37) + + val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId, + Map(ti1p0 -> offset, ti2p0 -> offset)) + assertEquals(Map(ti1p0 -> Errors.NONE, ti2p0 -> Errors.NONE), validOffsetCommitResult) + + // and leaves. + val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId) + verifyLeaveGroupResult(leaveGroupResults) + + assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Empty))) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(ti1p0.topicPartition), + RequestLocal.noCaching) + + assertEquals(Errors.NONE, groupError) + assertEquals(1, topics.size) + assertEquals(Some(Errors.NONE), topics.get(ti1p0.topicPartition)) + + val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(ti1p0.topicPartition, ti2p0.topicPartition))) + + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(ti1p0.topicPartition).map(_.offset)) + assertEquals(Some(offset.committedOffset), cachedOffsets.get(ti2p0.topicPartition).map(_.offset)) + } + + @Test + def testDeleteOffsetOfConsumerGroupWithUnparsableProtocol(): Unit = { + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = offsetAndMetadata(37) + + val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId, + Map(tip -> offset)) + assertEquals(Map(tip -> Errors.NONE), validOffsetCommitResult) + + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tip.topicPartition), + RequestLocal.noCaching) + + assertEquals(Errors.NONE, groupError) + assertEquals(1, topics.size) + assertEquals(Some(Errors.GROUP_SUBSCRIBED_TO_TOPIC), topics.get(tip.topicPartition)) + } + + @Test + def testDeleteOffsetOfDeadConsumerGroup(): Unit = { + val group = new GroupMetadata(groupId, Dead, new MockTime()) + group.protocolType = Some(protocolType) + groupCoordinator.groupManager.addGroup(group) + + val tp = new TopicPartition("foo", 0) + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(tp), + RequestLocal.noCaching) + + assertEquals(Errors.GROUP_ID_NOT_FOUND, groupError) + assertTrue(topics.isEmpty) + } + + @Test + def testDeleteOffsetOfEmptyConsumerGroup(): Unit = { + // join the group + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, protocols) + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val ti1p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val ti2p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "bar") + val offset = offsetAndMetadata(37) + + val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId, + Map(ti1p0 -> offset, ti2p0 -> offset)) + assertEquals(Map(ti1p0 -> Errors.NONE, ti2p0 -> Errors.NONE), validOffsetCommitResult) + + // and leaves. + val leaveGroupResults = singleLeaveGroup(groupId, joinGroupResult.memberId) + verifyLeaveGroupResult(leaveGroupResults) + + assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Empty))) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(ti1p0.topicPartition), + RequestLocal.noCaching) + + assertEquals(Errors.NONE, groupError) + assertEquals(1, topics.size) + assertEquals(Some(Errors.NONE), topics.get(ti1p0.topicPartition)) + + val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(ti1p0.topicPartition, ti2p0.topicPartition))) + + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(ti1p0.topicPartition).map(_.offset)) + assertEquals(Some(offset.committedOffset), cachedOffsets.get(ti2p0.topicPartition).map(_.offset)) + } + + @Test + def testDeleteOffsetOfStableConsumerGroup(): Unit = { + // join the group + val memberId = JoinGroupRequest.UNKNOWN_MEMBER_ID + val subscription = new Subscription(List("bar").asJava) + + val joinGroupResult = dynamicJoinGroup(groupId, memberId, protocolType, + List(("protocol", ConsumerProtocol.serializeSubscription(subscription).array()))) + assertEquals(Errors.NONE, joinGroupResult.error) + + val syncGroupResult = syncGroupLeader(groupId, joinGroupResult.generationId, joinGroupResult.leaderId, Map.empty) + assertEquals(Errors.NONE, syncGroupResult.error) + + val ti1p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val ti2p0 = new TopicIdPartition(Uuid.randomUuid(), 0, "bar") + val offset = offsetAndMetadata(37) + + val validOffsetCommitResult = commitOffsets(groupId, joinGroupResult.memberId, joinGroupResult.generationId, + Map(ti1p0 -> offset, ti2p0 -> offset)) + assertEquals(Map(ti1p0 -> Errors.NONE, ti2p0 -> Errors.NONE), validOffsetCommitResult) + + assertTrue(groupCoordinator.groupManager.getGroup(groupId).exists(_.is(Stable))) + + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val partition: Partition = mock(classOf[Partition]) + + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + + val (groupError, topics) = groupCoordinator.handleDeleteOffsets(groupId, Seq(ti1p0.topicPartition, ti2p0.topicPartition), + RequestLocal.noCaching) + + assertEquals(Errors.NONE, groupError) + assertEquals(2, topics.size) + assertEquals(Some(Errors.NONE), topics.get(ti1p0.topicPartition)) + assertEquals(Some(Errors.GROUP_SUBSCRIBED_TO_TOPIC), topics.get(ti2p0.topicPartition)) + + val cachedOffsets = groupCoordinator.groupManager.getOffsets(groupId, requireStable, Some(Seq(ti1p0.topicPartition, ti2p0.topicPartition))) + + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(ti1p0.topicPartition).map(_.offset)) + assertEquals(Some(offset.committedOffset), cachedOffsets.get(ti2p0.topicPartition).map(_.offset)) + } + + @Test + def shouldDelayInitialRebalanceByGroupInitialRebalanceDelayOnEmptyGroup(): Unit = { + val firstJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols) + timer.advanceClock(GroupInitialRebalanceDelay / 2) + verifyDelayedTaskNotCompleted(firstJoinFuture) + timer.advanceClock((GroupInitialRebalanceDelay / 2) + 1) + val joinGroupResult = await(firstJoinFuture, 1) + assertEquals(Errors.NONE, joinGroupResult.error) + } + + private def verifyDelayedTaskNotCompleted(firstJoinFuture: Future[JoinGroupResult]) = { + assertThrows(classOf[TimeoutException], () => await(firstJoinFuture, 1), + () => "should have timed out as rebalance delay not expired") + } + + @Test + def shouldResetRebalanceDelayWhenNewMemberJoinsGroupInInitialRebalance(): Unit = { + val rebalanceTimeout = GroupInitialRebalanceDelay * 3 + val firstMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + timer.advanceClock(GroupInitialRebalanceDelay - 1) + val secondMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + timer.advanceClock(2) + + // advance past initial rebalance delay and make sure that tasks + // haven't been completed + timer.advanceClock(GroupInitialRebalanceDelay / 2 + 1) + verifyDelayedTaskNotCompleted(firstMemberJoinFuture) + verifyDelayedTaskNotCompleted(secondMemberJoinFuture) + // advance clock beyond updated delay and make sure the + // tasks have completed + timer.advanceClock(GroupInitialRebalanceDelay / 2) + val firstResult = await(firstMemberJoinFuture, 1) + val secondResult = await(secondMemberJoinFuture, 1) + assertEquals(Errors.NONE, firstResult.error) + assertEquals(Errors.NONE, secondResult.error) + } + + @Test + def shouldDelayRebalanceUptoRebalanceTimeout(): Unit = { + val rebalanceTimeout = GroupInitialRebalanceDelay * 2 + val firstMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + val secondMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + timer.advanceClock(GroupInitialRebalanceDelay + 1) + val thirdMemberJoinFuture = sendJoinGroup(groupId, JoinGroupRequest.UNKNOWN_MEMBER_ID, protocolType, protocols, rebalanceTimeout = rebalanceTimeout) + timer.advanceClock(GroupInitialRebalanceDelay) + + verifyDelayedTaskNotCompleted(firstMemberJoinFuture) + verifyDelayedTaskNotCompleted(secondMemberJoinFuture) + verifyDelayedTaskNotCompleted(thirdMemberJoinFuture) + + // advance clock beyond rebalanceTimeout + timer.advanceClock(1) + + val firstResult = await(firstMemberJoinFuture, 1) + val secondResult = await(secondMemberJoinFuture, 1) + val thirdResult = await(thirdMemberJoinFuture, 1) + assertEquals(Errors.NONE, firstResult.error) + assertEquals(Errors.NONE, secondResult.error) + assertEquals(Errors.NONE, thirdResult.error) + } + + @Test + def testCompleteHeartbeatWithGroupDead(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation) + val group = getGroup(groupId) + group.transitionTo(Dead) + val leaderMemberId = rebalanceResult.leaderId + assertTrue(groupCoordinator.tryCompleteHeartbeat(group, leaderMemberId, isPending = false, () => true)) + groupCoordinator.onExpireHeartbeat(group, leaderMemberId, isPending = false) + assertTrue(group.has(leaderMemberId)) + } + + @Test + def testCompleteHeartbeatWithMemberAlreadyRemoved(): Unit = { + val rebalanceResult = staticMembersJoinAndRebalance(leaderInstanceId, followerInstanceId) + heartbeat(groupId, rebalanceResult.leaderId, rebalanceResult.generation) + val group = getGroup(groupId) + val leaderMemberId = rebalanceResult.leaderId + group.remove(leaderMemberId) + assertTrue(groupCoordinator.tryCompleteHeartbeat(group, leaderMemberId, isPending = false, () => true)) + } + + @Test + def testVerificationErrorsForTxnOffsetCommits(): Unit = { + val tip1 = new TopicIdPartition(Uuid.randomUuid(), 0, "topic-1") + val offset1 = offsetAndMetadata(0) + val tip2 = new TopicIdPartition(Uuid.randomUuid(), 0, "topic-2") + val offset2 = offsetAndMetadata(0) + val producerId = 1000L + val producerEpoch: Short = 2 + + def verifyErrors(error: Errors, expectedError: Errors): Unit = { + val commitOffsetResult = commitTransactionalOffsets(groupId, + producerId, + producerEpoch, + Map(tip1 -> offset1, tip2 -> offset2), + verificationError = error) + assertEquals(expectedError, commitOffsetResult(tip1)) + assertEquals(expectedError, commitOffsetResult(tip2)) + } + + verifyErrors(Errors.INVALID_PRODUCER_ID_MAPPING, Errors.INVALID_PRODUCER_ID_MAPPING) + verifyErrors(Errors.INVALID_TXN_STATE, Errors.INVALID_TXN_STATE) + verifyErrors(Errors.NETWORK_EXCEPTION, Errors.COORDINATOR_LOAD_IN_PROGRESS) + verifyErrors(Errors.NOT_ENOUGH_REPLICAS, Errors.COORDINATOR_NOT_AVAILABLE) + verifyErrors(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NOT_COORDINATOR) + verifyErrors(Errors.KAFKA_STORAGE_ERROR, Errors.NOT_COORDINATOR) + } + + @Test + def testTxnOffsetMetadataTooLarge(): Unit = { + val tip = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + val producerId = 100L + val producerEpoch: Short = 3 + + val offsets = Map( + tip -> new OffsetAndMetadata(offset, OptionalInt.empty(), "s" * (GroupCoordinatorConfig.OFFSET_METADATA_MAX_SIZE_DEFAULT + 1), 0, OptionalLong.empty()) + ) + + val commitOffsetResult = commitTransactionalOffsets(groupId, producerId, producerEpoch, offsets) + assertEquals(Map(tip -> Errors.OFFSET_METADATA_TOO_LARGE), commitOffsetResult) + } + + private def getGroup(groupId: String): GroupMetadata = { + val groupOpt = groupCoordinator.groupManager.getGroup(groupId) + assertTrue(groupOpt.isDefined) + groupOpt.get + } + + private def setupJoinGroupCallback: (Future[JoinGroupResult], JoinGroupCallback) = { + val responsePromise = Promise[JoinGroupResult]() + val responseFuture = responsePromise.future + val responseCallback: JoinGroupCallback = responsePromise.success + (responseFuture, responseCallback) + } + + private def setupSyncGroupCallback: (Future[SyncGroupResult], SyncGroupCallback) = { + val responsePromise = Promise[SyncGroupResult]() + val responseFuture = responsePromise.future + val responseCallback: SyncGroupCallback = responsePromise.success + (responseFuture, responseCallback) + } + + private def setupHeartbeatCallback: (Future[HeartbeatCallbackParams], HeartbeatCallback) = { + val responsePromise = Promise[HeartbeatCallbackParams]() + val responseFuture = responsePromise.future + val responseCallback: HeartbeatCallback = error => responsePromise.success(error) + (responseFuture, responseCallback) + } + + private def setupCommitOffsetsCallback: (Future[CommitOffsetCallbackParams], CommitOffsetCallback) = { + val responsePromise = Promise[CommitOffsetCallbackParams]() + val responseFuture = responsePromise.future + val responseCallback: CommitOffsetCallback = offsets => responsePromise.success(offsets) + (responseFuture, responseCallback) + } + + private def setupLeaveGroupCallback: (Future[LeaveGroupResult], LeaveGroupCallback) = { + val responsePromise = Promise[LeaveGroupResult]() + val responseFuture = responsePromise.future + val responseCallback: LeaveGroupCallback = result => responsePromise.success(result) + (responseFuture, responseCallback) + } + + private def sendJoinGroup(groupId: String, + memberId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + groupInstanceId: Option[String] = None, + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout, + requireKnownMemberId: Boolean = false, + supportSkippingAssignment: Boolean = true): Future[JoinGroupResult] = { + val (responseFuture, responseCallback) = setupJoinGroupCallback + + groupCoordinator.handleJoinGroup(groupId, memberId, groupInstanceId, requireKnownMemberId, supportSkippingAssignment, + "clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback) + responseFuture + } + + private def sendStaticJoinGroupWithPersistence(groupId: String, + memberId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + groupInstanceId: String, + sessionTimeout: Int, + rebalanceTimeout: Int, + appendRecordError: Errors, + requireKnownMemberId: Boolean = false, + supportSkippingAssignment: Boolean): Future[JoinGroupResult] = { + val (responseFuture, responseCallback) = setupJoinGroupCallback + + val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit]) + + when(replicaManager.appendRecords(anyLong, + anyShort(), + internalTopicsAllowed = ArgumentMatchers.eq(true), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + any[Map[TopicPartition, MemoryRecords]], + capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), + any(classOf[RequestLocal]), + any[ActionQueue], + any[Map[TopicPartition, VerificationGuard]] + )).thenAnswer(_ => { + capturedArgument.getValue.apply( + Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) -> + new PartitionResponse(appendRecordError, 0L, RecordBatch.NO_TIMESTAMP, 0L) + ) + ) + }) + + groupCoordinator.handleJoinGroup(groupId, memberId, Some(groupInstanceId), requireKnownMemberId, supportSkippingAssignment, + "clientId", "clientHost", rebalanceTimeout, sessionTimeout, protocolType, protocols, responseCallback) + responseFuture + } + + private def sendSyncGroupLeader(groupId: String, + generation: Int, + leaderId: String, + protocolType: Option[String], + protocolName: Option[String], + groupInstanceId: Option[String], + assignment: Map[String, Array[Byte]]): Future[SyncGroupResult] = { + val (responseFuture, responseCallback) = setupSyncGroupCallback + + val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit]) + + when(replicaManager.appendRecords(anyLong, + anyShort(), + internalTopicsAllowed = ArgumentMatchers.eq(true), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + any[Map[TopicPartition, MemoryRecords]], + capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), + any(classOf[RequestLocal]), + any[ActionQueue], + any[Map[TopicPartition, VerificationGuard]] + )).thenAnswer(_ => { + capturedArgument.getValue.apply( + Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L) + ) + ) + } + ) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) + + groupCoordinator.handleSyncGroup(groupId, generation, leaderId, protocolType, protocolName, + groupInstanceId, assignment, responseCallback) + responseFuture + } + + private def sendSyncGroupFollower(groupId: String, + generation: Int, + memberId: String, + protocolType: Option[String] = None, + protocolName: Option[String] = None, + groupInstanceId: Option[String] = None): Future[SyncGroupResult] = { + val (responseFuture, responseCallback) = setupSyncGroupCallback + + + groupCoordinator.handleSyncGroup(groupId, generation, memberId, + protocolType, protocolName, groupInstanceId, Map.empty[String, Array[Byte]], responseCallback) + responseFuture + } + + private def dynamicJoinGroup(groupId: String, + memberId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout): JoinGroupResult = { + val requireKnownMemberId = true + var responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId) + + // Since member id is required, we need another bounce to get the successful join group result. + if (memberId == JoinGroupRequest.UNKNOWN_MEMBER_ID && requireKnownMemberId) { + val joinGroupResult = Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + // If some other error is triggered, return the error immediately for caller to handle. + if (joinGroupResult.error != Errors.MEMBER_ID_REQUIRED) { + return joinGroupResult + } + responseFuture = sendJoinGroup(groupId, joinGroupResult.memberId, protocolType, protocols, None, sessionTimeout, rebalanceTimeout, requireKnownMemberId) + } + timer.advanceClock(GroupInitialRebalanceDelay + 1) + // should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay + Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + } + + private def staticJoinGroup(groupId: String, + memberId: String, + groupInstanceId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + clockAdvance: Int = GroupInitialRebalanceDelay + 1, + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout, + supportSkippingAssignment: Boolean = true): JoinGroupResult = { + val responseFuture = sendJoinGroup(groupId, memberId, protocolType, protocols, Some(groupInstanceId), sessionTimeout, rebalanceTimeout, + supportSkippingAssignment = supportSkippingAssignment) + + timer.advanceClock(clockAdvance) + // should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay + Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + } + + private def staticJoinGroupWithPersistence(groupId: String, + memberId: String, + groupInstanceId: String, + protocolType: String, + protocols: List[(String, Array[Byte])], + clockAdvance: Int, + sessionTimeout: Int = DefaultSessionTimeout, + rebalanceTimeout: Int = DefaultRebalanceTimeout, + appendRecordError: Errors = Errors.NONE, + supportSkippingAssignment: Boolean = true): JoinGroupResult = { + val responseFuture = sendStaticJoinGroupWithPersistence(groupId, memberId, protocolType, protocols, + groupInstanceId, sessionTimeout, rebalanceTimeout, appendRecordError, supportSkippingAssignment = supportSkippingAssignment) + + timer.advanceClock(clockAdvance) + // should only have to wait as long as session timeout, but allow some extra time in case of an unexpected delay + Await.result(responseFuture, Duration(rebalanceTimeout + 100, TimeUnit.MILLISECONDS)) + } + + private def syncGroupFollower(groupId: String, + generationId: Int, + memberId: String, + protocolType: Option[String] = None, + protocolName: Option[String] = None, + groupInstanceId: Option[String] = None, + sessionTimeout: Int = DefaultSessionTimeout): SyncGroupResult = { + val responseFuture = sendSyncGroupFollower(groupId, generationId, memberId, protocolType, + protocolName, groupInstanceId) + Await.result(responseFuture, Duration(sessionTimeout + 100, TimeUnit.MILLISECONDS)) + } + + private def syncGroupLeader(groupId: String, + generationId: Int, + memberId: String, + assignment: Map[String, Array[Byte]], + protocolType: Option[String] = None, + protocolName: Option[String] = None, + groupInstanceId: Option[String] = None, + sessionTimeout: Int = DefaultSessionTimeout): SyncGroupResult = { + val responseFuture = sendSyncGroupLeader(groupId, generationId, memberId, protocolType, + protocolName, groupInstanceId, assignment) + Await.result(responseFuture, Duration(sessionTimeout + 100, TimeUnit.MILLISECONDS)) + } + + private def heartbeat(groupId: String, + consumerId: String, + generationId: Int, + groupInstanceId: Option[String] = None): HeartbeatCallbackParams = { + val (responseFuture, responseCallback) = setupHeartbeatCallback + + + groupCoordinator.handleHeartbeat(groupId, consumerId, groupInstanceId, generationId, responseCallback) + Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + } + + private def await[T](future: Future[T], millis: Long): T = { + Await.result(future, Duration(millis, TimeUnit.MILLISECONDS)) + } + + private def commitOffsets(groupId: String, + memberId: String, + generationId: Int, + offsets: Map[TopicIdPartition, OffsetAndMetadata], + groupInstanceId: Option[String] = None): CommitOffsetCallbackParams = { + val (responseFuture, responseCallback) = setupCommitOffsetsCallback + + val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit]) + + when(replicaManager.appendRecords(anyLong, + anyShort(), + internalTopicsAllowed = ArgumentMatchers.eq(true), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + any[Map[TopicPartition, MemoryRecords]], + capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), + any(classOf[RequestLocal]), + any[ActionQueue], + any[Map[TopicPartition, VerificationGuard]] + )).thenAnswer(_ => { + capturedArgument.getValue.apply( + Map(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L) + ) + ) + }) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) + + groupCoordinator.handleCommitOffsets(groupId, memberId, groupInstanceId, generationId, offsets, responseCallback) + Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + } + + private def commitTransactionalOffsets(groupId: String, + producerId: Long, + producerEpoch: Short, + offsets: Map[TopicIdPartition, OffsetAndMetadata], + memberId: String = JoinGroupRequest.UNKNOWN_MEMBER_ID, + groupInstanceId: Option[String] = Option.empty, + generationId: Int = JoinGroupRequest.UNKNOWN_GENERATION_ID, + verificationError: Errors = Errors.NONE): CommitOffsetCallbackParams = { + val (responseFuture, responseCallback) = setupCommitOffsetsCallback + + val capturedArgument: ArgumentCaptor[scala.collection.Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[scala.collection.Map[TopicPartition, PartitionResponse] => Unit]) + + // Since transactional ID is only used for verification, we can use a dummy value. Ensure it passes through. + val transactionalId = "dummy-txn-id" + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupCoordinator.partitionFor(groupId)) + + val postVerificationCallback: ArgumentCaptor[((Errors, VerificationGuard)) => Unit] = + ArgumentCaptor.forClass(classOf[((Errors, VerificationGuard)) => Unit]) + + // Transactional appends attempt to schedule to the request handler thread using + // a non request handler thread. Set this to avoid error. + KafkaRequestHandler.setBypassThreadCheck(true) + + when(replicaManager.maybeSendPartitionToTransactionCoordinator( + ArgumentMatchers.eq(offsetTopicPartition), + ArgumentMatchers.eq(transactionalId), + ArgumentMatchers.eq(producerId), + ArgumentMatchers.eq(producerEpoch), + any(), + postVerificationCallback.capture(), + any() + )).thenAnswer( + _ => postVerificationCallback.getValue()((verificationError, VerificationGuard.SENTINEL)) + ) + when(replicaManager.appendRecords(anyLong, + anyShort(), + internalTopicsAllowed = ArgumentMatchers.eq(true), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + any[Map[TopicPartition, MemoryRecords]], + capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), + any(classOf[RequestLocal]), + any[ActionQueue], + any[Map[TopicPartition, VerificationGuard]] + )).thenAnswer(_ => { + capturedArgument.getValue.apply( + Map(offsetTopicPartition -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L) + ) + ) + }) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) + + groupCoordinator.handleTxnCommitOffsets(groupId, transactionalId, producerId, producerEpoch, + memberId, groupInstanceId, generationId, offsets, responseCallback, RequestLocal.noCaching, ApiKeys.TXN_OFFSET_COMMIT.latestVersion()) + val result = Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + result + } + + private def singleLeaveGroup(groupId: String, + consumerId: String, + groupInstanceId: Option[String] = None): LeaveGroupResult = { + val singleMemberIdentity = List( + new MemberIdentity() + .setMemberId(consumerId) + .setGroupInstanceId(groupInstanceId.orNull)) + batchLeaveGroup(groupId, singleMemberIdentity) + } + + private def batchLeaveGroup(groupId: String, + memberIdentities: List[MemberIdentity]): LeaveGroupResult = { + val (responseFuture, responseCallback) = setupLeaveGroupCallback + + when(replicaManager.getPartition(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId))) + .thenReturn(HostedPartition.None) + when(replicaManager.onlinePartition(any[TopicPartition])).thenReturn(Some(mock(classOf[Partition]))) + + groupCoordinator.handleLeaveGroup(groupId, memberIdentities, responseCallback) + Await.result(responseFuture, Duration(40, TimeUnit.MILLISECONDS)) + } + + def handleTxnCompletion(producerId: Long, + offsetsPartitions: Iterable[TopicPartition], + transactionResult: TransactionResult): Unit = { + val isCommit = transactionResult == TransactionResult.COMMIT + groupCoordinator.groupManager.handleTxnCompletion(producerId, offsetsPartitions.map(_.partition).toSet, isCommit) + } + + private def offsetAndMetadata(offset: Long): OffsetAndMetadata = { + new OffsetAndMetadata(offset, OptionalInt.empty(), "", timer.time.milliseconds(), OptionalLong.empty()) + } +} + +object GroupCoordinatorTest { + def verifyLeaveGroupResult(leaveGroupResult: LeaveGroupResult, + expectedTopLevelError: Errors = Errors.NONE, + expectedMemberLevelErrors: List[Errors] = List.empty): Unit = { + assertEquals(expectedTopLevelError, leaveGroupResult.topLevelError) + if (expectedMemberLevelErrors.nonEmpty) { + assertEquals(expectedMemberLevelErrors.size, leaveGroupResult.memberResponses.size) + for (i <- expectedMemberLevelErrors.indices) { + assertEquals(expectedMemberLevelErrors(i), leaveGroupResult.memberResponses(i).error) + } + } + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala new file mode 100644 index 0000000000000..5897889ec3e09 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataManagerTest.scala @@ -0,0 +1,3095 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import java.lang.management.ManagementFactory +import java.nio.ByteBuffer +import java.util.concurrent.locks.ReentrantLock +import java.util.{Collections, OptionalInt, OptionalLong} +import com.yammer.metrics.core.Gauge + +import javax.management.ObjectName +import kafka.cluster.Partition +import kafka.log.UnifiedLog +import kafka.server.{HostedPartition, KafkaConfig, ReplicaManager} +import kafka.utils.TestUtils +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.metrics.{JmxReporter, KafkaMetricsContext, Metrics => kMetrics} +import org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection +import org.apache.kafka.common.protocol.types.{CompactArrayOf, Field, Schema, Struct, Type} +import org.apache.kafka.common.protocol.{ByteBufferAccessor, Errors, MessageUtil} +import org.apache.kafka.common.record._ +import org.apache.kafka.common.requests.OffsetFetchResponse +import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.coordinator.group.{GroupCoordinatorConfig, OffsetAndMetadata, OffsetConfig} +import org.apache.kafka.coordinator.group.generated.{GroupMetadataValue, OffsetCommitValue} +import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.metrics.KafkaYammerMetrics +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.util.{KafkaScheduler, MockTime} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LogAppendInfo, LogOffsetMetadata, VerificationGuard} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} +import org.mockito.{ArgumentCaptor, ArgumentMatchers} +import org.mockito.ArgumentMatchers.{any, anyInt, anyLong, anyShort} +import org.mockito.Mockito.{mock, reset, times, verify, when} + +import scala.jdk.CollectionConverters._ +import scala.collection.{immutable, _} + +class GroupMetadataManagerTest { + + var time: MockTime = _ + var replicaManager: ReplicaManager = _ + var groupMetadataManager: GroupMetadataManager = _ + var scheduler: KafkaScheduler = _ + var partition: Partition = _ + var defaultOffsetRetentionMs = Long.MaxValue + var metrics: kMetrics = _ + + val groupId = "foo" + val groupInstanceId = "bar" + val groupPartitionId = 0 + val groupTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId) + val protocolType = "protocolType" + val rebalanceTimeout = 60000 + val sessionTimeout = 10000 + val defaultRequireStable = false + val numOffsetsPartitions = 2 + val noLeader = OptionalInt.empty() + val noExpiration = OptionalLong.empty() + + private val offsetConfig = { + val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) + new OffsetConfig(config.groupCoordinatorConfig.offsetMetadataMaxSize, + config.groupCoordinatorConfig.offsetsLoadBufferSize, + config.groupCoordinatorConfig.offsetsRetentionMs, + config.groupCoordinatorConfig.offsetsRetentionCheckIntervalMs, + config.groupCoordinatorConfig.offsetsTopicPartitions, + config.groupCoordinatorConfig.offsetsTopicSegmentBytes, + config.groupCoordinatorConfig.offsetsTopicReplicationFactor, + config.groupCoordinatorConfig.offsetTopicCompressionType, + config.groupCoordinatorConfig.offsetCommitTimeoutMs) + } + + @BeforeEach + def setUp(): Unit = { + defaultOffsetRetentionMs = offsetConfig.offsetsRetentionMs + metrics = new kMetrics() + time = new MockTime + replicaManager = mock(classOf[ReplicaManager]) + groupMetadataManager = new GroupMetadataManager(0, offsetConfig, replicaManager, time, metrics) + groupMetadataManager.startup(() => numOffsetsPartitions, enableMetadataExpiration = false) + partition = mock(classOf[Partition]) + } + + @AfterEach + def tearDown(): Unit = { + groupMetadataManager.shutdown() + } + + @Test + def testLogInfoFromCleanupGroupMetadata(): Unit = { + var expiredOffsets: Int = 0 + var infoCount = 0 + val gmm = new GroupMetadataManager(0, offsetConfig, replicaManager, time, metrics) { + override def cleanupGroupMetadata(groups: Iterable[GroupMetadata], requestLocal: RequestLocal, + selector: GroupMetadata => Map[TopicPartition, OffsetAndMetadata]): Int = expiredOffsets + + override def info(msg: => String): Unit = infoCount += 1 + } + gmm.startup(() => numOffsetsPartitions, enableMetadataExpiration = false) + try { + // if there are no offsets to expire, we skip to log + gmm.cleanupGroupMetadata() + assertEquals(0, infoCount) + // if there are offsets to expire, we should log info + expiredOffsets = 100 + gmm.cleanupGroupMetadata() + assertEquals(1, infoCount) + } finally { + gmm.shutdown() + } + } + + @Test + def testLoadOffsetsWithoutGroup(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, offsetCommitRecords.toArray: _*) + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadEmptyGroupWithOffsets(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val generation = 15 + val protocolType = "consumer" + val startOffset = 15L + val groupEpoch = 2 + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val groupMetadataRecord = buildEmptyGroupRecord(generation, protocolType) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertNull(group.leaderOrNull) + assertNull(group.protocolName.orNull) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadTransactionalOffsetsWithoutGroup(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testDoNotLoadAbortedTransactionalOffsetCommits(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val abortedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + // Since there are no committed offsets for the group, and there is no other group metadata, we don't expect the + // group to be loaded. + assertEquals(None, groupMetadataManager.getGroup(groupId)) + } + + @Test + def testGroupLoadedWithPendingCommits(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + val bar0 = new TopicPartition("bar", 0) + val pendingOffsets = Map( + foo0 -> 23L, + foo1 -> 455L, + bar0 -> 8992L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, pendingOffsets) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + // The group should be loaded with pending offsets. + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + // Ensure that no offsets are materialized, but that we have offsets pending. + assertEquals(0, group.allOffsets.size) + assertTrue(group.hasOffsets) + assertTrue(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo0)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo1)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(bar0)) + } + + @Test + def testLoadWithCommittedAndAbortedTransactionalOffsetCommits(): Unit = { + // A test which loads a log with a mix of committed and aborted transactional offset committed messages. + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val abortedOffsets = Map( + new TopicPartition("foo", 2) -> 231L, + new TopicPartition("foo", 3) -> 4551L, + new TopicPartition("bar", 1) -> 89921L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false) + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + // Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer. + // This allows us to be certain that the aborted offset commits are truly discarded. + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + } + + @Test + def testLoadWithCommittedAndAbortedAndPendingTransactionalOffsetCommits(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val foo3 = new TopicPartition("foo", 3) + + val abortedOffsets = Map( + new TopicPartition("foo", 2) -> 231L, + foo3 -> 4551L, + new TopicPartition("bar", 1) -> 89921L + ) + + val pendingOffsets = Map( + foo3 -> 2312L, + new TopicPartition("foo", 4) -> 45512L, + new TopicPartition("bar", 2) -> 899212L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + val commitOffsetsLogPosition = nextOffset + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, committedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true) + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, abortedOffsets) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = false) + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, pendingOffsets) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + + // Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer. + // This allows us to be certain that the aborted offset commits are truly discarded. + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertEquals(Some(commitOffsetsLogPosition), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset) + } + + // We should have pending commits. + assertTrue(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(foo3)) + + // The loaded pending commits should materialize after a commit marker comes in. + groupMetadataManager.handleTxnCompletion(producerId, List(groupMetadataTopicPartition.partition).toSet, isCommit = true) + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + pendingOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadTransactionalOffsetCommitsFromMultipleProducers(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val firstProducerId = 1000L + val firstProducerEpoch: Short = 2 + val secondProducerId = 1001L + val secondProducerEpoch: Short = 3 + val groupEpoch = 2 + + val committedOffsetsFirstProducer = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val committedOffsetsSecondProducer = Map( + new TopicPartition("foo", 2) -> 231L, + new TopicPartition("foo", 3) -> 4551L, + new TopicPartition("bar", 1) -> 89921L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0L + + val firstProduceRecordOffset = nextOffset + nextOffset += appendTransactionalOffsetCommits(buffer, firstProducerId, firstProducerEpoch, nextOffset, committedOffsetsFirstProducer) + nextOffset += completeTransactionalOffsetCommit(buffer, firstProducerId, firstProducerEpoch, nextOffset, isCommit = true) + + val secondProducerRecordOffset = nextOffset + nextOffset += appendTransactionalOffsetCommits(buffer, secondProducerId, secondProducerEpoch, nextOffset, committedOffsetsSecondProducer) + nextOffset += completeTransactionalOffsetCommit(buffer, secondProducerId, secondProducerEpoch, nextOffset, isCommit = true) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + + // Ensure that only the committed offsets are materialized, and that there are no pending commits for the producer. + // This allows us to be certain that the aborted offset commits are truly discarded. + assertEquals(committedOffsetsFirstProducer.size + committedOffsetsSecondProducer.size, group.allOffsets.size) + committedOffsetsFirstProducer.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertEquals(Some(firstProduceRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset) + } + committedOffsetsSecondProducer.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertEquals(Some(secondProducerRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset) + } + } + + @Test + def testGroupLoadWithConsumerAndTransactionalOffsetCommitsConsumerWins(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val transactionalOffsetCommits = Map( + new TopicPartition("foo", 0) -> 23L + ) + + val consumerOffsetCommits = Map( + new TopicPartition("foo", 0) -> 24L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, transactionalOffsetCommits) + val consumerRecordOffset = nextOffset + nextOffset += appendConsumerOffsetCommit(buffer, nextOffset, consumerOffsetCommits) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + // The group should be loaded with pending offsets. + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(1, group.allOffsets.size) + assertTrue(group.hasOffsets) + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertEquals(consumerOffsetCommits.size, group.allOffsets.size) + consumerOffsetCommits.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertEquals(Some(consumerRecordOffset), group.offsetWithRecordMetadata(topicPartition).head.appendedBatchOffset) + } + } + + @Test + def testGroupLoadWithConsumerAndTransactionalOffsetCommitsTransactionWins(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val producerId = 1000L + val producerEpoch: Short = 2 + val groupEpoch = 2 + + val transactionalOffsetCommits = Map( + new TopicPartition("foo", 0) -> 23L + ) + + val consumerOffsetCommits = Map( + new TopicPartition("foo", 0) -> 24L + ) + + val buffer = ByteBuffer.allocate(1024) + var nextOffset = 0 + nextOffset += appendConsumerOffsetCommit(buffer, nextOffset, consumerOffsetCommits) + nextOffset += appendTransactionalOffsetCommits(buffer, producerId, producerEpoch, nextOffset, transactionalOffsetCommits) + nextOffset += completeTransactionalOffsetCommit(buffer, producerId, producerEpoch, nextOffset, isCommit = true) + buffer.flip() + + val records = MemoryRecords.readableRecords(buffer) + expectGroupMetadataLoad(groupMetadataTopicPartition, 0, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + // The group should be loaded with pending offsets. + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(1, group.allOffsets.size) + assertTrue(group.hasOffsets) + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertEquals(consumerOffsetCommits.size, group.allOffsets.size) + transactionalOffsetCommits.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testGroupNotExists(): Unit = { + // group is not owned + assertFalse(groupMetadataManager.groupNotExists(groupId)) + + groupMetadataManager.addOwnedPartition(groupPartitionId) + // group is owned but does not exist yet + assertTrue(groupMetadataManager.groupNotExists(groupId)) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + // group is owned but not Dead + assertFalse(groupMetadataManager.groupNotExists(groupId)) + + group.transitionTo(Dead) + // group is owned and Dead + assertTrue(groupMetadataManager.groupNotExists(groupId)) + } + + private def appendConsumerOffsetCommit(buffer: ByteBuffer, baseOffset: Long, offsets: Map[TopicPartition, Long]) = { + val builder = MemoryRecords.builder(buffer, Compression.NONE, TimestampType.LOG_APPEND_TIME, baseOffset) + val commitRecords = createCommittedOffsetRecords(offsets) + commitRecords.foreach(builder.append) + builder.build() + offsets.size + } + + private def appendTransactionalOffsetCommits(buffer: ByteBuffer, producerId: Long, producerEpoch: Short, + baseOffset: Long, offsets: Map[TopicPartition, Long]): Int = { + val builder = MemoryRecords.builder(buffer, Compression.NONE, baseOffset, producerId, producerEpoch, 0, true) + val commitRecords = createCommittedOffsetRecords(offsets) + commitRecords.foreach(builder.append) + builder.build() + offsets.size + } + + private def completeTransactionalOffsetCommit(buffer: ByteBuffer, producerId: Long, producerEpoch: Short, baseOffset: Long, + isCommit: Boolean): Int = { + val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, + TimestampType.LOG_APPEND_TIME, baseOffset, time.milliseconds(), producerId, producerEpoch, 0, true, true, + RecordBatch.NO_PARTITION_LEADER_EPOCH) + val controlRecordType = if (isCommit) ControlRecordType.COMMIT else ControlRecordType.ABORT + builder.appendEndTxnMarker(time.milliseconds(), new EndTransactionMarker(controlRecordType, 0)) + builder.build() + 1 + } + + @Test + def testLoadOffsetsWithTombstones(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val groupEpoch = 2 + + val tombstonePartition = new TopicPartition("foo", 1) + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + tombstonePartition -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val tombstone = new SimpleRecord(GroupMetadataManager.offsetCommitKey(groupId, tombstonePartition), null) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(tombstone)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(committedOffsets.size - 1, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + if (topicPartition == tombstonePartition) + assertEquals(None, group.offset(topicPartition)) + else + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadOffsetsAndGroup(): Unit = { + loadOffsetsAndGroup(groupTopicPartition, 2) + } + + def loadOffsetsAndGroup(groupMetadataTopicPartition: TopicPartition, groupEpoch: Int): GroupMetadata = { + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId) + + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Stable, group.currentState) + assertEquals(memberId, group.leaderOrNull) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(Set(memberId), group.allMembers) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isEmpty) + } + group + } + + @Test + def testLoadOffsetsAndGroupIgnored(): Unit = { + val groupEpoch = 2 + loadOffsetsAndGroup(groupTopicPartition, groupEpoch) + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition())) + + groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, OptionalInt.of(groupEpoch), _ => ()) + assertTrue(groupMetadataManager.getGroup(groupId).isEmpty, + "Removed group remained in cache") + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition())) + + groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch - 1, _ => (), 0L) + assertTrue(groupMetadataManager.getGroup(groupId).isEmpty, + "Removed group remained in cache") + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition())) + } + + @Test + def testUnloadOffsetsAndGroup(): Unit = { + val groupEpoch = 2 + loadOffsetsAndGroup(groupTopicPartition, groupEpoch) + + groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, OptionalInt.of(groupEpoch), _ => ()) + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition())) + assertTrue(groupMetadataManager.getGroup(groupId).isEmpty, + "Removed group remained in cache") + } + + @Test + def testUnloadOffsetsAndGroupIgnored(): Unit = { + val groupEpoch = 2 + val initiallyLoaded = loadOffsetsAndGroup(groupTopicPartition, groupEpoch) + + groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, OptionalInt.of(groupEpoch - 1), _ => ()) + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition())) + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(initiallyLoaded.groupId, group.groupId) + assertEquals(initiallyLoaded.currentState, group.currentState) + assertEquals(initiallyLoaded.leaderOrNull, group.leaderOrNull) + assertEquals(initiallyLoaded.generationId, group.generationId) + assertEquals(initiallyLoaded.protocolType, group.protocolType) + assertEquals(initiallyLoaded.protocolName.orNull, group.protocolName.orNull) + assertEquals(initiallyLoaded.allMembers, group.allMembers) + assertEquals(initiallyLoaded.allOffsets.size, group.allOffsets.size) + initiallyLoaded.allOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isEmpty) + } + } + + @Test + def testUnloadOffsetsAndGroupIgnoredAfterStopReplica(): Unit = { + val groupEpoch = 2 + val initiallyLoaded = loadOffsetsAndGroup(groupTopicPartition, groupEpoch) + + groupMetadataManager.removeGroupsAndOffsets(groupTopicPartition, OptionalInt.empty, _ => ()) + assertTrue(groupMetadataManager.getGroup(groupId).isEmpty, + "Removed group remained in cache") + assertEquals(groupEpoch, groupMetadataManager.epochForPartitionId.get(groupTopicPartition.partition()), + "Replica which was stopped still in epochForPartitionId") + + loadOffsetsAndGroup(groupTopicPartition, groupEpoch + 1) + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(initiallyLoaded.groupId, group.groupId) + assertEquals(initiallyLoaded.currentState, group.currentState) + assertEquals(initiallyLoaded.leaderOrNull, group.leaderOrNull) + assertEquals(initiallyLoaded.generationId, group.generationId) + assertEquals(initiallyLoaded.protocolType, group.protocolType) + assertEquals(initiallyLoaded.protocolName.orNull, group.protocolName.orNull) + assertEquals(initiallyLoaded.allMembers, group.allMembers) + assertEquals(initiallyLoaded.allOffsets.size, group.allOffsets.size) + initiallyLoaded.allOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isEmpty) + } + } + + @Test + def testLoadGroupWithTombstone(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val groupEpoch = 2 + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15, + protocolType = "consumer", protocol = "range", memberId) + val groupMetadataTombstone = new SimpleRecord(GroupMetadataManager.groupMetadataKey(groupId), null) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + Seq(groupMetadataRecord, groupMetadataTombstone).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + assertEquals(None, groupMetadataManager.getGroup(groupId)) + } + + @Test + def testLoadGroupWithLargeGroupMetadataRecord(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val groupEpoch = 2 + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + // create a GroupMetadata record larger then offsets.load.buffer.size (here at least 16 bytes larger) + val assignmentSize = GroupCoordinatorConfig.OFFSETS_LOAD_BUFFER_SIZE_DEFAULT + 16 + val memberId = "98098230493" + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15, + protocolType = "consumer", protocol = "range", memberId, new Array[Byte](assignmentSize)) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadGroupAndOffsetsWithCorruptedLog(): Unit = { + // Simulate a case where startOffset < endOffset but log is empty. This could theoretically happen + // when all the records are expired and the active segment is truncated or when the partition + // is accidentally corrupted. + val startOffset = 0L + val endOffset = 10L + val groupEpoch = 2 + + val logMock: UnifiedLog = mock(classOf[UnifiedLog]) + when(replicaManager.getLog(groupTopicPartition)).thenReturn(Some(logMock)) + expectGroupMetadataLoad(logMock, startOffset, MemoryRecords.EMPTY) + when(replicaManager.getLogEndOffset(groupTopicPartition)).thenReturn(Some(endOffset)) + groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch, _ => (), 0L) + + verify(logMock).logStartOffset + verify(logMock).read(ArgumentMatchers.eq(startOffset), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) + verify(replicaManager).getLog(groupTopicPartition) + verify(replicaManager, times(2)).getLogEndOffset(groupTopicPartition) + + assertFalse(groupMetadataManager.isPartitionLoading(groupTopicPartition.partition())) + } + + @Test + def testOffsetWriteAfterGroupRemoved(): Unit = { + // this test case checks the following scenario: + // 1. the group exists at some point in time, but is later removed (because all members left) + // 2. a "simple" consumer (i.e. not a consumer group) then uses the same groupId to commit some offsets + + val groupMetadataTopicPartition = groupTopicPartition + val generation = 293 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId) + val groupMetadataTombstone = new SimpleRecord(GroupMetadataManager.groupMetadataKey(groupId), null) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (Seq(groupMetadataRecord, groupMetadataTombstone) ++ offsetCommitRecords).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testLoadGroupAndOffsetsFromDifferentSegments(): Unit = { + val generation = 293 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val groupEpoch = 2 + val tp0 = new TopicPartition("foo", 0) + val tp1 = new TopicPartition("foo", 1) + val tp2 = new TopicPartition("bar", 0) + val tp3 = new TopicPartition("xxx", 0) + + val fileRecordsMock: FileRecords = mock(classOf[FileRecords]) + val logMock: UnifiedLog = mock(classOf[UnifiedLog]) + when(replicaManager.getLog(groupTopicPartition)).thenReturn(Some(logMock)) + + val segment1MemberId = "a" + val segment1Offsets = Map(tp0 -> 23L, tp1 -> 455L, tp3 -> 42L) + val segment1Records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (createCommittedOffsetRecords(segment1Offsets) ++ Seq(buildStableGroupRecordWithMember( + generation, protocolType, protocol, segment1MemberId))).toArray: _*) + val segment1End = startOffset + segment1Records.records.asScala.size + + val segment2MemberId = "b" + val segment2Offsets = Map(tp0 -> 33L, tp2 -> 8992L, tp3 -> 10L) + val segment2Records = MemoryRecords.withRecords(segment1End, Compression.NONE, + (createCommittedOffsetRecords(segment2Offsets) ++ Seq(buildStableGroupRecordWithMember( + generation, protocolType, protocol, segment2MemberId))).toArray: _*) + val segment2End = segment1End + segment2Records.records.asScala.size + + when(logMock.logStartOffset) + .thenReturn(segment1End) + .thenReturn(segment2End) + when(logMock.read(ArgumentMatchers.eq(segment1End), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) + .thenReturn(new FetchDataInfo(new LogOffsetMetadata(segment1End), fileRecordsMock)) + when(logMock.read(ArgumentMatchers.eq(segment2End), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) + .thenReturn(new FetchDataInfo(new LogOffsetMetadata(segment2End), fileRecordsMock)) + when(fileRecordsMock.sizeInBytes()) + .thenReturn(segment1Records.sizeInBytes) + .thenReturn(segment2Records.sizeInBytes) + + val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) + when(fileRecordsMock.readInto(bufferCapture.capture(), anyInt())) + .thenAnswer(_ => { + val buffer = bufferCapture.getValue + buffer.put(segment1Records.buffer.duplicate) + buffer.flip() + }).thenAnswer(_ => { + val buffer = bufferCapture.getValue + buffer.put(segment2Records.buffer.duplicate) + buffer.flip() + }) + + when(replicaManager.getLogEndOffset(groupTopicPartition)).thenReturn(Some(segment2End)) + + groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Stable, group.currentState) + + assertEquals(segment2MemberId, group.leaderOrNull, "segment2 group record member should be elected") + assertEquals(Set(segment2MemberId), group.allMembers, "segment2 group record member should be only member") + + // offsets of segment1 should be overridden by segment2 offsets of the same topic partitions + val committedOffsets = segment1Offsets ++ segment2Offsets + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + @Test + def testAddGroup(): Unit = { + val group = new GroupMetadata("foo", Empty, time) + assertEquals(group, groupMetadataManager.addGroup(group)) + assertEquals(group, groupMetadataManager.addGroup(new GroupMetadata("foo", Empty, time))) + } + + @Test + def testloadGroupWithStaticMember(): Unit = { + val generation = 27 + val protocolType = "consumer" + val staticMemberId = "staticMemberId" + val dynamicMemberId = "dynamicMemberId" + + val staticMember = new MemberMetadata(staticMemberId, Some(groupInstanceId), "", "", rebalanceTimeout, sessionTimeout, + protocolType, List(("protocol", Array[Byte]()))) + + val dynamicMember = new MemberMetadata(dynamicMemberId, None, "", "", rebalanceTimeout, sessionTimeout, + protocolType, List(("protocol", Array[Byte]()))) + + val members = Seq(staticMember, dynamicMember) + + val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, members, time) + + assertTrue(group.is(Empty)) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertTrue(group.has(staticMemberId)) + assertTrue(group.has(dynamicMemberId)) + assertTrue(group.hasStaticMember(groupInstanceId)) + assertEquals(Some(staticMemberId), group.currentStaticMemberId(groupInstanceId)) + } + + @Test + def testLoadConsumerGroup(): Unit = { + val generation = 27 + val protocolType = "consumer" + val protocol = "protocol" + val memberId = "member1" + val topic = "foo" + + val subscriptions = List( + ("protocol", ConsumerProtocol.serializeSubscription(new Subscription(List(topic).asJava)).array()) + ) + + val member = new MemberMetadata(memberId, Some(groupInstanceId), "", "", rebalanceTimeout, + sessionTimeout, protocolType, subscriptions) + + val members = Seq(member) + + val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, null, None, + members, time) + + assertTrue(group.is(Stable)) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(Some(Set(topic)), group.getSubscribedTopics) + assertTrue(group.has(memberId)) + } + + @Test + def testLoadEmptyConsumerGroup(): Unit = { + val generation = 27 + val protocolType = "consumer" + + val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, + Seq(), time) + + assertTrue(group.is(Empty)) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertNull(group.protocolName.orNull) + assertEquals(Some(Set.empty), group.getSubscribedTopics) + } + + @Test + def testLoadConsumerGroupWithFaultyConsumerProtocol(): Unit = { + val generation = 27 + val protocolType = "consumer" + val protocol = "protocol" + val memberId = "member1" + + val subscriptions = List(("protocol", Array[Byte]())) + + val member = new MemberMetadata(memberId, Some(groupInstanceId), "", "", rebalanceTimeout, + sessionTimeout, protocolType, subscriptions) + + val members = Seq(member) + + val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, null, None, + members, time) + + assertTrue(group.is(Stable)) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(None, group.getSubscribedTopics) + assertTrue(group.has(memberId)) + } + + @Test + def testShouldThrowExceptionForUnsupportedGroupMetadataVersion(): Unit = { + val generation = 1 + val protocol = "range" + val memberId = "memberId" + val unsupportedVersion = Short.MinValue + + // put the unsupported version as the version value + val groupMetadataRecordValue = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId) + .value().putShort(unsupportedVersion) + // reset the position to the starting position 0 so that it can read the data in correct order + groupMetadataRecordValue.position(0) + + val e = assertThrows(classOf[IllegalStateException], + () => GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecordValue, time)) + assertEquals(s"Unknown group metadata message version: $unsupportedVersion", e.getMessage) + } + + @Test + def testCurrentStateTimestampForAllVersions(): Unit = { + val generation = 1 + val protocol = "range" + val memberId = "memberId" + + for (version <- 0 to 3) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, + groupMetadataValueVersion = version.toShort) + val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) + + if (version >= 2) + assertEquals(Some(time.milliseconds()), deserializedGroupMetadata.currentStateTimestamp) + else + assertTrue(deserializedGroupMetadata.currentStateTimestamp.isEmpty) + } + } + + @Test + def testReadFromOldGroupMetadata(): Unit = { + val generation = 1 + val protocol = "range" + val memberId = "memberId" + + for (version <- 0 to 2) { + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, + groupMetadataValueVersion = version.toShort) + + val deserializedGroupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, groupMetadataRecord.value(), time) + assertEquals(groupId, deserializedGroupMetadata.groupId) + assertEquals(generation, deserializedGroupMetadata.generationId) + assertEquals(protocolType, deserializedGroupMetadata.protocolType.get) + assertEquals(protocol, deserializedGroupMetadata.protocolName.orNull) + assertEquals(1, deserializedGroupMetadata.allMembers.size) + assertEquals(deserializedGroupMetadata.allMembers, deserializedGroupMetadata.allDynamicMembers) + assertTrue(deserializedGroupMetadata.allMembers.contains(memberId)) + assertTrue(deserializedGroupMetadata.allStaticMembers.isEmpty) + } + } + + @Test + def testStoreEmptyGroup(): Unit = { + val generation = 27 + val protocolType = "consumer" + + val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time) + groupMetadataManager.addGroup(group) + + val capturedRecords = expectAppendMessage(Errors.NONE) + var maybeError: Option[Errors] = None + def callback(error: Errors): Unit = { + maybeError = Some(error) + } + + groupMetadataManager.storeGroup(group, Map.empty, callback) + assertEquals(Some(Errors.NONE), maybeError) + val records = capturedRecords.getValue()(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)) + .records.asScala.toList + assertEquals(1, records.size) + + val record = records.head + val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value, time) + assertTrue(groupMetadata.is(Empty)) + assertEquals(generation, groupMetadata.generationId) + assertEquals(Some(protocolType), groupMetadata.protocolType) + } + + @Test + def testStoreEmptySimpleGroup(): Unit = { + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val capturedRecords = expectAppendMessage(Errors.NONE) + var maybeError: Option[Errors] = None + def callback(error: Errors): Unit = { + maybeError = Some(error) + } + + groupMetadataManager.storeGroup(group, Map.empty, callback) + assertEquals(Some(Errors.NONE), maybeError) + val records = capturedRecords.getValue()(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupPartitionId)) + .records.asScala.toList + assertEquals(1, records.size) + + val record = records.head + val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, record.value, time) + assertTrue(groupMetadata.is(Empty)) + assertEquals(0, groupMetadata.generationId) + assertEquals(None, groupMetadata.protocolType) + } + + @Test + def testStoreGroupErrorMapping(): Unit = { + assertStoreGroupErrorMapping(Errors.NONE, Errors.NONE) + assertStoreGroupErrorMapping(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.COORDINATOR_NOT_AVAILABLE) + assertStoreGroupErrorMapping(Errors.NOT_ENOUGH_REPLICAS, Errors.COORDINATOR_NOT_AVAILABLE) + assertStoreGroupErrorMapping(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND, Errors.COORDINATOR_NOT_AVAILABLE) + assertStoreGroupErrorMapping(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NOT_COORDINATOR) + assertStoreGroupErrorMapping(Errors.MESSAGE_TOO_LARGE, Errors.UNKNOWN_SERVER_ERROR) + assertStoreGroupErrorMapping(Errors.RECORD_LIST_TOO_LARGE, Errors.UNKNOWN_SERVER_ERROR) + assertStoreGroupErrorMapping(Errors.INVALID_FETCH_SIZE, Errors.UNKNOWN_SERVER_ERROR) + assertStoreGroupErrorMapping(Errors.CORRUPT_MESSAGE, Errors.CORRUPT_MESSAGE) + } + + private def assertStoreGroupErrorMapping(appendError: Errors, expectedError: Errors): Unit = { + reset(replicaManager) + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + expectAppendMessage(appendError) + var maybeError: Option[Errors] = None + def callback(error: Errors): Unit = { + maybeError = Some(error) + } + + groupMetadataManager.storeGroup(group, Map.empty, callback) + assertEquals(Some(expectedError), maybeError) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any(), + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testStoreNonEmptyGroup(): Unit = { + val memberId = "memberId" + val clientId = "clientId" + val clientHost = "localhost" + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout, + protocolType, List(("protocol", Array[Byte]()))) + group.add(member, _ => ()) + group.transitionTo(PreparingRebalance) + group.initNextGeneration() + + expectAppendMessage(Errors.NONE) + var maybeError: Option[Errors] = None + def callback(error: Errors): Unit = { + maybeError = Some(error) + } + + groupMetadataManager.storeGroup(group, Map(memberId -> Array[Byte]()), callback) + assertEquals(Some(Errors.NONE), maybeError) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any(), + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testStoreNonEmptyGroupWhenCoordinatorHasMoved(): Unit = { + when(replicaManager.onlinePartition(any())).thenReturn(None) + val memberId = "memberId" + val clientId = "clientId" + val clientHost = "localhost" + + val group = new GroupMetadata(groupId, Empty, time) + + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout, + protocolType, List(("protocol", Array[Byte]()))) + group.add(member, _ => ()) + group.transitionTo(PreparingRebalance) + group.initNextGeneration() + + var maybeError: Option[Errors] = None + def callback(error: Errors): Unit = { + maybeError = Some(error) + } + + groupMetadataManager.storeGroup(group, Map(memberId -> Array[Byte]()), callback) + assertEquals(Some(Errors.NOT_COORDINATOR), maybeError) + + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testCommitOffset(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) + + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + val maybeError = commitErrors.get.get(topicIdPartition) + assertEquals(Some(Errors.NONE), maybeError) + assertTrue(group.hasOffsets) + + val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicIdPartition.topicPartition))) + val maybePartitionResponse = cachedOffsets.get(topicIdPartition.topicPartition) + assertFalse(maybePartitionResponse.isEmpty) + + val partitionResponse = maybePartitionResponse.get + assertEquals(Errors.NONE, partitionResponse.error) + assertEquals(offset, partitionResponse.offset) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any(), + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + // Will update sensor after commit + assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + } + + @Test + def testTransactionalCommitOffsetCommitted(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + val producerId = 232L + val producerEpoch = 0.toShort + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration) + val offsets = immutable.Map(topicIdPartition -> offsetAndMetadata) + + val capturedResponseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + val verificationGuard = new VerificationGuard() + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, producerId, producerEpoch, verificationGuard = Some(verificationGuard)) + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + capturedResponseCallback.capture(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) + verify(replicaManager).onlinePartition(any()) + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + group.completePendingTxnOffsetCommit(producerId, isCommit = true) + assertTrue(group.hasOffsets) + assertFalse(group.allOffsets.isEmpty) + assertEquals(Some(offsetAndMetadata), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testTransactionalCommitOffsetAppendFailure(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + val producerId = 232L + val producerEpoch = 0.toShort + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + val verificationGuard = new VerificationGuard() + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, producerId, producerEpoch, verificationGuard = Some(verificationGuard)) + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + val capturedResponseCallback = verifyAppendAndCaptureCallback() + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(Errors.NOT_ENOUGH_REPLICAS, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertFalse(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + group.completePendingTxnOffsetCommit(producerId, isCommit = false) + assertFalse(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testTransactionalCommitOffsetAborted(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + val producerId = 232L + val producerEpoch = 0.toShort + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + val verificationGuard = new VerificationGuard() + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, producerId, producerEpoch, verificationGuard = Some(verificationGuard)) + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + val capturedResponseCallback = verifyAppendAndCaptureCallback() + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + group.completePendingTxnOffsetCommit(producerId, isCommit = false) + assertFalse(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testCommitOffsetWhenCoordinatorHasMoved(): Unit = { + when(replicaManager.onlinePartition(any())).thenReturn(None) + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + + assertFalse(commitErrors.isEmpty) + val maybeError = commitErrors.get.get(topicIdPartition) + assertEquals(Some(Errors.NOT_COORDINATOR), maybeError) + + verify(replicaManager).onlinePartition(any()) + } + + @Test + def testCommitOffsetFailure(): Unit = { + assertCommitOffsetErrorMapping(Errors.UNKNOWN_TOPIC_OR_PARTITION, Errors.COORDINATOR_NOT_AVAILABLE) + assertCommitOffsetErrorMapping(Errors.NOT_ENOUGH_REPLICAS, Errors.COORDINATOR_NOT_AVAILABLE) + assertCommitOffsetErrorMapping(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND, Errors.COORDINATOR_NOT_AVAILABLE) + assertCommitOffsetErrorMapping(Errors.NOT_LEADER_OR_FOLLOWER, Errors.NOT_COORDINATOR) + assertCommitOffsetErrorMapping(Errors.MESSAGE_TOO_LARGE, Errors.INVALID_COMMIT_OFFSET_SIZE) + assertCommitOffsetErrorMapping(Errors.RECORD_LIST_TOO_LARGE, Errors.INVALID_COMMIT_OFFSET_SIZE) + assertCommitOffsetErrorMapping(Errors.INVALID_FETCH_SIZE, Errors.INVALID_COMMIT_OFFSET_SIZE) + assertCommitOffsetErrorMapping(Errors.CORRUPT_MESSAGE, Errors.CORRUPT_MESSAGE) + } + + private def assertCommitOffsetErrorMapping(appendError: Errors, expectedError: Errors): Unit = { + reset(replicaManager) + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map(topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration)) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + val capturedResponseCallback = verifyAppendAndCaptureCallback() + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(appendError, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertFalse(commitErrors.isEmpty) + val maybeError = commitErrors.get.get(topicIdPartition) + assertEquals(Some(expectedError), maybeError) + assertFalse(group.hasOffsets) + + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition.topicPartition).map(_.offset) + ) + + verify(replicaManager).onlinePartition(any()) + // Will not update sensor if failed + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + } + + @Test + def testCommitOffsetPartialFailure(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val topicIdPartitionFailed = new TopicIdPartition(Uuid.randomUuid(), 1, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map( + topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration), + // This will failed + topicIdPartitionFailed -> new OffsetAndMetadata(offset, noLeader, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds(), noExpiration) + ) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + val capturedResponseCallback = verifyAppendAndCaptureCallback() + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topicIdPartition)) + assertEquals(Some(Errors.OFFSET_METADATA_TOO_LARGE), commitErrors.get.get(topicIdPartitionFailed)) + assertTrue(group.hasOffsets) + + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition.topicPartition, topicIdPartitionFailed.topicPartition)) + ) + assertEquals( + Some(offset), + cachedOffsets.get(topicIdPartition.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartitionFailed.topicPartition).map(_.offset) + ) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + verify(replicaManager).onlinePartition(any()) + assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + } + + @Test + def testOffsetMetadataTooLarge(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map( + topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds(), noExpiration) + ) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertFalse(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + val maybeError = commitErrors.get.get(topicIdPartition) + assertEquals(Some(Errors.OFFSET_METADATA_TOO_LARGE), maybeError) + assertFalse(group.hasOffsets) + + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition.topicPartition).map(_.offset) + ) + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + } + + @Test + def testOffsetMetadataTooLargePartialFailure(): Unit = { + val memberId = "" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val validTopicIdPartition = new TopicIdPartition(topicIdPartition.topicId, 1, "foo") + val offset = 37 + val requireStable = true + + groupMetadataManager.addOwnedPartition(groupPartitionId) + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map( + topicIdPartition -> new OffsetAndMetadata(offset, noLeader, "s" * (offsetConfig.maxMetadataSize + 1) , time.milliseconds(), noExpiration), + validTopicIdPartition -> new OffsetAndMetadata(offset, noLeader, "", time.milliseconds(), noExpiration) + ) + + expectAppendMessage(Errors.NONE) + + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + assertEquals(0, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertEquals(Some(Map( + topicIdPartition -> Errors.OFFSET_METADATA_TOO_LARGE, + validTopicIdPartition -> Errors.NONE) + ), commitErrors) + + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + requireStable, + Some(Seq(topicIdPartition.topicPartition, validTopicIdPartition.topicPartition)) + ) + + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition.topicPartition).map(_.offset) + ) + assertEquals( + Some(Errors.NONE), + cachedOffsets.get(topicIdPartition.topicPartition).map(_.error) + ) + assertEquals( + Some(offset), + cachedOffsets.get(validTopicIdPartition.topicPartition).map(_.offset) + ) + + assertEquals(1, TestUtils.totalMetricValue(metrics, "offset-commit-count")) + } + + @Test + def testTransactionalCommitOffsetWithOffsetMetadataTooLargePartialFailure(): Unit = { + val memberId = "" + val foo0 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val foo1 = new TopicIdPartition(Uuid.randomUuid(), 1, "foo") + val producerId = 232L + val producerEpoch = 0.toShort + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val offsets = immutable.Map( + foo0 -> new OffsetAndMetadata(37, noLeader, "", time.milliseconds(), noExpiration), + foo1 -> new OffsetAndMetadata(38, noLeader, "s" * (offsetConfig.maxMetadataSize + 1), time.milliseconds(), noExpiration) + ) + + val capturedResponseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + val verificationGuard = new VerificationGuard() + + groupMetadataManager.storeOffsets( + group, + memberId, + offsetTopicPartition, + offsets, + callback, + producerId, + producerEpoch, + verificationGuard = Some(verificationGuard) + ) + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + capturedResponseCallback.capture(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + ArgumentMatchers.eq(Map(offsetTopicPartition -> verificationGuard))) + verify(replicaManager).onlinePartition(any()) + capturedResponseCallback.getValue.apply(Map(groupTopicPartition -> + new PartitionResponse(Errors.NONE, 0L, RecordBatch.NO_TIMESTAMP, 0L))) + + assertEquals(Some(Map( + foo0 -> Errors.NONE, + foo1 -> Errors.OFFSET_METADATA_TOO_LARGE + )), commitErrors) + + assertTrue(group.hasOffsets) + assertTrue(group.allOffsets.isEmpty) + + group.completePendingTxnOffsetCommit(producerId, isCommit = true) + assertTrue(group.hasOffsets) + assertFalse(group.allOffsets.isEmpty) + assertEquals(offsets.get(foo0), group.offset(foo0.topicPartition)) + } + + @Test + def testExpireOffset(): Unit = { + val memberId = "" + val topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val topicIdPartition2 = new TopicIdPartition(topicIdPartition1.topicId, 1, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + // expire the offset after 1 millisecond + val startMs = time.milliseconds + val offsets = immutable.Map( + topicIdPartition1 -> new OffsetAndMetadata(offset, noLeader, "", startMs, OptionalLong.of(startMs + 1)), + topicIdPartition2 -> new OffsetAndMetadata(offset, noLeader, "", startMs, OptionalLong.of(startMs + 3))) + + mockGetPartition() + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topicIdPartition1)) + + // expire only one of the offsets + time.sleep(2) + + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + assertEquals(Some(offset), group.offset(topicIdPartition2.topicPartition).map(_.committedOffset)) + + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition)) + ) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) + + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any(), + any(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + verify(replicaManager, times(2)).onlinePartition(any()) + } + + @Test + def testGroupMetadataRemoval(): Unit = { + val topicPartition1 = new TopicPartition("foo", 0) + val topicPartition2 = new TopicPartition("foo", 1) + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + group.generationId = 5 + + // expect the group metadata tombstone + val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + mockGetPartition() + when(partition.appendRecordsToLeader(recordsCapture.capture(), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + val records = recordsCapture.getValue.records.asScala.toList + recordsCapture.getValue.batches.forEach { batch => + assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic) + assertEquals(TimestampType.CREATE_TIME, batch.timestampType) + } + assertEquals(1, records.size) + + val metadataTombstone = records.head + assertTrue(metadataTombstone.hasKey) + assertFalse(metadataTombstone.hasValue) + assertTrue(metadataTombstone.timestamp > 0) + + val groupKey = GroupMetadataManager.readMessageKey(metadataTombstone.key).asInstanceOf[GroupMetadataKey] + assertEquals(groupId, groupKey.key) + + // the full group should be gone since all offsets were removed + assertEquals(None, groupMetadataManager.getGroup(groupId)) + val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2))) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset)) + } + + @Test + def testGroupMetadataRemovalWithLogAppendTime(): Unit = { + val topicPartition1 = new TopicPartition("foo", 0) + val topicPartition2 = new TopicPartition("foo", 1) + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + group.generationId = 5 + + // expect the group metadata tombstone + val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) + + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + mockGetPartition() + when(partition.appendRecordsToLeader(recordsCapture.capture(), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + val records = recordsCapture.getValue.records.asScala.toList + recordsCapture.getValue.batches.forEach { batch => + assertEquals(RecordBatch.CURRENT_MAGIC_VALUE, batch.magic) + // Use CREATE_TIME, like the producer. The conversion to LOG_APPEND_TIME (if necessary) happens automatically. + assertEquals(TimestampType.CREATE_TIME, batch.timestampType) + } + assertEquals(1, records.size) + + val metadataTombstone = records.head + assertTrue(metadataTombstone.hasKey) + assertFalse(metadataTombstone.hasValue) + assertTrue(metadataTombstone.timestamp > 0) + + val groupKey = GroupMetadataManager.readMessageKey(metadataTombstone.key).asInstanceOf[GroupMetadataKey] + assertEquals(groupId, groupKey.key) + + // the full group should be gone since all offsets were removed + assertEquals(None, groupMetadataManager.getGroup(groupId)) + val cachedOffsets = groupMetadataManager.getOffsets(groupId, defaultRequireStable, Some(Seq(topicPartition1, topicPartition2))) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition1).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicPartition2).map(_.offset)) + } + + @Test + def testExpireGroupWithOffsetsOnly(): Unit = { + // verify that the group is removed properly, but no tombstone is written if + // this is a group which is only using kafka for offset storage + + val memberId = "" + val topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val topicIdPartition2 = new TopicIdPartition(topicIdPartition1.topicId, 1, "foo") + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + // expire the offset after 1 millisecond + val startMs = time.milliseconds + val offsets = immutable.Map( + topicIdPartition1 -> new OffsetAndMetadata(offset, OptionalInt.empty(), "", startMs, OptionalLong.of(startMs + 1)), + topicIdPartition2 -> new OffsetAndMetadata(offset, OptionalInt.empty(), "", startMs, OptionalLong.of(startMs + 3))) + + mockGetPartition() + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topicIdPartition1)) + + // expire all of the offsets + time.sleep(4) + + // expect the offset tombstone + val recordsCapture: ArgumentCaptor[MemoryRecords] = ArgumentCaptor.forClass(classOf[MemoryRecords]) + when(replicaManager.onlinePartition(any())).thenReturn(Some(partition)) + when(partition.appendRecordsToLeader(recordsCapture.capture(), + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + // verify the tombstones are correct and only for the expired offsets + val records = recordsCapture.getValue.records.asScala.toList + assertEquals(2, records.size) + records.foreach { message => + assertTrue(message.hasKey) + assertFalse(message.hasValue) + val offsetKey = GroupMetadataManager.readMessageKey(message.key).asInstanceOf[OffsetKey] + assertEquals(groupId, offsetKey.key.group) + assertEquals("foo", offsetKey.key.topicPartition.topic) + } + + // the full group should be gone since all offsets were removed + assertEquals(None, groupMetadataManager.getGroup(groupId)) + val cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset) + ) + + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + } + + @Test + def testOffsetExpirationSemantics(): Unit = { + val memberId = "memberId" + val clientId = "clientId" + val clientHost = "localhost" + val topic = "foo" + val topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), 0, topic) + val topicIdPartition2 = new TopicIdPartition(topicIdPartition1.topicId, 1, topic) + val topicIdPartition3 = new TopicIdPartition(topicIdPartition1.topicId, 2, topic) + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val subscription = new Subscription(List(topic).asJava) + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeout, sessionTimeout, + protocolType, List(("protocol", ConsumerProtocol.serializeSubscription(subscription).array()))) + group.add(member, _ => ()) + group.transitionTo(PreparingRebalance) + group.initNextGeneration() + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val startMs = time.milliseconds + // old clients, expiry timestamp is explicitly set + val tp1OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, OptionalLong.of(startMs + 1)) + val tp2OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, OptionalLong.of(startMs + 3)) + // new clients, no per-partition expiry timestamp, offsets of group expire together + val tp3OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + val offsets = immutable.Map( + topicIdPartition1 -> tp1OffsetAndMetadata, + topicIdPartition2 -> tp2OffsetAndMetadata, + topicIdPartition3 -> tp3OffsetAndMetadata) + + mockGetPartition() + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topicIdPartition1)) + + // do not expire any offset even though expiration timestamp is reached for one (due to group still being active) + time.sleep(2) + + groupMetadataManager.cleanupGroupMetadata() + + // group and offsets should still be there + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(Some(tp1OffsetAndMetadata), group.offset(topicIdPartition1.topicPartition)) + assertEquals(Some(tp2OffsetAndMetadata), group.offset(topicIdPartition2.topicPartition)) + assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicIdPartition3.topicPartition)) + + var cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition, topicIdPartition3.topicPartition)) + ) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) + + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + + group.transitionTo(PreparingRebalance) + group.transitionTo(Empty) + + // expect the offset tombstone + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + // group is empty now, only one offset should expire + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + assertEquals(Some(tp2OffsetAndMetadata), group.offset(topicIdPartition2.topicPartition)) + assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicIdPartition3.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition, topicIdPartition3.topicPartition)) + ) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) + + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) + + time.sleep(2) + + // expect the offset tombstone + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + // one more offset should expire + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + assertEquals(None, group.offset(topicIdPartition2.topicPartition)) + assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicIdPartition3.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition, topicIdPartition3.topicPartition)) + ) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) + assertEquals(Some(OffsetFetchResponse.INVALID_OFFSET), cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset)) + + verify(replicaManager, times(4)).onlinePartition(groupTopicPartition) + + // advance time to just before the offset of last partition is to be expired, no offset should expire + time.sleep(group.currentStateTimestamp.get + defaultOffsetRetentionMs - time.milliseconds() - 1) + + groupMetadataManager.cleanupGroupMetadata() + + // one more offset should expire + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + assertEquals(None, group.offset(topicIdPartition2.topicPartition)) + assertEquals(Some(tp3OffsetAndMetadata), group.offset(topicIdPartition3.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition, topicIdPartition3.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset) + ) + assertEquals( + Some(offset), + cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset) + ) + + verify(replicaManager, times(5)).onlinePartition(groupTopicPartition) + + // advance time enough for that last offset to expire + time.sleep(2) + + // expect the offset tombstone + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + // group and all its offsets should be gone now + assertEquals(None, groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + assertEquals(None, group.offset(topicIdPartition2.topicPartition)) + assertEquals(None, group.offset(topicIdPartition3.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition, topicIdPartition2.topicPartition, topicIdPartition3.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition2.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition3.topicPartition).map(_.offset) + ) + + verify(replicaManager, times(6)).onlinePartition(groupTopicPartition) + + assert(group.is(Dead)) + } + + @Test + def testOffsetExpirationOfSimpleConsumer(): Unit = { + val memberId = "memberId" + val topic = "foo" + val topicIdPartition1 = new TopicIdPartition(Uuid.randomUuid(), 0, topic) + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + // expire the offset after 1 and 3 milliseconds (old clients) and after default retention (new clients) + val startMs = time.milliseconds + // old clients, expiry timestamp is explicitly set + val tp1OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + // new clients, no per-partition expiry timestamp, offsets of group expire together + val offsets = immutable.Map( + topicIdPartition1 -> tp1OffsetAndMetadata) + + mockGetPartition() + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topicIdPartition1)) + + // do not expire offsets while within retention period since commit timestamp + val expiryTimestamp = offsets(topicIdPartition1).commitTimestampMs + defaultOffsetRetentionMs + time.sleep(expiryTimestamp - time.milliseconds() - 1) + + groupMetadataManager.cleanupGroupMetadata() + + // group and offsets should still be there + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assertEquals(Some(tp1OffsetAndMetadata), group.offset(topicIdPartition1.topicPartition)) + + var cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition)) + ) + assertEquals(Some(offset), cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset)) + + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + + // advance time to enough for offsets to expire + time.sleep(2) + + // expect the offset tombstone + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + groupMetadataManager.cleanupGroupMetadata() + + // group and all its offsets should be gone now + assertEquals(None, groupMetadataManager.getGroup(groupId)) + assertEquals(None, group.offset(topicIdPartition1.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq(topicIdPartition1.topicPartition)) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topicIdPartition1.topicPartition).map(_.offset) + ) + + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) + + assert(group.is(Dead)) + } + + @Test + def testOffsetExpirationOfActiveGroupSemantics(): Unit = { + val memberId = "memberId" + val clientId = "clientId" + val clientHost = "localhost" + + val topic1 = "foo" + val topic1IdPartition0 = new TopicIdPartition(Uuid.randomUuid(), 0, topic1) + val topic1IdPartition1 = new TopicIdPartition(topic1IdPartition0.topicId, 1, topic1) + + val topic2 = "bar" + val topic2IdPartition0 = new TopicIdPartition(Uuid.randomUuid(), 0, topic2) + val topic2IdPartition1 = new TopicIdPartition(topic2IdPartition0.topicId, 1, topic2) + + val offset = 37 + + groupMetadataManager.addOwnedPartition(groupPartitionId) + + val group = new GroupMetadata(groupId, Empty, time) + groupMetadataManager.addGroup(group) + + // Subscribe to topic1 and topic2 + val subscriptionTopic1AndTopic2 = new Subscription(List(topic1, topic2).asJava) + + val member = new MemberMetadata( + memberId, + Some(groupInstanceId), + clientId, + clientHost, + rebalanceTimeout, + sessionTimeout, + ConsumerProtocol.PROTOCOL_TYPE, + List(("protocol", ConsumerProtocol.serializeSubscription(subscriptionTopic1AndTopic2).array())) + ) + + group.add(member, _ => ()) + group.transitionTo(PreparingRebalance) + group.initNextGeneration() + group.transitionTo(Stable) + + val offsetTopicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, groupMetadataManager.partitionFor(group.groupId)) + val startMs = time.milliseconds + + val t1p0OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + val t1p1OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + + val t2p0OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + val t2p1OffsetAndMetadata = new OffsetAndMetadata(offset, noLeader, "", startMs, noExpiration) + + val offsets = immutable.Map( + topic1IdPartition0 -> t1p0OffsetAndMetadata, + topic1IdPartition1 -> t1p1OffsetAndMetadata, + topic2IdPartition0 -> t2p0OffsetAndMetadata, + topic2IdPartition1 -> t2p1OffsetAndMetadata) + + mockGetPartition() + expectAppendMessage(Errors.NONE) + var commitErrors: Option[immutable.Map[TopicIdPartition, Errors]] = None + def callback(errors: immutable.Map[TopicIdPartition, Errors]): Unit = { + commitErrors = Some(errors) + } + + groupMetadataManager.storeOffsets(group, memberId, offsetTopicPartition, offsets, callback, verificationGuard = None) + assertTrue(group.hasOffsets) + + assertFalse(commitErrors.isEmpty) + assertEquals(Some(Errors.NONE), commitErrors.get.get(topic1IdPartition0)) + + // advance time to just after the offset of last partition is to be expired + time.sleep(defaultOffsetRetentionMs + 2) + + // no offset should expire because all topics are actively consumed + groupMetadataManager.cleanupGroupMetadata() + + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assert(group.is(Stable)) + + assertEquals(Some(t1p0OffsetAndMetadata), group.offset(topic1IdPartition0.topicPartition)) + assertEquals(Some(t1p1OffsetAndMetadata), group.offset(topic1IdPartition1.topicPartition)) + assertEquals(Some(t2p0OffsetAndMetadata), group.offset(topic2IdPartition0.topicPartition)) + assertEquals(Some(t2p1OffsetAndMetadata), group.offset(topic2IdPartition1.topicPartition)) + + var cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq( + topic1IdPartition0.topicPartition, + topic1IdPartition1.topicPartition, + topic2IdPartition0.topicPartition, + topic2IdPartition1.topicPartition) + ) + ) + + assertEquals( + Some(offset), + cachedOffsets.get(topic1IdPartition0.topicPartition).map(_.offset) + ) + assertEquals( + Some(offset), + cachedOffsets.get(topic1IdPartition1.topicPartition).map(_.offset) + ) + assertEquals( + Some(offset), + cachedOffsets.get(topic2IdPartition0.topicPartition).map(_.offset) + ) + assertEquals( + Some(offset), + cachedOffsets.get(topic2IdPartition1.topicPartition).map(_.offset) + ) + + verify(replicaManager, times(2)).onlinePartition(groupTopicPartition) + + group.transitionTo(PreparingRebalance) + + // Subscribe to topic1, offsets of topic2 should be removed + val subscriptionTopic1 = new Subscription(List(topic1).asJava) + + group.updateMember( + member, + List(("protocol", ConsumerProtocol.serializeSubscription(subscriptionTopic1).array())), + member.rebalanceTimeoutMs, + member.sessionTimeoutMs, + null + ) + + group.initNextGeneration() + group.transitionTo(Stable) + + when(replicaManager.onlinePartition(any)).thenReturn(Some(partition)) + // expect the offset tombstone + when(partition.appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any())).thenReturn(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO) + + groupMetadataManager.cleanupGroupMetadata() + + verify(partition).appendRecordsToLeader(any[MemoryRecords], + origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), requiredAcks = anyInt(), + any(), any()) + verify(replicaManager, times(3)).onlinePartition(groupTopicPartition) + + assertEquals(Some(group), groupMetadataManager.getGroup(groupId)) + assert(group.is(Stable)) + + assertEquals(Some(t1p0OffsetAndMetadata), group.offset(topic1IdPartition0.topicPartition)) + assertEquals(Some(t1p1OffsetAndMetadata), group.offset(topic1IdPartition1.topicPartition)) + assertEquals(None, group.offset(topic2IdPartition0.topicPartition)) + assertEquals(None, group.offset(topic2IdPartition1.topicPartition)) + + cachedOffsets = groupMetadataManager.getOffsets( + groupId, + defaultRequireStable, + Some(Seq( + topic1IdPartition0.topicPartition, + topic1IdPartition1.topicPartition, + topic2IdPartition0.topicPartition, + topic2IdPartition1.topicPartition) + ) + ) + + assertEquals(Some(offset), cachedOffsets.get(topic1IdPartition0.topicPartition).map(_.offset)) + assertEquals(Some(offset), cachedOffsets.get(topic1IdPartition1.topicPartition).map(_.offset)) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topic2IdPartition0.topicPartition).map(_.offset) + ) + assertEquals( + Some(OffsetFetchResponse.INVALID_OFFSET), + cachedOffsets.get(topic2IdPartition1.topicPartition).map(_.offset) + ) + } + + @Test + def testLoadOffsetFromOldCommit(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val groupEpoch = 2 + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitValueVersion = 1.toShort + val groupMetadataValueVersion = 1.toShort + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, offsetCommitValueVersion = offsetCommitValueVersion, retentionTimeOpt = Some(100)) + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, groupMetadataValueVersion = groupMetadataValueVersion) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Stable, group.currentState) + assertEquals(memberId, group.leaderOrNull) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(Set(memberId), group.allMembers) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isPresent) + } + } + + @Test + def testLoadOffsetWithExplicitRetention(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val groupEpoch = 2 + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets, retentionTimeOpt = Some(100)) + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Stable, group.currentState) + assertEquals(memberId, group.leaderOrNull) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(Set(memberId), group.allMembers) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isPresent) + } + } + + @Test + def testSerdeOffsetCommitValue(): Unit = { + val offsetAndMetadata = new OffsetAndMetadata( + 537L, + OptionalInt.of(15), + "metadata", + time.milliseconds(), + noExpiration) + + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) + val buffer = ByteBuffer.wrap(bytes) + val expectedOffsetCommitValueVersion = 3 + assertEquals(expectedOffsetCommitValueVersion, buffer.getShort(0).toInt) + val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) + assertEquals(offsetAndMetadata.committedOffset, deserializedOffsetAndMetadata.committedOffset) + assertEquals(offsetAndMetadata.metadata, deserializedOffsetAndMetadata.metadata) + assertEquals(offsetAndMetadata.commitTimestampMs, deserializedOffsetAndMetadata.commitTimestampMs) + val expectedLeaderEpoch = offsetAndMetadata.leaderEpoch + assertEquals(expectedLeaderEpoch, deserializedOffsetAndMetadata.leaderEpoch) + assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) + } + + @Test + def testSerdeOffsetCommitValueWithExpireTimestamp(): Unit = { + // If expire timestamp is set, we should always use version 1 of the offset commit + // value schema since later versions do not support it + + val offsetAndMetadata = new OffsetAndMetadata( + 537L, + noLeader, + "metadata", + time.milliseconds(), + OptionalLong.of(time.milliseconds() + 1000)) + + val bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata) + val buffer = ByteBuffer.wrap(bytes) + assertEquals(1, buffer.getShort(0).toInt) + + val deserializedOffsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) + assertEquals(offsetAndMetadata, deserializedOffsetAndMetadata) + } + + @Test + def testSerializeGroupMetadataValueToHighestNonFlexibleVersion(): Unit = { + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val memberId = "98098230493" + val assignmentBytes = Utils.toArray(ConsumerProtocol.serializeAssignment( + new ConsumerPartitionAssignor.Assignment(List(new TopicPartition("topic", 0)).asJava, null) + )) + val record = TestUtils.records(Seq( + buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId, assignmentBytes) + )).records.asScala.head + assertEquals(3, record.value.getShort) + } + + @Test + def testSerializeOffsetCommitValueToHighestNonFlexibleVersion(): Unit = { + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + offsetCommitRecords.foreach { record => + assertEquals(3, record.value.getShort) + } + } + + @Test + def testDeserializeHighestSupportedGroupMetadataValueVersion(): Unit = { + val member = new GroupMetadataValue.MemberMetadata() + .setMemberId("member") + .setClientId("client") + .setClientHost("host") + + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val leader = "leader" + val groupMetadataValue = new GroupMetadataValue() + .setProtocolType(protocolType) + .setGeneration(generation) + .setProtocol(protocol) + .setLeader(leader) + .setMembers(java.util.Collections.singletonList(member)) + + val deserialized = GroupMetadataManager.readGroupMessageValue("groupId", + MessageUtil.toVersionPrefixedByteBuffer(4, groupMetadataValue), time) + + assertEquals(generation, deserialized.generationId) + assertEquals(protocolType, deserialized.protocolType.get) + assertEquals(protocol, deserialized.protocolName.get) + assertEquals(leader, deserialized.leaderOrNull) + + val actualMember = deserialized.allMemberMetadata.head + assertEquals(member.memberId, actualMember.memberId) + assertEquals(member.clientId, actualMember.clientId) + assertEquals(member.clientHost, actualMember.clientHost) + } + + @Test + def testDeserializeHighestSupportedOffsetCommitValueVersion(): Unit = { + val offsetCommitValue = new OffsetCommitValue() + .setOffset(1000L) + .setMetadata("metadata") + .setCommitTimestamp(1500L) + .setLeaderEpoch(1) + + val serialized = MessageUtil.toVersionPrefixedByteBuffer(4, offsetCommitValue) + val deserialized = GroupMetadataManager.readOffsetMessageValue(serialized) + + assertEquals(1000L, deserialized.committedOffset) + assertEquals("metadata", deserialized.metadata) + assertEquals(1500L, deserialized.commitTimestampMs) + assertEquals(1, deserialized.leaderEpoch.getAsInt) + } + + @Test + def testDeserializeFutureOffsetCommitValue(): Unit = { + // Copy of OffsetCommitValue.SCHEMA_4 with a few + // additional tagged fields. + val futureOffsetCommitSchema = new Schema( + new Field("offset", Type.INT64, ""), + new Field("leader_epoch", Type.INT32, ""), + new Field("metadata", Type.COMPACT_STRING, ""), + new Field("commit_timestamp", Type.INT64, ""), + TaggedFieldsSection.of( + Int.box(0), new Field("offset_foo", Type.STRING, ""), + Int.box(1), new Field("offset_bar", Type.INT32, "") + ) + ) + + // Create OffsetCommitValue with tagged fields + val offsetCommit = new Struct(futureOffsetCommitSchema) + offsetCommit.set("offset", 1000L) + offsetCommit.set("leader_epoch", 100) + offsetCommit.set("metadata", "metadata") + offsetCommit.set("commit_timestamp", 2000L) + val offsetCommitTaggedFields = new java.util.TreeMap[Integer, Any]() + offsetCommitTaggedFields.put(0, "foo") + offsetCommitTaggedFields.put(1, 4000) + offsetCommit.set("_tagged_fields", offsetCommitTaggedFields) + + // Prepare the buffer. + val buffer = ByteBuffer.allocate(offsetCommit.sizeOf() + 2) + buffer.put(0.toByte) + buffer.put(4.toByte) // Add 4 as version. + offsetCommit.writeTo(buffer) + buffer.flip() + + // Read the buffer with the real schema and verify that tagged + // fields were read but ignored. + buffer.getShort() // Skip version. + val value = new OffsetCommitValue(new ByteBufferAccessor(buffer), 4.toShort) + assertEquals(Seq(0, 1), value.unknownTaggedFields().asScala.map(_.tag)) + + // Read the buffer with readOffsetMessageValue. + buffer.rewind() + val offsetAndMetadata = GroupMetadataManager.readOffsetMessageValue(buffer) + assertEquals(1000L, offsetAndMetadata.committedOffset) + assertEquals(100, offsetAndMetadata.leaderEpoch.getAsInt) + assertEquals("metadata", offsetAndMetadata.metadata) + assertEquals(2000L, offsetAndMetadata.commitTimestampMs) + } + + @Test + def testDeserializeFutureGroupMetadataValue(): Unit = { + // Copy of GroupMetadataValue.MemberMetadata.SCHEMA_4 with a few + // additional tagged fields. + val futureMemberSchema = new Schema( + new Field("member_id", Type.COMPACT_STRING, ""), + new Field("group_instance_id", Type.COMPACT_NULLABLE_STRING, ""), + new Field("client_id", Type.COMPACT_STRING, ""), + new Field("client_host", Type.COMPACT_STRING, ""), + new Field("rebalance_timeout", Type.INT32, ""), + new Field("session_timeout", Type.INT32, ""), + new Field("subscription", Type.COMPACT_BYTES, ""), + new Field("assignment", Type.COMPACT_BYTES, ""), + TaggedFieldsSection.of( + Int.box(0), new Field("member_foo", Type.STRING, ""), + Int.box(1), new Field("member_foo", Type.INT32, "") + ) + ) + + // Copy of GroupMetadataValue.SCHEMA_4 with a few + // additional tagged fields. + val futureGroupSchema = new Schema( + new Field("protocol_type", Type.COMPACT_STRING, ""), + new Field("generation", Type.INT32, ""), + new Field("protocol", Type.COMPACT_NULLABLE_STRING, ""), + new Field("leader", Type.COMPACT_NULLABLE_STRING, ""), + new Field("current_state_timestamp", Type.INT64, ""), + new Field("members", new CompactArrayOf(futureMemberSchema), ""), + TaggedFieldsSection.of( + Int.box(0), new Field("group_foo", Type.STRING, ""), + Int.box(1), new Field("group_bar", Type.INT32, "") + ) + ) + + // Create a member with tagged fields. + val member = new Struct(futureMemberSchema) + member.set("member_id", "member_id") + member.set("group_instance_id", "group_instance_id") + member.set("client_id", "client_id") + member.set("client_host", "client_host") + member.set("rebalance_timeout", 1) + member.set("session_timeout", 2) + member.set("subscription", ByteBuffer.allocate(0)) + member.set("assignment", ByteBuffer.allocate(0)) + + val memberTaggedFields = new java.util.TreeMap[Integer, Any]() + memberTaggedFields.put(0, "foo") + memberTaggedFields.put(1, 4000) + member.set("_tagged_fields", memberTaggedFields) + + // Create a group with tagged fields. + val group = new Struct(futureGroupSchema) + group.set("protocol_type", "consumer") + group.set("generation", 10) + group.set("protocol", "range") + group.set("leader", "leader") + group.set("current_state_timestamp", 1000L) + group.set("members", Array(member)) + + val groupTaggedFields = new java.util.TreeMap[Integer, Any]() + groupTaggedFields.put(0, "foo") + groupTaggedFields.put(1, 4000) + group.set("_tagged_fields", groupTaggedFields) + + // Prepare the buffer. + val buffer = ByteBuffer.allocate(group.sizeOf() + 2) + buffer.put(0.toByte) + buffer.put(4.toByte) // Add 4 as version. + group.writeTo(buffer) + buffer.flip() + + // Read the buffer with the real schema and verify that tagged + // fields were read but ignored. + buffer.getShort() // Skip version. + val value = new GroupMetadataValue(new ByteBufferAccessor(buffer), 4.toShort) + assertEquals(Seq(0, 1), value.unknownTaggedFields().asScala.map(_.tag)) + assertEquals(Seq(0, 1), value.members().get(0).unknownTaggedFields().asScala.map(_.tag)) + + // Read the buffer with readGroupMessageValue. + buffer.rewind() + val groupMetadata = GroupMetadataManager.readGroupMessageValue("group", buffer, time) + assertEquals("consumer", groupMetadata.protocolType.get) + assertEquals("leader", groupMetadata.leaderOrNull) + assertTrue(groupMetadata.allMembers.contains("member_id")) + } + + @Test + def testLoadOffsetsWithEmptyControlBatch(): Unit = { + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val generation = 15 + val groupEpoch = 2 + + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val groupMetadataRecord = buildEmptyGroupRecord(generation, protocolType) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + // Prepend empty control batch to valid records + val mockBatch: MutableRecordBatch = mock(classOf[MutableRecordBatch]) + when(mockBatch.iterator).thenReturn(Collections.emptyIterator[Record]) + when(mockBatch.isControlBatch).thenReturn(true) + when(mockBatch.isTransactional).thenReturn(true) + when(mockBatch.nextOffset).thenReturn(16L) + val mockRecords: MemoryRecords = mock(classOf[MemoryRecords]) + when(mockRecords.batches).thenReturn((Iterable[MutableRecordBatch](mockBatch) ++ records.batches.asScala).asJava) + when(mockRecords.records).thenReturn(records.records()) + when(mockRecords.sizeInBytes()).thenReturn(DefaultRecordBatch.RECORD_BATCH_OVERHEAD + records.sizeInBytes()) + val logMock: UnifiedLog = mock(classOf[UnifiedLog]) + when(logMock.logStartOffset).thenReturn(startOffset) + when(logMock.read(ArgumentMatchers.eq(startOffset), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) + .thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), mockRecords)) + when(replicaManager.getLog(groupMetadataTopicPartition)).thenReturn(Some(logMock)) + when(replicaManager.getLogEndOffset(groupMetadataTopicPartition)).thenReturn(Some[Long](18)) + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), 0L) + + // Empty control batch should not have caused the load to fail + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Empty, group.currentState) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertNull(group.leaderOrNull) + assertNull(group.protocolName.orNull) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + } + } + + private def verifyAppendAndCaptureCallback(): ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = { + val capturedArgument: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + verify(replicaManager).appendRecords(anyLong(), + anyShort(), + any(), + any(), + any[Map[TopicPartition, MemoryRecords]], + capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any()) + capturedArgument + } + + private def expectAppendMessage(error: Errors): ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = { + val capturedCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + val capturedRecords: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + when(replicaManager.appendRecords(anyLong(), + anyShort(), + any(), + any(), + capturedRecords.capture(), + capturedCallback.capture(), + any[Option[ReentrantLock]], + any(), + any(), + any(), + any() + )).thenAnswer(_ => { + capturedCallback.getValue.apply( + Map(groupTopicPartition -> + new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L) + ) + )}) + when(replicaManager.onlinePartition(any())).thenReturn(Some(mock(classOf[Partition]))) + capturedRecords + } + + private def buildStableGroupRecordWithMember(generation: Int, + protocolType: String, + protocol: String, + memberId: String, + assignmentBytes: Array[Byte] = Array.emptyByteArray, + groupMetadataValueVersion: Short = 3): SimpleRecord = { + val memberProtocols = List((protocol, Array.emptyByteArray)) + val member = new MemberMetadata(memberId, Some(groupInstanceId), "clientId", "clientHost", 30000, 10000, protocolType, memberProtocols) + val group = GroupMetadata.loadGroup(groupId, Stable, generation, protocolType, protocol, memberId, + if (groupMetadataValueVersion >= 2.toShort) Some(time.milliseconds()) else None, Seq(member), time) + val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map(memberId -> assignmentBytes), groupMetadataValueVersion) + new SimpleRecord(groupMetadataKey, groupMetadataValue) + } + + private def buildEmptyGroupRecord(generation: Int, protocolType: String): SimpleRecord = { + val group = GroupMetadata.loadGroup(groupId, Empty, generation, protocolType, null, null, None, Seq.empty, time) + val groupMetadataKey = GroupMetadataManager.groupMetadataKey(groupId) + val groupMetadataValue = GroupMetadataManager.groupMetadataValue(group, Map.empty) + new SimpleRecord(groupMetadataKey, groupMetadataValue) + } + + private def expectGroupMetadataLoad(groupMetadataTopicPartition: TopicPartition, + startOffset: Long, + records: MemoryRecords): Unit = { + val logMock: UnifiedLog = mock(classOf[UnifiedLog]) + when(replicaManager.getLog(groupMetadataTopicPartition)).thenReturn(Some(logMock)) + val endOffset = expectGroupMetadataLoad(logMock, startOffset, records) + when(replicaManager.getLogEndOffset(groupMetadataTopicPartition)).thenReturn(Some(endOffset)) + } + + /** + * mock records into a mocked log + * + * @return the calculated end offset to be mocked into [[ReplicaManager.getLogEndOffset]] + */ + private def expectGroupMetadataLoad(logMock: UnifiedLog, + startOffset: Long, + records: MemoryRecords): Long = { + val endOffset = startOffset + records.records.asScala.size + val fileRecordsMock: FileRecords = mock(classOf[FileRecords]) + + when(logMock.logStartOffset).thenReturn(startOffset) + when(logMock.read(ArgumentMatchers.eq(startOffset), + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) + .thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) + + when(fileRecordsMock.sizeInBytes()).thenReturn(records.sizeInBytes) + + val bufferCapture: ArgumentCaptor[ByteBuffer] = ArgumentCaptor.forClass(classOf[ByteBuffer]) + when(fileRecordsMock.readInto(bufferCapture.capture(), anyInt())).thenAnswer(_ => { + val buffer = bufferCapture.getValue + buffer.put(records.buffer.duplicate) + buffer.flip() + }) + endOffset + } + + private def createCommittedOffsetRecords(committedOffsets: Map[TopicPartition, Long], + groupId: String = groupId, + offsetCommitValueVersion: Short = 3, + retentionTimeOpt: Option[Long] = None): Seq[SimpleRecord] = { + committedOffsets.map { case (topicPartition, offset) => + val commitTimestamp = time.milliseconds() + val offsetAndMetadata = retentionTimeOpt match { + case Some(retentionTimeMs) => + val expirationTime = OptionalLong.of(commitTimestamp + retentionTimeMs) + new OffsetAndMetadata(offset, noLeader, "", commitTimestamp, expirationTime) + case None => + new OffsetAndMetadata(offset, noLeader, "", commitTimestamp, noExpiration) + } + val offsetCommitKey = GroupMetadataManager.offsetCommitKey(groupId, topicPartition) + val offsetCommitValue = GroupMetadataManager.offsetCommitValue(offsetAndMetadata, offsetCommitValueVersion) + new SimpleRecord(offsetCommitKey, offsetCommitValue) + }.toSeq + } + + private def mockGetPartition(): Unit = { + when(replicaManager.getPartition(groupTopicPartition)).thenReturn(HostedPartition.Online(partition)) + when(replicaManager.onlinePartition(groupTopicPartition)).thenReturn(Some(partition)) + } + + private def getGauge(manager: GroupMetadataManager, name: String): Gauge[Int] = { + KafkaYammerMetrics.defaultRegistry().allMetrics().get(manager.metricsGroup.metricName(name, Collections.emptyMap())).asInstanceOf[Gauge[Int]] + } + + private def expectMetrics(manager: GroupMetadataManager, + expectedNumGroups: Int, + expectedNumGroupsPreparingRebalance: Int, + expectedNumGroupsCompletingRebalance: Int): Unit = { + assertEquals(expectedNumGroups, getGauge(manager, "NumGroups").value) + assertEquals(expectedNumGroupsPreparingRebalance, getGauge(manager, "NumGroupsPreparingRebalance").value) + assertEquals(expectedNumGroupsCompletingRebalance, getGauge(manager, "NumGroupsCompletingRebalance").value) + } + + @Test + def testMetrics(): Unit = { + groupMetadataManager.cleanupGroupMetadata() + expectMetrics(groupMetadataManager, 0, 0, 0) + val group = new GroupMetadata("foo2", Stable, time) + groupMetadataManager.addGroup(group) + expectMetrics(groupMetadataManager, 1, 0, 0) + group.transitionTo(PreparingRebalance) + expectMetrics(groupMetadataManager, 1, 1, 0) + group.transitionTo(CompletingRebalance) + expectMetrics(groupMetadataManager, 1, 0, 1) + } + + @Test + def testPartitionLoadMetric(): Unit = { + val server = ManagementFactory.getPlatformMBeanServer + val mBeanName = "kafka.server:type=group-coordinator-metrics" + val reporter = new JmxReporter + val metricsContext = new KafkaMetricsContext("kafka.server") + reporter.contextChange(metricsContext) + metrics.addReporter(reporter) + + def partitionLoadTime(attribute: String): Double = { + server.getAttribute(new ObjectName(mBeanName), attribute).asInstanceOf[Double] + } + + assertTrue(server.isRegistered(new ObjectName(mBeanName))) + assertEquals(Double.NaN, partitionLoadTime( "partition-load-time-max"), 0) + assertEquals(Double.NaN, partitionLoadTime("partition-load-time-avg"), 0) + assertTrue(reporter.containsMbean(mBeanName)) + + val groupMetadataTopicPartition = groupTopicPartition + val startOffset = 15L + val memberId = "98098230493" + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val groupMetadataRecord = buildStableGroupRecordWithMember(generation = 15, + protocolType = "consumer", protocol = "range", memberId) + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupMetadataTopicPartition, startOffset, records) + // When passed a specific start offset, assert that the measured values are in excess of that. + val now = time.milliseconds() + val diff = 1000 + val groupEpoch = 2 + groupMetadataManager.loadGroupsAndOffsets(groupMetadataTopicPartition, groupEpoch, _ => (), now - diff) + assertTrue(partitionLoadTime("partition-load-time-max") >= diff) + assertTrue(partitionLoadTime("partition-load-time-avg") >= diff) + } + + @Test + def testReadMessageKeyCanReadUnknownMessage(): Unit = { + val record = new org.apache.kafka.coordinator.group.generated.GroupMetadataKey() + val unknownRecord = MessageUtil.toVersionPrefixedBytes(Short.MaxValue, record) + val key = GroupMetadataManager.readMessageKey(ByteBuffer.wrap(unknownRecord)) + assertEquals(UnknownKey(Short.MaxValue), key) + } + + @Test + def testLoadGroupsAndOffsetsWillIgnoreUnknownMessage(): Unit = { + val generation = 935 + val protocolType = "consumer" + val protocol = "range" + val startOffset = 15L + val committedOffsets = Map( + new TopicPartition("foo", 0) -> 23L, + new TopicPartition("foo", 1) -> 455L, + new TopicPartition("bar", 0) -> 8992L + ) + + val offsetCommitRecords = createCommittedOffsetRecords(committedOffsets) + val memberId = "98098230493" + val groupMetadataRecord = buildStableGroupRecordWithMember(generation, protocolType, protocol, memberId) + + // Should ignore unknown record + val unknownKey = new org.apache.kafka.coordinator.group.generated.GroupMetadataKey() + val lowestUnsupportedVersion = (org.apache.kafka.coordinator.group.generated.GroupMetadataKey + .HIGHEST_SUPPORTED_VERSION + 1).toShort + + val unknownMessage1 = MessageUtil.toVersionPrefixedBytes(Short.MaxValue, unknownKey) + val unknownMessage2 = MessageUtil.toVersionPrefixedBytes(lowestUnsupportedVersion, unknownKey) + val unknownRecord1 = new SimpleRecord(unknownMessage1, unknownMessage1) + val unknownRecord2 = new SimpleRecord(unknownMessage2, unknownMessage2) + + val records = MemoryRecords.withRecords(startOffset, Compression.NONE, + (offsetCommitRecords ++ Seq(unknownRecord1, unknownRecord2) ++ Seq(groupMetadataRecord)).toArray: _*) + + expectGroupMetadataLoad(groupTopicPartition, startOffset, records) + + groupMetadataManager.loadGroupsAndOffsets(groupTopicPartition, 1, _ => (), 0L) + + val group = groupMetadataManager.getGroup(groupId).getOrElse(throw new AssertionError("Group was not loaded into the cache")) + assertEquals(groupId, group.groupId) + assertEquals(Stable, group.currentState) + assertEquals(memberId, group.leaderOrNull) + assertEquals(generation, group.generationId) + assertEquals(Some(protocolType), group.protocolType) + assertEquals(protocol, group.protocolName.orNull) + assertEquals(Set(memberId), group.allMembers) + assertEquals(committedOffsets.size, group.allOffsets.size) + committedOffsets.foreach { case (topicPartition, offset) => + assertEquals(Some(offset), group.offset(topicPartition).map(_.committedOffset)) + assertTrue(group.offset(topicPartition).map(_.expireTimestampMs).get.isEmpty) + } + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataTest.scala new file mode 100644 index 0000000000000..5f1d1b8c71ee4 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/GroupMetadataTest.scala @@ -0,0 +1,866 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.coordinator.group + +import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.Subscription +import org.apache.kafka.clients.consumer.internals.ConsumerProtocol +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.utils.{MockTime, Time} +import org.apache.kafka.coordinator.group.OffsetAndMetadata +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{BeforeEach, Test} + +import java.util.{OptionalInt, OptionalLong} +import scala.jdk.CollectionConverters._ + +/** + * Test group state transitions and other GroupMetadata functionality + */ +class GroupMetadataTest { + private val protocolType = "consumer" + private val groupInstanceId = "groupInstanceId" + private val memberId = "memberId" + private val clientId = "clientId" + private val clientHost = "clientHost" + private val rebalanceTimeoutMs = 60000 + private val sessionTimeoutMs = 10000 + + private var group: GroupMetadata = _ + + @BeforeEach + def setUp(): Unit = { + group = new GroupMetadata("groupId", Empty, Time.SYSTEM) + } + + @Test + def testCanRebalanceWhenStable(): Unit = { + assertTrue(group.canRebalance) + } + + @Test + def testCanRebalanceWhenCompletingRebalance(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(CompletingRebalance) + assertTrue(group.canRebalance) + } + + @Test + def testCannotRebalanceWhenPreparingRebalance(): Unit = { + group.transitionTo(PreparingRebalance) + assertFalse(group.canRebalance) + } + + @Test + def testCannotRebalanceWhenDead(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Empty) + group.transitionTo(Dead) + assertFalse(group.canRebalance) + } + + @Test + def testStableToPreparingRebalanceTransition(): Unit = { + group.transitionTo(PreparingRebalance) + assertState(group, PreparingRebalance) + } + + @Test + def testStableToDeadTransition(): Unit = { + group.transitionTo(Dead) + assertState(group, Dead) + } + + @Test + def testAwaitingRebalanceToPreparingRebalanceTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(CompletingRebalance) + group.transitionTo(PreparingRebalance) + assertState(group, PreparingRebalance) + } + + @Test + def testPreparingRebalanceToDeadTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Dead) + assertState(group, Dead) + } + + @Test + def testPreparingRebalanceToEmptyTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Empty) + assertState(group, Empty) + } + + @Test + def testEmptyToDeadTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Empty) + group.transitionTo(Dead) + assertState(group, Dead) + } + + @Test + def testAwaitingRebalanceToStableTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(CompletingRebalance) + group.transitionTo(Stable) + assertState(group, Stable) + } + + @Test + def testEmptyToStableIllegalTransition(): Unit = { + assertThrows(classOf[IllegalStateException], () => group.transitionTo(Stable)) + } + + @Test + def testStableToStableIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(CompletingRebalance) + group.transitionTo(Stable) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(Stable)) + } + + @Test + def testEmptyToAwaitingRebalanceIllegalTransition(): Unit = { + assertThrows(classOf[IllegalStateException], () => group.transitionTo(CompletingRebalance)) + } + + @Test + def testPreparingRebalanceToPreparingRebalanceIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(PreparingRebalance)) + } + + @Test + def testPreparingRebalanceToStableIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(Stable)) + } + + @Test + def testAwaitingRebalanceToAwaitingRebalanceIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(CompletingRebalance) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(CompletingRebalance)) + } + + @Test + def testDeadToDeadIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Dead) + group.transitionTo(Dead) + assertState(group, Dead) + } + + @Test + def testDeadToStableIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Dead) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(Stable)) + } + + @Test + def testDeadToPreparingRebalanceIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Dead) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(PreparingRebalance)) + } + + @Test + def testDeadToAwaitingRebalanceIllegalTransition(): Unit = { + group.transitionTo(PreparingRebalance) + group.transitionTo(Dead) + assertThrows(classOf[IllegalStateException], () => group.transitionTo(CompletingRebalance)) + } + + @Test + def testSelectProtocol(): Unit = { + val memberId = "memberId" + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + group.add(member) + assertEquals("range", group.selectProtocol) + + val otherMemberId = "otherMemberId" + val otherMember = new MemberMetadata(otherMemberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("roundrobin", Array.empty[Byte]), ("range", Array.empty[Byte]))) + + group.add(otherMember) + // now could be either range or robin since there is no majority preference + assertTrue(Set("range", "roundrobin")(group.selectProtocol)) + + val lastMemberId = "lastMemberId" + val lastMember = new MemberMetadata(lastMemberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("roundrobin", Array.empty[Byte]), ("range", Array.empty[Byte]))) + + group.add(lastMember) + // now we should prefer 'roundrobin' + assertEquals("roundrobin", group.selectProtocol) + } + + @Test + def testSelectProtocolRaisesIfNoMembers(): Unit = { + assertThrows(classOf[IllegalStateException], () => group.selectProtocol) + } + + @Test + def testSelectProtocolChoosesCompatibleProtocol(): Unit = { + val memberId = "memberId" + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + val otherMemberId = "otherMemberId" + val otherMember = new MemberMetadata(otherMemberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("roundrobin", Array.empty[Byte]), ("blah", Array.empty[Byte]))) + + group.add(member) + group.add(otherMember) + assertEquals("roundrobin", group.selectProtocol) + } + + @Test + def testSupportsProtocols(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + // by default, the group supports everything + assertTrue(group.supportsProtocols(protocolType, Set("roundrobin", "range"))) + + group.add(member) + group.transitionTo(PreparingRebalance) + assertTrue(group.supportsProtocols(protocolType, Set("roundrobin", "foo"))) + assertTrue(group.supportsProtocols(protocolType, Set("range", "foo"))) + assertFalse(group.supportsProtocols(protocolType, Set("foo", "bar"))) + + val otherMemberId = "otherMemberId" + val otherMember = new MemberMetadata(otherMemberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("roundrobin", Array.empty[Byte]), ("blah", Array.empty[Byte]))) + + group.add(otherMember) + + assertTrue(group.supportsProtocols(protocolType, Set("roundrobin", "foo"))) + assertFalse(group.supportsProtocols("invalid_type", Set("roundrobin", "foo"))) + assertFalse(group.supportsProtocols(protocolType, Set("range", "foo"))) + } + + @Test + def testOffsetRemovalDuringTransitionFromEmptyToNonEmpty(): Unit = { + val topic = "foo" + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, topic) + val time = new MockTime() + group = new GroupMetadata("groupId", Empty, time) + + // Rebalance once in order to commit offsets + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("range", ConsumerProtocol.serializeSubscription(new Subscription(List("foo").asJava)).array()))) + group.transitionTo(PreparingRebalance) + group.add(member) + group.initNextGeneration() + assertEquals(Some(Set("foo")), group.getSubscribedTopics) + + val offset = offsetAndMetadata(offset = 37, timestamp = time.milliseconds()) + val commitRecordOffset = 3 + + group.prepareOffsetCommit(Map(topicIdPartition -> offset)) + assertTrue(group.hasOffsets) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(commitRecordOffset), offset)) + + val offsetRetentionMs = 50000L + time.sleep(offsetRetentionMs + 1) + + // Rebalance again so that the group becomes empty + group.transitionTo(PreparingRebalance) + group.remove(memberId) + group.initNextGeneration() + + // The group is empty, but we should not expire the offset because the state was just changed + assertEquals(Empty, group.currentState) + assertEquals(Map.empty, group.removeExpiredOffsets(time.milliseconds(), offsetRetentionMs)) + + // Start a new rebalance to add the member back. The offset should not be expired + // while the rebalance is in progress. + group.transitionTo(PreparingRebalance) + group.add(member) + assertEquals(Map.empty, group.removeExpiredOffsets(time.milliseconds(), offsetRetentionMs)) + } + + @Test + def testSubscribedTopics(): Unit = { + // not able to compute it for a newly created group + assertEquals(None, group.getSubscribedTopics) + + val memberId = "memberId" + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("range", ConsumerProtocol.serializeSubscription(new Subscription(List("foo").asJava)).array()))) + + group.transitionTo(PreparingRebalance) + group.add(member) + + group.initNextGeneration() + + assertEquals(Some(Set("foo")), group.getSubscribedTopics) + + group.transitionTo(PreparingRebalance) + group.remove(memberId) + + group.initNextGeneration() + + assertEquals(Some(Set.empty), group.getSubscribedTopics) + + val memberWithFaultyProtocol = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, protocolType, List(("range", Array.empty[Byte]))) + + group.transitionTo(PreparingRebalance) + group.add(memberWithFaultyProtocol) + + group.initNextGeneration() + + assertEquals(None, group.getSubscribedTopics) + } + + @Test + def testSubscribedTopicsNonConsumerGroup(): Unit = { + // not able to compute it for a newly created group + assertEquals(None, group.getSubscribedTopics) + + val memberId = "memberId" + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, + sessionTimeoutMs, "My Protocol", List(("range", Array.empty[Byte]))) + + group.transitionTo(PreparingRebalance) + group.add(member) + + group.initNextGeneration() + + assertEquals(None, group.getSubscribedTopics) + } + + @Test + def testInitNextGeneration(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + member.supportedProtocols = List(("roundrobin", Array.empty[Byte])) + + group.transitionTo(PreparingRebalance) + group.add(member, _ => ()) + + assertEquals(0, group.generationId) + assertNull(group.protocolName.orNull) + + group.initNextGeneration() + + assertEquals(1, group.generationId) + assertEquals("roundrobin", group.protocolName.orNull) + } + + @Test + def testInitNextGenerationEmptyGroup(): Unit = { + assertEquals(Empty, group.currentState) + assertEquals(0, group.generationId) + assertNull(group.protocolName.orNull) + + group.transitionTo(PreparingRebalance) + group.initNextGeneration() + + assertEquals(1, group.generationId) + assertNull(group.protocolName.orNull) + } + + @Test + def testOffsetCommit(): Unit = { + val partition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = offsetAndMetadata(37) + val commitRecordOffset = 3 + + group.prepareOffsetCommit(Map(partition -> offset)) + assertTrue(group.hasOffsets) + assertEquals(None, group.offset(partition.topicPartition)) + + group.onOffsetCommitAppend(partition, CommitRecordMetadataAndOffset(Some(commitRecordOffset), offset)) + assertTrue(group.hasOffsets) + assertEquals(Some(offset), group.offset(partition.topicPartition)) + } + + @Test + def testOffsetCommitFailure(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = offsetAndMetadata(37) + + group.prepareOffsetCommit(Map(topicIdPartition -> offset)) + assertTrue(group.hasOffsets) + assertEquals(Some(offset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.failPendingOffsetWrite(topicIdPartition, offset) + assertFalse(group.hasOffsets) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testOffsetCommitFailureWithAnotherPending(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val firstOffset = offsetAndMetadata(37) + val secondOffset = offsetAndMetadata(57) + + group.prepareOffsetCommit(Map(topicIdPartition -> firstOffset)) + assertTrue(group.hasOffsets) + assertEquals(Some(firstOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.prepareOffsetCommit(Map(topicIdPartition -> secondOffset)) + assertTrue(group.hasOffsets) + assertEquals(Some(secondOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.failPendingOffsetWrite(topicIdPartition, firstOffset) + assertTrue(group.hasOffsets) + assertEquals(Some(secondOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(3L), secondOffset)) + assertTrue(group.hasOffsets) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + assertEquals(Some(secondOffset), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testOffsetCommitWithAnotherPending(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val firstOffset = offsetAndMetadata(37) + val secondOffset = offsetAndMetadata(57) + + group.prepareOffsetCommit(Map(topicIdPartition -> firstOffset)) + assertTrue(group.hasOffsets) + assertEquals(Some(firstOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.prepareOffsetCommit(Map(topicIdPartition -> secondOffset)) + assertTrue(group.hasOffsets) + assertEquals(Some(secondOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(4L), firstOffset)) + assertTrue(group.hasOffsets) + assertEquals(Some(secondOffset), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(Some(firstOffset), group.offset(topicIdPartition.topicPartition)) + + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(5L), secondOffset)) + assertTrue(group.hasOffsets) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + assertEquals(Some(secondOffset), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testConsumerBeatsTransactionalOffsetCommit(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val producerId = 13232L + val txnOffsetCommit = offsetAndMetadata(37) + val consumerOffsetCommit = offsetAndMetadata(57) + + group.prepareTxnOffsetCommit(producerId, Map(topicIdPartition -> txnOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(None, txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.prepareOffsetCommit(Map(topicIdPartition -> consumerOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(consumerOffsetCommit), group.pendingOffsetCommit(topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.onTxnOffsetCommitAppend(producerId, topicIdPartition, CommitRecordMetadataAndOffset(Some(3L), txnOffsetCommit)) + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(4L), consumerOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(Some(3), txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + assertEquals(Some(consumerOffsetCommit), group.offset(topicIdPartition.topicPartition)) + + group.completePendingTxnOffsetCommit(producerId, isCommit = true) + assertTrue(group.hasOffsets) + assertEquals(None, group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + // This is the crucial assertion which validates that we materialize offsets in offset order, not transactional order. + assertEquals(Some(consumerOffsetCommit), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testTransactionBeatsConsumerOffsetCommit(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val producerId = 13232L + val txnOffsetCommit = offsetAndMetadata(37) + val consumerOffsetCommit = offsetAndMetadata(57) + + group.prepareTxnOffsetCommit(producerId, Map(topicIdPartition -> txnOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(None, txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + + group.prepareOffsetCommit(Map(topicIdPartition -> consumerOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(consumerOffsetCommit), group.pendingOffsetCommit(topicIdPartition)) + + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(3L), consumerOffsetCommit)) + group.onTxnOffsetCommitAppend(producerId, topicIdPartition, CommitRecordMetadataAndOffset(Some(4L), txnOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(Some(4), txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + // The transactional offset commit hasn't been committed yet, so we should materialize the consumer offset commit. + assertEquals(Some(consumerOffsetCommit), group.offset(topicIdPartition.topicPartition)) + + group.completePendingTxnOffsetCommit(producerId, isCommit = true) + assertTrue(group.hasOffsets) + // The transactional offset commit has been materialized and the transactional commit record is later in the log, + // so it should be materialized. + assertEquals(None, group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.pendingOffsetCommit(topicIdPartition)) + assertEquals(Some(txnOffsetCommit), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testTransactionalCommitIsAbortedAndConsumerCommitWins(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val producerId = 13232L + val txnOffsetCommit = offsetAndMetadata(37) + val consumerOffsetCommit = offsetAndMetadata(57) + + group.prepareTxnOffsetCommit(producerId, Map(topicIdPartition -> txnOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + assertEquals(Some(CommitRecordMetadataAndOffset(None, txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + + group.prepareOffsetCommit(Map(topicIdPartition -> consumerOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + assertEquals(Some(CommitRecordMetadataAndOffset(None, txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(Some(consumerOffsetCommit), group.pendingOffsetCommit(topicIdPartition)) + + group.onOffsetCommitAppend(topicIdPartition, CommitRecordMetadataAndOffset(Some(3L), consumerOffsetCommit)) + group.onTxnOffsetCommitAppend(producerId, topicIdPartition, CommitRecordMetadataAndOffset(Some(4L), txnOffsetCommit)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(Some(4L), txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + // The transactional offset commit hasn't been committed yet, so we should materialize the consumer offset commit. + assertEquals(Some(consumerOffsetCommit), group.offset(topicIdPartition.topicPartition)) + + group.completePendingTxnOffsetCommit(producerId, isCommit = false) + assertTrue(group.hasOffsets) + // The transactional offset commit should be discarded and the consumer offset commit should continue to be + // materialized. + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertEquals(None, group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(Some(consumerOffsetCommit), group.offset(topicIdPartition.topicPartition)) + } + + @Test + def testFailedTxnOffsetCommitLeavesNoPendingState(): Unit = { + val topicIdPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val producerId = 13232L + val txnOffsetCommit = offsetAndMetadata(37) + + group.prepareTxnOffsetCommit(producerId, Map(topicIdPartition -> txnOffsetCommit)) + assertTrue(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertTrue(group.hasOffsets) + assertEquals(Some(CommitRecordMetadataAndOffset(None, txnOffsetCommit)), group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + assertEquals(None, group.offset(topicIdPartition.topicPartition)) + group.failPendingTxnOffsetCommit(producerId, topicIdPartition) + assertFalse(group.hasOffsets) + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + assertEquals(None, group.pendingTxnOffsetCommit(producerId, topicIdPartition)) + + // The commit marker should now have no effect. + group.completePendingTxnOffsetCommit(producerId, isCommit = true) + assertFalse(group.hasOffsets) + assertFalse(group.hasPendingOffsetCommitsFromProducer(producerId)) + } + + @Test + def testUpdateMember(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + group.add(member) + + val newRebalanceTimeout = 120000 + val newSessionTimeout = 20000 + group.updateMember(member, List(("roundrobin", Array[Byte]())), newRebalanceTimeout, newSessionTimeout, null) + + assertEquals(group.rebalanceTimeoutMs, newRebalanceTimeout) + assertEquals(member.sessionTimeoutMs, newSessionTimeout) + } + + + @Test + def testReplaceGroupInstanceWithNonExistingMember(): Unit = { + val newMemberId = "newMemberId" + assertThrows(classOf[IllegalArgumentException], () => group.replaceStaticMember(groupInstanceId, memberId, newMemberId)) + } + + @Test + def testReplaceGroupInstance(): Unit = { + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + var joinAwaitingMemberFenced = false + group.add(member, joinGroupResult => { + joinAwaitingMemberFenced = joinGroupResult.error == Errors.FENCED_INSTANCE_ID + }) + var syncAwaitingMemberFenced = false + member.awaitingSyncCallback = syncGroupResult => { + syncAwaitingMemberFenced = syncGroupResult.error == Errors.FENCED_INSTANCE_ID + } + assertTrue(group.isLeader(memberId)) + assertEquals(Some(memberId), group.currentStaticMemberId(groupInstanceId)) + + val newMemberId = "newMemberId" + group.replaceStaticMember(groupInstanceId, memberId, newMemberId) + assertTrue(group.isLeader(newMemberId)) + assertEquals(Some(newMemberId), group.currentStaticMemberId(groupInstanceId)) + assertTrue(joinAwaitingMemberFenced) + assertTrue(syncAwaitingMemberFenced) + assertFalse(member.isAwaitingJoin) + assertFalse(member.isAwaitingSync) + } + + @Test + def testInvokeJoinCallback(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + var invoked = false + group.add(member, _ => { + invoked = true + }) + + assertTrue(group.hasAllMembersJoined) + group.maybeInvokeJoinCallback(member, JoinGroupResult(member.memberId, Errors.NONE)) + assertTrue(invoked) + assertFalse(member.isAwaitingJoin) + } + + @Test + def testInvokeJoinCallbackFails(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + var shouldFail = true + var result: Option[JoinGroupResult] = None + def joinCallback(joinGroupResult: JoinGroupResult): Unit = { + if (shouldFail) { + shouldFail = false + throw new Exception("Something went wrong!") + } else { + result = Some(joinGroupResult) + } + } + + group.add(member, joinCallback) + + group.maybeInvokeJoinCallback(member, JoinGroupResult(member.memberId, Errors.NONE)) + + assertEquals(Errors.UNKNOWN_SERVER_ERROR, result.get.error) + assertFalse(member.isAwaitingJoin) + } + + @Test + def testNotInvokeJoinCallback(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + group.add(member) + + assertFalse(member.isAwaitingJoin) + group.maybeInvokeJoinCallback(member, JoinGroupResult(member.memberId, Errors.NONE)) + assertFalse(member.isAwaitingJoin) + } + + @Test + def testInvokeSyncCallbackFails(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + var shouldFail = true + var result: Option[SyncGroupResult] = None + def syncCallback(syncGroupResult: SyncGroupResult): Unit = { + if (shouldFail) { + shouldFail = false + throw new Exception("Something went wrong!") + } else { + result = Some(syncGroupResult) + } + } + + group.add(member) + member.awaitingSyncCallback = syncCallback + + val invoked = group.maybeInvokeSyncCallback(member, SyncGroupResult(Errors.NONE)) + assertTrue(invoked) + assertEquals(Errors.UNKNOWN_SERVER_ERROR, result.get.error) + assertFalse(member.isAwaitingSync) + } + + @Test + def testInvokeSyncCallback(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + + group.add(member) + member.awaitingSyncCallback = _ => {} + + val invoked = group.maybeInvokeSyncCallback(member, SyncGroupResult(Errors.NONE)) + assertTrue(invoked) + assertFalse(member.isAwaitingSync) + } + + @Test + def testNotInvokeSyncCallback(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + group.add(member) + + val invoked = group.maybeInvokeSyncCallback(member, SyncGroupResult(Errors.NONE)) + assertFalse(invoked) + assertFalse(member.isAwaitingSync) + } + + @Test + def testHasPendingNonTxnOffsets(): Unit = { + val partition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = offsetAndMetadata(37) + + group.prepareOffsetCommit(Map(partition -> offset)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(partition.topicPartition)) + } + + @Test + def testHasPendingTxnOffsets(): Unit = { + val txnPartition = new TopicIdPartition(Uuid.randomUuid(), 0, "foo") + val offset = offsetAndMetadata(37) + val producerId = 5 + + group.prepareTxnOffsetCommit(producerId, Map(txnPartition -> offset)) + assertTrue(group.hasPendingOffsetCommitsForTopicPartition(txnPartition.topicPartition)) + + assertFalse(group.hasPendingOffsetCommitsForTopicPartition(new TopicPartition("non-exist", 0))) + } + + @Test + def testCannotAddPendingMemberIfStable(): Unit = { + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + group.add(member) + assertThrows(classOf[IllegalStateException], () => group.addPendingMember(memberId)) + } + + @Test + def testRemovalFromPendingAfterMemberIsStable(): Unit = { + group.addPendingMember(memberId) + assertFalse(group.has(memberId)) + assertTrue(group.isPendingMember(memberId)) + + val member = new MemberMetadata(memberId, None, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte]))) + group.add(member) + assertTrue(group.has(memberId)) + assertFalse(group.isPendingMember(memberId)) + } + + @Test + def testRemovalFromPendingWhenMemberIsRemoved(): Unit = { + group.addPendingMember(memberId) + assertFalse(group.has(memberId)) + assertTrue(group.isPendingMember(memberId)) + + group.remove(memberId) + assertFalse(group.has(memberId)) + assertFalse(group.isPendingMember(memberId)) + } + + @Test + def testCannotAddStaticMemberIfAlreadyPresent(): Unit = { + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, + rebalanceTimeoutMs, sessionTimeoutMs, protocolType, List(("range", Array.empty[Byte]))) + group.add(member) + assertTrue(group.has(memberId)) + assertTrue(group.hasStaticMember(groupInstanceId)) + + // We are not permitted to add the member again if it is already present + assertThrows(classOf[IllegalStateException], () => group.add(member)) + } + + @Test + def testCannotAddPendingSyncOfUnknownMember(): Unit = { + assertThrows(classOf[IllegalStateException], + () => group.addPendingSyncMember(memberId)) + } + + @Test + def testCannotRemovePendingSyncOfUnknownMember(): Unit = { + assertThrows(classOf[IllegalStateException], + () => group.removePendingSyncMember(memberId)) + } + + @Test + def testCanAddAndRemovePendingSyncMember(): Unit = { + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, + rebalanceTimeoutMs, sessionTimeoutMs, protocolType, List(("range", Array.empty[Byte]))) + group.add(member) + group.addPendingSyncMember(memberId) + assertEquals(Set(memberId), group.allPendingSyncMembers) + group.removePendingSyncMember(memberId) + assertEquals(Set(), group.allPendingSyncMembers) + } + + @Test + def testRemovalFromPendingSyncWhenMemberIsRemoved(): Unit = { + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, + rebalanceTimeoutMs, sessionTimeoutMs, protocolType, List(("range", Array.empty[Byte]))) + group.add(member) + group.addPendingSyncMember(memberId) + assertEquals(Set(memberId), group.allPendingSyncMembers) + group.remove(memberId) + assertEquals(Set(), group.allPendingSyncMembers) + } + + @Test + def testNewGenerationClearsPendingSyncMembers(): Unit = { + val member = new MemberMetadata(memberId, Some(groupInstanceId), clientId, clientHost, + rebalanceTimeoutMs, sessionTimeoutMs, protocolType, List(("range", Array.empty[Byte]))) + group.add(member) + group.transitionTo(PreparingRebalance) + group.addPendingSyncMember(memberId) + assertEquals(Set(memberId), group.allPendingSyncMembers) + group.initNextGeneration() + assertEquals(Set(), group.allPendingSyncMembers) + } + + private def assertState(group: GroupMetadata, targetState: GroupState): Unit = { + val states: Set[GroupState] = Set(Stable, PreparingRebalance, CompletingRebalance, Dead) + val otherStates = states - targetState + otherStates.foreach { otherState => + assertFalse(group.is(otherState)) + } + assertTrue(group.is(targetState)) + } + + private def offsetAndMetadata(offset: Long, timestamp: Long = Time.SYSTEM.milliseconds()): OffsetAndMetadata = { + new OffsetAndMetadata(offset, OptionalInt.empty(), "", timestamp, OptionalLong.empty()) + } + +} diff --git a/core/src/test/scala/unit/kafka/coordinator/group/MemberMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/group/MemberMetadataTest.scala new file mode 100644 index 0000000000000..13f78603b152e --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/group/MemberMetadataTest.scala @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.group + +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Test + +import java.util + +class MemberMetadataTest { + val groupId = "groupId" + val groupInstanceId = Some("groupInstanceId") + val clientId = "clientId" + val clientHost = "clientHost" + val memberId = "memberId" + val protocolType = "consumer" + val rebalanceTimeoutMs = 60000 + val sessionTimeoutMs = 10000 + + + @Test + def testMatchesSupportedProtocols(): Unit = { + val protocols = List(("range", Array.empty[Byte])) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertTrue(member.matches(protocols)) + assertFalse(member.matches(List(("range", Array[Byte](0))))) + assertFalse(member.matches(List(("roundrobin", Array.empty[Byte])))) + assertFalse(member.matches(List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte])))) + } + + @Test + def testVoteForPreferredProtocol(): Unit = { + val protocols = List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte])) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertEquals("range", member.vote(Set("range", "roundrobin"))) + assertEquals("roundrobin", member.vote(Set("blah", "roundrobin"))) + } + + @Test + def testMetadata(): Unit = { + val protocols = List(("range", Array[Byte](0)), ("roundrobin", Array[Byte](1))) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertTrue(util.Arrays.equals(Array[Byte](0), member.metadata("range"))) + assertTrue(util.Arrays.equals(Array[Byte](1), member.metadata("roundrobin"))) + } + + @Test + def testMetadataRaisesOnUnsupportedProtocol(): Unit = { + val protocols = List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte])) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertThrows(classOf[IllegalArgumentException], () => member.metadata("blah")) + } + + @Test + def testVoteRaisesOnNoSupportedProtocols(): Unit = { + val protocols = List(("range", Array.empty[Byte]), ("roundrobin", Array.empty[Byte])) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertThrows(classOf[IllegalArgumentException], () => member.vote(Set("blah"))) + } + + @Test + def testHasValidGroupInstanceId(): Unit = { + val protocols = List(("range", Array[Byte](0)), ("roundrobin", Array[Byte](1))) + + val member = new MemberMetadata(memberId, groupInstanceId, clientId, clientHost, rebalanceTimeoutMs, sessionTimeoutMs, + protocolType, protocols) + assertTrue(member.isStaticMember) + assertEquals(groupInstanceId, member.groupInstanceId) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala index 79957b01fb77b..70d543bfbe5de 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorConcurrencyTest.scala @@ -17,31 +17,28 @@ package kafka.coordinator.transaction import java.nio.ByteBuffer -import java.util -import java.util.Optional -import java.util.concurrent.ConcurrentHashMap +import java.util.Collections import java.util.concurrent.atomic.AtomicBoolean import kafka.coordinator.AbstractCoordinatorConcurrencyTest import kafka.coordinator.AbstractCoordinatorConcurrencyTest._ import kafka.coordinator.transaction.TransactionCoordinatorConcurrencyTest._ -import kafka.server.KafkaConfig -import kafka.utils.TestUtils +import kafka.log.UnifiedLog +import kafka.server.{KafkaConfig, MetadataCache} +import kafka.utils.{Pool, TestUtils} import org.apache.kafka.clients.{ClientResponse, NetworkClient} -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{FileRecords, MemoryRecords, RecordBatch, SimpleRecord} import org.apache.kafka.common.requests._ import org.apache.kafka.common.utils.{LogContext, MockTime, ProducerIdAndEpoch} -import org.apache.kafka.common.{Node, TopicPartition, Uuid} -import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionMetadata, TransactionState} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.common.{Node, TopicPartition} +import org.apache.kafka.coordinator.transaction.ProducerIdManager import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.storage.log.FetchIsolation -import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogConfig, LogOffsetMetadata, UnifiedLog} +import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogConfig, LogOffsetMetadata} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.{ArgumentCaptor, ArgumentMatchers} @@ -63,7 +60,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren private val allOperations = Seq( new InitProducerIdOperation, - new AddPartitionsToTxnOperation(util.Set.of(new TopicPartition("topic", 0))), + new AddPartitionsToTxnOperation(Set(new TopicPartition("topic", 0))), new EndTxnOperation) private val allTransactions = mutable.Set[Transaction]() @@ -83,22 +80,24 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren anyString, anyInt, any[ListenerName]) - ).thenReturn(Optional.of(brokerNode)) + ).thenReturn(Some(brokerNode)) when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - util.Map.of(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), - 0) + Collections.singletonMap(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), + 0, + true + ) } when(metadataCache.metadataVersion()) .thenReturn(MetadataVersion.latestProduction()) - + txnStateManager = new TransactionStateManager(0, scheduler, replicaManager, metadataCache, txnConfig, time, new Metrics()) txnStateManager.startup(() => numPartitions, enableTransactionalIdExpiration = true) for (i <- 0 until numPartitions) - txnStateManager.addLoadedTransactionsToCache(i, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + txnStateManager.addLoadedTransactionsToCache(i, coordinatorEpoch, new Pool[String, TransactionMetadata]()) val pidGenerator: ProducerIdManager = mock(classOf[ProducerIdManager]) when(pidGenerator.generateProducerId()) @@ -115,10 +114,6 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren txnStateManager, time) - val transactionStateTopicId = Uuid.randomUuid() - when(replicaManager.metadataCache.getTopicName(transactionStateTopicId)).thenReturn(Optional.of(Topic.TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.metadataCache.getTopicId(Topic.TRANSACTION_STATE_TOPIC_NAME)).thenReturn(transactionStateTopicId) - transactionCoordinator = new TransactionCoordinator( txnConfig, scheduler, @@ -459,7 +454,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren val partitionId = txnStateManager.partitionFor(txn.transactionalId) val txnRecords = txnRecordsByPartition(partitionId) val initPidOp = new InitProducerIdOperation() - val addPartitionsOp = new AddPartitionsToTxnOperation(util.Set.of(new TopicPartition("topic", 0))) + val addPartitionsOp = new AddPartitionsToTxnOperation(Set(new TopicPartition("topic", 0))) initPidOp.run(txn) initPidOp.awaitAndVerify(txn) addPartitionsOp.run(txn) @@ -468,7 +463,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren val txnMetadata = transactionMetadata(txn).getOrElse(throw new IllegalStateException(s"Transaction not found $txn")) txnRecords += new SimpleRecord(txn.txnMessageKeyBytes, TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TransactionVersion.TV_2)) - txnMetadata.state(TransactionState.PREPARE_COMMIT) + txnMetadata.state = PrepareCommit txnRecords += new SimpleRecord(txn.txnMessageKeyBytes, TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TransactionVersion.TV_2)) prepareTxnLog(partitionId) @@ -476,7 +471,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren private def prepareTxnLog(partitionId: Int): Unit = { val logMock: UnifiedLog = mock(classOf[UnifiedLog]) - when(logMock.config).thenReturn(new LogConfig(util.Map.of)) + when(logMock.config).thenReturn(new LogConfig(Collections.emptyMap())) val fileRecordsMock: FileRecords = mock(classOf[FileRecords]) @@ -487,9 +482,9 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren when(logMock.logStartOffset).thenReturn(startOffset) when(logMock.read(ArgumentMatchers.eq(startOffset), - anyInt, - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true))) + maxLength = anyInt, + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) .thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(fileRecordsMock.sizeInBytes()).thenReturn(records.sizeInBytes) @@ -506,18 +501,17 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren } private def prepareExhaustedEpochTxnMetadata(txn: Transaction): TransactionMetadata = { - new TransactionMetadata(txn.transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - (Short.MaxValue - 1).toShort, - RecordBatch.NO_PRODUCER_EPOCH, - 60000, - TransactionState.EMPTY, - new util.HashSet[TopicPartition](), - -1, - time.milliseconds(), - TransactionVersion.TV_0) + new TransactionMetadata(transactionalId = txn.transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = (Short.MaxValue - 1).toShort, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 60000, + state = Empty, + topicPartitions = collection.mutable.Set.empty[TopicPartition], + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TransactionVersion.TV_0) } abstract class TxnOperation[R] extends Operation { @@ -530,26 +524,18 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren class InitProducerIdOperation(val producerIdAndEpoch: Option[ProducerIdAndEpoch] = None) extends TxnOperation[InitProducerIdResult] { override def run(txn: Transaction): Unit = { - transactionCoordinator.handleInitProducerId( - txn.transactionalId, - 60000, - enableTwoPCFlag = false, - keepPreparedTxn = false, - producerIdAndEpoch, - resultCallback, - RequestLocal.withThreadConfinedCaching - ) + transactionCoordinator.handleInitProducerId(txn.transactionalId, 60000, producerIdAndEpoch, resultCallback, + RequestLocal.withThreadConfinedCaching) replicaManager.tryCompleteActions() } - override def awaitAndVerify(txn: Transaction): Unit = { val initPidResult = result.getOrElse(throw new IllegalStateException("InitProducerId has not completed")) assertEquals(Errors.NONE, initPidResult.error) - verifyTransaction(txn, TransactionState.EMPTY) + verifyTransaction(txn, Empty) } } - class AddPartitionsToTxnOperation(partitions: util.Set[TopicPartition]) extends TxnOperation[Errors] { + class AddPartitionsToTxnOperation(partitions: Set[TopicPartition]) extends TxnOperation[Errors] { override def run(txn: Transaction): Unit = { transactionMetadata(txn).foreach { txnMetadata => transactionCoordinator.handleAddPartitionsToTransaction(txn.transactionalId, @@ -565,7 +551,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def awaitAndVerify(txn: Transaction): Unit = { val error = result.getOrElse(throw new IllegalStateException("AddPartitionsToTransaction has not completed")) assertEquals(Errors.NONE, error) - verifyTransaction(txn, TransactionState.ONGOING) + verifyTransaction(txn, Ongoing) } } @@ -586,7 +572,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren if (!txn.ended) { txn.ended = true assertEquals(Errors.NONE, error) - val expectedState = if (transactionResult(txn) == TransactionResult.COMMIT) TransactionState.COMPLETE_COMMIT else TransactionState.COMPLETE_ABORT + val expectedState = if (transactionResult(txn) == TransactionResult.COMMIT) CompleteCommit else CompleteAbort verifyTransaction(txn, expectedState) } else assertEquals(Errors.INVALID_TXN_STATE, error) @@ -607,7 +593,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def await(): Unit = { allTransactions.foreach { txn => if (txnStateManager.partitionFor(txn.transactionalId) == txnTopicPartitionId) { - verifyTransaction(txn, TransactionState.COMPLETE_COMMIT) + verifyTransaction(txn, CompleteCommit) } } } @@ -630,7 +616,7 @@ class TransactionCoordinatorConcurrencyTest extends AbstractCoordinatorConcurren override def run(): Unit = { transactions.foreach { txn => transactionMetadata(txn).foreach { txnMetadata => - txnMetadata.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) + txnMetadata.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs } } txnStateManager.enableTransactionalIdExpiration() diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala index d9f6e115fbba8..e5b48d9246632 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionCoordinatorTest.scala @@ -22,20 +22,19 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{AddPartitionsToTxnResponse, TransactionResult} import org.apache.kafka.common.utils.{LogContext, MockTime, ProducerIdAndEpoch} -import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionMetadata, TransactionState, TransactionStateManagerConfig, TxnTransitMetadata} +import org.apache.kafka.coordinator.transaction.{ProducerIdManager, TransactionStateManagerConfig} import org.apache.kafka.server.common.TransactionVersion import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} import org.apache.kafka.server.util.MockScheduler import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{CsvSource, ValueSource} -import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt} +import org.junit.jupiter.params.provider.ValueSource +import org.mockito.ArgumentMatchers.{any, anyInt} import org.mockito.Mockito._ import org.mockito.{ArgumentCaptor, ArgumentMatchers} -import java.util - +import scala.collection.mutable import scala.jdk.CollectionConverters._ class TransactionCoordinatorTest { @@ -57,8 +56,7 @@ class TransactionCoordinatorTest { private val txnTimeoutMs = 1 private val producerId2 = 11L - private val partitions = new util.HashSet[TopicPartition]() - partitions.add(new TopicPartition("topic1", 0)) + private val partitions = mutable.Set[TopicPartition](new TopicPartition("topic1", 0)) private val scheduler = new MockScheduler(time) val coordinator = new TransactionCoordinator( @@ -84,7 +82,7 @@ class TransactionCoordinatorTest { private def initPidGenericMocks(transactionalId: String): Unit = { mockPidGenerator() - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) } @@ -92,29 +90,9 @@ class TransactionCoordinatorTest { def shouldReturnInvalidRequestWhenTransactionalIdIsEmpty(): Unit = { mockPidGenerator() - coordinator.handleInitProducerId("", txnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId("", txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result) - coordinator.handleInitProducerId("", txnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) - assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result) - } - - @Test - def shouldReturnInvalidRequestWhenKeepPreparedIsTrue(): Unit = { - mockPidGenerator() - - coordinator.handleInitProducerId("", txnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = true, None, initProducerIdMockCallback) - assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result) - } - - @Test - def shouldReturnInvalidRequestWhen2PCEnabledButBroker2PCConfigFalse(): Unit = { - mockPidGenerator() - - coordinator.handleInitProducerId("", txnTimeoutMs, enableTwoPCFlag = true, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId("", txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1L, -1, Errors.INVALID_REQUEST), result) } @@ -122,11 +100,9 @@ class TransactionCoordinatorTest { def shouldAcceptInitPidAndReturnNextPidWhenTransactionalIdIsNull(): Unit = { mockPidGenerator() - coordinator.handleInitProducerId(null, txnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId(null, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(0L, 0, Errors.NONE), result) - coordinator.handleInitProducerId(null, txnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId(null, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(1L, 0, Errors.NONE), result) } @@ -151,14 +127,7 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => capturedErrorsCallback.getValue.apply(Errors.NONE)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(nextPid - 1, 0, Errors.NONE), result) } @@ -183,14 +152,8 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => capturedErrorsCallback.getValue.apply(Errors.NONE)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, producerEpoch)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, producerEpoch)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(nextPid - 1, 0, Errors.NONE), result) } @@ -199,7 +162,7 @@ class TransactionCoordinatorTest { initPidGenericMocks(transactionalId) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, util.Set.of, time.milliseconds(), time.milliseconds(), TV_0) + (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -213,14 +176,7 @@ class TransactionCoordinatorTest { any() )).thenAnswer(_ => capturedErrorsCallback.getValue.apply(Errors.NONE)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertNotEquals(producerId, result.producerId) assertEquals(0, result.producerEpoch) assertEquals(Errors.NONE, result.error) @@ -231,11 +187,11 @@ class TransactionCoordinatorTest { initPidGenericMocks(transactionalId) val txnMetadata1 = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, util.Set.of, time.milliseconds(), time.milliseconds(), TV_2) - // We start with txnMetadata1 so we can transform the metadata to TransactionState.PREPARE_COMMIT. + (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_2) + // We start with txnMetadata1 so we can transform the metadata to PrepareCommit. val txnMetadata2 = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, (Short.MaxValue - 1).toShort, - (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, util.Set.of, time.milliseconds(), time.milliseconds(), TV_2) - val transitMetadata = txnMetadata2.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, producerId2, time.milliseconds(), false) + (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_2) + val transitMetadata = txnMetadata2.prepareAbortOrCommit(PrepareCommit, TV_2, producerId2, time.milliseconds(), false) txnMetadata2.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata2.producerId) @@ -262,37 +218,23 @@ class TransactionCoordinatorTest { @Test def shouldRespondWithNotCoordinatorOnInitPidWhenNotCoordinator(): Unit = { - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Left(Errors.NOT_COORDINATOR)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.NOT_COORDINATOR), result) } @Test def shouldRespondWithCoordinatorLoadInProgressOnInitPidWhenCoordinatorLoading(): Unit = { - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Left(Errors.COORDINATOR_LOAD_IN_PROGRESS)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.COORDINATOR_LOAD_IN_PROGRESS), result) } @@ -344,7 +286,7 @@ class TransactionCoordinatorTest { } // If producer ID is not the same, return INVALID_PRODUCER_ID_MAPPING val wrongPidTxnMetadata = new TransactionMetadata(transactionalId, 1, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, wrongPidTxnMetadata)))) @@ -355,7 +297,7 @@ class TransactionCoordinatorTest { // If producer epoch is not equal, return PRODUCER_FENCED val oldEpochTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, oldEpochTxnMetadata)))) @@ -366,7 +308,7 @@ class TransactionCoordinatorTest { // If the txn state is Prepare or AbortCommit, we return CONCURRENT_TRANSACTIONS val emptyTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.PREPARE_COMMIT, partitions, 0, 0, TV_0) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, PrepareCommit, partitions, 0, 0, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, emptyTxnMetadata)))) @@ -377,8 +319,8 @@ class TransactionCoordinatorTest { // Pending state does not matter, we will just check if the partitions are in the txnMetadata. val ongoingTxnMetadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, util.Set.of, 0, 0, TV_0) - ongoingTxnMetadata.pendingState(util.Optional.of(TransactionState.COMPLETE_COMMIT)) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, mutable.Set.empty, 0, 0, TV_0) + ongoingTxnMetadata.pendingState = Some(CompleteCommit) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(new CoordinatorEpochAndTxnMetadata(coordinatorEpoch, ongoingTxnMetadata)))) @@ -390,20 +332,20 @@ class TransactionCoordinatorTest { @Test def shouldRespondWithConcurrentTransactionsOnAddPartitionsWhenStateIsPrepareCommit(): Unit = { - validateConcurrentTransactions(TransactionState.PREPARE_COMMIT) + validateConcurrentTransactions(PrepareCommit) } @Test def shouldRespondWithConcurrentTransactionOnAddPartitionsWhenStateIsPrepareAbort(): Unit = { - validateConcurrentTransactions(TransactionState.PREPARE_ABORT) + validateConcurrentTransactions(PrepareAbort) } def validateConcurrentTransactions(state: TransactionState): Unit = { - // Since the clientTransactionVersion doesn't matter, use 2 since the states are TransactionState.PREPARE_COMMIT and TransactionState.PREPARE_ABORT. + // Since the clientTransactionVersion doesn't matter, use 2 since the states are PrepareCommit and PrepareAbort. when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, util.Set.of, 0, 0, TV_2))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, mutable.Set.empty, 0, 0, TV_2))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) @@ -411,11 +353,11 @@ class TransactionCoordinatorTest { @Test def shouldRespondWithProducerFencedOnAddPartitionsWhenEpochsAreDifferent(): Unit = { - // Since the clientTransactionVersion doesn't matter, use 2 since the state is TransactionState.PREPARE_COMMIT. + // Since the clientTransactionVersion doesn't matter, use 2 since the state is PrepareCommit. when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 10, 9, 0, TransactionState.PREPARE_COMMIT, util.Set.of, 0, 0, TV_2))))) + 10, 9, 0, PrepareCommit, mutable.Set.empty, 0, 0, TV_2))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_2) assertEquals(Errors.PRODUCER_FENCED, error) @@ -423,30 +365,30 @@ class TransactionCoordinatorTest { @Test def shouldAppendNewMetadataToLogOnAddPartitionsWhenPartitionsAdded(): Unit = { - validateSuccessfulAddPartitions(TransactionState.EMPTY, 0) + validateSuccessfulAddPartitions(Empty, 0) } @Test def shouldRespondWithSuccessOnAddPartitionsWhenStateIsOngoing(): Unit = { - validateSuccessfulAddPartitions(TransactionState.ONGOING, 0) + validateSuccessfulAddPartitions(Ongoing, 0) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteCommit(clientTransactionVersion: Short): Unit = { - validateSuccessfulAddPartitions(TransactionState.COMPLETE_COMMIT, clientTransactionVersion) + validateSuccessfulAddPartitions(CompleteCommit, clientTransactionVersion) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldRespondWithSuccessOnAddPartitionsWhenStateIsCompleteAbort(clientTransactionVersion: Short): Unit = { - validateSuccessfulAddPartitions(TransactionState.COMPLETE_ABORT, clientTransactionVersion) + validateSuccessfulAddPartitions(CompleteAbort, clientTransactionVersion) } def validateSuccessfulAddPartitions(previousState: TransactionState, transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, previousState, util.Set.of, time.milliseconds(), time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, previousState, mutable.Set.empty, time.milliseconds(), time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -469,7 +411,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.EMPTY, partitions, 0, 0, TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Empty, partitions, 0, 0, TV_0))))) coordinator.handleAddPartitionsToTransaction(transactionalId, 0L, 0, partitions, errorsCallback, TV_0) assertEquals(Errors.NONE, error) @@ -486,7 +428,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, partitions, 0, 0, TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, partitions, 0, 0, TV_0))))) coordinator.handleVerifyPartitionsInTransaction(transactionalId, 0L, 0, partitions, verifyPartitionsInTxnCallback) errors.foreach { case (_, error) => @@ -505,10 +447,9 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.EMPTY, partitions, 0, 0, TV_0))))) - - val extraPartitions = new util.HashSet[TopicPartition](partitions) - extraPartitions.add(new TopicPartition("topic2", 0)) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Empty, partitions, 0, 0, TV_0))))) + + val extraPartitions = partitions ++ Set(new TopicPartition("topic2", 0)) coordinator.handleVerifyPartitionsInTransaction(transactionalId, 0L, 0, extraPartitions, verifyPartitionsInTxnCallback) assertEquals(Errors.TRANSACTION_ABORTABLE, errors(new TopicPartition("topic2", 0))) @@ -530,12 +471,12 @@ class TransactionCoordinatorTest { @ParameterizedTest @ValueSource(shorts = Array(0, 2)) - def shouldReplyWithInvalidPidMappingOnEndTxnWhenPidDoesntMatchMapped(transactionVersion: Short): Unit = { + def shouldReplyWithInvalidPidMappingOnEndTxnWhenPidDosentMatchMapped(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, 10, 10, RecordBatch.NO_PRODUCER_ID, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, TransactionState.ONGOING, util.Set.of, 0, time.milliseconds(), TV_0))))) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) coordinator.handleEndTransaction(transactionalId, 0, 0, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, error) @@ -549,7 +490,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, TransactionState.ONGOING, util.Set.of, 0, time.milliseconds(), TV_0))))) + (producerEpoch - 1).toShort, 1, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_0))))) coordinator.handleEndTransaction(transactionalId, producerId, 0, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.PRODUCER_FENCED, error) @@ -563,7 +504,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -590,7 +531,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, producerEpoch, - (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -607,7 +548,7 @@ class TransactionCoordinatorTest { def testEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV1(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -626,7 +567,7 @@ class TransactionCoordinatorTest { def shouldReturnOkOnEndTxnWhenStatusIsCompleteAbortAndResultIsAbortInV2(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -663,7 +604,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteAbortAndResultIsNotAbort(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -676,7 +617,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnRequestWhenStatusIsCompleteCommitAndResultIsNotCommit(): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort,1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort,1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -690,7 +631,7 @@ class TransactionCoordinatorTest { def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV1(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -709,7 +650,7 @@ class TransactionCoordinatorTest { def testEndTxnRequestWhenStatusIsCompleteCommitAndResultIsAbortInV2(isRetry: Boolean): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion) + producerEpoch, (producerEpoch - 1).toShort, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -740,7 +681,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, (producerEpoch - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.CONCURRENT_TRANSACTIONS, error) @@ -753,7 +694,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.PREPARE_ABORT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareAbort, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch(clientTransactionVersion), TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_TXN_STATE, error) @@ -765,7 +706,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.INVALID_TXN_STATE, error) @@ -778,7 +719,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) @@ -807,7 +748,7 @@ class TransactionCoordinatorTest { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.EMPTY, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, Empty, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) val epoch = if (isRetry) producerEpoch - 1 else producerEpoch coordinator.handleEndTransaction(transactionalId, producerId, epoch.toShort, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) @@ -823,7 +764,7 @@ class TransactionCoordinatorTest { def shouldReturnInvalidTxnRequestOnEndTxnV2IfNotEndTxnV2Retry(): Unit = { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) // If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return PRODUCER_FENCED. coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -832,7 +773,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) // If producerEpoch is the same, this is not a retry of the EndTxnRequest, but the next EndTxnRequest. Return INVALID_TXN_STATE. coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -844,7 +785,7 @@ class TransactionCoordinatorTest { def shouldReturnOkOnEndTxnV2IfEndTxnV2RetryEpochOverflow(): Unit = { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, - producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) + producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) // Return CONCURRENT_TRANSACTIONS while transaction is still completing coordinator.handleEndTransaction(transactionalId, producerId, (Short.MaxValue - 1).toShort, TransactionResult.COMMIT, TV_2, endTxnCallback) @@ -853,7 +794,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId2, producerId, - RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2))))) + RecordBatch.NO_PRODUCER_ID, 0, RecordBatch.NO_PRODUCER_EPOCH, 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2))))) coordinator.handleEndTransaction(transactionalId, producerId, (Short.MaxValue - 1).toShort, TransactionResult.COMMIT, TV_2, endTxnCallback) assertEquals(Errors.NONE, error) @@ -866,7 +807,7 @@ class TransactionCoordinatorTest { @Test def shouldReturnConcurrentTxnOnAddPartitionsIfEndTxnV2EpochOverflowAndNotComplete(): Unit = { val prepareWithPending = new TransactionMetadata(transactionalId, producerId, producerId, - producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, TransactionState.PREPARE_COMMIT, util.Set.of, 0, time.milliseconds(), TV_2) + producerId2, Short.MaxValue, (Short.MaxValue - 1).toShort, 1, PrepareCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), TV_2) val txnTransitMetadata = prepareWithPending.prepareComplete(time.milliseconds()) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) @@ -878,7 +819,7 @@ class TransactionCoordinatorTest { verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) prepareWithPending.completeTransitionTo(txnTransitMetadata) - assertEquals(TransactionState.COMPLETE_COMMIT, prepareWithPending.state) + assertEquals(CompleteCommit, prepareWithPending.state) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, prepareWithPending)))) when(transactionManager.appendTransactionToLog( @@ -900,7 +841,7 @@ class TransactionCoordinatorTest { @ValueSource(shorts = Array(0, 2)) def shouldAppendPrepareCommitToLogOnEndTxnWhenStatusIsOngoingAndResultIsCommit(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - mockPrepare(TransactionState.PREPARE_COMMIT, clientTransactionVersion) + mockPrepare(PrepareCommit, clientTransactionVersion) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) @@ -917,7 +858,7 @@ class TransactionCoordinatorTest { @ValueSource(shorts = Array(0, 2)) def shouldAppendPrepareAbortToLogOnEndTxnWhenStatusIsOngoingAndResultIsAbort(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - mockPrepare(TransactionState.PREPARE_ABORT, clientTransactionVersion) + mockPrepare(PrepareAbort, clientTransactionVersion) coordinator.handleEndTransaction(transactionalId, producerId, producerEpoch, TransactionResult.ABORT, clientTransactionVersion, endTxnCallback) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) @@ -992,7 +933,7 @@ class TransactionCoordinatorTest { when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, metadataEpoch, 1, - 1, TransactionState.COMPLETE_COMMIT, util.Set.of, 0, time.milliseconds(), clientTransactionVersion))))) + 1, CompleteCommit, collection.mutable.Set.empty[TopicPartition], 0, time.milliseconds(), clientTransactionVersion))))) coordinator.handleEndTransaction(transactionalId, producerId, requestEpoch, TransactionResult.COMMIT, clientTransactionVersion, endTxnCallback) assertEquals(Errors.PRODUCER_FENCED, error) @@ -1001,41 +942,37 @@ class TransactionCoordinatorTest { @Test def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingEmptyTransaction(): Unit = { - validateIncrementEpochAndUpdateMetadata(TransactionState.EMPTY, 0) + validateIncrementEpochAndUpdateMetadata(Empty, 0) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteTransaction(clientTransactionVersion: Short): Unit = { - validateIncrementEpochAndUpdateMetadata(TransactionState.COMPLETE_ABORT, clientTransactionVersion) + validateIncrementEpochAndUpdateMetadata(CompleteAbort, clientTransactionVersion) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def shouldIncrementEpochAndUpdateMetadataOnHandleInitPidWhenExistingCompleteCommitTransaction(clientTransactionVersion: Short): Unit = { - validateIncrementEpochAndUpdateMetadata(TransactionState.COMPLETE_COMMIT, clientTransactionVersion) + validateIncrementEpochAndUpdateMetadata(CompleteCommit, clientTransactionVersion) } @Test def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareCommitState(): Unit = { - validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(TransactionState.PREPARE_COMMIT) + validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareCommit) } @Test def shouldWaitForCommitToCompleteOnHandleInitPidAndExistingTransactionInPrepareAbortState(): Unit = { - validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(TransactionState.PREPARE_ABORT) + validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(PrepareAbort) } - @ParameterizedTest(name = "enableTwoPCFlag={0}, keepPreparedTxn={1}") - @CsvSource(Array("false, false")) - def shouldAbortTransactionOnHandleInitPidWhenExistingTransactionInOngoingState( - enableTwoPCFlag: Boolean, - keepPreparedTxn: Boolean - ): Unit = { + @Test + def shouldAbortTransactionOnHandleInitPidWhenExistingTransactionInOngoingState(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) @@ -1044,7 +981,7 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), @@ -1054,22 +991,15 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => capturedErrorsCallback.getValue.apply(Errors.NONE)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag, - keepPreparedTxn, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) - verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager).validateTransactionTimeoutMs(anyInt()) verify(transactionManager, times(3)).getTransactionState(ArgumentMatchers.eq(transactionalId)) verify(transactionManager).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)), + ArgumentMatchers.eq(originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false)), any(), any(), any()) @@ -1078,40 +1008,33 @@ class TransactionCoordinatorTest { @Test def shouldFailToAbortTransactionOnHandleInitPidWhenProducerEpochIsSmaller(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) val bumpedTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 2).toShort, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (producerEpoch + 2).toShort, (producerEpoch - 1).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, bumpedTxnMetadata)))) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.PRODUCER_FENCED), result) - verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager).validateTransactionTimeoutMs(anyInt()) verify(transactionManager, times(2)).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @Test def shouldNotRepeatedlyBumpEpochDueToInitPidDuringOngoingTxnIfAppendToLogFails(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.putTransactionStateIfNotExists(any[TransactionMetadata]())) @@ -1123,8 +1046,8 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) - val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + val txnTransitMetadata = originalMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), @@ -1134,10 +1057,10 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NOT_ENOUGH_REPLICAS) - txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.pendingState = None }).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NOT_ENOUGH_REPLICAS) - txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.pendingState = None }).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) @@ -1147,47 +1070,26 @@ class TransactionCoordinatorTest { }) // For the first two calls, verify that the epoch was only bumped once - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.NOT_ENOUGH_REPLICAS), result) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) assertTrue(txnMetadata.hasFailedEpochFence) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.NOT_ENOUGH_REPLICAS), result) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) assertTrue(txnMetadata.hasFailedEpochFence) // For the last, successful call, verify that the epoch was not bumped further - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) assertFalse(txnMetadata.hasFailedEpochFence) - verify(transactionManager, times(3)).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager, times(3)).validateTransactionTimeoutMs(anyInt()) verify(transactionManager, times(9)).getTransactionState(ArgumentMatchers.eq(transactionalId)) verify(transactionManager, times(3)).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), @@ -1201,14 +1103,14 @@ class TransactionCoordinatorTest { @Test def shouldUseLastEpochToFenceWhenEpochsAreExhausted(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) val postFenceTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - Short.MaxValue, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, time.milliseconds(), time.milliseconds(), TV_0) + Short.MaxValue, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1220,213 +1122,64 @@ class TransactionCoordinatorTest { when(transactionManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(new TxnTransitMetadata( - producerId, - producerId, - RecordBatch.NO_PRODUCER_ID, - Short.MaxValue, - RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs, - TransactionState.PREPARE_ABORT, - partitions, - time.milliseconds(), - time.milliseconds(), - TV_0)), + ArgumentMatchers.eq(TxnTransitMetadata( + producerId = producerId, + prevProducerId = producerId, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = Short.MaxValue, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = txnTimeoutMs, + txnState = PrepareAbort, + topicPartitions = partitions.toSet, + txnStartTimestamp = time.milliseconds(), + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0)), capturedErrorsCallback.capture(), any(), any()) ).thenAnswer(_ => capturedErrorsCallback.getValue.apply(Errors.NONE)) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(Short.MaxValue, txnMetadata.producerEpoch) assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) - verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) - verify(transactionManager, times(3)).getTransactionState(ArgumentMatchers.eq(transactionalId)) - verify(transactionManager).appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(new TxnTransitMetadata( - producerId, - producerId, - RecordBatch.NO_PRODUCER_ID, - Short.MaxValue, - RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs, - TransactionState.PREPARE_ABORT, - partitions, - time.milliseconds(), - time.milliseconds(), - TV_0)), - any(), - any(), - any()) - } - - @Test - def shouldNotCauseEpochOverflowWhenInitPidDuringOngoingTxnV2(): Unit = { - // When InitProducerId is called with an ongoing transaction at epoch 32766 (Short.MaxValue - 1), - // it should not cause an epoch overflow by incrementing twice. - // The only true increment happens in prepareAbortOrCommit - val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_2) - - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) - .thenReturn(true) - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) - - // Capture the transition metadata to verify epoch increments - val capturedTxnTransitMetadata: ArgumentCaptor[TxnTransitMetadata] = ArgumentCaptor.forClass(classOf[TxnTransitMetadata]) - when(transactionManager.appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - capturedTxnTransitMetadata.capture(), - capturedErrorsCallback.capture(), - any(), - any()) - ).thenAnswer(invocation => { - val transitMetadata = invocation.getArgument[TxnTransitMetadata](2) - // Simulate the metadata update that would happen in the real appendTransactionToLog - txnMetadata.completeTransitionTo(transitMetadata) - capturedErrorsCallback.getValue.apply(Errors.NONE) - }) - - // Handle InitProducerId with ongoing transaction at epoch 32766 - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) - - // Verify that the epoch did not overflow (should be Short.MaxValue = 32767, not negative) - assertEquals(Short.MaxValue, txnMetadata.producerEpoch) - assertEquals(TransactionState.PREPARE_ABORT, txnMetadata.state) - - verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager).validateTransactionTimeoutMs(anyInt()) verify(transactionManager, times(3)).getTransactionState(ArgumentMatchers.eq(transactionalId)) verify(transactionManager).appendTransactionToLog( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(coordinatorEpoch), - any[TxnTransitMetadata], + ArgumentMatchers.eq(TxnTransitMetadata( + producerId = producerId, + prevProducerId = producerId, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = Short.MaxValue, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = txnTimeoutMs, + txnState = PrepareAbort, + topicPartitions = partitions.toSet, + txnStartTimestamp = time.milliseconds(), + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0)), any(), any(), any()) } - @Test - def shouldHandleTimeoutAtEpochOverflowBoundaryCorrectlyTV2(): Unit = { - // Test the scenario where we have an ongoing transaction at epoch 32766 (Short.MaxValue - 1) - // and the producer crashes/times out. This test verifies that the timeout handling - // correctly manages the epoch overflow scenario without causing failures. - - val epochAtMaxBoundary = (Short.MaxValue - 1).toShort // 32766 - val now = time.milliseconds() - - // Create transaction metadata at the epoch boundary that would cause overflow IFF double-incremented - val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - epochAtMaxBoundary, - RecordBatch.NO_PRODUCER_EPOCH, - txnTimeoutMs, - TransactionState.ONGOING, - partitions, - now, - now, - TV_2 - ) - assertTrue(txnMetadata.isProducerEpochExhausted) - - // Mock the transaction manager to return our test transaction as timed out - when(transactionManager.timedOutTransactions()) - .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, epochAtMaxBoundary))) - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) - - // Mock the append operation to simulate successful write and update the metadata - when(transactionManager.appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - any[TxnTransitMetadata], - capturedErrorsCallback.capture(), - any(), - any()) - ).thenAnswer(invocation => { - val transitMetadata = invocation.getArgument[TxnTransitMetadata](2) - // Simulate the metadata update that would happen in the real appendTransactionToLog - txnMetadata.completeTransitionTo(transitMetadata) - capturedErrorsCallback.getValue.apply(Errors.NONE) - }) - - // Track the actual behavior - var callbackInvoked = false - var resultError: Errors = null - var resultProducerId: Long = -1 - var resultEpoch: Short = -1 - - def checkOnEndTransactionComplete(txnIdAndPidEpoch: TransactionalIdAndProducerIdEpoch) - (error: Errors, newProducerId: Long, newProducerEpoch: Short): Unit = { - callbackInvoked = true - resultError = error - resultProducerId = newProducerId - resultEpoch = newProducerEpoch - } - - // Execute the timeout abort process - coordinator.abortTimedOutTransactions(checkOnEndTransactionComplete) - - assertTrue(callbackInvoked, "Callback should have been invoked") - assertEquals(Errors.NONE, resultError, "Expected no errors in the callback") - assertEquals(producerId, resultProducerId, "Expected producer ID to match") - assertEquals(Short.MaxValue, resultEpoch, "Expected producer epoch to be Short.MaxValue (32767) single epoch bump") - - // Verify the transaction metadata was correctly updated to the final epoch - assertEquals(Short.MaxValue, txnMetadata.producerEpoch, - s"Expected transaction metadata producer epoch to be ${Short.MaxValue} " + - s"after timeout handling, but was ${txnMetadata.producerEpoch}" - ) - - // Verify the basic flow was attempted - verify(transactionManager).timedOutTransactions() - verify(transactionManager, atLeast(1)).getTransactionState(ArgumentMatchers.eq(transactionalId)) - } - @Test def testInitProducerIdWithNoLastProducerData(): Unit = { // If the metadata doesn't include the previous producer data (for example, if it was written to the log by a broker // on an old version), the retry case should fail val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) // Simulate producer trying to continue after new producer has already been initialized - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, producerEpoch)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, producerEpoch)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Errors.PRODUCER_FENCED), result) } @@ -1434,22 +1187,16 @@ class TransactionCoordinatorTest { def testFenceProducerWhenMappingExistsWithDifferentProducerId(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId + 1, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, (producerEpoch - 1).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) // Simulate producer trying to continue after new producer has already been initialized - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, producerEpoch)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, producerEpoch)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Errors.PRODUCER_FENCED), result) } @@ -1458,9 +1205,9 @@ class TransactionCoordinatorTest { mockPidGenerator() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1474,29 +1221,17 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState(util.Optional.empty()) + txnMetadata.pendingState = None }) // Re-initialization should succeed and bump the producer epoch - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, 10)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, 10)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId, 11, Errors.NONE), result) // Simulate producer retrying after successfully re-initializing but failing to receive the response - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, 10)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, 10)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId, 11, Errors.NONE), result) } @@ -1505,9 +1240,9 @@ class TransactionCoordinatorTest { mockPidGenerator() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, 10, 9, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1522,31 +1257,18 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState(util.Optional.empty()) - txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) - txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) + txnMetadata.pendingState = None + txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch + txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch }) // With producer epoch at 10, new producer calls InitProducerId and should get epoch 11 - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId, 11, Errors.NONE), result) // Simulate old producer trying to continue from epoch 10 - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, 10)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, 10)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Errors.PRODUCER_FENCED), result) } @@ -1554,12 +1276,12 @@ class TransactionCoordinatorTest { def testRetryInitProducerIdAfterProducerIdRotation(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) when(pidGenerator.generateProducerId()) .thenReturn(producerId + 1) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1573,33 +1295,21 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState(util.Optional.empty()) - txnMetadata.setProducerId(capturedTxnTransitMetadata.getValue.producerId) - txnMetadata.setPrevProducerId(capturedTxnTransitMetadata.getValue.prevProducerId) - txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) - txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) + txnMetadata.pendingState = None + txnMetadata.producerId = capturedTxnTransitMetadata.getValue.producerId + txnMetadata.previousProducerId = capturedTxnTransitMetadata.getValue.prevProducerId + txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch + txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch }) // Bump epoch and cause producer ID to be rotated - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, (Short.MaxValue - 1).toShort)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, + (Short.MaxValue - 1).toShort)), initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId + 1, 0, Errors.NONE), result) // Simulate producer retrying old request after producer bump - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, (Short.MaxValue - 1).toShort)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, + (Short.MaxValue - 1).toShort)), initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId + 1, 0, Errors.NONE), result) } @@ -1607,12 +1317,12 @@ class TransactionCoordinatorTest { def testInitProducerIdWithInvalidEpochAfterProducerIdRotation(): Unit = { // Existing transaction ID maps to new producer ID val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, TransactionState.EMPTY, partitions, time.milliseconds, time.milliseconds, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (Short.MaxValue - 1).toShort, (Short.MaxValue - 2).toShort, txnTimeoutMs, Empty, partitions, time.milliseconds, time.milliseconds, TV_0) when(pidGenerator.generateProducerId()) .thenReturn(producerId + 1) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1626,33 +1336,21 @@ class TransactionCoordinatorTest { any()) ).thenAnswer(_ => { capturedErrorsCallback.getValue.apply(Errors.NONE) - txnMetadata.pendingState(util.Optional.empty()) - txnMetadata.setProducerId(capturedTxnTransitMetadata.getValue.producerId) - txnMetadata.setPrevProducerId(capturedTxnTransitMetadata.getValue.prevProducerId) - txnMetadata.setProducerEpoch(capturedTxnTransitMetadata.getValue.producerEpoch) - txnMetadata.setLastProducerEpoch(capturedTxnTransitMetadata.getValue.lastProducerEpoch) + txnMetadata.pendingState = None + txnMetadata.producerId = capturedTxnTransitMetadata.getValue.producerId + txnMetadata.previousProducerId = capturedTxnTransitMetadata.getValue.prevProducerId + txnMetadata.producerEpoch = capturedTxnTransitMetadata.getValue.producerEpoch + txnMetadata.lastProducerEpoch = capturedTxnTransitMetadata.getValue.lastProducerEpoch }) // Bump epoch and cause producer ID to be rotated - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, (Short.MaxValue - 1).toShort)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, + (Short.MaxValue - 1).toShort)), initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId + 1, 0, Errors.NONE), result) // Validate that producer with old producer ID and stale epoch is fenced - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, (Short.MaxValue - 2).toShort)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, + (Short.MaxValue - 2).toShort)), initProducerIdMockCallback) assertEquals(InitProducerIdResult(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Errors.PRODUCER_FENCED), result) } @@ -1667,7 +1365,7 @@ class TransactionCoordinatorTest { def shouldAbortExpiredTransactionsInOngoingStateAndBumpEpoch(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1675,8 +1373,8 @@ class TransactionCoordinatorTest { .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. - val expectedTransition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, now, + val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.toSet, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1706,7 +1404,7 @@ class TransactionCoordinatorTest { def shouldNotAcceptSmallerEpochDuringTransactionExpiration(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1716,7 +1414,7 @@ class TransactionCoordinatorTest { when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) val bumpedTxnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 2).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 2).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, bumpedTxnMetadata)))) @@ -1732,8 +1430,8 @@ class TransactionCoordinatorTest { @Test def shouldNotAbortExpiredTransactionsThatHaveAPendingStateTransition(): Unit = { val metadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) - metadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + metadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) @@ -1751,13 +1449,13 @@ class TransactionCoordinatorTest { def shouldNotBumpEpochWhenAbortingExpiredTransactionIfAppendToLogFails(): Unit = { val now = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) when(transactionManager.timedOutTransactions()) .thenReturn(List(TransactionalIdAndProducerIdEpoch(transactionalId, producerId, producerEpoch))) val txnMetadataAfterAppendFailure = new TransactionMetadata(transactionalId, producerId, producerId, - RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, (producerEpoch + 1).toShort, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1765,8 +1463,8 @@ class TransactionCoordinatorTest { // Transaction timeouts use FenceProducerEpoch so clientTransactionVersion is 0. val bumpedEpoch = (producerEpoch + 1).toShort - val expectedTransition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, bumpedEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.PREPARE_ABORT, partitions, now, + val expectedTransition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, bumpedEpoch, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, PrepareAbort, partitions.toSet, now, now + TransactionStateManagerConfig.TRANSACTIONS_ABORT_TIMED_OUT_TRANSACTION_CLEANUP_INTERVAL_MS_DEFAULT, TV_0) when(transactionManager.transactionVersionLevel()).thenReturn(TV_0) @@ -1799,25 +1497,19 @@ class TransactionCoordinatorTest { @Test def shouldNotBumpEpochWithPendingTransaction(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) - txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) + txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - Some(new ProducerIdAndEpoch(producerId, 10)), - initProducerIdMockCallback - ) + coordinator.handleInitProducerId(transactionalId, txnTimeoutMs, Some(new ProducerIdAndEpoch(producerId, 10)), + initProducerIdMockCallback) assertEquals(InitProducerIdResult(RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, Errors.CONCURRENT_TRANSACTIONS), result) - verify(transactionManager).validateTransactionTimeoutMs(anyBoolean(), anyInt()) + verify(transactionManager).validateTransactionTimeoutMs(anyInt()) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } @@ -1834,7 +1526,7 @@ class TransactionCoordinatorTest { coordinator.startup(() => transactionStatePartitionCount, enableTransactionalIdExpiration = false) val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.DEAD, util.Set.of, time.milliseconds(), + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Dead, mutable.Set.empty, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1860,7 +1552,7 @@ class TransactionCoordinatorTest { @Test def testDescribeTransactions(): Unit = { val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_0) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, time.milliseconds(), time.milliseconds(), TV_0) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) @@ -1874,28 +1566,25 @@ class TransactionCoordinatorTest { assertEquals(txnTimeoutMs, result.transactionTimeoutMs) assertEquals(time.milliseconds(), result.transactionStartTimeMs) - val addedPartitions = result.topics.stream.flatMap(topicData => - topicData.partitions.stream - .map(partition => new TopicPartition(topicData.topic, partition)) - ) - .collect(util.stream.Collectors.toSet()); + val addedPartitions = result.topics.asScala.flatMap { topicData => + topicData.partitions.asScala.map(partition => new TopicPartition(topicData.topic, partition)) + }.toSet assertEquals(partitions, addedPartitions) verify(transactionManager).getTransactionState(ArgumentMatchers.eq(transactionalId)) } private def validateRespondsWithConcurrentTransactionsOnInitPidWhenInPrepareState(state: TransactionState): Unit = { - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) - // Since the clientTransactionVersion doesn't matter, use 2 since the states are TransactionState.PREPARE_COMMIT and TransactionState.PREPARE_ABORT. + // Since the clientTransactionVersion doesn't matter, use 2 since the states are PrepareCommit and PrepareAbort. val metadata = new TransactionMetadata(transactionalId, 0, 0, RecordBatch.NO_PRODUCER_EPOCH, - 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, util.Set.of[TopicPartition](new TopicPartition("topic", 1)), 0, 0, TV_2) + 0, RecordBatch.NO_PRODUCER_EPOCH, 0, state, mutable.Set[TopicPartition](new TopicPartition("topic", 1)), 0, 0, TV_2) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata)))) - coordinator.handleInitProducerId(transactionalId, 10, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId(transactionalId, 10, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) } @@ -1905,11 +1594,11 @@ class TransactionCoordinatorTest { when(pidGenerator.generateProducerId()) .thenReturn(producerId) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) + when(transactionManager.validateTransactionTimeoutMs(anyInt())) .thenReturn(true) val metadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, state, util.Set.of, time.milliseconds(), time.milliseconds(), clientTransactionVersion) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, state, mutable.Set.empty[TopicPartition], time.milliseconds(), time.milliseconds(), clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, metadata)))) @@ -1927,8 +1616,7 @@ class TransactionCoordinatorTest { }) val newTxnTimeoutMs = 10 - coordinator.handleInitProducerId(transactionalId, newTxnTimeoutMs, enableTwoPCFlag = false, - keepPreparedTxn = false, None, initProducerIdMockCallback) + coordinator.handleInitProducerId(transactionalId, newTxnTimeoutMs, None, initProducerIdMockCallback) assertEquals(InitProducerIdResult(producerId, (producerEpoch + 1).toShort, Errors.NONE), result) assertEquals(newTxnTimeoutMs, metadata.txnTimeoutMs) @@ -1940,10 +1628,10 @@ class TransactionCoordinatorTest { private def mockPrepare(transactionState: TransactionState, clientTransactionVersion: TransactionVersion, runCallback: Boolean = false): TransactionMetadata = { val now = time.milliseconds() val originalMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, now, now, TV_0) + producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, Ongoing, partitions, now, now, TV_0) - val transition = new TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions, now, now, clientTransactionVersion) + val transition = TxnTransitMetadata(producerId, producerId, RecordBatch.NO_PRODUCER_EPOCH, producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, transactionState, partitions.toSet, now, now, clientTransactionVersion) when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, originalMetadata)))) @@ -1983,153 +1671,4 @@ class TransactionCoordinatorTest { else producerEpoch } - - @Test - def testTV2AllowsEpochReBumpingAfterFailedWrite(): Unit = { - // Test the complete TV2 flow: failed write → epoch fence → abort → retry with epoch bump - // This demonstrates that TV2 allows epoch re-bumping after failed writes (unlike TV1) - val producerEpoch = 1.toShort - val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, RecordBatch.NO_PRODUCER_EPOCH, txnTimeoutMs, TransactionState.ONGOING, partitions, time.milliseconds(), time.milliseconds(), TV_2) - - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) - .thenReturn(true) - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) - - // First attempt fails with COORDINATOR_NOT_AVAILABLE - when(transactionManager.appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - any(), - any(), - any(), - any() - )).thenAnswer(invocation => { - val callback = invocation.getArgument[Errors => Unit](3) - - // Simulate the real TransactionStateManager behavior: reset pendingState on failure - // since handleInitProducerId doesn't provide a custom retryOnError function - txnMetadata.pendingState(util.Optional.empty()) - - // For TV2, hasFailedEpochFence is NOT set to true, allowing epoch bumps on retry - // The epoch remains at its original value (1) since completeTransitionTo was never called - - callback.apply(Errors.COORDINATOR_NOT_AVAILABLE) - }) - - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) - assertEquals(InitProducerIdResult(-1, -1, Errors.COORDINATOR_NOT_AVAILABLE), result) - - // After the first failed attempt, the state should be: - // - hasFailedEpochFence = false (NOT set for TV2) - // - pendingState = None (reset by TransactionStateManager) - // - producerEpoch = 1 (unchanged since completeTransitionTo was never called) - // - transaction still ONGOING - - // Second attempt: Should abort the ongoing transaction - reset(transactionManager) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) - .thenReturn(true) - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) - - // Mock the appendTransactionToLog to succeed for the endTransaction call - when(transactionManager.appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - any(), - any(), - any(), - any() - )).thenAnswer(invocation => { - val newMetadata = invocation.getArgument[TxnTransitMetadata](2) - val callback = invocation.getArgument[Errors => Unit](3) - - // Complete the transition and call the callback with success - txnMetadata.completeTransitionTo(newMetadata) - callback.apply(Errors.NONE) - }) - - // Mock the transactionMarkerChannelManager to simulate the second write (PREPARE_ABORT -> COMPLETE_ABORT) - doAnswer(invocation => { - val newMetadata = invocation.getArgument[TxnTransitMetadata](3) - // Simulate the completion of transaction markers and the second write - // This would normally happen asynchronously after markers are sent - txnMetadata.completeTransitionTo(newMetadata) // This transitions to COMPLETE_ABORT - txnMetadata.pendingState(util.Optional.empty()) - - null - }).when(transactionMarkerChannelManager).addTxnMarkersToSend( - ArgumentMatchers.eq(coordinatorEpoch), - ArgumentMatchers.eq(TransactionResult.ABORT), - ArgumentMatchers.eq(txnMetadata), - any() - ) - - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) - - // The second attempt should return CONCURRENT_TRANSACTIONS (this is intentional) - assertEquals(InitProducerIdResult(-1, -1, Errors.CONCURRENT_TRANSACTIONS), result) - - // The transactionMarkerChannelManager mock should have completed the transition to COMPLETE_ABORT - // Verify that hasFailedEpochFence was never set to true for TV2, allowing future epoch bumps - assertFalse(txnMetadata.hasFailedEpochFence) - - // Third attempt: Client retries after CONCURRENT_TRANSACTIONS - reset(transactionManager) - when(transactionManager.validateTransactionTimeoutMs(anyBoolean(), anyInt())) - .thenReturn(true) - when(transactionManager.getTransactionState(ArgumentMatchers.eq(transactionalId))) - .thenReturn(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata)))) - when(transactionManager.transactionVersionLevel()).thenReturn(TV_2) - - when(transactionManager.appendTransactionToLog( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(coordinatorEpoch), - any(), - any(), - any(), - any() - )).thenAnswer(invocation => { - val newMetadata = invocation.getArgument[TxnTransitMetadata](2) - val callback = invocation.getArgument[Errors => Unit](3) - - // Complete the transition and call the callback with success - txnMetadata.completeTransitionTo(newMetadata) - callback.apply(Errors.NONE) - }) - - coordinator.handleInitProducerId( - transactionalId, - txnTimeoutMs, - enableTwoPCFlag = false, - keepPreparedTxn = false, - None, - initProducerIdMockCallback - ) - - // The third attempt should succeed with epoch 3 (2 + 1) - // This demonstrates that TV2 allows epoch re-bumping after failed writes - assertEquals(InitProducerIdResult(producerId, 3.toShort, Errors.NONE), result) - - // Final verification that hasFailedEpochFence was never set to true for TV2 - assertFalse(txnMetadata.hasFailedEpochFence) - } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala new file mode 100644 index 0000000000000..fd5f1e37a6598 --- /dev/null +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionLogTest.scala @@ -0,0 +1,276 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.coordinator.transaction + + +import kafka.utils.TestUtils +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.protocol.{ByteBufferAccessor, MessageUtil} +import org.apache.kafka.common.protocol.types.Field.TaggedFieldsSection +import org.apache.kafka.common.protocol.types.{CompactArrayOf, Field, Schema, Struct, Type} +import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, SimpleRecord} +import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} +import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} +import org.junit.jupiter.api.Assertions.{assertEquals, assertThrows, assertTrue} +import org.junit.jupiter.api.Test + +import java.nio.ByteBuffer +import scala.collection.Seq +import scala.jdk.CollectionConverters._ + +class TransactionLogTest { + + val producerEpoch: Short = 0 + val transactionTimeoutMs: Int = 1000 + + val topicPartitions: Set[TopicPartition] = Set[TopicPartition](new TopicPartition("topic1", 0), + new TopicPartition("topic1", 1), + new TopicPartition("topic2", 0), + new TopicPartition("topic2", 1), + new TopicPartition("topic2", 2)) + + @Test + def shouldThrowExceptionWriteInvalidTxn(): Unit = { + val transactionalId = "transactionalId" + val producerId = 23423L + + val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Empty, collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) + txnMetadata.addPartitions(topicPartitions) + + assertThrows(classOf[IllegalStateException], () => TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2)) + } + + @Test + def shouldReadWriteMessages(): Unit = { + val pidMappings = Map[String, Long]("zero" -> 0L, + "one" -> 1L, + "two" -> 2L, + "three" -> 3L, + "four" -> 4L, + "five" -> 5L) + + val transactionStates = Map[Long, TransactionState](0L -> Empty, + 1L -> Ongoing, + 2L -> PrepareCommit, + 3L -> CompleteCommit, + 4L -> PrepareAbort, + 5L -> CompleteAbort) + + // generate transaction log messages + val txnRecords = pidMappings.map { case (transactionalId, producerId) => + val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, transactionStates(producerId), collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) + + if (!txnMetadata.state.equals(Empty)) + txnMetadata.addPartitions(topicPartitions) + + val keyBytes = TransactionLog.keyToBytes(transactionalId) + val valueBytes = TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2) + + new SimpleRecord(keyBytes, valueBytes) + }.toSeq + + val records = MemoryRecords.withRecords(0, Compression.NONE, txnRecords: _*) + + var count = 0 + for (record <- records.records.asScala) { + val txnKey = TransactionLog.readTxnRecordKey(record.key) + val transactionalId = txnKey.transactionalId + val txnMetadata = TransactionLog.readTxnRecordValue(transactionalId, record.value).get + + assertEquals(pidMappings(transactionalId), txnMetadata.producerId) + assertEquals(producerEpoch, txnMetadata.producerEpoch) + assertEquals(transactionTimeoutMs, txnMetadata.txnTimeoutMs) + assertEquals(transactionStates(txnMetadata.producerId), txnMetadata.state) + + if (txnMetadata.state.equals(Empty)) + assertEquals(Set.empty[TopicPartition], txnMetadata.topicPartitions) + else + assertEquals(topicPartitions, txnMetadata.topicPartitions) + + count = count + 1 + } + + assertEquals(pidMappings.size, count) + } + + @Test + def testTransactionMetadataParsing(): Unit = { + val transactionalId = "id" + val producerId = 1334L + val topicPartition = new TopicPartition("topic", 0) + + val txnMetadata = new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, producerEpoch, + RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Ongoing, collection.mutable.Set.empty[TopicPartition], 0, 0, TV_0) + txnMetadata.addPartitions(Set(topicPartition)) + + val keyBytes = TransactionLog.keyToBytes(transactionalId) + val valueBytes = TransactionLog.valueToBytes(txnMetadata.prepareNoTransit(), TV_2) + val transactionMetadataRecord = TestUtils.records(Seq( + new SimpleRecord(keyBytes, valueBytes) + )).records.asScala.head + + val (keyStringOpt, valueStringOpt) = TransactionLog.formatRecordKeyAndValue(transactionMetadataRecord) + assertEquals(Some(s"transaction_metadata::transactionalId=$transactionalId"), keyStringOpt) + assertEquals(Some(s"producerId:$producerId,producerEpoch:$producerEpoch,state=Ongoing," + + s"partitions=[$topicPartition],txnLastUpdateTimestamp=0,txnTimeoutMs=$transactionTimeoutMs"), valueStringOpt) + } + + @Test + def testTransactionMetadataTombstoneParsing(): Unit = { + val transactionalId = "id" + val transactionMetadataRecord = TestUtils.records(Seq( + new SimpleRecord(TransactionLog.keyToBytes(transactionalId), null) + )).records.asScala.head + + val (keyStringOpt, valueStringOpt) = TransactionLog.formatRecordKeyAndValue(transactionMetadataRecord) + assertEquals(Some(s"transaction_metadata::transactionalId=$transactionalId"), keyStringOpt) + assertEquals(Some(""), valueStringOpt) + } + + @Test + def testSerializeTransactionLogValueToHighestNonFlexibleVersion(): Unit = { + val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, Set.empty, 500, 500, TV_0) + val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_0)) + assertEquals(0, txnLogValueBuffer.getShort) + } + + @Test + def testSerializeTransactionLogValueToFlexibleVersion(): Unit = { + val txnTransitMetadata = TxnTransitMetadata(1, 1, 1, 1, 1, 1000, CompleteCommit, Set.empty, 500, 500, TV_2) + val txnLogValueBuffer = ByteBuffer.wrap(TransactionLog.valueToBytes(txnTransitMetadata, TV_2)) + assertEquals(TransactionLogValue.HIGHEST_SUPPORTED_VERSION, txnLogValueBuffer.getShort) + } + + @Test + def testDeserializeHighestSupportedTransactionLogValue(): Unit = { + val txnPartitions = new TransactionLogValue.PartitionsSchema() + .setTopic("topic") + .setPartitionIds(java.util.Collections.singletonList(0)) + + val txnLogValue = new TransactionLogValue() + .setProducerId(100) + .setProducerEpoch(50.toShort) + .setTransactionStatus(CompleteCommit.id) + .setTransactionStartTimestampMs(750L) + .setTransactionLastUpdateTimestampMs(1000L) + .setTransactionTimeoutMs(500) + .setTransactionPartitions(java.util.Collections.singletonList(txnPartitions)) + + val serialized = MessageUtil.toVersionPrefixedByteBuffer(1, txnLogValue) + val deserialized = TransactionLog.readTxnRecordValue("transactionId", serialized).get + + assertEquals(100, deserialized.producerId) + assertEquals(50, deserialized.producerEpoch) + assertEquals(CompleteCommit, deserialized.state) + assertEquals(750L, deserialized.txnStartTimestamp) + assertEquals(1000L, deserialized.txnLastUpdateTimestamp) + assertEquals(500, deserialized.txnTimeoutMs) + + val actualTxnPartitions = deserialized.topicPartitions + assertEquals(1, actualTxnPartitions.size) + assertTrue(actualTxnPartitions.contains(new TopicPartition("topic", 0))) + } + + @Test + def testDeserializeFutureTransactionLogValue(): Unit = { + // Copy of TransactionLogValue.PartitionsSchema.SCHEMA_1 with a few + // additional tagged fields. + val futurePartitionsSchema = new Schema( + new Field("topic", Type.COMPACT_STRING, ""), + new Field("partition_ids", new CompactArrayOf(Type.INT32), ""), + TaggedFieldsSection.of( + Int.box(100), new Field("partition_foo", Type.STRING, ""), + Int.box(101), new Field("partition_foo", Type.INT32, "") + ) + ) + + // Create TransactionLogValue.PartitionsSchema with tagged fields + val txnPartitions = new Struct(futurePartitionsSchema) + txnPartitions.set("topic", "topic") + txnPartitions.set("partition_ids", Array(Integer.valueOf(1))) + val txnPartitionsTaggedFields = new java.util.TreeMap[Integer, Any]() + txnPartitionsTaggedFields.put(100, "foo") + txnPartitionsTaggedFields.put(101, 4000) + txnPartitions.set("_tagged_fields", txnPartitionsTaggedFields) + + // Copy of TransactionLogValue.SCHEMA_1 with a few + // additional tagged fields. + val futureTransactionLogValueSchema = new Schema( + new Field("producer_id", Type.INT64, ""), + new Field("producer_epoch", Type.INT16, ""), + new Field("transaction_timeout_ms", Type.INT32, ""), + new Field("transaction_status", Type.INT8, ""), + new Field("transaction_partitions", CompactArrayOf.nullable(futurePartitionsSchema), ""), + new Field("transaction_last_update_timestamp_ms", Type.INT64, ""), + new Field("transaction_start_timestamp_ms", Type.INT64, ""), + TaggedFieldsSection.of( + Int.box(100), new Field("txn_foo", Type.STRING, ""), + Int.box(101), new Field("txn_bar", Type.INT32, "") + ) + ) + + // Create TransactionLogValue with tagged fields + val transactionLogValue = new Struct(futureTransactionLogValueSchema) + transactionLogValue.set("producer_id", 1000L) + transactionLogValue.set("producer_epoch", 100.toShort) + transactionLogValue.set("transaction_timeout_ms", 1000) + transactionLogValue.set("transaction_status", CompleteCommit.id) + transactionLogValue.set("transaction_partitions", Array(txnPartitions)) + transactionLogValue.set("transaction_last_update_timestamp_ms", 2000L) + transactionLogValue.set("transaction_start_timestamp_ms", 3000L) + val txnLogValueTaggedFields = new java.util.TreeMap[Integer, Any]() + txnLogValueTaggedFields.put(100, "foo") + txnLogValueTaggedFields.put(101, 4000) + transactionLogValue.set("_tagged_fields", txnLogValueTaggedFields) + + // Prepare the buffer. + val buffer = ByteBuffer.allocate(transactionLogValue.sizeOf() + 2) + buffer.put(0.toByte) + buffer.put(1.toByte) // Add 1 as version. + transactionLogValue.writeTo(buffer) + buffer.flip() + + // Read the buffer with the real schema and verify that tagged + // fields were read but ignored. + buffer.getShort() // Skip version. + val value = new TransactionLogValue(new ByteBufferAccessor(buffer), 1.toShort) + assertEquals(Seq(100, 101), value.unknownTaggedFields().asScala.map(_.tag)) + assertEquals(Seq(100, 101), value.transactionPartitions().get(0).unknownTaggedFields().asScala.map(_.tag)) + + // Read the buffer with readTxnRecordValue. + buffer.rewind() + val txnMetadata = TransactionLog.readTxnRecordValue("transaction-id", buffer).get + assertEquals(1000L, txnMetadata.producerId) + assertEquals(100, txnMetadata.producerEpoch) + assertEquals(1000L, txnMetadata.txnTimeoutMs) + assertEquals(CompleteCommit, txnMetadata.state) + assertEquals(Set(new TopicPartition("topic", 1)), txnMetadata.topicPartitions) + assertEquals(2000L, txnMetadata.txnLastUpdateTimestamp) + assertEquals(3000L, txnMetadata.txnStartTimestamp) + } + + @Test + def testReadTxnRecordKeyCanReadUnknownMessage(): Unit = { + val record = new TransactionLogKey() + val unknownRecord = MessageUtil.toVersionPrefixedBytes(Short.MaxValue, record) + val key = TransactionLog.readTxnRecordKey(ByteBuffer.wrap(unknownRecord)) + assertEquals(UnknownKey(Short.MaxValue), key) + } +} diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala index 7699d643a3ec0..65e00d3de1452 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerChannelManagerTest.scala @@ -17,9 +17,10 @@ package kafka.coordinator.transaction import java.util -import java.util.Optional +import java.util.Arrays.asList +import java.util.Collections import java.util.concurrent.{Callable, Executors, Future} -import kafka.server.KafkaConfig +import kafka.server.{KafkaConfig, MetadataCache} import kafka.utils.TestUtils import org.apache.kafka.clients.{ClientResponse, NetworkClient} import org.apache.kafka.common.protocol.{ApiKeys, Errors} @@ -27,8 +28,6 @@ import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{RequestHeader, TransactionResult, WriteTxnMarkersRequest, WriteTxnMarkersResponse} import org.apache.kafka.common.utils.MockTime import org.apache.kafka.common.{Node, TopicPartition} -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState} -import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{MetadataVersion, TransactionVersion} import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.util.RequestAndCompletionHandler @@ -41,6 +40,7 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.Mockito.{clearInvocations, mock, mockConstruction, times, verify, verifyNoMoreInteractions, when} import scala.jdk.CollectionConverters._ +import scala.collection.mutable import scala.util.Try class TransactionMarkerChannelManagerTest { @@ -66,9 +66,9 @@ class TransactionMarkerChannelManagerTest { private val txnTimeoutMs = 0 private val txnResult = TransactionResult.COMMIT private val txnMetadata1 = new TransactionMetadata(transactionalId1, producerId1, producerId1, RecordBatch.NO_PRODUCER_ID, - producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(partition1, partition2), 0L, 0L, TransactionVersion.TV_2) + producerEpoch, lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](partition1, partition2), 0L, 0L, TransactionVersion.TV_2) private val txnMetadata2 = new TransactionMetadata(transactionalId2, producerId2, producerId2, RecordBatch.NO_PRODUCER_ID, - producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(partition1), 0L, 0L, TransactionVersion.TV_2) + producerEpoch, lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](partition1), 0L, 0L, TransactionVersion.TV_2) private val capturedErrorsCallback: ArgumentCaptor[Errors => Unit] = ArgumentCaptor.forClass(classOf[Errors => Unit]) private val time = new MockTime @@ -128,7 +128,7 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(txnStateManager.appendTransactionToLog( ArgumentMatchers.eq(transactionalId2), @@ -144,33 +144,33 @@ class TransactionMarkerChannelManagerTest { var addMarkerFuture: Future[Try[Unit]] = null val executor = Executors.newFixedThreadPool(1) + txnMetadata2.lock.lock() try { - txnMetadata2.inLock(() => { - addMarkerFuture = executor.submit((() => { - Try(channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, + addMarkerFuture = executor.submit((() => { + Try(channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata2, expectedTransition)) - }): Callable[Try[Unit]]) - - val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) - val response = new WriteTxnMarkersResponse( - util.Map.of(producerId2: java.lang.Long, util.Map.of(partition1, Errors.NONE))) - val clientResponse = new ClientResponse(header, null, null, - time.milliseconds(), time.milliseconds(), false, null, null, - response) - - TestUtils.waitUntilTrue(() => { - val requests = channelManager.generateRequests().asScala - if (requests.nonEmpty) { - assertEquals(1, requests.size) - val request = requests.head - request.handler.onComplete(clientResponse) - true - } else { - false - } - }, "Timed out waiting for expected WriteTxnMarkers request") - }) + }): Callable[Try[Unit]]) + + val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) + val response = new WriteTxnMarkersResponse( + Collections.singletonMap(producerId2: java.lang.Long, Collections.singletonMap(partition1, Errors.NONE))) + val clientResponse = new ClientResponse(header, null, null, + time.milliseconds(), time.milliseconds(), false, null, null, + response) + + TestUtils.waitUntilTrue(() => { + val requests = channelManager.generateRequests().asScala + if (requests.nonEmpty) { + assertEquals(1, requests.size) + val request = requests.head + request.handler.onComplete(clientResponse) + true + } else { + false + } + }, "Timed out waiting for expected WriteTxnMarkers request") } finally { + txnMetadata2.lock.unlock() executor.shutdown() } @@ -197,12 +197,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) // Build a successful client response. val header = new RequestHeader(ApiKeys.WRITE_TXN_MARKERS, 0, "client", 1) val successfulResponse = new WriteTxnMarkersResponse( - util.Map.of(producerId2: java.lang.Long, util.Map.of(partition1, Errors.NONE))) + Collections.singletonMap(producerId2: java.lang.Long, Collections.singletonMap(partition1, Errors.NONE))) val successfulClientResponse = new ClientResponse(header, null, null, time.milliseconds(), time.milliseconds(), false, null, null, successfulResponse) @@ -280,12 +280,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata1, txnMetadata1.prepareComplete(time.milliseconds())) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata2, txnMetadata2.prepareComplete(time.milliseconds())) @@ -299,10 +299,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.queueForBroker(broker2.id).get.totalNumMarkers(txnTopicPartition2)) val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( - util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)))).build() + asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( - util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition2)))).build() + asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() val requests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => (handler.destination, handler.request.asInstanceOf[WriteTxnMarkersRequest.Builder].build()) @@ -320,12 +320,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.empty()) + ).thenReturn(None) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata1, txnMetadata1.prepareComplete(time.milliseconds())) @@ -344,17 +344,17 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(Node.noNode)) - .thenReturn(Optional.of(Node.noNode)) - .thenReturn(Optional.of(Node.noNode)) - .thenReturn(Optional.of(Node.noNode)) - .thenReturn(Optional.of(broker1)) - .thenReturn(Optional.of(broker1)) + ).thenReturn(Some(Node.noNode)) + .thenReturn(Some(Node.noNode)) + .thenReturn(Some(Node.noNode)) + .thenReturn(Some(Node.noNode)) + .thenReturn(Some(broker1)) + .thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata1, txnMetadata1.prepareComplete(time.milliseconds())) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata2, txnMetadata2.prepareComplete(time.milliseconds())) @@ -369,10 +369,10 @@ class TransactionMarkerChannelManagerTest { assertEquals(1, channelManager.queueForUnknownBroker.totalNumMarkers(txnTopicPartition2)) val expectedBroker1Request = new WriteTxnMarkersRequest.Builder( - util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition1)))).build() + asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)), + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId2, producerEpoch, coordinatorEpoch, txnResult, asList(partition1)))).build() val expectedBroker2Request = new WriteTxnMarkersRequest.Builder( - util.List.of(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, util.List.of(partition2)))).build() + asList(new WriteTxnMarkersRequest.TxnMarkerEntry(producerId1, producerEpoch, coordinatorEpoch, txnResult, asList(partition2)))).build() val firstDrainedRequests: Map[Node, WriteTxnMarkersRequest] = channelManager.generateRequests().asScala.map { handler => (handler.destination, handler.request.asInstanceOf[WriteTxnMarkersRequest.Builder].build()) @@ -395,12 +395,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata1, txnMetadata1.prepareComplete(time.milliseconds())) channelManager.addTxnMarkersToSend(coordinatorEpoch, txnResult, txnMetadata2, txnMetadata2.prepareComplete(time.milliseconds())) @@ -436,12 +436,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) val txnTransitionMetadata2 = txnMetadata2.prepareComplete(time.milliseconds()) @@ -477,8 +477,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(Optional.empty(), txnMetadata2.pendingState) - assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata2.state) + assertEquals(None, txnMetadata2.pendingState) + assertEquals(CompleteCommit, txnMetadata2.state) } @Test @@ -489,12 +489,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) val txnTransitionMetadata2 = txnMetadata2.prepareComplete(time.milliseconds()) @@ -506,7 +506,7 @@ class TransactionMarkerChannelManagerTest { any(), any())) .thenAnswer(_ => { - txnMetadata2.pendingState(util.Optional.empty()) + txnMetadata2.pendingState = None capturedErrorsCallback.getValue.apply(Errors.NOT_COORDINATOR) }) @@ -530,8 +530,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(Optional.empty(), txnMetadata2.pendingState) - assertEquals(TransactionState.PREPARE_COMMIT, txnMetadata2.state) + assertEquals(None, txnMetadata2.pendingState) + assertEquals(PrepareCommit, txnMetadata2.state) } @ParameterizedTest @@ -546,12 +546,12 @@ class TransactionMarkerChannelManagerTest { ArgumentMatchers.eq(partition1.topic), ArgumentMatchers.eq(partition1.partition), any()) - ).thenReturn(Optional.of(broker1)) + ).thenReturn(Some(broker1)) when(metadataCache.getPartitionLeaderEndpoint( ArgumentMatchers.eq(partition2.topic), ArgumentMatchers.eq(partition2.partition), any()) - ).thenReturn(Optional.of(broker2)) + ).thenReturn(Some(broker2)) val txnTransitionMetadata2 = txnMetadata2.prepareComplete(time.milliseconds()) @@ -591,8 +591,8 @@ class TransactionMarkerChannelManagerTest { assertEquals(0, channelManager.numTxnsWithPendingMarkers) assertEquals(0, channelManager.queueForBroker(broker1.id).get.totalNumMarkers) - assertEquals(Optional.empty(), txnMetadata2.pendingState) - assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata2.state) + assertEquals(None, txnMetadata2.pendingState) + assertEquals(CompleteCommit, txnMetadata2.state) } private def createPidErrorMap(errors: Errors): util.HashMap[java.lang.Long, util.Map[TopicPartition, Errors]] = { @@ -631,11 +631,11 @@ class TransactionMarkerChannelManagerTest { txnMetadata: TransactionMetadata ): Unit = { if (isTransactionV2Enabled) { - txnMetadata.clientTransactionVersion(TransactionVersion.TV_2) - txnMetadata.setProducerEpoch((producerEpoch + 1).toShort) - txnMetadata.setLastProducerEpoch(producerEpoch) + txnMetadata.clientTransactionVersion = TransactionVersion.TV_2 + txnMetadata.producerEpoch = (producerEpoch + 1).toShort + txnMetadata.lastProducerEpoch = producerEpoch } else { - txnMetadata.clientTransactionVersion(TransactionVersion.TV_1) + txnMetadata.clientTransactionVersion = TransactionVersion.TV_1 } } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala index e955a9009ce9f..72ffa5629c04e 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMarkerRequestCompletionHandlerTest.scala @@ -17,18 +17,20 @@ package kafka.coordinator.transaction import java.{lang, util} +import java.util.Arrays.asList import org.apache.kafka.clients.ClientResponse import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{RequestHeader, TransactionResult, WriteTxnMarkersRequest, WriteTxnMarkersResponse} -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState} import org.apache.kafka.server.common.TransactionVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.mockito.ArgumentMatchers import org.mockito.Mockito.{mock, verify, when} +import scala.collection.mutable + class TransactionMarkerRequestCompletionHandlerTest { private val brokerId = 0 @@ -42,11 +44,11 @@ class TransactionMarkerRequestCompletionHandlerTest { private val txnResult = TransactionResult.COMMIT private val topicPartition = new TopicPartition("topic1", 0) private val txnMetadata = new TransactionMetadata(transactionalId, producerId, producerId, RecordBatch.NO_PRODUCER_ID, - producerEpoch, lastProducerEpoch, txnTimeoutMs, TransactionState.PREPARE_COMMIT, util.Set.of(topicPartition), 0L, 0L, TransactionVersion.TV_2) - private val pendingCompleteTxnAndMarkers = util.List.of( + producerEpoch, lastProducerEpoch, txnTimeoutMs, PrepareCommit, mutable.Set[TopicPartition](topicPartition), 0L, 0L, TransactionVersion.TV_2) + private val pendingCompleteTxnAndMarkers = asList( PendingCompleteTxnAndMarkerEntry( PendingCompleteTxn(transactionalId, coordinatorEpoch, txnMetadata, txnMetadata.prepareComplete(42)), - new WriteTxnMarkersRequest.TxnMarkerEntry(producerId, producerEpoch, coordinatorEpoch, txnResult, util.List.of(topicPartition)))) + new WriteTxnMarkersRequest.TxnMarkerEntry(producerId, producerEpoch, coordinatorEpoch, txnResult, asList(topicPartition)))) private val markerChannelManager: TransactionMarkerChannelManager = mock(classOf[TransactionMarkerChannelManager]) @@ -192,7 +194,7 @@ class TransactionMarkerRequestCompletionHandlerTest { handler.onComplete(new ClientResponse(new RequestHeader(ApiKeys.PRODUCE, 0, "client", 1), null, null, 0, 0, false, null, null, response)) - assertEquals(txnMetadata.topicPartitions, util.Set.of(topicPartition)) + assertEquals(txnMetadata.topicPartitions, mutable.Set[TopicPartition](topicPartition)) verify(markerChannelManager).addTxnMarkersToBrokerQueue(producerId, producerEpoch, txnResult, pendingCompleteTxnAndMarkers.get(0).pendingCompleteTxn, Set[TopicPartition](topicPartition)) diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala index 87a18b18dc09b..6b2d20e69eb65 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionMetadataTest.scala @@ -19,7 +19,6 @@ package kafka.coordinator.transaction import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} import org.apache.kafka.server.common.TransactionVersion import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} import org.apache.kafka.server.util.MockTime @@ -28,11 +27,7 @@ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource -import java.util -import java.util.Optional - import scala.collection.mutable -import scala.jdk.CollectionConverters._ class TransactionMetadataTest { @@ -45,20 +40,19 @@ class TransactionMetadataTest { val producerEpoch = RecordBatch.NO_PRODUCER_EPOCH val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.empty()) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, None) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(0, txnMetadata.producerEpoch) @@ -70,20 +64,19 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.empty()) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, None) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -95,22 +88,21 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareIncrementProducerEpoch(30000, - Optional.empty, time.milliseconds())) + None, time.milliseconds())) } @Test @@ -118,20 +110,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = -1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -143,20 +135,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.COMPLETE_ABORT, - util.Set.of, - time.milliseconds() - 1, - time.milliseconds(), - TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteAbort, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds() - 1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -168,20 +160,20 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.COMPLETE_COMMIT, - util.Set.of, - time.milliseconds() - 1, - time.milliseconds(), - TV_2) - - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteCommit, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds() - 1, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() + 1, true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -192,27 +184,27 @@ class TransactionMetadataTest { def testTolerateUpdateTimeShiftDuringEpochBump(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - 1L, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch), + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Option(producerEpoch), Some(time.milliseconds() - 1)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) - assertEquals(-1L, txnMetadata.txnStartTimestamp) + assertEquals(1L, txnMetadata.txnStartTimestamp) assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) } @@ -220,26 +212,26 @@ class TransactionMetadataTest { def testTolerateUpdateTimeResetDuringProducerIdRotation(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - 1L, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareProducerIdRotation(producerId + 1, 30000, time.milliseconds() - 1, true) + val transitMetadata = txnMetadata.prepareProducerIdRotation(producerId + 1, 30000, time.milliseconds() - 1, recordLastEpoch = true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId + 1, txnMetadata.producerId) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) assertEquals(0, txnMetadata.producerEpoch) - assertEquals(-1L, txnMetadata.txnStartTimestamp) + assertEquals(1L, txnMetadata.txnStartTimestamp) assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) } @@ -247,33 +239,33 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringAddPartitions(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - time.milliseconds(), - time.milliseconds(), - TV_0) - - // let new time be smaller; when transiting from TransactionState.EMPTY the start time would be updated to the update-time - var transitMetadata = txnMetadata.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0)), time.milliseconds() - 1, TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds(), + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + // let new time be smaller; when transiting from Empty the start time would be updated to the update-time + var transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0)), time.milliseconds() - 1, TV_0) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(util.Set.of(new TopicPartition("topic1", 0)), txnMetadata.topicPartitions) + assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) assertEquals(time.milliseconds() - 1, txnMetadata.txnStartTimestamp) assertEquals(time.milliseconds() - 1, txnMetadata.txnLastUpdateTimestamp) - // add another partition, check that in TransactionState.ONGOING state the start timestamp would not change to update time - transitMetadata = txnMetadata.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds() - 2, TV_0) + // add another partition, check that in Ongoing state the start timestamp would not change to update time + transitMetadata = txnMetadata.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds() - 2, TV_0) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(util.Set.of(new TopicPartition("topic1", 0), new TopicPartition("topic2", 0)), txnMetadata.topicPartitions) + assertEquals(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic2", 0)), txnMetadata.topicPartitions) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -285,23 +277,23 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringPrepareCommit(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - 1L, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(TransactionState.PREPARE_COMMIT, txnMetadata.state) + assertEquals(PrepareCommit, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -313,23 +305,23 @@ class TransactionMetadataTest { def testTolerateTimeShiftDuringPrepareAbort(): Unit = { val producerEpoch: Short = 1 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - 1L, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) // let new time be smaller - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(TransactionState.PREPARE_ABORT, txnMetadata.state) + assertEquals(PrepareAbort, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -344,25 +336,25 @@ class TransactionMetadataTest { val producerEpoch: Short = 1 val lastProducerEpoch: Short = 0 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - lastProducerEpoch, - 30000, - TransactionState.PREPARE_COMMIT, - util.Set.of(), - 1L, - time.milliseconds(), - clientTransactionVersion + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = lastProducerEpoch, + txnTimeoutMs = 30000, + state = PrepareCommit, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = clientTransactionVersion ) // let new time be smaller val transitMetadata = txnMetadata.prepareComplete(time.milliseconds() - 1) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(TransactionState.COMPLETE_COMMIT, txnMetadata.state) + assertEquals(CompleteCommit, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -377,25 +369,25 @@ class TransactionMetadataTest { val producerEpoch: Short = 1 val lastProducerEpoch: Short = 0 val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - lastProducerEpoch, - 30000, - TransactionState.PREPARE_ABORT, - util.Set.of, - 1L, - time.milliseconds(), - clientTransactionVersion + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = lastProducerEpoch, + txnTimeoutMs = 30000, + state = PrepareAbort, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = 1L, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = clientTransactionVersion ) // let new time be smaller val transitMetadata = txnMetadata.prepareComplete(time.milliseconds() - 1) txnMetadata.completeTransitionTo(transitMetadata) - assertEquals(TransactionState.COMPLETE_ABORT, txnMetadata.state) + assertEquals(CompleteAbort, txnMetadata.state) assertEquals(producerId, txnMetadata.producerId) assertEquals(lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -408,29 +400,28 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) val fencingTransitMetadata = txnMetadata.prepareFenceProducerEpoch() assertEquals(Short.MaxValue, fencingTransitMetadata.producerEpoch) assertEquals(RecordBatch.NO_PRODUCER_EPOCH, fencingTransitMetadata.lastProducerEpoch) - assertEquals(Optional.of(TransactionState.PREPARE_EPOCH_FENCE), txnMetadata.pendingState) + assertEquals(Some(PrepareEpochFence), txnMetadata.pendingState) // We should reset the pending state to make way for the abort transition. - txnMetadata.pendingState(Optional.empty()) + txnMetadata.pendingState = None - val transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_ABORT, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) + val transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareAbort, TV_0, RecordBatch.NO_PRODUCER_ID, time.milliseconds(), false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, transitMetadata.producerId) } @@ -440,18 +431,17 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.COMPLETE_COMMIT, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteCommit, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) @@ -462,18 +452,17 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.COMPLETE_ABORT, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = CompleteAbort, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) @@ -484,18 +473,17 @@ class TransactionMetadataTest { val producerEpoch = Short.MaxValue val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) assertTrue(txnMetadata.isProducerEpochExhausted) assertThrows(classOf[IllegalStateException], () => txnMetadata.prepareFenceProducerEpoch()) } @@ -505,24 +493,23 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) val newProducerId = 9893L - val transitMetadata = txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), true) + val transitMetadata = txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), recordLastEpoch = true) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(newProducerId, txnMetadata.producerId) - assertEquals(producerId, txnMetadata.prevProducerId) + assertEquals(producerId, txnMetadata.previousProducerId) assertEquals(0, txnMetadata.producerEpoch) assertEquals(producerEpoch, txnMetadata.lastProducerEpoch) } @@ -533,20 +520,20 @@ class TransactionMetadataTest { val producerEpoch = 10.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - time.milliseconds(), - time.milliseconds(), - TV_2) - - var transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds(), + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) + + var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, RecordBatch.NO_PRODUCER_ID, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals((producerEpoch + 1).toShort, txnMetadata.producerEpoch) @@ -565,22 +552,22 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.ONGOING, - util.Set.of, - time.milliseconds(), - time.milliseconds(), - TV_2) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Ongoing, + topicPartitions = mutable.Set.empty, + txnStartTimestamp = time.milliseconds(), + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_2) assertTrue(txnMetadata.isProducerEpochExhausted) val newProducerId = 9893L - var transitMetadata = txnMetadata.prepareAbortOrCommit(TransactionState.PREPARE_COMMIT, TV_2, newProducerId, time.milliseconds() - 1, false) + var transitMetadata = txnMetadata.prepareAbortOrCommit(PrepareCommit, TV_2, newProducerId, time.milliseconds() - 1, false) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(Short.MaxValue, txnMetadata.producerEpoch) @@ -597,21 +584,21 @@ class TransactionMetadataTest { @Test def testRotateProducerIdInOngoingState(): Unit = { - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.ONGOING, TV_0)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(Ongoing, TV_0)) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def testRotateProducerIdInPrepareAbortState(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.PREPARE_ABORT, clientTransactionVersion)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(PrepareAbort, clientTransactionVersion)) } @ParameterizedTest @ValueSource(shorts = Array(0, 2)) def testRotateProducerIdInPrepareCommitState(transactionVersion: Short): Unit = { val clientTransactionVersion = TransactionVersion.fromFeatureLevel(transactionVersion) - assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(TransactionState.PREPARE_COMMIT, clientTransactionVersion)) + assertThrows(classOf[IllegalStateException], () => testRotateProducerIdInOngoingState(PrepareCommit, clientTransactionVersion)) } @Test @@ -619,20 +606,19 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_EPOCH, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch)) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(producerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(0, txnMetadata.producerEpoch) @@ -644,20 +630,19 @@ class TransactionMetadataTest { val producerEpoch = 735.toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(producerEpoch)) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(producerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch + 1, txnMetadata.producerEpoch) @@ -670,20 +655,19 @@ class TransactionMetadataTest { val lastProducerEpoch = (producerEpoch - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - RecordBatch.NO_PRODUCER_ID, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - lastProducerEpoch, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Optional.of(lastProducerEpoch)) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = RecordBatch.NO_PRODUCER_ID, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = lastProducerEpoch, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val transitMetadata = prepareSuccessfulIncrementProducerEpoch(txnMetadata, Some(lastProducerEpoch)) txnMetadata.completeTransitionTo(transitMetadata) assertEquals(producerId, txnMetadata.producerId) assertEquals(producerEpoch, txnMetadata.producerEpoch) @@ -696,34 +680,32 @@ class TransactionMetadataTest { val lastProducerEpoch = (producerEpoch - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - producerId, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - lastProducerEpoch, - 30000, - TransactionState.EMPTY, - util.Set.of, - -1, - time.milliseconds(), - TV_0) - - assertThrows(Errors.PRODUCER_FENCED.exception().getClass, () => - txnMetadata.prepareIncrementProducerEpoch(30000, Optional.of((lastProducerEpoch - 1).toShort), - time.milliseconds()) - ) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = producerId, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = lastProducerEpoch, + txnTimeoutMs = 30000, + state = Empty, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = TV_0) + + val result = txnMetadata.prepareIncrementProducerEpoch(30000, Some((lastProducerEpoch - 1).toShort), + time.milliseconds()) + assertEquals(Left(Errors.PRODUCER_FENCED), result) } @Test def testTransactionStateIdAndNameMapping(): Unit = { - for (state <- TransactionState.ALL_STATES.asScala) { + for (state <- TransactionState.AllStates) { assertEquals(state, TransactionState.fromId(state.id)) - assertEquals(Optional.of(state), TransactionState.fromName(state.stateName)) + assertEquals(Some(state), TransactionState.fromName(state.name)) - if (state != TransactionState.DEAD) { - val clientTransactionState = org.apache.kafka.clients.admin.TransactionState.parse(state.stateName) - assertEquals(state.stateName, clientTransactionState.toString) + if (state != Dead) { + val clientTransactionState = org.apache.kafka.clients.admin.TransactionState.parse(state.name) + assertEquals(state.name, clientTransactionState.toString) assertNotEquals(org.apache.kafka.clients.admin.TransactionState.UNKNOWN, clientTransactionState) } } @@ -732,27 +714,27 @@ class TransactionMetadataTest { @Test def testAllTransactionStatesAreMapped(): Unit = { val unmatchedStates = mutable.Set( - TransactionState.EMPTY, - TransactionState.ONGOING, - TransactionState.PREPARE_COMMIT, - TransactionState.PREPARE_ABORT, - TransactionState.COMPLETE_COMMIT, - TransactionState.COMPLETE_ABORT, - TransactionState.PREPARE_EPOCH_FENCE, - TransactionState.DEAD + Empty, + Ongoing, + PrepareCommit, + PrepareAbort, + CompleteCommit, + CompleteAbort, + PrepareEpochFence, + Dead ) // The exhaustive match is intentional here to ensure that we are // forced to update the test case if a new state is added. - TransactionState.ALL_STATES.asScala.foreach { - case TransactionState.EMPTY => assertTrue(unmatchedStates.remove(TransactionState.EMPTY)) - case TransactionState.ONGOING => assertTrue(unmatchedStates.remove(TransactionState.ONGOING)) - case TransactionState.PREPARE_COMMIT => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_COMMIT)) - case TransactionState.PREPARE_ABORT => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_ABORT)) - case TransactionState.COMPLETE_COMMIT => assertTrue(unmatchedStates.remove(TransactionState.COMPLETE_COMMIT)) - case TransactionState.COMPLETE_ABORT => assertTrue(unmatchedStates.remove(TransactionState.COMPLETE_ABORT)) - case TransactionState.PREPARE_EPOCH_FENCE => assertTrue(unmatchedStates.remove(TransactionState.PREPARE_EPOCH_FENCE)) - case TransactionState.DEAD => assertTrue(unmatchedStates.remove(TransactionState.DEAD)) + TransactionState.AllStates.foreach { + case Empty => assertTrue(unmatchedStates.remove(Empty)) + case Ongoing => assertTrue(unmatchedStates.remove(Ongoing)) + case PrepareCommit => assertTrue(unmatchedStates.remove(PrepareCommit)) + case PrepareAbort => assertTrue(unmatchedStates.remove(PrepareAbort)) + case CompleteCommit => assertTrue(unmatchedStates.remove(CompleteCommit)) + case CompleteAbort => assertTrue(unmatchedStates.remove(CompleteAbort)) + case PrepareEpochFence => assertTrue(unmatchedStates.remove(PrepareEpochFence)) + case Dead => assertTrue(unmatchedStates.remove(Dead)) } assertEquals(Set.empty, unmatchedStates) @@ -762,26 +744,27 @@ class TransactionMetadataTest { val producerEpoch = (Short.MaxValue - 1).toShort val txnMetadata = new TransactionMetadata( - transactionalId, - producerId, - producerId, - RecordBatch.NO_PRODUCER_ID, - producerEpoch, - RecordBatch.NO_PRODUCER_EPOCH, - 30000, - state, - util.Set.of, - -1, - time.milliseconds(), - clientTransactionVersion) + transactionalId = transactionalId, + producerId = producerId, + previousProducerId = producerId, + nextProducerId = RecordBatch.NO_PRODUCER_ID, + producerEpoch = producerEpoch, + lastProducerEpoch = RecordBatch.NO_PRODUCER_EPOCH, + txnTimeoutMs = 30000, + state = state, + topicPartitions = mutable.Set.empty, + txnLastUpdateTimestamp = time.milliseconds(), + clientTransactionVersion = clientTransactionVersion) val newProducerId = 9893L - txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), false) + txnMetadata.prepareProducerIdRotation(newProducerId, 30000, time.milliseconds(), recordLastEpoch = false) } private def prepareSuccessfulIncrementProducerEpoch(txnMetadata: TransactionMetadata, - expectedProducerEpoch: Optional[java.lang.Short], + expectedProducerEpoch: Option[Short], now: Option[Long] = None): TxnTransitMetadata = { - txnMetadata.prepareIncrementProducerEpoch(30000, expectedProducerEpoch, now.getOrElse(time.milliseconds())) + val result = txnMetadata.prepareIncrementProducerEpoch(30000, expectedProducerEpoch, + now.getOrElse(time.milliseconds())) + result.getOrElse(throw new AssertionError(s"prepareIncrementProducerEpoch failed with $result")) } } diff --git a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala index 41ee3f7f4cc90..c3568aa1a88ea 100644 --- a/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala +++ b/core/src/test/scala/unit/kafka/coordinator/transaction/TransactionStateManagerTest.scala @@ -18,13 +18,14 @@ package kafka.coordinator.transaction import java.lang.management.ManagementFactory import java.nio.ByteBuffer -import java.util.concurrent.{ConcurrentHashMap, CountDownLatch} +import java.util.concurrent.CountDownLatch +import java.util.concurrent.locks.ReentrantLock import javax.management.ObjectName -import kafka.server.ReplicaManager -import kafka.utils.TestUtils -import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import kafka.log.UnifiedLog +import kafka.server.{MetadataCache, ReplicaManager} +import kafka.utils.{Pool, TestUtils} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.errors.InvalidRegularExpression import org.apache.kafka.common.internals.Topic.TRANSACTION_STATE_TOPIC_NAME import org.apache.kafka.common.metrics.{JmxReporter, KafkaMetricsContext, Metrics} import org.apache.kafka.common.protocol.{Errors, MessageUtil} @@ -32,15 +33,12 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests.TransactionResult import org.apache.kafka.common.utils.MockTime -import org.apache.kafka.coordinator.transaction.{TransactionMetadata, TransactionState, TxnTransitMetadata} -import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.common.TransactionVersion.{TV_0, TV_2} import org.apache.kafka.coordinator.transaction.generated.TransactionLogKey -import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.MockScheduler -import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LogConfig, LogOffsetMetadata, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LogConfig, LogOffsetMetadata} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.junit.jupiter.params.ParameterizedTest @@ -49,7 +47,7 @@ import org.mockito.{ArgumentCaptor, ArgumentMatchers} import org.mockito.ArgumentMatchers.{any, anyInt, anyLong, anyShort} import org.mockito.Mockito.{atLeastOnce, mock, reset, times, verify, when} -import java.util +import java.util.Collections import scala.collection.{Map, mutable} import scala.jdk.CollectionConverters._ @@ -59,7 +57,6 @@ class TransactionStateManagerTest { val numPartitions = 2 val transactionTimeoutMs: Int = 1000 val topicPartition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) - val transactionTopicId = Uuid.randomUuid() val coordinatorEpoch = 10 val txnRecords: mutable.ArrayBuffer[SimpleRecord] = mutable.ArrayBuffer[SimpleRecord]() @@ -72,8 +69,10 @@ class TransactionStateManagerTest { when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - util.Map.of(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), - 0) + Collections.singletonMap(TransactionVersion.FEATURE_NAME, TransactionVersion.TV_2.featureLevel()), + 0, + true + ) } val metrics = new Metrics() @@ -98,8 +97,6 @@ class TransactionStateManagerTest { // make sure the transactional id hashes to the assigning partition id assertEquals(partitionId, transactionManager.partitionFor(transactionalId1)) assertEquals(partitionId, transactionManager.partitionFor(transactionalId2)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) } @AfterEach @@ -109,21 +106,16 @@ class TransactionStateManagerTest { @Test def testValidateTransactionTimeout(): Unit = { - assertTrue(transactionManager.validateTransactionTimeoutMs(enableTwoPC = false, 1)) - assertFalse(transactionManager.validateTransactionTimeoutMs(enableTwoPC = false, -1)) - assertFalse(transactionManager.validateTransactionTimeoutMs(enableTwoPC = false, 0)) - assertTrue(transactionManager.validateTransactionTimeoutMs(enableTwoPC = false, txnConfig.transactionMaxTimeoutMs)) - assertFalse(transactionManager.validateTransactionTimeoutMs(enableTwoPC = false, txnConfig.transactionMaxTimeoutMs + 1)) - // KIP-939 Always return true when two phase commit is enabled on transaction. Two phase commit is enabled in case of - // externally coordinated distributed transactions. - assertTrue(transactionManager.validateTransactionTimeoutMs(enableTwoPC = true, -1)) - assertTrue(transactionManager.validateTransactionTimeoutMs(enableTwoPC = true, 10)) - assertTrue(transactionManager.validateTransactionTimeoutMs(enableTwoPC = true, txnConfig.transactionMaxTimeoutMs + 1)) + assertTrue(transactionManager.validateTransactionTimeoutMs(1)) + assertFalse(transactionManager.validateTransactionTimeoutMs(-1)) + assertFalse(transactionManager.validateTransactionTimeoutMs(0)) + assertTrue(transactionManager.validateTransactionTimeoutMs(txnConfig.transactionMaxTimeoutMs)) + assertFalse(transactionManager.validateTransactionTimeoutMs(txnConfig.transactionMaxTimeoutMs + 1)) } @Test def testAddGetPids(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) assertEquals(Right(None), transactionManager.getTransactionState(transactionalId1)) assertEquals(Right(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1)), @@ -142,10 +134,10 @@ class TransactionStateManagerTest { assertEquals(0, transactionManager.partitionFor(metadata1.transactionalId)) assertEquals(1, transactionManager.partitionFor(metadata2.transactionalId)) - transactionManager.addLoadedTransactionsToCache(0, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(0, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(metadata1) - transactionManager.addLoadedTransactionsToCache(1, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(1, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(metadata2) def cachedProducerEpoch(transactionalId: String): Option[Short] = { @@ -175,14 +167,14 @@ class TransactionStateManagerTest { when(replicaManager.getLog(topicPartition)).thenReturn(Some(logMock)) when(logMock.logStartOffset).thenReturn(startOffset) when(logMock.read(ArgumentMatchers.eq(startOffset), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true)) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) - txnMetadata1.state(TransactionState.PREPARE_COMMIT) - txnMetadata1.addPartitions(util.Set.of( + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition]( new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) val records = MemoryRecords.withRecords(startOffset, Compression.NONE, @@ -234,14 +226,14 @@ class TransactionStateManagerTest { when(replicaManager.getLog(topicPartition)).thenReturn(Some(logMock)) when(logMock.logStartOffset).thenReturn(startOffset) when(logMock.read(ArgumentMatchers.eq(startOffset), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true)) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) - txnMetadata1.state(TransactionState.PREPARE_COMMIT) - txnMetadata1.addPartitions(util.Set.of( + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition]( new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) val records = MemoryRecords.withRecords(startOffset, Compression.NONE, @@ -285,44 +277,44 @@ class TransactionStateManagerTest { // generate transaction log messages for two pids traces: // pid1's transaction started with two partitions - txnMetadata1.state(TransactionState.ONGOING) - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), + txnMetadata1.state = Ongoing + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid1's transaction adds three more partitions - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic2", 0), + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic2", 0), new TopicPartition("topic2", 1), new TopicPartition("topic2", 2))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid1's transaction is preparing to commit - txnMetadata1.state(TransactionState.PREPARE_COMMIT) + txnMetadata1.state = PrepareCommit txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) // pid2's transaction started with three partitions - txnMetadata2.state(TransactionState.ONGOING) - txnMetadata2.addPartitions(util.Set.of(new TopicPartition("topic3", 0), + txnMetadata2.state = Ongoing + txnMetadata2.addPartitions(Set[TopicPartition](new TopicPartition("topic3", 0), new TopicPartition("topic3", 1), new TopicPartition("topic3", 2))) txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's transaction is preparing to abort - txnMetadata2.state(TransactionState.PREPARE_ABORT) + txnMetadata2.state = PrepareAbort txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's transaction has aborted - txnMetadata2.state(TransactionState.COMPLETE_ABORT) + txnMetadata2.state = CompleteAbort txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) // pid2's epoch has advanced, with no ongoing transaction yet - txnMetadata2.state(TransactionState.EMPTY) + txnMetadata2.state = Empty txnMetadata2.topicPartitions.clear() txnRecords += new SimpleRecord(txnMessageKeyBytes2, TransactionLog.valueToBytes(txnMetadata2.prepareNoTransit(), TV_2)) @@ -382,7 +374,7 @@ class TransactionStateManagerTest { @Test def testCompleteTransitionWhenAppendSucceeded(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) // first insert the initial transaction metadata transactionManager.putTransactionStateIfNotExists(txnMetadata1) @@ -391,7 +383,7 @@ class TransactionStateManagerTest { expectedError = Errors.NONE // update the metadata to ongoing with two partitions - val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // append the new metadata into log @@ -403,11 +395,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToCoordinatorNotAvailableError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -415,19 +407,19 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_ENOUGH_REPLICAS_AFTER_APPEND) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.REQUEST_TIMED_OUT) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -436,11 +428,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToNotCoordinatorError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.NOT_COORDINATOR - var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NOT_LEADER_OR_FOLLOWER) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -448,30 +440,30 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch + 1, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch + 1, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) } @Test def testAppendFailToCoordinatorLoadingError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_LOAD_IN_PROGRESS - val failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.NONE) transactionManager.removeTransactionsForTxnTopicPartition(partitionId, coordinatorEpoch) @@ -481,11 +473,11 @@ class TransactionStateManagerTest { @Test def testAppendFailToUnknownError(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.UNKNOWN_SERVER_ERROR - var failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + var failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.MESSAGE_TOO_LARGE) val requestLocal = RequestLocal.withThreadConfinedCaching @@ -493,7 +485,7 @@ class TransactionStateManagerTest { assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) assertTrue(txnMetadata1.pendingState.isEmpty) - failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.RECORD_LIST_TOO_LARGE) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, requestLocal = requestLocal) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) @@ -502,21 +494,21 @@ class TransactionStateManagerTest { @Test def testPendingStateNotResetOnRetryAppend(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) expectedError = Errors.COORDINATOR_NOT_AVAILABLE - val failedMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) + val failedMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic2", 0)), time.milliseconds(), TV_0) prepareForTxnMessageAppend(Errors.UNKNOWN_TOPIC_OR_PARTITION) transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, failedMetadata, assertCallback, _ => true, RequestLocal.withThreadConfinedCaching) assertEquals(Right(Some(CoordinatorEpochAndTxnMetadata(coordinatorEpoch, txnMetadata1))), transactionManager.getTransactionState(transactionalId1)) - assertEquals(util.Optional.of(TransactionState.ONGOING), txnMetadata1.pendingState) + assertEquals(Some(Ongoing), txnMetadata1.pendingState) } @Test def testAppendTransactionToLogWhileProducerFenced(): Unit = { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) // first insert the initial transaction metadata transactionManager.putTransactionStateIfNotExists(txnMetadata1) @@ -524,11 +516,11 @@ class TransactionStateManagerTest { prepareForTxnMessageAppend(Errors.NONE) expectedError = Errors.NOT_COORDINATOR - val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata - txnMetadata1.setProducerEpoch((txnMetadata1.producerEpoch + 1).toShort) + txnMetadata1.producerEpoch = (txnMetadata1.producerEpoch + 1).toShort // append the new metadata into log transactionManager.appendTransactionToLog(transactionalId1, coordinatorEpoch = 10, newMetadata, assertCallback, requestLocal = RequestLocal.withThreadConfinedCaching) @@ -537,17 +529,17 @@ class TransactionStateManagerTest { @Test def testAppendTransactionToLogWhilePendingStateChanged(): Unit = { // first insert the initial transaction metadata - transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, coordinatorEpoch, new Pool[String, TransactionMetadata]()) transactionManager.putTransactionStateIfNotExists(txnMetadata1) prepareForTxnMessageAppend(Errors.NONE) expectedError = Errors.INVALID_PRODUCER_EPOCH - val newMetadata = txnMetadata1.prepareAddPartitions(util.Set.of(new TopicPartition("topic1", 0), + val newMetadata = txnMetadata1.prepareAddPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1)), time.milliseconds(), TV_0) // modify the cache while trying to append the new metadata - txnMetadata1.pendingState(util.Optional.empty()) + txnMetadata1.pendingState = None // append the new metadata into log assertThrows(classOf[IllegalStateException], () => transactionManager.appendTransactionToLog(transactionalId1, @@ -568,8 +560,7 @@ class TransactionStateManagerTest { val listResponse = transactionManager.listTransactionStates( filterProducerIds = Set.empty, filterStateNames = Set.empty, - -1L, - null + -1L ) assertEquals(Errors.COORDINATOR_LOAD_IN_PROGRESS, Errors.forCode(listResponse.errorCode)) } @@ -577,7 +568,7 @@ class TransactionStateManagerTest { @Test def testListTransactionsFiltering(): Unit = { for (partitionId <- 0 until numPartitions) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) } def putTransaction( @@ -591,46 +582,42 @@ class TransactionStateManagerTest { } } - putTransaction(transactionalId = "t0", producerId = 0, state = TransactionState.ONGOING) - putTransaction(transactionalId = "t1", producerId = 1, state = TransactionState.ONGOING) - putTransaction(transactionalId = "my-special-0", producerId = 0, state = TransactionState.ONGOING) + putTransaction(transactionalId = "t0", producerId = 0, state = Ongoing) + putTransaction(transactionalId = "t1", producerId = 1, state = Ongoing) // update time to create transactions with various durations time.sleep(1000) - putTransaction(transactionalId = "t2", producerId = 2, state = TransactionState.PREPARE_COMMIT) - putTransaction(transactionalId = "t3", producerId = 3, state = TransactionState.PREPARE_ABORT) - putTransaction(transactionalId = "your-special-1", producerId = 0, state = TransactionState.PREPARE_ABORT) + putTransaction(transactionalId = "t2", producerId = 2, state = PrepareCommit) + putTransaction(transactionalId = "t3", producerId = 3, state = PrepareAbort) time.sleep(1000) - putTransaction(transactionalId = "t4", producerId = 4, state = TransactionState.COMPLETE_COMMIT) - putTransaction(transactionalId = "t5", producerId = 5, state = TransactionState.COMPLETE_ABORT) - putTransaction(transactionalId = "t6", producerId = 6, state = TransactionState.COMPLETE_ABORT) - putTransaction(transactionalId = "t7", producerId = 7, state = TransactionState.PREPARE_EPOCH_FENCE) - putTransaction(transactionalId = "their-special-2", producerId = 7, state = TransactionState.COMPLETE_ABORT) + putTransaction(transactionalId = "t4", producerId = 4, state = CompleteCommit) + putTransaction(transactionalId = "t5", producerId = 5, state = CompleteAbort) + putTransaction(transactionalId = "t6", producerId = 6, state = CompleteAbort) + putTransaction(transactionalId = "t7", producerId = 7, state = PrepareEpochFence) time.sleep(1000) - // Note that `TransactionState.DEAD` transactions are never returned. This is a transient state + // Note that `Dead` transactions are never returned. This is a transient state // which is used when the transaction state is in the process of being deleted // (whether though expiration or coordinator unloading). - putTransaction(transactionalId = "t8", producerId = 8, state = TransactionState.DEAD) + putTransaction(transactionalId = "t8", producerId = 8, state = Dead) def assertListTransactions( expectedTransactionalIds: Set[String], filterProducerIds: Set[Long] = Set.empty, filterStates: Set[String] = Set.empty, - filterDuration: Long = -1L, - filteredTransactionalIdPattern: String = null + filterDuration: Long = -1L ): Unit = { - val listResponse = transactionManager.listTransactionStates(filterProducerIds, filterStates, filterDuration, filteredTransactionalIdPattern) + val listResponse = transactionManager.listTransactionStates(filterProducerIds, filterStates, filterDuration) assertEquals(Errors.NONE, Errors.forCode(listResponse.errorCode)) assertEquals(expectedTransactionalIds, listResponse.transactionStates.asScala.map(_.transactionalId).toSet) val expectedUnknownStates = filterStates.filter(state => TransactionState.fromName(state).isEmpty) assertEquals(expectedUnknownStates, listResponse.unknownStateFilters.asScala.toSet) } - assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "my-special-0", "your-special-1", "their-special-2")) - assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "my-special-0", "your-special-1", "their-special-2"), filterDuration = 0L) - assertListTransactions(Set("t0", "t1", "t2", "t3", "my-special-0", "your-special-1"), filterDuration = 1000L) - assertListTransactions(Set("t0", "t1", "my-special-0"), filterDuration = 2000L) + assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7")) + assertListTransactions(Set("t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7"), filterDuration = 0L) + assertListTransactions(Set("t0", "t1", "t2", "t3"), filterDuration = 1000L) + assertListTransactions(Set("t0", "t1"), filterDuration = 2000L) assertListTransactions(Set(), filterDuration = 3000L) - assertListTransactions(Set("t0", "t1", "my-special-0"), filterStates = Set("Ongoing")) - assertListTransactions(Set("t0", "t1", "my-special-0"), filterStates = Set("Ongoing", "UnknownState")) + assertListTransactions(Set("t0", "t1"), filterStates = Set("Ongoing")) + assertListTransactions(Set("t0", "t1"), filterStates = Set("Ongoing", "UnknownState")) assertListTransactions(Set("t2", "t4"), filterStates = Set("PrepareCommit", "CompleteCommit")) assertListTransactions(Set(), filterStates = Set("UnknownState")) assertListTransactions(Set("t5"), filterProducerIds = Set(5L)) @@ -640,29 +627,20 @@ class TransactionStateManagerTest { assertListTransactions(Set(), filterProducerIds = Set(3L, 6L), filterStates = Set("UnknownState")) assertListTransactions(Set(), filterProducerIds = Set(10L), filterStates = Set("CompleteCommit")) assertListTransactions(Set(), filterStates = Set("Dead")) - assertListTransactions(Set("my-special-0", "your-special-1", "their-special-2"), filteredTransactionalIdPattern = ".*special-.*") - assertListTransactions(Set(), filteredTransactionalIdPattern = "nothing") - assertListTransactions(Set("my-special-0", "your-special-1"), filterDuration = 1000L, filteredTransactionalIdPattern = ".*special-.*") - assertListTransactions(Set("their-special-2"), filterProducerIds = Set(7L), filterStates = Set("CompleteCommit", "CompleteAbort"), filteredTransactionalIdPattern = ".*special-.*") - } - - @Test - def testListTransactionsFilteringWithInvalidPattern(): Unit = { - assertThrows(classOf[InvalidRegularExpression], () => transactionManager.listTransactionStates(Set.empty, Set.empty, -1L, "(ab(cd")) } @Test def shouldOnlyConsiderTransactionsInTheOngoingStateToAbort(): Unit = { for (partitionId <- 0 until numPartitions) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) } - transactionManager.putTransactionStateIfNotExists(transactionMetadata("ongoing", producerId = 0, state = TransactionState.ONGOING)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("not-expiring", producerId = 1, state = TransactionState.ONGOING, txnTimeout = 10000)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-commit", producerId = 2, state = TransactionState.PREPARE_COMMIT)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-abort", producerId = 3, state = TransactionState.PREPARE_ABORT)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-commit", producerId = 4, state = TransactionState.COMPLETE_COMMIT)) - transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-abort", producerId = 5, state = TransactionState.COMPLETE_ABORT)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("ongoing", producerId = 0, state = Ongoing)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("not-expiring", producerId = 1, state = Ongoing, txnTimeout = 10000)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-commit", producerId = 2, state = PrepareCommit)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("prepare-abort", producerId = 3, state = PrepareAbort)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-commit", producerId = 4, state = CompleteCommit)) + transactionManager.putTransactionStateIfNotExists(transactionMetadata("complete-abort", producerId = 5, state = CompleteAbort)) time.sleep(2000) val expiring = transactionManager.timedOutTransactions() @@ -671,59 +649,59 @@ class TransactionStateManagerTest { @Test def shouldWriteTxnMarkersForTransactionInPreparedCommitState(): Unit = { - verifyWritesTxnMarkersInPrepareState(TransactionState.PREPARE_COMMIT) + verifyWritesTxnMarkersInPrepareState(PrepareCommit) } @Test def shouldWriteTxnMarkersForTransactionInPreparedAbortState(): Unit = { - verifyWritesTxnMarkersInPrepareState(TransactionState.PREPARE_ABORT) + verifyWritesTxnMarkersInPrepareState(PrepareAbort) } @Test def shouldRemoveCompleteCommitExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.COMPLETE_COMMIT) + setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteCommit) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldRemoveCompleteAbortExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.COMPLETE_ABORT) + setupAndRunTransactionalIdExpiration(Errors.NONE, CompleteAbort) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldRemoveEmptyExpiredTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.EMPTY) + setupAndRunTransactionalIdExpiration(Errors.NONE, Empty) verifyMetadataDoesntExist(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemoveExpiredTransactionalIdsIfLogAppendFails(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NOT_ENOUGH_REPLICAS, TransactionState.COMPLETE_ABORT) + setupAndRunTransactionalIdExpiration(Errors.NOT_ENOUGH_REPLICAS, CompleteAbort) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemoveOngoingTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.ONGOING) + setupAndRunTransactionalIdExpiration(Errors.NONE, Ongoing) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemovePrepareAbortTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.PREPARE_ABORT) + setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareAbort) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @Test def shouldNotRemovePrepareCommitTransactionalIds(): Unit = { - setupAndRunTransactionalIdExpiration(Errors.NONE, TransactionState.PREPARE_COMMIT) + setupAndRunTransactionalIdExpiration(Errors.NONE, PrepareCommit) verifyMetadataDoesExistAndIsUsable(transactionalId1) verifyMetadataDoesExistAndIsUsable(transactionalId2) } @@ -743,12 +721,10 @@ class TransactionStateManagerTest { reset(replicaManager) expectLogConfig(partitionIds, maxBatchSize) - val attemptedAppends = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] + val attemptedAppends = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.MESSAGE_TOO_LARGE, attemptedAppends) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) transactionManager.removeExpiredTransactionalIds() verify(replicaManager, atLeastOnce()).appendRecords( anyLong(), @@ -757,6 +733,8 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), + any[Option[ReentrantLock]], + any(), any(), any(), any() @@ -787,9 +765,8 @@ class TransactionStateManagerTest { // No log config returned for partition 0 since it is offline when(replicaManager.getLogConfig(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, offlinePartitionId))) .thenReturn(None) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) - val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] + + val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) @@ -801,6 +778,8 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), + any[Option[ReentrantLock]], + any(), any(), any(), any() @@ -829,13 +808,10 @@ class TransactionStateManagerTest { reset(replicaManager) expectLogConfig(partitionIds, maxBatchSize) - val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) assertEquals(allTransactionalIds, listExpirableTransactionalIds()) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) - transactionManager.removeExpiredTransactionalIds() verify(replicaManager, atLeastOnce()).appendRecords( anyLong(), @@ -844,6 +820,8 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), + any[Option[ReentrantLock]], + any(), any(), any(), any()) @@ -876,17 +854,15 @@ class TransactionStateManagerTest { // will be expired and it should succeed. val timestamp = time.milliseconds() val txnMetadata = new TransactionMetadata(transactionalId, 1, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_EPOCH, - RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, TransactionState.EMPTY, util.Set.of, timestamp, timestamp, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, transactionTimeoutMs, Empty, collection.mutable.Set.empty[TopicPartition], timestamp, timestamp, TV_0) transactionManager.putTransactionStateIfNotExists(txnMetadata) time.sleep(txnConfig.transactionalIdExpirationMs + 1) reset(replicaManager) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) expectLogConfig(partitionIds, maxBatchSize) - val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(Errors.NONE, appendedRecords) transactionManager.removeExpiredTransactionalIds() @@ -897,6 +873,8 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), any(), any(), + any[Option[ReentrantLock]], + any(), any(), any(), any() @@ -907,19 +885,16 @@ class TransactionStateManagerTest { } private def collectTransactionalIdsFromTombstones( - appendedRecords: mutable.Map[TopicIdPartition, mutable.Buffer[MemoryRecords]] + appendedRecords: mutable.Map[TopicPartition, mutable.Buffer[MemoryRecords]] ): Set[String] = { val expiredTransactionalIds = mutable.Set.empty[String] appendedRecords.values.foreach { batches => batches.foreach { records => records.records.forEach { record => - TransactionLog.readTxnRecordKey(record.key) match { - case Right(transactionalId) => - assertNull(record.value) - expiredTransactionalIds += transactionalId - assertEquals(Right(None), transactionManager.getTransactionState(transactionalId)) - case Left(value) => fail(s"Failed to read transactional id from tombstone: $value") - } + val transactionalId = TransactionLog.readTxnRecordKey(record.key).transactionalId + assertNull(record.value) + expiredTransactionalIds += transactionalId + assertEquals(Right(None), transactionManager.getTransactionState(transactionalId)) } } } @@ -934,7 +909,7 @@ class TransactionStateManagerTest { val txnlId = s"id_$i" val producerId = i val txnMetadata = transactionMetadata(txnlId, producerId) - txnMetadata.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) + txnMetadata.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs transactionManager.putTransactionStateIfNotExists(txnMetadata) allTransactionalIds += txnlId } @@ -942,7 +917,7 @@ class TransactionStateManagerTest { } private def listExpirableTransactionalIds(): Set[String] = { - val activeTransactionalIds = transactionManager.listTransactionStates(Set.empty, Set.empty, -1L, null) + val activeTransactionalIds = transactionManager.listTransactionStates(Set.empty, Set.empty, -1L) .transactionStates .asScala .map(_.transactionalId) @@ -962,8 +937,8 @@ class TransactionStateManagerTest { @Test def testSuccessfulReimmigration(): Unit = { - txnMetadata1.state(TransactionState.PREPARE_COMMIT) - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -996,9 +971,9 @@ class TransactionStateManagerTest { when(replicaManager.getLog(topicPartition)).thenReturn(Some(logMock)) when(logMock.logStartOffset).thenReturn(startOffset) when(logMock.read(ArgumentMatchers.eq(startOffset), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true)) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) ).thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), MemoryRecords.EMPTY)) when(replicaManager.getLogEndOffset(topicPartition)).thenReturn(Some(endOffset)) @@ -1010,9 +985,9 @@ class TransactionStateManagerTest { verify(replicaManager).getLog(topicPartition) verify(logMock).logStartOffset verify(logMock).read(ArgumentMatchers.eq(startOffset), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true)) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true)) verify(replicaManager, times(2)).getLogEndOffset(topicPartition) assertEquals(0, transactionManager.loadingPartitions.size) } @@ -1029,10 +1004,10 @@ class TransactionStateManagerTest { @Test def testLoadTransactionMetadataContainingSegmentEndingWithEmptyBatch(): Unit = { // Simulate a case where a log contains two segments and the first segment ending with an empty batch. - txnMetadata1.state(TransactionState.PREPARE_COMMIT) - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0))) - txnMetadata2.state(TransactionState.ONGOING) - txnMetadata2.addPartitions(util.Set.of(new TopicPartition("topic2", 0))) + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0))) + txnMetadata2.state = Ongoing + txnMetadata2.addPartitions(Set[TopicPartition](new TopicPartition("topic2", 0))) // Create the first segment which contains two batches. // The first batch has one transactional record @@ -1060,14 +1035,14 @@ class TransactionStateManagerTest { when(logMock.logStartOffset).thenReturn(0L) when(logMock.read(ArgumentMatchers.eq(0L), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true))) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) .thenReturn(new FetchDataInfo(new LogOffsetMetadata(0L), firstSegmentRecords)) when(logMock.read(ArgumentMatchers.eq(2L), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true))) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) .thenReturn(new FetchDataInfo(new LogOffsetMetadata(2L), secondSegmentRecords)) // Load transactions should not stuck. @@ -1078,8 +1053,8 @@ class TransactionStateManagerTest { // all transactions should have been loaded val txnMetadataPool = transactionManager.transactionMetadataCache(partitionId).metadataPerTransactionalId assertEquals(2, txnMetadataPool.size) - assertTrue(txnMetadataPool.containsKey(transactionalId1)) - assertTrue(txnMetadataPool.containsKey(transactionalId2)) + assertTrue(txnMetadataPool.contains(transactionalId1)) + assertTrue(txnMetadataPool.contains(transactionalId2)) } private def verifyMetadataDoesExistAndIsUsable(transactionalId: String): Unit = { @@ -1101,10 +1076,10 @@ class TransactionStateManagerTest { private def expectTransactionalIdExpiration( appendError: Errors, - capturedAppends: mutable.Map[TopicIdPartition, mutable.Buffer[MemoryRecords]] + capturedAppends: mutable.Map[TopicPartition, mutable.Buffer[MemoryRecords]] ): Unit = { - val recordsCapture: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) - val callbackCapture: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val recordsCapture: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val callbackCapture: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords( anyLong(), @@ -1113,6 +1088,8 @@ class TransactionStateManagerTest { ArgumentMatchers.eq(AppendOrigin.COORDINATOR), recordsCapture.capture(), callbackCapture.capture(), + any[Option[ReentrantLock]], + any(), any(), any(), any() @@ -1135,7 +1112,7 @@ class TransactionStateManagerTest { partitionIds: Seq[Int], ): Unit = { for (partitionId <- partitionIds) { - transactionManager.addLoadedTransactionsToCache(partitionId, 0, new ConcurrentHashMap[String, TransactionMetadata]()) + transactionManager.addLoadedTransactionsToCache(partitionId, 0, new Pool[String, TransactionMetadata]()) } } @@ -1156,28 +1133,28 @@ class TransactionStateManagerTest { val partitionIds = 0 until numPartitions loadTransactionsForPartitions(partitionIds) - expectLogConfig(partitionIds, ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT) + expectLogConfig(partitionIds, LogConfig.DEFAULT_MAX_MESSAGE_BYTES) - txnMetadata1.txnLastUpdateTimestamp(time.milliseconds() - txnConfig.transactionalIdExpirationMs) - txnMetadata1.state(txnState) + txnMetadata1.txnLastUpdateTimestamp = time.milliseconds() - txnConfig.transactionalIdExpirationMs + txnMetadata1.state = txnState transactionManager.putTransactionStateIfNotExists(txnMetadata1) - txnMetadata2.txnLastUpdateTimestamp(time.milliseconds()) + txnMetadata2.txnLastUpdateTimestamp = time.milliseconds() transactionManager.putTransactionStateIfNotExists(txnMetadata2) - val appendedRecords = mutable.Map.empty[TopicIdPartition, mutable.Buffer[MemoryRecords]] + val appendedRecords = mutable.Map.empty[TopicPartition, mutable.Buffer[MemoryRecords]] expectTransactionalIdExpiration(error, appendedRecords) transactionManager.removeExpiredTransactionalIds() val stateAllowsExpiration = txnState match { - case TransactionState.EMPTY | TransactionState.COMPLETE_COMMIT | TransactionState.COMPLETE_ABORT => true + case Empty | CompleteCommit | CompleteAbort => true case _ => false } if (stateAllowsExpiration) { val partitionId = transactionManager.partitionFor(transactionalId1) - val topicPartition = new TopicIdPartition(transactionTopicId, partitionId, TRANSACTION_STATE_TOPIC_NAME) + val topicPartition = new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) val expectedTombstone = new SimpleRecord(time.milliseconds(), TransactionLog.keyToBytes(transactionalId1), null) val expectedRecords = MemoryRecords.withRecords(TransactionLog.EnforcedCompression, expectedTombstone) assertEquals(Set(topicPartition), appendedRecords.keySet) @@ -1188,8 +1165,8 @@ class TransactionStateManagerTest { } private def verifyWritesTxnMarkersInPrepareState(state: TransactionState): Unit = { - txnMetadata1.state(state) - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), + txnMetadata1.state = state + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1218,11 +1195,11 @@ class TransactionStateManagerTest { private def transactionMetadata(transactionalId: String, producerId: Long, - state: TransactionState = TransactionState.EMPTY, + state: TransactionState = Empty, txnTimeout: Int = transactionTimeoutMs): TransactionMetadata = { val timestamp = time.milliseconds() new TransactionMetadata(transactionalId, producerId, RecordBatch.NO_PRODUCER_ID, RecordBatch.NO_PRODUCER_ID, 0.toShort, - RecordBatch.NO_PRODUCER_EPOCH, txnTimeout, state, util.Set.of, timestamp, timestamp, TV_0) + RecordBatch.NO_PRODUCER_EPOCH, txnTimeout, state, collection.mutable.Set.empty[TopicPartition], timestamp, timestamp, TV_0) } private def prepareTxnLog(topicPartition: TopicPartition, @@ -1240,9 +1217,9 @@ class TransactionStateManagerTest { when(logMock.logStartOffset).thenReturn(startOffset) when(logMock.read(ArgumentMatchers.eq(startOffset), - anyInt(), - ArgumentMatchers.eq(FetchIsolation.LOG_END), - ArgumentMatchers.eq(true))) + maxLength = anyInt(), + isolation = ArgumentMatchers.eq(FetchIsolation.LOG_END), + minOneMessage = ArgumentMatchers.eq(true))) .thenReturn(new FetchDataInfo(new LogOffsetMetadata(startOffset), fileRecordsMock)) when(fileRecordsMock.sizeInBytes()).thenReturn(records.sizeInBytes) @@ -1258,22 +1235,22 @@ class TransactionStateManagerTest { private def prepareForTxnMessageAppend(error: Errors): Unit = { reset(replicaManager) - val capturedArgument: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val capturedArgument: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords(anyLong(), anyShort(), internalTopicsAllowed = ArgumentMatchers.eq(true), origin = ArgumentMatchers.eq(AppendOrigin.COORDINATOR), - any[Map[TopicIdPartition, MemoryRecords]], + any[Map[TopicPartition, MemoryRecords]], capturedArgument.capture(), + any[Option[ReentrantLock]], + any(), any(), any(), any() )).thenAnswer(_ => capturedArgument.getValue.apply( - Map(new TopicIdPartition(transactionTopicId, partitionId, TRANSACTION_STATE_TOPIC_NAME) -> + Map(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, partitionId) -> new PartitionResponse(error, 0L, RecordBatch.NO_TIMESTAMP, 0L))) ) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 0))).thenReturn(new TopicIdPartition(transactionTopicId, 0, TRANSACTION_STATE_TOPIC_NAME)) - when(replicaManager.topicIdPartition(new TopicPartition(TRANSACTION_STATE_TOPIC_NAME, 1))).thenReturn(new TopicIdPartition(transactionTopicId, 1, TRANSACTION_STATE_TOPIC_NAME)) } @Test @@ -1294,8 +1271,8 @@ class TransactionStateManagerTest { assertEquals(Double.NaN, partitionLoadTime("partition-load-time-avg"), 0) assertTrue(reporter.containsMbean(mBeanName)) - txnMetadata1.state(TransactionState.ONGOING) - txnMetadata1.addPartitions(util.List.of(new TopicPartition("topic1", 1), + txnMetadata1.state = Ongoing + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 1), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1313,8 +1290,8 @@ class TransactionStateManagerTest { @Test def testIgnoreUnknownRecordType(): Unit = { - txnMetadata1.state(TransactionState.PREPARE_COMMIT) - txnMetadata1.addPartitions(util.Set.of(new TopicPartition("topic1", 0), + txnMetadata1.state = PrepareCommit + txnMetadata1.addPartitions(Set[TopicPartition](new TopicPartition("topic1", 0), new TopicPartition("topic1", 1))) txnRecords += new SimpleRecord(txnMessageKeyBytes1, TransactionLog.valueToBytes(txnMetadata1.prepareNoTransit(), TV_2)) @@ -1334,11 +1311,11 @@ class TransactionStateManagerTest { assertTrue(transactionManager.transactionMetadataCache.contains(partitionId)) val txnMetadataPool = transactionManager.transactionMetadataCache(partitionId).metadataPerTransactionalId assertFalse(txnMetadataPool.isEmpty) - assertTrue(txnMetadataPool.containsKey(transactionalId1)) + assertTrue(txnMetadataPool.contains(transactionalId1)) val txnMetadata = txnMetadataPool.get(transactionalId1) assertEquals(txnMetadata1.transactionalId, txnMetadata.transactionalId) assertEquals(txnMetadata1.producerId, txnMetadata.producerId) - assertEquals(txnMetadata1.prevProducerId, txnMetadata.prevProducerId) + assertEquals(txnMetadata1.previousProducerId, txnMetadata.previousProducerId) assertEquals(txnMetadata1.producerEpoch, txnMetadata.producerEpoch) assertEquals(txnMetadata1.lastProducerEpoch, txnMetadata.lastProducerEpoch) assertEquals(txnMetadata1.txnTimeoutMs, txnMetadata.txnTimeoutMs) @@ -1354,8 +1331,10 @@ class TransactionStateManagerTest { when(metadataCache.features()).thenReturn { new FinalizedFeatures( MetadataVersion.latestTesting(), - util.Map.of(TransactionVersion.FEATURE_NAME, transactionVersion.featureLevel()), - 0) + Collections.singletonMap(TransactionVersion.FEATURE_NAME, transactionVersion.featureLevel()), + 0, + true + ) } val transactionManager = new TransactionStateManager(0, scheduler, replicaManager, metadataCache, txnConfig, time, metrics) diff --git a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala index 7f37eeb25a15d..0fbf014374839 100755 --- a/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala +++ b/core/src/test/scala/unit/kafka/integration/KafkaServerTestHarness.scala @@ -34,7 +34,7 @@ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import java.io.File import java.time.Duration import java.util -import java.util.Properties +import java.util.{Collections, Properties} import scala.collection.{Seq, mutable} import scala.jdk.CollectionConverters._ import scala.util.Using @@ -223,9 +223,9 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { result.exception.ifPresent { e => throw e } } val aclFilter = new AclBindingFilter(resource.toFilter, AccessControlEntryFilter.ANY) - (brokers.map(_.authorizerPlugin.get) ++ controllerServers.map(_.authorizerPlugin.get)).foreach { + (brokers.map(_.authorizer.get) ++ controllerServers.map(_.authorizer.get)).foreach { authorizer => waitAndVerifyAcls( - authorizer.get.acls(aclFilter).asScala.map(_.entry).toSet ++ acls, + authorizer.acls(aclFilter).asScala.map(_.entry).toSet ++ acls, authorizer, resource) } } @@ -239,9 +239,9 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { result.exception.ifPresent { e => throw e } } val aclFilter = new AclBindingFilter(resource.toFilter, AccessControlEntryFilter.ANY) - (brokers.map(_.authorizerPlugin.get) ++ controllerServers.map(_.authorizerPlugin.get)).foreach { + (brokers.map(_.authorizer.get) ++ controllerServers.map(_.authorizer.get)).foreach { authorizer => waitAndVerifyAcls( - authorizer.get.acls(aclFilter).asScala.map(_.entry).toSet -- acls, + authorizer.acls(aclFilter).asScala.map(_.entry).toSet -- acls, authorizer, resource) } } @@ -364,7 +364,7 @@ abstract class KafkaServerTestHarness extends QuorumTestHarness { def changeClientIdConfig(sanitizedClientId: String, configs: Properties): Unit = { Using.resource(createAdminClient(brokers, listenerName)) { admin => { - admin.alterClientQuotas(util.Set.of( + admin.alterClientQuotas(Collections.singleton( new ClientQuotaAlteration( new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> (if (sanitizedClientId == "") null else sanitizedClientId)).asJava), configs.asScala.map { case (key, value) => new ClientQuotaAlteration.Op(key, value.toDouble) }.toList.asJava))).all().get() diff --git a/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala b/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala new file mode 100644 index 0000000000000..ada5683405bbb --- /dev/null +++ b/core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala @@ -0,0 +1,158 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.integration + +import java.util.Properties +import kafka.server.KafkaConfig +import kafka.utils.{Logging, TestUtils} + +import scala.jdk.CollectionConverters._ +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import com.yammer.metrics.core.Gauge +import org.apache.kafka.common.test.api.Flaky +import org.apache.kafka.server.config.{ReplicationConfigs, ServerConfigs, ServerLogConfigs} +import org.apache.kafka.server.metrics.KafkaYammerMetrics +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +class MetricsDuringTopicCreationDeletionTest extends KafkaServerTestHarness with Logging { + + private val nodesNum = 3 + private val topicName = "topic" + private val topicNum = 2 + private val replicationFactor = 3 + private val partitionNum = 3 + private val createDeleteIterations = 3 + + private val overridingProps = new Properties + overridingProps.put(ServerConfigs.DELETE_TOPIC_ENABLE_CONFIG, "true") + overridingProps.put(ServerLogConfigs.AUTO_CREATE_TOPICS_ENABLE_CONFIG, "false") + // speed up the test for UnderReplicatedPartitions, which relies on the ISR expiry thread to execute concurrently with topic creation + // But the replica.lag.time.max.ms value still need to consider the slow Jenkins testing environment + overridingProps.put(ReplicationConfigs.REPLICA_LAG_TIME_MAX_MS_CONFIG, "4000") + + private val testedMetrics = List("OfflinePartitionsCount","PreferredReplicaImbalanceCount","UnderReplicatedPartitions") + private val topics = List.tabulate(topicNum) (n => topicName + n) + + @volatile private var running = true + + override def generateConfigs = TestUtils.createBrokerConfigs(nodesNum) + .map(KafkaConfig.fromProps(_, overridingProps)) + + @BeforeEach + override def setUp(testInfo: TestInfo): Unit = { + // Do some Metrics Registry cleanup by removing the metrics that this test checks. + // This is a test workaround to the issue that prior harness runs may have left a populated registry. + // see https://issues.apache.org/jira/browse/KAFKA-4605 + for (m <- testedMetrics) { + val metricName = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.keys.find(_.getName.endsWith(m)) + metricName.foreach(KafkaYammerMetrics.defaultRegistry.removeMetric) + } + + super.setUp(testInfo) + } + + /* + * checking all metrics we care in a single test is faster though it would be more elegant to have 3 @Test methods + */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + @Flaky("KAFKA-18245") + def testMetricsDuringTopicCreateDelete(quorum: String): Unit = { + + // For UnderReplicatedPartitions, because of https://issues.apache.org/jira/browse/KAFKA-4605 + // we can't access the metrics value of each server. So instead we directly invoke the method + // replicaManager.underReplicatedPartitionCount() that defines the metrics value. + @volatile var underReplicatedPartitionCount = 0 + + // For OfflinePartitionsCount and PreferredReplicaImbalanceCount even with https://issues.apache.org/jira/browse/KAFKA-4605 + // the test has worked reliably because the metric that gets triggered is the one generated by the first started server (controller) + val offlinePartitionsCountGauge = getGauge("OfflinePartitionsCount") + @volatile var offlinePartitionsCount = offlinePartitionsCountGauge.value + assert(offlinePartitionsCount == 0) + + val preferredReplicaImbalanceCountGauge = getGauge("PreferredReplicaImbalanceCount") + @volatile var preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value + assert(preferredReplicaImbalanceCount == 0) + + // Thread checking the metric continuously + running = true + val thread = new Thread(() => { + while (running) { + for (s <- servers if running) { + underReplicatedPartitionCount = s.replicaManager.underReplicatedPartitionCount + if (underReplicatedPartitionCount > 0) { + running = false + } + } + + preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value + if (preferredReplicaImbalanceCount > 0) { + running = false + } + + offlinePartitionsCount = offlinePartitionsCountGauge.value + if (offlinePartitionsCount > 0) { + running = false + } + } + }) + thread.start() + + // breakable loop that creates and deletes topics + createDeleteTopics() + + // if the thread checking the gauge is still run, stop it + running = false + thread.join() + + assert(offlinePartitionsCount==0, s"Expect offlinePartitionsCount to be 0, but got: $offlinePartitionsCount") + assert(preferredReplicaImbalanceCount==0, s"Expect PreferredReplicaImbalanceCount to be 0, but got: $preferredReplicaImbalanceCount") + assert(underReplicatedPartitionCount==0, s"Expect UnderReplicatedPartitionCount to be 0, but got: $underReplicatedPartitionCount") + } + + private def getGauge(metricName: String) = { + KafkaYammerMetrics.defaultRegistry.allMetrics.asScala + .find { case (k, _) => k.getName.endsWith(metricName) } + .getOrElse(throw new AssertionError( "Unable to find metric " + metricName)) + ._2.asInstanceOf[Gauge[Int]] + } + + private def createDeleteTopics(): Unit = { + for (l <- 1 to createDeleteIterations if running) { + // Create topics + for (t <- topics if running) { + try { + createTopic(t, partitionNum, replicationFactor) + } catch { + case e: Exception => e.printStackTrace() + } + } + + // Delete topics + for (t <- topics if running) { + try { + deleteTopic(t) + TestUtils.verifyTopicDeletion(t, partitionNum, servers) + } catch { + case e: Exception => e.printStackTrace() + } + } + } + } +} diff --git a/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala b/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala new file mode 100644 index 0000000000000..fb981369e6b66 --- /dev/null +++ b/core/src/test/scala/unit/kafka/integration/MinIsrConfigTest.scala @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.integration + +import java.util.Properties +import scala.collection.Seq + +import kafka.server.KafkaConfig +import kafka.utils.TestUtils +import org.apache.kafka.server.config.ServerLogConfigs +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +class MinIsrConfigTest extends KafkaServerTestHarness { + val overridingProps = new Properties() + overridingProps.put(ServerLogConfigs.MIN_IN_SYNC_REPLICAS_CONFIG, "5") + def generateConfigs: Seq[KafkaConfig] = TestUtils.createBrokerConfigs(1).map(KafkaConfig.fromProps(_, overridingProps)) + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultKafkaConfig(quorum: String): Unit = { + assert(brokers.head.logManager.initialDefaultConfig.minInSyncReplicas == 5) + } +} diff --git a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala index 03944faaefeec..e08e5775c9058 100755 --- a/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala +++ b/core/src/test/scala/unit/kafka/integration/UncleanLeaderElectionTest.scala @@ -17,13 +17,12 @@ package kafka.integration -import java.util import java.util.Properties import java.util.concurrent.ExecutionException import scala.util.Random import scala.jdk.CollectionConverters._ import scala.collection.{Map, Seq} -import kafka.server.{KafkaBroker, KafkaConfig, QuorumTestHarness} +import kafka.server.{KafkaBroker, KafkaConfig, MetadataCache, QuorumTestHarness} import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} import kafka.utils.TestUtils._ import org.apache.kafka.common.TopicPartition @@ -32,8 +31,7 @@ import org.apache.kafka.common.errors.{InvalidConfigurationException, TimeoutExc import org.apache.kafka.common.serialization.StringDeserializer import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol -import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry, FeatureUpdate, UpdateFeaturesOptions} -import org.apache.kafka.metadata.MetadataCache +import org.apache.kafka.clients.admin.{Admin, AdminClientConfig, AlterConfigOp, AlterConfigsResult, ConfigEntry} import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.logging.log4j.{Level, LogManager} @@ -43,7 +41,6 @@ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.MethodSource import com.yammer.metrics.core.Meter import org.apache.kafka.metadata.LeaderConstants -import org.apache.kafka.server.common.MetadataVersion import org.apache.logging.log4j.core.config.Configurator class UncleanLeaderElectionTest extends QuorumTestHarness { @@ -121,34 +118,24 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { admin = TestUtils.createAdminClient(brokers, ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), adminConfigs) } - private def disableEligibleLeaderReplicas(): Unit = { - if (metadataVersion.isAtLeast(MetadataVersion.IBP_4_1_IV0)) { - admin.updateFeatures( - util.Map.of("eligible.leader.replicas.version", new FeatureUpdate(0, FeatureUpdate.UpgradeType.SAFE_DOWNGRADE)), - new UpdateFeaturesOptions()).all().get() - } - } - - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionEnabled(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionEnabled(quorum: String, groupProtocol: String): Unit = { // enable unclean leader election configProps1.put("unclean.leader.election.enable", "true") configProps2.put("unclean.leader.election.enable", "true") startBrokers(Seq(configProps1, configProps2)) - disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) verifyUncleanLeaderElectionEnabled() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionDisabled(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionDisabled(quorum: String, groupProtocol: String): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) - disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) @@ -156,14 +143,13 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { verifyUncleanLeaderElectionDisabled() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionEnabledByTopicOverride(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionEnabledByTopicOverride(quorum: String, groupProtocol: String): Unit = { // disable unclean leader election globally, but enable for our specific test topic configProps1.put("unclean.leader.election.enable", "false") configProps2.put("unclean.leader.election.enable", "false") startBrokers(Seq(configProps1, configProps2)) - disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election enabled val topicProps = new Properties() @@ -173,14 +159,13 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { verifyUncleanLeaderElectionEnabled() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionDisabledByTopicOverride(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionDisabledByTopicOverride(quorum: String, groupProtocol: String): Unit = { // enable unclean leader election globally, but disable for our specific test topic configProps1.put("unclean.leader.election.enable", "true") configProps2.put("unclean.leader.election.enable", "true") startBrokers(Seq(configProps1, configProps2)) - disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker, and unclean leader election disabled val topicProps = new Properties() @@ -190,11 +175,10 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { verifyUncleanLeaderElectionDisabled() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testUncleanLeaderElectionInvalidTopicOverride(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testUncleanLeaderElectionInvalidTopicOverride(quorum: String, groupProtocol: String): Unit = { startBrokers(Seq(configProps1)) - disableEligibleLeaderReplicas() // create topic with an invalid value for unclean leader election val topicProps = new Properties() @@ -299,7 +283,7 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { //make sure follower server joins the ISR TestUtils.waitUntilTrue(() => { val partitionInfoOpt = followerServer.metadataCache.getLeaderAndIsr(topic, partitionId) - partitionInfoOpt.isPresent() && partitionInfoOpt.get.isr.contains(followerId) + partitionInfoOpt.isDefined && partitionInfoOpt.get.isr.contains(followerId) }, "Inconsistent metadata after first server startup") brokers.filter(_.config.brokerId == leaderId).map(shutdownBroker) @@ -332,18 +316,17 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { valueDeserializer = new StringDeserializer) try { val tp = new TopicPartition(topic, partitionId) - consumer.assign(util.List.of(tp)) + consumer.assign(Seq(tp).asJava) consumer.seek(tp, 0) TestUtils.consumeRecords(consumer, numMessages).map(_.value) } finally consumer.close() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testTopicUncleanLeaderElectionEnableWithAlterTopicConfigs(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testTopicUncleanLeaderElectionEnableWithAlterTopicConfigs(quorum: String, groupProtocol: String): Unit = { // unclean leader election is disabled by default startBrokers(Seq(configProps1, configProps2)) - disableEligibleLeaderReplicas() // create topic with 1 partition, 2 replicas, one on each broker TestUtils.createTopicWithAdmin(admin, topic, brokers, controllerServers, replicaAssignment = Map(partitionId -> Seq(brokerId1, brokerId2))) @@ -426,14 +409,10 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { } private def alterTopicConfigs(adminClient: Admin, topic: String, topicConfigs: Properties): AlterConfigsResult = { - val configResource = new ConfigResource(ConfigResource.Type.TOPIC, topic) - - val configEntries = topicConfigs.entrySet().stream() - .map(e => new ConfigEntry(e.getKey.toString, e.getValue.toString)) - .map(e => new AlterConfigOp(e, AlterConfigOp.OpType.SET)) - .toList - - adminClient.incrementalAlterConfigs(util.Map.of(configResource, configEntries)) + val configEntries = topicConfigs.asScala.map { case (k, v) => new ConfigEntry(k, v) }.toList.asJava + adminClient.incrementalAlterConfigs(Map(new ConfigResource(ConfigResource.Type.TOPIC, topic) -> + configEntries.asScala.map((e: ConfigEntry) => new AlterConfigOp(e, AlterConfigOp.OpType.SET)).toSeq + .asJavaCollection).asJava) } private def createAdminClient(): Admin = { @@ -445,9 +424,9 @@ class UncleanLeaderElectionTest extends QuorumTestHarness { } private def waitForNoLeaderAndIsrHasOldLeaderId(metadataCache: MetadataCache, leaderId: Int): Unit = { - waitUntilTrue(() => metadataCache.getLeaderAndIsr(topic, partitionId).isPresent() && + waitUntilTrue(() => metadataCache.getLeaderAndIsr(topic, partitionId).isDefined && metadataCache.getLeaderAndIsr(topic, partitionId).get.leader() == LeaderConstants.NO_LEADER && - util.List.of(leaderId).equals(metadataCache.getLeaderAndIsr(topic, partitionId).get.isr()), + java.util.Arrays.asList(leaderId).equals(metadataCache.getLeaderAndIsr(topic, partitionId).get.isr()), "Timed out waiting for broker metadata cache updates the info for topic partition:" + topicPartition) } } diff --git a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala index b1e161b975340..e0a6724d081e9 100644 --- a/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/AbstractLogCleanerIntegrationTest.scala @@ -16,7 +16,10 @@ */ package kafka.log -import kafka.utils.TestUtils +import java.io.File +import java.nio.file.Files +import java.util.Properties +import kafka.utils.{Pool, TestUtils} import kafka.utils.Implicits._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression @@ -25,14 +28,10 @@ import org.apache.kafka.common.record.{MemoryRecords, RecordBatch, RecordVersion import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.{AfterEach, Tag} -import java.io.File -import java.nio.file.Files -import java.util -import java.util.{Optional, Properties} import scala.collection.Seq import scala.collection.mutable.ListBuffer import scala.util.Random @@ -71,7 +70,7 @@ abstract class AbstractLogCleanerIntegrationTest { maxCompactionLagMs: Long = defaultMaxCompactionLagMs): Properties = { val props = new Properties() props.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, maxMessageSize: java.lang.Integer) - props.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentSize: java.lang.Integer) + props.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize: java.lang.Integer) props.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 100*1024: java.lang.Integer) props.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, deleteDelay: java.lang.Integer) props.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) @@ -94,7 +93,7 @@ abstract class AbstractLogCleanerIntegrationTest { cleanerIoBufferSize: Option[Int] = None, propertyOverrides: Properties = new Properties()): LogCleaner = { - val logMap = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + val logMap = new Pool[TopicPartition, UnifiedLog]() for (partition <- partitions) { val dir = new File(logDir, s"${partition.topic}-${partition.partition}") Files.createDirectories(dir.toPath) @@ -106,20 +105,20 @@ abstract class AbstractLogCleanerIntegrationTest { deleteDelay = deleteDelay, segmentSize = segmentSize, maxCompactionLagMs = maxCompactionLagMs)) - val log = UnifiedLog.create( - dir, - logConfig, - 0L, - 0L, - time.scheduler, - new BrokerTopicStats, - time, - 5 * 60 * 1000, - new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty) + val log = UnifiedLog( + dir = dir, + config = logConfig, + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true) logMap.put(partition, log) this.logs += log } @@ -134,10 +133,10 @@ abstract class AbstractLogCleanerIntegrationTest { backoffMs, true) new LogCleaner(cleanerConfig, - util.List.of(logDir), - logMap, - new LogDirFailureChannel(1), - time) + logDirs = Array(logDir), + logs = logMap, + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) } private var ctr = 0 @@ -149,7 +148,7 @@ abstract class AbstractLogCleanerIntegrationTest { for (_ <- 0 until numDups; key <- startKey until (startKey + numKeys)) yield { val value = counter.toString val appendInfo = log.appendAsLeaderWithRecordVersion(TestUtils.singletonRecords(value = value.getBytes, codec = codec, - key = key.toString.getBytes, magicValue = magicValue), 0, RecordVersion.lookup(magicValue)) + key = key.toString.getBytes, magicValue = magicValue), leaderEpoch = 0, recordVersion = RecordVersion.lookup(magicValue)) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) incCounter() diff --git a/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala b/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala new file mode 100644 index 0000000000000..87f56a9c25074 --- /dev/null +++ b/core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala @@ -0,0 +1,106 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.log + +import kafka.utils._ +import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.record.{CompressionType, MemoryRecords, RecordBatch, SimpleRecord} +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.server.record.BrokerCompressionType +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.util.MockTime +import org.apache.kafka.storage.internals.log.{LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} +import org.apache.kafka.storage.log.metrics.BrokerTopicStats +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.{Arguments, MethodSource} + +import java.util.Properties + +class BrokerCompressionTest { + + val tmpDir = TestUtils.tempDir() + val logDir = TestUtils.randomPartitionLogDir(tmpDir) + val time = new MockTime(0, 0) + val logConfig = new LogConfig(new Properties) + + @AfterEach + def tearDown(): Unit = { + Utils.delete(tmpDir) + } + + /** + * Test broker-side compression configuration + */ + @ParameterizedTest + @MethodSource(Array("parameters")) + def testBrokerSideCompression(messageCompressionType: CompressionType, brokerCompressionType: BrokerCompressionType): Unit = { + val messageCompression = Compression.of(messageCompressionType).build() + val logProps = new Properties() + logProps.put(TopicConfig.COMPRESSION_TYPE_CONFIG, brokerCompressionType.name) + /*configure broker-side compression */ + val log = UnifiedLog( + dir = logDir, + config = new LogConfig(logProps), + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true + ) + + /* append two messages */ + log.appendAsLeader(MemoryRecords.withRecords(messageCompression, 0, + new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), leaderEpoch = 0) + + def readBatch(offset: Int): RecordBatch = { + val fetchInfo = log.read(offset, + maxLength = 4096, + isolation = FetchIsolation.LOG_END, + minOneMessage = true) + fetchInfo.records.batches.iterator.next() + } + + if (brokerCompressionType != BrokerCompressionType.PRODUCER) { + val targetCompression = BrokerCompressionType.targetCompression(log.config.compression, null) + assertEquals(targetCompression.`type`(), readBatch(0).compressionType, "Compression at offset 0 should produce " + brokerCompressionType) + } + else + assertEquals(messageCompressionType, readBatch(0).compressionType, "Compression at offset 0 should produce " + messageCompressionType) + } + +} + +object BrokerCompressionTest { + def parameters: java.util.stream.Stream[Arguments] = { + java.util.Arrays.stream( + for (brokerCompression <- BrokerCompressionType.values; + messageCompression <- CompressionType.values + ) yield Arguments.of(messageCompression, brokerCompression) + ) + } +} diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerIntegrationTest.scala index 38aa789729a9a..cecf2c326b61f 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerIntegrationTest.scala @@ -25,7 +25,6 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{LogCleanerManager, UnifiedLog} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} @@ -78,8 +77,8 @@ class LogCleanerIntegrationTest extends AbstractLogCleanerIntegrationTest { val uncleanableBytesGauge = getGauge[Long]("uncleanable-bytes", uncleanableDirectory) TestUtils.waitUntilTrue(() => uncleanablePartitionsCountGauge.value() == 2, "There should be 2 uncleanable partitions", 2000L) - val expectedTotalUncleanableBytes = LogCleanerManager.calculateCleanableBytes(log, 0, log.logSegments.asScala.last.baseOffset).getValue + - LogCleanerManager.calculateCleanableBytes(log2, 0, log2.logSegments.asScala.last.baseOffset).getValue + val expectedTotalUncleanableBytes = LogCleanerManager.calculateCleanableBytes(log, 0, log.logSegments.asScala.last.baseOffset)._2 + + LogCleanerManager.calculateCleanableBytes(log2, 0, log2.logSegments.asScala.last.baseOffset)._2 TestUtils.waitUntilTrue(() => uncleanableBytesGauge.value() == expectedTotalUncleanableBytes, s"There should be $expectedTotalUncleanableBytes uncleanable bytes", 1000L) @@ -169,10 +168,10 @@ class LogCleanerIntegrationTest extends AbstractLogCleanerIntegrationTest { val firstBlockCleanableSegmentOffset = activeSegAtT0.baseOffset // the first block should get cleaned - cleaner.awaitCleaned(new TopicPartition("log", 0), firstBlockCleanableSegmentOffset, 60000L) + cleaner.awaitCleaned(new TopicPartition("log", 0), firstBlockCleanableSegmentOffset) val read1 = readFromLog(log) - val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints.get(new TopicPartition("log", 0)) + val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(new TopicPartition("log", 0)) assertTrue(lastCleaned >= firstBlockCleanableSegmentOffset, s"log cleaner should have processed at least to offset $firstBlockCleanableSegmentOffset, but lastCleaned=$lastCleaned") @@ -181,13 +180,13 @@ class LogCleanerIntegrationTest extends AbstractLogCleanerIntegrationTest { time.sleep(maxCompactionLagMs + 1) // the second block should get cleaned. only zero keys left - cleaner.awaitCleaned(new TopicPartition("log", 0), activeSegAtT1.baseOffset, 60000L) + cleaner.awaitCleaned(new TopicPartition("log", 0), activeSegAtT1.baseOffset) val read2 = readFromLog(log) assertEquals(appends1, read2, s"log should only contains zero keys now") - val lastCleaned2 = cleaner.cleanerManager.allCleanerCheckpoints.get(new TopicPartition("log", 0)) + val lastCleaned2 = cleaner.cleanerManager.allCleanerCheckpoints(new TopicPartition("log", 0)) val secondBlockCleanableSegmentOffset = activeSegAtT1.baseOffset assertTrue(lastCleaned2 >= secondBlockCleanableSegmentOffset, s"log cleaner should have processed at least to offset $secondBlockCleanableSegmentOffset, but lastCleaned=$lastCleaned2") @@ -207,7 +206,7 @@ class LogCleanerIntegrationTest extends AbstractLogCleanerIntegrationTest { for (_ <- 0 until numDups; key <- 0 until numKeys) yield { val curValue = valCounter log.appendAsLeader(TestUtils.singletonRecords(value = curValue.toString.getBytes, codec = codec, - key = key.toString.getBytes, timestamp = timestamp), 0) + key = key.toString.getBytes, timestamp = timestamp), leaderEpoch = 0) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) valCounter += step @@ -222,10 +221,10 @@ class LogCleanerIntegrationTest extends AbstractLogCleanerIntegrationTest { cleaner.startup() assertEquals(0, cleaner.deadThreadCount) // we simulate the unexpected error with an interrupt - cleaner.cleaners.forEach(_.interrupt()) + cleaner.cleaners.foreach(_.interrupt()) // wait until interruption is propagated to all the threads TestUtils.waitUntilTrue( - () => cleaner.cleaners.asScala.foldLeft(true)((result, thread) => { + () => cleaner.cleaners.foldLeft(true)((result, thread) => { thread.isThreadFailed && result }), "Threads didn't terminate unexpectedly" ) diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerLagIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerLagIntegrationTest.scala index c632f2c0bf1dd..1cb35e10bb5b3 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerLagIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerLagIntegrationTest.scala @@ -22,7 +22,6 @@ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.record.CompressionType import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.UnifiedLog import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} @@ -63,7 +62,7 @@ class LogCleanerLagIntegrationTest extends AbstractLogCleanerIntegrationTest wit val activeSegAtT0 = log.activeSegment debug(s"active segment at T0 has base offset: ${activeSegAtT0.baseOffset}") - val sizeUpToActiveSegmentAtT0 = log.logSegments(0L, activeSegAtT0.baseOffset).asScala.map(_.size).sum + val sizeUpToActiveSegmentAtT0 = log.logSegments(0L, activeSegAtT0.baseOffset).map(_.size).sum debug(s"log size up to base offset of active segment at T0: $sizeUpToActiveSegmentAtT0") cleaner.startup() @@ -84,15 +83,15 @@ class LogCleanerLagIntegrationTest extends AbstractLogCleanerIntegrationTest wit val firstBlock1SegmentBaseOffset = activeSegAtT0.baseOffset // the first block should get cleaned - cleaner.awaitCleaned(new TopicPartition("log", 0), activeSegAtT0.baseOffset, 60000L) + cleaner.awaitCleaned(new TopicPartition("log", 0), activeSegAtT0.baseOffset) // check the data is the same val read1 = readFromLog(log) assertEquals(appends1.toMap, read1.toMap, "Contents of the map shouldn't change.") - val compactedSize = log.logSegments(0L, activeSegAtT0.baseOffset).asScala.map(_.size).sum + val compactedSize = log.logSegments(0L, activeSegAtT0.baseOffset).map(_.size).sum debug(s"after cleaning the compacted size up to active segment at T0: $compactedSize") - val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints.get(new TopicPartition("log", 0)) + val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(new TopicPartition("log", 0)) assertTrue(lastCleaned >= firstBlock1SegmentBaseOffset, s"log cleaner should have processed up to offset $firstBlock1SegmentBaseOffset, but lastCleaned=$lastCleaned") assertTrue(sizeUpToActiveSegmentAtT0 > compactedSize, s"log should have been compacted: size up to offset of active segment at T0=$sizeUpToActiveSegmentAtT0 compacted size=$compactedSize") } @@ -109,7 +108,7 @@ class LogCleanerLagIntegrationTest extends AbstractLogCleanerIntegrationTest wit for (_ <- 0 until numDups; key <- 0 until numKeys) yield { val count = counter log.appendAsLeader(TestUtils.singletonRecords(value = counter.toString.getBytes, codec = codec, - key = key.toString.getBytes, timestamp = timestamp), 0) + key = key.toString.getBytes, timestamp = timestamp), leaderEpoch = 0) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) incCounter() diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala index dc9a1d0928a7a..796536780b150 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerManagerTest.scala @@ -19,7 +19,7 @@ package kafka.log import java.io.File import java.nio.file.Files -import java.util.{Optional, Properties} +import java.util.Properties import kafka.utils._ import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression @@ -28,8 +28,7 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.LogCleaningState.{LOG_CLEANING_ABORTED, LOG_CLEANING_IN_PROGRESS} -import org.apache.kafka.storage.internals.log.{AppendOrigin, LocalLog, LogCleanerManager, LogCleaningException, LogCleaningState, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, PreCleanStats, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LocalLog, LogConfig, LogDirFailureChannel, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} @@ -37,8 +36,7 @@ import org.junit.jupiter.api.{AfterEach, Test} import java.lang.{Long => JLong} import java.util import java.util.concurrent.ConcurrentHashMap -import java.util.stream.Collectors -import scala.jdk.OptionConverters.RichOptional +import scala.collection.mutable /** * Unit tests for the log cleaning logic @@ -48,11 +46,11 @@ class LogCleanerManagerTest extends Logging { val tmpDir: File = TestUtils.tempDir() val tmpDir2: File = TestUtils.tempDir() val logDir: File = TestUtils.randomPartitionLogDir(tmpDir) - val logDir2: File = TestUtils.randomPartitionLogDir(tmpDir2) + val logDir2: File = TestUtils.randomPartitionLogDir(tmpDir) val topicPartition = new TopicPartition("log", 0) val topicPartition2 = new TopicPartition("log2", 0) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig: LogConfig = new LogConfig(logProps) @@ -60,21 +58,21 @@ class LogCleanerManagerTest extends Logging { val offset = 999 val producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false) - val cleanerCheckpoints: util.HashMap[TopicPartition, JLong] = new util.HashMap[TopicPartition, JLong]() + val cleanerCheckpoints: mutable.Map[TopicPartition, Long] = mutable.Map[TopicPartition, Long]() - class LogCleanerManagerMock(logDirs: util.List[File], - logs: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog], + class LogCleanerManagerMock(logDirs: Seq[File], + logs: Pool[TopicPartition, UnifiedLog], logDirFailureChannel: LogDirFailureChannel) extends LogCleanerManager(logDirs, logs, logDirFailureChannel) { - override def allCleanerCheckpoints: util.Map[TopicPartition, JLong] = { - cleanerCheckpoints + override def allCleanerCheckpoints: Map[TopicPartition, Long] = { + cleanerCheckpoints.toMap } - override def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Optional[util.Map.Entry[TopicPartition, JLong]], - partitionToRemove: Optional[TopicPartition]): Unit = { + override def updateCheckpoints(dataDir: File, partitionToUpdateOrAdd: Option[(TopicPartition, JLong)] = None, + partitionToRemove: Option[TopicPartition] = None): Unit = { assert(partitionToRemove.isEmpty, "partitionToRemove argument with value not yet handled") - val entry = partitionToUpdateOrAdd.orElseThrow(() => - new IllegalArgumentException("partitionToUpdateOrAdd==None argument not yet handled")) - cleanerCheckpoints.put(entry.getKey, entry.getValue) + val (tp, offset) = partitionToUpdateOrAdd.getOrElse( + throw new IllegalArgumentException("partitionToUpdateOrAdd==None argument not yet handled")) + cleanerCheckpoints.put(tp, offset) } } @@ -85,8 +83,8 @@ class LogCleanerManagerTest extends Logging { private def setupIncreasinglyFilthyLogs(partitions: Seq[TopicPartition], startNumBatches: Int, - batchIncrement: Int): util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog] = { - val logs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + batchIncrement: Int): Pool[TopicPartition, UnifiedLog] = { + val logs = new Pool[TopicPartition, UnifiedLog]() var numBatches = startNumBatches for (tp <- partitions) { @@ -112,7 +110,7 @@ class LogCleanerManagerTest extends Logging { val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val segments = new LogSegments(tp) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - tpDir, topicPartition, logDirFailureChannel, Optional.empty, time.scheduler) + tpDir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, tpDir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( tpDir, @@ -134,8 +132,8 @@ class LogCleanerManagerTest extends Logging { offsets.nextOffsetMetadata, time.scheduler, time, tp, logDirFailureChannel) // the exception should be caught and the partition that caused it marked as uncleanable class LogMock extends UnifiedLog(offsets.logStartOffset, localLog, new BrokerTopicStats, - producerIdExpirationCheckIntervalMs, leaderEpochCache, - producerStateManager, Optional.empty, false, LogOffsetsListener.NO_OP_OFFSETS_LISTENER) { + producerIdExpirationCheckIntervalMs, leaderEpochCache, + producerStateManager, _topicId = None, keepPartitionMetadataFile = true) { // Throw an error in getFirstBatchTimestampForSegments since it is called in grabFilthiestLog() override def getFirstBatchTimestampForSegments(segments: util.Collection[LogSegment]): util.Collection[java.lang.Long] = throw new IllegalStateException("Error!") @@ -148,12 +146,12 @@ class LogCleanerManagerTest extends Logging { batchesPerSegment = 2 ) - val logsPool = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + val logsPool = new Pool[TopicPartition, UnifiedLog]() logsPool.put(tp, log) val cleanerManager = createCleanerManagerMock(logsPool) cleanerCheckpoints.put(tp, 1) - val thrownException = assertThrows(classOf[LogCleaningException], () => cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get) + val thrownException = assertThrows(classOf[LogCleaningException], () => cleanerManager.grabFilthiestCompactedLog(time).get) assertEquals(log, thrownException.log) assertTrue(thrownException.getCause.isInstanceOf[IllegalStateException]) } @@ -170,7 +168,7 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) partitions.foreach(partition => cleanerCheckpoints.put(partition, 20)) - val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(tp2, filthiestLog.topicPartition) assertEquals(tp2, filthiestLog.log.topicPartition) } @@ -189,7 +187,7 @@ class LogCleanerManagerTest extends Logging { cleanerManager.markPartitionUncleanable(logs.get(tp2).dir.getParent, tp2) - val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(tp1, filthiestLog.topicPartition) assertEquals(tp1, filthiestLog.log.topicPartition) } @@ -206,9 +204,9 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) partitions.foreach(partition => cleanerCheckpoints.put(partition, 20)) - cleanerManager.setCleaningState(tp2, LOG_CLEANING_IN_PROGRESS) + cleanerManager.setCleaningState(tp2, LogCleaningInProgress) - val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog: LogToClean = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(tp1, filthiestLog.topicPartition) assertEquals(tp1, filthiestLog.log.topicPartition) } @@ -225,11 +223,11 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) partitions.foreach(partition => cleanerCheckpoints.put(partition, 20)) - cleanerManager.setCleaningState(tp2, LOG_CLEANING_IN_PROGRESS) + cleanerManager.setCleaningState(tp2, LogCleaningInProgress) cleanerManager.markPartitionUncleanable(logs.get(tp1).dir.getParent, tp1) - val filthiestLog: Optional[LogToClean] = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()) - assertEquals(Optional.empty(), filthiestLog) + val filthiestLog: Option[LogToClean] = cleanerManager.grabFilthiestCompactedLog(time) + assertEquals(None, filthiestLog) } @Test @@ -239,7 +237,7 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) cleanerCheckpoints.put(tp, 200) - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(0L, filthiestLog.firstDirtyOffset) } @@ -253,7 +251,7 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) cleanerCheckpoints.put(tp, 0L) - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(10L, filthiestLog.firstDirtyOffset) } @@ -262,7 +260,7 @@ class LogCleanerManagerTest extends Logging { val tp = new TopicPartition("foo", 0) val log = createLog(segmentSize = 2048, TopicConfig.CLEANUP_POLICY_COMPACT, tp) - val logs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + val logs = new Pool[TopicPartition, UnifiedLog]() logs.put(tp, log) appendRecords(log, numRecords = 3) @@ -277,8 +275,8 @@ class LogCleanerManagerTest extends Logging { cleanerCheckpoints.put(tp, 0L) // The active segment is uncleanable and hence not filthy from the POV of the CleanerManager. - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()) - assertEquals(Optional.empty(), filthiestLog) + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time) + assertEquals(None, filthiestLog) } @Test @@ -289,7 +287,7 @@ class LogCleanerManagerTest extends Logging { val tp = new TopicPartition("foo", 0) - val logs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + val logs = new Pool[TopicPartition, UnifiedLog]() val log = createLog(2048, TopicConfig.CLEANUP_POLICY_COMPACT, topicPartition = tp) logs.put(tp, log) @@ -303,8 +301,8 @@ class LogCleanerManagerTest extends Logging { cleanerCheckpoints.put(tp, 3L) // These segments are uncleanable and hence not filthy - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()) - assertEquals(Optional.empty(), filthiestLog) + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time) + assertEquals(None, filthiestLog) } /** @@ -358,9 +356,9 @@ class LogCleanerManagerTest extends Logging { val log: UnifiedLog = createLog(records.sizeInBytes * 5, TopicConfig.CLEANUP_POLICY_DELETE) val cleanerManager: LogCleanerManager = createCleanerManager(log) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.roll() - log.appendAsLeader(TestUtils.singletonRecords("test2".getBytes, key="test2".getBytes), 0) + log.appendAsLeader(records, leaderEpoch = 0) log.updateHighWatermark(2L) // simulate cleanup thread working on the log partition @@ -369,7 +367,7 @@ class LogCleanerManagerTest extends Logging { // change cleanup policy from delete to compact val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, log.config.segmentSize(): Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, log.config.segmentSize: Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, log.config.retentionMs: java.lang.Long) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) logProps.put(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, 0: Integer) @@ -377,16 +375,12 @@ class LogCleanerManagerTest extends Logging { log.updateConfig(config) // log cleanup inprogress, the log is not available for compaction - val cleanable = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).toScala + val cleanable = cleanerManager.grabFilthiestCompactedLog(time) assertEquals(0, cleanable.size, "should have 0 logs ready to be compacted") // log cleanup finished, and log can be picked up for compaction - cleanerManager.resumeCleaning( - deletableLog.stream() - .map[TopicPartition](entry => entry.getKey) - .collect(Collectors.toSet[TopicPartition]()) - ) - val cleanable2 = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).toScala + cleanerManager.resumeCleaning(deletableLog.map(_._1)) + val cleanable2 = cleanerManager.grabFilthiestCompactedLog(time) assertEquals(1, cleanable2.size, "should have 1 logs ready to be compacted") // update cleanup policy to delete @@ -399,7 +393,7 @@ class LogCleanerManagerTest extends Logging { assertEquals(0, deletableLog2.size, "should have 0 logs ready to be deleted") // compaction done, should have 1 log eligible for log cleanup - cleanerManager.doneDeleting(util.List.of(cleanable2.get.topicPartition)) + cleanerManager.doneDeleting(Seq(cleanable2.get.topicPartition)) val deletableLog3 = cleanerManager.pauseCleaningForNonCompactedPartitions() assertEquals(1, deletableLog3.size, "should have 1 logs ready to be deleted") } @@ -411,11 +405,11 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) // expect the checkpoint offset is not the expectedOffset before doing updateCheckpoints - assertNotEquals(offset, cleanerManager.allCleanerCheckpoints.getOrDefault(topicPartition, 0)) + assertNotEquals(offset, cleanerManager.allCleanerCheckpoints.getOrElse(topicPartition, 0)) - cleanerManager.updateCheckpoints(logDir, Optional.of(util.Map.entry(topicPartition, offset)), Optional.empty()) + cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) // expect the checkpoint offset is now updated to the expected offset after doing updateCheckpoints - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) } @Test @@ -425,12 +419,12 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) // write some data into the cleaner-offset-checkpoint file - cleanerManager.updateCheckpoints(logDir, Optional.of(util.Map.entry(topicPartition, offset)), Optional.empty()) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) // updateCheckpoints should remove the topicPartition data in the logDir - cleanerManager.updateCheckpoints(logDir, Optional.empty(), Optional.of(topicPartition)) - assertFalse(cleanerManager.allCleanerCheckpoints.containsKey(topicPartition)) + cleanerManager.updateCheckpoints(logDir, partitionToRemove = Option(topicPartition)) + assertFalse(cleanerManager.allCleanerCheckpoints.contains(topicPartition)) } @Test @@ -440,15 +434,15 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) // write some data into the cleaner-offset-checkpoint file in logDir and logDir2 - cleanerManager.updateCheckpoints(logDir, Optional.of(util.Map.entry(topicPartition, offset)), Optional.empty()) - cleanerManager.updateCheckpoints(logDir2, Optional.of(util.Map.entry(topicPartition2, offset)), Optional.empty()) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition2)) + cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) + cleanerManager.updateCheckpoints(logDir2, partitionToUpdateOrAdd = Option(topicPartition2, offset)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition2)) cleanerManager.handleLogDirFailure(logDir.getAbsolutePath) // verify the partition data in logDir is gone, and data in logDir2 is still there - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition2)) - assertFalse(cleanerManager.allCleanerCheckpoints.containsKey(topicPartition)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition2)) + assertFalse(cleanerManager.allCleanerCheckpoints.contains(topicPartition)) } @Test @@ -460,15 +454,15 @@ class LogCleanerManagerTest extends Logging { val higherOffset = 1000L // write some data into the cleaner-offset-checkpoint file in logDir - cleanerManager.updateCheckpoints(logDir, Optional.of(util.Map.entry(topicPartition, offset)), Optional.empty()) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) // we should not truncate the checkpoint data for checkpointed offset <= the given offset (higherOffset) cleanerManager.maybeTruncateCheckpoint(logDir, topicPartition, higherOffset) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) // we should truncate the checkpoint data for checkpointed offset > the given offset (lowerOffset) cleanerManager.maybeTruncateCheckpoint(logDir, topicPartition, lowerOffset) - assertEquals(lowerOffset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + assertEquals(lowerOffset, cleanerManager.allCleanerCheckpoints(topicPartition)) } @Test @@ -478,17 +472,17 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) // write some data into the cleaner-offset-checkpoint file in logDir - cleanerManager.updateCheckpoints(logDir, Optional.of(util.Map.entry(topicPartition, offset)), Optional.empty()) - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + cleanerManager.updateCheckpoints(logDir, partitionToUpdateOrAdd = Option(topicPartition, offset)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) cleanerManager.alterCheckpointDir(topicPartition, logDir, logDir2) // verify we still can get the partition offset after alterCheckpointDir // This data should locate in logDir2, not logDir - assertEquals(offset, cleanerManager.allCleanerCheckpoints.get(topicPartition)) + assertEquals(offset, cleanerManager.allCleanerCheckpoints(topicPartition)) // force delete the logDir2 from checkpoints, so that the partition data should also be deleted cleanerManager.handleLogDirFailure(logDir2.getAbsolutePath) - assertFalse(cleanerManager.allCleanerCheckpoints.containsKey(topicPartition)) + assertFalse(cleanerManager.allCleanerCheckpoints.contains(topicPartition)) } /** @@ -504,15 +498,11 @@ class LogCleanerManagerTest extends Logging { val pausedPartitions = cleanerManager.pauseCleaningForNonCompactedPartitions() // Log truncation happens due to unclean leader election cleanerManager.abortAndPauseCleaning(log.topicPartition) - cleanerManager.resumeCleaning(util.Set.of(log.topicPartition)) + cleanerManager.resumeCleaning(Seq(log.topicPartition)) // log cleanup finishes and pausedPartitions are resumed - cleanerManager.resumeCleaning( - pausedPartitions.stream() - .map[TopicPartition](entry => entry.getKey) - .collect(Collectors.toSet[TopicPartition]()) - ) + cleanerManager.resumeCleaning(pausedPartitions.map(_._1)) - assertEquals(Optional.empty(), cleanerManager.cleaningState(log.topicPartition)) + assertEquals(None, cleanerManager.cleaningState(log.topicPartition)) } /** @@ -529,13 +519,9 @@ class LogCleanerManagerTest extends Logging { // Broker processes StopReplicaRequest with delete=true cleanerManager.abortCleaning(log.topicPartition) // log cleanup finishes and pausedPartitions are resumed - cleanerManager.resumeCleaning( - pausedPartitions.stream() - .map[TopicPartition](entry => entry.getKey) - .collect(Collectors.toSet[TopicPartition]()) - ) + cleanerManager.resumeCleaning(pausedPartitions.map(_._1)) - assertEquals(Optional.empty(), cleanerManager.cleaningState(log.topicPartition)) + assertEquals(None, cleanerManager.cleaningState(log.topicPartition)) } /** @@ -559,16 +545,16 @@ class LogCleanerManagerTest extends Logging { @Test def testCleanableOffsetsForNone(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 8) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0) log.updateHighWatermark(50) - val lastCleanOffset = Optional.of(0L.asInstanceOf[JLong]) + val lastCleanOffset = Some(0L) val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds) assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.") assertEquals(log.highWatermark, log.lastStableOffset, "The high watermark equals the last stable offset as no transactions are in progress") @@ -581,16 +567,16 @@ class LogCleanerManagerTest extends Logging { @Test def testCleanableOffsetsActiveSegment(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 8) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) - val lastCleanOffset = Optional.of(0L.asInstanceOf[JLong]) + val lastCleanOffset = Some(0L) val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds) assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.") assertEquals(log.activeSegment.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with the active segment.") @@ -603,14 +589,14 @@ class LogCleanerManagerTest extends Logging { def testCleanableOffsetsForTime(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val t0 = time.milliseconds while (log.numberOfSegments < 4) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), leaderEpoch = 0) val activeSegAtT0 = log.activeSegment @@ -618,11 +604,11 @@ class LogCleanerManagerTest extends Logging { val t1 = time.milliseconds while (log.numberOfSegments < 8) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t1), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t1), leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) - val lastCleanOffset = Optional.of(0L.asInstanceOf[JLong]) + val lastCleanOffset = Some(0L) val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds) assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.") assertEquals(activeSegAtT0.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with the second block of log entries.") @@ -636,20 +622,20 @@ class LogCleanerManagerTest extends Logging { def testCleanableOffsetsForShortTime(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val t0 = time.milliseconds while (log.numberOfSegments < 8) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, t0), leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) time.sleep(compactionLag + 1) - val lastCleanOffset = Optional.of(0L.asInstanceOf[JLong]) + val lastCleanOffset = Some(0L) val cleanableOffsets = LogCleanerManager.cleanableOffsets(log, lastCleanOffset, time.milliseconds) assertEquals(0L, cleanableOffsets.firstDirtyOffset, "The first cleanable offset starts at the beginning of the log.") assertEquals(log.activeSegment.baseOffset, cleanableOffsets.firstUncleanableDirtyOffset, "The first uncleanable offset begins with active segment.") @@ -661,7 +647,7 @@ class LogCleanerManagerTest extends Logging { val logs = setupIncreasinglyFilthyLogs(Seq(tp), startNumBatches = 20, batchIncrement = 5) logs.get(tp).maybeIncrementLogStartOffset(10L, LogStartOffsetIncrementReason.ClientRecordDeletion) - var lastCleanOffset = Optional.of(15L.asInstanceOf[JLong]) + var lastCleanOffset = Some(15L) var cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds) assertFalse(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset should not be reset if valid") @@ -669,7 +655,7 @@ class LogCleanerManagerTest extends Logging { cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds) assertTrue(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset needs to be reset if less than log start offset") - lastCleanOffset = Optional.of(25L) + lastCleanOffset = Some(25L) cleanableOffsets = LogCleanerManager.cleanableOffsets(logs.get(tp), lastCleanOffset, time.milliseconds) assertTrue(cleanableOffsets.forceUpdateCheckpoint, "Checkpoint offset needs to be reset if greater than log end offset") } @@ -678,7 +664,7 @@ class LogCleanerManagerTest extends Logging { def testUndecidedTransactionalDataNotCleanable(): Unit = { val compactionLag = 60 * 60 * 1000 val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.MIN_COMPACTION_LAG_MS_CONFIG, compactionLag: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -688,33 +674,33 @@ class LogCleanerManagerTest extends Logging { val sequence = 0 log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord(time.milliseconds(), "1".getBytes, "a".getBytes), - new SimpleRecord(time.milliseconds(), "2".getBytes, "b".getBytes)), 0) + new SimpleRecord(time.milliseconds(), "2".getBytes, "b".getBytes)), leaderEpoch = 0) log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence + 2, - new SimpleRecord(time.milliseconds(), "3".getBytes, "c".getBytes)), 0) + new SimpleRecord(time.milliseconds(), "3".getBytes, "c".getBytes)), leaderEpoch = 0) log.roll() log.updateHighWatermark(3L) time.sleep(compactionLag + 1) // although the compaction lag has been exceeded, the undecided data should not be cleaned - var cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Optional.of(0L), time.milliseconds()) + var cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds()) assertEquals(0L, cleanableOffsets.firstDirtyOffset) assertEquals(0L, cleanableOffsets.firstUncleanableDirtyOffset) log.appendAsLeader(MemoryRecords.withEndTransactionMarker(time.milliseconds(), producerId, producerEpoch, - new EndTransactionMarker(ControlRecordType.ABORT, 15)), 0, - AppendOrigin.COORDINATOR) + new EndTransactionMarker(ControlRecordType.ABORT, 15)), leaderEpoch = 0, + origin = AppendOrigin.COORDINATOR) log.roll() log.updateHighWatermark(4L) // the first segment should now become cleanable immediately - cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Optional.of(0L), time.milliseconds()) + cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds()) assertEquals(0L, cleanableOffsets.firstDirtyOffset) assertEquals(3L, cleanableOffsets.firstUncleanableDirtyOffset) time.sleep(compactionLag + 1) // the second segment becomes cleanable after the compaction lag - cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Optional.of(0L), time.milliseconds()) + cleanableOffsets = LogCleanerManager.cleanableOffsets(log, Some(0L), time.milliseconds()) assertEquals(0L, cleanableOffsets.firstDirtyOffset) assertEquals(4L, cleanableOffsets.firstUncleanableDirtyOffset) } @@ -722,29 +708,29 @@ class LogCleanerManagerTest extends Logging { @Test def testDoneCleaning(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 8) - log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), 0) + log.appendAsLeader(records(log.logEndOffset.toInt, log.logEndOffset.toInt, time.milliseconds()), leaderEpoch = 0) val cleanerManager: LogCleanerManager = createCleanerManager(log) assertThrows(classOf[IllegalStateException], () => cleanerManager.doneCleaning(topicPartition, log.dir, 1)) - cleanerManager.setCleaningState(topicPartition, LogCleaningState.logCleaningPaused(1)) + cleanerManager.setCleaningState(topicPartition, LogCleaningPaused(1)) assertThrows(classOf[IllegalStateException], () => cleanerManager.doneCleaning(topicPartition, log.dir, 1)) - cleanerManager.setCleaningState(topicPartition, LOG_CLEANING_IN_PROGRESS) + cleanerManager.setCleaningState(topicPartition, LogCleaningInProgress) val endOffset = 1L cleanerManager.doneCleaning(topicPartition, log.dir, endOffset) assertTrue(cleanerManager.cleaningState(topicPartition).isEmpty) - assertTrue(cleanerManager.allCleanerCheckpoints.containsKey(topicPartition)) - assertEquals(Some(endOffset), Option(cleanerManager.allCleanerCheckpoints.get(topicPartition))) + assertTrue(cleanerManager.allCleanerCheckpoints.contains(topicPartition)) + assertEquals(Some(endOffset), cleanerManager.allCleanerCheckpoints.get(topicPartition)) - cleanerManager.setCleaningState(topicPartition, LOG_CLEANING_ABORTED) + cleanerManager.setCleaningState(topicPartition, LogCleaningAborted) cleanerManager.doneCleaning(topicPartition, log.dir, endOffset) - assertEquals(LogCleaningState.logCleaningPaused(1), cleanerManager.cleaningState(topicPartition).get) - assertTrue(cleanerManager.allCleanerCheckpoints.containsKey(topicPartition)) + assertEquals(LogCleaningPaused(1), cleanerManager.cleaningState(topicPartition).get) + assertTrue(cleanerManager.allCleanerCheckpoints.contains(topicPartition)) } @Test @@ -754,18 +740,18 @@ class LogCleanerManagerTest extends Logging { val cleanerManager: LogCleanerManager = createCleanerManager(log) val tp = new TopicPartition("log", 0) - assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(util.List.of(tp))) + assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(Seq(tp))) - cleanerManager.setCleaningState(tp, LogCleaningState.logCleaningPaused(1)) - assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(util.List.of(tp))) + cleanerManager.setCleaningState(tp, LogCleaningPaused(1)) + assertThrows(classOf[IllegalStateException], () => cleanerManager.doneDeleting(Seq(tp))) - cleanerManager.setCleaningState(tp, LOG_CLEANING_IN_PROGRESS) - cleanerManager.doneDeleting(util.List.of(tp)) + cleanerManager.setCleaningState(tp, LogCleaningInProgress) + cleanerManager.doneDeleting(Seq(tp)) assertTrue(cleanerManager.cleaningState(tp).isEmpty) - cleanerManager.setCleaningState(tp, LOG_CLEANING_ABORTED) - cleanerManager.doneDeleting(util.List.of(tp)) - assertEquals(LogCleaningState.logCleaningPaused(1), cleanerManager.cleaningState(tp).get) + cleanerManager.setCleaningState(tp, LogCleaningAborted) + cleanerManager.doneDeleting(Seq(tp)) + assertEquals(LogCleaningPaused(1), cleanerManager.cleaningState(tp).get) } /** @@ -780,9 +766,9 @@ class LogCleanerManagerTest extends Logging { val cleanerManager = createCleanerManagerMock(logs) cleanerCheckpoints.put(tp, 15L) - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()) - assertEquals(Optional.empty(), filthiestLog, "Log should not be selected for cleaning") - assertEquals(20L, cleanerCheckpoints.get(tp), "Unselected log should have checkpoint offset updated") + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time) + assertEquals(None, filthiestLog, "Log should not be selected for cleaning") + assertEquals(20L, cleanerCheckpoints(tp), "Unselected log should have checkpoint offset updated") } /** @@ -802,19 +788,19 @@ class LogCleanerManagerTest extends Logging { cleanerCheckpoints.put(tp0, 10L) cleanerCheckpoints.put(tp1, 5L) - val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time, new PreCleanStats()).get + val filthiestLog = cleanerManager.grabFilthiestCompactedLog(time).get assertEquals(tp1, filthiestLog.topicPartition, "Dirtier log should be selected") - assertEquals(15L, cleanerCheckpoints.get(tp0), "Unselected log should have checkpoint offset updated") + assertEquals(15L, cleanerCheckpoints(tp0), "Unselected log should have checkpoint offset updated") } private def createCleanerManager(log: UnifiedLog): LogCleanerManager = { - val logs = new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog]() + val logs = new Pool[TopicPartition, UnifiedLog]() logs.put(topicPartition, log) - new LogCleanerManager(util.List.of(logDir, logDir2), logs, null) + new LogCleanerManager(Seq(logDir, logDir2), logs, null) } - private def createCleanerManagerMock(pool: util.concurrent.ConcurrentMap[TopicPartition, UnifiedLog]): LogCleanerManagerMock = { - new LogCleanerManagerMock(util.List.of(logDir), pool, null) + private def createCleanerManagerMock(pool: Pool[TopicPartition, UnifiedLog]): LogCleanerManagerMock = { + new LogCleanerManagerMock(Seq(logDir), pool, null) } private def createLog(segmentSize: Int, @@ -823,25 +809,25 @@ class LogCleanerManagerTest extends Logging { val config = createLowRetentionLogConfig(segmentSize, cleanupPolicy) val partitionDir = new File(logDir, UnifiedLog.logDirName(topicPartition)) - UnifiedLog.create( - partitionDir, - config, - 0L, - 0L, - time.scheduler, - new BrokerTopicStats, - time, - 5 * 60 * 1000, - producerStateManagerConfig, - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty) + UnifiedLog( + dir = partitionDir, + config = config, + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true) } private def createLowRetentionLogConfig(segmentSize: Int, cleanupPolicy: String): LogConfig = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentSize: Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentSize: Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, 1: Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, cleanupPolicy) logProps.put(TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG, 0.05: java.lang.Double) // small for easier and clearer tests @@ -872,25 +858,25 @@ class LogCleanerManagerTest extends Logging { new SimpleRecord(currentTimestamp, s"key-$offset".getBytes, s"value-$offset".getBytes) } - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 1) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 1) log.maybeIncrementHighWatermark(log.logEndOffsetMetadata) } private def makeLog(dir: File = logDir, config: LogConfig) = { - UnifiedLog.create( - dir, - config, - 0L, - 0L, - time.scheduler, - new BrokerTopicStats, - time, - 5 * 60 * 1000, - producerStateManagerConfig, - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty + UnifiedLog( + dir = dir, + config = config, + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true ) } diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala index e0d3ac5601d23..df461855a9fa9 100755 --- a/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerParameterizedIntegrationTest.scala @@ -18,7 +18,7 @@ package kafka.log import java.io.File -import java.util.{Optional, Properties} +import java.util.Properties import kafka.server.KafkaConfig import kafka.utils._ import org.apache.kafka.common.TopicPartition @@ -29,7 +29,7 @@ import org.apache.kafka.common.utils.Time import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleanerManager, LogConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.extension.ExtensionContext import org.junit.jupiter.params.ParameterizedTest @@ -69,7 +69,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati checkLogAfterAppendingDups(log, startSize, appends) - val appendInfo = log.appendAsLeader(largeMessageSet, 0) + val appendInfo = log.appendAsLeader(largeMessageSet, leaderEpoch = 0) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) val largeMessageOffset = appendInfo.firstOffset @@ -85,8 +85,8 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati // force a checkpoint // and make sure its gone from checkpoint file cleaner.logs.remove(topicPartitions(0)) - cleaner.updateCheckpoints(logDir, Optional.of(topicPartitions(0))) - val checkpoints = new OffsetCheckpointFile(new File(logDir, LogCleanerManager.OFFSET_CHECKPOINT_FILE), null).read() + cleaner.updateCheckpoints(logDir, partitionToRemove = Option(topicPartitions(0))) + val checkpoints = new OffsetCheckpointFile(new File(logDir, cleaner.cleanerManager.offsetCheckpointFile), null).read() // we expect partition 0 to be gone assertFalse(checkpoints.containsKey(topicPartitions(0))) } @@ -171,7 +171,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati checkLogAfterAppendingDups(log, startSize, appends1) val dupsV0 = writeDups(numKeys = 40, numDups = 3, log = log, codec = compression, magicValue = RecordBatch.MAGIC_VALUE_V0) - val appendInfo = log.appendAsLeaderWithRecordVersion(largeMessageSet, 0, RecordVersion.V0) + val appendInfo = log.appendAsLeaderWithRecordVersion(largeMessageSet, leaderEpoch = 0, recordVersion = RecordVersion.V0) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) val largeMessageOffset = appendInfo.firstOffset @@ -242,7 +242,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati if (compressionType == CompressionType.NONE) assertEquals(1, recordBatch.iterator().asScala.size) else - assertTrue(recordBatch.iterator().asScala.nonEmpty) + assertTrue(recordBatch.iterator().asScala.size >= 1) val firstRecordKey = TestUtils.readString(recordBatch.iterator().next().key()) if (keysForV0RecordsWithNoV1V2Updates.contains(firstRecordKey)) @@ -280,7 +280,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati // Verify no cleaning with LogCleanerIoBufferSizeProp=1 val firstDirty = log.activeSegment.baseOffset val topicPartition = new TopicPartition("log", 0) - cleaner.awaitCleaned(topicPartition, firstDirty, 10) + cleaner.awaitCleaned(topicPartition, firstDirty, maxWaitMs = 10) assertTrue(cleaner.cleanerManager.allCleanerCheckpoints.isEmpty, "Should not have cleaned") def kafkaConfigWithCleanerConfig(cleanerConfig: CleanerConfig): KafkaConfig = { @@ -317,8 +317,8 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati // wait until cleaning up to base_offset, note that cleaning happens only when "log dirty ratio" is higher than // TopicConfig.MIN_CLEANABLE_DIRTY_RATIO_CONFIG val topicPartition = new TopicPartition(topic, partitionId) - cleaner.awaitCleaned(topicPartition, firstDirty, 60000L) - val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints.get(topicPartition) + cleaner.awaitCleaned(topicPartition, firstDirty) + val lastCleaned = cleaner.cleanerManager.allCleanerCheckpoints(topicPartition) assertTrue(lastCleaned >= firstDirty, s"log cleaner should have processed up to offset $firstDirty, but lastCleaned=$lastCleaned") } @@ -353,7 +353,7 @@ class LogCleanerParameterizedIntegrationTest extends AbstractLogCleanerIntegrati } val appendInfo = log.appendAsLeaderWithRecordVersion(MemoryRecords.withRecords(magicValue, codec, records: _*), - 0, RecordVersion.lookup(magicValue)) + leaderEpoch = 0, recordVersion = RecordVersion.lookup(magicValue)) // move LSO forward to increase compaction bound log.updateHighWatermark(log.logEndOffset) val offsets = appendInfo.firstOffset to appendInfo.lastOffset diff --git a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala index 5b842d60e46ba..1be6cfd62d8f6 100644 --- a/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogCleanerTest.scala @@ -17,18 +17,19 @@ package kafka.log +import kafka.common._ import kafka.server.KafkaConfig -import kafka.utils.{CoreUtils, Logging, TestUtils} +import kafka.utils.{CoreUtils, Logging, Pool, TestUtils} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.CorruptRecordException import org.apache.kafka.common.record._ -import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanedTransactionMetadata, Cleaner, CleanerConfig, CleanerStats, LocalLog, LogAppendInfo, LogCleaner, LogCleanerManager, LogCleaningAbortedException, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetMap, ProducerStateManager, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, CleanerConfig, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetMap, ProducerStateManager, ProducerStateManagerConfig} import org.apache.kafka.storage.internals.utils.Throttler import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ @@ -41,10 +42,8 @@ import java.io.{File, RandomAccessFile} import java.nio._ import java.nio.charset.StandardCharsets import java.nio.file.Paths -import java.util -import java.util.{Optional, Properties} +import java.util.Properties import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, TimeUnit} -import java.util.function.Consumer import scala.collection._ import scala.jdk.CollectionConverters._ @@ -56,7 +55,7 @@ class LogCleanerTest extends Logging { val tmpdir = TestUtils.tempDir() val dir = TestUtils.randomPartitionLogDir(tmpdir) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) @@ -77,10 +76,10 @@ class LogCleanerTest extends Logging { val mockMetricsGroupCtor = mockConstruction(classOf[KafkaMetricsGroup]) try { val logCleaner = new LogCleaner(new CleanerConfig(true), - util.List.of(TestUtils.tempDir(), TestUtils.tempDir()), - new ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time) + logDirs = Array(TestUtils.tempDir(), TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) val metricsToVerify = new java.util.HashMap[String, java.util.List[java.util.Map[String, String]]]() logCleaner.cleanerManager.gaugeMetricNameWithTag.asScala.foreach { metricNameAndTags => val tags = new java.util.ArrayList[java.util.Map[String, String]]() @@ -91,21 +90,21 @@ class LogCleanerTest extends Logging { logCleaner.shutdown() val mockMetricsGroup = mockMetricsGroupCtor.constructed.get(0) - val numMetricsRegistered = LogCleaner.METRIC_NAMES.size + val numMetricsRegistered = LogCleaner.MetricNames.size verify(mockMetricsGroup, times(numMetricsRegistered)).newGauge(anyString(), any()) // verify that each metric in `LogCleaner` is removed - LogCleaner.METRIC_NAMES.forEach(verify(mockMetricsGroup).removeMetric(_)) + LogCleaner.MetricNames.foreach(verify(mockMetricsGroup).removeMetric(_)) // verify that each metric in `LogCleanerManager` is removed val mockLogCleanerManagerMetricsGroup = mockMetricsGroupCtor.constructed.get(1) - LogCleanerManager.GAUGE_METRIC_NAME_NO_TAG.forEach(metricName => verify(mockLogCleanerManagerMetricsGroup).newGauge(ArgumentMatchers.eq(metricName), any())) + LogCleanerManager.GaugeMetricNameNoTag.foreach(metricName => verify(mockLogCleanerManagerMetricsGroup).newGauge(ArgumentMatchers.eq(metricName), any())) metricsToVerify.asScala.foreach { metricNameAndTags => metricNameAndTags._2.asScala.foreach { tags => verify(mockLogCleanerManagerMetricsGroup).newGauge(ArgumentMatchers.eq(metricNameAndTags._1), any(), ArgumentMatchers.eq(tags)) } } - LogCleanerManager.GAUGE_METRIC_NAME_NO_TAG.forEach(verify(mockLogCleanerManagerMetricsGroup).removeMetric(_)) + LogCleanerManager.GaugeMetricNameNoTag.foreach(verify(mockLogCleanerManagerMetricsGroup).removeMetric(_)) metricsToVerify.asScala.foreach { metricNameAndTags => metricNameAndTags._2.asScala.foreach { tags => verify(mockLogCleanerManagerMetricsGroup).removeMetric(ArgumentMatchers.eq(metricNameAndTags._1), ArgumentMatchers.eq(tags)) @@ -123,20 +122,20 @@ class LogCleanerTest extends Logging { @Test def testMetricsActiveAfterReconfiguration(): Unit = { val logCleaner = new LogCleaner(new CleanerConfig(true), - util.List.of(TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time) + logDirs = Array(TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) try { logCleaner.startup() - var nonexistent = LogCleaner.METRIC_NAMES.asScala.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) + var nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) assertEquals(0, nonexistent.size, s"$nonexistent should be existent") logCleaner.reconfigure(new KafkaConfig(TestUtils.createBrokerConfig(1)), new KafkaConfig(TestUtils.createBrokerConfig(1))) - nonexistent = LogCleaner.METRIC_NAMES.asScala.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) + nonexistent = LogCleaner.MetricNames.diff(KafkaYammerMetrics.defaultRegistry.allMetrics().keySet().asScala.map(_.getName)) assertEquals(0, nonexistent.size, s"$nonexistent should be existent") } finally logCleaner.shutdown() } @@ -148,13 +147,13 @@ class LogCleanerTest extends Logging { def testCleanSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while (log.numberOfSegments < 4) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keysFound = LogTestUtils.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) @@ -165,10 +164,10 @@ class LogCleanerTest extends Logging { // clean the log val segments = log.logSegments.asScala.take(3).toSeq - val stats = new CleanerStats(Time.SYSTEM) + val stats = new CleanerStats() val expectedBytesRead = segments.map(_.size).sum val shouldRemain = LogTestUtils.keysInLog(log).filterNot(keys.contains) - cleaner.cleanSegments(log, segments.asJava, map, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) + cleaner.cleanSegments(log, segments, map, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) assertEquals(shouldRemain, LogTestUtils.keysInLog(log)) assertEquals(expectedBytesRead, stats.bytesRead) } @@ -181,7 +180,7 @@ class LogCleanerTest extends Logging { // Construct a log instance. The replaceSegments() method of the log instance is overridden so that // it waits for another thread to execute deleteOldSegments() val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024 : java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024 : java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT + "," + TopicConfig.CLEANUP_POLICY_DELETE) val config = LogConfig.fromProps(logConfig.originals, logProps) val topicPartition = UnifiedLog.parseTopicPartitionName(dir) @@ -190,7 +189,7 @@ class LogCleanerTest extends Logging { val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT val logSegments = new LogSegments(topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - dir, topicPartition, logDirFailureChannel, Optional.empty, time.scheduler) + dir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, dir, maxTransactionTimeoutMs, producerStateManagerConfig, time) val offsets = new LogLoader( @@ -212,15 +211,14 @@ class LogCleanerTest extends Logging { val localLog = new LocalLog(dir, config, logSegments, offsets.recoveryPoint, offsets.nextOffsetMetadata, time.scheduler, time, topicPartition, logDirFailureChannel) val log = new UnifiedLog(offsets.logStartOffset, - localLog, - new BrokerTopicStats, - producerIdExpirationCheckIntervalMs, - leaderEpochCache, - producerStateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) { - override def replaceSegments(newSegments: util.List[LogSegment], oldSegments: util.List[LogSegment]): Unit = { + localLog, + brokerTopicStats = new BrokerTopicStats, + producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs, + leaderEpochCache = leaderEpochCache, + producerStateManager = producerStateManager, + _topicId = None, + keepPartitionMetadataFile = true) { + override def replaceSegments(newSegments: Seq[LogSegment], oldSegments: Seq[LogSegment]): Unit = { deleteStartLatch.countDown() if (!deleteCompleteLatch.await(5000, TimeUnit.MILLISECONDS)) { throw new IllegalStateException("Log segment deletion timed out") @@ -244,7 +242,7 @@ class LogCleanerTest extends Logging { // Append records so that segment number increase to 3 while (log.numberOfSegments < 3) { - log.appendAsLeader(record(key = 0, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(key = 0, log.logEndOffset.toInt), leaderEpoch = 0) log.roll() } assertEquals(3, log.numberOfSegments) @@ -256,10 +254,10 @@ class LogCleanerTest extends Logging { // Clean the log. This should trigger replaceSegments() and deleteOldSegments(); val offsetMap = new FakeOffsetMap(Int.MaxValue) val cleaner = makeCleaner(Int.MaxValue) - val segments = log.logSegments(0, log.activeSegment.baseOffset).asScala.toSeq - val stats = new CleanerStats(Time.SYSTEM) + val segments = log.logSegments(0, log.activeSegment.baseOffset).toSeq + val stats = new CleanerStats() cleaner.buildOffsetMap(log, 0, log.activeSegment.baseOffset, offsetMap, stats) - cleaner.cleanSegments(log, segments.asJava, offsetMap, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) + cleaner.cleanSegments(log, segments, offsetMap, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) // Validate based on the file name that log segment file is renamed exactly once for async deletion assertEquals(expectedFileName, firstLogFile.file().getPath) @@ -271,21 +269,21 @@ class LogCleanerTest extends Logging { val originalMaxFileSize = 1024 val cleaner = makeCleaner(2) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, originalMaxFileSize: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, originalMaxFileSize: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact": java.lang.String) logProps.put(TopicConfig.PREALLOCATE_CONFIG, "true": java.lang.String) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) - log.appendAsLeader(record(0,0), 0) // offset 0 - log.appendAsLeader(record(1,1), 0) // offset 1 - log.appendAsLeader(record(0,0), 0) // offset 2 - log.appendAsLeader(record(1,1), 0) // offset 3 - log.appendAsLeader(record(0,0), 0) // offset 4 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() // clean the log with only one message removed - cleaner.clean(new LogToClean(log, 2, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertTrue(log.logSegments.iterator.next().log.channel.size < originalMaxFileSize, "Cleaned segment file should be trimmed to its real size.") @@ -295,7 +293,7 @@ class LogCleanerTest extends Logging { def testDuplicateCheckAfterCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) var log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -309,7 +307,7 @@ class LogCleanerTest extends Logging { appendIdempotentAsLeader(log, pid3, producerEpoch)(Seq(1, 4)) log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(2, 5, 7), lastOffsetsPerBatchInLog(log)) assertEquals(Map(pid1 -> 2, pid2 -> 2, pid3 -> 1), lastSequencesInLog(log)) assertEquals(List(2, 3, 1, 4), LogTestUtils.keysInLog(log)) @@ -318,7 +316,7 @@ class LogCleanerTest extends Logging { // we have to reload the log to validate that the cleaner maintained sequence numbers correctly def reloadLog(): Unit = { log.close() - log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) + log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps), recoveryPoint = 0L) } reloadLog() @@ -341,7 +339,7 @@ class LogCleanerTest extends Logging { // do one more append and a round of cleaning to force another deletion from producer 1's batch appendIdempotentAsLeader(log, pid4, producerEpoch)(Seq(2)) log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(Map(pid1 -> 2, pid2 -> 2, pid3 -> 1, pid4 -> 0), lastSequencesInLog(log)) assertEquals(List(2, 5, 7, 8), lastOffsetsPerBatchInLog(log)) assertEquals(List(3, 1, 4, 2), LogTestUtils.keysInLog(log)) @@ -356,15 +354,15 @@ class LogCleanerTest extends Logging { } private def assertAllAbortedTxns( - expectedAbortedTxns: util.List[AbortedTxn], + expectedAbortedTxns: List[AbortedTxn], log: UnifiedLog ): Unit= { - val abortedTxns = log.collectAbortedTransactions(0L, log.logEndOffset) + val abortedTxns = log.collectAbortedTransactions(startOffset = 0L, upperBoundOffset = log.logEndOffset) assertEquals(expectedAbortedTxns, abortedTxns) } private def assertAllTransactionsComplete(log: UnifiedLog): Unit = { - assertTrue(log.activeProducers.asScala.forall(_.currentTxnStartOffset() == -1)) + assertTrue(log.activeProducers.forall(_.currentTxnStartOffset() == -1)) } @Test @@ -387,11 +385,11 @@ class LogCleanerTest extends Logging { val appendProducer2 = appendTransactionalAsLeader(log, producerId2, producerEpoch) def abort(producerId: Long): Unit = { - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.REPLICATION) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.REPLICATION) } def commit(producerId: Long): Unit = { - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.REPLICATION) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.REPLICATION) } // Append some transaction data (offset range in parenthesis) @@ -412,7 +410,7 @@ class LogCleanerTest extends Logging { log.roll() assertEquals(20L, log.logEndOffset) - val expectedAbortedTxns = util.List.of( + val expectedAbortedTxns = List( new AbortedTxn(producerId1, 8, 10, 11), new AbortedTxn(producerId2, 11, 16, 17) ) @@ -423,10 +421,10 @@ class LogCleanerTest extends Logging { var dirtyOffset = 0L def cleanSegments(): Unit = { val offsetMap = new FakeOffsetMap(slots = offsetMapSlots) - val segments = log.logSegments(0, log.activeSegment.baseOffset).asScala.toSeq + val segments = log.logSegments(0, log.activeSegment.baseOffset).toSeq val stats = new CleanerStats(time) cleaner.buildOffsetMap(log, dirtyOffset, log.activeSegment.baseOffset, offsetMap, stats) - cleaner.cleanSegments(log, segments.asJava, offsetMap, time.milliseconds(), stats, new CleanedTransactionMetadata, Long.MaxValue, segments.last.readNextOffset) + cleaner.cleanSegments(log, segments, offsetMap, time.milliseconds(), stats, new CleanedTransactionMetadata, Long.MaxValue, segments.last.readNextOffset) dirtyOffset = offsetMap.latestOffset + 1 } @@ -447,7 +445,7 @@ class LogCleanerTest extends Logging { assertEquals(List(0, 2, 4, 6, 7, 10, 13, 15, 16, 17, 19), batchBaseOffsetsInLog(log)) assertEquals(List(0, 2, 4, 5, 6, 7, 10, 13, 14, 15, 16, 17, 18, 19), offsetsInLog(log)) assertAllTransactionsComplete(log) - assertAllAbortedTxns(util.List.of(), log) + assertAllAbortedTxns(List(), log) // On the last pass, wait for the retention time to expire. The abort markers // (offsets 10 and 16) should be deleted. @@ -457,14 +455,14 @@ class LogCleanerTest extends Logging { assertEquals(List(0, 2, 4, 6, 7, 13, 15, 17, 19), batchBaseOffsetsInLog(log)) assertEquals(List(0, 2, 4, 5, 6, 7, 13, 15, 17, 18, 19), offsetsInLog(log)) assertAllTransactionsComplete(log) - assertAllAbortedTxns(util.List.of(), log) + assertAllAbortedTxns(List(), log) } @Test def testBasicTransactionAwareCleaning(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -477,15 +475,15 @@ class LogCleanerTest extends Logging { appendProducer1(Seq(1, 2)) appendProducer2(Seq(2, 3)) appendProducer1(Seq(3, 4)) - log.appendAsLeader(abortMarker(pid1, producerEpoch), 0, AppendOrigin.COORDINATOR) - log.appendAsLeader(commitMarker(pid2, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(pid2, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer1(Seq(2)) - log.appendAsLeader(commitMarker(pid1, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(pid1, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) val abortedTransactions = log.collectAbortedTransactions(log.logStartOffset, log.logEndOffset) log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 2), LogTestUtils.keysInLog(log)) assertEquals(List(3, 6, 7, 8, 9), offsetsInLog(log)) @@ -497,7 +495,7 @@ class LogCleanerTest extends Logging { def testCleanWithTransactionsSpanningSegments(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -518,14 +516,14 @@ class LogCleanerTest extends Logging { appendProducer2(Seq(5, 6)) appendProducer3(Seq(6, 7)) appendProducer1(Seq(7, 8)) - log.appendAsLeader(abortMarker(pid2, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(pid2, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer3(Seq(8, 9)) - log.appendAsLeader(commitMarker(pid3, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(pid3, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer1(Seq(9, 10)) - log.appendAsLeader(abortMarker(pid1, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(pid1, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) // we have only cleaned the records in the first segment - val dirtyOffset = cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)).getKey + val dirtyOffset = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset))._1 assertEquals(List(2, 3, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10), LogTestUtils.keysInLog(log)) log.roll() @@ -535,15 +533,16 @@ class LogCleanerTest extends Logging { appendProducer1(Seq(12)) // finally only the keys from pid3 should remain - cleaner.clean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, dirtyOffset, log.activeSegment.baseOffset)) assertEquals(List(2, 3, 6, 7, 8, 9, 11, 12), LogTestUtils.keysInLog(log)) } @Test def testCommitMarkerRemoval(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -552,32 +551,32 @@ class LogCleanerTest extends Logging { appendProducer(Seq(1)) appendProducer(Seq(2, 3)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer(Seq(2)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() // cannot remove the marker in this pass because there are still valid records - var dirtyOffset = cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp).getKey + var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(1, 3, 2), LogTestUtils.keysInLog(log)) assertEquals(List(0, 2, 3, 4, 5), offsetsInLog(log)) appendProducer(Seq(1, 3)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() // the first cleaning preserves the commit marker (at offset 3) since there were still records for the transaction - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), largeTimestamp).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(2, 1, 3), LogTestUtils.keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // clean again with same timestamp to verify marker is not removed early - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), largeTimestamp).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(2, 1, 3), LogTestUtils.keysInLog(log)) assertEquals(List(3, 4, 5, 6, 7, 8), offsetsInLog(log)) // clean again with max timestamp to verify the marker is removed - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), Long.MaxValue).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = Long.MaxValue)._1 assertEquals(List(2, 1, 3), LogTestUtils.keysInLog(log)) assertEquals(List(4, 5, 6, 7, 8), offsetsInLog(log)) } @@ -588,10 +587,11 @@ class LogCleanerTest extends Logging { */ @Test def testDeletedBatchesWithNoMessagesRead(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(capacity = Int.MaxValue, maxMessageSize = 100) val logProps = new Properties() logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, 100: java.lang.Integer) - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1000: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1000: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -599,27 +599,28 @@ class LogCleanerTest extends Logging { val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(1)) - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer(Seq(2)) appendProducer(Seq(2)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() - cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp) + cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(2), LogTestUtils.keysInLog(log)) assertEquals(List(1, 3, 4), offsetsInLog(log)) // In the first pass, the deleteHorizon for {Producer2: Commit} is set. In the second pass, it's removed. - runTwoPassClean(cleaner, new LogToClean(log, 0L, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + runTwoPassClean(cleaner, LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(2), LogTestUtils.keysInLog(log)) assertEquals(List(3, 4), offsetsInLog(log)) } @Test def testCommitMarkerRetentionWithEmptyBatch(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -632,26 +633,28 @@ class LogCleanerTest extends Logging { // [{Producer1: 2, 3}], [{Producer2: 2, 3}, {Producer2: Commit}] producer2(Seq(2, 3)) // offsets 2, 3 - log.appendAsLeader(commitMarker(2L, producerEpoch), 0, AppendOrigin.COORDINATOR) // offset 4 + log.appendAsLeader(commitMarker(2L, producerEpoch), leaderEpoch = 0, + origin = AppendOrigin.COORDINATOR) // offset 4 log.roll() // [{Producer1: 2, 3}], [{Producer2: 2, 3}, {Producer2: Commit}], [{2}, {3}, {Producer1: Commit}] // {0, 1}, {2, 3}, {4}, {5}, {6}, {7} ==> Offsets - log.appendAsLeader(record(2, 2), 0) // offset 5 - log.appendAsLeader(record(3, 3), 0) // offset 6 - log.appendAsLeader(commitMarker(1L, producerEpoch), 0, AppendOrigin.COORDINATOR) // offset 7 + log.appendAsLeader(record(2, 2), leaderEpoch = 0) // offset 5 + log.appendAsLeader(record(3, 3), leaderEpoch = 0) // offset 6 + log.appendAsLeader(commitMarker(1L, producerEpoch), leaderEpoch = 0, + origin = AppendOrigin.COORDINATOR) // offset 7 log.roll() // first time through the records are removed // Expected State: [{Producer1: EmptyBatch}, {Producer2: EmptyBatch}, {Producer2: Commit}, {2}, {3}, {Producer1: Commit}] - var dirtyOffset = cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp).getKey + var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(2, 3), LogTestUtils.keysInLog(log)) assertEquals(List(4, 5, 6, 7), offsetsInLog(log)) assertEquals(List(1, 3, 4, 5, 6, 7), lastOffsetsPerBatchInLog(log)) // the empty batch remains if cleaned again because it still holds the last sequence // Expected State: [{Producer1: EmptyBatch}, {Producer2: EmptyBatch}, {Producer2: Commit}, {2}, {3}, {Producer1: Commit}] - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), largeTimestamp).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(2, 3), LogTestUtils.keysInLog(log)) assertEquals(List(4, 5, 6, 7), offsetsInLog(log)) assertEquals(List(1, 3, 4, 5, 6, 7), lastOffsetsPerBatchInLog(log)) @@ -660,19 +663,20 @@ class LogCleanerTest extends Logging { // [{Producer1: EmptyBatch}, {Producer2: EmptyBatch}, {Producer2: Commit}, {2}, {3}, {Producer1: Commit}, {Producer2: 1}, {Producer2: Commit}] // {1}, {3}, {4}, {5}, {6}, {7}, {8}, {9} ==> Offsets producer2(Seq(1)) // offset 8 - log.appendAsLeader(commitMarker(2L, producerEpoch), 0, AppendOrigin.COORDINATOR) // offset 9 + log.appendAsLeader(commitMarker(2L, producerEpoch), leaderEpoch = 0, + origin = AppendOrigin.COORDINATOR) // offset 9 log.roll() // Expected State: [{Producer1: EmptyBatch}, {Producer2: Commit}, {2}, {3}, {Producer1: Commit}, {Producer2: 1}, {Producer2: Commit}] // The deleteHorizon for {Producer2: Commit} is still not set yet. - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), largeTimestamp).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(2, 3, 1), LogTestUtils.keysInLog(log)) assertEquals(List(4, 5, 6, 7, 8, 9), offsetsInLog(log)) assertEquals(List(1, 4, 5, 6, 7, 8, 9), lastOffsetsPerBatchInLog(log)) // Expected State: [{Producer1: EmptyBatch}, {2}, {3}, {Producer1: Commit}, {Producer2: 1}, {Producer2: Commit}] // In the first pass, the deleteHorizon for {Producer2: Commit} is set. In the second pass, it's removed. - dirtyOffset = runTwoPassClean(cleaner, new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + dirtyOffset = runTwoPassClean(cleaner, LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(2, 3, 1), LogTestUtils.keysInLog(log)) assertEquals(List(5, 6, 7, 8, 9), offsetsInLog(log)) assertEquals(List(1, 5, 6, 7, 8, 9), lastOffsetsPerBatchInLog(log)) @@ -680,31 +684,33 @@ class LogCleanerTest extends Logging { @Test def testCleanEmptyControlBatch(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort // [{Producer1: Commit}, {2}, {3}] - log.appendAsLeader(commitMarker(1L, producerEpoch), 0, AppendOrigin.COORDINATOR) // offset 1 - log.appendAsLeader(record(2, 2), 0) // offset 2 - log.appendAsLeader(record(3, 3), 0) // offset 3 + log.appendAsLeader(commitMarker(1L, producerEpoch), leaderEpoch = 0, + origin = AppendOrigin.COORDINATOR) // offset 1 + log.appendAsLeader(record(2, 2), leaderEpoch = 0) // offset 2 + log.appendAsLeader(record(3, 3), leaderEpoch = 0) // offset 3 log.roll() // first time through the control batch is retained as an empty batch // Expected State: [{Producer1: EmptyBatch}], [{2}, {3}] // In the first pass, the deleteHorizon for the commit marker is set. In the second pass, the commit marker is removed // but the empty batch is retained for preserving the producer epoch. - var dirtyOffset = runTwoPassClean(cleaner, new LogToClean(log, 0L, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + var dirtyOffset = runTwoPassClean(cleaner, LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(2, 3), LogTestUtils.keysInLog(log)) assertEquals(List(1, 2), offsetsInLog(log)) assertEquals(List(0, 1, 2), lastOffsetsPerBatchInLog(log)) // the empty control batch does not cause an exception when cleaned // Expected State: [{Producer1: EmptyBatch}], [{2}, {3}] - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), Long.MaxValue).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = Long.MaxValue)._1 assertEquals(List(2, 3), LogTestUtils.keysInLog(log)) assertEquals(List(1, 2), offsetsInLog(log)) assertEquals(List(0, 1, 2), lastOffsetsPerBatchInLog(log)) @@ -712,9 +718,10 @@ class LogCleanerTest extends Logging { @Test def testCommittedTransactionSpanningSegments(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L @@ -723,20 +730,21 @@ class LogCleanerTest extends Logging { appendTransaction(Seq(1)) log.roll() - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() // Both the record and the marker should remain after cleaning - runTwoPassClean(cleaner, new LogToClean(log, 0L, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + runTwoPassClean(cleaner, LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(0, 1), offsetsInLog(log)) assertEquals(List(0, 1), lastOffsetsPerBatchInLog(log)) } @Test def testAbortedTransactionSpanningSegments(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 128: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort val producerId = 1L @@ -745,27 +753,28 @@ class LogCleanerTest extends Logging { appendTransaction(Seq(1)) log.roll() - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() // Both the batch and the marker should remain after cleaning. The batch is retained // because it is the last entry for this producerId. The marker is retained because // there are still batches remaining from this transaction. - cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp) + cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(1), offsetsInLog(log)) assertEquals(List(0, 1), lastOffsetsPerBatchInLog(log)) // The empty batch and the marker is still retained after a second cleaning. - cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), Long.MaxValue) + cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = Long.MaxValue) assertEquals(List(1), offsetsInLog(log)) assertEquals(List(0, 1), lastOffsetsPerBatchInLog(log)) } @Test def testAbortMarkerRemoval(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -774,18 +783,18 @@ class LogCleanerTest extends Logging { appendProducer(Seq(1)) appendProducer(Seq(2, 3)) - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) appendProducer(Seq(3)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() // Aborted records are removed, but the abort marker is still preserved. - val dirtyOffset = cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp).getKey + val dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertEquals(List(3), LogTestUtils.keysInLog(log)) assertEquals(List(3, 4, 5), offsetsInLog(log)) // In the first pass, the delete horizon for the abort marker is set. In the second pass, the abort marker is removed. - runTwoPassClean(cleaner, new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + runTwoPassClean(cleaner, LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(3), LogTestUtils.keysInLog(log)) assertEquals(List(4, 5), offsetsInLog(log)) } @@ -797,42 +806,46 @@ class LogCleanerTest extends Logging { val producerEpoch = 0.toShort val producerId = 1L + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 2048: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) - val appendFirstTransaction = appendTransactionalAsLeader(log, producerId, producerEpoch, 0, AppendOrigin.REPLICATION) + val appendFirstTransaction = appendTransactionalAsLeader(log, producerId, producerEpoch, + origin = AppendOrigin.REPLICATION) appendFirstTransaction(Seq(1)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) - val appendSecondTransaction = appendTransactionalAsLeader(log, producerId, producerEpoch, 0, AppendOrigin.REPLICATION) + val appendSecondTransaction = appendTransactionalAsLeader(log, producerId, producerEpoch, + origin = AppendOrigin.REPLICATION) appendSecondTransaction(Seq(2)) - log.appendAsLeader(commitMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) - log.appendAsLeader(record(1, 1), 0) - log.appendAsLeader(record(2, 1), 0) + log.appendAsLeader(record(1, 1), leaderEpoch = 0) + log.appendAsLeader(record(2, 1), leaderEpoch = 0) // Roll the log to ensure that the data is cleanable. log.roll() // Both transactional batches will be cleaned. The last one will remain in the log // as an empty batch in order to preserve the producer sequence number and epoch - cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp) + cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(1, 3, 4, 5), offsetsInLog(log)) assertEquals(List(1, 2, 3, 4, 5), lastOffsetsPerBatchInLog(log)) // In the first pass, the delete horizon for the first marker is set. In the second pass, the first marker is removed. - runTwoPassClean(cleaner, new LogToClean(log, 0L, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + runTwoPassClean(cleaner, LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(3, 4, 5), offsetsInLog(log)) assertEquals(List(2, 3, 4, 5), lastOffsetsPerBatchInLog(log)) } @Test def testAbortMarkerRetentionWithEmptyBatch(): Unit = { + val tp = new TopicPartition("test", 0) val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 256: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -840,28 +853,28 @@ class LogCleanerTest extends Logging { val appendProducer = appendTransactionalAsLeader(log, producerId, producerEpoch) appendProducer(Seq(2, 3)) // batch last offset is 1 - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() def assertAbortedTransactionIndexed(): Unit = { val abortedTxns = log.collectAbortedTransactions(0L, 100L) assertEquals(1, abortedTxns.size) - assertEquals(producerId, abortedTxns.get(0).producerId) - assertEquals(0, abortedTxns.get(0).firstOffset) - assertEquals(2, abortedTxns.get(0).lastOffset) + assertEquals(producerId, abortedTxns.head.producerId) + assertEquals(0, abortedTxns.head.firstOffset) + assertEquals(2, abortedTxns.head.lastOffset) } assertAbortedTransactionIndexed() // first time through the records are removed - var dirtyOffset = cleaner.doClean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false), largeTimestamp).getKey + var dirtyOffset = cleaner.doClean(LogToClean(tp, log, 0L, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertAbortedTransactionIndexed() assertEquals(List(), LogTestUtils.keysInLog(log)) assertEquals(List(2), offsetsInLog(log)) // abort marker is retained assertEquals(List(1, 2), lastOffsetsPerBatchInLog(log)) // empty batch is retained // the empty batch remains if cleaned again because it still holds the last sequence - dirtyOffset = runTwoPassClean(cleaner, new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + dirtyOffset = runTwoPassClean(cleaner, LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertAbortedTransactionIndexed() assertEquals(List(), LogTestUtils.keysInLog(log)) assertEquals(List(2), offsetsInLog(log)) // abort marker is still retained @@ -871,14 +884,14 @@ class LogCleanerTest extends Logging { appendProducer(Seq(1)) log.roll() - dirtyOffset = cleaner.doClean(new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), largeTimestamp).getKey + dirtyOffset = cleaner.doClean(LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp)._1 assertAbortedTransactionIndexed() assertEquals(List(1), LogTestUtils.keysInLog(log)) assertEquals(List(2, 3), offsetsInLog(log)) // abort marker is not yet gone because we read the empty batch assertEquals(List(2, 3), lastOffsetsPerBatchInLog(log)) // but we do not preserve the empty batch // In the first pass, the delete horizon for the abort marker is set. In the second pass, the abort marker is removed. - dirtyOffset = runTwoPassClean(cleaner, new LogToClean(log, dirtyOffset, log.activeSegment.baseOffset, false), currentTime = largeTimestamp) + dirtyOffset = runTwoPassClean(cleaner, LogToClean(tp, log, dirtyOffset, log.activeSegment.baseOffset), currentTime = largeTimestamp) assertEquals(List(1), LogTestUtils.keysInLog(log)) assertEquals(List(3), offsetsInLog(log)) // abort marker is gone assertEquals(List(3), lastOffsetsPerBatchInLog(log)) @@ -896,13 +909,13 @@ class LogCleanerTest extends Logging { // Create cleaner with very small default max message size val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 2) - log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), leaderEpoch = 0) val keysFound = LogTestUtils.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) @@ -912,8 +925,8 @@ class LogCleanerTest extends Logging { keys.foreach(k => map.put(key(k), Long.MaxValue)) // clean the log - val stats = new CleanerStats(Time.SYSTEM) - cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), map, 0L, stats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + val stats = new CleanerStats() + cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head), map, 0L, stats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) val shouldRemain = LogTestUtils.keysInLog(log).filterNot(keys.contains) assertEquals(shouldRemain, LogTestUtils.keysInLog(log)) } @@ -926,7 +939,7 @@ class LogCleanerTest extends Logging { val (log, offsetMap) = createLogWithMessagesLargerThanMaxSize(largeMessageSize = 1024 * 1024) val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) - cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) val shouldRemain = LogTestUtils.keysInLog(log).filter(k => !offsetMap.map.containsKey(k.toString)) assertEquals(shouldRemain, LogTestUtils.keysInLog(log)) } @@ -945,7 +958,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) assertThrows(classOf[CorruptRecordException], () => - cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) ) } @@ -962,19 +975,19 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue, maxMessageSize=1024) assertThrows(classOf[CorruptRecordException], () => - cleaner.cleanSegments(log, util.List.of(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats(Time.SYSTEM), new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) + cleaner.cleanSegments(log, Seq(log.logSegments.asScala.head), offsetMap, 0L, new CleanerStats, new CleanedTransactionMetadata, -1, log.logSegments.asScala.head.readNextOffset) ) } def createLogWithMessagesLargerThanMaxSize(largeMessageSize: Int): (UnifiedLog, FakeOffsetMap) = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, largeMessageSize * 16: java.lang.Integer) logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, largeMessageSize * 2: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) while (log.numberOfSegments < 2) - log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, Array.fill(largeMessageSize)(0: Byte)), leaderEpoch = 0) val keysFound = LogTestUtils.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) @@ -994,24 +1007,24 @@ class LogCleanerTest extends Logging { def testCleaningWithDeletes(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages with the keys 0 through N while (log.numberOfSegments < 2) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) // delete all even keys between 0 and N val leo = log.logEndOffset for (key <- 0 until leo.toInt by 2) - log.appendAsLeader(tombstoneRecord(key), 0) + log.appendAsLeader(tombstoneRecord(key), leaderEpoch = 0) // append some new unique keys to pad out to a new active segment while (log.numberOfSegments < 4) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) - cleaner.clean(new LogToClean(log, 0, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) val keys = LogTestUtils.keysInLog(log).toSet assertTrue((0 until leo.toInt by 2).forall(!keys.contains(_)), "None of the keys we deleted should still exist.") } @@ -1021,23 +1034,22 @@ class LogCleanerTest extends Logging { // because loadFactor is 0.75, this means we can fit 3 messages in the map val cleaner = makeCleaner(4) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) - log.appendAsLeader(record(0,0), 0) // offset 0 - log.appendAsLeader(record(1,1), 0) // offset 1 - log.appendAsLeader(record(0,0), 0) // offset 2 - log.appendAsLeader(record(1,1), 0) // offset 3 - log.appendAsLeader(record(0,0), 0) // offset 4 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() val initialLogSize = log.size - val endOffsetAndStats = cleaner.clean(new LogToClean(log, 2, log.activeSegment.baseOffset, false)) - val stats = endOffsetAndStats.getValue - assertEquals(5, endOffsetAndStats.getKey) + val (endOffset, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) + assertEquals(5, endOffset) assertEquals(5, stats.messagesRead) assertEquals(initialLogSize, stats.bytesRead) assertEquals(2, stats.messagesWritten) @@ -1050,19 +1062,19 @@ class LogCleanerTest extends Logging { def testLogCleanerRetainsProducerLastSequence(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) - log.appendAsLeader(record(0, 0), 0) // offset 0 - log.appendAsLeader(record(0, 1, producerId = 1, producerEpoch = 0, sequence = 0), 0) // offset 1 - log.appendAsLeader(record(0, 2, producerId = 2, producerEpoch = 0, sequence = 0), 0) // offset 2 - log.appendAsLeader(record(0, 3, producerId = 3, producerEpoch = 0, sequence = 0), 0) // offset 3 - log.appendAsLeader(record(1, 1, producerId = 2, producerEpoch = 0, sequence = 1), 0) // offset 4 + log.appendAsLeader(record(0, 0), leaderEpoch = 0) // offset 0 + log.appendAsLeader(record(0, 1, producerId = 1, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 1 + log.appendAsLeader(record(0, 2, producerId = 2, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 2 + log.appendAsLeader(record(0, 3, producerId = 3, producerEpoch = 0, sequence = 0), leaderEpoch = 0) // offset 3 + log.appendAsLeader(record(1, 1, producerId = 2, producerEpoch = 0, sequence = 1), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(1, 3, 4), lastOffsetsPerBatchInLog(log)) assertEquals(Map(1L -> 0, 2L -> 1, 3L -> 0), lastSequencesInLog(log)) assertEquals(List(0, 1), LogTestUtils.keysInLog(log)) @@ -1073,7 +1085,7 @@ class LogCleanerTest extends Logging { def testLogCleanerRetainsLastSequenceEvenIfTransactionAborted(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val producerEpoch = 0.toShort @@ -1082,10 +1094,10 @@ class LogCleanerTest extends Logging { appendProducer(Seq(1)) appendProducer(Seq(2, 3)) - log.appendAsLeader(abortMarker(producerId, producerEpoch), 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(abortMarker(producerId, producerEpoch), leaderEpoch = 0, origin = AppendOrigin.COORDINATOR) log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(2, 3), lastOffsetsPerBatchInLog(log)) assertEquals(Map(producerId -> 2), lastSequencesInLog(log)) assertEquals(List(), LogTestUtils.keysInLog(log)) @@ -1094,7 +1106,7 @@ class LogCleanerTest extends Logging { // Append a new entry from the producer and verify that the empty batch is cleaned up appendProducer(Seq(1, 5)) log.roll() - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(3, 5), lastOffsetsPerBatchInLog(log)) assertEquals(Map(producerId -> 4), lastSequencesInLog(log)) @@ -1107,7 +1119,7 @@ class LogCleanerTest extends Logging { def testCleaningWithKeysConflictingWithTxnMarkerKeys(): Unit = { val cleaner = makeCleaner(10) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val leaderEpoch = 5 val producerEpoch = 0.toShort @@ -1116,7 +1128,7 @@ class LogCleanerTest extends Logging { val producerId1 = 1L val appendProducer = appendTransactionalAsLeader(log, producerId1, producerEpoch, leaderEpoch) appendProducer(Seq(1)) - log.appendAsLeader(commitMarker(producerId1, producerEpoch), leaderEpoch, AppendOrigin.COORDINATOR) + log.appendAsLeader(commitMarker(producerId1, producerEpoch), leaderEpoch, origin = AppendOrigin.COORDINATOR) // Now we append one transaction with a key which conflicts with the COMMIT marker appended above def commitRecordKey(): ByteBuffer = { @@ -1135,13 +1147,13 @@ class LogCleanerTest extends Logging { 0, new SimpleRecord(time.milliseconds(), commitRecordKey(), ByteBuffer.wrap("foo".getBytes)) ) - log.appendAsLeader(records, leaderEpoch, AppendOrigin.CLIENT) - log.appendAsLeader(commitMarker(producerId2, producerEpoch), leaderEpoch, AppendOrigin.COORDINATOR) + log.appendAsLeader(records, leaderEpoch, origin = AppendOrigin.CLIENT) + log.appendAsLeader(commitMarker(producerId2, producerEpoch), leaderEpoch, origin = AppendOrigin.COORDINATOR) log.roll() assertEquals(List(0, 1, 2, 3), offsetsInLog(log)) // After cleaning, the marker should not be removed - cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0L, log.activeSegment.baseOffset)) assertEquals(List(0, 1, 2, 3), lastOffsetsPerBatchInLog(log)) assertEquals(List(0, 1, 2, 3), offsetsInLog(log)) } @@ -1151,29 +1163,29 @@ class LogCleanerTest extends Logging { // because loadFactor is 0.75, this means we can fit 1 message in the map val cleaner = makeCleaner(2) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) - log.appendAsLeader(record(0,0), 0) // offset 0 - log.appendAsLeader(record(1,1), 0) // offset 1 - log.appendAsLeader(record(0,0), 0) // offset 2 - log.appendAsLeader(record(1,1), 0) // offset 3 - log.appendAsLeader(record(0,0), 0) // offset 4 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 0 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 1 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 2 + log.appendAsLeader(record(1,1), leaderEpoch = 0) // offset 3 + log.appendAsLeader(record(0,0), leaderEpoch = 0) // offset 4 // roll the segment, so we can clean the messages already appended log.roll() // clean the log with only one message removed - cleaner.clean(new LogToClean(log, 2, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(List(1,0,1,0), LogTestUtils.keysInLog(log)) assertEquals(List(1,2,3,4), offsetsInLog(log)) // continue to make progress, even though we can only clean one message at a time - cleaner.clean(new LogToClean(log, 3, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 3, log.activeSegment.baseOffset)) assertEquals(List(0,1,0), LogTestUtils.keysInLog(log)) assertEquals(List(2,3,4), offsetsInLog(log)) - cleaner.clean(new LogToClean(log, 4, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 4, log.activeSegment.baseOffset)) assertEquals(List(1,0), LogTestUtils.keysInLog(log)) assertEquals(List(3,4), offsetsInLog(log)) } @@ -1182,7 +1194,7 @@ class LogCleanerTest extends Logging { def testCleaningWithUncleanableSection(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1193,31 +1205,31 @@ class LogCleanerTest extends Logging { // append messages with the keys 0 through N-1, values equal offset while (log.numberOfSegments <= numCleanableSegments) - log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // at this point one message past the cleanable segments has been added // the entire segment containing the first uncleanable offset should not be cleaned. val firstUncleanableOffset = log.logEndOffset + 1 // +1 so it is past the baseOffset while (log.numberOfSegments < numTotalSegments - 1) - log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt % N, log.logEndOffset.toInt), leaderEpoch = 0) // the last (active) segment has just one message def distinctValuesBySegment = log.logSegments.asScala.map(s => s.log.records.asScala.map(record => TestUtils.readString(record.value)).toSet.size).toSeq - val distinctValuesBySegmentBeforeClean = distinctValuesBySegment + val disctinctValuesBySegmentBeforeClean = distinctValuesBySegment assertTrue(distinctValuesBySegment.reverse.tail.forall(_ > N), "Test is not effective unless each segment contains duplicates. Increase segment size or decrease number of keys.") - cleaner.clean(new LogToClean(log, 0, firstUncleanableOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, firstUncleanableOffset)) val distinctValuesBySegmentAfterClean = distinctValuesBySegment - assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) + assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .take(numCleanableSegments).forall { case (before, after) => after < before }, "The cleanable segments should have fewer number of values after cleaning") - assertTrue(distinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) + assertTrue(disctinctValuesBySegmentBeforeClean.zip(distinctValuesBySegmentAfterClean) .slice(numCleanableSegments, numTotalSegments).forall { x => x._1 == x._2 }, "The uncleanable segments should have the same number of values after cleaning") } @@ -1225,15 +1237,15 @@ class LogCleanerTest extends Logging { def testLogToClean(): Unit = { // create a log with small segment size val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment - def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) + def createRecorcs = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecorcs, leaderEpoch = 0) - val logToClean = new LogToClean(log, log.activeSegment.baseOffset, log.activeSegment.baseOffset, false) + val logToClean = LogToClean(new TopicPartition("test", 0), log, log.activeSegment.baseOffset, log.activeSegment.baseOffset) assertEquals(logToClean.totalBytes, log.size - log.activeSegment.size, "Total bytes of LogToClean should equal size of all segments excluding the active segment") @@ -1243,17 +1255,17 @@ class LogCleanerTest extends Logging { def testLogToCleanWithUncleanableSection(): Unit = { // create a log with small segment size val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 100: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // create 6 segments with only one message in each segment def createRecords = TestUtils.singletonRecords(value = Array.fill[Byte](25)(0), key = 1.toString.getBytes) for (_ <- 0 until 6) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) // segments [0,1] are clean; segments [2, 3] are cleanable; segments [4,5] are uncleanable val segs = log.logSegments.asScala.toSeq - val logToClean = new LogToClean(log, segs(2).baseOffset, segs(4).baseOffset, false) + val logToClean = LogToClean(new TopicPartition("test", 0), log, segs(2).baseOffset, segs(4).baseOffset) val expectedCleanSize = segs.take(2).map(_.size).sum val expectedCleanableSize = segs.slice(2, 4).map(_.size).sum @@ -1276,24 +1288,24 @@ class LogCleanerTest extends Logging { // create a log with compaction turned off so we can append unkeyed messages val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append unkeyed messages while (log.numberOfSegments < 2) - log.appendAsLeader(unkeyedRecord(log.logEndOffset.toInt), 0) + log.appendAsLeader(unkeyedRecord(log.logEndOffset.toInt), leaderEpoch = 0) val numInvalidMessages = unkeyedMessageCountInLog(log) val sizeWithUnkeyedMessages = log.size // append keyed messages while (log.numberOfSegments < 3) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val expectedSizeAfterCleaning = log.size - sizeWithUnkeyedMessages - val stats = cleaner.clean(new LogToClean(log, 0, log.activeSegment.baseOffset, false)).getValue + val (_, stats) = cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) assertEquals(0, unkeyedMessageCountInLog(log), "Log should only contain keyed messages after cleaning.") assertEquals(expectedSizeAfterCleaning, log.size, "Log should only contain keyed messages after cleaning.") @@ -1334,20 +1346,20 @@ class LogCleanerTest extends Logging { def testCleanSegmentsWithAbort(): Unit = { val cleaner = makeCleaner(Int.MaxValue, abortCheckDone) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while (log.numberOfSegments < 4) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keys = LogTestUtils.keysInLog(log) val map = new FakeOffsetMap(Int.MaxValue) keys.foreach(k => map.put(key(k), Long.MaxValue)) val segments = log.logSegments.asScala.take(3).toSeq assertThrows(classOf[LogCleaningAbortedException], () => - cleaner.cleanSegments(log, segments.asJava, map, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, segments, map, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, segments.last.readNextOffset) ) } @@ -1356,13 +1368,13 @@ class LogCleanerTest extends Logging { def testCleanSegmentsRetainingLastEmptyBatch(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // append messages to the log until we have four segments while (log.numberOfSegments < 4) - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) val keysFound = LogTestUtils.keysInLog(log) assertEquals(0L until log.logEndOffset, keysFound) @@ -1372,8 +1384,8 @@ class LogCleanerTest extends Logging { // clean the log val segments = log.logSegments.asScala.take(3).toSeq - val stats = new CleanerStats(Time.SYSTEM) - cleaner.cleanSegments(log, segments.asJava, map, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) + val stats = new CleanerStats() + cleaner.cleanSegments(log, segments, map, 0L, stats, new CleanedTransactionMetadata, -1, segments.last.readNextOffset) assertEquals(2, log.logSegments.size) assertEquals(1, log.logSegments.asScala.head.log.batches.asScala.size, "one batch should be retained in the cleaned segment") val retainedBatch = log.logSegments.asScala.head.log.batches.asScala.head @@ -1388,7 +1400,7 @@ class LogCleanerTest extends Logging { def testSegmentGrouping(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) @@ -1396,39 +1408,39 @@ class LogCleanerTest extends Logging { // append some messages to the log var i = 0 while (log.numberOfSegments < 10) { - log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) i += 1 } // grouping by very large values should result in a single group with all the segments in it - var groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, log.logEndOffset) + var groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) - assertEquals(log.numberOfSegments, groups.get(0).size) + assertEquals(log.numberOfSegments, groups.head.size) checkSegmentOrder(groups) // grouping by very small values should result in all groups having one entry - groups = cleaner.groupSegmentsBySize(log.logSegments, 1, Int.MaxValue, log.logEndOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = 1, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) - assertTrue(groups.asScala.forall(_.size == 1), "All groups should be singletons.") + assertTrue(groups.forall(_.size == 1), "All groups should be singletons.") checkSegmentOrder(groups) - groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, 1, log.logEndOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = 1, log.logEndOffset) assertEquals(log.numberOfSegments, groups.size) - assertTrue(groups.asScala.forall(_.size == 1), "All groups should be singletons.") + assertTrue(groups.forall(_.size == 1), "All groups should be singletons.") checkSegmentOrder(groups) val groupSize = 3 // check grouping by log size - val logSize = log.logSegments.asScala.take(groupSize).map(_.size).sum + 1 - groups = cleaner.groupSegmentsBySize(log.logSegments, logSize, Int.MaxValue, log.logEndOffset) + val logSize = log.logSegments.asScala.take(groupSize).map(_.size).sum.toInt + 1 + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = logSize, maxIndexSize = Int.MaxValue, log.logEndOffset) checkSegmentOrder(groups) - assertTrue(groups.asScala.dropRight(1).forall(_.size == groupSize), "All but the last group should be the target size.") + assertTrue(groups.dropRight(1).forall(_.size == groupSize), "All but the last group should be the target size.") // check grouping by index size val indexSize = log.logSegments.asScala.take(groupSize).map(_.offsetIndex.sizeInBytes).sum + 1 - groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, indexSize, log.logEndOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = indexSize, log.logEndOffset) checkSegmentOrder(groups) - assertTrue(groups.asScala.dropRight(1).forall(_.size == groupSize), + assertTrue(groups.dropRight(1).forall(_.size == groupSize), "All but the last group should be the target size.") } @@ -1443,15 +1455,15 @@ class LogCleanerTest extends Logging { //create 3 segments for (i <- 0 until 3) { - log.appendAsLeader(TestUtils.singletonRecords(value = v, key = k), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = v, key = k), leaderEpoch = 0) //0 to Int.MaxValue is Int.MaxValue+1 message, -1 will be the last message of i-th segment val records = messageWithOffset(k, v, (i + 1L) * (Int.MaxValue + 1L) -1 ) - log.appendAsFollower(records, Int.MaxValue) + log.appendAsFollower(records) assertEquals(i + 1, log.numberOfSegments) } //4th active segment, not clean - log.appendAsLeader(TestUtils.singletonRecords(value = v, key = k), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = v, key = k), leaderEpoch = 0) val totalSegments = 4 //last segment not cleanable @@ -1459,22 +1471,22 @@ class LogCleanerTest extends Logging { val notCleanableSegments = 1 assertEquals(totalSegments, log.numberOfSegments) - var groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, firstUncleanableOffset) + var groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, firstUncleanableOffset) //because index file uses 4 byte relative index offset and current segments all none empty, //segments will not group even their size is very small. assertEquals(totalSegments - notCleanableSegments, groups.size) //do clean to clean first 2 segments to empty - cleaner.clean(new LogToClean(log, 0, firstUncleanableOffset, false)) + cleaner.clean(LogToClean(log.topicPartition, log, 0, firstUncleanableOffset)) assertEquals(totalSegments, log.numberOfSegments) assertEquals(0, log.logSegments.asScala.head.size) //after clean we got 2 empty segment, they will group together this time - groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, firstUncleanableOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, firstUncleanableOffset) val noneEmptySegment = 1 assertEquals(noneEmptySegment + 1, groups.size) //trigger a clean and 2 empty segments should cleaned to 1 - cleaner.clean(new LogToClean(log, 0, firstUncleanableOffset, false)) + cleaner.clean(LogToClean(log.topicPartition, log, 0, firstUncleanableOffset)) assertEquals(totalSegments - 1, log.numberOfSegments) } @@ -1489,41 +1501,41 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) // fill up first segment while (log.numberOfSegments == 1) - log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // forward offset and append message to next segment at offset Int.MaxValue val records = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue - 1) - log.appendAsFollower(records, Int.MaxValue) - log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), 0) + log.appendAsFollower(records) + log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) assertEquals(Int.MaxValue, log.activeSegment.offsetIndex.lastOffset) // grouping should result in a single group with maximum relative offset of Int.MaxValue - var groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, log.logEndOffset) + var groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(1, groups.size) // append another message, making last offset of second segment > Int.MaxValue - log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) // grouping should not group the two segments to ensure that maximum relative offset in each group <= Int.MaxValue - groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, log.logEndOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) checkSegmentOrder(groups) // append more messages, creating new segments, further grouping should still occur while (log.numberOfSegments < 4) - log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "hello".getBytes, key = "hello".getBytes), leaderEpoch = 0) - groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, log.logEndOffset) + groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(log.numberOfSegments - 1, groups.size) - for (group <- groups.asScala) - assertTrue(group.asScala.last.offsetIndex.lastOffset - group.asScala.head.offsetIndex.baseOffset <= Int.MaxValue, + for (group <- groups) + assertTrue(group.last.offsetIndex.lastOffset - group.head.offsetIndex.baseOffset <= Int.MaxValue, "Relative offset greater than Int.MaxValue") checkSegmentOrder(groups) } @@ -1541,7 +1553,7 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 400: java.lang.Integer) //mimic the effect of loading an empty index file logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 400: java.lang.Integer) @@ -1549,31 +1561,31 @@ class LogCleanerTest extends Logging { val log = makeLog(config = LogConfig.fromProps(logConfig.originals, logProps)) val record1 = messageWithOffset("hello".getBytes, "hello".getBytes, 0) - log.appendAsFollower(record1, Int.MaxValue) + log.appendAsFollower(record1) val record2 = messageWithOffset("hello".getBytes, "hello".getBytes, 1) - log.appendAsFollower(record2, Int.MaxValue) - log.roll(Optional.of(Int.MaxValue/2)) // starting a new log segment at offset Int.MaxValue/2 + log.appendAsFollower(record2) + log.roll(Some(Int.MaxValue/2)) // starting a new log segment at offset Int.MaxValue/2 val record3 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue/2) - log.appendAsFollower(record3, Int.MaxValue) + log.appendAsFollower(record3) val record4 = messageWithOffset("hello".getBytes, "hello".getBytes, Int.MaxValue.toLong + 1) - log.appendAsFollower(record4, Int.MaxValue) + log.appendAsFollower(record4) assertTrue(log.logEndOffset - 1 - log.logStartOffset > Int.MaxValue, "Actual offset range should be > Int.MaxValue") assertTrue(log.logSegments.asScala.last.offsetIndex.lastOffset - log.logStartOffset <= Int.MaxValue, "index.lastOffset is reporting the wrong last offset") // grouping should result in two groups because the second segment takes the offset range > MaxInt - val groups = cleaner.groupSegmentsBySize(log.logSegments, Int.MaxValue, Int.MaxValue, log.logEndOffset) + val groups = cleaner.groupSegmentsBySize(log.logSegments.asScala, maxSize = Int.MaxValue, maxIndexSize = Int.MaxValue, log.logEndOffset) assertEquals(2, groups.size) - for (group <- groups.asScala) - assertTrue(group.asScala.last.readNextOffset - 1 - group.asScala.head.baseOffset <= Int.MaxValue, + for (group <- groups) + assertTrue(group.last.readNextOffset - 1 - group.head.baseOffset <= Int.MaxValue, "Relative offset greater than Int.MaxValue") checkSegmentOrder(groups) } - private def checkSegmentOrder(groups: util.List[util.List[LogSegment]]): Unit = { - val offsets = groups.asScala.flatMap(_.asScala.map(_.baseOffset)) + private def checkSegmentOrder(groups: Seq[Seq[LogSegment]]): Unit = { + val offsets = groups.flatMap(_.map(_.baseOffset)) assertEquals(offsets.sorted, offsets, "Offsets should be in increasing order.") } @@ -1590,7 +1602,7 @@ class LogCleanerTest extends Logging { writeToLog(log, (start until end) zip (start until end)) def checkRange(map: FakeOffsetMap, start: Int, end: Int): Unit = { - val stats = new CleanerStats(Time.SYSTEM) + val stats = new CleanerStats() cleaner.buildOffsetMap(log, start, end, map, stats) val endOffset = map.latestOffset + 1 assertEquals(end, endOffset, "Last offset should be the end offset.") @@ -1636,7 +1648,7 @@ class LogCleanerTest extends Logging { // Try to clean segment with offset overflow. This will trigger log split and the cleaning itself must abort. assertThrows(classOf[LogCleaningAbortedException], () => - cleaner.cleanSegments(log, util.List.of(segmentWithOverflow), offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, Seq(segmentWithOverflow), offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, segmentWithOverflow.readNextOffset) ) assertEquals(numSegmentsInitial + 1, log.logSegments.size) @@ -1646,7 +1658,7 @@ class LogCleanerTest extends Logging { // Clean each segment now that split is complete. val upperBoundOffset = log.logSegments.asScala.last.readNextOffset for (segmentToClean <- log.logSegments.asScala) - cleaner.cleanSegments(log, util.List.of(segmentToClean), offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, List(segmentToClean), offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) assertEquals(expectedKeysAfterCleaning, LogTestUtils.keysInLog(log)) assertFalse(LogTestUtils.hasOffsetOverflow(log)) @@ -1666,7 +1678,7 @@ class LogCleanerTest extends Logging { def testRecoveryAfterCrash(): Unit = { val cleaner = makeCleaner(Int.MaxValue) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 300: java.lang.Integer) logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, 1: java.lang.Integer) logProps.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, 10: java.lang.Integer) @@ -1676,7 +1688,7 @@ class LogCleanerTest extends Logging { var log = makeLog(config = config) var messageCount = 0 while (log.numberOfSegments < 10) { - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } val allKeys = LogTestUtils.keysInLog(log) @@ -1689,7 +1701,7 @@ class LogCleanerTest extends Logging { val upperBoundOffset = log.activeSegment.baseOffset // clean the log - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1698,14 +1710,14 @@ class LogCleanerTest extends Logging { // 1) Simulate recovery just after .cleaned file is created, before rename to .swap // On recovery, clean operation is aborted. All messages should be present in the log - log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.CLEANED_FILE_SUFFIX) + log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.CleanedFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(Utils.replaceSuffix(file.getPath, LogFileUtils.DELETED_FILE_SUFFIX, "")), false) } log = recoverAndCheck(config, allKeys) // clean again - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1714,15 +1726,15 @@ class LogCleanerTest extends Logging { // 2) Simulate recovery just after .cleaned file is created, and a subset of them are renamed to .swap // On recovery, clean operation is aborted. All messages should be present in the log - log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.CLEANED_FILE_SUFFIX) - log.logSegments.asScala.head.log.renameTo(new File(Utils.replaceSuffix(log.logSegments.asScala.head.log.file.getPath, UnifiedLog.CLEANED_FILE_SUFFIX, UnifiedLog.SWAP_FILE_SUFFIX))) + log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.CleanedFileSuffix) + log.logSegments.asScala.head.log.renameTo(new File(Utils.replaceSuffix(log.logSegments.asScala.head.log.file.getPath, UnifiedLog.CleanedFileSuffix, UnifiedLog.SwapFileSuffix))) for (file <- dir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(Utils.replaceSuffix(file.getPath, LogFileUtils.DELETED_FILE_SUFFIX, "")), false) } log = recoverAndCheck(config, allKeys) // clean again - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1731,7 +1743,7 @@ class LogCleanerTest extends Logging { // 3) Simulate recovery just after swap file is created, before old segment files are // renamed to .deleted. Clean operation is resumed during recovery. - log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX) + log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.SwapFileSuffix) for (file <- dir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) { Utils.atomicMoveWithFallback(file.toPath, Paths.get(Utils.replaceSuffix(file.getPath, LogFileUtils.DELETED_FILE_SUFFIX, "")), false) } @@ -1739,12 +1751,12 @@ class LogCleanerTest extends Logging { // add some more messages and clean the log again while (log.numberOfSegments < 10) { - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1752,17 +1764,17 @@ class LogCleanerTest extends Logging { // 4) Simulate recovery after swap file is created and old segments files are renamed // to .deleted. Clean operation is resumed during recovery. - log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX) + log.logSegments.asScala.head.changeFileSuffixes("", UnifiedLog.SwapFileSuffix) log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while (log.numberOfSegments < 10) { - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1770,17 +1782,17 @@ class LogCleanerTest extends Logging { // 5) Simulate recovery after a subset of swap files are renamed to regular files and old segments files are renamed // to .deleted. Clean operation is resumed during recovery. - log.logSegments.asScala.head.timeIndex.file.renameTo(new File(Utils.replaceSuffix(log.logSegments.asScala.head.timeIndex.file.getPath, "", UnifiedLog.SWAP_FILE_SUFFIX))) + log.logSegments.asScala.head.timeIndex.file.renameTo(new File(Utils.replaceSuffix(log.logSegments.asScala.head.timeIndex.file.getPath, "", UnifiedLog.SwapFileSuffix))) log = recoverAndCheck(config, cleanedKeys) // add some more messages and clean the log again while (log.numberOfSegments < 10) { - log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), 0) + log.appendAsLeader(record(log.logEndOffset.toInt, log.logEndOffset.toInt), leaderEpoch = 0) messageCount += 1 } for (k <- 1 until messageCount by 2) offsetMap.put(key(k), Long.MaxValue) - cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq.asJava, offsetMap, 0L, new CleanerStats(Time.SYSTEM), + cleaner.cleanSegments(log, log.logSegments.asScala.take(9).toSeq, offsetMap, 0L, new CleanerStats(), new CleanedTransactionMetadata, -1, upperBoundOffset) // clear scheduler so that async deletes don't run time.scheduler.clear() @@ -1797,7 +1809,7 @@ class LogCleanerTest extends Logging { def testBuildOffsetMapFakeLarge(): Unit = { val map = new FakeOffsetMap(1000) val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 120: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 120: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 120: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) @@ -1809,7 +1821,7 @@ class LogCleanerTest extends Logging { val offsetEnd = 7206178L val offsetSeq = Seq(offsetStart, offsetEnd) writeToLog(log, (keyStart until keyEnd) zip (keyStart until keyEnd), offsetSeq) - cleaner.buildOffsetMap(log, keyStart, offsetEnd + 1L, map, new CleanerStats(Time.SYSTEM)) + cleaner.buildOffsetMap(log, keyStart, offsetEnd + 1L, map, new CleanerStats()) assertEquals(offsetEnd, map.latestOffset, "Last offset should be the end offset.") assertEquals(keyEnd - keyStart, map.size, "Should have the expected number of messages in the map.") assertEquals(0L, map.get(key(0)), "Map should contain first value") @@ -1826,14 +1838,14 @@ class LogCleanerTest extends Logging { val cleaner = makeCleaner(3) val map = cleaner.offsetMap - log.appendAsLeader(record(0,0), 0) - log.appendAsLeader(record(1,1), 0) - log.appendAsLeader(record(2,2), 0) - log.appendAsLeader(record(3,3), 0) - log.appendAsLeader(record(4,4), 0) + log.appendAsLeader(record(0,0), leaderEpoch = 0) + log.appendAsLeader(record(1,1), leaderEpoch = 0) + log.appendAsLeader(record(2,2), leaderEpoch = 0) + log.appendAsLeader(record(3,3), leaderEpoch = 0) + log.appendAsLeader(record(4,4), leaderEpoch = 0) log.roll() - val stats = new CleanerStats(Time.SYSTEM) + val stats = new CleanerStats() cleaner.buildOffsetMap(log, 2, Int.MaxValue, map, stats) assertEquals(2, map.size) assertEquals(-1, map.get(key(0))) @@ -1870,12 +1882,12 @@ class LogCleanerTest extends Logging { val noDupSetOffset = 50 val noDupSet = noDupSetKeys zip (noDupSetOffset until noDupSetOffset + noDupSetKeys.size) - log.appendAsFollower(invalidCleanedMessage(dupSetOffset, dupSet, codec), Int.MaxValue) - log.appendAsFollower(invalidCleanedMessage(noDupSetOffset, noDupSet, codec), Int.MaxValue) + log.appendAsFollower(invalidCleanedMessage(dupSetOffset, dupSet, codec)) + log.appendAsFollower(invalidCleanedMessage(noDupSetOffset, noDupSet, codec)) log.roll() - cleaner.clean(new LogToClean(log, 0, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) for (segment <- log.logSegments.asScala; batch <- segment.log.batches.asScala; record <- batch.asScala) { assertTrue(record.hasMagic(batch.magic)) @@ -1917,24 +1929,24 @@ class LogCleanerTest extends Logging { // Append a message with a large timestamp. log.appendAsLeader(TestUtils.singletonRecords(value = "0".getBytes, key = "0".getBytes, - timestamp = time.milliseconds() + logConfig.deleteRetentionMs + 10000), 0) + timestamp = time.milliseconds() + logConfig.deleteRetentionMs + 10000), leaderEpoch = 0) log.roll() - cleaner.clean(new LogToClean(log, 0, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 0, log.activeSegment.baseOffset)) // Append a tombstone with a small timestamp and roll out a new log segment. log.appendAsLeader(TestUtils.singletonRecords(value = null, key = "0".getBytes, - timestamp = time.milliseconds() - logConfig.deleteRetentionMs - 10000), 0) + timestamp = time.milliseconds() - logConfig.deleteRetentionMs - 10000), leaderEpoch = 0) log.roll() - cleaner.clean(new LogToClean(log, 1, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 1, log.activeSegment.baseOffset)) assertEquals(1, log.logSegments.asScala.head.log.batches.iterator.next().lastOffset, "The tombstone should be retained.") // Append a message and roll out another log segment. log.appendAsLeader(TestUtils.singletonRecords(value = "1".getBytes, key = "1".getBytes, - timestamp = time.milliseconds()), 0) + timestamp = time.milliseconds()), leaderEpoch = 0) log.roll() - cleaner.clean(new LogToClean(log, 2, log.activeSegment.baseOffset, false)) + cleaner.clean(LogToClean(new TopicPartition("test", 0), log, 2, log.activeSegment.baseOffset)) assertEquals(1, log.logSegments.asScala.head.log.batches.iterator.next().lastOffset, "The tombstone should be retained.") } @@ -1945,7 +1957,7 @@ class LogCleanerTest extends Logging { @Test def testCleaningBeyondMissingOffsets(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024*1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024*1024: java.lang.Integer) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val logConfig = new LogConfig(logProps) val cleaner = makeCleaner(Int.MaxValue) @@ -1954,12 +1966,12 @@ class LogCleanerTest extends Logging { val log = makeLog(dir = TestUtils.randomPartitionLogDir(tmpdir), config = logConfig) writeToLog(log, (0 to 9) zip (0 to 9), (0L to 9L)) // roll new segment with baseOffset 11, leaving previous with holes in offset range [9,10] - log.roll(Optional.of(11L)) + log.roll(Some(11L)) // active segment record - log.appendAsFollower(messageWithOffset(1015, 1015, 11L), Int.MaxValue) + log.appendAsFollower(messageWithOffset(1015, 1015, 11L)) - val nextDirtyOffset = cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, true)).getKey + val (nextDirtyOffset, _) = cleaner.clean(LogToClean(log.topicPartition, log, 0L, log.activeSegment.baseOffset, needCompactionNow = true)) assertEquals(log.activeSegment.baseOffset, nextDirtyOffset, "Cleaning point should pass offset gap") } @@ -1969,16 +1981,16 @@ class LogCleanerTest extends Logging { val log = makeLog(dir = TestUtils.randomPartitionLogDir(tmpdir), config = logConfig) writeToLog(log, (0 to 9) zip (0 to 9), (0L to 9L)) // roll new segment with baseOffset 15, leaving previous with holes in offset rage [10, 14] - log.roll(Optional.of(15L)) + log.roll(Some(15L)) writeToLog(log, (15 to 24) zip (15 to 24), (15L to 24L)) // roll new segment with baseOffset 30, leaving previous with holes in offset range [25, 29] - log.roll(Optional.of(30L)) + log.roll(Some(30L)) // active segment record - log.appendAsFollower(messageWithOffset(1015, 1015, 30L), Int.MaxValue) + log.appendAsFollower(messageWithOffset(1015, 1015, 30L)) - val nextDirtyOffset = cleaner.clean(new LogToClean(log, 0L, log.activeSegment.baseOffset, true)).getKey + val (nextDirtyOffset, _) = cleaner.clean(LogToClean(log.topicPartition, log, 0L, log.activeSegment.baseOffset, needCompactionNow = true)) assertEquals(log.activeSegment.baseOffset, nextDirtyOffset, "Cleaning point should pass offset gap in multiple segments") } @@ -1987,10 +1999,10 @@ class LogCleanerTest extends Logging { @Test def testMaxCleanTimeSecs(): Unit = { val logCleaner = new LogCleaner(new CleanerConfig(true), - util.List.of(TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time) + logDirs = Array(TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) def checkGauge(name: String): Unit = { val gauge = logCleaner.metricsGroup.newGauge(name, () => 999) @@ -2010,11 +2022,11 @@ class LogCleanerTest extends Logging { val oldKafkaProps = TestUtils.createBrokerConfig(1) oldKafkaProps.setProperty(CleanerConfig.LOG_CLEANER_IO_MAX_BYTES_PER_SECOND_PROP, "10000000") - val logCleaner = new LogCleaner(new CleanerConfig(new KafkaConfig(oldKafkaProps)), - util.List.of(TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time) { + val logCleaner = new LogCleaner(LogCleaner.cleanerConfig(new KafkaConfig(oldKafkaProps)), + logDirs = Array(TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) { // shutdown() and startup() are called in LogCleaner.reconfigure(). // Empty startup() and shutdown() to ensure that no unnecessary log cleaner threads remain after this test. override def startup(): Unit = {} @@ -2036,164 +2048,47 @@ class LogCleanerTest extends Logging { } @Test - def testMaxBufferUtilizationPercentMetric(): Unit = { - val logCleaner = new LogCleaner( - new CleanerConfig(true), - util.List.of(TestUtils.tempDir(), TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time - ) - - def assertMaxBufferUtilizationPercent(expected: Int): Unit = { - val gauge = logCleaner.metricsGroup.newGauge(LogCleaner.MAX_BUFFER_UTILIZATION_PERCENT_METRIC_NAME, - () => (logCleaner.maxOverCleanerThreads(_.lastStats.bufferUtilization) * 100).toInt) - assertEquals(expected, gauge.value()) - } - - try { - // No CleanerThreads - assertMaxBufferUtilizationPercent(0) - - val cleaners = logCleaner.cleaners - - val cleaner1 = new logCleaner.CleanerThread(1) - cleaner1.setLastStats(new CleanerStats(time)) - cleaner1.lastStats.setBufferUtilization(0.75) - cleaners.add(cleaner1) - - val cleaner2 = new logCleaner.CleanerThread(2) - cleaner2.setLastStats(new CleanerStats(time)) - cleaner2.lastStats.setBufferUtilization(0.85) - cleaners.add(cleaner2) - - val cleaner3 = new logCleaner.CleanerThread(3) - cleaner3.setLastStats(new CleanerStats(time)) - cleaner3.lastStats.setBufferUtilization(0.65) - cleaners.add(cleaner3) - - // expect the gauge value to reflect the maximum bufferUtilization - assertMaxBufferUtilizationPercent(85) - - // Update bufferUtilization and verify the gauge value updates - cleaner1.lastStats.setBufferUtilization(0.9) - assertMaxBufferUtilizationPercent(90) - - // All CleanerThreads have the same bufferUtilization - cleaners.forEach(_.lastStats.setBufferUtilization(0.5)) - assertMaxBufferUtilizationPercent(50) - } finally { - logCleaner.shutdown() - } - } - - @Test - def testMaxCleanTimeMetric(): Unit = { - val logCleaner = new LogCleaner( - new CleanerConfig(true), - util.List.of(TestUtils.tempDir(), TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time - ) - - def assertMaxCleanTime(expected: Int): Unit = { - val gauge = logCleaner.metricsGroup.newGauge(LogCleaner.MAX_CLEAN_TIME_METRIC_NAME, - () => logCleaner.maxOverCleanerThreads(_.lastStats.elapsedSecs).toInt) - assertEquals(expected, gauge.value()) - } - - try { - // No CleanerThreads - assertMaxCleanTime(0) - - val cleaners = logCleaner.cleaners - - val cleaner1 = new logCleaner.CleanerThread(1) - cleaner1.setLastStats(new CleanerStats(time)) - cleaner1.lastStats.setEndTime(cleaner1.lastStats.startTime + 1_000L) - cleaners.add(cleaner1) - - val cleaner2 = new logCleaner.CleanerThread(2) - cleaner2.setLastStats(new CleanerStats(time)) - cleaner2.lastStats.setEndTime(cleaner2.lastStats.startTime + 2_000L) - cleaners.add(cleaner2) - - val cleaner3 = new logCleaner.CleanerThread(3) - cleaner3.setLastStats(new CleanerStats(time)) - cleaner3.lastStats.setEndTime(cleaner3.lastStats.startTime + 3_000L) - cleaners.add(cleaner3) - - // expect the gauge value to reflect the maximum cleanTime - assertMaxCleanTime(3) - - // Update cleanTime and verify the gauge value updates - cleaner1.lastStats.setEndTime(cleaner1.lastStats.startTime + 4_000L) - assertMaxCleanTime(4) - - // All CleanerThreads have the same cleanTime - cleaners.forEach(cleaner => cleaner.lastStats.setEndTime(cleaner.lastStats.startTime + 1_500L)) - assertMaxCleanTime(1) - } finally { - logCleaner.shutdown() - } - } - - @Test - def testMaxCompactionDelayMetrics(): Unit = { - val logCleaner = new LogCleaner( - new CleanerConfig(true), - util.List.of(TestUtils.tempDir(), TestUtils.tempDir()), - new util.concurrent.ConcurrentHashMap[TopicPartition, UnifiedLog](), - new LogDirFailureChannel(1), - time - ) - - def assertMaxCompactionDelay(expected: Int): Unit = { - val gauge = logCleaner.metricsGroup.newGauge(LogCleaner.MAX_COMPACTION_DELAY_METRICS_NAME, - () => (logCleaner.maxOverCleanerThreads(_.lastPreCleanStats.maxCompactionDelayMs.toDouble) / 1000).toInt) - assertEquals(expected, gauge.value()) - } + def testMaxOverCleanerThreads(): Unit = { + val logCleaner = new LogCleaner(new CleanerConfig(true), + logDirs = Array(TestUtils.tempDir(), TestUtils.tempDir()), + logs = new Pool[TopicPartition, UnifiedLog](), + logDirFailureChannel = new LogDirFailureChannel(1), + time = time) - try { - // No CleanerThreads - assertMaxCompactionDelay(0) + val cleaners = logCleaner.cleaners - val cleaners = logCleaner.cleaners + val cleaner1 = new logCleaner.CleanerThread(1) + cleaner1.lastStats = new CleanerStats(time) + cleaner1.lastStats.bufferUtilization = 0.75 + cleaners += cleaner1 - val cleaner1 = new logCleaner.CleanerThread(1) - cleaner1.setLastStats(new CleanerStats(time)) - cleaner1.lastPreCleanStats.maxCompactionDelayMs(1_000L) - cleaners.add(cleaner1) + val cleaner2 = new logCleaner.CleanerThread(2) + cleaner2.lastStats = new CleanerStats(time) + cleaner2.lastStats.bufferUtilization = 0.85 + cleaners += cleaner2 - val cleaner2 = new logCleaner.CleanerThread(2) - cleaner2.setLastStats(new CleanerStats(time)) - cleaner2.lastPreCleanStats.maxCompactionDelayMs(2_000L) - cleaners.add(cleaner2) + val cleaner3 = new logCleaner.CleanerThread(3) + cleaner3.lastStats = new CleanerStats(time) + cleaner3.lastStats.bufferUtilization = 0.65 + cleaners += cleaner3 - val cleaner3 = new logCleaner.CleanerThread(3) - cleaner3.setLastStats(new CleanerStats(time)) - cleaner3.lastPreCleanStats.maxCompactionDelayMs(3_000L) - cleaners.add(cleaner3) + assertEquals(0, logCleaner.maxOverCleanerThreads(_.lastStats.bufferUtilization)) - // expect the gauge value to reflect the maximum CompactionDelay - assertMaxCompactionDelay(3) + cleaners.clear() - // Update CompactionDelay and verify the gauge value updates - cleaner1.lastPreCleanStats.maxCompactionDelayMs(4_000L) - assertMaxCompactionDelay(4) + cleaner1.lastStats.bufferUtilization = 5d + cleaners += cleaner1 + cleaner2.lastStats.bufferUtilization = 6d + cleaners += cleaner2 + cleaner3.lastStats.bufferUtilization = 7d + cleaners += cleaner3 - // All CleanerThreads have the same CompactionDelay - cleaners.forEach(_.lastPreCleanStats.maxCompactionDelayMs(1_500L)) - assertMaxCompactionDelay(1) - } finally { - logCleaner.shutdown() - } + assertEquals(7, logCleaner.maxOverCleanerThreads(_.lastStats.bufferUtilization)) } private def writeToLog(log: UnifiedLog, keysAndValues: Iterable[(Int, Int)], offsetSeq: Iterable[Long]): Iterable[Long] = { for (((key, value), offset) <- keysAndValues.zip(offsetSeq)) - yield log.appendAsFollower(messageWithOffset(key, value, offset), Int.MaxValue).lastOffset + yield log.appendAsFollower(messageWithOffset(key, value, offset)).lastOffset } private def invalidCleanedMessage(initialOffset: Long, @@ -2227,37 +2122,37 @@ class LogCleanerTest extends Logging { private def messageWithOffset(key: Int, value: Int, offset: Long): MemoryRecords = messageWithOffset(key.toString.getBytes, value.toString.getBytes, offset) - private def makeLog(dir: File = dir, config: LogConfig = logConfig, recoveryPoint: Long = 0L): UnifiedLog = { - UnifiedLog.create( - dir, - config, - 0L, - recoveryPoint, - time.scheduler, - new BrokerTopicStats, - time, - 5 * 60 * 1000, - producerStateManagerConfig, - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty + private def makeLog(dir: File = dir, config: LogConfig = logConfig, recoveryPoint: Long = 0L) = { + UnifiedLog( + dir = dir, + config = config, + logStartOffset = 0L, + recoveryPoint = recoveryPoint, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true ) } - private def makeCleaner(capacity: Int, checkDone: Consumer[TopicPartition] = _ => (), maxMessageSize: Int = 64*1024) = - new Cleaner(0, - new FakeOffsetMap(capacity), - maxMessageSize, - maxMessageSize, - 0.75, - throttler, - time, - checkDone) + private def makeCleaner(capacity: Int, checkDone: TopicPartition => Unit = _ => (), maxMessageSize: Int = 64*1024) = + new Cleaner(id = 0, + offsetMap = new FakeOffsetMap(capacity), + ioBufferSize = maxMessageSize, + maxIoBufferSize = maxMessageSize, + dupBufferLoadFactor = 0.75, + throttler = throttler, + time = time, + checkDone = checkDone) private def writeToLog(log: UnifiedLog, seq: Iterable[(Int, Int)]): Iterable[Long] = { for ((key, value) <- seq) - yield log.appendAsLeader(record(key, value), 0).firstOffset + yield log.appendAsLeader(record(key, value), leaderEpoch = 0).firstOffset } private def key(id: Long) = ByteBuffer.wrap(id.toString.getBytes) @@ -2346,7 +2241,7 @@ class LogCleanerTest extends Logging { private def runTwoPassClean(cleaner: Cleaner, logToClean: LogToClean, currentTime: Long, tombstoneRetentionMs: Long = 86400000) : Long = { cleaner.doClean(logToClean, currentTime) - cleaner.doClean(logToClean, currentTime + tombstoneRetentionMs + 1).getKey + cleaner.doClean(logToClean, currentTime + tombstoneRetentionMs + 1)._1 } } diff --git a/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala b/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala new file mode 100644 index 0000000000000..d6d2b0665064b --- /dev/null +++ b/core/src/test/scala/unit/kafka/log/LogConcurrencyTest.scala @@ -0,0 +1,191 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.log + +import java.util.Properties +import java.util.concurrent.{Callable, Executors} +import kafka.utils.TestUtils +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.record.SimpleRecord +import org.apache.kafka.common.utils.{Time, Utils} +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.server.storage.log.FetchIsolation +import org.apache.kafka.server.util.KafkaScheduler +import org.apache.kafka.storage.internals.log.{LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} +import org.apache.kafka.storage.log.metrics.BrokerTopicStats +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} + +import scala.collection.mutable.ListBuffer +import scala.util.Random + +class LogConcurrencyTest { + private val brokerTopicStats = new BrokerTopicStats + private val random = new Random() + private val scheduler = new KafkaScheduler(1) + private val tmpDir = TestUtils.tempDir() + private val logDir = TestUtils.randomPartitionLogDir(tmpDir) + + @BeforeEach + def setup(): Unit = { + scheduler.startup() + } + + @AfterEach + def shutdown(): Unit = { + scheduler.shutdown() + Utils.delete(tmpDir) + } + + @Test + def testUncommittedDataNotConsumed(): Unit = { + testUncommittedDataNotConsumed(createLog()) + } + + @Test + def testUncommittedDataNotConsumedFrequentSegmentRolls(): Unit = { + val logProps = new Properties() + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 237: Integer) + val logConfig = new LogConfig(logProps) + testUncommittedDataNotConsumed(createLog(logConfig)) + } + + def testUncommittedDataNotConsumed(log: UnifiedLog): Unit = { + val executor = Executors.newFixedThreadPool(2) + try { + val maxOffset = 5000 + val consumer = new ConsumerTask(log, maxOffset) + val appendTask = new LogAppendTask(log, maxOffset) + + val consumerFuture = executor.submit(consumer) + val fetcherTaskFuture = executor.submit(appendTask) + + fetcherTaskFuture.get() + consumerFuture.get() + + validateConsumedData(log, consumer.consumedBatches) + } finally executor.shutdownNow() + } + + /** + * Simple consumption task which reads the log in ascending order and collects + * consumed batches for validation + */ + private class ConsumerTask(log: UnifiedLog, lastOffset: Int) extends Callable[Unit] { + val consumedBatches = ListBuffer.empty[FetchedBatch] + + override def call(): Unit = { + var fetchOffset = 0L + while (log.highWatermark < lastOffset) { + val readInfo = log.read( + startOffset = fetchOffset, + maxLength = 1, + isolation = FetchIsolation.HIGH_WATERMARK, + minOneMessage = true + ) + readInfo.records.batches().forEach { batch => + consumedBatches += FetchedBatch(batch.baseOffset, batch.partitionLeaderEpoch) + fetchOffset = batch.lastOffset + 1 + } + } + } + } + + /** + * This class simulates basic leader/follower behavior. + */ + private class LogAppendTask(log: UnifiedLog, lastOffset: Long) extends Callable[Unit] { + override def call(): Unit = { + var leaderEpoch = 1 + var isLeader = true + + while (log.highWatermark < lastOffset) { + random.nextInt(2) match { + case 0 => + val logEndOffsetMetadata = log.logEndOffsetMetadata + val logEndOffset = logEndOffsetMetadata.messageOffset + val batchSize = random.nextInt(9) + 1 + val records = (0 to batchSize).map(i => new SimpleRecord(s"$i".getBytes)) + + if (isLeader) { + log.appendAsLeader(TestUtils.records(records), leaderEpoch) + log.maybeIncrementHighWatermark(logEndOffsetMetadata) + } else { + log.appendAsFollower(TestUtils.records(records, + baseOffset = logEndOffset, + partitionLeaderEpoch = leaderEpoch)) + log.updateHighWatermark(logEndOffset) + } + + case 1 => + isLeader = !isLeader + leaderEpoch += 1 + + if (!isLeader) { + log.truncateTo(log.highWatermark) + } + } + } + } + } + + private def createLog(config: LogConfig = new LogConfig(new Properties())): UnifiedLog = { + UnifiedLog(dir = logDir, + config = config, + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = scheduler, + brokerTopicStats = brokerTopicStats, + time = Time.SYSTEM, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true + ) + } + + private def validateConsumedData(log: UnifiedLog, consumedBatches: Iterable[FetchedBatch]): Unit = { + val iter = consumedBatches.iterator + log.logSegments.forEach { segment => + segment.log.batches.forEach { batch => + if (iter.hasNext) { + val consumedBatch = iter.next() + try { + assertEquals(batch.partitionLeaderEpoch, + consumedBatch.epoch, "Consumed batch with unexpected leader epoch") + assertEquals(batch.baseOffset, + consumedBatch.baseOffset, "Consumed batch with unexpected base offset") + } catch { + case t: Throwable => + throw new AssertionError(s"Consumed batch $consumedBatch " + + s"does not match next expected batch in log $batch", t) + } + } + } + } + } + + private case class FetchedBatch(baseOffset: Long, epoch: Int) { + override def toString: String = { + s"FetchedBatch(baseOffset=$baseOffset, epoch=$epoch)" + } + } + +} diff --git a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala index e23e16fa40a5f..1e26d653bbccf 100644 --- a/core/src/test/scala/unit/kafka/log/LogConfigTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogConfigTest.scala @@ -27,16 +27,36 @@ import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test -import java.util -import java.util.Properties +import java.util.{Collections, Properties} import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.storage.internals.log.{LogConfig, ThrottledReplicaListValidator} import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource +import scala.jdk.CollectionConverters._ + class LogConfigTest { + /** + * This test verifies that KafkaConfig object initialization does not depend on + * LogConfig initialization. Bad things happen due to static initialization + * order dependencies. For example, LogConfig.configDef ends up adding null + * values in serverDefaultConfigNames. This test ensures that the mapping of + * keys from LogConfig to KafkaConfig are not missing values. + */ + @Test + def ensureNoStaticInitializationOrderDependency(): Unit = { + // Access any KafkaConfig val to load KafkaConfig object before LogConfig. + assertNotNull(ServerLogConfigs.LOG_RETENTION_TIME_MILLIS_CONFIG) + assertTrue(LogConfig.configNames.asScala + .filter(config => !LogConfig.CONFIGS_WITH_NO_SERVER_DEFAULTS.contains(config)) + .forall { config => + val serverConfigOpt = LogConfig.serverConfigName(config) + serverConfigOpt.isPresent && (serverConfigOpt.get != null) + }) + } + @Test def testKafkaConfigToProps(): Unit = { val millisInHour = 60L * 60L * 1000L @@ -74,7 +94,6 @@ class LogConfigTest { case TopicConfig.COMPRESSION_ZSTD_LEVEL_CONFIG => assertPropertyInvalid(name, "not_a_number", "-0.1") case TopicConfig.REMOTE_LOG_COPY_DISABLE_CONFIG => assertPropertyInvalid(name, "not_a_number", "remove", "0") case TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG => assertPropertyInvalid(name, "not_a_number", "remove", "0") - case LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG => // no op case _ => assertPropertyInvalid(name, "not_a_number", "-1") }) @@ -123,7 +142,7 @@ class LogConfigTest { /* Sanity check that toHtml produces one of the expected configs */ @Test def testToHtml(): Unit = { - val html = LogConfig.configDefCopy.toHtml(4, (key: String) => "prefix_" + key, util.Map.of) + val html = LogConfig.configDefCopy.toHtml(4, (key: String) => "prefix_" + key, Collections.emptyMap()) val expectedConfig = "

          file.delete.delay.ms

          " assertTrue(html.contains(expectedConfig), s"Could not find `$expectedConfig` in:\n $html") } @@ -274,31 +293,29 @@ class LogConfigTest { props.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs.toString) props.put(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG, localRetentionBytes.toString) assertThrows(classOf[ConfigException], - () => LogConfig.validate(util.Map.of, props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validate(Collections.emptyMap(), props, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) } @Test - def testEnableRemoteLogStorageCleanupPolicy(): Unit = { + def testEnableRemoteLogStorageOnCompactedTopic(): Unit = { val kafkaProps = TestUtils.createDummyBrokerConfig() kafkaProps.put(RemoteLogManagerConfig.REMOTE_LOG_STORAGE_SYSTEM_ENABLE_PROP, "true") val kafkaConfig = KafkaConfig.fromProps(kafkaProps) + val logProps = new Properties() - def validateCleanupPolicy(): Unit = { - LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) - } logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_DELETE) logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") - validateCleanupPolicy() + LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) - assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) + assertThrows(classOf[ConfigException], + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete,compact") - assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) + assertThrows(classOf[ConfigException], + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "compact,delete") - assertThrows(classOf[ConfigException], () => validateCleanupPolicy()) - logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "delete") - validateCleanupPolicy() - logProps.put(TopicConfig.CLEANUP_POLICY_CONFIG, "") - validateCleanupPolicy() + assertThrows(classOf[ConfigException], + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) } @ParameterizedTest(name = "testEnableRemoteLogStorage with sysRemoteStorageEnabled: {0}") @@ -311,10 +328,10 @@ class LogConfigTest { val logProps = new Properties() logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") if (sysRemoteStorageEnabled) { - LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } else { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) assertTrue(message.getMessage.contains("Tiered Storage functionality is disabled in the broker")) } } @@ -330,8 +347,8 @@ class LogConfigTest { logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false") if (wasRemoteStorageEnabled) { val message = assertThrows(classOf[InvalidConfigurationException], - () => LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) assertTrue(message.getMessage.contains("It is invalid to disable remote storage without deleting remote data. " + "If you want to keep the remote data and turn to read only, please set `remote.storage.enable=true,remote.log.copy.disable=true`. " + "If you want to disable remote storage and delete all remote data, please set `remote.storage.enable=false,remote.log.delete.on.disable=true`.")) @@ -339,12 +356,12 @@ class LogConfigTest { // It should be able to disable the remote log storage when delete on disable is set to true logProps.put(TopicConfig.REMOTE_LOG_DELETE_ON_DISABLE_CONFIG, "true") - LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), - logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true"), + logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } else { - LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) - LogConfig.validate(util.Map.of(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), logProps, - kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) + LogConfig.validate(Collections.singletonMap(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "false"), logProps, + kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } } @@ -363,12 +380,12 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_MS_CONFIG, "500") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG)) } else { - LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } } @@ -387,12 +404,12 @@ class LogConfigTest { logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, "128") if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validate(util.Map.of, logProps, kafkaConfig.extractLogConfigMap, - kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validate(Collections.emptyMap(), logProps, kafkaConfig.extractLogConfigMap, + kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } } @@ -407,10 +424,10 @@ class LogConfigTest { if (sysRemoteStorageEnabled) { val message = assertThrows(classOf[ConfigException], - () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled)) + () => LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled())) assertTrue(message.getMessage.contains(TopicConfig.LOCAL_LOG_RETENTION_BYTES_CONFIG)) } else { - LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + LogConfig.validateBrokerLogConfigValues(kafkaConfig.extractLogConfigMap, kafkaConfig.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) } } diff --git a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala index c1d611ce6dc43..b327c3b39e70d 100644 --- a/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogLoaderTest.scala @@ -18,17 +18,18 @@ package kafka.log import kafka.server.KafkaConfig +import kafka.server.metadata.MockConfigRepository import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression +import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.record.{ControlRecordType, DefaultRecordBatch, MemoryRecords, RecordBatch, SimpleRecord, TimestampType} import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.MockConfigRepository import org.apache.kafka.server.util.{MockTime, Scheduler} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, EpochEntry, LocalLog, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetIndex, ProducerStateManager, ProducerStateManagerConfig, SnapshotFile, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AbortedTxn, CleanerConfig, EpochEntry, LocalLog, LogConfig, LogDirFailureChannel, LogFileUtils, LogLoader, LogOffsetMetadata, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetIndex, ProducerStateManager, ProducerStateManagerConfig, SnapshotFile} import org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions.{assertDoesNotThrow, assertEquals, assertFalse, assertNotEquals, assertThrows, assertTrue} @@ -125,7 +126,8 @@ class LogLoaderTest { brokerTopicStats = new BrokerTopicStats(), logDirFailureChannel = logDirFailureChannel, time = time, - remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled, + keepPartitionMetadataFile = true, + remoteStorageSystemEnable = config.remoteLogManagerConfig.isRemoteStorageSystemEnabled(), initialTaskDelayMs = config.logInitialTaskDelayMs) { override def loadLog(logDir: File, hadCleanShutdown: Boolean, recoveryPoints: util.Map[TopicPartition, JLong], @@ -152,7 +154,7 @@ class LogLoaderTest { val logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(1) val segments = new LogSegments(topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, time.scheduler) + logDir, topicPartition, logDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, logDir, this.maxTransactionTimeoutMs, this.producerStateManagerConfig, time) val logLoader = new LogLoader(logDir, topicPartition, config, time.scheduler, time, @@ -164,7 +166,7 @@ class LogLoaderTest { logDirFailureChannel) new UnifiedLog(offsets.logStartOffset, localLog, brokerTopicStats, this.producerIdExpirationCheckIntervalMs, leaderEpochCache, - producerStateManager, Optional.empty, true, LogOffsetsListener.NO_OP_OFFSETS_LISTENER) + producerStateManager, None, true) } } } @@ -172,7 +174,7 @@ class LogLoaderTest { def initializeLogManagerForSimulatingErrorTest(logDirFailureChannel: LogDirFailureChannel = new LogDirFailureChannel(logDirs.size) ): (LogManager, Executable) = { val logManager: LogManager = interceptedLogManager(logConfig, logDirs, logDirFailureChannel) - log = logManager.getOrCreateLog(topicPartition, isNew = true, topicId = Optional.empty) + log = logManager.getOrCreateLog(topicPartition, isNew = true, topicId = None) assertFalse(logDirFailureChannel.hasOfflineLogDir(logDir.getAbsolutePath), "log dir should not be offline before load logs") @@ -244,14 +246,14 @@ class LogLoaderTest { @Test def testProducerSnapshotsRecoveryAfterUncleanShutdown(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "640") + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "640") val logConfig = new LogConfig(logProps) var log = createLog(logDir, logConfig) assertEquals(OptionalLong.empty(), log.oldestProducerSnapshotOffset) for (i <- 0 to 100) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) } assertTrue(log.logSegments.size >= 5) @@ -286,7 +288,7 @@ class LogLoaderTest { val wrapper = Mockito.spy(segment) Mockito.doAnswer { in => segmentsWithReads += wrapper - segment.read(in.getArgument(0, classOf[java.lang.Long]), in.getArgument(1, classOf[java.lang.Integer]), in.getArgument(2, classOf[util.Optional[java.lang.Long]]), in.getArgument(3, classOf[java.lang.Boolean])) + segment.read(in.getArgument(0, classOf[java.lang.Long]), in.getArgument(1, classOf[java.lang.Integer]), in.getArgument(2, classOf[java.util.Optional[java.lang.Long]]), in.getArgument(3, classOf[java.lang.Boolean])) }.when(wrapper).read(ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any(), ArgumentMatchers.any()) Mockito.doAnswer { in => recoveredSegments += wrapper @@ -296,7 +298,7 @@ class LogLoaderTest { } } val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val producerStateManager = new ProducerStateManager(topicPartition, logDir, maxTransactionTimeoutMs, producerStateManagerConfig, mockTime) val logLoader = new LogLoader( @@ -321,7 +323,7 @@ class LogLoaderTest { logDirFailureChannel) new UnifiedLog(offsets.logStartOffset, localLog, brokerTopicStats, producerIdExpirationCheckIntervalMs, leaderEpochCache, producerStateManager, - Optional.empty, false, LogOffsetsListener.NO_OP_OFFSETS_LISTENER) + None, keepPartitionMetadataFile = true) } // Retain snapshots for the last 2 segments @@ -391,12 +393,12 @@ class LogLoaderTest { codec: Compression = Compression.NONE, timestamp: Long = RecordBatch.NO_TIMESTAMP, magicValue: Byte = RecordBatch.CURRENT_MAGIC_VALUE): MemoryRecords = { - val records = util.List.of(new SimpleRecord(timestamp, key, value)) + val records = Seq(new SimpleRecord(timestamp, key, value)) - val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records)) + val buf = ByteBuffer.allocate(DefaultRecordBatch.sizeInBytes(records.asJava)) val builder = MemoryRecords.builder(buf, magicValue, codec, TimestampType.CREATE_TIME, offset, mockTime.milliseconds, leaderEpoch) - records.forEach(builder.append) + records.foreach(builder.append) builder.build() } @@ -418,7 +420,7 @@ class LogLoaderTest { val config = new LogConfig(new Properties()) val segments = new LogSegments(topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, @@ -440,13 +442,12 @@ class LogLoaderTest { logDirFailureChannel) val log = new UnifiedLog(offsets.logStartOffset, localLog, - brokerTopicStats, - 30000, - leaderEpochCache, - stateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) + brokerTopicStats = brokerTopicStats, + producerIdExpirationCheckIntervalMs = 30000, + leaderEpochCache = leaderEpochCache, + producerStateManager = stateManager, + _topicId = None, + keepPartitionMetadataFile = true) verify(stateManager).updateMapEndOffset(0L) verify(stateManager).removeStraySnapshots(any()) @@ -457,8 +458,8 @@ class LogLoaderTest { reset(stateManager) when(stateManager.firstUnstableOffset).thenReturn(Optional.empty[LogOffsetMetadata]()) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), 0) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), leaderEpoch = 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), leaderEpoch = 0) verify(stateManager).updateMapEndOffset(1L) verify(stateManager).updateMapEndOffset(2L) @@ -529,7 +530,7 @@ class LogLoaderTest { val logDirFailureChannel = null val segments = new LogSegments(topicPartition) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, @@ -551,15 +552,14 @@ class LogLoaderTest { logDirFailureChannel) new UnifiedLog(offsets.logStartOffset, localLog, - brokerTopicStats, - 30000, - leaderEpochCache, - stateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) - - verify(stateManager).removeStraySnapshots(any[util.List[java.lang.Long]]) + brokerTopicStats = brokerTopicStats, + producerIdExpirationCheckIntervalMs = 30000, + leaderEpochCache = leaderEpochCache, + producerStateManager = stateManager, + _topicId = None, + keepPartitionMetadataFile = true) + + verify(stateManager).removeStraySnapshots(any[java.util.List[java.lang.Long]]) verify(stateManager, times(2)).updateMapEndOffset(0L) verify(stateManager, times(2)).takeSnapshot() verify(stateManager).isEmpty @@ -575,9 +575,9 @@ class LogLoaderTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) assertEquals(2, log.activeProducersWithLastSequence.size) log.updateHighWatermark(log.logEndOffset) @@ -585,16 +585,17 @@ class LogLoaderTest { // Deleting records should not remove producer state assertEquals(2, log.activeProducersWithLastSequence.size) - val retainedLastSeq = log.activeProducersWithLastSequence.get(pid2) - assertEquals(0, retainedLastSeq) + val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2) + assertTrue(retainedLastSeqOpt.isDefined) + assertEquals(0, retainedLastSeqOpt.get) log.close() // Because the log start offset did not advance, producer snapshots will still be present and the state will be rebuilt val reloadedLog = createLog(logDir, logConfig, logStartOffset = 1L, lastShutdownClean = false) assertEquals(2, reloadedLog.activeProducersWithLastSequence.size) - val reloadedLastSeq = log.activeProducersWithLastSequence.get(pid2) - assertEquals(retainedLastSeq, reloadedLastSeq) + val reloadedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2) + assertEquals(retainedLastSeqOpt, reloadedLastSeqOpt) } @Test @@ -604,13 +605,13 @@ class LogLoaderTest { val pid1 = 1L val epoch = 0.toShort - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 0), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 1), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 1), leaderEpoch = 0) log.roll() - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 2), 0) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("d".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 3), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 2), leaderEpoch = 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("d".getBytes)), producerId = pid1, producerEpoch = epoch, sequence = 3), leaderEpoch = 0) // Close the log, we should now have 3 segments log.close() @@ -636,10 +637,10 @@ class LogLoaderTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) assertEquals(2, log.logSegments.size) assertEquals(2, log.activeProducersWithLastSequence.size) @@ -651,16 +652,17 @@ class LogLoaderTest { // Deleting records should not remove producer state assertEquals(1, log.logSegments.size) assertEquals(2, log.activeProducersWithLastSequence.size) - val retainedLastSeq = log.activeProducersWithLastSequence.get(pid2) - assertEquals(0, retainedLastSeq) + val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2) + assertTrue(retainedLastSeqOpt.isDefined) + assertEquals(0, retainedLastSeqOpt.get) log.close() // After reloading log, producer state should not be regenerated val reloadedLog = createLog(logDir, logConfig, logStartOffset = 1L, lastShutdownClean = false) assertEquals(1, reloadedLog.activeProducersWithLastSequence.size) - val reloadedEntry = log.activeProducersWithLastSequence.get(pid2) - assertEquals(retainedLastSeq, reloadedEntry) + val reloadedEntryOpt = log.activeProducersWithLastSequence.get(pid2) + assertEquals(retainedLastSeqOpt, reloadedEntryOpt) } /** @@ -676,7 +678,7 @@ class LogLoaderTest { var log = createLog(logDir, logConfig) for (i <- 0 until numMessages) log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(messageSize), - timestamp = mockTime.milliseconds + i * 10), 0) + timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0) assertEquals(numMessages, log.logEndOffset, "After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages)) val lastIndexOffset = log.activeSegment.offsetIndex.lastOffset @@ -725,7 +727,7 @@ class LogLoaderTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) var log = createLog(logDir, logConfig) for (i <- 0 until numMessages) - log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0) val indexFiles = log.logSegments.asScala.map(_.offsetIndexFile) val timeIndexFiles = log.logSegments.asScala.map(_.timeIndexFile) log.close() @@ -743,10 +745,10 @@ class LogLoaderTest { assertEquals(i, LogTestUtils.readLog(log, i, 100).records.batches.iterator.next().lastOffset) if (i == 0) assertEquals(log.logSegments.asScala.head.baseOffset, - log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10, Optional.empty).timestampAndOffsetOpt.get.offset) + log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).timestampAndOffsetOpt.get.offset) else assertEquals(i, - log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10, Optional.empty).timestampAndOffsetOpt.get.offset) + log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).timestampAndOffsetOpt.get.offset) } log.close() } @@ -761,7 +763,7 @@ class LogLoaderTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) var log = createLog(logDir, logConfig) for (i <- 0 until numMessages) - log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0) val indexFiles = log.logSegments.asScala.map(_.offsetIndexFile()) val timeIndexFiles = log.logSegments.asScala.map(_.timeIndexFile()) log.close() @@ -787,10 +789,10 @@ class LogLoaderTest { assertEquals(i, LogTestUtils.readLog(log, i, 100).records.batches.iterator.next().lastOffset) if (i == 0) assertEquals(log.logSegments.asScala.head.baseOffset, - log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10, Optional.empty).timestampAndOffsetOpt.get.offset) + log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).timestampAndOffsetOpt.get.offset) else assertEquals(i, - log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10, Optional.empty).timestampAndOffsetOpt.get.offset) + log.fetchOffsetByTimestamp(mockTime.milliseconds + i * 10).timestampAndOffsetOpt.get.offset) } log.close() } @@ -829,7 +831,7 @@ class LogLoaderTest { // check that we can append to the log for (_ <- 0 until 10) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.delete() } @@ -846,7 +848,7 @@ class LogLoaderTest { // add enough messages to roll over several segments then close and re-open and attempt to truncate for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.close() log = createLog(logDir, logConfig, lastShutdownClean = false) log.truncateTo(3) @@ -865,7 +867,7 @@ class LogLoaderTest { // append some messages to create some segments for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) // expire all segments log.updateHighWatermark(log.logEndOffset) @@ -887,7 +889,7 @@ class LogLoaderTest { var log = createLog(logDir, logConfig) val numMessages = 50 + TestUtils.random.nextInt(50) for (_ <- 0 until numMessages) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) val records = log.logSegments.asScala.flatMap(_.log.records.asScala.toList).toList log.close() @@ -924,17 +926,17 @@ class LogLoaderTest { val set3 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 3, Compression.NONE, 0, new SimpleRecord("v4".getBytes(), "k4".getBytes())) val set4 = MemoryRecords.withRecords(Integer.MAX_VALUE.toLong + 4, Compression.NONE, 0, new SimpleRecord("v5".getBytes(), "k5".getBytes())) //Writes into an empty log with baseOffset 0 - log.appendAsFollower(set1, Int.MaxValue) + log.appendAsFollower(set1) assertEquals(0L, log.activeSegment.baseOffset) //This write will roll the segment, yielding a new segment with base offset = max(1, Integer.MAX_VALUE+2) = Integer.MAX_VALUE+2 - log.appendAsFollower(set2, Int.MaxValue) + log.appendAsFollower(set2) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) assertTrue(LogFileUtils.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 2).exists) //This will go into the existing log - log.appendAsFollower(set3, Int.MaxValue) + log.appendAsFollower(set3) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) //This will go into the existing log - log.appendAsFollower(set4, Int.MaxValue) + log.appendAsFollower(set4) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) log.close() val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index")) @@ -963,17 +965,17 @@ class LogLoaderTest { new SimpleRecord("v7".getBytes(), "k7".getBytes()), new SimpleRecord("v8".getBytes(), "k8".getBytes())) //Writes into an empty log with baseOffset 0 - log.appendAsFollower(set1, Int.MaxValue) + log.appendAsFollower(set1) assertEquals(0L, log.activeSegment.baseOffset) //This write will roll the segment, yielding a new segment with base offset = max(1, Integer.MAX_VALUE+2) = Integer.MAX_VALUE+2 - log.appendAsFollower(set2, Int.MaxValue) + log.appendAsFollower(set2) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) assertTrue(LogFileUtils.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 2).exists) //This will go into the existing log - log.appendAsFollower(set3, Int.MaxValue) + log.appendAsFollower(set3) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) //This will go into the existing log - log.appendAsFollower(set4, Int.MaxValue) + log.appendAsFollower(set4) assertEquals(Integer.MAX_VALUE.toLong + 2, log.activeSegment.baseOffset) log.close() val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index")) @@ -1003,18 +1005,18 @@ class LogLoaderTest { new SimpleRecord("v7".getBytes(), "k7".getBytes()), new SimpleRecord("v8".getBytes(), "k8".getBytes())) //Writes into an empty log with baseOffset 0 - log.appendAsFollower(set1, Int.MaxValue) + log.appendAsFollower(set1) assertEquals(0L, log.activeSegment.baseOffset) //This write will roll the segment, yielding a new segment with base offset = max(1, 3) = 3 - log.appendAsFollower(set2, Int.MaxValue) + log.appendAsFollower(set2) assertEquals(3, log.activeSegment.baseOffset) assertTrue(LogFileUtils.producerSnapshotFile(logDir, 3).exists) //This will also roll the segment, yielding a new segment with base offset = max(5, Integer.MAX_VALUE+4) = Integer.MAX_VALUE+4 - log.appendAsFollower(set3, Int.MaxValue) + log.appendAsFollower(set3) assertEquals(Integer.MAX_VALUE.toLong + 4, log.activeSegment.baseOffset) assertTrue(LogFileUtils.producerSnapshotFile(logDir, Integer.MAX_VALUE.toLong + 4).exists) //This will go into the existing log - log.appendAsFollower(set4, Int.MaxValue) + log.appendAsFollower(set4) assertEquals(Integer.MAX_VALUE.toLong + 4, log.activeSegment.baseOffset) log.close() val indexFiles = logDir.listFiles.filter(file => file.getName.contains(".index")) @@ -1057,8 +1059,8 @@ class LogLoaderTest { // Simulate recovery just after .cleaned file is created, before rename to .swap. On recovery, existing split // operation is aborted but the recovery process itself kicks off split which should complete. - newSegments.asScala.reverse.foreach(segment => { - segment.changeFileSuffixes("", UnifiedLog.CLEANED_FILE_SUFFIX) + newSegments.reverse.foreach(segment => { + segment.changeFileSuffixes("", UnifiedLog.CleanedFileSuffix) segment.truncateTo(0) }) for (file <- logDir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) @@ -1082,11 +1084,11 @@ class LogLoaderTest { // Simulate recovery just after one of the new segments has been renamed to .swap. On recovery, existing split // operation is aborted but the recovery process itself kicks off split which should complete. - newSegments.asScala.reverse.foreach { segment => - if (segment != newSegments.asScala.last) - segment.changeFileSuffixes("", UnifiedLog.CLEANED_FILE_SUFFIX) + newSegments.reverse.foreach { segment => + if (segment != newSegments.last) + segment.changeFileSuffixes("", UnifiedLog.CleanedFileSuffix) else - segment.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX) + segment.changeFileSuffixes("", UnifiedLog.SwapFileSuffix) segment.truncateTo(0) } for (file <- logDir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) @@ -1110,8 +1112,8 @@ class LogLoaderTest { // Simulate recovery right after all new segments have been renamed to .swap. On recovery, existing split operation // is completed and the old segment must be deleted. - newSegments.asScala.reverse.foreach(segment => { - segment.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX) + newSegments.reverse.foreach(segment => { + segment.changeFileSuffixes("", UnifiedLog.SwapFileSuffix) }) for (file <- logDir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) Utils.atomicMoveWithFallback(file.toPath, Paths.get(Utils.replaceSuffix(file.getPath, LogFileUtils.DELETED_FILE_SUFFIX, ""))) @@ -1137,7 +1139,7 @@ class LogLoaderTest { // Simulate recovery right after all new segments have been renamed to .swap and old segment has been deleted. On // recovery, existing split operation is completed. - newSegments.asScala.reverse.foreach(_.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX)) + newSegments.reverse.foreach(_.changeFileSuffixes("", UnifiedLog.SwapFileSuffix)) for (file <- logDir.listFiles if file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX)) Utils.delete(file) @@ -1163,7 +1165,7 @@ class LogLoaderTest { // Simulate recovery right after one of the new segment has been renamed to .swap and the other to .log. On // recovery, existing split operation is completed. - newSegments.asScala.last.changeFileSuffixes("", UnifiedLog.SWAP_FILE_SUFFIX) + newSegments.last.changeFileSuffixes("", UnifiedLog.SwapFileSuffix) // Truncate the old segment segmentWithOverflow.truncateTo(0) @@ -1184,7 +1186,7 @@ class LogLoaderTest { // create a log and write some messages to it var log = createLog(logDir, logConfig) for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.close() // check if recovery was attempted. Even if the recovery point is 0L, recovery should not be attempted as the @@ -1204,22 +1206,22 @@ class LogLoaderTest { val log = createLog(logDir, new LogConfig(new Properties)) val leaderEpochCache = log.leaderEpochCache val firstBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 1, offset = 0) - log.appendAsFollower(firstBatch, Int.MaxValue) + log.appendAsFollower(records = firstBatch) val secondBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 2, offset = 1) - log.appendAsFollower(secondBatch, Int.MaxValue) + log.appendAsFollower(records = secondBatch) val thirdBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 2, offset = 2) - log.appendAsFollower(thirdBatch, Int.MaxValue) + log.appendAsFollower(records = thirdBatch) val fourthBatch = singletonRecordsWithLeaderEpoch(value = "random".getBytes, leaderEpoch = 3, offset = 3) - log.appendAsFollower(fourthBatch, Int.MaxValue) + log.appendAsFollower(records = fourthBatch) - assertEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) + assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) // deliberately remove some of the epoch entries leaderEpochCache.truncateFromEndAsyncFlush(2) - assertNotEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) + assertNotEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), leaderEpochCache.epochEntries) log.close() // reopen the log and recover from the beginning @@ -1227,7 +1229,7 @@ class LogLoaderTest { val recoveredLeaderEpochCache = recoveredLog.leaderEpochCache // epoch entries should be recovered - assertEquals(util.List.of(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries) + assertEquals(java.util.Arrays.asList(new EpochEntry(1, 0), new EpochEntry(2, 1), new EpochEntry(3, 3)), recoveredLeaderEpochCache.epochEntries) recoveredLog.close() } @@ -1393,7 +1395,7 @@ class LogLoaderTest { var log = createLog(logDir, logConfig) for (i <- 0 until 5) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) log.roll() } assertEquals(6, log.logSegments.size) @@ -1437,7 +1439,7 @@ class LogLoaderTest { // |---> logEndOffset for (i <- 0 until 9) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) log.roll() } assertEquals(10, log.logSegments.size) @@ -1516,7 +1518,7 @@ class LogLoaderTest { // |----------------------------------------> logStartOffset for (i <- 0 until 5) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) log.roll() } assertEquals(9, log.activeSegment.baseOffset) @@ -1560,7 +1562,7 @@ class LogLoaderTest { var log = createLog(logDir, logConfig) for (i <- 0 until numMessages) log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(messageSize), - timestamp = mockTime.milliseconds + i * 10), 0) + timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0) assertEquals(numMessages, log.logEndOffset, "After appending %d messages to an empty log, the log end offset should be %d".format(numMessages, numMessages)) log.roll() @@ -1577,7 +1579,7 @@ class LogLoaderTest { for (i <- 0 until numMessages) log.appendAsLeader(TestUtils.singletonRecords(value = TestUtils.randomBytes(messageSize), - timestamp = mockTime.milliseconds + i * 10), 0) + timestamp = mockTime.milliseconds + i * 10), leaderEpoch = 0) log.roll() assertThrows(classOf[NoSuchFileException], () => log.activeSegment.sanityCheck(true)) log.flush(true) @@ -1609,7 +1611,7 @@ class LogLoaderTest { // |---> logEndOffset for (i <- 0 until 9) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) log.roll() } log.maybeIncrementHighWatermark(new LogOffsetMetadata(9L)) @@ -1621,7 +1623,7 @@ class LogLoaderTest { assertEquals(5, segments.firstSegment.get.baseOffset) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, topicPartition, logDirFailureChannel, Optional.empty, mockTime.scheduler) + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) val offsets = new LogLoader( logDir, topicPartition, diff --git a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala index a8946a3d1395f..c0e4eaf5b469b 100755 --- a/core/src/test/scala/unit/kafka/log/LogManagerTest.scala +++ b/core/src/test/scala/unit/kafka/log/LogManagerTest.scala @@ -18,19 +18,21 @@ package kafka.log import com.yammer.metrics.core.{Gauge, MetricName} +import kafka.server.metadata.{ConfigRepository, MockConfigRepository} import kafka.utils._ import org.apache.directory.api.util.FileUtils import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.OffsetOutOfRangeException import org.apache.kafka.common.utils.{Time, Utils} -import org.apache.kafka.common.{DirectoryId, KafkaException, TopicPartition, Uuid} +import org.apache.kafka.common.{DirectoryId, KafkaException, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.{ConfigRepository, MockConfigRepository} +import org.apache.kafka.image.{TopicImage, TopicsImage} +import org.apache.kafka.metadata.{LeaderRecoveryState, PartitionRegistration} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.ArgumentMatchers.any -import org.mockito.{ArgumentCaptor, ArgumentMatchers} +import org.mockito.{ArgumentCaptor, ArgumentMatchers, Mockito} import org.mockito.Mockito.{doAnswer, doNothing, mock, never, spy, times, verify} import java.io._ @@ -38,12 +40,12 @@ import java.lang.{Long => JLong} import java.nio.file.Files import java.nio.file.attribute.PosixFilePermission import java.util -import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} +import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap, Future} import java.util.{Collections, Optional, OptionalLong, Properties} import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.{FileLock, KafkaScheduler, MockTime, Scheduler} -import org.apache.kafka.storage.internals.log.{CleanerConfig, FetchDataInfo, LogConfig, LogDirFailureChannel, LogMetricNames, LogManager => JLogManager, LogOffsetsListener, LogStartOffsetIncrementReason, ProducerStateManagerConfig, RemoteIndexCache, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, FetchDataInfo, LogConfig, LogDirFailureChannel, LogStartOffsetIncrementReason, ProducerStateManagerConfig, RemoteIndexCache} import org.apache.kafka.storage.internals.checkpoint.{CleanShutdownFileHandler, OffsetCheckpointFile} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.function.Executable @@ -55,12 +57,13 @@ import scala.jdk.CollectionConverters._ import scala.util.{Failure, Try} class LogManagerTest { + import LogManagerTest._ val time = new MockTime() val maxRollInterval = 100 val maxLogAgeMs: Int = 10 * 60 * 1000 val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, 1024: java.lang.Integer) logProps.put(TopicConfig.SEGMENT_INDEX_BYTES_CONFIG, 4096: java.lang.Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, maxLogAgeMs: java.lang.Integer) val logConfig = new LogConfig(logProps) @@ -93,12 +96,12 @@ class LogManagerTest { */ @Test def testCreateLog(): Unit = { - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) assertEquals(1, logManager.liveLogDirs.size) val logFile = new File(logDir, name + "-0") assertTrue(logFile.exists) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } /** @@ -116,13 +119,13 @@ class LogManagerTest { logManager = createLogManager(dirs) - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty, targetLogDirectoryId = Some(targetedLogDirectoryId)) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None, targetLogDirectoryId = Some(targetedLogDirectoryId)) assertEquals(5, logManager.liveLogDirs.size) val logFile = new File(dirs(1), name + "-0") assertTrue(logFile.exists) assertEquals(dirs(1).getAbsolutePath, logFile.getParent) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } /** @@ -130,23 +133,23 @@ class LogManagerTest { */ @Test def testCreateLogWithTargetedLogDirectorySetAsUnassigned(): Unit = { - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty, targetLogDirectoryId = Some(DirectoryId.UNASSIGNED)) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None, targetLogDirectoryId = Some(DirectoryId.UNASSIGNED)) assertEquals(1, logManager.liveLogDirs.size) val logFile = new File(logDir, name + "-0") assertTrue(logFile.exists) assertFalse(logManager.directoryId(logFile.getParent).equals(DirectoryId.UNASSIGNED)) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } @Test def testCreateLogWithTargetedLogDirectorySetAsUnknownWithoutAnyOfflineDirectories(): Unit = { - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty, targetLogDirectoryId = Some(DirectoryId.LOST)) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None, targetLogDirectoryId = Some(DirectoryId.LOST)) assertEquals(1, logManager.liveLogDirs.size) val logFile = new File(logDir, name + "-0") assertTrue(logFile.exists) assertFalse(logManager.directoryId(logFile.getParent).equals(DirectoryId.random())) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } /** @@ -166,21 +169,21 @@ class LogManagerTest { assertEquals(2, logManagerForTest.get.liveLogDirs.size) logManagerForTest.get.startup(Set.empty) - val log1 = logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) - val log2 = logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 1), topicId = Optional.empty) + val log1 = logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 0), topicId = None) + val log2 = logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 1), topicId = None) val logFile1 = new File(logDir1, name + "-0") assertTrue(logFile1.exists) val logFile2 = new File(logDir2, name + "-1") assertTrue(logFile2.exists) - log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), 0) + log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), leaderEpoch = 0) log1.takeProducerSnapshot() - log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), 0) + log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), leaderEpoch = 0) - log2.appendAsLeader(TestUtils.singletonRecords("test2".getBytes()), 0) + log2.appendAsLeader(TestUtils.singletonRecords("test2".getBytes()), leaderEpoch = 0) log2.takeProducerSnapshot() - log2.appendAsLeader(TestUtils.singletonRecords("test2".getBytes()), 0) + log2.appendAsLeader(TestUtils.singletonRecords("test2".getBytes()), leaderEpoch = 0) // This should cause log1.close() to fail during LogManger shutdown sequence. FileUtils.deleteDirectory(logFile1) @@ -205,8 +208,8 @@ class LogManagerTest { assertEquals(2, logManagerForTest.get.liveLogDirs.size) logManagerForTest.get.startup(Set.empty) - logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) - logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 1), topicId = Optional.empty) + logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 0), topicId = None) + logManagerForTest.get.getOrCreateLog(new TopicPartition(name, 1), topicId = None) val logFile1 = new File(logDir1, name + "-0") assertTrue(logFile1.exists) @@ -232,17 +235,17 @@ class LogManagerTest { // 1. create two logs under logDir val topicPartition0 = new TopicPartition(name, 0) val topicPartition1 = new TopicPartition(name, 1) - val log0 = logManager.getOrCreateLog(topicPartition0, topicId = Optional.empty) - val log1 = logManager.getOrCreateLog(topicPartition1, topicId = Optional.empty) + val log0 = logManager.getOrCreateLog(topicPartition0, topicId = None) + val log1 = logManager.getOrCreateLog(topicPartition1, topicId = None) val logFile0 = new File(logDir, name + "-0") val logFile1 = new File(logDir, name + "-1") assertTrue(logFile0.exists) assertTrue(logFile1.exists) - log0.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), 0) + log0.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), leaderEpoch = 0) log0.takeProducerSnapshot() - log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), 0) + log1.appendAsLeader(TestUtils.singletonRecords("test1".getBytes()), leaderEpoch = 0) log1.takeProducerSnapshot() // 2. simulate unclean shutdown by deleting clean shutdown marker file @@ -291,10 +294,10 @@ class LogManagerTest { logManager = createLogManager(dirs) logManager.startup(Set.empty) - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), isNew = true, topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), isNew = true, topicId = None) val logFile = new File(logDir, name + "-0") assertTrue(logFile.exists) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } @Test @@ -323,7 +326,7 @@ class LogManagerTest { // Request creating a new log. // LogManager should try using all configured log directories until one succeeds. - logManager.getOrCreateLog(new TopicPartition(name, 0), isNew = true, topicId = Optional.empty) + logManager.getOrCreateLog(new TopicPartition(name, 0), isNew = true, topicId = None) // Verify that half the directories were considered broken, assertEquals(dirs.length / 2, brokenDirs.size) @@ -352,11 +355,11 @@ class LogManagerTest { */ @Test def testCleanupExpiredSegments(): Unit = { - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) var offset = 0L for (_ <- 0 until 200) { val set = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(set, 0) + val info = log.appendAsLeader(set, leaderEpoch = 0) offset = info.lastOffset } assertTrue(log.numberOfSegments > 1, "There should be more than one segment now.") @@ -379,7 +382,7 @@ class LogManagerTest { assertThrows(classOf[OffsetOutOfRangeException], () => readLog(log, 0), () => "Should get exception from fetching earlier.") // log should still be appendable - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } /** @@ -391,7 +394,7 @@ class LogManagerTest { logManager.shutdown() val segmentBytes = 10 * setSize val properties = new Properties() - properties.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentBytes.toString) + properties.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentBytes.toString) properties.put(TopicConfig.RETENTION_BYTES_CONFIG, (5L * 10L * setSize + 10L).toString) val configRepository = MockConfigRepository.forTopic(name, properties) @@ -399,14 +402,14 @@ class LogManagerTest { logManager.startup(Set.empty) // create a log - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) var offset = 0L // add a bunch of messages that should be larger than the retentionSize val numMessages = 200 for (_ <- 0 until numMessages) { val set = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(set, 0) + val info = log.appendAsLeader(set, leaderEpoch = 0) offset = info.firstOffset } @@ -424,7 +427,7 @@ class LogManagerTest { assertEquals(0, readLog(log, offset + 1).records.sizeInBytes, "Should get empty fetch off new log.") assertThrows(classOf[OffsetOutOfRangeException], () => readLog(log, 0)) // log should still be appendable - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) } /** @@ -450,11 +453,11 @@ class LogManagerTest { val configRepository = MockConfigRepository.forTopic(name, TopicConfig.CLEANUP_POLICY_CONFIG, policy) logManager = createLogManager(configRepository = configRepository) - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) var offset = 0L for (_ <- 0 until 200) { val set = TestUtils.singletonRecords("test".getBytes(), key="test".getBytes()) - val info = log.appendAsLeader(set, 0) + val info = log.appendAsLeader(set, leaderEpoch = 0) offset = info.lastOffset } @@ -477,11 +480,11 @@ class LogManagerTest { logManager = createLogManager(configRepository = configRepository) logManager.startup(Set.empty) - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) val lastFlush = log.lastFlushTime for (_ <- 0 until 200) { val set = TestUtils.singletonRecords("test".getBytes()) - log.appendAsLeader(set, 0) + log.appendAsLeader(set, leaderEpoch = 0) } time.sleep(logManager.initialTaskDelayMs) assertTrue(lastFlush != log.lastFlushTime, "Time based flush should have been triggered") @@ -501,7 +504,7 @@ class LogManagerTest { // verify that logs are always assigned to the least loaded partition for (partition <- 0 until 20) { - logManager.getOrCreateLog(new TopicPartition("test", partition), topicId = Optional.empty) + logManager.getOrCreateLog(new TopicPartition("test", partition), topicId = None) assertEquals(partition + 1, logManager.allLogs.size, "We should have created the right number of logs") val counts = logManager.allLogs.groupBy(_.dir.getParent).values.map(_.size) assertTrue(counts.max <= counts.min + 1, "Load should balance evenly") @@ -528,17 +531,17 @@ class LogManagerTest { val testTopic = "test-stray-topic" val testTopicPartition = new TopicPartition(testTopic, 0) - val log = logManager.getOrCreateLog(testTopicPartition, topicId = Optional.of(Uuid.randomUuid())) + val log = logManager.getOrCreateLog(testTopicPartition, topicId = Some(Uuid.randomUuid())) def providedIsStray(log: UnifiedLog) = { invokedCount += 1 true } - logManager.loadLog(log.dir, hadCleanShutdown = true, util.Map.of[TopicPartition, JLong], util.Map.of[TopicPartition, JLong], logConfig, Map.empty, new ConcurrentHashMap[String, Integer](), providedIsStray) + logManager.loadLog(log.dir, hadCleanShutdown = true, Collections.emptyMap[TopicPartition, JLong], Collections.emptyMap[TopicPartition, JLong], logConfig, Map.empty, new ConcurrentHashMap[String, Integer](), providedIsStray) assertEquals(1, invokedCount) assertTrue( logDir.listFiles().toSet - .exists(f => f.getName.startsWith(testTopic) && f.getName.endsWith(UnifiedLog.STRAY_DIR_SUFFIX)) + .exists(f => f.getName.startsWith(testTopic) && f.getName.endsWith(UnifiedLog.StrayDirSuffix)) ) } @@ -581,16 +584,16 @@ class LogManagerTest { } private def verifyCheckpointRecovery(topicPartitions: Seq[TopicPartition], logManager: LogManager, logDir: File): Unit = { - val logs = topicPartitions.map(logManager.getOrCreateLog(_, topicId = Optional.empty)) + val logs = topicPartitions.map(logManager.getOrCreateLog(_, topicId = None)) logs.foreach { log => for (_ <- 0 until 50) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes()), leaderEpoch = 0) log.flush(false) } logManager.checkpointLogRecoveryOffsets() - val checkpoints = new OffsetCheckpointFile(new File(logDir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), null).read() + val checkpoints = new OffsetCheckpointFile(new File(logDir, LogManager.RecoveryPointCheckpointFile), null).read() topicPartitions.zip(logs).foreach { case (tp, log) => assertEquals(checkpoints.get(tp), log.recoveryPoint, "Recovery point should equal checkpoint") @@ -611,7 +614,7 @@ class LogManagerTest { @Test def testFileReferencesAfterAsyncDelete(): Unit = { - val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = Optional.empty) + val log = logManager.getOrCreateLog(new TopicPartition(name, 0), topicId = None) val activeSegment = log.activeSegment val logName = activeSegment.log.file.getName val indexName = activeSegment.offsetIndex.file.getName @@ -648,7 +651,7 @@ class LogManagerTest { @Test def testCreateAndDeleteOverlyLongTopic(): Unit = { val invalidTopicName = String.join("", Collections.nCopies(253, "x")) - logManager.getOrCreateLog(new TopicPartition(invalidTopicName, 0), topicId = Optional.empty) + logManager.getOrCreateLog(new TopicPartition(invalidTopicName, 0), topicId = None) logManager.asyncDelete(new TopicPartition(invalidTopicName, 0)) } @@ -661,16 +664,16 @@ class LogManagerTest { new TopicPartition("test-b", 0), new TopicPartition("test-b", 1)) - val allLogs = tps.map(logManager.getOrCreateLog(_, topicId = Optional.empty)) + val allLogs = tps.map(logManager.getOrCreateLog(_, topicId = None)) allLogs.foreach { log => for (_ <- 0 until 50) - log.appendAsLeader(TestUtils.singletonRecords("test".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords("test".getBytes), leaderEpoch = 0) log.flush(false) } logManager.checkpointRecoveryOffsetsInDir(logDir) - val checkpoints = new OffsetCheckpointFile(new File(logDir, JLogManager.RECOVERY_POINT_CHECKPOINT_FILE), null).read() + val checkpoints = new OffsetCheckpointFile(new File(logDir, LogManager.RecoveryPointCheckpointFile), null).read() tps.zip(allLogs).foreach { case (tp, log) => assertEquals(checkpoints.get(tp), log.recoveryPoint, @@ -679,7 +682,7 @@ class LogManagerTest { } private def readLog(log: UnifiedLog, offset: Long, maxLength: Int = 1024): FetchDataInfo = { - log.read(offset, maxLength, FetchIsolation.LOG_END, true) + log.read(offset, maxLength, isolation = FetchIsolation.LOG_END, minOneMessage = true) } /** @@ -783,9 +786,9 @@ class LogManagerTest { oldProperties.put(TopicConfig.CLEANUP_POLICY_CONFIG, TopicConfig.CLEANUP_POLICY_COMPACT) val oldLogConfig = LogConfig.fromProps(logConfig.originals, oldProperties) - val log0 = spyLogManager.getOrCreateLog(tp0, topicId = Optional.empty) + val log0 = spyLogManager.getOrCreateLog(tp0, topicId = None) log0.updateConfig(oldLogConfig) - val log1 = spyLogManager.getOrCreateLog(tp1, topicId = Optional.empty) + val log1 = spyLogManager.getOrCreateLog(tp1, topicId = None) log1.updateConfig(oldLogConfig) assertEquals(Set(log0, log1), spyLogManager.logsByTopic(topic).toSet) @@ -831,7 +834,7 @@ class LogManagerTest { val numMessages = Math.floor(segmentBytes * expectedSegmentsPerLog / createRecord.sizeInBytes).asInstanceOf[Int] try { for (_ <- 0 until numMessages) { - log.appendAsLeader(createRecord, 0) + log.appendAsLeader(createRecord, leaderEpoch = 0) } assertEquals(expectedSegmentsPerLog, log.numberOfSegments) @@ -953,25 +956,24 @@ class LogManagerTest { val topicPartition = UnifiedLog.parseTopicPartitionName(dir) val config = topicConfigOverrides.getOrElse(topicPartition.topic, logConfig) - UnifiedLog.create( - dir, - config, - 0, - 0, - mock(classOf[Scheduler]), - mockBrokerTopicStats, - mockTime, - 5 * 60 * 1000, - new ProducerStateManagerConfig(5 * 60 * 1000, false), - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - mock(classOf[LogDirFailureChannel]), + UnifiedLog( + dir = dir, + config = config, + logStartOffset = 0, + recoveryPoint = 0, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = new ProducerStateManagerConfig(5 * 60 * 1000, false), + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + scheduler = mock(classOf[Scheduler]), + time = mockTime, + brokerTopicStats = mockBrokerTopicStats, + logDirFailureChannel = mock(classOf[LogDirFailureChannel]), // not clean shutdown - false, - Optional.empty, + lastShutdownClean = false, + topicId = None, + keepPartitionMetadataFile = false, // pass mock map for verification later - mockMap, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) + numRemainingSegments = mockMap) }.when(spyLogManager).loadLog(any[File], any[Boolean], any[util.Map[TopicPartition, JLong]], any[util.Map[TopicPartition, JLong]], any[LogConfig], any[Map[String, LogConfig]], any[ConcurrentMap[String, Integer]], any[UnifiedLog => Boolean]()) @@ -1028,14 +1030,14 @@ class LogManagerTest { val metricTag = s"topic=${tp.topic},partition=${tp.partition}" def verifyMetrics(): Unit = { - assertEquals(LogMetricNames.ALL_METRIC_NAMES.size, logMetrics.size) + assertEquals(LogMetricNames.allMetricNames.size, logMetrics.size) logMetrics.foreach { metric => assertTrue(metric.getMBeanName.contains(metricTag)) } } // Create the Log and assert that the metrics are present - logManager.getOrCreateLog(tp, topicId = Optional.empty) + logManager.getOrCreateLog(tp, topicId = None) verifyMetrics() // Trigger the deletion and assert that the metrics have been removed @@ -1043,7 +1045,7 @@ class LogManagerTest { assertTrue(logMetrics.isEmpty) // Recreate the Log and assert that the metrics are present - logManager.getOrCreateLog(tp, topicId = Optional.empty) + logManager.getOrCreateLog(tp, topicId = None) verifyMetrics() // Advance time past the file deletion delay and assert that the removed log has been deleted but the metrics @@ -1068,7 +1070,7 @@ class LogManagerTest { val metricTag = s"topic=${tp.topic},partition=${tp.partition}" def verifyMetrics(logCount: Int): Unit = { - assertEquals(LogMetricNames.ALL_METRIC_NAMES.size * logCount, logMetrics.size) + assertEquals(LogMetricNames.allMetricNames.size * logCount, logMetrics.size) logMetrics.foreach { metric => assertTrue(metric.getMBeanName.contains(metricTag)) } @@ -1076,9 +1078,9 @@ class LogManagerTest { // Create the current and future logs and verify that metrics are present for both current and future logs logManager.maybeUpdatePreferredLogDir(tp, dir1.getAbsolutePath) - logManager.getOrCreateLog(tp, topicId = Optional.empty) + logManager.getOrCreateLog(tp, topicId = None) logManager.maybeUpdatePreferredLogDir(tp, dir2.getAbsolutePath) - logManager.getOrCreateLog(tp, isFuture = true, topicId = Optional.empty) + logManager.getOrCreateLog(tp, isFuture = true, topicId = None) verifyMetrics(2) // Replace the current log with the future one and verify that only one set of metrics are present @@ -1092,6 +1094,36 @@ class LogManagerTest { verifyMetrics(1) } + @Test + def testWaitForAllToComplete(): Unit = { + var invokedCount = 0 + val success: Future[Boolean] = Mockito.mock(classOf[Future[Boolean]]) + Mockito.when(success.get()).thenAnswer { _ => + invokedCount += 1 + true + } + val failure: Future[Boolean] = Mockito.mock(classOf[Future[Boolean]]) + Mockito.when(failure.get()).thenAnswer{ _ => + invokedCount += 1 + throw new RuntimeException + } + + var failureCount = 0 + // all futures should be evaluated + assertFalse(LogManager.waitForAllToComplete(Seq(success, failure), _ => failureCount += 1)) + assertEquals(2, invokedCount) + assertEquals(1, failureCount) + assertFalse(LogManager.waitForAllToComplete(Seq(failure, success), _ => failureCount += 1)) + assertEquals(4, invokedCount) + assertEquals(2, failureCount) + assertTrue(LogManager.waitForAllToComplete(Seq(success, success), _ => failureCount += 1)) + assertEquals(6, invokedCount) + assertEquals(2, failureCount) + assertFalse(LogManager.waitForAllToComplete(Seq(failure, failure), _ => failureCount += 1)) + assertEquals(8, invokedCount) + assertEquals(4, failureCount) + } + @Test def testLoadDirectoryIds(): Unit = { val dirs: Seq[File] = Seq.fill(5)(TestUtils.tempDir()) @@ -1129,14 +1161,14 @@ class LogManagerTest { remoteStorageSystemEnable = true ) - val checkpointFile = new File(logDir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE) + val checkpointFile = new File(logDir, LogManager.LogStartOffsetCheckpointFile) val checkpoint = new OffsetCheckpointFile(checkpointFile, null) val topicPartition = new TopicPartition("test", 0) - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) var offset = 0L for(_ <- 0 until 50) { val set = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(set, 0) + val info = log.appendAsLeader(set, leaderEpoch = 0) offset = info.lastOffset if (offset != 0 && offset % 10 == 0) log.roll() @@ -1160,14 +1192,14 @@ class LogManagerTest { @Test def testCheckpointLogStartOffsetForNormalTopic(): Unit = { - val checkpointFile = new File(logDir, JLogManager.LOG_START_OFFSET_CHECKPOINT_FILE) + val checkpointFile = new File(logDir, LogManager.LogStartOffsetCheckpointFile) val checkpoint = new OffsetCheckpointFile(checkpointFile, null) val topicPartition = new TopicPartition("test", 0) - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) var offset = 0L for(_ <- 0 until 50) { val set = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(set, 0) + val info = log.appendAsLeader(set, leaderEpoch = 0) offset = info.lastOffset if (offset != 0 && offset % 10 == 0) log.roll() @@ -1201,6 +1233,65 @@ class LogManagerTest { new File(dir, MetaPropertiesEnsemble.META_PROPERTIES_NAME).getAbsolutePath, false) } + val foo0 = new TopicIdPartition(Uuid.fromString("Sl08ZXU2QW6uF5hIoSzc8w"), new TopicPartition("foo", 0)) + val foo1 = new TopicIdPartition(Uuid.fromString("Sl08ZXU2QW6uF5hIoSzc8w"), new TopicPartition("foo", 1)) + val bar0 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 0)) + val bar1 = new TopicIdPartition(Uuid.fromString("69O438ZkTSeqqclTtZO2KA"), new TopicPartition("bar", 1)) + val baz0 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 0)) + val baz1 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 1)) + val baz2 = new TopicIdPartition(Uuid.fromString("2Ik9_5-oRDOKpSXd2SuG5w"), new TopicPartition("baz", 2)) + val quux0 = new TopicIdPartition(Uuid.fromString("YS9owjv5TG2OlsvBM0Qw6g"), new TopicPartition("quux", 0)) + val recreatedFoo0 = new TopicIdPartition(Uuid.fromString("_dOOzPe3TfiWV21Lh7Vmqg"), new TopicPartition("foo", 0)) + val recreatedFoo1 = new TopicIdPartition(Uuid.fromString("_dOOzPe3TfiWV21Lh7Vmqg"), new TopicPartition("foo", 1)) + + @Test + def testIsStrayKraftReplicaWithEmptyImage(): Unit = { + val image: TopicsImage = topicsImage(Seq()) + val onDisk = Seq(foo0, foo1, bar0, bar1, quux0).map(mockLog) + assertTrue(onDisk.forall(log => LogManager.isStrayKraftReplica(0, image, log))) + } + + @Test + def testIsStrayKraftReplicaInImage(): Unit = { + val image: TopicsImage = topicsImage(Seq( + topicImage(Map( + foo0 -> Seq(0, 1, 2), + )), + topicImage(Map( + bar0 -> Seq(0, 1, 2), + bar1 -> Seq(0, 1, 2), + )) + )) + val onDisk = Seq(foo0, foo1, bar0, bar1, quux0).map(mockLog) + val expectedStrays = Set(foo1, quux0).map(_.topicPartition()) + + onDisk.foreach(log => assertEquals(expectedStrays.contains(log.topicPartition), LogManager.isStrayKraftReplica(0, image, log))) + } + + @Test + def testIsStrayKraftReplicaInImageWithRemoteReplicas(): Unit = { + val image: TopicsImage = topicsImage(Seq( + topicImage(Map( + foo0 -> Seq(0, 1, 2), + )), + topicImage(Map( + bar0 -> Seq(1, 2, 3), + bar1 -> Seq(2, 3, 0), + )) + )) + val onDisk = Seq(foo0, bar0, bar1).map(mockLog) + val expectedStrays = Set(bar0).map(_.topicPartition) + + onDisk.foreach(log => assertEquals(expectedStrays.contains(log.topicPartition), LogManager.isStrayKraftReplica(0, image, log))) + } + + @Test + def testIsStrayKraftMissingTopicId(): Unit = { + val log = Mockito.mock(classOf[UnifiedLog]) + Mockito.when(log.topicId).thenReturn(Option.empty) + assertTrue(LogManager.isStrayKraftReplica(0, topicsImage(Seq()), log)) + } + /** * Test LogManager takes file lock by default and the lock is released after shutdown. */ @@ -1211,12 +1302,12 @@ class LogManagerTest { try { // ${tmpLogDir}.lock is acquired by tmpLogManager - val fileLock = new FileLock(new File(tmpLogDir, JLogManager.LOCK_FILE_NAME)) + val fileLock = new FileLock(new File(tmpLogDir, LogManager.LockFileName)) assertFalse(fileLock.tryLock()) } finally { // ${tmpLogDir}.lock is removed after shutdown tmpLogManager.shutdown() - val f = new File(tmpLogDir, JLogManager.LOCK_FILE_NAME) + val f = new File(tmpLogDir, LogManager.LockFileName) assertFalse(f.exists()) } } @@ -1247,6 +1338,7 @@ class LogManagerTest { time = Time.SYSTEM, brokerTopicStats = new BrokerTopicStats, logDirFailureChannel = new LogDirFailureChannel(1), + keepPartitionMetadataFile = true, remoteStorageSystemEnable = false, initialTaskDelayMs = 0) @@ -1285,3 +1377,56 @@ class LogManagerTest { } } } + +object LogManagerTest { + def mockLog( + topicIdPartition: TopicIdPartition + ): UnifiedLog = { + val log = Mockito.mock(classOf[UnifiedLog]) + Mockito.when(log.topicId).thenReturn(Some(topicIdPartition.topicId())) + Mockito.when(log.topicPartition).thenReturn(topicIdPartition.topicPartition()) + log + } + + def topicImage( + partitions: Map[TopicIdPartition, Seq[Int]] + ): TopicImage = { + var topicName: String = null + var topicId: Uuid = null + partitions.keySet.foreach { + partition => if (topicId == null) { + topicId = partition.topicId() + } else if (!topicId.equals(partition.topicId())) { + throw new IllegalArgumentException("partition topic IDs did not match") + } + if (topicName == null) { + topicName = partition.topic() + } else if (!topicName.equals(partition.topic())) { + throw new IllegalArgumentException("partition topic names did not match") + } + } + if (topicId == null) { + throw new IllegalArgumentException("Invalid empty partitions map.") + } + val partitionRegistrations = partitions.map { case (partition, replicas) => + Int.box(partition.partition()) -> new PartitionRegistration.Builder(). + setReplicas(replicas.toArray). + setDirectories(DirectoryId.unassignedArray(replicas.size)). + setIsr(replicas.toArray). + setLeader(replicas.head). + setLeaderRecoveryState(LeaderRecoveryState.RECOVERED). + setLeaderEpoch(0). + setPartitionEpoch(0). + build() + } + new TopicImage(topicName, topicId, partitionRegistrations.asJava) + } + + def topicsImage( + topics: Seq[TopicImage] + ): TopicsImage = { + var retval = TopicsImage.EMPTY + topics.foreach { t => retval = retval.including(t) } + retval + } +} diff --git a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala index 0ff68988d76fb..e98028ab86fe4 100644 --- a/core/src/test/scala/unit/kafka/log/LogTestUtils.scala +++ b/core/src/test/scala/unit/kafka/log/LogTestUtils.scala @@ -17,6 +17,8 @@ package kafka.log +import kafka.log.remote.RemoteLogManager + import java.io.File import java.util.Properties import kafka.utils.TestUtils @@ -30,17 +32,14 @@ import java.nio.file.Files import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap} import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.common.RequestLocal import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.server.log.remote.storage.RemoteLogManager import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.Scheduler import org.apache.kafka.storage.internals.log.LogConfig.{DEFAULT_REMOTE_LOG_COPY_DISABLE_CONFIG, DEFAULT_REMOTE_LOG_DELETE_ON_DISABLE_CONFIG} -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, FetchDataInfo, LazyIndex, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogOffsetsListener, LogSegment, ProducerStateManager, ProducerStateManagerConfig, TransactionIndex, VerificationGuard, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, FetchDataInfo, LazyIndex, LogAppendInfo, LogConfig, LogDirFailureChannel, LogFileUtils, LogOffsetsListener, LogSegment, ProducerStateManager, ProducerStateManagerConfig, TransactionIndex} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption object LogTestUtils { /** @@ -53,7 +52,7 @@ object LogTestUtils { val ms = FileRecords.open(LogFileUtils.logFile(logDir, offset)) val idx = LazyIndex.forOffset(LogFileUtils.offsetIndexFile(logDir, offset), offset, 1000) val timeIdx = LazyIndex.forTime(LogFileUtils.timeIndexFile(logDir, offset), offset, 1500) - val txnIndex = new TransactionIndex(offset, LogFileUtils.transactionIndexFile(logDir, offset, "")) + val txnIndex = new TransactionIndex(offset, UnifiedLog.transactionIndexFile(logDir, offset)) new LogSegment(ms, idx, timeIdx, txnIndex, offset, indexIntervalBytes, 0, time) } @@ -66,7 +65,7 @@ object LogTestUtils { localRetentionBytes: Long = LogConfig.DEFAULT_LOCAL_RETENTION_BYTES, segmentJitterMs: Long = LogConfig.DEFAULT_SEGMENT_JITTER_MS, cleanupPolicy: String = ServerLogConfigs.LOG_CLEANUP_POLICY_DEFAULT, - maxMessageBytes: Int = ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT, + maxMessageBytes: Int = LogConfig.DEFAULT_MAX_MESSAGE_BYTES, indexIntervalBytes: Int = ServerLogConfigs.LOG_INDEX_INTERVAL_BYTES_DEFAULT, segmentIndexBytes: Int = ServerLogConfigs.LOG_INDEX_SIZE_MAX_BYTES_DEFAULT, fileDeleteDelayMs: Long = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, @@ -75,7 +74,7 @@ object LogTestUtils { remoteLogDeleteOnDisable: Boolean = DEFAULT_REMOTE_LOG_DELETE_ON_DISABLE_CONFIG): LogConfig = { val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_MS_CONFIG, segmentMs: java.lang.Long) - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, segmentBytes: Integer) + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, segmentBytes: Integer) logProps.put(TopicConfig.RETENTION_MS_CONFIG, retentionMs: java.lang.Long) logProps.put(TopicConfig.LOCAL_LOG_RETENTION_MS_CONFIG, localRetentionMs: java.lang.Long) logProps.put(TopicConfig.RETENTION_BYTES_CONFIG, retentionBytes: java.lang.Long) @@ -104,27 +103,29 @@ object LogTestUtils { producerIdExpirationCheckIntervalMs: Int = TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, lastShutdownClean: Boolean = true, topicId: Option[Uuid] = None, + keepPartitionMetadataFile: Boolean = true, numRemainingSegments: ConcurrentMap[String, Integer] = new ConcurrentHashMap[String, Integer], remoteStorageSystemEnable: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER): UnifiedLog = { - UnifiedLog.create( - dir, - config, - logStartOffset, - recoveryPoint, - scheduler, - brokerTopicStats, - time, - maxTransactionTimeoutMs, - producerStateManagerConfig, - producerIdExpirationCheckIntervalMs, - new LogDirFailureChannel(10), - lastShutdownClean, - topicId.toJava, - numRemainingSegments, - remoteStorageSystemEnable, - logOffsetsListener + UnifiedLog( + dir = dir, + config = config, + logStartOffset = logStartOffset, + recoveryPoint = recoveryPoint, + scheduler = scheduler, + brokerTopicStats = brokerTopicStats, + time = time, + maxTransactionTimeoutMs = maxTransactionTimeoutMs, + producerStateManagerConfig = producerStateManagerConfig, + producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs, + logDirFailureChannel = new LogDirFailureChannel(10), + lastShutdownClean = lastShutdownClean, + topicId = topicId, + keepPartitionMetadataFile = keepPartitionMetadataFile, + numRemainingSegments = numRemainingSegments, + remoteStorageSystemEnable = remoteStorageSystemEnable, + logOffsetsListener = logOffsetsListener ) } @@ -210,8 +211,8 @@ object LogTestUtils { time.sleep(config.fileDeleteDelayMs + 1) for (file <- logDir.listFiles) { assertFalse(file.getName.endsWith(LogFileUtils.DELETED_FILE_SUFFIX), "Unexpected .deleted file after recovery") - assertFalse(file.getName.endsWith(UnifiedLog.CLEANED_FILE_SUFFIX), "Unexpected .cleaned file after recovery") - assertFalse(file.getName.endsWith(UnifiedLog.SWAP_FILE_SUFFIX), "Unexpected .swap file after recovery") + assertFalse(file.getName.endsWith(UnifiedLog.CleanedFileSuffix), "Unexpected .cleaned file after recovery") + assertFalse(file.getName.endsWith(UnifiedLog.SwapFileSuffix), "Unexpected .swap file after recovery") } assertEquals(expectedKeys, keysInLog(recoveredLog)) assertFalse(hasOffsetOverflow(recoveredLog)) @@ -227,7 +228,7 @@ object LogTestUtils { leaderEpoch: Int = 0): LogAppendInfo = { val records = endTxnRecords(controlType, producerId, producerEpoch, coordinatorEpoch = coordinatorEpoch, timestamp = timestamp) - log.appendAsLeader(records, leaderEpoch, AppendOrigin.COORDINATOR, RequestLocal.noCaching(), VerificationGuard.SENTINEL) + log.appendAsLeader(records, origin = AppendOrigin.COORDINATOR, leaderEpoch = leaderEpoch) } private def endTxnRecords(controlRecordType: ControlRecordType, @@ -265,7 +266,7 @@ object LogTestUtils { new SimpleRecord(s"$seq".getBytes) } val records = MemoryRecords.withRecords(Compression.NONE, simpleRecords: _*) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } def appendTransactionalAsLeader(log: UnifiedLog, @@ -294,7 +295,7 @@ object LogTestUtils { producerEpoch, sequence, simpleRecords: _*) } - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) sequence += numRecords } } diff --git a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala index e6fdf09331bfc..bbcda01451ff2 100755 --- a/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala +++ b/core/src/test/scala/unit/kafka/log/UnifiedLogTest.scala @@ -17,7 +17,8 @@ package kafka.log -import kafka.server.KafkaConfig +import kafka.log.remote.RemoteLogManager +import kafka.server.{DelayedRemoteListOffsets, KafkaConfig} import kafka.utils.TestUtils import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig @@ -32,38 +33,31 @@ import org.apache.kafka.common.record._ import org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse} import org.apache.kafka.common.utils.{BufferSupplier, Time, Utils} import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.server.common.RequestLocal +import org.apache.kafka.server.config.KRaftConfigs import org.apache.kafka.server.log.remote.metadata.storage.TopicBasedRemoteLogMetadataManagerConfig -import org.apache.kafka.server.log.remote.storage.{NoOpRemoteLogMetadataManager, NoOpRemoteStorageManager, RemoteLogManager, RemoteLogManagerConfig} +import org.apache.kafka.server.log.remote.storage.{NoOpRemoteLogMetadataManager, NoOpRemoteStorageManager, RemoteLogManagerConfig} import org.apache.kafka.server.metrics.KafkaYammerMetrics -import org.apache.kafka.server.purgatory.{DelayedOperationPurgatory, DelayedRemoteListOffsets} +import org.apache.kafka.server.purgatory.DelayedOperationPurgatory import org.apache.kafka.server.storage.log.{FetchIsolation, UnexpectedAppendOffsetException} import org.apache.kafka.server.util.{KafkaScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.checkpoint.{LeaderEpochCheckpointFile, PartitionMetadataFile} import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, AsyncOffsetReader, Cleaner, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, LogToClean, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AbortedTxn, AppendOrigin, EpochEntry, LogConfig, LogFileUtils, LogOffsetMetadata, LogOffsetSnapshot, LogOffsetsListener, LogSegment, LogSegments, LogStartOffsetIncrementReason, OffsetResultHolder, OffsetsOutOfOrderException, ProducerStateManager, ProducerStateManagerConfig, RecordValidationException, VerificationGuard} import org.apache.kafka.storage.internals.utils.Throttler import org.apache.kafka.storage.log.metrics.{BrokerTopicMetrics, BrokerTopicStats} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ArgumentsSource import org.junit.jupiter.params.provider.{EnumSource, ValueSource} import org.mockito.ArgumentMatchers import org.mockito.ArgumentMatchers.{any, anyLong} import org.mockito.Mockito.{doAnswer, doThrow, spy} -import net.jqwik.api.AfterFailureMode -import net.jqwik.api.ForAll -import net.jqwik.api.Property -import org.apache.kafka.server.config.KRaftConfigs - import java.io._ import java.nio.ByteBuffer import java.nio.file.Files -import java.util import java.util.concurrent.{Callable, ConcurrentHashMap, Executors, TimeUnit} -import java.util.{Optional, OptionalLong, Properties} +import java.util.{Optional, OptionalInt, OptionalLong, Properties} import scala.collection.immutable.SortedSet import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ @@ -107,10 +101,10 @@ class UnifiedLogTest { expectedSize: Int, expectedOffsets: Seq[Long]): Unit = { val readInfo = log.read( - fetchOffset, - 2048, - FetchIsolation.HIGH_WATERMARK, - false) + startOffset = fetchOffset, + maxLength = 2048, + isolation = FetchIsolation.HIGH_WATERMARK, + minOneMessage = false) assertEquals(expectedSize, readInfo.records.sizeInBytes) assertEquals(expectedOffsets, readInfo.records.records.asScala.map(_.offset)) } @@ -121,7 +115,7 @@ class UnifiedLogTest { new SimpleRecord(mockTime.milliseconds, "c".getBytes, "value".getBytes) )) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) assertFetchSizeAndOffsets(fetchOffset = 0L, 0, Seq()) log.maybeIncrementHighWatermark(log.logEndOffsetMetadata) @@ -130,7 +124,7 @@ class UnifiedLogTest { log.roll() assertFetchSizeAndOffsets(fetchOffset = 0L, records.sizeInBytes, Seq(0, 1, 2)) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) assertFetchSizeAndOffsets(fetchOffset = 3L, 0, Seq()) } @@ -172,17 +166,17 @@ class UnifiedLogTest { val records = TestUtils.records(simpleRecords) - val firstAppendInfo = log.appendAsLeader(records, 0) + val firstAppendInfo = log.appendAsLeader(records, leaderEpoch = 0) assertEquals(0, firstAppendInfo.firstOffset) val secondAppendInfo = log.appendAsLeader( TestUtils.records(simpleRecords), - 0 + leaderEpoch = 0 ) assertEquals(simpleRecords.size, secondAppendInfo.firstOffset) log.roll() - val afterRollAppendInfo = log.appendAsLeader(TestUtils.records(simpleRecords), 0) + val afterRollAppendInfo = log.appendAsLeader(TestUtils.records(simpleRecords), leaderEpoch = 0) assertEquals(simpleRecords.size * 2, afterRollAppendInfo.firstOffset) } @@ -193,7 +187,7 @@ class UnifiedLogTest { @Test def testTruncateFullyAndStartBelowFirstUnstableOffset(): Unit = { - testTruncateBelowFirstUnstableOffset((log, targetOffset) => log.truncateFullyAndStartAt(targetOffset, Optional.empty)) + testTruncateBelowFirstUnstableOffset((log, targetOffset) => log.truncateFullyAndStartAt(targetOffset)) } @Test @@ -209,7 +203,7 @@ class UnifiedLogTest { new SimpleRecord("0".getBytes), new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) - )), 0) + )), leaderEpoch = 0) log.appendAsLeader(MemoryRecords.withTransactionalRecords( Compression.NONE, @@ -218,9 +212,9 @@ class UnifiedLogTest { sequence, new SimpleRecord("3".getBytes), new SimpleRecord("4".getBytes) - ), 0) + ), leaderEpoch = 0) - assertEquals(Optional.of(3L), log.firstUnstableOffset) + assertEquals(Some(3L), log.firstUnstableOffset) // We close and reopen the log to ensure that the first unstable offset segment // position will be undefined when we truncate the log. @@ -229,9 +223,9 @@ class UnifiedLogTest { val reopened = createLog(logDir, logConfig) assertEquals(Optional.of(new LogOffsetMetadata(3L)), reopened.producerStateManager.firstUnstableOffset) - reopened.truncateFullyAndStartAt(2L, Optional.of(1L)) - assertEquals(Optional.empty, reopened.firstUnstableOffset) - assertEquals(util.Map.of, reopened.producerStateManager.activeProducers) + reopened.truncateFullyAndStartAt(2L, Some(1L)) + assertEquals(None, reopened.firstUnstableOffset) + assertEquals(java.util.Collections.emptyMap(), reopened.producerStateManager.activeProducers) assertEquals(1L, reopened.logStartOffset) assertEquals(2L, reopened.logEndOffset) } @@ -252,7 +246,7 @@ class UnifiedLogTest { new SimpleRecord("0".getBytes), new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) - )), 0) + )), leaderEpoch = 0) log.appendAsLeader(MemoryRecords.withTransactionalRecords( Compression.NONE, @@ -261,9 +255,9 @@ class UnifiedLogTest { sequence, new SimpleRecord("3".getBytes), new SimpleRecord("4".getBytes) - ), 0) + ), leaderEpoch = 0) - assertEquals(Optional.of(3L), log.firstUnstableOffset) + assertEquals(Some(3L), log.firstUnstableOffset) // We close and reopen the log to ensure that the first unstable offset segment // position will be undefined when we truncate the log. @@ -273,8 +267,8 @@ class UnifiedLogTest { assertEquals(Optional.of(new LogOffsetMetadata(3L)), reopened.producerStateManager.firstUnstableOffset) truncateFunc(reopened, 0L) - assertEquals(Optional.empty, reopened.firstUnstableOffset) - assertEquals(util.Map.of, reopened.producerStateManager.activeProducers) + assertEquals(None, reopened.firstUnstableOffset) + assertEquals(java.util.Collections.emptyMap(), reopened.producerStateManager.activeProducers) } @Test @@ -310,7 +304,7 @@ class UnifiedLogTest { assertHighWatermark(3L) // Update high watermark as follower - log.appendAsFollower(records(3L), leaderEpoch) + log.appendAsFollower(records(3L)) log.updateHighWatermark(6L) assertHighWatermark(6L) @@ -318,20 +312,23 @@ class UnifiedLogTest { log.truncateTo(3L) assertHighWatermark(3L) - log.appendAsLeader(records(0L), 0) + log.appendAsLeader(records(0L), leaderEpoch = 0) assertHighWatermark(3L) assertEquals(6L, log.logEndOffset) assertEquals(0L, log.logStartOffset) // Full truncation should also reset high watermark - log.truncateFullyAndStartAt(4L, Optional.empty) + log.truncateFullyAndStartAt(4L) assertEquals(4L, log.logEndOffset) assertEquals(4L, log.logStartOffset) assertHighWatermark(4L) } private def assertNonEmptyFetch(log: UnifiedLog, offset: Long, isolation: FetchIsolation, batchBaseOffset: Long): Unit = { - val readInfo = log.read(offset, Int.MaxValue, isolation, true) + val readInfo = log.read(startOffset = offset, + maxLength = Int.MaxValue, + isolation = isolation, + minOneMessage = true) assertFalse(readInfo.firstEntryIncomplete) assertTrue(readInfo.records.sizeInBytes > 0) @@ -350,7 +347,10 @@ class UnifiedLogTest { } private def assertEmptyFetch(log: UnifiedLog, offset: Long, isolation: FetchIsolation, batchBaseOffset: Long): Unit = { - val readInfo = log.read(offset, Int.MaxValue, isolation, true) + val readInfo = log.read(startOffset = offset, + maxLength = Int.MaxValue, + isolation = isolation, + minOneMessage = true) assertFalse(readInfo.firstEntryIncomplete) assertEquals(0, readInfo.records.sizeInBytes) assertEquals(batchBaseOffset, readInfo.fetchOffsetMetadata.messageOffset) @@ -366,11 +366,11 @@ class UnifiedLogTest { new SimpleRecord("0".getBytes), new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) - )), 0) + )), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List( new SimpleRecord("3".getBytes), new SimpleRecord("4".getBytes) - )), 0) + )), leaderEpoch = 0) val batchBaseOffsets = SortedSet[Long](0, 3, 5) (log.logStartOffset until log.logEndOffset).foreach { offset => @@ -388,11 +388,11 @@ class UnifiedLogTest { new SimpleRecord("0".getBytes), new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) - )), 0) + )), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List( new SimpleRecord("3".getBytes), new SimpleRecord("4".getBytes) - )), 0) + )), leaderEpoch = 0) val batchBaseOffsets = SortedSet[Long](0, 3, 5) def assertHighWatermarkBoundedFetches(): Unit = { @@ -428,7 +428,7 @@ class UnifiedLogTest { currentTxnStartOffset: Option[Long], coordinatorEpoch: Option[Int] ): Unit = { - val producerStateOpt = log.activeProducers.asScala.find(_.producerId == producerId) + val producerStateOpt = log.activeProducers.find(_.producerId == producerId) assertTrue(producerStateOpt.isDefined) val producerState = producerStateOpt.get @@ -527,6 +527,13 @@ class UnifiedLogTest { assertLsoBoundedFetches() } + @Test + def testOffsetFromProducerSnapshotFile(): Unit = { + val offset = 23423423L + val snapshotFile = LogFileUtils.producerSnapshotFile(tmpDir, offset) + assertEquals(offset, UnifiedLog.offsetFromFile(snapshotFile)) + } + /** * Tests for time based log roll. This test appends messages then changes the time * using the mock clock to force the log to roll and checks the number of segments. @@ -541,70 +548,69 @@ class UnifiedLogTest { assertEquals(1, log.numberOfSegments, "Log begins with a single empty segment.") // Test the segment rolling behavior when messages do not have a timestamp. mockTime.sleep(log.config.segmentMs + 1) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "Log doesn't roll if doing so creates an empty segment.") - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(2, log.numberOfSegments, "Log rolls on this append since time has expired.") for (numSegments <- 3 until 5) { mockTime.sleep(log.config.segmentMs + 1) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(numSegments, log.numberOfSegments, "Changing time beyond rollMs and appending should create a new segment.") } // Append a message with timestamp to a segment whose first message do not have a timestamp. val timestamp = mockTime.milliseconds + log.config.segmentMs + 1 def createRecordsWithTimestamp = TestUtils.singletonRecords(value = "test".getBytes, timestamp = timestamp) - log.appendAsLeader(createRecordsWithTimestamp, 0) + log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0) assertEquals(4, log.numberOfSegments, "Segment should not have been rolled out because the log rolling should be based on wall clock.") // Test the segment rolling behavior when messages have timestamps. mockTime.sleep(log.config.segmentMs + 1) - log.appendAsLeader(createRecordsWithTimestamp, 0) + log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0) assertEquals(5, log.numberOfSegments, "A new segment should have been rolled out") // move the wall clock beyond log rolling time mockTime.sleep(log.config.segmentMs + 1) - log.appendAsLeader(createRecordsWithTimestamp, 0) + log.appendAsLeader(createRecordsWithTimestamp, leaderEpoch = 0) assertEquals(5, log.numberOfSegments, "Log should not roll because the roll should depend on timestamp of the first message.") val recordWithExpiredTimestamp = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds) - log.appendAsLeader(recordWithExpiredTimestamp, 0) + log.appendAsLeader(recordWithExpiredTimestamp, leaderEpoch = 0) assertEquals(6, log.numberOfSegments, "Log should roll because the timestamp in the message should make the log segment expire.") val numSegments = log.numberOfSegments mockTime.sleep(log.config.segmentMs + 1) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE), leaderEpoch = 0) assertEquals(numSegments, log.numberOfSegments, "Appending an empty message set should not roll log even if sufficient time has passed.") } @Test def testRollSegmentThatAlreadyExists(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentMs = 1 * 60 * 60L) - val partitionLeaderEpoch = 0 // create a log val log = createLog(logDir, logConfig) assertEquals(1, log.numberOfSegments, "Log begins with a single empty segment.") // roll active segment with the same base offset of size zero should recreate the segment - log.roll(Optional.of(0L)) + log.roll(Some(0L)) assertEquals(1, log.numberOfSegments, "Expect 1 segment after roll() empty segment with base offset.") // should be able to append records to active segment val records = TestUtils.records( List(new SimpleRecord(mockTime.milliseconds, "k1".getBytes, "v1".getBytes)), - baseOffset = 0L, partitionLeaderEpoch = partitionLeaderEpoch) - log.appendAsFollower(records, partitionLeaderEpoch) + baseOffset = 0L, partitionLeaderEpoch = 0) + log.appendAsFollower(records) assertEquals(1, log.numberOfSegments, "Expect one segment.") assertEquals(0L, log.activeSegment.baseOffset) // make sure we can append more records val records2 = TestUtils.records( List(new SimpleRecord(mockTime.milliseconds + 10, "k2".getBytes, "v2".getBytes)), - baseOffset = 1L, partitionLeaderEpoch = partitionLeaderEpoch) - log.appendAsFollower(records2, partitionLeaderEpoch) + baseOffset = 1L, partitionLeaderEpoch = 0) + log.appendAsFollower(records2) assertEquals(2, log.logEndOffset, "Expect two records in the log") assertEquals(0, LogTestUtils.readLog(log, 0, 1).records.batches.iterator.next().lastOffset) @@ -619,8 +625,8 @@ class UnifiedLogTest { log.activeSegment.offsetIndex.resize(0) val records3 = TestUtils.records( List(new SimpleRecord(mockTime.milliseconds + 12, "k3".getBytes, "v3".getBytes)), - baseOffset = 2L, partitionLeaderEpoch = partitionLeaderEpoch) - log.appendAsFollower(records3, partitionLeaderEpoch) + baseOffset = 2L, partitionLeaderEpoch = 0) + log.appendAsFollower(records3) assertTrue(log.activeSegment.offsetIndex.maxEntries > 1) assertEquals(2, LogTestUtils.readLog(log, 2, 1).records.batches.iterator.next().lastOffset) assertEquals(2, log.numberOfSegments, "Expect two segments.") @@ -634,10 +640,10 @@ class UnifiedLogTest { val epoch: Short = 0 val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 0) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) val nextRecords = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 2) - assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(nextRecords, 0)) + assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(nextRecords, leaderEpoch = 0)) } @Test @@ -647,13 +653,13 @@ class UnifiedLogTest { // Seed some initial data in the log val records = TestUtils.records(List(new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)), baseOffset = 27) - appendAsFollower(log, records, 19) + appendAsFollower(log, records, leaderEpoch = 19) assertEquals(Optional.of(new EpochEntry(19, 27)), log.leaderEpochCache.latestEntry) assertEquals(29, log.logEndOffset) def verifyTruncationClearsEpochCache(epoch: Int, truncationOffset: Long): Unit = { // Simulate becoming a leader - log.assignEpochStartOffset(epoch, log.logEndOffset) + log.assignEpochStartOffset(leaderEpoch = epoch, startOffset = log.logEndOffset) assertEquals(Optional.of(new EpochEntry(epoch, 29)), log.leaderEpochCache.latestEntry) assertEquals(29, log.logEndOffset) @@ -678,7 +684,7 @@ class UnifiedLogTest { def testLogSegmentsCallCorrect(): Unit = { // Create 3 segments and make sure we get the right values from various logSegments calls. def createRecords = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds) - def getSegmentOffsets(log :UnifiedLog, from: Long, to: Long) = log.logSegments(from, to).stream().map { _.baseOffset }.toList + def getSegmentOffsets(log :UnifiedLog, from: Long, to: Long) = log.logSegments(from, to).map { _.baseOffset } val setSize = createRecords.sizeInBytes val msgPerSeg = 10 val segmentSize = msgPerSeg * setSize // each segment will be 10 messages @@ -689,18 +695,18 @@ class UnifiedLogTest { // segments expire in size for (_ <- 1 to (2 * msgPerSeg + 2)) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(3, log.numberOfSegments, "There should be exactly 3 segments.") // from == to should always be null - assertEquals(util.List.of(), getSegmentOffsets(log, 10, 10)) - assertEquals(util.List.of(), getSegmentOffsets(log, 15, 15)) + assertEquals(List.empty[LogSegment], getSegmentOffsets(log, 10, 10)) + assertEquals(List.empty[LogSegment], getSegmentOffsets(log, 15, 15)) - assertEquals(util.List.of(0L, 10L, 20L), getSegmentOffsets(log, 0, 21)) + assertEquals(List[Long](0, 10, 20), getSegmentOffsets(log, 0, 21)) - assertEquals(util.List.of(0L), getSegmentOffsets(log, 1, 5)) - assertEquals(util.List.of(10L, 20L), getSegmentOffsets(log, 13, 21)) - assertEquals(util.List.of(10L), getSegmentOffsets(log, 13, 17)) + assertEquals(List[Long](0), getSegmentOffsets(log, 1, 5)) + assertEquals(List[Long](10, 20), getSegmentOffsets(log, 13, 21)) + assertEquals(List[Long](10), getSegmentOffsets(log, 13, 17)) // from < to is bad assertThrows(classOf[IllegalArgumentException], () => log.logSegments(10, 0)) @@ -716,7 +722,7 @@ class UnifiedLogTest { for (i <- 0 to 100) { val record = new SimpleRecord(mockTime.milliseconds, i.toString.getBytes) - log.appendAsLeader(TestUtils.records(List(record)), 0) + log.appendAsLeader(TestUtils.records(List(record)), leaderEpoch = 0) } assertTrue(log.logSegments.size >= 2) val logEndOffset = log.logEndOffset @@ -784,7 +790,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes))) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.takeProducerSnapshot() assertEquals(OptionalLong.of(1), log.latestProducerSnapshotOffset) } @@ -794,25 +800,17 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) val pid = 1L - val producerEpoch = 0.toShort - val partitionLeaderEpoch = 0 + val epoch = 0.toShort val seq = 0 val baseOffset = 23L // create a batch with a couple gaps to simulate compaction - val records = TestUtils.records( - producerId = pid, - producerEpoch = producerEpoch, - sequence = seq, - baseOffset = baseOffset, - records = List( - new SimpleRecord(mockTime.milliseconds(), "a".getBytes), - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes), - new SimpleRecord(mockTime.milliseconds(), "c".getBytes), - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes) - ) - ) - records.batches.forEach(_.setPartitionLeaderEpoch(partitionLeaderEpoch)) + val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List( + new SimpleRecord(mockTime.milliseconds(), "a".getBytes), + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes), + new SimpleRecord(mockTime.milliseconds(), "c".getBytes), + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes))) + records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) records.filterTo(new RecordFilter(0, 0) { @@ -823,25 +821,21 @@ class UnifiedLogTest { filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) - log.appendAsFollower(filteredRecords, partitionLeaderEpoch) + log.appendAsFollower(filteredRecords) // append some more data and then truncate to force rebuilding of the PID map - val moreRecords = TestUtils.records( - baseOffset = baseOffset + 4, - records = List( - new SimpleRecord(mockTime.milliseconds(), "e".getBytes), - new SimpleRecord(mockTime.milliseconds(), "f".getBytes) - ) - ) - moreRecords.batches.forEach(_.setPartitionLeaderEpoch(partitionLeaderEpoch)) - log.appendAsFollower(moreRecords, partitionLeaderEpoch) + val moreRecords = TestUtils.records(baseOffset = baseOffset + 4, records = List( + new SimpleRecord(mockTime.milliseconds(), "e".getBytes), + new SimpleRecord(mockTime.milliseconds(), "f".getBytes))) + moreRecords.batches.forEach(_.setPartitionLeaderEpoch(0)) + log.appendAsFollower(moreRecords) log.truncateTo(baseOffset + 4) val activeProducers = log.activeProducersWithLastSequence - assertTrue(activeProducers.containsKey(pid)) + assertTrue(activeProducers.contains(pid)) - val lastSeq = activeProducers.get(pid) + val lastSeq = activeProducers(pid) assertEquals(3, lastSeq) } @@ -850,23 +844,15 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) val pid = 1L - val producerEpoch = 0.toShort - val partitionLeaderEpoch = 0 + val epoch = 0.toShort val seq = 0 val baseOffset = 23L // create an empty batch - val records = TestUtils.records( - producerId = pid, - producerEpoch = producerEpoch, - sequence = seq, - baseOffset = baseOffset, - records = List( - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "a".getBytes), - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes) - ) - ) - records.batches.forEach(_.setPartitionLeaderEpoch(partitionLeaderEpoch)) + val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List( + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "a".getBytes), + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes))) + records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) records.filterTo(new RecordFilter(0, 0) { @@ -877,25 +863,21 @@ class UnifiedLogTest { filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) - log.appendAsFollower(filteredRecords, partitionLeaderEpoch) + log.appendAsFollower(filteredRecords) // append some more data and then truncate to force rebuilding of the PID map - val moreRecords = TestUtils.records( - baseOffset = baseOffset + 2, - records = List( - new SimpleRecord(mockTime.milliseconds(), "e".getBytes), - new SimpleRecord(mockTime.milliseconds(), "f".getBytes) - ) - ) - moreRecords.batches.forEach(_.setPartitionLeaderEpoch(partitionLeaderEpoch)) - log.appendAsFollower(moreRecords, partitionLeaderEpoch) + val moreRecords = TestUtils.records(baseOffset = baseOffset + 2, records = List( + new SimpleRecord(mockTime.milliseconds(), "e".getBytes), + new SimpleRecord(mockTime.milliseconds(), "f".getBytes))) + moreRecords.batches.forEach(_.setPartitionLeaderEpoch(0)) + log.appendAsFollower(moreRecords) log.truncateTo(baseOffset + 2) val activeProducers = log.activeProducersWithLastSequence - assertTrue(activeProducers.containsKey(pid)) + assertTrue(activeProducers.contains(pid)) - val lastSeq = activeProducers.get(pid) + val lastSeq = activeProducers(pid) assertEquals(1, lastSeq) } @@ -904,25 +886,17 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) val pid = 1L - val producerEpoch = 0.toShort - val partitionLeaderEpoch = 0 + val epoch = 0.toShort val seq = 0 val baseOffset = 23L // create a batch with a couple gaps to simulate compaction - val records = TestUtils.records( - producerId = pid, - producerEpoch = producerEpoch, - sequence = seq, - baseOffset = baseOffset, - records = List( - new SimpleRecord(mockTime.milliseconds(), "a".getBytes), - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes), - new SimpleRecord(mockTime.milliseconds(), "c".getBytes), - new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes) - ) - ) - records.batches.forEach(_.setPartitionLeaderEpoch(partitionLeaderEpoch)) + val records = TestUtils.records(producerId = pid, producerEpoch = epoch, sequence = seq, baseOffset = baseOffset, records = List( + new SimpleRecord(mockTime.milliseconds(), "a".getBytes), + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "b".getBytes), + new SimpleRecord(mockTime.milliseconds(), "c".getBytes), + new SimpleRecord(mockTime.milliseconds(), "key".getBytes, "d".getBytes))) + records.batches.forEach(_.setPartitionLeaderEpoch(0)) val filtered = ByteBuffer.allocate(2048) records.filterTo(new RecordFilter(0, 0) { @@ -933,11 +907,11 @@ class UnifiedLogTest { filtered.flip() val filteredRecords = MemoryRecords.readableRecords(filtered) - log.appendAsFollower(filteredRecords, partitionLeaderEpoch) + log.appendAsFollower(filteredRecords) val activeProducers = log.activeProducersWithLastSequence - assertTrue(activeProducers.containsKey(pid)) + assertTrue(activeProducers.contains(pid)) - val lastSeq = activeProducers.get(pid) + val lastSeq = activeProducers(pid) assertEquals(3, lastSeq) } @@ -945,11 +919,11 @@ class UnifiedLogTest { def testProducerIdMapTruncateTo(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), 0) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes))), leaderEpoch = 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes))), leaderEpoch = 0) log.takeProducerSnapshot() - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes))), leaderEpoch = 0) log.takeProducerSnapshot() log.truncateTo(2) @@ -974,16 +948,19 @@ class UnifiedLogTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid, - producerEpoch = epoch, sequence = 1), 0) + producerEpoch = epoch, sequence = 1), leaderEpoch = 0) LogTestUtils.deleteProducerSnapshotFiles(logDir) log.truncateTo(1L) assertEquals(1, log.activeProducersWithLastSequence.size) - val lastSeq = log.activeProducersWithLastSequence.get(pid) + val lastSeqOpt = log.activeProducersWithLastSequence.get(pid) + assertTrue(lastSeqOpt.isDefined) + + val lastSeq = lastSeqOpt.get assertEquals(0, lastSeq) } @@ -996,13 +973,13 @@ class UnifiedLogTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 1), 0) + producerEpoch = epoch, sequence = 1), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 2), 0) + producerEpoch = epoch, sequence = 2), leaderEpoch = 0) if (createEmptyActiveSegment) { log.roll() } @@ -1026,11 +1003,11 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5, retentionBytes = -1, retentionMs = 900, fileDeleteDelayMs = 0) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds() + 100, "a".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds() + 100, "a".getBytes))), leaderEpoch = 0) log.roll() - log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes))), leaderEpoch = 0) log.roll() - log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds() + 100, "c".getBytes))), 0) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds() + 100, "c".getBytes))), leaderEpoch = 0) mockTime.sleep(901) @@ -1050,13 +1027,13 @@ class UnifiedLogTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 1), 0) + producerEpoch = epoch, sequence = 1), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 2), 0) + producerEpoch = epoch, sequence = 2), leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) assertEquals(2, ProducerStateManager.listSnapshotFiles(logDir).size) @@ -1075,23 +1052,23 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) val pid1 = 1L val epoch = 0.toShort - val cleaner = new Cleaner(0, - new FakeOffsetMap(Int.MaxValue), - 64 * 1024, - 64 * 1024, - 0.75, - new Throttler(Double.MaxValue, Long.MaxValue, "throttler", "entries", mockTime), - mockTime, - tp => {}) + val cleaner = new Cleaner(id = 0, + offsetMap = new FakeOffsetMap(Int.MaxValue), + ioBufferSize = 64 * 1024, + maxIoBufferSize = 64 * 1024, + dupBufferLoadFactor = 0.75, + throttler = new Throttler(Double.MaxValue, Long.MaxValue, "throttler", "entries", mockTime), + time = mockTime, + checkDone = _ => {}) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes, "a".getBytes())), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes, "b".getBytes())), producerId = pid1, - producerEpoch = epoch, sequence = 1), 0) + producerEpoch = epoch, sequence = 1), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes, "c".getBytes())), producerId = pid1, - producerEpoch = epoch, sequence = 2), 0) + producerEpoch = epoch, sequence = 2), leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) assertEquals(log.logSegments.asScala.map(_.baseOffset).toSeq.sorted.drop(1), ProducerStateManager.listSnapshotFiles(logDir).asScala.map(_.offset).sorted, "expected a snapshot file per segment base offset, except the first segment") @@ -1099,7 +1076,7 @@ class UnifiedLogTest { // Clean segments, this should delete everything except the active segment since there only // exists the key "a". - cleaner.clean(new LogToClean(log, 0, log.logEndOffset, false)) + cleaner.clean(LogToClean(log.topicPartition, log, 0, log.logEndOffset)) log.deleteOldSegments() // Sleep to breach the file delete delay and run scheduled file deletion tasks mockTime.sleep(1) @@ -1126,18 +1103,18 @@ class UnifiedLogTest { val records = TestUtils.singletonRecords("foo".getBytes) val logConfig = LogTestUtils.createLogConfig(segmentBytes = records.sizeInBytes, retentionBytes = records.sizeInBytes * 2) val log = createLog(logDir, logConfig) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.takeProducerSnapshot() - log.appendAsLeader(TestUtils.singletonRecords("bar".getBytes), 0) - log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords("bar".getBytes), leaderEpoch = 0) + log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), leaderEpoch = 0) log.takeProducerSnapshot() assertEquals(3, log.logSegments.size) assertEquals(3, log.latestProducerStateEndOffset) assertEquals(OptionalLong.of(3), log.latestProducerSnapshotOffset) - log.truncateFullyAndStartAt(29, Optional.empty) + log.truncateFullyAndStartAt(29) assertEquals(1, log.logSegments.size) assertEquals(OptionalLong.empty(), log.latestProducerSnapshotOffset) assertEquals(29, log.latestProducerStateEndOffset) @@ -1149,50 +1126,50 @@ class UnifiedLogTest { val records = TestUtils.records(Seq(new SimpleRecord("foo".getBytes)), producerId = pid1, producerEpoch = 0, sequence = 0) val logConfig = LogTestUtils.createLogConfig(segmentBytes = records.sizeInBytes, retentionBytes = records.sizeInBytes * 2) val log = createLog(logDir, logConfig) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.takeProducerSnapshot() val pid2 = 2L log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord("bar".getBytes)), producerId = pid2, producerEpoch = 0, sequence = 0), - 0) + leaderEpoch = 0) log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord("baz".getBytes)), producerId = pid2, producerEpoch = 0, sequence = 1), - 0) + leaderEpoch = 0) log.takeProducerSnapshot() assertEquals(3, log.logSegments.size) - assertEquals(util.Set.of(pid1, pid2), log.activeProducersWithLastSequence.keySet) + assertEquals(Set(pid1, pid2), log.activeProducersWithLastSequence.keySet) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() // Producer state should not be removed when deleting log segment assertEquals(2, log.logSegments.size) - assertEquals(util.Set.of(pid1, pid2), log.activeProducersWithLastSequence.keySet) + assertEquals(Set(pid1, pid2), log.activeProducersWithLastSequence.keySet) } @Test def testTakeSnapshotOnRollAndDeleteSnapshotOnRecoveryPointCheckpoint(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.singletonRecords("a".getBytes), 0) - log.roll(Optional.of(1L)) + log.appendAsLeader(TestUtils.singletonRecords("a".getBytes), leaderEpoch = 0) + log.roll(Some(1L)) assertEquals(OptionalLong.of(1L), log.latestProducerSnapshotOffset) assertEquals(OptionalLong.of(1L), log.oldestProducerSnapshotOffset) - log.appendAsLeader(TestUtils.singletonRecords("b".getBytes), 0) - log.roll(Optional.of(2L)) + log.appendAsLeader(TestUtils.singletonRecords("b".getBytes), leaderEpoch = 0) + log.roll(Some(2L)) assertEquals(OptionalLong.of(2L), log.latestProducerSnapshotOffset) assertEquals(OptionalLong.of(1L), log.oldestProducerSnapshotOffset) - log.appendAsLeader(TestUtils.singletonRecords("c".getBytes), 0) - log.roll(Optional.of(3L)) + log.appendAsLeader(TestUtils.singletonRecords("c".getBytes), leaderEpoch = 0) + log.roll(Some(3L)) assertEquals(OptionalLong.of(3L), log.latestProducerSnapshotOffset) // roll triggers a flush at the starting offset of the new segment, we should retain all snapshots assertEquals(OptionalLong.of(1L), log.oldestProducerSnapshotOffset) // even if we flush within the active segment, the snapshot should remain - log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords("baz".getBytes), leaderEpoch = 0) log.flushUptoOffsetExclusive(4L) assertEquals(OptionalLong.of(3L), log.latestProducerSnapshotOffset) } @@ -1205,12 +1182,12 @@ class UnifiedLogTest { log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord(mockTime.milliseconds(), new Array[Byte](512))), producerId = producerId, producerEpoch = 0, sequence = 0), - 0) + leaderEpoch = 0) // The next append should overflow the segment and cause it to roll log.appendAsLeader(TestUtils.records(Seq(new SimpleRecord(mockTime.milliseconds(), new Array[Byte](512))), producerId = producerId, producerEpoch = 0, sequence = 1), - 0) + leaderEpoch = 0) assertEquals(2, log.logSegments.size) assertEquals(1L, log.activeSegment.baseOffset) @@ -1244,18 +1221,18 @@ class UnifiedLogTest { new SimpleRecord("foo".getBytes), new SimpleRecord("bar".getBytes), new SimpleRecord("baz".getBytes)) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) val abortAppendInfo = LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds()) log.updateHighWatermark(abortAppendInfo.lastOffset + 1) // now there should be no first unstable offset - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) log.close() val reopenedLog = createLog(logDir, logConfig, lastShutdownClean = false) reopenedLog.updateHighWatermark(abortAppendInfo.lastOffset + 1) - assertEquals(Optional.empty, reopenedLog.firstUnstableOffset) + assertEquals(None, reopenedLog.firstUnstableOffset) } @Test @@ -1268,15 +1245,15 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig, producerIdExpirationCheckIntervalMs = producerIdExpirationCheckIntervalMs) val records = Seq(new SimpleRecord(mockTime.milliseconds(), "foo".getBytes)) - log.appendAsLeader(TestUtils.records(records, producerId = pid, producerEpoch = 0, sequence = 0), 0) + log.appendAsLeader(TestUtils.records(records, producerId = pid, producerEpoch = 0, sequence = 0), leaderEpoch = 0) - assertEquals(util.Set.of(pid), log.activeProducersWithLastSequence.keySet) + assertEquals(Set(pid), log.activeProducersWithLastSequence.keySet) mockTime.sleep(producerIdExpirationCheckIntervalMs) - assertEquals(util.Set.of(pid), log.activeProducersWithLastSequence.keySet) + assertEquals(Set(pid), log.activeProducersWithLastSequence.keySet) mockTime.sleep(producerIdExpirationCheckIntervalMs) - assertEquals(util.Set.of(), log.activeProducersWithLastSequence.keySet) + assertEquals(Set(), log.activeProducersWithLastSequence.keySet) } @Test @@ -1291,7 +1268,7 @@ class UnifiedLogTest { for (_ <- 0 to 5) { val record = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = seq) - log.appendAsLeader(record, 0) + log.appendAsLeader(record, leaderEpoch = 0) seq = seq + 1 } // Append an entry with multiple log records. @@ -1300,7 +1277,7 @@ class UnifiedLogTest { new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes), new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes) ), producerId = pid, producerEpoch = epoch, sequence = seq) - val multiEntryAppendInfo = log.appendAsLeader(createRecords, 0) + val multiEntryAppendInfo = log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals( multiEntryAppendInfo.lastOffset - multiEntryAppendInfo.firstOffset + 1, 3, @@ -1308,7 +1285,7 @@ class UnifiedLogTest { ) // Append a Duplicate of the tail, when the entry at the tail has multiple records. - val dupMultiEntryAppendInfo = log.appendAsLeader(createRecords, 0) + val dupMultiEntryAppendInfo = log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals( multiEntryAppendInfo.firstOffset, dupMultiEntryAppendInfo.firstOffset, @@ -1325,27 +1302,27 @@ class UnifiedLogTest { new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes), new SimpleRecord(mockTime.milliseconds, s"key-$seq".getBytes, s"value-$seq".getBytes)), producerId = pid, producerEpoch = epoch, sequence = seq - 2) - assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(records, 0), + assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(records, leaderEpoch = 0), () => "Should have received an OutOfOrderSequenceException since we attempted to append a duplicate of a records in the middle of the log.") // Append a duplicate of the batch which is 4th from the tail. This should succeed without error since we // retain the batch metadata of the last 5 batches. val duplicateOfFourth = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 2) - log.appendAsLeader(duplicateOfFourth, 0) + log.appendAsLeader(duplicateOfFourth, leaderEpoch = 0) // Duplicates at older entries are reported as OutOfOrderSequence errors records = TestUtils.records( List(new SimpleRecord(mockTime.milliseconds, s"key-1".getBytes, s"value-1".getBytes)), producerId = pid, producerEpoch = epoch, sequence = 1) - assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(records, 0), + assertThrows(classOf[OutOfOrderSequenceException], () => log.appendAsLeader(records, leaderEpoch = 0), () => "Should have received an OutOfOrderSequenceException since we attempted to append a duplicate of a batch which is older than the last 5 appended batches.") // Append a duplicate entry with a single records at the tail of the log. This should return the appendInfo of the original entry. def createRecordsWithDuplicate = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = epoch, sequence = seq) - val origAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, 0) - val newAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, 0) + val origAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, leaderEpoch = 0) + val newAppendInfo = log.appendAsLeader(createRecordsWithDuplicate, leaderEpoch = 0) assertEquals( origAppendInfo.firstOffset, newAppendInfo.firstOffset, @@ -1360,44 +1337,33 @@ class UnifiedLogTest { // create a log val log = createLog(logDir, new LogConfig(new Properties)) - val producerEpoch: Short = 0 - val partitionLeaderEpoch = 0 + val epoch: Short = 0 val buffer = ByteBuffer.allocate(512) - var builder = MemoryRecords.builder( - buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), 1L, producerEpoch, 0, false, - partitionLeaderEpoch - ) + var builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, + TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), 1L, epoch, 0, false, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), 2L, producerEpoch, 0, false, - partitionLeaderEpoch) + TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), 2L, epoch, 0, false, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() - builder = MemoryRecords.builder( - buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), 3L, producerEpoch, 0, false, - partitionLeaderEpoch - ) + builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, + TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), 3L, epoch, 0, false, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() - builder = MemoryRecords.builder( - buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), 4L, producerEpoch, 0, false, - partitionLeaderEpoch - ) + builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, + TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), 4L, epoch, 0, false, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() buffer.flip() val memoryRecords = MemoryRecords.readableRecords(buffer) - log.appendAsFollower(memoryRecords, partitionLeaderEpoch) + log.appendAsFollower(memoryRecords) log.flush(false) val fetchedData = LogTestUtils.readLog(log, 0, Int.MaxValue) @@ -1416,7 +1382,7 @@ class UnifiedLogTest { def testDuplicateAppendToFollower(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024 * 5) val log = createLog(logDir, logConfig) - val producerEpoch: Short = 0 + val epoch: Short = 0 val pid = 1L val baseSequence = 0 val partitionLeaderEpoch = 0 @@ -1424,32 +1390,10 @@ class UnifiedLogTest { // this is a bit contrived. to trigger the duplicate case for a follower append, we have to append // a batch with matching sequence numbers, but valid increasing offsets assertEquals(0L, log.logEndOffset) - log.appendAsFollower( - MemoryRecords.withIdempotentRecords( - 0L, - Compression.NONE, - pid, - producerEpoch, - baseSequence, - partitionLeaderEpoch, - new SimpleRecord("a".getBytes), - new SimpleRecord("b".getBytes) - ), - partitionLeaderEpoch - ) - log.appendAsFollower( - MemoryRecords.withIdempotentRecords( - 2L, - Compression.NONE, - pid, - producerEpoch, - baseSequence, - partitionLeaderEpoch, - new SimpleRecord("a".getBytes), - new SimpleRecord("b".getBytes) - ), - partitionLeaderEpoch - ) + log.appendAsFollower(MemoryRecords.withIdempotentRecords(0L, Compression.NONE, pid, epoch, baseSequence, + partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) + log.appendAsFollower(MemoryRecords.withIdempotentRecords(2L, Compression.NONE, pid, epoch, baseSequence, + partitionLeaderEpoch, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes))) // Ensure that even the duplicate sequences are accepted on the follower. assertEquals(4L, log.logEndOffset) @@ -1462,49 +1406,48 @@ class UnifiedLogTest { val pid1 = 1L val pid2 = 2L - val producerEpoch: Short = 0 + val epoch: Short = 0 val buffer = ByteBuffer.allocate(512) // pid1 seq = 0 var builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), pid1, producerEpoch, 0) + TimestampType.LOG_APPEND_TIME, 0L, mockTime.milliseconds(), pid1, epoch, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() // pid2 seq = 0 builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), pid2, producerEpoch, 0) + TimestampType.LOG_APPEND_TIME, 1L, mockTime.milliseconds(), pid2, epoch, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() // pid1 seq = 1 builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), pid1, producerEpoch, 1) + TimestampType.LOG_APPEND_TIME, 2L, mockTime.milliseconds(), pid1, epoch, 1) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() // pid2 seq = 1 builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), pid2, producerEpoch, 1) + TimestampType.LOG_APPEND_TIME, 3L, mockTime.milliseconds(), pid2, epoch, 1) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() // // pid1 seq = 1 (duplicate) builder = MemoryRecords.builder(buffer, RecordBatch.CURRENT_MAGIC_VALUE, Compression.NONE, - TimestampType.LOG_APPEND_TIME, 4L, mockTime.milliseconds(), pid1, producerEpoch, 1) + TimestampType.LOG_APPEND_TIME, 4L, mockTime.milliseconds(), pid1, epoch, 1) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() buffer.flip() - val epoch = 0 val records = MemoryRecords.readableRecords(buffer) - records.batches.forEach(_.setPartitionLeaderEpoch(epoch)) + records.batches.forEach(_.setPartitionLeaderEpoch(0)) // Ensure that batches with duplicates are accepted on the follower. assertEquals(0L, log.logEndOffset) - log.appendAsFollower(records, epoch) + log.appendAsFollower(records) assertEquals(5L, log.logEndOffset) } @@ -1517,10 +1460,10 @@ class UnifiedLogTest { val oldEpoch: Short = 0 val records = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = newEpoch, sequence = 0) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) val nextRecords = TestUtils.records(List(new SimpleRecord(mockTime.milliseconds, "key".getBytes, "value".getBytes)), producerId = pid, producerEpoch = oldEpoch, sequence = 0) - assertThrows(classOf[InvalidProducerEpochException], () => log.appendAsLeader(nextRecords, 0)) + assertThrows(classOf[InvalidProducerEpochException], () => log.appendAsLeader(nextRecords, leaderEpoch = 0)) } @Test @@ -1532,10 +1475,10 @@ class UnifiedLogTest { val epoch = 0.toShort log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes)), producerId = pid1, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "b".getBytes)), producerId = pid2, - producerEpoch = epoch, sequence = 0), 0) + producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.roll() assertEquals(2, log.activeProducersWithLastSequence.size) @@ -1549,8 +1492,9 @@ class UnifiedLogTest { // Deleting records should not remove producer state but should delete snapshots after the file deletion delay. assertEquals(2, log.activeProducersWithLastSequence.size) assertEquals(1, ProducerStateManager.listSnapshotFiles(log.dir).size) - val retainedLastSeq = log.activeProducersWithLastSequence.get(pid2) - assertEquals(0, retainedLastSeq) + val retainedLastSeqOpt = log.activeProducersWithLastSequence.get(pid2) + assertTrue(retainedLastSeqOpt.isDefined) + assertEquals(0, retainedLastSeqOpt.get) } /** @@ -1565,16 +1509,16 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentMs = 1 * 60 * 60L, segmentJitterMs = maxJitter) val log = createLog(logDir, logConfig) assertEquals(1, log.numberOfSegments, "Log begins with a single empty segment.") - log.appendAsLeader(set, 0) + log.appendAsLeader(set, leaderEpoch = 0) mockTime.sleep(log.config.segmentMs - maxJitter) set = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds) - log.appendAsLeader(set, 0) + log.appendAsLeader(set, leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "Log does not roll on this append because it occurs earlier than max jitter") mockTime.sleep(maxJitter - log.activeSegment.rollJitterMs + 1) set = TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds) - log.appendAsLeader(set, 0) + log.appendAsLeader(set, leaderEpoch = 0) assertEquals(2, log.numberOfSegments, "Log should roll after segmentMs adjusted by random jitter") } @@ -1595,7 +1539,7 @@ class UnifiedLogTest { // segments expire in size for (_ <- 1 to (msgPerSeg + 1)) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(2, log.numberOfSegments, "There should be exactly 2 segments.") } @@ -1607,7 +1551,7 @@ class UnifiedLogTest { def testLoadEmptyLog(): Unit = { createEmptyLogs(logDir, 0) val log = createLog(logDir, new LogConfig(new Properties)) - log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds), leaderEpoch = 0) } /** @@ -1620,7 +1564,7 @@ class UnifiedLogTest { val values = (0 until 100 by 2).map(id => id.toString.getBytes).toArray for (value <- values) - log.appendAsLeader(TestUtils.singletonRecords(value = value), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = value), leaderEpoch = 0) for (i <- values.indices) { val read = LogTestUtils.readLog(log, i, 1).records.batches.iterator.next() @@ -1645,12 +1589,8 @@ class UnifiedLogTest { val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes)) // now test the case that we give the offsets and use non-sequential offsets - for (i <- records.indices) { - log.appendAsFollower( - MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i)), - Int.MaxValue - ) - } + for (i <- records.indices) + log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i))) for (i <- 50 until messageIds.max) { val idx = messageIds.indexWhere(_ >= i) val read = LogTestUtils.readLog(log, i, 100).records.records.iterator.next() @@ -1672,7 +1612,7 @@ class UnifiedLogTest { // keep appending until we have two segments with only a single message in the second segment while (log.numberOfSegments == 1) - log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), leaderEpoch = 0) // now manually truncate off all but one message from the first segment to create a gap in the messages log.logSegments.asScala.head.truncateTo(1) @@ -1686,7 +1626,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig() val log = createLog(logDir, logConfig) log.closeHandlers() - assertThrows(classOf[KafkaStorageException], () => log.roll(Optional.of(1L))) + assertThrows(classOf[KafkaStorageException], () => log.roll(Some(1L))) } @Test @@ -1697,12 +1637,8 @@ class UnifiedLogTest { val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes)) // now test the case that we give the offsets and use non-sequential offsets - for (i <- records.indices) { - log.appendAsFollower( - MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i)), - Int.MaxValue - ) - } + for (i <- records.indices) + log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i))) for (i <- 50 until messageIds.max) { val idx = messageIds.indexWhere(_ >= i) @@ -1726,12 +1662,8 @@ class UnifiedLogTest { val records = messageIds.map(id => new SimpleRecord(id.toString.getBytes)) // now test the case that we give the offsets and use non-sequential offsets - for (i <- records.indices) { - log.appendAsFollower( - MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i)), - Int.MaxValue - ) - } + for (i <- records.indices) + log.appendAsFollower(MemoryRecords.withRecords(messageIds(i), Compression.NONE, 0, records(i))) for (i <- 50 until messageIds.max) { assertEquals(MemoryRecords.EMPTY, LogTestUtils.readLog(log, i, maxLength = 0, minOneMessage = false).records) @@ -1760,7 +1692,7 @@ class UnifiedLogTest { // set up replica log starting with offset 1024 and with one message (at offset 1024) val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "42".getBytes), leaderEpoch = 0) assertEquals(0, LogTestUtils.readLog(log, 1025, 1000).records.sizeInBytes, "Reading at the log end offset should produce 0 byte read.") @@ -1774,7 +1706,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig() val log = createLog(logDir, logConfig) val message = TestUtils.singletonRecords(value = "Test".getBytes, timestamp = mockTime.milliseconds) - log.appendAsLeader(message, 0) + log.appendAsLeader(message, leaderEpoch = 0) log.roll() assertEquals(2, logDir.listFiles(_.getName.endsWith(".log")).length) assertEquals(1, logDir.listFiles(_.getName.endsWith(".index")).length) @@ -1796,7 +1728,7 @@ class UnifiedLogTest { val numMessages = 100 val messageSets = (0 until numMessages).map(i => TestUtils.singletonRecords(value = i.toString.getBytes, timestamp = mockTime.milliseconds)) - messageSets.foreach(log.appendAsLeader(_, 0)) + messageSets.foreach(log.appendAsLeader(_, leaderEpoch = 0)) log.flush(false) /* do successive reads to ensure all our messages are there */ @@ -1832,8 +1764,8 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) /* append 2 compressed message sets, each with two messages giving offsets 0, 1, 2, 3 */ - log.appendAsLeader(MemoryRecords.withRecords(Compression.gzip().build(), new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), 0) - log.appendAsLeader(MemoryRecords.withRecords(Compression.gzip().build(), new SimpleRecord("alpha".getBytes), new SimpleRecord("beta".getBytes)), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.gzip().build(), new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), leaderEpoch = 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.gzip().build(), new SimpleRecord("alpha".getBytes), new SimpleRecord("beta".getBytes)), leaderEpoch = 0) def read(offset: Int) = LogTestUtils.readLog(log, offset, 4096).records.records @@ -1855,7 +1787,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 100, retentionMs = 0) val log = createLog(logDir, logConfig) for (i <- 0 until messagesToAppend) - log.appendAsLeader(TestUtils.singletonRecords(value = i.toString.getBytes, timestamp = mockTime.milliseconds - 10), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = i.toString.getBytes, timestamp = mockTime.milliseconds - 10), leaderEpoch = 0) val currOffset = log.logEndOffset assertEquals(currOffset, messagesToAppend) @@ -1872,7 +1804,7 @@ class UnifiedLogTest { currOffset, log.appendAsLeader( TestUtils.singletonRecords(value = "hello".getBytes, timestamp = mockTime.milliseconds), - 0 + leaderEpoch = 0 ).firstOffset, "Should still be able to append and should get the logEndOffset assigned to the new append") @@ -1893,7 +1825,7 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = configSegmentSize) val log = createLog(logDir, logConfig) - assertThrows(classOf[RecordBatchTooLargeException], () => log.appendAsLeader(messageSet, 0)) + assertThrows(classOf[RecordBatchTooLargeException], () => log.appendAsLeader(messageSet, leaderEpoch = 0)) } @Test @@ -1916,21 +1848,21 @@ class UnifiedLogTest { val errorMsgPrefix = "Compacted topic cannot accept message without key" var e = assertThrows(classOf[RecordValidationException], - () => log.appendAsLeader(messageSetWithUnkeyedMessage, 0)) + () => log.appendAsLeader(messageSetWithUnkeyedMessage, leaderEpoch = 0)) assertTrue(e.invalidException.isInstanceOf[InvalidRecordException]) assertEquals(1, e.recordErrors.size) assertEquals(0, e.recordErrors.get(0).batchIndex) assertTrue(e.recordErrors.get(0).message.startsWith(errorMsgPrefix)) e = assertThrows(classOf[RecordValidationException], - () => log.appendAsLeader(messageSetWithOneUnkeyedMessage, 0)) + () => log.appendAsLeader(messageSetWithOneUnkeyedMessage, leaderEpoch = 0)) assertTrue(e.invalidException.isInstanceOf[InvalidRecordException]) assertEquals(1, e.recordErrors.size) assertEquals(0, e.recordErrors.get(0).batchIndex) assertTrue(e.recordErrors.get(0).message.startsWith(errorMsgPrefix)) e = assertThrows(classOf[RecordValidationException], - () => log.appendAsLeader(messageSetWithCompressedUnkeyedMessage, 0)) + () => log.appendAsLeader(messageSetWithCompressedUnkeyedMessage, leaderEpoch = 0)) assertTrue(e.invalidException.isInstanceOf[InvalidRecordException]) assertEquals(1, e.recordErrors.size) assertEquals(1, e.recordErrors.get(0).batchIndex) // batch index is 1 @@ -1941,9 +1873,9 @@ class UnifiedLogTest { assertTrue(TestUtils.meterCount(s"${BrokerTopicMetrics.NO_KEY_COMPACTED_TOPIC_RECORDS_PER_SEC}") > 0) // the following should succeed without any InvalidMessageException - log.appendAsLeader(messageSetWithKeyedMessage, 0) - log.appendAsLeader(messageSetWithKeyedMessages, 0) - log.appendAsLeader(messageSetWithCompressedKeyedMessage, 0) + log.appendAsLeader(messageSetWithKeyedMessage, leaderEpoch = 0) + log.appendAsLeader(messageSetWithKeyedMessages, leaderEpoch = 0) + log.appendAsLeader(messageSetWithCompressedKeyedMessage, leaderEpoch = 0) } /** @@ -1963,9 +1895,9 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) // should be able to append the small message - log.appendAsLeader(first, 0) + log.appendAsLeader(first, leaderEpoch = 0) - assertThrows(classOf[RecordTooLargeException], () => log.appendAsLeader(second, 0), + assertThrows(classOf[RecordTooLargeException], () => log.appendAsLeader(second, leaderEpoch = 0), () => "Second message set should throw MessageSizeTooLargeException.") } @@ -1979,94 +1911,9 @@ class UnifiedLogTest { val log = createLog(logDir, LogTestUtils.createLogConfig(maxMessageBytes = second.sizeInBytes - 1)) - log.appendAsFollower(first, Int.MaxValue) - // the second record is larger than limit but appendAsFollower does not validate the size. - log.appendAsFollower(second, Int.MaxValue) - } - - @ParameterizedTest - @ArgumentsSource(classOf[InvalidMemoryRecordsProvider]) - def testInvalidMemoryRecords(records: MemoryRecords, expectedException: Optional[Class[Exception]]): Unit = { - val logConfig = LogTestUtils.createLogConfig() - val log = createLog(logDir, logConfig) - val previousEndOffset = log.logEndOffsetMetadata.messageOffset - - if (expectedException.isPresent) { - assertThrows( - expectedException.get(), - () => log.appendAsFollower(records, Int.MaxValue) - ) - } else { - log.appendAsFollower(records, Int.MaxValue) - } - - assertEquals(previousEndOffset, log.logEndOffsetMetadata.messageOffset) - } - - @Property(tries = 100, afterFailure = AfterFailureMode.SAMPLE_ONLY) - def testRandomRecords( - @ForAll(supplier = classOf[ArbitraryMemoryRecords]) records: MemoryRecords - ): Unit = { - val tempDir = TestUtils.tempDir() - val logDir = TestUtils.randomPartitionLogDir(tempDir) - try { - val logConfig = LogTestUtils.createLogConfig() - val log = createLog(logDir, logConfig) - val previousEndOffset = log.logEndOffsetMetadata.messageOffset - - // Depending on the corruption, unified log sometimes throws and sometimes returns an - // empty set of batches - assertThrows( - classOf[CorruptRecordException], - () => { - val info = log.appendAsFollower(records, Int.MaxValue) - if (info.firstOffset == UnifiedLog.UNKNOWN_OFFSET) { - throw new CorruptRecordException("Unknown offset is test") - } - } - ) - - assertEquals(previousEndOffset, log.logEndOffsetMetadata.messageOffset) - } finally { - Utils.delete(tempDir) - } - } - - @Test - def testInvalidLeaderEpoch(): Unit = { - val logConfig = LogTestUtils.createLogConfig() - val log = createLog(logDir, logConfig) - val previousEndOffset = log.logEndOffsetMetadata.messageOffset - val epoch = log.latestEpoch.orElse(0) + 1 - val numberOfRecords = 10 - - val batchWithValidEpoch = MemoryRecords.withRecords( - previousEndOffset, - Compression.NONE, - epoch, - (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _* - ) - - val batchWithInvalidEpoch = MemoryRecords.withRecords( - previousEndOffset + numberOfRecords, - Compression.NONE, - epoch + 1, - (0 until numberOfRecords).map(number => new SimpleRecord(number.toString.getBytes)): _* - ) - - val buffer = ByteBuffer.allocate(batchWithValidEpoch.sizeInBytes() + batchWithInvalidEpoch.sizeInBytes()) - buffer.put(batchWithValidEpoch.buffer()) - buffer.put(batchWithInvalidEpoch.buffer()) - buffer.flip() - - val records = MemoryRecords.readableRecords(buffer) - - log.appendAsFollower(records, epoch) - - // Check that only the first batch was appended - assertEquals(previousEndOffset + numberOfRecords, log.logEndOffsetMetadata.messageOffset) - // Check that the last fetched epoch matches the first batch - assertEquals(epoch, log.latestEpoch.get) + log.appendAsFollower(first) + // the second record is larger then limit but appendAsFollower does not validate the size. + log.appendAsFollower(second) } @Test @@ -2079,7 +1926,7 @@ class UnifiedLogTest { log.partitionMetadataFile.get.record(topicId) // Should trigger a synchronous flush - log.appendAsLeader(record, 0) + log.appendAsLeader(record, leaderEpoch = 0) assertTrue(log.partitionMetadataFile.get.exists()) assertEquals(topicId, log.partitionMetadataFile.get.read().topicId) } @@ -2112,9 +1959,29 @@ class UnifiedLogTest { // test recovery case log = createLog(logDir, logConfig) - assertTrue(log.topicId.isPresent) - assertEquals(topicId, log.topicId.get) + assertTrue(log.topicId.isDefined) + assertTrue(log.topicId.get == topicId) + log.close() + } + + @Test + def testNoOpWhenKeepPartitionMetadataFileIsFalse(): Unit = { + val logConfig = LogTestUtils.createLogConfig() + val log = createLog(logDir, logConfig, keepPartitionMetadataFile = false) + + val topicId = Uuid.randomUuid() + log.assignTopicId(topicId) + // We should not write to this file or set the topic ID + assertFalse(log.partitionMetadataFile.get.exists()) + assertEquals(None, log.topicId) log.close() + + val log2 = createLog(logDir, logConfig, topicId = Some(Uuid.randomUuid()), keepPartitionMetadataFile = false) + + // We should not write to this file or set the topic ID + assertFalse(log2.partitionMetadataFile.get.exists()) + assertEquals(None, log2.topicId) + log2.close() } @Test @@ -2147,7 +2014,7 @@ class UnifiedLogTest { val messages = (0 until numMessages).map { i => MemoryRecords.withRecords(100 + i, Compression.NONE, 0, new SimpleRecord(mockTime.milliseconds + i, i.toString.getBytes())) } - messages.foreach(message => log.appendAsFollower(message, Int.MaxValue)) + messages.foreach(log.appendAsFollower) val timeIndexEntries = log.logSegments.asScala.foldLeft(0) { (entries, segment) => entries + segment.timeIndex.entries } assertEquals(numMessages - 1, timeIndexEntries, s"There should be ${numMessages - 1} time index entries") assertEquals(mockTime.milliseconds + numMessages - 1, log.activeSegment.timeIndex.lastEntry.timestamp, @@ -2159,40 +2026,40 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) val log = createLog(logDir, logConfig) - assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Optional.empty)) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L)) val firstTimestamp = mockTime.milliseconds val firstLeaderEpoch = 0 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - firstLeaderEpoch) + leaderEpoch = firstLeaderEpoch) val secondTimestamp = firstTimestamp + 1 val secondLeaderEpoch = 1 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = secondTimestamp), - secondLeaderEpoch) + leaderEpoch = secondLeaderEpoch) assertEquals(new OffsetResultHolder(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(firstTimestamp, Optional.empty)) + log.fetchOffsetByTimestamp(firstTimestamp)) assertEquals(new OffsetResultHolder(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(secondTimestamp, Optional.empty)) + log.fetchOffsetByTimestamp(secondTimestamp)) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP)) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP)) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. log.assignEpochStartOffset(2, 2L) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)) } @Test @@ -2200,28 +2067,28 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) val log = createLog(logDir, logConfig) - assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Optional.empty)) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L)) val firstTimestamp = mockTime.milliseconds val leaderEpoch = 0 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - leaderEpoch) + leaderEpoch = leaderEpoch) val secondTimestamp = firstTimestamp + 1 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = secondTimestamp), - leaderEpoch) + leaderEpoch = leaderEpoch) log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - leaderEpoch) + leaderEpoch = leaderEpoch) assertEquals(new OffsetResultHolder(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(leaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP)) } @Test @@ -2236,29 +2103,28 @@ class UnifiedLogTest { _ => Optional.empty[UnifiedLog](), (_, _) => {}, brokerTopicStats, - new Metrics(), - Optional.empty)) + new Metrics())) remoteLogManager.setDelayedOperationPurgatory(purgatory) val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1, remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) // Note that the log is empty, so remote offset read won't happen - assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Optional.of(remoteLogManager))) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) val firstTimestamp = mockTime.milliseconds val firstLeaderEpoch = 0 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - firstLeaderEpoch) + leaderEpoch = firstLeaderEpoch) val secondTimestamp = firstTimestamp + 1 val secondLeaderEpoch = 1 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = secondTimestamp), - secondLeaderEpoch) + leaderEpoch = secondLeaderEpoch) doAnswer(ans => { val timestamp = ans.getArgument(1).asInstanceOf[Long] @@ -2267,10 +2133,10 @@ class UnifiedLogTest { .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch))) }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - log.updateLocalLogStartOffset(1) + log._localLogStartOffset = 1 def assertFetchOffsetByTimestamp(expected: Option[TimestampAndOffset], timestamp: Long): Unit = { - val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Optional.of(remoteLogManager)) + val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Some(remoteLogManager)) assertTrue(offsetResultHolder.futureHolderOpt.isPresent) offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) @@ -2283,18 +2149,18 @@ class UnifiedLogTest { assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. log.assignEpochStartOffset(2, 2L) - + assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) } @Test @@ -2303,23 +2169,23 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)) val firstTimestamp = mockTime.milliseconds val leaderEpoch = 0 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - leaderEpoch) + leaderEpoch = leaderEpoch) val secondTimestamp = firstTimestamp + 1 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = secondTimestamp), - leaderEpoch) + leaderEpoch = leaderEpoch) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, Optional.empty)) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP)) } @Test @@ -2334,31 +2200,30 @@ class UnifiedLogTest { _ => Optional.empty[UnifiedLog](), (_, _) => {}, brokerTopicStats, - new Metrics(), - Optional.empty)) + new Metrics())) remoteLogManager.setDelayedOperationPurgatory(purgatory) val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1, remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) // Note that the log is empty, so remote offset read won't happen - assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Optional.of(remoteLogManager))) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(0L, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0, Optional.empty())), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) val firstTimestamp = mockTime.milliseconds val firstLeaderEpoch = 0 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = firstTimestamp), - firstLeaderEpoch) + leaderEpoch = firstLeaderEpoch) val secondTimestamp = firstTimestamp + 1 val secondLeaderEpoch = 1 log.appendAsLeader(TestUtils.singletonRecords( value = TestUtils.randomBytes(10), timestamp = secondTimestamp), - secondLeaderEpoch) + leaderEpoch = secondLeaderEpoch) doAnswer(ans => { val timestamp = ans.getArgument(1).asInstanceOf[Long] @@ -2367,11 +2232,11 @@ class UnifiedLogTest { .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch))) }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - log.updateLocalLogStartOffset(1) - log.updateHighestOffsetInRemoteStorage(0) + log._localLogStartOffset = 1 + log._highestOffsetInRemoteStorage = 0 def assertFetchOffsetByTimestamp(expected: Option[TimestampAndOffset], timestamp: Long): Unit = { - val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Optional.of(remoteLogManager)) + val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, Some(remoteLogManager)) assertTrue(offsetResultHolder.futureHolderOpt.isPresent) offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) @@ -2384,20 +2249,20 @@ class UnifiedLogTest { assertFetchOffsetByTimestamp(Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIERED_TIMESTAMP, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, Some(remoteLogManager))) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(secondLeaderEpoch))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) // The cache can be updated directly after a leader change. // The new latest offset should reflect the updated epoch. log.assignEpochStartOffset(2, 2L) assertEquals(new OffsetResultHolder(new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(2))), - log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.of(remoteLogManager))) + log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Some(remoteLogManager))) } private def createKafkaConfigWithRLM: KafkaConfig = { @@ -2416,193 +2281,6 @@ class UnifiedLogTest { KafkaConfig.fromProps(props) } - @Test - def testFetchEarliestPendingUploadTimestampNoRemoteStorage(): Unit = { - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1) - val log = createLog(logDir, logConfig) - - // Test initial state before any records - assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - - // Append records - val _ = prepareLogWithSequentialRecords(log, recordCount = 2) - - // Test state after records are appended - assertFetchOffsetBySpecialTimestamp(log, None, new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1, Optional.of(-1)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - } - - @Test - def testFetchEarliestPendingUploadTimestampWithRemoteStorage(): Unit = { - val logStartOffset = 0 - val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) - - val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) - val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) - val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) - - doAnswer(ans => { - val timestamp = ans.getArgument(1).asInstanceOf[Long] - Optional.of(timestamp) - .filter(_ == timestampAndEpochs.head.timestamp) - .map[TimestampAndOffset](x => new TimestampAndOffset(x, 0L, Optional.of(timestampAndEpochs.head.leaderEpoch))) - }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - - // Offset 0 (first timestamp) is in remote storage and deleted locally. Offset 1 (second timestamp) is in local storage. - log.updateLocalLogStartOffset(1) - log.updateHighestOffsetInRemoteStorage(0) - - // In the assertions below we test that offset 0 (first timestamp) is only in remote and offset 1 (second timestamp) is in local storage. - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), - ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), - ListOffsetsRequest.LATEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - } - - @Test - def testFetchEarliestPendingUploadTimestampWithRemoteStorageNoLocalDeletion(): Unit = { - val logStartOffset = 0 - val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) - - val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) - val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) - val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) - - // Offsets upto 1 are in remote storage - doAnswer(ans => { - val timestamp = ans.getArgument(1).asInstanceOf[Long] - Optional.of( - timestamp match { - case x if x == firstTimestamp => new TimestampAndOffset(x, 0L, Optional.of(firstLeaderEpoch)) - case x if x == secondTimestamp => new TimestampAndOffset(x, 1L, Optional.of(secondLeaderEpoch)) - case _ => null - } - ) - }).when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - - // Offsets 0, 1 (first and second timestamps) are in remote storage and not deleted locally. - log.updateLocalLogStartOffset(0) - log.updateHighestOffsetInRemoteStorage(1) - - // In the assertions below we test that offset 0 (first timestamp) and offset 1 (second timestamp) are on both remote and local storage - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 1L, Optional.of(secondLeaderEpoch)), - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), - ListOffsetsRequest.LATEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 2L, Optional.of(thirdLeaderEpoch)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - } - - @Test - def testFetchEarliestPendingUploadTimestampNoSegmentsUploaded(): Unit = { - val logStartOffset = 0 - val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) - - val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) - val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) - val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) - - // No offsets are in remote storage - doAnswer(_ => Optional.empty[TimestampAndOffset]()) - .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - - // Offsets 0, 1, 2 (first, second and third timestamps) are in local storage only and not uploaded to remote storage. - log.updateLocalLogStartOffset(0) - log.updateHighestOffsetInRemoteStorage(-1) - - // In the assertions below we test that offset 0 (first timestamp), offset 1 (second timestamp) and offset 2 (third timestamp) are only on the local storage. - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 0L, Optional.of(firstLeaderEpoch))), firstTimestamp) - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 1L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, -1L, Optional.of(-1)), - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 3L, Optional.of(thirdLeaderEpoch)), - ListOffsetsRequest.LATEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 0L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - } - - @Test - def testFetchEarliestPendingUploadTimestampStaleHighestOffsetInRemote(): Unit = { - val logStartOffset = 100 - val (remoteLogManager: RemoteLogManager, log: UnifiedLog, timestampAndEpochs: Seq[TimestampAndEpoch]) = prepare(logStartOffset) - - val (firstTimestamp, firstLeaderEpoch) = (timestampAndEpochs.head.timestamp, timestampAndEpochs.head.leaderEpoch) - val (secondTimestamp, secondLeaderEpoch) = (timestampAndEpochs(1).timestamp, timestampAndEpochs(1).leaderEpoch) - val (_, thirdLeaderEpoch) = (timestampAndEpochs(2).timestamp, timestampAndEpochs(2).leaderEpoch) - - // Offsets 100, 101, 102 (first, second and third timestamps) are in local storage and not uploaded to remote storage. - // Tiered storage copy was disabled and then enabled again, because of which the remote log segments are deleted but - // the highest offset in remote storage has become stale - doAnswer(_ => Optional.empty[TimestampAndOffset]()) - .when(remoteLogManager).findOffsetByTimestamp(ArgumentMatchers.eq(log.topicPartition), - anyLong(), anyLong(), ArgumentMatchers.eq(log.leaderEpochCache)) - - log.updateLocalLogStartOffset(100) - log.updateHighestOffsetInRemoteStorage(50) - - // In the assertions below we test that offset 100 (first timestamp), offset 101 (second timestamp) and offset 102 (third timestamp) are only on the local storage. - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(firstTimestamp, 100L, Optional.of(firstLeaderEpoch))), firstTimestamp) - assertFetchOffsetByTimestamp(log, Some(remoteLogManager), Some(new TimestampAndOffset(secondTimestamp, 101L, Optional.of(secondLeaderEpoch))), secondTimestamp) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 50L, Optional.empty()), - ListOffsetsRequest.LATEST_TIERED_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 103L, Optional.of(thirdLeaderEpoch)), - ListOffsetsRequest.LATEST_TIMESTAMP) - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager),new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, 100L, Optional.of(firstLeaderEpoch)), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - } - - private def prepare(logStartOffset: Int): (RemoteLogManager, UnifiedLog, Seq[TimestampAndEpoch]) = { - val config: KafkaConfig = createKafkaConfigWithRLM - val purgatory = new DelayedOperationPurgatory[DelayedRemoteListOffsets]("RemoteListOffsets", config.brokerId) - val remoteLogManager = spy(new RemoteLogManager(config.remoteLogManagerConfig, - 0, - logDir.getAbsolutePath, - "clusterId", - mockTime, - _ => Optional.empty[UnifiedLog](), - (_, _) => {}, - brokerTopicStats, - new Metrics(), - Optional.empty)) - remoteLogManager.setDelayedOperationPurgatory(purgatory) - - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 200, indexIntervalBytes = 1, remoteLogStorageEnable = true) - val log = createLog(logDir, logConfig, logStartOffset = logStartOffset, remoteStorageSystemEnable = true, remoteLogManager = Some(remoteLogManager)) - - // Verify earliest pending upload offset for empty log - assertFetchOffsetBySpecialTimestamp(log, Some(remoteLogManager), new TimestampAndOffset(ListOffsetsResponse.UNKNOWN_TIMESTAMP, logStartOffset, Optional.empty()), - ListOffsetsRequest.EARLIEST_PENDING_UPLOAD_TIMESTAMP) - - val timestampAndEpochs = prepareLogWithSequentialRecords(log, recordCount = 3) - (remoteLogManager, log, timestampAndEpochs) - } - /** * Test the Log truncate operations */ @@ -2619,7 +2297,7 @@ class UnifiedLogTest { assertEquals(1, log.numberOfSegments, "There should be exactly 1 segment.") for (_ <- 1 to msgPerSeg) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "There should be exactly 1 segments.") assertEquals(msgPerSeg, log.logEndOffset, "Log end offset should be equal to number of messages") @@ -2640,16 +2318,16 @@ class UnifiedLogTest { assertEquals(0, log.size, "Should change log size") for (_ <- 1 to msgPerSeg) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(log.logEndOffset, lastOffset, "Should be back to original offset") assertEquals(log.size, size, "Should be back to original size") - log.truncateFullyAndStartAt(log.logEndOffset - (msgPerSeg - 1), Optional.empty) + log.truncateFullyAndStartAt(log.logEndOffset - (msgPerSeg - 1)) assertEquals(log.logEndOffset, lastOffset - (msgPerSeg - 1), "Should change offset") assertEquals(log.size, 0, "Should change log size") for (_ <- 1 to msgPerSeg) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertTrue(log.logEndOffset > msgPerSeg, "Should be ahead of to original offset") assertEquals(size, log.size, "log size should be same as before") @@ -2671,12 +2349,12 @@ class UnifiedLogTest { assertEquals(1, log.numberOfSegments, "There should be exactly 1 segment.") for (i<- 1 to msgPerSeg) - log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "There should be exactly 1 segment.") mockTime.sleep(msgPerSeg) for (i<- 1 to msgPerSeg) - log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0) assertEquals(2, log.numberOfSegments, "There should be exactly 2 segment.") val expectedEntries = msgPerSeg - 1 @@ -2694,7 +2372,7 @@ class UnifiedLogTest { mockTime.sleep(msgPerSeg) for (i<- 1 to msgPerSeg) - log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = "test".getBytes, timestamp = mockTime.milliseconds + i), leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "There should be exactly 1 segment.") } @@ -2712,7 +2390,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) // files should be renamed val segments = log.logSegments.asScala.toArray @@ -2738,7 +2416,7 @@ class UnifiedLogTest { @Test def testAppendMessageWithNullPayload(): Unit = { val log = createLog(logDir, new LogConfig(new Properties)) - log.appendAsLeader(TestUtils.singletonRecords(value = null), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = null), leaderEpoch = 0) val head = LogTestUtils.readLog(log, 0, 4096).records.records.iterator.next() assertEquals(0, head.offset) assertFalse(head.hasValue, "Message payload should be null.") @@ -2748,22 +2426,20 @@ class UnifiedLogTest { def testAppendWithOutOfOrderOffsetsThrowsException(): Unit = { val log = createLog(logDir, new LogConfig(new Properties)) - val epoch = 0 val appendOffsets = Seq(0L, 1L, 3L, 2L, 4L) val buffer = ByteBuffer.allocate(512) for (offset <- appendOffsets) { val builder = MemoryRecords.builder(buffer, RecordBatch.MAGIC_VALUE_V2, Compression.NONE, TimestampType.LOG_APPEND_TIME, offset, mockTime.milliseconds(), - 1L, 0, 0, false, epoch) + 1L, 0, 0, false, 0) builder.append(new SimpleRecord("key".getBytes, "value".getBytes)) builder.close() } buffer.flip() val memoryRecords = MemoryRecords.readableRecords(buffer) - assertThrows( - classOf[OffsetsOutOfOrderException], - () => log.appendAsFollower(memoryRecords, epoch) + assertThrows(classOf[OffsetsOutOfOrderException], () => + log.appendAsFollower(memoryRecords) ) } @@ -2771,18 +2447,16 @@ class UnifiedLogTest { def testAppendBelowExpectedOffsetThrowsException(): Unit = { val log = createLog(logDir, new LogConfig(new Properties)) val records = (0 until 2).map(id => new SimpleRecord(id.toString.getBytes)).toArray - records.foreach(record => log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, record), 0)) + records.foreach(record => log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, record), leaderEpoch = 0)) val magicVals = Seq(RecordBatch.MAGIC_VALUE_V0, RecordBatch.MAGIC_VALUE_V1, RecordBatch.MAGIC_VALUE_V2) val compressionTypes = Seq(CompressionType.NONE, CompressionType.LZ4) for (magic <- magicVals; compressionType <- compressionTypes) { val compression = Compression.of(compressionType).build() val invalidRecord = MemoryRecords.withRecords(magic, compression, new SimpleRecord(1.toString.getBytes)) - assertThrows( - classOf[UnexpectedAppendOffsetException], - () => log.appendAsFollower(invalidRecord, Int.MaxValue), - () => s"Magic=$magic, compressionType=$compressionType" - ) + assertThrows(classOf[UnexpectedAppendOffsetException], + () => log.appendAsFollower(invalidRecord), + () => s"Magic=$magic, compressionType=$compressionType") } } @@ -2803,10 +2477,7 @@ class UnifiedLogTest { magicValue = magic, codec = Compression.of(compressionType).build(), baseOffset = firstOffset) - val exception = assertThrows( - classOf[UnexpectedAppendOffsetException], - () => log.appendAsFollower(batch, Int.MaxValue) - ) + val exception = assertThrows(classOf[UnexpectedAppendOffsetException], () => log.appendAsFollower(records = batch)) assertEquals(firstOffset, exception.firstOffset, s"Magic=$magic, compressionType=$compressionType, UnexpectedAppendOffsetException#firstOffset") assertEquals(firstOffset + 2, exception.lastOffset, s"Magic=$magic, compressionType=$compressionType, UnexpectedAppendOffsetException#lastOffset") } @@ -2816,7 +2487,7 @@ class UnifiedLogTest { def testAppendWithNoTimestamp(): Unit = { val log = createLog(logDir, new LogConfig(new Properties)) log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, - new SimpleRecord(RecordBatch.NO_TIMESTAMP, "key".getBytes, "value".getBytes)), 0) + new SimpleRecord(RecordBatch.NO_TIMESTAMP, "key".getBytes, "value".getBytes)), leaderEpoch = 0) } @Test @@ -2824,7 +2495,7 @@ class UnifiedLogTest { val pid = 1L val epoch = 0.toShort val log = createLog(logDir, new LogConfig(new Properties)) - log.appendAsLeader(TestUtils.singletonRecords(value = null), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = null), leaderEpoch = 0) assertEquals(0, LogTestUtils.readLog(log, 0, 4096).records.records.iterator.next().offset) val append = LogTestUtils.appendTransactionalAsLeader(log, pid, epoch, mockTime) append(10) @@ -2832,7 +2503,7 @@ class UnifiedLogTest { // to the index will fail. log.activeSegment.txnIndex.renameTo(log.dir) assertThrows(classOf[KafkaStorageException], () => LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 1)) - assertThrows(classOf[KafkaStorageException], () => log.appendAsLeader(TestUtils.singletonRecords(value = null), 0)) + assertThrows(classOf[KafkaStorageException], () => log.appendAsLeader(TestUtils.singletonRecords(value = null), leaderEpoch = 0)) assertThrows(classOf[KafkaStorageException], () => LogTestUtils.readLog(log, 0, 4096).records.records.iterator.next().offset) } @@ -2840,14 +2511,14 @@ class UnifiedLogTest { def testWriteLeaderEpochCheckpointAfterDirectoryRename(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), 5) - assertEquals(Optional.of(5), log.latestEpoch) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) + assertEquals(Some(5), log.latestEpoch) // Ensure that after a directory rename, the epoch cache is written to the right location val tp = UnifiedLog.parseTopicPartitionName(log.dir) log.renameDir(UnifiedLog.logDeleteDirName(tp), true) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), 10) - assertEquals(Optional.of(10), log.latestEpoch) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 10) + assertEquals(Some(10), log.latestEpoch) assertTrue(LeaderEpochCheckpointFile.newFile(log.dir).exists()) assertFalse(LeaderEpochCheckpointFile.newFile(this.logDir).exists()) } @@ -2861,19 +2532,19 @@ class UnifiedLogTest { val topicId = Uuid.randomUuid() log.assignTopicId(topicId) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), 5) - assertEquals(Optional.of(5), log.latestEpoch) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) + assertEquals(Some(5), log.latestEpoch) // Ensure that after a directory rename, the partition metadata file is written to the right location. val tp = UnifiedLog.parseTopicPartitionName(log.dir) log.renameDir(UnifiedLog.logDeleteDirName(tp), true) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), 10) - assertEquals(Optional.of(10), log.latestEpoch) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 10) + assertEquals(Some(10), log.latestEpoch) assertTrue(PartitionMetadataFile.newFile(log.dir).exists()) assertFalse(PartitionMetadataFile.newFile(this.logDir).exists()) // Check the topic ID remains in memory and was copied correctly. - assertTrue(log.topicId.isPresent) + assertTrue(log.topicId.isDefined) assertEquals(topicId, log.topicId.get) assertEquals(topicId, log.partitionMetadataFile.get.read().topicId) } @@ -2902,37 +2573,30 @@ class UnifiedLogTest { def testLeaderEpochCacheClearedAfterDowngradeInAppendedMessages(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1000, indexIntervalBytes = 1, maxMessageBytes = 64 * 1024) val log = createLog(logDir, logConfig) - log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), 5) - assertEquals(Optional.of(5), log.leaderEpochCache.latestEpoch) - - log.appendAsFollower( - TestUtils.records( - List( - new SimpleRecord("foo".getBytes()) - ), - baseOffset = 1L, - magicValue = RecordVersion.V1.value - ), - 5 - ) - assertEquals(Optional.empty, log.leaderEpochCache.latestEpoch) + log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes()))), leaderEpoch = 5) + assertEquals(OptionalInt.of(5), log.leaderEpochCache.latestEpoch) + + log.appendAsFollower(TestUtils.records(List(new SimpleRecord("foo".getBytes())), + baseOffset = 1L, + magicValue = RecordVersion.V1.value)) + assertEquals(OptionalInt.empty, log.leaderEpochCache.latestEpoch) } @Test def testLeaderEpochCacheCreatedAfterMessageFormatUpgrade(): Unit = { val logProps = new Properties() - logProps.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "1000") + logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1000") logProps.put(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "1") logProps.put(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "65536") val logConfig = new LogConfig(logProps) val log = createLog(logDir, logConfig) log.appendAsLeaderWithRecordVersion(TestUtils.records(List(new SimpleRecord("bar".getBytes())), - magicValue = RecordVersion.V1.value), 5, RecordVersion.V1) - assertTrue(log.latestEpoch.isEmpty) + magicValue = RecordVersion.V1.value), leaderEpoch = 5, RecordVersion.V1) + assertEquals(None, log.latestEpoch) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("foo".getBytes())), - magicValue = RecordVersion.V2.value), 5) - assertEquals(5, log.latestEpoch.get) + magicValue = RecordVersion.V2.value), leaderEpoch = 5) + assertEquals(Some(5), log.latestEpoch) } @Test @@ -2988,7 +2652,7 @@ class UnifiedLogTest { // Need to create the offset files explicitly to avoid triggering segment recovery to truncate segment. Files.createFile(LogFileUtils.offsetIndexFile(logDir, segmentBaseOffset).toPath) Files.createFile(LogFileUtils.timeIndexFile(logDir, segmentBaseOffset).toPath) - records.foreach(segment.append) + records.foreach(segment.append _) segment.close() val logConfig = LogTestUtils.createLogConfig(indexIntervalBytes = 1, fileDeleteDelayMs = 1000) @@ -3018,7 +2682,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.assignEpochStartOffset(0, 40) log.assignEpochStartOffset(1, 90) @@ -3052,7 +2716,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 100) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.delete() assertEquals(0, log.numberOfSegments, "The number of segments should be 0") @@ -3067,7 +2731,7 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) // append some messages to create some segments - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(1, log.numberOfSegments, "The deleted segments should be gone.") assertEquals(1, epochCache(log).epochEntries.size, "Epoch entries should have gone.") @@ -3085,7 +2749,7 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) assertEquals(3, log.numberOfSegments, "should have 3 segments") assertEquals(log.logStartOffset, 0) log.updateHighWatermark(log.logEndOffset) @@ -3116,7 +2780,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() @@ -3131,7 +2795,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() @@ -3146,7 +2810,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() @@ -3161,7 +2825,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() @@ -3176,9 +2840,9 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) - // mark the oldest segment as older the retention.ms + // mark oldest segment as older the retention.ms log.logSegments.asScala.head.setLastModified(mockTime.milliseconds - 20000) val segments = log.numberOfSegments @@ -3187,60 +2851,6 @@ class UnifiedLogTest { assertEquals(segments, log.numberOfSegments, "There should be 3 segments remaining") } - @Test - def shouldDeleteLocalLogSegmentsWhenPolicyIsEmptyWithSizeRetention(): Unit = { - def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = 10L) - val recordSize = createRecords.sizeInBytes - val logConfig = LogTestUtils.createLogConfig( - segmentBytes = recordSize * 2, - localRetentionBytes = recordSize / 2, - cleanupPolicy = "", - remoteLogStorageEnable = true - ) - val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) - - for (_ <- 0 until 10) - log.appendAsLeader(createRecords, 0) - - val segmentsBefore = log.numberOfSegments - log.updateHighWatermark(log.logEndOffset) - log.updateHighestOffsetInRemoteStorage(log.logEndOffset - 1) - val deleteOldSegments = log.deleteOldSegments() - - assertTrue(log.numberOfSegments < segmentsBefore, "Some segments should be deleted due to size retention") - assertTrue(deleteOldSegments > 0, "At least one segment should be deleted") - } - - @Test - def shouldDeleteLocalLogSegmentsWhenPolicyIsEmptyWithMsRetention(): Unit = { - val oldTimestamp = mockTime.milliseconds - 20000 - def oldRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = oldTimestamp) - val recordSize = oldRecords.sizeInBytes - val logConfig = LogTestUtils.createLogConfig( - segmentBytes = recordSize * 2, - localRetentionMs = 5000, - cleanupPolicy = "", - remoteLogStorageEnable = true - ) - val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) - - for (_ <- 0 until 10) - log.appendAsLeader(oldRecords, 0) - - def newRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes(), timestamp = mockTime.milliseconds) - for (_ <- 0 until 5) - log.appendAsLeader(newRecords, 0) - - val segmentsBefore = log.numberOfSegments - - log.updateHighWatermark(log.logEndOffset) - log.updateHighestOffsetInRemoteStorage(log.logEndOffset - 1) - val deleteOldSegments = log.deleteOldSegments() - - assertTrue(log.numberOfSegments < segmentsBefore, "Some segments should be deleted due to time retention") - assertTrue(deleteOldSegments > 0, "At least one segment should be deleted") - } - @Test def shouldDeleteSegmentsReadyToBeDeletedWhenCleanupPolicyIsCompactAndDelete(): Unit = { def createRecords = TestUtils.singletonRecords("test".getBytes, key = "test".getBytes, timestamp = 10L) @@ -3249,7 +2859,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) log.updateHighWatermark(log.logEndOffset) log.deleteOldSegments() @@ -3265,7 +2875,7 @@ class UnifiedLogTest { // append some messages to create some segments for (_ <- 0 until 15) - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) // Three segments should be created assertEquals(3, log.logSegments.asScala.count(_ => true)) @@ -3295,7 +2905,7 @@ class UnifiedLogTest { for (record <- records) log.appendAsLeader( MemoryRecords.withRecords(Compression.NONE, record), - epoch + leaderEpoch = epoch ) //Then leader epoch should be set on messages @@ -3324,9 +2934,9 @@ class UnifiedLogTest { //When appending as follower (assignOffsets = false) for (i <- records.indices) - log.appendAsFollower(recordsForEpoch(i), i) + log.appendAsFollower(recordsForEpoch(i)) - assertEquals(Optional.of(42), log.latestEpoch) + assertEquals(Some(42), log.latestEpoch) } @Test @@ -3338,7 +2948,7 @@ class UnifiedLogTest { // Given three segments of 5 messages each for (_ <- 0 until 15) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } //Given epochs @@ -3351,7 +2961,7 @@ class UnifiedLogTest { log.deleteOldSegments() //The oldest epoch entry should have been removed - assertEquals(util.List.of(new EpochEntry(1, 5), new EpochEntry(2, 10)), cache.epochEntries) + assertEquals(java.util.Arrays.asList(new EpochEntry(1, 5), new EpochEntry(2, 10)), cache.epochEntries) } @Test @@ -3363,7 +2973,7 @@ class UnifiedLogTest { // Given three segments of 5 messages each for (_ <- 0 until 15) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } //Given epochs @@ -3376,7 +2986,7 @@ class UnifiedLogTest { log.deleteOldSegments() //The first entry should have gone from (0,0) => (0,5) - assertEquals(util.List.of(new EpochEntry(0, 5), new EpochEntry(1, 7), new EpochEntry(2, 10)), cache.epochEntries) + assertEquals(java.util.Arrays.asList(new EpochEntry(0, 5), new EpochEntry(1, 7), new EpochEntry(2, 10)), cache.epochEntries) } @Test @@ -3392,7 +3002,7 @@ class UnifiedLogTest { def append(epoch: Int, startOffset: Long, count: Int): Unit = { for (i <- 0 until count) - log.appendAsFollower(createRecords(startOffset + i, epoch), epoch) + log.appendAsFollower(createRecords(startOffset + i, epoch)) } //Given 2 segments, 10 messages per segment @@ -3438,8 +3048,8 @@ class UnifiedLogTest { new SimpleRecord("bar".getBytes), new SimpleRecord("baz".getBytes)) - log.appendAsLeader(records, 0) - assertEquals(Optional.empty, log.firstUnstableOffset) + log.appendAsLeader(records, leaderEpoch = 0) + assertEquals(None, log.firstUnstableOffset) } @Test @@ -3457,26 +3067,26 @@ class UnifiedLogTest { new SimpleRecord("bar".getBytes), new SimpleRecord("baz".getBytes)) - val firstAppendInfo = log.appendAsLeader(records, 0) - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + val firstAppendInfo = log.appendAsLeader(records, leaderEpoch = 0) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) // add more transactional records seq += 3 log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, pid, epoch, seq, - new SimpleRecord("blah".getBytes)), 0) + new SimpleRecord("blah".getBytes)), leaderEpoch = 0) // LSO should not have changed - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) // now transaction is committed val commitAppendInfo = LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.COMMIT, mockTime.milliseconds()) // first unstable offset is not updated until the high watermark is advanced - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) log.updateHighWatermark(commitAppendInfo.lastOffset + 1) // now there should be no first unstable offset - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) } @Test @@ -3498,7 +3108,11 @@ class UnifiedLogTest { appendProducer(1) - val readInfo = log.read(currentLogEndOffset, Int.MaxValue, FetchIsolation.TXN_COMMITTED, false) + val readInfo = log.read( + startOffset = currentLogEndOffset, + maxLength = Int.MaxValue, + isolation = FetchIsolation.TXN_COMMITTED, + minOneMessage = false) if (readInfo.records.sizeInBytes() > 0) nonEmptyReads += 1 @@ -3580,7 +3194,7 @@ class UnifiedLogTest { assertCachedFirstUnstableOffset(log, expectedOffset = 36L) log.updateHighWatermark(log.logEndOffset) - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) } @Test @@ -3622,7 +3236,7 @@ class UnifiedLogTest { buffer.flip() - appendAsFollower(log, MemoryRecords.readableRecords(buffer), epoch) + appendAsFollower(log, MemoryRecords.readableRecords(buffer)) val abortedTransactions = LogTestUtils.allAbortedTransactions(log) val expectedTransactions = List( @@ -3640,7 +3254,7 @@ class UnifiedLogTest { assertCachedFirstUnstableOffset(log, expectedOffset = 36L) log.updateHighWatermark(log.logEndOffset) - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) } private def assertCachedFirstUnstableOffset(log: UnifiedLog, expectedOffset: Long): Unit = { @@ -3655,10 +3269,10 @@ class UnifiedLogTest { assertFalse(offsetMetadata.messageOffsetOnly) val segmentBaseOffset = offsetMetadata.segmentBaseOffset - val segments = log.logSegments(segmentBaseOffset, segmentBaseOffset + 1) - assertFalse(segments.isEmpty) + val segmentOpt = log.logSegments(segmentBaseOffset, segmentBaseOffset + 1).headOption + assertTrue(segmentOpt.isDefined) - val segment = segments.iterator().next() + val segment = segmentOpt.get assertEquals(segmentBaseOffset, segment.baseOffset) assertTrue(offsetMetadata.relativePositionInSegment <= segment.size) @@ -3701,17 +3315,17 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) val buffer = ByteBuffer.allocate(256) - val append = appendTransactionalToBuffer(buffer, pid, epoch, 1) + val append = appendTransactionalToBuffer(buffer, pid, epoch, leaderEpoch = 1) append(0, 10) - appendEndTxnMarkerToBuffer(buffer, pid, epoch, 10L, ControlRecordType.COMMIT, 1) + appendEndTxnMarkerToBuffer(buffer, pid, epoch, 10L, ControlRecordType.COMMIT, leaderEpoch = 1) buffer.flip() - log.appendAsFollower(MemoryRecords.readableRecords(buffer), epoch) + log.appendAsFollower(MemoryRecords.readableRecords(buffer)) - LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 2, 1) - LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 2, 1) + LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 2, leaderEpoch = 1) + LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 2, leaderEpoch = 1) assertThrows(classOf[TransactionCoordinatorFencedException], - () => LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 1, 1)) + () => LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds(), coordinatorEpoch = 1, leaderEpoch = 1)) } @Test @@ -3742,13 +3356,13 @@ class UnifiedLogTest { assertEquals(2, log.logSegments.size) appendPid(5) - assertEquals(Optional.of(0L), log.firstUnstableOffset) + assertEquals(Some(0L), log.firstUnstableOffset) log.updateHighWatermark(log.logEndOffset) log.maybeIncrementLogStartOffset(5L, LogStartOffsetIncrementReason.ClientRecordDeletion) // the first unstable offset should be lower bounded by the log start offset - assertEquals(Optional.of(5L), log.firstUnstableOffset) + assertEquals(Some(5L), log.firstUnstableOffset) } @Test @@ -3767,7 +3381,7 @@ class UnifiedLogTest { assertEquals(2, log.logSegments.size) appendPid(5) - assertEquals(Optional.of(0L), log.firstUnstableOffset) + assertEquals(Some(0L), log.firstUnstableOffset) log.updateHighWatermark(log.logEndOffset) log.maybeIncrementLogStartOffset(8L, LogStartOffsetIncrementReason.ClientRecordDeletion) @@ -3776,7 +3390,7 @@ class UnifiedLogTest { assertEquals(1, log.logSegments.size) // the first unstable offset should be lower bounded by the log start offset - assertEquals(Optional.of(8L), log.firstUnstableOffset) + assertEquals(Some(8L), log.firstUnstableOffset) } @Test @@ -3818,7 +3432,7 @@ class UnifiedLogTest { assertEquals(11L, reopenedLog.logEndOffset) assertEquals(1, reopenedLog.activeSegment.txnIndex.allAbortedTxns.size) reopenedLog.updateHighWatermark(12L) - assertEquals(Optional.empty, reopenedLog.firstUnstableOffset) + assertEquals(None, reopenedLog.firstUnstableOffset) } @Test @@ -3827,16 +3441,10 @@ class UnifiedLogTest { val log = createLog(logDir, logConfig) // append a few records - appendAsFollower( - log, - MemoryRecords.withRecords( - Compression.NONE, - new SimpleRecord("a".getBytes), - new SimpleRecord("b".getBytes), - new SimpleRecord("c".getBytes) - ), - 5 - ) + appendAsFollower(log, MemoryRecords.withRecords(Compression.NONE, + new SimpleRecord("a".getBytes), + new SimpleRecord("b".getBytes), + new SimpleRecord("c".getBytes)), 5) log.updateHighWatermark(3L) @@ -3866,37 +3474,37 @@ class UnifiedLogTest { val firstAppendInfo = log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, pid1, epoch, seq1, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes), - new SimpleRecord("c".getBytes)), 0) - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + new SimpleRecord("c".getBytes)), leaderEpoch = 0) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) // mix in some non-transactional data log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("g".getBytes), new SimpleRecord("h".getBytes), - new SimpleRecord("i".getBytes)), 0) + new SimpleRecord("i".getBytes)), leaderEpoch = 0) // append data from a second transactional producer val secondAppendInfo = log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, pid2, epoch, seq2, new SimpleRecord("d".getBytes), new SimpleRecord("e".getBytes), - new SimpleRecord("f".getBytes)), 0) + new SimpleRecord("f".getBytes)), leaderEpoch = 0) // LSO should not have changed - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) // now first producer's transaction is aborted val abortAppendInfo = LogTestUtils.appendEndTxnMarkerAsLeader(log, pid1, epoch, ControlRecordType.ABORT, mockTime.milliseconds()) log.updateHighWatermark(abortAppendInfo.lastOffset + 1) // LSO should now point to one less than the first offset of the second transaction - assertEquals(Optional.of(secondAppendInfo.firstOffset), log.firstUnstableOffset) + assertEquals(Some(secondAppendInfo.firstOffset), log.firstUnstableOffset) // commit the second transaction val commitAppendInfo = LogTestUtils.appendEndTxnMarkerAsLeader(log, pid2, epoch, ControlRecordType.COMMIT, mockTime.milliseconds()) log.updateHighWatermark(commitAppendInfo.lastOffset + 1) // now there should be no first unstable offset - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) } @Test @@ -3913,25 +3521,28 @@ class UnifiedLogTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = records.sizeInBytes) val log = createLog(logDir, logConfig) - val firstAppendInfo = log.appendAsLeader(records, 0) - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + val firstAppendInfo = log.appendAsLeader(records, leaderEpoch = 0) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) // this write should spill to the second segment seq = 3 log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.NONE, pid, epoch, seq, new SimpleRecord("d".getBytes), new SimpleRecord("e".getBytes), - new SimpleRecord("f".getBytes)), 0) - assertEquals(Optional.of(firstAppendInfo.firstOffset), log.firstUnstableOffset) + new SimpleRecord("f".getBytes)), leaderEpoch = 0) + assertEquals(Some(firstAppendInfo.firstOffset), log.firstUnstableOffset) assertEquals(3L, log.logEndOffsetMetadata.segmentBaseOffset) // now abort the transaction val abortAppendInfo = LogTestUtils.appendEndTxnMarkerAsLeader(log, pid, epoch, ControlRecordType.ABORT, mockTime.milliseconds()) log.updateHighWatermark(abortAppendInfo.lastOffset + 1) - assertEquals(Optional.empty, log.firstUnstableOffset) + assertEquals(None, log.firstUnstableOffset) // now check that a fetch includes the aborted transaction - val fetchDataInfo = log.read(0L, 2048, FetchIsolation.TXN_COMMITTED, true) + val fetchDataInfo = log.read(0L, + maxLength = 2048, + isolation = FetchIsolation.TXN_COMMITTED, + minOneMessage = true) assertTrue(fetchDataInfo.abortedTransactions.isPresent) assertEquals(1, fetchDataInfo.abortedTransactions.get.size) @@ -3960,7 +3571,7 @@ class UnifiedLogTest { val expiredTimestamp = mockTime.milliseconds() - 1000 for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes, timestamp = expiredTimestamp) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } val initialHighWatermark = log.updateHighWatermark(25L) @@ -3984,7 +3595,7 @@ class UnifiedLogTest { val expiredTimestamp = mockTime.milliseconds() - 1000 for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes, timestamp = expiredTimestamp) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } // ensure we have at least a few segments so the test case is not trivial @@ -4026,14 +3637,13 @@ class UnifiedLogTest { for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } log.updateHighWatermark(25L) assertThrows(classOf[OffsetOutOfRangeException], () => log.maybeIncrementLogStartOffset(26L, LogStartOffsetIncrementReason.ClientRecordDeletion)) } - @Test def testBackgroundDeletionWithIOException(): Unit = { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) val log = createLog(logDir, logConfig) @@ -4080,20 +3690,19 @@ class UnifiedLogTest { for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } - assertEquals(Optional.of(99L), log.maybeUpdateHighWatermark(99L)) - assertEquals(Optional.empty, log.maybeUpdateHighWatermark(99L)) + assertEquals(Some(99L), log.maybeUpdateHighWatermark(99L)) + assertEquals(None, log.maybeUpdateHighWatermark(99L)) - assertEquals(Optional.of(100L), log.maybeUpdateHighWatermark(100L)) - assertEquals(Optional.empty, log.maybeUpdateHighWatermark(100L)) + assertEquals(Some(100L), log.maybeUpdateHighWatermark(100L)) + assertEquals(None, log.maybeUpdateHighWatermark(100L)) // bound by the log end offset - assertEquals(Optional.empty, log.maybeUpdateHighWatermark(101L)) + assertEquals(None, log.maybeUpdateHighWatermark(101L)) } - @Test def testEnableRemoteLogStorageOnCompactedTopics(): Unit = { var logConfig = LogTestUtils.createLogConfig() var log = createLog(logDir, logConfig) @@ -4137,7 +3746,7 @@ class UnifiedLogTest { for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } log.updateHighWatermark(90L) @@ -4152,7 +3761,7 @@ class UnifiedLogTest { for (i <- 0 until 100) { val records = TestUtils.singletonRecords(value = s"test$i".getBytes) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) } log.updateHighWatermark(80L) @@ -4163,7 +3772,7 @@ class UnifiedLogTest { // Truncate the local log and verify that the offsets are updated to expected values val newLocalLogStartOffset = 60L - log.truncateFullyAndStartAt(newLocalLogStartOffset, Optional.of(newLogStartOffset)) + log.truncateFullyAndStartAt(newLocalLogStartOffset, Option.apply(newLogStartOffset)) assertEquals(newLogStartOffset, log.logStartOffset) assertEquals(newLocalLogStartOffset, log.localLogStartOffset()) } @@ -4217,7 +3826,7 @@ class UnifiedLogTest { listener.verify(expectedHighWatermark = 3) log.appendAsLeader(records(0), 0) - log.truncateFullyAndStartAt(4, Optional.empty) + log.truncateFullyAndStartAt(4) listener.verify(expectedHighWatermark = 4) } @@ -4249,13 +3858,12 @@ class UnifiedLogTest { @ParameterizedTest @EnumSource(value = classOf[AppendOrigin], names = Array("CLIENT", "COORDINATOR")) - def testTransactionIsOngoingAndVerificationGuardTV2(appendOrigin: AppendOrigin): Unit = { + def testTransactionIsOngoingAndVerificationGuard(appendOrigin: AppendOrigin): Unit = { val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) val producerId = 23L val producerEpoch = 1.toShort - // For TV2, when there's no existing producer state, sequence must be 0 for both CLIENT and COORDINATOR - var sequence = 0 + var sequence = if (appendOrigin == AppendOrigin.CLIENT) 3 else 0 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) @@ -4287,7 +3895,7 @@ class UnifiedLogTest { val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) - log.appendAsLeader(idempotentRecords, 0, appendOrigin) + log.appendAsLeader(idempotentRecords, origin = appendOrigin, leaderEpoch = 0) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) // Since we wrote idempotent records, we keep VerificationGuard. @@ -4295,7 +3903,7 @@ class UnifiedLogTest { // Now write the transactional records assertTrue(log.verificationGuard(producerId).verify(verificationGuard)) - log.appendAsLeader(transactionalRecords, 0, appendOrigin, RequestLocal.noCaching(), verificationGuard) + log.appendAsLeader(transactionalRecords, origin = appendOrigin, leaderEpoch = 0, verificationGuard = verificationGuard) assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) // VerificationGuard should be cleared now. assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) @@ -4309,7 +3917,7 @@ class UnifiedLogTest { new EndTransactionMarker(ControlRecordType.COMMIT, 0) ) - log.appendAsLeader(endTransactionMarkerRecord, 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(endTransactionMarkerRecord, origin = AppendOrigin.COORDINATOR, leaderEpoch = 0) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) @@ -4323,83 +3931,6 @@ class UnifiedLogTest { assertFalse(verificationGuard.verify(newVerificationGuard)) } - @ParameterizedTest - @EnumSource(value = classOf[AppendOrigin], names = Array("CLIENT", "COORDINATOR")) - def testTransactionIsOngoingAndVerificationGuardTV1(appendOrigin: AppendOrigin): Unit = { - val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, false) - - val producerId = 23L - val producerEpoch = 1.toShort - // For TV1, can start with non-zero sequences even with non-zero epoch when no existing producer state - var sequence = if (appendOrigin == AppendOrigin.CLIENT) 3 else 0 - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) - val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) - assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) - assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) - assertFalse(log.verificationGuard(producerId).verify(VerificationGuard.SENTINEL)) - - val idempotentRecords = MemoryRecords.withIdempotentRecords( - Compression.NONE, - producerId, - producerEpoch, - sequence, - new SimpleRecord("1".getBytes), - new SimpleRecord("2".getBytes) - ) - - // Only clients have nonzero sequences - if (appendOrigin == AppendOrigin.CLIENT) - sequence = sequence + 2 - - val transactionalRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, - producerId, - producerEpoch, - sequence, - new SimpleRecord("1".getBytes), - new SimpleRecord("2".getBytes) - ) - - // For TV1, create verification guard with supportsEpochBump=false - val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) - assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) - - log.appendAsLeader(idempotentRecords, 0, appendOrigin) - assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) - - // Since we wrote idempotent records, we keep VerificationGuard. - assertEquals(verificationGuard, log.verificationGuard(producerId)) - - // Now write the transactional records - assertTrue(log.verificationGuard(producerId).verify(verificationGuard)) - log.appendAsLeader(transactionalRecords, 0, appendOrigin, RequestLocal.noCaching(), verificationGuard) - assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) - // VerificationGuard should be cleared now. - assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) - - // A subsequent maybeStartTransactionVerification will be empty since we are already verified. - assertEquals(VerificationGuard.SENTINEL, log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false)) - - val endTransactionMarkerRecord = MemoryRecords.withEndTransactionMarker( - producerId, - producerEpoch, - new EndTransactionMarker(ControlRecordType.COMMIT, 0) - ) - - log.appendAsLeader(endTransactionMarkerRecord, 0, AppendOrigin.COORDINATOR) - assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) - assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) - - if (appendOrigin == AppendOrigin.CLIENT) - sequence = sequence + 1 - - // A new maybeStartTransactionVerification will not be empty, as we need to verify the next transaction. - val newVerificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) - assertNotEquals(VerificationGuard.SENTINEL, newVerificationGuard) - assertNotEquals(verificationGuard, newVerificationGuard) - assertFalse(verificationGuard.verify(newVerificationGuard)) - } - @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testEmptyTransactionStillClearsVerificationGuard(supportsEpochBump: Boolean): Unit = { @@ -4420,12 +3951,11 @@ class UnifiedLogTest { new EndTransactionMarker(ControlRecordType.COMMIT, 0) ) - log.appendAsLeader(endTransactionMarkerRecord, 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(endTransactionMarkerRecord, origin = AppendOrigin.COORDINATOR, leaderEpoch = 0) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } - @Test def testNextTransactionVerificationGuardNotCleared(): Unit = { val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) @@ -4444,7 +3974,7 @@ class UnifiedLogTest { new EndTransactionMarker(ControlRecordType.COMMIT, 0) ) - log.appendAsLeader(endTransactionMarkerRecord, 0, AppendOrigin.COORDINATOR) + log.appendAsLeader(endTransactionMarkerRecord, origin = AppendOrigin.COORDINATOR, leaderEpoch = 0) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(verificationGuard, log.verificationGuard(producerId)) } @@ -4472,7 +4002,7 @@ class UnifiedLogTest { new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) ) - log.appendAsLeader(transactionalRecords, 0) + log.appendAsLeader(transactionalRecords, leaderEpoch = 0) assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) @@ -4484,7 +4014,7 @@ class UnifiedLogTest { val producerId = 23L val producerEpoch = 1.toShort - val sequence = 0 + val sequence = 4 val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) @@ -4498,22 +4028,21 @@ class UnifiedLogTest { new SimpleRecord("1".getBytes), new SimpleRecord("2".getBytes) ) - assertThrows(classOf[InvalidTxnStateException], () => log.appendAsLeader(transactionalRecords, 0)) + assertThrows(classOf[InvalidTxnStateException], () => log.appendAsLeader(transactionalRecords, leaderEpoch = 0)) assertFalse(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) assertNotEquals(VerificationGuard.SENTINEL, verificationGuard) - log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard) + log.appendAsLeader(transactionalRecords, leaderEpoch = 0, verificationGuard = verificationGuard) assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) assertEquals(VerificationGuard.SENTINEL, log.verificationGuard(producerId)) } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testNonZeroSequenceOnFirstAppendNonZeroEpoch(transactionVerificationEnabled: Boolean): Unit = { - val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, transactionVerificationEnabled) + @Test + def testAllowNonZeroSequenceOnFirstAppendNonZeroEpoch(): Unit = { + val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) val producerId = 23L val producerEpoch = 1.toShort @@ -4532,19 +4061,9 @@ class UnifiedLogTest { new SimpleRecord("2".getBytes) ) - if (transactionVerificationEnabled) { - // TV2 behavior: Create verification state that supports epoch bumps - val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) - // Should reject non-zero sequences when there's no existing producer state - assertThrows(classOf[OutOfOrderSequenceException], () => - log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard)) - } else { - // TV1 behavior: Create verification state with supportsEpochBump=false - val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, false) - // Should allow non-zero sequences with non-zero epoch - log.appendAsLeader(transactionalRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, verificationGuard) - assertTrue(log.hasOngoingTransaction(producerId, producerEpoch)) - } + val verificationGuard = log.maybeStartTransactionVerification(producerId, sequence, producerEpoch, true) + // Append should not throw error. + log.appendAsLeader(transactionalRecords, leaderEpoch = 0, verificationGuard = verificationGuard) } @Test @@ -4554,9 +4073,9 @@ class UnifiedLogTest { doThrow(new KafkaStorageException("Injected exception")).when(log).flushProducerStateSnapshot(any()) - log.appendAsLeader(TestUtils.singletonRecords("a".getBytes), 0) + log.appendAsLeader(TestUtils.singletonRecords("a".getBytes), leaderEpoch = 0) try { - log.roll(Optional.of(1L)) + log.roll(Some(1L)) } catch { case _: KafkaStorageException => // ignore } @@ -4573,7 +4092,7 @@ class UnifiedLogTest { val records = TestUtils.records(List( new SimpleRecord(mockTime.milliseconds, "a".getBytes), )) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.roll() } log.maybeIncrementHighWatermark(log.logEndOffsetMetadata) @@ -4582,29 +4101,29 @@ class UnifiedLogTest { { val deletable = log.deletableSegments( - (segment: LogSegment, _: Optional[LogSegment]) => segment.baseOffset <= 5) - val expected = log.nonActiveLogSegmentsFrom(0L).stream().filter(segment => segment.baseOffset <= 5).toList - assertEquals(6, expected.size) - assertEquals(expected, deletable) + (segment: LogSegment, _: Option[LogSegment]) => segment.baseOffset <= 5) + val expected = log.nonActiveLogSegmentsFrom(0L).asScala.filter(segment => segment.baseOffset <= 5).toList + assertEquals(6, expected.length) + assertEquals(expected, deletable.toList) } { - val deletable = log.deletableSegments((_: LogSegment, _: Optional[LogSegment]) => true) - val expected = log.nonActiveLogSegmentsFrom(0L).stream().toList - assertEquals(9, expected.size) - assertEquals(expected, deletable) + val deletable = log.deletableSegments((_: LogSegment, _: Option[LogSegment]) => true) + val expected = log.nonActiveLogSegmentsFrom(0L).asScala.toList + assertEquals(9, expected.length) + assertEquals(expected, deletable.toList) } { val records = TestUtils.records(List( new SimpleRecord(mockTime.milliseconds, "a".getBytes), )) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.maybeIncrementHighWatermark(log.logEndOffsetMetadata) - val deletable = log.deletableSegments((_: LogSegment, _: Optional[LogSegment]) => true) - val expected = log.logSegments.stream().toList - assertEquals(10, expected.size) - assertEquals(expected, deletable) + val deletable = log.deletableSegments((_: LogSegment, _: Option[LogSegment]) => true) + val expected = log.logSegments.asScala.toList + assertEquals(10, expected.length) + assertEquals(expected, deletable.toList) } } @@ -4616,7 +4135,7 @@ class UnifiedLogTest { val records = TestUtils.records(List( new SimpleRecord(mockTime.milliseconds, "a".getBytes), )) - log.appendAsLeader(records, 0) + log.appendAsLeader(records, leaderEpoch = 0) log.roll() } log.maybeIncrementHighWatermark(log.logEndOffsetMetadata) @@ -4625,7 +4144,7 @@ class UnifiedLogTest { var offset = 0 val deletableSegments = log.deletableSegments( - (segment: LogSegment, nextSegmentOpt: Optional[LogSegment]) => { + (segment: LogSegment, nextSegmentOpt: Option[LogSegment]) => { assertEquals(offset, segment.baseOffset) val logSegments = new LogSegments(log.topicPartition) log.logSegments.forEach(segment => logSegments.add(segment)) @@ -4633,9 +4152,9 @@ class UnifiedLogTest { assertTrue(floorSegmentOpt.isPresent) assertEquals(floorSegmentOpt.get, segment) if (offset == log.logEndOffset) { - assertFalse(nextSegmentOpt.isPresent) + assertFalse(nextSegmentOpt.isDefined) } else { - assertTrue(nextSegmentOpt.isPresent) + assertTrue(nextSegmentOpt.isDefined) val higherSegmentOpt = logSegments.higherSegment(segment.baseOffset) assertTrue(higherSegmentOpt.isPresent) assertEquals(segment.baseOffset + 1, higherSegmentOpt.get.baseOffset) @@ -4645,7 +4164,7 @@ class UnifiedLogTest { true }) assertEquals(10L, log.logSegments.size()) - assertEquals(log.nonActiveLogSegmentsFrom(0L).stream.toList, deletableSegments) + assertEquals(log.nonActiveLogSegmentsFrom(0L).asScala.toSeq, deletableSegments.toSeq) } @Test @@ -4656,7 +4175,7 @@ class UnifiedLogTest { // Append 1 message to the active segment log.appendAsLeader(TestUtils.records(List(new SimpleRecord(mockTime.milliseconds(), "a".getBytes))), - 0) + leaderEpoch = 0) // Update the highWatermark so that these segments will be eligible for deletion. log.updateHighWatermark(log.logEndOffset) assertEquals(1, log.logSegments.size) @@ -4689,16 +4208,16 @@ class UnifiedLogTest { assertTrue(log.isEmpty) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("a".getBytes)), - producerId = pid, producerEpoch = epoch, sequence = 0), 0) + producerId = pid, producerEpoch = epoch, sequence = 0), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("b".getBytes)), - producerId = pid, producerEpoch = epoch, sequence = 1), 0) + producerId = pid, producerEpoch = epoch, sequence = 1), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("c".getBytes)), - producerId = pid, producerEpoch = epoch, sequence = 2), 0) + producerId = pid, producerEpoch = epoch, sequence = 2), leaderEpoch = 0) log.appendAsLeader(TestUtils.records(List(new SimpleRecord("d".getBytes)), - producerId = pid, producerEpoch = epoch, sequence = 3), 1) + producerId = pid, producerEpoch = epoch, sequence = 3), leaderEpoch = 1) log.roll() log.appendAsLeader(TestUtils.records(List(new SimpleRecord("e".getBytes)), - producerId = pid, producerEpoch = epoch, sequence = 4), 2) + producerId = pid, producerEpoch = epoch, sequence = 4), leaderEpoch = 2) log.updateHighWatermark(log.logEndOffset) assertEquals(2, log.logSegments.size) @@ -4741,7 +4260,7 @@ class UnifiedLogTest { // Given 6 segments of 1 message each for (_ <- 0 until 6) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } assertEquals(6, log.logSegments.size) @@ -4764,7 +4283,7 @@ class UnifiedLogTest { // Given 6 segments of 1 message each for (_ <- 0 until 6) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } assertEquals(6, log.logSegments.size) @@ -4789,7 +4308,7 @@ class UnifiedLogTest { // Given 6 segments of 1 message each for (_ <- 0 until 6) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } assertEquals(6, log.logSegments.size) @@ -4829,7 +4348,7 @@ class UnifiedLogTest { // produce 3 more segments for (_ <- 0 until 3) { - log.appendAsLeader(createRecords, 0) + log.appendAsLeader(createRecords, leaderEpoch = 0) } assertEquals(7, log.logSegments.size) log.updateHighWatermark(log.logEndOffset) @@ -4869,7 +4388,7 @@ class UnifiedLogTest { var offset = 0L for(_ <- 0 until 50) { val records = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(records, 0) + val info = log.appendAsLeader(records, leaderEpoch = 0) offset = info.lastOffset if (offset != 0 && offset % 10 == 0) log.roll() @@ -4893,7 +4412,7 @@ class UnifiedLogTest { var offset = 0L for(_ <- 0 until 50) { val records = TestUtils.singletonRecords("test".getBytes()) - val info = log.appendAsLeader(records, 0) + val info = log.appendAsLeader(records, leaderEpoch = 0) offset = info.lastOffset if (offset != 0 && offset % 10 == 0) log.roll() @@ -4929,15 +4448,15 @@ class UnifiedLogTest { def testGetFirstBatchTimestampForSegments(): Unit = { val log = createLog(logDir, LogTestUtils.createLogConfig()) - val segments: util.List[LogSegment] = new util.ArrayList[LogSegment]() + val segments: java.util.List[LogSegment] = new java.util.ArrayList[LogSegment]() val seg1 = LogTestUtils.createSegment(1, logDir, 10, Time.SYSTEM) val seg2 = LogTestUtils.createSegment(2, logDir, 10, Time.SYSTEM) segments.add(seg1) segments.add(seg2) assertEquals(Seq(Long.MaxValue, Long.MaxValue), log.getFirstBatchTimestampForSegments(segments).asScala.toSeq) - seg1.append(1, MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord(1000L, "one".getBytes))) - seg2.append(2, MemoryRecords.withRecords(2, Compression.NONE, new SimpleRecord(2000L, "two".getBytes))) + seg1.append(1, 1000L, 1, MemoryRecords.withRecords(1, Compression.NONE, new SimpleRecord("one".getBytes))) + seg2.append(2, 2000L, 1, MemoryRecords.withRecords(2, Compression.NONE, new SimpleRecord("two".getBytes))) assertEquals(Seq(1000L, 2000L), log.getFirstBatchTimestampForSegments(segments).asScala.toSeq) seg1.close() @@ -4948,7 +4467,7 @@ class UnifiedLogTest { def testFetchOffsetByTimestampShouldReadOnlyLocalLogWhenLogIsEmpty(): Unit = { val logConfig = LogTestUtils.createLogConfig(remoteLogStorageEnable = true) val log = createLog(logDir, logConfig, remoteStorageSystemEnable = true) - val result = log.fetchOffsetByTimestamp(mockTime.milliseconds(), Optional.empty) + val result = log.fetchOffsetByTimestamp(mockTime.milliseconds(), Some(null)) assertEquals(new OffsetResultHolder(Optional.empty(), Optional.empty()), result) } @@ -4989,9 +4508,9 @@ class UnifiedLogTest { builder.close() } - private def appendAsFollower(log: UnifiedLog, records: MemoryRecords, leaderEpoch: Int): Unit = { + private def appendAsFollower(log: UnifiedLog, records: MemoryRecords, leaderEpoch: Int = 0): Unit = { records.batches.forEach(_.setPartitionLeaderEpoch(leaderEpoch)) - log.appendAsFollower(records, leaderEpoch) + log.appendAsFollower(records) } private def createLog(dir: File, @@ -5006,12 +4525,13 @@ class UnifiedLogTest { producerIdExpirationCheckIntervalMs: Int = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, lastShutdownClean: Boolean = true, topicId: Option[Uuid] = None, + keepPartitionMetadataFile: Boolean = true, remoteStorageSystemEnable: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, logOffsetsListener: LogOffsetsListener = LogOffsetsListener.NO_OP_OFFSETS_LISTENER): UnifiedLog = { val log = LogTestUtils.createLog(dir, config, brokerTopicStats, scheduler, time, logStartOffset, recoveryPoint, maxTransactionTimeoutMs, producerStateManagerConfig, producerIdExpirationCheckIntervalMs, - lastShutdownClean, topicId, new ConcurrentHashMap[String, Integer], + lastShutdownClean, topicId, keepPartitionMetadataFile, new ConcurrentHashMap[String, Integer], remoteStorageSystemEnable, remoteLogManager, logOffsetsListener) logsToClose = logsToClose :+ log log @@ -5027,134 +4547,6 @@ class UnifiedLogTest { (log, segmentWithOverflow) } - - private def assertFetchOffsetByTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: Option[TimestampAndOffset], timestamp: Long): Unit = { - val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt) - val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader) - assertTrue(offsetResultHolder.futureHolderOpt.isPresent) - offsetResultHolder.futureHolderOpt.get.taskFuture.get(1, TimeUnit.SECONDS) - assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.isDone) - assertTrue(offsetResultHolder.futureHolderOpt.get.taskFuture.get().hasTimestampAndOffset) - assertEquals(expected.get, offsetResultHolder.futureHolderOpt.get.taskFuture.get().timestampAndOffset().orElse(null)) - } - - private def assertFetchOffsetBySpecialTimestamp(log: UnifiedLog, remoteLogManagerOpt: Option[RemoteLogManager], expected: TimestampAndOffset, timestamp: Long): Unit = { - val remoteOffsetReader = getRemoteOffsetReader(remoteLogManagerOpt) - val offsetResultHolder = log.fetchOffsetByTimestamp(timestamp, remoteOffsetReader) - assertEquals(new OffsetResultHolder(expected), offsetResultHolder) - } - - private def getRemoteOffsetReader(remoteLogManagerOpt: Option[Any]): Optional[AsyncOffsetReader] = { - remoteLogManagerOpt match { - case Some(remoteLogManager) => Optional.of(remoteLogManager.asInstanceOf[AsyncOffsetReader]) - case None => Optional.empty[AsyncOffsetReader]() - } - } - - private def prepareLogWithSequentialRecords(log: UnifiedLog, recordCount: Int): Seq[TimestampAndEpoch] = { - val firstTimestamp = mockTime.milliseconds() - - (0 until recordCount).map { i => - val timestampAndEpoch = TimestampAndEpoch(firstTimestamp + i, i) - log.appendAsLeader( - TestUtils.singletonRecords(value = TestUtils.randomBytes(10), timestamp = timestampAndEpoch.timestamp), - timestampAndEpoch.leaderEpoch - ) - timestampAndEpoch - } - } - - case class TimestampAndEpoch(timestamp: Long, leaderEpoch: Int) - - @Test - def testStaleProducerEpochReturnsRecoverableErrorForTV1Clients(): Unit = { - // Producer epoch gets incremented (coordinator fail over, completed transaction, etc.) - // and client has stale cached epoch. Fix prevents fatal InvalidTxnStateException. - - val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) - val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) - - val producerId = 123L - val oldEpoch = 5.toShort - val newEpoch = 6.toShort - - // Step 1: Simulate a scenario where producer epoch was incremented to fence the producer - val previousRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, producerId, newEpoch, 0, - new SimpleRecord("previous-key".getBytes, "previous-value".getBytes) - ) - val previousGuard = log.maybeStartTransactionVerification(producerId, 0, newEpoch, false) // TV1 = supportsEpochBump = false - log.appendAsLeader(previousRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, previousGuard) - - // Complete the transaction normally (commits do update producer state with current epoch) - val commitMarker = MemoryRecords.withEndTransactionMarker( - producerId, newEpoch, new EndTransactionMarker(ControlRecordType.COMMIT, 0) - ) - log.appendAsLeader(commitMarker, 0, AppendOrigin.COORDINATOR, RequestLocal.noCaching, VerificationGuard.SENTINEL) - - // Step 2: TV1 client tries to write with stale cached epoch (before learning about epoch increment) - val staleEpochRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, producerId, oldEpoch, 0, - new SimpleRecord("stale-epoch-key".getBytes, "stale-epoch-value".getBytes) - ) - - // Step 3: Verify our fix - should get InvalidProducerEpochException (recoverable), not InvalidTxnStateException (fatal) - val exception = assertThrows(classOf[InvalidProducerEpochException], () => { - val staleGuard = log.maybeStartTransactionVerification(producerId, 0, oldEpoch, false) - log.appendAsLeader(staleEpochRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, staleGuard) - }) - - // Verify the error message indicates epoch mismatch - assertTrue(exception.getMessage.contains("smaller than the last seen epoch")) - assertTrue(exception.getMessage.contains(s"$oldEpoch")) - assertTrue(exception.getMessage.contains(s"$newEpoch")) - } - - @Test - def testStaleProducerEpochReturnsRecoverableErrorForTV2Clients(): Unit = { - // Check producer epoch FIRST - if stale, return recoverable error before verification checks. - - val producerStateManagerConfig = new ProducerStateManagerConfig(86400000, true) - val logConfig = LogTestUtils.createLogConfig(segmentBytes = 2048 * 5) - val log = createLog(logDir, logConfig, producerStateManagerConfig = producerStateManagerConfig) - - val producerId = 456L - val originalEpoch = 3.toShort - val bumpedEpoch = 4.toShort - - // Step 1: Start transaction with epoch 3 (before timeout) - val initialRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, producerId, originalEpoch, 0, - new SimpleRecord("ks-initial-key".getBytes, "ks-initial-value".getBytes) - ) - val initialGuard = log.maybeStartTransactionVerification(producerId, 0, originalEpoch, true) // TV2 = supportsEpochBump = true - log.appendAsLeader(initialRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, initialGuard) - - // Step 2: Coordinator times out and aborts transaction - // TV2 (KIP-890): Coordinator bumps epoch from 3 → 4 and sends abort marker with epoch 4 - val abortMarker = MemoryRecords.withEndTransactionMarker( - producerId, bumpedEpoch, new EndTransactionMarker(ControlRecordType.ABORT, 0) - ) - log.appendAsLeader(abortMarker, 0, AppendOrigin.COORDINATOR, RequestLocal.noCaching, VerificationGuard.SENTINEL) - - // Step 3: TV2 transactional producer tries to append with stale epoch (timeout recovery scenario) - val staleEpochRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, producerId, originalEpoch, 0, - new SimpleRecord("ks-resume-key".getBytes, "ks-resume-value".getBytes) - ) - - // Step 4: Verify our fix works for TV2 - should get InvalidProducerEpochException (recoverable), not InvalidTxnStateException (fatal) - val exception = assertThrows(classOf[InvalidProducerEpochException], () => { - val staleGuard = log.maybeStartTransactionVerification(producerId, 0, originalEpoch, true) // TV2 = supportsEpochBump = true - log.appendAsLeader(staleEpochRecords, 0, AppendOrigin.CLIENT, RequestLocal.noCaching, staleGuard) - }) - - // Verify the error message indicates epoch mismatch (3 < 4) - assertTrue(exception.getMessage.contains("smaller than the last seen epoch")) - assertTrue(exception.getMessage.contains(s"$originalEpoch")) - assertTrue(exception.getMessage.contains(s"$bumpedEpoch")) - } } object UnifiedLogTest { diff --git a/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala b/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala new file mode 100644 index 0000000000000..4d55c30b39771 --- /dev/null +++ b/core/src/test/scala/unit/kafka/log/remote/RemoteIndexCacheTest.scala @@ -0,0 +1,1092 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.log.remote + +import kafka.utils.TestUtils +import kafka.utils.TestUtils.waitUntilTrue +import org.apache.kafka.common.utils.Utils +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.server.log.remote.storage.RemoteStorageManager.IndexType +import org.apache.kafka.server.log.remote.storage.{RemoteLogSegmentId, RemoteLogSegmentMetadata, RemoteResourceNotFoundException, RemoteStorageManager} +import org.apache.kafka.server.util.MockTime +import org.apache.kafka.storage.internals.log.RemoteIndexCache._ +import org.apache.kafka.storage.internals.log._ +import org.apache.kafka.test.{TestUtils => JTestUtils} +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.EnumSource +import org.mockito.ArgumentMatchers +import org.mockito.ArgumentMatchers.any +import org.mockito.Mockito._ +import org.mockito.invocation.InvocationOnMock +import org.slf4j.{Logger, LoggerFactory} + +import java.io.{File, FileInputStream, IOException, PrintWriter, UncheckedIOException} +import java.nio.file.{Files, NoSuchFileException, Paths} +import java.util +import java.util.concurrent.{CountDownLatch, Executors, Future, TimeUnit} +import java.util.stream.Collectors +import java.util.{Collections, Optional} +import scala.collection.mutable + +class RemoteIndexCacheTest { + private val defaultRemoteIndexCacheSizeBytes = 1024 * 1024L + private val logger: Logger = LoggerFactory.getLogger(classOf[RemoteIndexCacheTest]) + private val time = new MockTime() + private val brokerId = 1 + private val baseOffset: Long = Int.MaxValue.toLong + 101337 // start with a base offset which is a long + private val lastOffset: Long = baseOffset + 30L + private val segmentSize: Int = 1024 + private val rsm: RemoteStorageManager = mock(classOf[RemoteStorageManager]) + private var cache: RemoteIndexCache = _ + private var rlsMetadata: RemoteLogSegmentMetadata = _ + private var logDir: File = _ + private var tpDir: File = _ + private var idPartition: TopicIdPartition = _ + + @BeforeEach + def setup(): Unit = { + idPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + logDir = JTestUtils.tempDirectory(s"kafka-${this.getClass.getSimpleName}") + tpDir = new File(logDir, idPartition.toString) + Files.createDirectory(tpDir.toPath) + + val remoteLogSegmentId = RemoteLogSegmentId.generateNew(idPartition) + rlsMetadata = new RemoteLogSegmentMetadata(remoteLogSegmentId, baseOffset, lastOffset, time.milliseconds(), brokerId, time.milliseconds(), segmentSize, Collections.singletonMap(0, 0L)) + + cache = new RemoteIndexCache(defaultRemoteIndexCacheSizeBytes, rsm, tpDir.toString) + + mockRsmFetchIndex(rsm) + } + + @AfterEach + def cleanup(): Unit = { + reset(rsm) + // the files created for the test will be deleted automatically on thread exit since we use temp dir + Utils.closeQuietly(cache, "RemoteIndexCache created for unit test") + // best effort to delete the per-test resource. Even if we don't delete, it is ok because the parent directory + // will be deleted at the end of test. + try { + Utils.delete(logDir) + } catch { + case _: IOException => // ignore + } + // Verify no lingering threads. It is important to have this as the very last statement in the @aftereach + // because this may throw an exception and prevent cleanup after it + TestUtils.assertNoNonDaemonThreads(REMOTE_LOG_INDEX_CACHE_CLEANER_THREAD) + } + + @Test + def testIndexFileNameAndLocationOnDisk(): Unit = { + val entry = cache.getIndexEntry(rlsMetadata) + val offsetIndexFile = entry.offsetIndex.file().toPath + val txnIndexFile = entry.txnIndex.file().toPath + val timeIndexFile = entry.timeIndex.file().toPath + + val expectedOffsetIndexFileName: String = remoteOffsetIndexFileName(rlsMetadata) + val expectedTimeIndexFileName: String = remoteTimeIndexFileName(rlsMetadata) + val expectedTxnIndexFileName: String = remoteTransactionIndexFileName(rlsMetadata) + + assertEquals(expectedOffsetIndexFileName, offsetIndexFile.getFileName.toString) + assertEquals(expectedTxnIndexFileName, txnIndexFile.getFileName.toString) + assertEquals(expectedTimeIndexFileName, timeIndexFile.getFileName.toString) + + // assert that parent directory for the index files is correct + assertEquals(RemoteIndexCache.DIR_NAME, offsetIndexFile.getParent.getFileName.toString, + s"offsetIndex=$offsetIndexFile is created under incorrect parent") + assertEquals(RemoteIndexCache.DIR_NAME, txnIndexFile.getParent.getFileName.toString, + s"txnIndex=$txnIndexFile is created under incorrect parent") + assertEquals(RemoteIndexCache.DIR_NAME, timeIndexFile.getParent.getFileName.toString, + s"timeIndex=$timeIndexFile is created under incorrect parent") + } + + @Test + def testFetchIndexFromRemoteStorage(): Unit = { + val offsetIndex = cache.getIndexEntry(rlsMetadata).offsetIndex + val offsetPosition1 = offsetIndex.entry(1) + // this call should have invoked fetchOffsetIndex, fetchTimestampIndex once + val resultPosition = cache.lookupOffset(rlsMetadata, offsetPosition1.offset) + assertEquals(offsetPosition1.position, resultPosition) + verifyFetchIndexInvocation(count = 1, Seq(IndexType.OFFSET, IndexType.TIMESTAMP)) + + // this should not cause fetching index from RemoteStorageManager as it is already fetched earlier + reset(rsm) + val offsetPosition2 = offsetIndex.entry(2) + val resultPosition2 = cache.lookupOffset(rlsMetadata, offsetPosition2.offset) + assertEquals(offsetPosition2.position, resultPosition2) + assertNotNull(cache.getIndexEntry(rlsMetadata)) + verifyNoInteractions(rsm) + } + + @Test + def testFetchIndexForMissingTransactionIndex(): Unit = { + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(ans => { + val metadata = ans.getArgument[RemoteLogSegmentMetadata](0) + val indexType = ans.getArgument[IndexType](1) + val offsetIdx = createOffsetIndexForSegmentMetadata(metadata, tpDir) + val timeIdx = createTimeIndexForSegmentMetadata(metadata, tpDir) + maybeAppendIndexEntries(offsetIdx, timeIdx) + indexType match { + case IndexType.OFFSET => new FileInputStream(offsetIdx.file) + case IndexType.TIMESTAMP => new FileInputStream(timeIdx.file) + // Throw RemoteResourceNotFoundException since transaction index is not available + case IndexType.TRANSACTION => throw new RemoteResourceNotFoundException("txn index not found") + case IndexType.LEADER_EPOCH => // leader-epoch-cache is not accessed. + case IndexType.PRODUCER_SNAPSHOT => // producer-snapshot is not accessed. + } + }) + + val entry = cache.getIndexEntry(rlsMetadata) + // Verify an empty file is created in the cache directory + assertTrue(entry.txnIndex().file().exists()) + assertEquals(0, entry.txnIndex().file().length()) + } + + @Test + def testPositionForNonExistingIndexFromRemoteStorage(): Unit = { + val offsetIndex = cache.getIndexEntry(rlsMetadata).offsetIndex + val lastOffsetPosition = cache.lookupOffset(rlsMetadata, offsetIndex.lastOffset) + val greaterOffsetThanLastOffset = offsetIndex.lastOffset + 1 + assertEquals(lastOffsetPosition, cache.lookupOffset(rlsMetadata, greaterOffsetThanLastOffset)) + + // offsetIndex.lookup() returns OffsetPosition(baseOffset, 0) for offsets smaller than least entry in the offset index. + val nonExistentOffsetPosition = new OffsetPosition(baseOffset, 0) + val lowerOffsetThanBaseOffset = offsetIndex.baseOffset - 1 + assertEquals(nonExistentOffsetPosition.position, cache.lookupOffset(rlsMetadata, lowerOffsetThanBaseOffset)) + } + + @Test + def testCacheEntryExpiry(): Unit = { + val estimateEntryBytesSize = estimateOneEntryBytesSize() + // close existing cache created in test setup before creating a new one + Utils.closeQuietly(cache, "RemoteIndexCache created for unit test") + cache = new RemoteIndexCache(2 * estimateEntryBytesSize, rsm, tpDir.toString) + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 3, tpId) + + assertCacheSize(0) + // getIndex for first time will call rsm#fetchIndex + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + // Calling getIndex on the same entry should not call rsm#fetchIndex again, but it should retrieve from cache + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + verifyFetchIndexInvocation(count = 1) + + // Here a new key metadataList(1) is invoked, that should call rsm#fetchIndex, making the count to 2 + cache.getIndexEntry(metadataList.head) + cache.getIndexEntry(metadataList(1)) + assertCacheSize(2) + verifyFetchIndexInvocation(count = 2) + + // Getting index for metadataList.last should call rsm#fetchIndex + // to populate this entry one of the other 2 entries will be evicted. We don't know which one since it's based on + // a probabilistic formula for Window TinyLfu. See docs for RemoteIndexCache + assertNotNull(cache.getIndexEntry(metadataList.last)) + assertAtLeastOnePresent(cache, metadataList(1).remoteLogSegmentId().id(), metadataList.head.remoteLogSegmentId().id()) + assertCacheSize(2) + verifyFetchIndexInvocation(count = 3) + + // getting index for last expired entry should call rsm#fetchIndex as that entry was expired earlier + val missingEntryOpt = { + metadataList.find(segmentMetadata => { + val segmentId = segmentMetadata.remoteLogSegmentId().id() + !cache.internalCache.asMap().containsKey(segmentId) + }) + } + assertFalse(missingEntryOpt.isEmpty) + cache.getIndexEntry(missingEntryOpt.get) + assertCacheSize(2) + verifyFetchIndexInvocation(count = 4) + } + + @Test + def testGetIndexAfterCacheClose(): Unit = { + // close existing cache created in test setup before creating a new one + Utils.closeQuietly(cache, "RemoteIndexCache created for unit test") + + cache = new RemoteIndexCache(2 * estimateOneEntryBytesSize(), rsm, tpDir.toString) + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 3, tpId) + + assertCacheSize(0) + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + verifyFetchIndexInvocation(count = 1) + + cache.close() + + // Check IllegalStateException is thrown when index is accessed after it is closed. + assertThrows(classOf[IllegalStateException], () => cache.getIndexEntry(metadataList.head)) + } + + @Test + def testCloseIsIdempotent(): Unit = { + // generate and add entry to cache + val spyEntry = generateSpyCacheEntry() + cache.internalCache.put(rlsMetadata.remoteLogSegmentId().id(), spyEntry) + + cache.close() + cache.close() + + // verify that entry is only closed once + verify(spyEntry).close() + } + + @Test + def testCacheEntryIsDeletedOnRemoval(): Unit = { + def getIndexFileFromDisk(suffix: String) = { + Files.walk(tpDir.toPath) + .filter(Files.isRegularFile(_)) + .filter(path => path.getFileName.toString.endsWith(suffix)) + .findAny() + } + + val internalIndexKey = rlsMetadata.remoteLogSegmentId().id() + val cacheEntry = generateSpyCacheEntry() + + // verify index files on disk + assertTrue(getIndexFileFromDisk(LogFileUtils.INDEX_FILE_SUFFIX).isPresent, s"Offset index file should be present on disk at ${tpDir.toPath}") + assertTrue(getIndexFileFromDisk(LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, s"Txn index file should be present on disk at ${tpDir.toPath}") + assertTrue(getIndexFileFromDisk(LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, s"Time index file should be present on disk at ${tpDir.toPath}") + + // add the spied entry into the cache, it will overwrite the non-spied entry + cache.internalCache.put(internalIndexKey, cacheEntry) + + // no expired entries yet + assertEquals(0, cache.expiredIndexes.size, "expiredIndex queue should be zero at start of test") + + // call remove function to mark the entry for removal + cache.remove(internalIndexKey) + + // wait until entry is marked for deletion + TestUtils.waitUntilTrue(() => cacheEntry.isMarkedForCleanup, + "Failed to mark cache entry for cleanup after invalidation") + TestUtils.waitUntilTrue(() => cacheEntry.isCleanStarted, + "Failed to cleanup cache entry after invalidation") + + // first it will be marked for cleanup, second time markForCleanup is called when cleanup() is called + verify(cacheEntry, times(2)).markForCleanup() + // after that async it will be cleaned up + verify(cacheEntry).cleanup() + + // verify that index(s) rename is only called 1 time + verify(cacheEntry.timeIndex).renameTo(any(classOf[File])) + verify(cacheEntry.offsetIndex).renameTo(any(classOf[File])) + verify(cacheEntry.txnIndex).renameTo(any(classOf[File])) + + // verify no index files on disk + assertFalse(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, + s"Offset index file should not be present on disk at ${tpDir.toPath}") + assertFalse(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, + s"Txn index file should not be present on disk at ${tpDir.toPath}") + assertFalse(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, + s"Time index file should not be present on disk at ${tpDir.toPath}") + assertFalse(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.DELETED_FILE_SUFFIX).isPresent, + s"Index file marked for deletion should not be present on disk at ${tpDir.toPath}") + } + + @Test + def testCleanerThreadShutdown(): Unit = { + // cache is empty at beginning + assertTrue(cache.internalCache.asMap().isEmpty) + // verify that cleaner thread is running + getRunningCleanerThread + // create a new entry + val spyEntry = generateSpyCacheEntry() + // an exception should not close the cleaner thread + when(spyEntry.cleanup()).thenThrow(new RuntimeException("kaboom! I am expected exception in unit test.")) + val key = Uuid.randomUuid() + cache.internalCache.put(key, spyEntry) + // trigger cleanup + cache.internalCache.invalidate(key) + // wait for cleanup to start + TestUtils.waitUntilTrue(() => spyEntry.isCleanStarted, "Failed while waiting for clean up to start") + // Give the thread cleaner thread some time to throw an exception + Thread.sleep(100) + // Verify that Cleaner thread is still running even when exception is thrown in doWork() + var threads = getRunningCleanerThread + assertEquals(1, threads.size, + s"Found unexpected ${threads.size} threads=${threads.stream().map[String](t => t.getName).collect(Collectors.joining(", "))}") + + // close the cache properly + cache.close() + + // verify that the thread is closed properly + threads = getRunningCleanerThread + assertTrue(threads.isEmpty, s"Found unexpected ${threads.size} threads=${threads.stream().map[String](t => t.getName).collect(Collectors.joining(", "))}") + // if the thread is correctly being shutdown it will not be running + assertFalse(cache.cleanerThread.isRunning, "Unexpected thread state=running. Check error logs.") + } + + @Test + def testClose(): Unit = { + val spyEntry = generateSpyCacheEntry() + cache.internalCache.put(rlsMetadata.remoteLogSegmentId().id(), spyEntry) + + TestUtils.waitUntilTrue(() => cache.cleanerThread().isStarted, "Cleaner thread should be started") + + // close the cache + cache.close() + + // closing the cache should close the entry + verify(spyEntry).close() + + // close for all index entries must be invoked + verify(spyEntry.txnIndex).close() + verify(spyEntry.offsetIndex).close() + verify(spyEntry.timeIndex).close() + + // index files must not be deleted + verify(spyEntry.txnIndex, times(0)).deleteIfExists() + verify(spyEntry.offsetIndex, times(0)).deleteIfExists() + verify(spyEntry.timeIndex, times(0)).deleteIfExists() + + // verify cleaner thread is shutdown + assertTrue(cache.cleanerThread.isShutdownComplete) + } + + @Test + def testConcurrentReadWriteAccessForCache(): Unit = { + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 3, tpId) + + assertCacheSize(0) + // getIndex for first time will call rsm#fetchIndex + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + verifyFetchIndexInvocation(count = 1, Seq(IndexType.OFFSET, IndexType.TIMESTAMP)) + reset(rsm) + + // Simulate a concurrency situation where one thread is reading the entry already present in the cache (cache hit) + // and the other thread is reading an entry which is not available in the cache (cache miss). The expected behaviour + // is for the former thread to succeed while latter is fetching from rsm. + // In this test we simulate the situation using latches. We perform the following operations: + // 1. Start the CacheMiss thread and wait until it starts executing the rsm.fetchIndex + // 2. Block the CacheMiss thread inside the call to rsm.fetchIndex. + // 3. Start the CacheHit thread. Assert that it performs a successful read. + // 4. On completion of successful read by CacheHit thread, signal the CacheMiss thread to release it's block. + // 5. Validate that the test passes. If the CacheMiss thread was blocking the CacheHit thread, the test will fail. + // + val latchForCacheHit = new CountDownLatch(1) + val latchForCacheMiss = new CountDownLatch(1) + + val readerCacheHit = (() => { + // Wait for signal to start executing the read + logger.debug(s"Waiting for signal to begin read from ${Thread.currentThread()}") + latchForCacheHit.await() + val entry = cache.getIndexEntry(metadataList.head) + assertNotNull(entry) + // Signal the CacheMiss to unblock itself + logger.debug(s"Signaling CacheMiss to unblock from ${Thread.currentThread()}") + latchForCacheMiss.countDown() + }): Runnable + + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(_ => { + logger.debug(s"Signaling CacheHit to begin read from ${Thread.currentThread()}") + latchForCacheHit.countDown() + logger.debug(s"Waiting for signal to complete rsm fetch from ${Thread.currentThread()}") + latchForCacheMiss.await() + }) + + val readerCacheMiss = (() => { + val entry = cache.getIndexEntry(metadataList.last) + assertNotNull(entry) + }): Runnable + + val executor = Executors.newFixedThreadPool(2) + try { + executor.submit(readerCacheMiss: Runnable) + executor.submit(readerCacheHit: Runnable) + assertTrue(latchForCacheMiss.await(30, TimeUnit.SECONDS)) + } finally { + executor.shutdownNow() + } + } + + @Test + def testReloadCacheAfterClose(): Unit = { + val estimateEntryBytesSize = estimateOneEntryBytesSize() + // close existing cache created in test setup before creating a new one + Utils.closeQuietly(cache, "RemoteIndexCache created for unit test") + cache = new RemoteIndexCache(2 * estimateEntryBytesSize, rsm, tpDir.toString) + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 3, tpId) + + assertCacheSize(0) + // getIndex for first time will call rsm#fetchIndex + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + // Calling getIndex on the same entry should not call rsm#fetchIndex again, but it should retrieve from cache + cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + verifyFetchIndexInvocation(count = 1) + + // Here a new key metadataList(1) is invoked, that should call rsm#fetchIndex, making the count to 2 + cache.getIndexEntry(metadataList(1)) + assertCacheSize(2) + // Calling getIndex on the same entry should not call rsm#fetchIndex again, but it should retrieve from cache + cache.getIndexEntry(metadataList(1)) + assertCacheSize(2) + verifyFetchIndexInvocation(count = 2) + + // Here a new key metadataList(2) is invoked, that should call rsm#fetchIndex + // The cache max size is 2, it will remove one entry and keep the overall size to 2 + cache.getIndexEntry(metadataList(2)) + assertCacheSize(2) + // Calling getIndex on the same entry should not call rsm#fetchIndex again, but it should retrieve from cache + cache.getIndexEntry(metadataList(2)) + assertCacheSize(2) + verifyFetchIndexInvocation(count = 3) + + // Close the cache + cache.close() + + // Reload the cache from the disk and check the cache size is same as earlier + val reloadedCache = new RemoteIndexCache(2 * estimateEntryBytesSize, rsm, tpDir.toString) + assertEquals(2, reloadedCache.internalCache.asMap().size()) + reloadedCache.close() + + verifyNoMoreInteractions(rsm) + } + + @Test + def testRemoveItem(): Unit = { + val segmentId = rlsMetadata.remoteLogSegmentId() + val segmentUuid = segmentId.id() + // generate and add entry to cache + val spyEntry = generateSpyCacheEntry(segmentId) + cache.internalCache.put(segmentUuid, spyEntry) + assertTrue(cache.internalCache().asMap().containsKey(segmentUuid)) + assertFalse(spyEntry.isMarkedForCleanup) + + cache.remove(segmentId.id()) + assertFalse(cache.internalCache().asMap().containsKey(segmentUuid)) + TestUtils.waitUntilTrue(() => spyEntry.isMarkedForCleanup, "Failed to mark cache entry for cleanup after invalidation") + } + + @Test + def testRemoveNonExistentItem(): Unit = { + // generate and add entry to cache + val segmentId = rlsMetadata.remoteLogSegmentId() + val segmentUuid = segmentId.id() + // generate and add entry to cache + val spyEntry = generateSpyCacheEntry(segmentId) + cache.internalCache.put(segmentUuid, spyEntry) + assertTrue(cache.internalCache().asMap().containsKey(segmentUuid)) + + // remove a random Uuid + cache.remove(Uuid.randomUuid()) + assertTrue(cache.internalCache().asMap().containsKey(segmentUuid)) + assertFalse(spyEntry.isMarkedForCleanup) + } + + @Test + def testRemoveMultipleItems(): Unit = { + // generate and add entry to cache + val uuidAndEntryList = new util.HashMap[Uuid, RemoteIndexCache.Entry]() + for (_ <- 0 until 10) { + val segmentId = RemoteLogSegmentId.generateNew(idPartition) + val segmentUuid = segmentId.id() + val spyEntry = generateSpyCacheEntry(segmentId) + uuidAndEntryList.put(segmentUuid, spyEntry) + + cache.internalCache.put(segmentUuid, spyEntry) + assertTrue(cache.internalCache().asMap().containsKey(segmentUuid)) + assertFalse(spyEntry.isMarkedForCleanup) + } + cache.removeAll(uuidAndEntryList.keySet()) + uuidAndEntryList.values().forEach { entry => + TestUtils.waitUntilTrue(() => entry.isMarkedForCleanup, "Failed to mark cache entry for cleanup after invalidation") + } + } + + @Test + def testClearCacheAndIndexFilesWhenResizeCache(): Unit = { + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 1, tpId) + + assertCacheSize(0) + // getIndex for first time will call rsm#fetchIndex + val cacheEntry = cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent) + + cache.resizeCacheSize(1L) + + // wait until entry is marked for deletion + TestUtils.waitUntilTrue(() => cacheEntry.isMarkedForCleanup, + "Failed to mark cache entry for cleanup after resizing cache.") + TestUtils.waitUntilTrue(() => cacheEntry.isCleanStarted, + "Failed to cleanup cache entry after resizing cache.") + + // verify no index files on remote cache dir + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, + s"Offset index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, + s"Txn index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, + s"Time index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.DELETED_FILE_SUFFIX).isPresent, + s"Index file marked for deletion should not be present on disk at ${cache.cacheDir()}") + + assertCacheSize(0) + } + + @Test + def testCorrectnessForCacheAndIndexFilesWhenResizeCache(): Unit = { + + def verifyEntryIsEvicted(metadataToVerify: RemoteLogSegmentMetadata, entryToVerify: Entry): Unit = { + // wait until `entryToVerify` is marked for deletion + TestUtils.waitUntilTrue(() => entryToVerify.isMarkedForCleanup, + "Failed to mark evicted cache entry for cleanup after resizing cache.") + TestUtils.waitUntilTrue(() => entryToVerify.isCleanStarted, + "Failed to cleanup evicted cache entry after resizing cache.") + // verify no index files for `entryToVerify` on remote cache dir + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, remoteOffsetIndexFileName(metadataToVerify)).isPresent, + s"Offset index file for evicted entry should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, remoteTimeIndexFileName(metadataToVerify)).isPresent, + s"Time index file for evicted entry should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, remoteTransactionIndexFileName(metadataToVerify)).isPresent, + s"Txn index file for evicted entry should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, remoteDeletedSuffixIndexFileName(metadataToVerify)).isPresent, + s"Index file marked for deletion for evicted entry should not be present on disk at ${cache.cacheDir()}") + } + + def verifyEntryIsKept(metadataToVerify: RemoteLogSegmentMetadata): Unit = { + assertTrue(getIndexFileFromRemoteCacheDir(cache, remoteOffsetIndexFileName(metadataToVerify)).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, remoteTimeIndexFileName(metadataToVerify)).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, remoteTransactionIndexFileName(metadataToVerify)).isPresent) + assertTrue(!getIndexFileFromRemoteCacheDir(cache, remoteDeletedSuffixIndexFileName(metadataToVerify)).isPresent) + } + + // The test process for resizing is: put 1 entry -> evict to empty -> put 3 entries with limited capacity of 2 entries -> + // evict to 1 entry -> resize to 1 entry size -> resize to 2 entries size + val estimateEntryBytesSize = estimateOneEntryBytesSize() + val tpId = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("foo", 0)) + val metadataList = generateRemoteLogSegmentMetadata(size = 3, tpId) + + assertCacheSize(0) + // getIndex for first time will call rsm#fetchIndex + val cacheEntry = cache.getIndexEntry(metadataList.head) + assertCacheSize(1) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent) + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent) + + // Reduce the cache size to 1 byte to ensure that all the entries are evicted from it. + cache.resizeCacheSize(1L) + + // wait until entry is marked for deletion + TestUtils.waitUntilTrue(() => cacheEntry.isMarkedForCleanup, + "Failed to mark cache entry for cleanup after resizing cache.") + TestUtils.waitUntilTrue(() => cacheEntry.isCleanStarted, + "Failed to cleanup cache entry after resizing cache.") + + // verify no index files on remote cache dir + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, + s"Offset index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, + s"Txn index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, + s"Time index file should not be present on disk at ${cache.cacheDir()}") + TestUtils.waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.DELETED_FILE_SUFFIX).isPresent, + s"Index file marked for deletion should not be present on disk at ${cache.cacheDir()}") + + assertCacheSize(0) + + // Increase cache capacity to only store 2 entries + cache.resizeCacheSize(2 * estimateEntryBytesSize) + assertCacheSize(0) + + val entry0 = cache.getIndexEntry(metadataList(0)) + val entry1 = cache.getIndexEntry(metadataList(1)) + cache.getIndexEntry(metadataList(2)) + assertCacheSize(2) + verifyEntryIsEvicted(metadataList(0), entry0) + + // Reduce cache capacity to only store 1 entry + cache.resizeCacheSize(1 * estimateEntryBytesSize) + assertCacheSize(1) + verifyEntryIsEvicted(metadataList(1), entry1) + + // resize to the same size, all entries should be kept + cache.resizeCacheSize(1 * estimateEntryBytesSize) + + // verify all existing entries (`cache.getIndexEntry(metadataList(2))`) are kept + verifyEntryIsKept(metadataList(2)) + assertCacheSize(1) + + // increase the size + cache.resizeCacheSize(2 * estimateEntryBytesSize) + + // verify all existing entries (`cache.getIndexEntry(metadataList(2))`) are kept + verifyEntryIsKept(metadataList(2)) + assertCacheSize(1) + } + + @ParameterizedTest + @EnumSource(value = classOf[IndexType], names = Array("OFFSET", "TIMESTAMP", "TRANSACTION")) + def testCorruptCacheIndexFileExistsButNotInCache(indexType: IndexType): Unit = { + // create Corrupted Index File in remote index cache + createCorruptedIndexFile(indexType, cache.cacheDir()) + val entry = cache.getIndexEntry(rlsMetadata) + // Test would fail if it throws Exception other than CorruptIndexException + val offsetIndexFile = entry.offsetIndex.file().toPath + val txnIndexFile = entry.txnIndex.file().toPath + val timeIndexFile = entry.timeIndex.file().toPath + + val expectedOffsetIndexFileName: String = remoteOffsetIndexFileName(rlsMetadata) + val expectedTimeIndexFileName: String = remoteTimeIndexFileName(rlsMetadata) + val expectedTxnIndexFileName: String = remoteTransactionIndexFileName(rlsMetadata) + + assertEquals(expectedOffsetIndexFileName, offsetIndexFile.getFileName.toString) + assertEquals(expectedTxnIndexFileName, txnIndexFile.getFileName.toString) + assertEquals(expectedTimeIndexFileName, timeIndexFile.getFileName.toString) + + // assert that parent directory for the index files is correct + assertEquals(RemoteIndexCache.DIR_NAME, offsetIndexFile.getParent.getFileName.toString, + s"offsetIndex=$offsetIndexFile is created under incorrect parent") + assertEquals(RemoteIndexCache.DIR_NAME, txnIndexFile.getParent.getFileName.toString, + s"txnIndex=$txnIndexFile is created under incorrect parent") + assertEquals(RemoteIndexCache.DIR_NAME, timeIndexFile.getParent.getFileName.toString, + s"timeIndex=$timeIndexFile is created under incorrect parent") + + // file is corrupted it should fetch from remote storage again + verifyFetchIndexInvocation(count = 1) + } + + @Test + def testConcurrentRemoveReadForCache(): Unit = { + // Create a spy Cache Entry + val rlsMetadata = new RemoteLogSegmentMetadata(RemoteLogSegmentId.generateNew(idPartition), baseOffset, lastOffset, time.milliseconds(), brokerId, time.milliseconds(), segmentSize, Collections.singletonMap(0, 0L)) + + val timeIndex = spy(createTimeIndexForSegmentMetadata(rlsMetadata, new File(tpDir, DIR_NAME))) + val txIndex = spy(createTxIndexForSegmentMetadata(rlsMetadata, new File(tpDir, DIR_NAME))) + val offsetIndex = spy(createOffsetIndexForSegmentMetadata(rlsMetadata, new File(tpDir, DIR_NAME))) + + val spyEntry = spy(new RemoteIndexCache.Entry(offsetIndex, timeIndex, txIndex)) + cache.internalCache.put(rlsMetadata.remoteLogSegmentId().id(), spyEntry) + + assertCacheSize(1) + + var entry: RemoteIndexCache.Entry = null + + val latchForCacheRead = new CountDownLatch(1) + val latchForCacheRemove = new CountDownLatch(1) + val latchForTestWait = new CountDownLatch(1) + + var markForCleanupCallCount = 0 + + doAnswer((invocation: InvocationOnMock) => { + markForCleanupCallCount += 1 + + if (markForCleanupCallCount == 1) { + // Signal the CacheRead to unblock itself + latchForCacheRead.countDown() + // Wait for signal to start renaming the files + latchForCacheRemove.await() + // Calling the markForCleanup() actual method to start renaming the files + invocation.callRealMethod() + // Signal TestWait to unblock itself so that test can be completed + latchForTestWait.countDown() + } + }).when(spyEntry).markForCleanup() + + val removeCache = (() => { + cache.remove(rlsMetadata.remoteLogSegmentId().id()) + }): Runnable + + val readCache = (() => { + // Wait for signal to start CacheRead + latchForCacheRead.await() + entry = cache.getIndexEntry(rlsMetadata) + // Signal the CacheRemove to start renaming the files + latchForCacheRemove.countDown() + }): Runnable + + val executor = Executors.newFixedThreadPool(2) + try { + val removeCacheFuture: Future[_] = executor.submit(removeCache: Runnable) + val readCacheFuture: Future[_] = executor.submit(readCache: Runnable) + + // Verify both tasks are completed without any exception + removeCacheFuture.get() + readCacheFuture.get() + + // Wait for signal to complete the test + latchForTestWait.await() + + // We can't determine read thread or remove thread will go first so if, + // 1. Read thread go first, cache file should not exist and cache size should be zero. + // 2. Remove thread go first, cache file should present and cache size should be one. + // so basically here we are making sure that if cache existed, the cache file should exist, + // and if cache is non-existed, the cache file should not exist. + if (getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent) { + assertCacheSize(1) + } else { + assertCacheSize(0) + } + } finally { + executor.shutdownNow() + } + + } + + @Test + def testMultipleIndexEntriesExecutionInCorruptException(): Unit = { + reset(rsm) + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(ans => { + val metadata = ans.getArgument[RemoteLogSegmentMetadata](0) + val indexType = ans.getArgument[IndexType](1) + val offsetIdx = createOffsetIndexForSegmentMetadata(metadata, tpDir) + val timeIdx = createTimeIndexForSegmentMetadata(metadata, tpDir) + val txnIdx = createTxIndexForSegmentMetadata(metadata, tpDir) + maybeAppendIndexEntries(offsetIdx, timeIdx) + // Create corrupted index file + createCorruptTimeIndexOffsetFile(tpDir) + indexType match { + case IndexType.OFFSET => new FileInputStream(offsetIdx.file) + case IndexType.TIMESTAMP => new FileInputStream(timeIdx.file) + case IndexType.TRANSACTION => new FileInputStream(txnIdx.file) + case IndexType.LEADER_EPOCH => // leader-epoch-cache is not accessed. + case IndexType.PRODUCER_SNAPSHOT => // producer-snapshot is not accessed. + } + }) + + assertThrows(classOf[CorruptIndexException], () => cache.getIndexEntry(rlsMetadata)) + assertNull(cache.internalCache().getIfPresent(rlsMetadata.remoteLogSegmentId().id())) + verifyFetchIndexInvocation(1, Seq(IndexType.OFFSET, IndexType.TIMESTAMP)) + verifyFetchIndexInvocation(0, Seq(IndexType.TRANSACTION)) + // Current status + // (cache is null) + // RemoteCacheDir contain + // 1. Offset Index File is fine and not corrupted + // 2. Time Index File is corrupted + // What should be the code flow in next execution + // 1. No rsm call for fetching OffSet Index File. + // 2. Time index file should be fetched from remote storage again as it is corrupted in the first execution. + // 3. Transaction index file should be fetched from remote storage. + reset(rsm) + // delete all files created in tpDir + Files.walk(tpDir.toPath, 1) + .filter(Files.isRegularFile(_)) + .forEach(path => Files.deleteIfExists(path)) + // rsm should return no corrupted file in the 2nd execution + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(ans => { + val metadata = ans.getArgument[RemoteLogSegmentMetadata](0) + val indexType = ans.getArgument[IndexType](1) + val offsetIdx = createOffsetIndexForSegmentMetadata(metadata, tpDir) + val timeIdx = createTimeIndexForSegmentMetadata(metadata, tpDir) + val txnIdx = createTxIndexForSegmentMetadata(metadata, tpDir) + maybeAppendIndexEntries(offsetIdx, timeIdx) + indexType match { + case IndexType.OFFSET => new FileInputStream(offsetIdx.file) + case IndexType.TIMESTAMP => new FileInputStream(timeIdx.file) + case IndexType.TRANSACTION => new FileInputStream(txnIdx.file) + case IndexType.LEADER_EPOCH => // leader-epoch-cache is not accessed. + case IndexType.PRODUCER_SNAPSHOT => // producer-snapshot is not accessed. + } + }) + cache.getIndexEntry(rlsMetadata) + // rsm should not be called to fetch offset Index + verifyFetchIndexInvocation(0, Seq(IndexType.OFFSET)) + verifyFetchIndexInvocation(1, Seq(IndexType.TIMESTAMP)) + // Transaction index would be fetched again + // as previous getIndexEntry failed before fetchTransactionIndex + verifyFetchIndexInvocation(1, Seq(IndexType.TRANSACTION)) + } + + @Test + def testIndexFileAlreadyExistOnDiskButNotInCache(): Unit = { + val remoteIndexCacheDir = cache.cacheDir() + val tempSuffix = ".tmptest" + + def renameRemoteCacheIndexFileFromDisk(suffix: String): Unit = { + Files.walk(remoteIndexCacheDir.toPath) + .filter(Files.isRegularFile(_)) + .filter(path => path.getFileName.toString.endsWith(suffix)) + .forEach(f => Utils.atomicMoveWithFallback(f, f.resolveSibling(f.getFileName.toString.stripSuffix(tempSuffix)))) + } + + val entry = cache.getIndexEntry(rlsMetadata) + verifyFetchIndexInvocation(count = 1) + // copy files with temporary name + Files.copy(entry.offsetIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.offsetIndex().file().getPath, "", tempSuffix))) + Files.copy(entry.txnIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.txnIndex().file().getPath, "", tempSuffix))) + Files.copy(entry.timeIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.timeIndex().file().getPath, "", tempSuffix))) + + cache.remove(rlsMetadata.remoteLogSegmentId().id()) + + // wait until entry is marked for deletion + TestUtils.waitUntilTrue(() => entry.isMarkedForCleanup, + "Failed to mark cache entry for cleanup after invalidation") + TestUtils.waitUntilTrue(() => entry.isCleanStarted, + "Failed to cleanup cache entry after invalidation") + + // restore index files + renameRemoteCacheIndexFileFromDisk(tempSuffix) + // validate cache entry for the above key should be null + assertNull(cache.internalCache().getIfPresent(rlsMetadata.remoteLogSegmentId().id())) + cache.getIndexEntry(rlsMetadata) + // Index Files already exist ,rsm should not fetch them again. + verifyFetchIndexInvocation(count = 1) + // verify index files on disk + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, s"Offset index file should be present on disk at ${remoteIndexCacheDir.toPath}") + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, s"Txn index file should be present on disk at ${remoteIndexCacheDir.toPath}") + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, s"Time index file should be present on disk at ${remoteIndexCacheDir.toPath}") + } + + @ParameterizedTest + @EnumSource(value = classOf[IndexType], names = Array("OFFSET", "TIMESTAMP", "TRANSACTION")) + def testRSMReturnCorruptedIndexFile(testIndexType: IndexType): Unit = { + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(ans => { + val metadata = ans.getArgument[RemoteLogSegmentMetadata](0) + val indexType = ans.getArgument[IndexType](1) + val offsetIdx = createOffsetIndexForSegmentMetadata(metadata, tpDir) + val timeIdx = createTimeIndexForSegmentMetadata(metadata, tpDir) + val txnIdx = createTxIndexForSegmentMetadata(metadata, tpDir) + maybeAppendIndexEntries(offsetIdx, timeIdx) + // Create corrupt index file return from RSM + createCorruptedIndexFile(testIndexType, tpDir) + indexType match { + case IndexType.OFFSET => new FileInputStream(offsetIdx.file) + case IndexType.TIMESTAMP => new FileInputStream(timeIdx.file) + case IndexType.TRANSACTION => new FileInputStream(txnIdx.file) + case IndexType.LEADER_EPOCH => // leader-epoch-cache is not accessed. + case IndexType.PRODUCER_SNAPSHOT => // producer-snapshot is not accessed. + } + }) + assertThrows(classOf[CorruptIndexException], () => cache.getIndexEntry(rlsMetadata)) + } + + @Test + def testConcurrentCacheDeletedFileExists(): Unit = { + val remoteIndexCacheDir = cache.cacheDir() + + val entry = cache.getIndexEntry(rlsMetadata) + // verify index files on disk + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, s"Offset index file should be present on disk at ${remoteIndexCacheDir.toPath}") + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, s"Txn index file should be present on disk at ${remoteIndexCacheDir.toPath}") + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, s"Time index file should be present on disk at ${remoteIndexCacheDir.toPath}") + + // Simulating a concurrency issue where deleted files already exist on disk + // This happen when cleanerThread is slow and not able to delete index entries + // while same index Entry is cached again and invalidated. + // The new deleted file created should be replaced by existing deleted file. + + // create deleted suffix file + Files.copy(entry.offsetIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.offsetIndex().file().getPath, "", LogFileUtils.DELETED_FILE_SUFFIX))) + Files.copy(entry.txnIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.txnIndex().file().getPath, "", LogFileUtils.DELETED_FILE_SUFFIX))) + Files.copy(entry.timeIndex().file().toPath, Paths.get(Utils.replaceSuffix(entry.timeIndex().file().getPath, "", LogFileUtils.DELETED_FILE_SUFFIX))) + + // verify deleted file exists on disk + assertTrue(getIndexFileFromRemoteCacheDir(cache, LogFileUtils.DELETED_FILE_SUFFIX).isPresent, s"Deleted Offset index file should be present on disk at ${remoteIndexCacheDir.toPath}") + + cache.remove(rlsMetadata.remoteLogSegmentId().id()) + + // wait until entry is marked for deletion + TestUtils.waitUntilTrue(() => entry.isMarkedForCleanup, + "Failed to mark cache entry for cleanup after invalidation") + TestUtils.waitUntilTrue(() => entry.isCleanStarted, + "Failed to cleanup cache entry after invalidation") + + // verify no index files on disk + waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.INDEX_FILE_SUFFIX).isPresent, + s"Offset index file should not be present on disk at ${remoteIndexCacheDir.toPath}") + waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TXN_INDEX_FILE_SUFFIX).isPresent, + s"Txn index file should not be present on disk at ${remoteIndexCacheDir.toPath}") + waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.TIME_INDEX_FILE_SUFFIX).isPresent, + s"Time index file should not be present on disk at ${remoteIndexCacheDir.toPath}") + waitUntilTrue(() => !getIndexFileFromRemoteCacheDir(cache, LogFileUtils.DELETED_FILE_SUFFIX).isPresent, + s"Index file marked for deletion should not be present on disk at ${remoteIndexCacheDir.toPath}") + } + + private def generateSpyCacheEntry(remoteLogSegmentId: RemoteLogSegmentId + = RemoteLogSegmentId.generateNew(idPartition)): RemoteIndexCache.Entry = { + val rlsMetadata = new RemoteLogSegmentMetadata(remoteLogSegmentId, baseOffset, lastOffset, time.milliseconds(), brokerId, time.milliseconds(), segmentSize, Collections.singletonMap(0, 0L)) + val timeIndex = spy(createTimeIndexForSegmentMetadata(rlsMetadata, tpDir)) + val txIndex = spy(createTxIndexForSegmentMetadata(rlsMetadata, tpDir)) + val offsetIndex = spy(createOffsetIndexForSegmentMetadata(rlsMetadata, tpDir)) + spy(new RemoteIndexCache.Entry(offsetIndex, timeIndex, txIndex)) + } + + private def assertAtLeastOnePresent(cache: RemoteIndexCache, uuids: Uuid*): Unit = { + uuids.foreach { + uuid => { + if (cache.internalCache.asMap().containsKey(uuid)) return + } + } + fail("all uuids are not present in cache") + } + + private def assertCacheSize(expectedSize: Int): Unit = { + // Cache may grow beyond the size temporarily while evicting, hence, run in a loop to validate + // that cache reaches correct state eventually + TestUtils.waitUntilTrue(() => cache.internalCache.asMap().size() == expectedSize, + msg = s"cache did not adhere to expected size of $expectedSize") + } + + private def verifyFetchIndexInvocation(count: Int, + indexTypes: Seq[IndexType] = + Seq(IndexType.OFFSET, IndexType.TIMESTAMP, IndexType.TRANSACTION)): Unit = { + for (indexType <- indexTypes) { + verify(rsm, times(count)).fetchIndex(any(classOf[RemoteLogSegmentMetadata]), ArgumentMatchers.eq(indexType)) + } + } + + private def createTxIndexForSegmentMetadata(metadata: RemoteLogSegmentMetadata, dir: File): TransactionIndex = { + val txnIdxFile = remoteTransactionIndexFile(dir, metadata) + txnIdxFile.createNewFile() + new TransactionIndex(metadata.startOffset(), txnIdxFile) + } + + private def createCorruptTxnIndexForSegmentMetadata(dir: File, metadata: RemoteLogSegmentMetadata): TransactionIndex = { + val txnIdxFile = remoteTransactionIndexFile(dir, metadata) + txnIdxFile.createNewFile() + val txnIndex = new TransactionIndex(metadata.startOffset(), txnIdxFile) + val abortedTxns = List( + new AbortedTxn(0L, 0, 10, 11), + new AbortedTxn(1L, 5, 15, 13), + new AbortedTxn(2L, 18, 35, 25), + new AbortedTxn(3L, 32, 50, 40)) + abortedTxns.foreach(txnIndex.append) + txnIndex.close() + + // open the index with a different starting offset to fake invalid data + new TransactionIndex(100L, txnIdxFile) + } + + private def createTimeIndexForSegmentMetadata(metadata: RemoteLogSegmentMetadata, dir: File): TimeIndex = { + val maxEntries = (metadata.endOffset() - metadata.startOffset()).asInstanceOf[Int] + new TimeIndex(remoteTimeIndexFile(dir, metadata), metadata.startOffset(), maxEntries * 12) + } + + private def createOffsetIndexForSegmentMetadata(metadata: RemoteLogSegmentMetadata, dir: File) = { + val maxEntries = (metadata.endOffset() - metadata.startOffset()).asInstanceOf[Int] + new OffsetIndex(remoteOffsetIndexFile(dir, metadata), metadata.startOffset(), maxEntries * 8) + } + + private def generateRemoteLogSegmentMetadata(size: Int, + tpId: TopicIdPartition): List[RemoteLogSegmentMetadata] = { + val metadataList = mutable.Buffer.empty[RemoteLogSegmentMetadata] + for (i <- 0 until size) { + metadataList.append(new RemoteLogSegmentMetadata(new RemoteLogSegmentId(tpId, Uuid.randomUuid()), baseOffset * i, baseOffset * i + 10, time.milliseconds(), brokerId, time.milliseconds(), segmentSize, Collections.singletonMap(0, 0L))) + } + metadataList.toList + } + + private def maybeAppendIndexEntries(offsetIndex: OffsetIndex, + timeIndex: TimeIndex): Unit = { + if (!offsetIndex.isFull) { + val curTime = time.milliseconds() + for (i <- 0 until offsetIndex.maxEntries) { + val offset = offsetIndex.baseOffset + i + offsetIndex.append(offset, i) + timeIndex.maybeAppend(curTime + i, offset, true) + } + offsetIndex.flush() + timeIndex.flush() + } + } + + private def estimateOneEntryBytesSize(): Long = { + val tp = new TopicPartition("estimate-entry-bytes-size", 0) + val tpId = new TopicIdPartition(Uuid.randomUuid(), tp) + val tpDir = new File(logDir, tpId.toString) + Files.createDirectory(tpDir.toPath) + val rsm = mock(classOf[RemoteStorageManager]) + mockRsmFetchIndex(rsm) + val cache = new RemoteIndexCache(2L, rsm, tpDir.toString) + val metadataList = generateRemoteLogSegmentMetadata(size = 1, tpId) + val entry = cache.getIndexEntry(metadataList.head) + val entrySizeInBytes = entry.entrySizeBytes() + Utils.closeQuietly(cache, "RemoteIndexCache created for estimating entry size") + entrySizeInBytes + } + + private def mockRsmFetchIndex(rsm: RemoteStorageManager): Unit = { + when(rsm.fetchIndex(any(classOf[RemoteLogSegmentMetadata]), any(classOf[IndexType]))) + .thenAnswer(ans => { + val metadata = ans.getArgument[RemoteLogSegmentMetadata](0) + val indexType = ans.getArgument[IndexType](1) + val offsetIdx = createOffsetIndexForSegmentMetadata(metadata, tpDir) + val timeIdx = createTimeIndexForSegmentMetadata(metadata, tpDir) + val txnIdx = createTxIndexForSegmentMetadata(metadata, tpDir) + maybeAppendIndexEntries(offsetIdx, timeIdx) + indexType match { + case IndexType.OFFSET => new FileInputStream(offsetIdx.file) + case IndexType.TIMESTAMP => new FileInputStream(timeIdx.file) + case IndexType.TRANSACTION => new FileInputStream(txnIdx.file) + case IndexType.LEADER_EPOCH => // leader-epoch-cache is not accessed. + case IndexType.PRODUCER_SNAPSHOT => // producer-snapshot is not accessed. + } + }) + } + + private def createCorruptOffsetIndexFile(dir: File): Unit = { + val pw = new PrintWriter(remoteOffsetIndexFile(dir, rlsMetadata)) + pw.write("Hello, world") + // The size of the string written in the file is 12 bytes, + // but it should be multiple of Offset Index EntrySIZE which is equal to 8. + pw.close() + } + + private def createCorruptTimeIndexOffsetFile(dir: File): Unit = { + val pw = new PrintWriter(remoteTimeIndexFile(dir, rlsMetadata)) + pw.write("Hello, world1") + // The size of the string written in the file is 13 bytes, + // but it should be multiple of Time Index EntrySIZE which is equal to 12. + pw.close() + } + + private def createCorruptedIndexFile(indexType: IndexType, dir: File): Unit = { + if (indexType == IndexType.OFFSET) { + createCorruptOffsetIndexFile(dir) + } else if (indexType == IndexType.TIMESTAMP) { + createCorruptTimeIndexOffsetFile(dir) + } else if (indexType == IndexType.TRANSACTION) { + createCorruptTxnIndexForSegmentMetadata(dir, rlsMetadata) + } + } + + private def getIndexFileFromRemoteCacheDir(cache: RemoteIndexCache, suffix: String) = { + try { + Files.walk(cache.cacheDir().toPath) + .filter(Files.isRegularFile(_)) + .filter(path => path.getFileName.toString.endsWith(suffix)) + .findAny() + } catch { + case _ : NoSuchFileException | _ : UncheckedIOException => Optional.empty() + } + } + + private def getRunningCleanerThread: java.util.Set[Thread] = { + Thread.getAllStackTraces.keySet() + .stream() + .filter(t => t.isAlive && t.getName.startsWith(REMOTE_LOG_INDEX_CACHE_CLEANER_THREAD)) + .collect(Collectors.toSet()) + } +} diff --git a/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala b/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala index b0dbd0a05c25f..75946f14075d9 100644 --- a/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/KafkaMetricsGroupTest.scala @@ -22,12 +22,15 @@ import org.apache.kafka.server.metrics.KafkaMetricsGroup import org.junit.jupiter.api.Assertions.{assertEquals, assertNull} import org.junit.jupiter.api.Test +import java.util.Collections +import scala.jdk.CollectionConverters._ + class KafkaMetricsGroupTest { @Test def testUntaggedMetricName(): Unit = { val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") - val metricName = metricsGroup.metricName("TaggedMetric", java.util.Map.of) + val metricName = metricsGroup.metricName("TaggedMetric", Collections.emptyMap()) assertEquals("kafka.metrics", metricName.getGroup) assertEquals("TestMetrics", metricName.getType) @@ -39,13 +42,7 @@ class KafkaMetricsGroupTest { @Test def testTaggedMetricName(): Unit = { - val tags = { - val map = new java.util.LinkedHashMap[String, String]() - map.put("foo", "bar") - map.put("bar", "baz") - map.put("baz", "raz.taz") - map - } + val tags = Map("foo" -> "bar", "bar" -> "baz", "baz" -> "raz.taz").asJava val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") val metricName = metricsGroup.metricName("TaggedMetric", tags) @@ -59,13 +56,7 @@ class KafkaMetricsGroupTest { @Test def testTaggedMetricNameWithEmptyValue(): Unit = { - val tags = { - val map = new java.util.LinkedHashMap[String, String]() - map.put("foo", "bar") - map.put("bar", "") - map.put("baz", "raz.taz") - map - } + val tags = Map("foo" -> "bar", "bar" -> "", "baz" -> "raz.taz").asJava val metricsGroup = new KafkaMetricsGroup("kafka.metrics", "TestMetrics") val metricName = metricsGroup.metricName("TaggedMetric", tags) @@ -76,4 +67,6 @@ class KafkaMetricsGroupTest { metricName.getMBeanName) assertEquals("baz.raz_taz.foo.bar", metricName.getScope) } + + } diff --git a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala index 834b8efe48db9..5c795a0e68fc8 100644 --- a/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala +++ b/core/src/test/scala/unit/kafka/metrics/MetricsTest.scala @@ -18,7 +18,6 @@ package kafka.metrics import java.lang.management.ManagementFactory -import java.util import java.util.Properties import javax.management.ObjectName import com.yammer.metrics.core.MetricPredicate @@ -37,9 +36,9 @@ import org.apache.kafka.common.utils.Time import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics, LinuxIoMetricsCollector} import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics -import org.junit.jupiter.api.{Test, Timeout} +import org.junit.jupiter.api.Timeout import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} @Timeout(120) class MetricsTest extends KafkaServerTestHarness with Logging { @@ -57,8 +56,9 @@ class MetricsTest extends KafkaServerTestHarness with Logging { val nMessages = 2 - @Test - def testMetricsReporterAfterDeletingTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMetricsReporterAfterDeletingTopic(quorum: String): Unit = { val topic = "test-topic-metric" createTopic(topic) deleteTopic(topic) @@ -66,8 +66,9 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } - @Test - def testBrokerTopicMetricsUnregisteredAfterDeletingTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testBrokerTopicMetricsUnregisteredAfterDeletingTopic(quorum: String): Unit = { val topic = "test-broker-topic-metric" createTopic(topic, 2) // Produce a few messages to create the metrics @@ -80,29 +81,33 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(Set.empty, topicMetricGroups(topic), "Topic metrics exists after deleteTopic") } - @Test - def testClusterIdMetric(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterIdMetric(quorum: String): Unit = { // Check if clusterId metric exists. val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=ClusterId"), 1) } - @Test - def testBrokerStateMetric(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testBrokerStateMetric(quorum: String): Unit = { // Check if BrokerState metric exists. val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=BrokerState"), 1) } - @Test - def testYammerMetricsCountMetric(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testYammerMetricsCountMetric(quorum: String): Unit = { // Check if yammer-metrics-count metric exists. val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=yammer-metrics-count"), 1) } - @Test - def testLinuxIoMetrics(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLinuxIoMetrics(quorum: String): Unit = { // Check if linux-disk-{read,write}-bytes metrics either do or do not exist depending on whether we are or are not // able to collect those metrics on the platform where this test is running. val usable = new LinuxIoMetricsCollector("/proc", Time.SYSTEM).usable() @@ -112,8 +117,9 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertEquals(metrics.keySet.asScala.count(_.getMBeanName == s"$requiredKafkaServerPrefix=$name"), expectedCount)) } - @Test - def testJMXFilter(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testJMXFilter(quorum: String): Unit = { // Check if cluster id metrics is not exposed in JMX assertTrue(ManagementFactory.getPlatformMBeanServer .isRegistered(new ObjectName("kafka.controller:type=KafkaController,name=ActiveControllerCount"))) @@ -121,11 +127,12 @@ class MetricsTest extends KafkaServerTestHarness with Logging { .isRegistered(new ObjectName(s"$requiredKafkaServerPrefix=ClusterId"))) } - @Test - def testUpdateJMXFilter(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUpdateJMXFilter(quorum: String): Unit = { // verify previously exposed metrics are removed and existing matching metrics are added brokers.foreach(broker => broker.kafkaYammerMetrics.reconfigure( - util.Map.of(JmxReporter.EXCLUDE_CONFIG, "kafka.controller:type=KafkaController,name=ActiveControllerCount") + Map(JmxReporter.EXCLUDE_CONFIG -> "kafka.controller:type=KafkaController,name=ActiveControllerCount").asJava )) assertFalse(ManagementFactory.getPlatformMBeanServer .isRegistered(new ObjectName("kafka.controller:type=KafkaController,name=ActiveControllerCount"))) @@ -133,8 +140,9 @@ class MetricsTest extends KafkaServerTestHarness with Logging { .isRegistered(new ObjectName(s"$requiredKafkaServerPrefix=ClusterId"))) } - @Test - def testGeneralBrokerTopicMetricsAreGreedilyRegistered(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testGeneralBrokerTopicMetricsAreGreedilyRegistered(quorum: String): Unit = { val topic = "test-broker-topic-metric" createTopic(topic, 2) @@ -148,18 +156,19 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(topicMetricGroups(topic).nonEmpty, "Topic metrics aren't registered") } - @Test - def testWindowsStyleTagNames(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testWindowsStyleTagNames(quorum: String): Unit = { val path = "C:\\windows-path\\kafka-logs" - val tags = util.Map.of("dir", path) - val expectedMBeanName = Set(tags.keySet().iterator().next(), ObjectName.quote(path)).mkString("=") - val metric = new KafkaMetricsGroup(this.getClass).metricName("test-metric", tags) + val tags = Map("dir" -> path) + val expectedMBeanName = Set(tags.keySet.head, ObjectName.quote(path)).mkString("=") + val metric = new KafkaMetricsGroup(this.getClass).metricName("test-metric", tags.asJava) assert(metric.getMBeanName.endsWith(expectedMBeanName)) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testBrokerTopicMetricsBytesInOut(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testBrokerTopicMetricsBytesInOut(quorum: String, groupProtocol: String): Unit = { val topic = "test-bytes-in-out" val replicationBytesIn = BrokerTopicMetrics.REPLICATION_BYTES_IN_PER_SEC val replicationBytesOut = BrokerTopicMetrics.REPLICATION_BYTES_OUT_PER_SEC @@ -204,8 +213,9 @@ class MetricsTest extends KafkaServerTestHarness with Logging { assertTrue(TestUtils.meterCount(bytesOut) > initialBytesOut) } - @Test - def testKRaftControllerMetrics(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testKRaftControllerMetrics(quorum: String): Unit = { val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics Set( "kafka.controller:type=KafkaController,name=ActiveControllerCount", diff --git a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala index d9a64b4186fae..3906011a20380 100644 --- a/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/network/ConnectionQuotasTest.scala @@ -20,7 +20,7 @@ package kafka.network import java.net.InetAddress import java.util import java.util.concurrent.{Callable, ExecutorService, Executors, TimeUnit} -import java.util.Properties +import java.util.{Collections, Properties} import com.yammer.metrics.core.Meter import kafka.network.Processor.ListenerMetricTag import kafka.server.KafkaConfig @@ -37,6 +37,7 @@ import org.apache.kafka.server.util.MockTime import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api._ +import scala.jdk.CollectionConverters._ import scala.collection.{Map, mutable} import scala.concurrent.TimeoutException @@ -90,12 +91,12 @@ class ConnectionQuotasTest { listeners.keys.foreach { name => blockedPercentMeters.put(name, new KafkaMetricsGroup(this.getClass).newMeter( - s"${name}BlockedPercent", "blocked time", TimeUnit.NANOSECONDS, util.Map.of(ListenerMetricTag, name))) + s"${name}BlockedPercent", "blocked time", TimeUnit.NANOSECONDS, Map(ListenerMetricTag -> name).asJava)) } // use system time, because ConnectionQuota causes the current thread to wait with timeout, which waits based on // system time; so using mock time will likely result in test flakiness due to a mixed use of mock and system time time = Time.SYSTEM - metrics = new Metrics(new MetricConfig(), util.List.of, time) + metrics = new Metrics(new MetricConfig(), Collections.emptyList(), time) executor = Executors.newFixedThreadPool(listeners.size) } @@ -281,7 +282,7 @@ class ConnectionQuotasTest { addListenersAndVerify(config, connectionQuotas) - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, listenerMaxConnections.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTIONS_CONFIG -> listenerMaxConnections.toString).asJava listeners.values.foreach { listener => connectionQuotas.maxConnectionsPerListener(listener.listenerName).configure(listenerConfig) } @@ -373,7 +374,7 @@ class ConnectionQuotasTest { val config = KafkaConfig.fromProps(props) connectionQuotas = new ConnectionQuotas(config, time, metrics) - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava addListenersAndVerify(config, listenerConfig, connectionQuotas) // create connections with the rate < listener quota on every listener, and verify there is no throttling @@ -399,7 +400,7 @@ class ConnectionQuotasTest { val config = KafkaConfig.fromProps(props) connectionQuotas = new ConnectionQuotas(config, time, metrics) - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava addListenersAndVerify(config, listenerConfig, connectionQuotas) // create connections with the rate > listener quota on every listener @@ -497,7 +498,7 @@ class ConnectionQuotasTest { // with a default per-IP limit of 25 and a listener rate of 30, only one IP should be able to saturate their IP rate // limit, the other IP will hit listener rate limits and block connectionQuotas.updateIpConnectionRateQuota(None, Some(ipConnectionRateLimit)) - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava addListenersAndVerify(config, listenerConfig, connectionQuotas) val listener = listeners("EXTERNAL").listenerName // use a small number of connections because a longer-running test will have both IPs throttle at different times @@ -555,7 +556,7 @@ class ConnectionQuotasTest { connectionQuotas.addListener(config, listeners("EXTERNAL").listenerName) val maxListenerConnectionRate = 0 - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, maxListenerConnectionRate.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> maxListenerConnectionRate.toString).asJava assertThrows(classOf[ConfigException], () => connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).validateReconfiguration(listenerConfig) ) @@ -568,11 +569,11 @@ class ConnectionQuotasTest { connectionQuotas.addListener(config, listeners("EXTERNAL").listenerName) val listenerRateLimit = 20 - val listenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, listenerRateLimit.toString) + val listenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> listenerRateLimit.toString).asJava connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).configure(listenerConfig) // remove connection rate limit - connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(util.Map.of) + connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(Map.empty.asJava) // create connections as fast as possible, will timeout if connections get throttled with previous rate // (50s to create 1000 connections) @@ -585,7 +586,7 @@ class ConnectionQuotasTest { // configure 100 connection/second rate limit val newMaxListenerConnectionRate = 10 - val newListenerConfig = util.Map.of(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG, newMaxListenerConnectionRate.toString) + val newListenerConfig = Map(SocketServerConfigs.MAX_CONNECTION_CREATION_RATE_CONFIG -> newMaxListenerConnectionRate.toString).asJava connectionQuotas.maxConnectionsPerListener(listeners("EXTERNAL").listenerName).reconfigure(newListenerConfig) // verify rate limit @@ -749,7 +750,7 @@ class ConnectionQuotasTest { } private def addListenersAndVerify(config: KafkaConfig, connectionQuotas: ConnectionQuotas) : Unit = { - addListenersAndVerify(config, util.Map.of, connectionQuotas) + addListenersAndVerify(config, Map.empty.asJava, connectionQuotas) } private def addListenersAndVerify(config: KafkaConfig, @@ -828,7 +829,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "connection-accept-throttle-time", SocketServer.MetricsGroup, - util.Map.of(Processor.ListenerMetricTag, listener)) + Collections.singletonMap(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -836,7 +837,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "ip-connection-accept-throttle-time", SocketServer.MetricsGroup, - util.Map.of(Processor.ListenerMetricTag, listener)) + Collections.singletonMap(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -844,7 +845,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( "connection-accept-rate", SocketServer.MetricsGroup, - util.Map.of(Processor.ListenerMetricTag, listener)) + Collections.singletonMap(Processor.ListenerMetricTag, listener)) metrics.metric(metricName) } @@ -859,7 +860,7 @@ class ConnectionQuotasTest { val metricName = metrics.metricName( s"connection-accept-rate", SocketServer.MetricsGroup, - util.Map.of("ip", ip)) + Collections.singletonMap("ip", ip)) metrics.metric(metricName) } diff --git a/core/src/test/scala/unit/kafka/network/ProcessorTest.scala b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala index 54bbd0bf2018a..e5ceb781c5745 100644 --- a/core/src/test/scala/unit/kafka/network/ProcessorTest.scala +++ b/core/src/test/scala/unit/kafka/network/ProcessorTest.scala @@ -18,22 +18,20 @@ package kafka.network import kafka.server.metadata.KRaftMetadataCache -import org.apache.kafka.clients.NodeApiVersions +import kafka.server.{DefaultApiVersionManager, ForwardingManager, SimpleApiVersionManager} import org.apache.kafka.common.errors.{InvalidRequestException, UnsupportedVersionException} import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.RequestHeaderData import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.requests.{RequestHeader, RequestTestUtils} -import org.apache.kafka.server.{BrokerFeatures, DefaultApiVersionManager, SimpleApiVersionManager} +import org.apache.kafka.server.BrokerFeatures import org.apache.kafka.server.common.{FinalizedFeatures, KRaftVersion, MetadataVersion} import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue} import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable import org.mockito.Mockito.mock -import java.util -import java.util.function.Supplier -import java.util.Optional +import java.util.Collections class ProcessorTest { @@ -42,11 +40,11 @@ class ProcessorTest { val requestHeader = RequestTestUtils.serializeRequestHeader( new RequestHeader(ApiKeys.INIT_PRODUCER_ID, 0, "clientid", 0)) val apiVersionManager = new SimpleApiVersionManager(ListenerType.CONTROLLER, true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) val e = assertThrows(classOf[InvalidRequestException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, "INIT_PRODUCER_ID with listener type CONTROLLER should throw InvalidRequestException exception") - assertTrue(e.toString.contains("disabled api")) + assertTrue(e.toString.contains("disabled api")); } @Test @@ -57,26 +55,26 @@ class ProcessorTest { .setRequestApiKey(ApiKeys.LEADER_AND_ISR.id) .setRequestApiVersion(headerVersion) .setClientId("clientid") - .setCorrelationId(0) + .setCorrelationId(0); val requestHeader = RequestTestUtils.serializeRequestHeader(new RequestHeader(requestHeaderData, headerVersion)) - val apiVersionManager = new DefaultApiVersionManager(ListenerType.BROKER, mock(classOf[Supplier[Optional[NodeApiVersions]]]), - BrokerFeatures.createDefault(true), new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION), true, Optional.empty) + val apiVersionManager = new DefaultApiVersionManager(ListenerType.BROKER, mock(classOf[ForwardingManager]), + BrokerFeatures.createDefault(true), new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION), true) val e = assertThrows(classOf[InvalidRequestException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, "LEADER_AND_ISR should throw InvalidRequestException exception") - assertTrue(e.toString.contains("Unsupported api")) + assertTrue(e.toString.contains("Unsupported api")); } @Test def testParseRequestHeaderWithUnsupportedApiVersion(): Unit = { val requestHeader = RequestTestUtils.serializeRequestHeader( - new RequestHeader(ApiKeys.FETCH, 0, "clientid", 0)) - val apiVersionManager = new DefaultApiVersionManager(ListenerType.BROKER, mock(classOf[Supplier[Optional[NodeApiVersions]]]), - BrokerFeatures.createDefault(true), new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION), true, Optional.empty) + new RequestHeader(ApiKeys.PRODUCE, 0, "clientid", 0)) + val apiVersionManager = new SimpleApiVersionManager(ListenerType.BROKER, true, + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) val e = assertThrows(classOf[UnsupportedVersionException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, "FETCH v0 should throw UnsupportedVersionException exception") - assertTrue(e.toString.contains("unsupported version")) + assertTrue(e.toString.contains("unsupported version")); } /** @@ -88,12 +86,13 @@ class ProcessorTest { for (version <- 0 to 2) { val requestHeader = RequestTestUtils.serializeRequestHeader( new RequestHeader(ApiKeys.PRODUCE, version.toShort, "clientid", 0)) - val apiVersionManager = new DefaultApiVersionManager(ListenerType.BROKER, mock(classOf[Supplier[Optional[NodeApiVersions]]]), - BrokerFeatures.createDefault(true), new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION), true, Optional.empty) + val apiVersionManager = new DefaultApiVersionManager(ListenerType.BROKER, mock(classOf[ForwardingManager]), + BrokerFeatures.createDefault(true), new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION), true) val e = assertThrows(classOf[UnsupportedVersionException], (() => Processor.parseRequestHeader(apiVersionManager, requestHeader)): Executable, s"PRODUCE $version should throw UnsupportedVersionException exception") - assertTrue(e.toString.contains("unsupported version")) + assertTrue(e.toString.contains("unsupported version")); } } + } diff --git a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala index 8dbfa808d7f45..1ab0f0ae8e80c 100644 --- a/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala +++ b/core/src/test/scala/unit/kafka/network/RequestChannelTest.scala @@ -17,6 +17,7 @@ package kafka.network + import com.fasterxml.jackson.databind.ObjectMapper import kafka.network import kafka.server.EnvelopeUtils @@ -46,9 +47,9 @@ import org.mockito.Mockito.mock import java.io.IOException import java.net.InetAddress import java.nio.ByteBuffer -import java.util +import java.util.Collections import java.util.concurrent.atomic.AtomicReference -import scala.collection.Map +import scala.collection.{Map, Seq} import scala.jdk.CollectionConverters._ import scala.jdk.OptionConverters.RichOption @@ -63,9 +64,9 @@ class RequestChannelTest { def testAlterRequests(): Unit = { val sensitiveValue = "secret" - def verifyConfig(resource: ConfigResource, entries: util.List[ConfigEntry], expectedValues: Map[String, String]): Unit = { + def verifyConfig(resource: ConfigResource, entries: Seq[ConfigEntry], expectedValues: Map[String, String]): Unit = { val alterConfigs = request(new AlterConfigsRequest.Builder( - util.Map.of(resource, new Config(entries)), true).build()) + Collections.singletonMap(resource, new Config(entries.asJavaCollection)), true).build()) val loggableAlterConfigs = alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest] val loggedConfig = loggableAlterConfigs.configs.get(resource) @@ -76,37 +77,37 @@ class RequestChannelTest { val brokerResource = new ConfigResource(ConfigResource.Type.BROKER, "1") val keystorePassword = new ConfigEntry(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sensitiveValue) - verifyConfig(brokerResource, util.List.of(keystorePassword), Map(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) + verifyConfig(brokerResource, Seq(keystorePassword), Map(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) val keystoreLocation = new ConfigEntry(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG, "/path/to/keystore") - verifyConfig(brokerResource, util.List.of(keystoreLocation), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore")) - verifyConfig(brokerResource, util.List.of(keystoreLocation, keystorePassword), + verifyConfig(brokerResource, Seq(keystoreLocation), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore")) + verifyConfig(brokerResource, Seq(keystoreLocation, keystorePassword), Map(SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG -> "/path/to/keystore", SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG -> Password.HIDDEN)) val listenerKeyPassword = new ConfigEntry(s"listener.name.internal.${SslConfigs.SSL_KEY_PASSWORD_CONFIG}", sensitiveValue) - verifyConfig(brokerResource, util.List.of(listenerKeyPassword), Map(listenerKeyPassword.name -> Password.HIDDEN)) + verifyConfig(brokerResource, Seq(listenerKeyPassword), Map(listenerKeyPassword.name -> Password.HIDDEN)) val listenerKeystore = new ConfigEntry(s"listener.name.internal.${SslConfigs.SSL_KEYSTORE_LOCATION_CONFIG}", "/path/to/keystore") - verifyConfig(brokerResource, util.List.of(listenerKeystore), Map(listenerKeystore.name -> "/path/to/keystore")) + verifyConfig(brokerResource, Seq(listenerKeystore), Map(listenerKeystore.name -> "/path/to/keystore")) val plainJaasConfig = new ConfigEntry(s"listener.name.internal.plain.${SaslConfigs.SASL_JAAS_CONFIG}", sensitiveValue) - verifyConfig(brokerResource, util.List.of(plainJaasConfig), Map(plainJaasConfig.name -> Password.HIDDEN)) + verifyConfig(brokerResource, Seq(plainJaasConfig), Map(plainJaasConfig.name -> Password.HIDDEN)) val plainLoginCallback = new ConfigEntry(s"listener.name.internal.plain.${SaslConfigs.SASL_LOGIN_CALLBACK_HANDLER_CLASS}", "test.LoginClass") - verifyConfig(brokerResource, util.List.of(plainLoginCallback), Map(plainLoginCallback.name -> plainLoginCallback.value)) + verifyConfig(brokerResource, Seq(plainLoginCallback), Map(plainLoginCallback.name -> plainLoginCallback.value)) val customConfig = new ConfigEntry("custom.config", sensitiveValue) - verifyConfig(brokerResource, util.List.of(customConfig), Map(customConfig.name -> Password.HIDDEN)) + verifyConfig(brokerResource, Seq(customConfig), Map(customConfig.name -> Password.HIDDEN)) val topicResource = new ConfigResource(ConfigResource.Type.TOPIC, "testTopic") val compressionType = new ConfigEntry(TopicConfig.COMPRESSION_TYPE_CONFIG, "lz4") - verifyConfig(topicResource, util.List.of(compressionType), Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> "lz4")) - verifyConfig(topicResource, util.List.of(customConfig), Map(customConfig.name -> Password.HIDDEN)) + verifyConfig(topicResource, Seq(compressionType), Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> "lz4")) + verifyConfig(topicResource, Seq(customConfig), Map(customConfig.name -> Password.HIDDEN)) // Verify empty request val alterConfigs = request(new AlterConfigsRequest.Builder( - util.Map.of[ConfigResource, Config], true).build()) - assertEquals(util.Map.of, alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest].configs) + Collections.emptyMap[ConfigResource, Config], true).build()) + assertEquals(Collections.emptyMap, alterConfigs.loggableRequest.asInstanceOf[AlterConfigsRequest].configs) } @Test @@ -130,21 +131,11 @@ class RequestChannelTest { op: OpType, entries: Map[String, String], expectedValues: Map[String, String]): Unit = { - val alterConfigs = incrementalAlterConfigs(resource, entries, op) - val alterConfigsString = alterConfigs.toString - entries.foreach { entry => - if (!alterConfigsString.contains(entry._1)) { - fail("Config names should be in the request string") - } - if (entry._2 != null && alterConfigsString.contains(entry._2)) { - fail("Config values should not be in the request string") - } - } - val req = request(alterConfigs) - val loggableAlterConfigs = req.loggableRequest.asInstanceOf[IncrementalAlterConfigsRequest] + val alterConfigs = request(incrementalAlterConfigs(resource, entries, op)) + val loggableAlterConfigs = alterConfigs.loggableRequest.asInstanceOf[IncrementalAlterConfigsRequest] val loggedConfig = loggableAlterConfigs.data.resources.find(resource.`type`.id, resource.name).configs assertEquals(expectedValues, toMap(loggedConfig)) - val alterConfigsDesc = RequestConvertToJson.requestDesc(req.header, req.requestLog.toJava, req.isForwarded).toString + val alterConfigsDesc = RequestConvertToJson.requestDesc(alterConfigs.header, alterConfigs.requestLog.toJava, alterConfigs.isForwarded).toString assertFalse(alterConfigsDesc.contains(sensitiveValue), s"Sensitive config logged $alterConfigsDesc") } @@ -188,7 +179,7 @@ class RequestChannelTest { @Test def testNonAlterRequestsNotTransformed(): Unit = { - val metadataRequest = request(new MetadataRequest.Builder(util.List.of("topic"), true).build()) + val metadataRequest = request(new MetadataRequest.Builder(List("topic").asJava, true).build()) assertSame(metadataRequest.body[MetadataRequest], metadataRequest.loggableRequest) } @@ -197,10 +188,10 @@ class RequestChannelTest { val sensitiveValue = "secret" val resource = new ConfigResource(ConfigResource.Type.BROKER, "1") val keystorePassword = new ConfigEntry(SslConfigs.SSL_KEYSTORE_PASSWORD_CONFIG, sensitiveValue) - val entries = util.List.of(keystorePassword) + val entries = Seq(keystorePassword) - val alterConfigs = request(new AlterConfigsRequest.Builder(util.Map.of(resource, - new Config(entries)), true).build()) + val alterConfigs = request(new AlterConfigsRequest.Builder(Collections.singletonMap(resource, + new Config(entries.asJavaCollection)), true).build()) assertTrue(isValidJson(RequestConvertToJson.request(alterConfigs.loggableRequest).toString)) } diff --git a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala index 6a4b8d8ca672e..5ebcfd65ccec2 100644 --- a/core/src/test/scala/unit/kafka/network/SocketServerTest.scala +++ b/core/src/test/scala/unit/kafka/network/SocketServerTest.scala @@ -19,10 +19,10 @@ package kafka.network import com.fasterxml.jackson.databind.node.{JsonNodeFactory, ObjectNode, TextNode} import com.yammer.metrics.core.{Gauge, Meter} +import kafka.cluster.EndPoint import kafka.server._ import kafka.utils.Implicits._ import kafka.utils.TestUtils -import org.apache.kafka.common.Endpoint import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.{ProduceRequestData, SaslAuthenticateRequestData, SaslHandshakeRequestData, VoteRequestData} @@ -38,7 +38,6 @@ import org.apache.kafka.common.utils._ import org.apache.kafka.network.RequestConvertToJson import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.security.CredentialProvider -import org.apache.kafka.server.{ApiVersionManager, SimpleApiVersionManager} import org.apache.kafka.server.common.{FinalizedFeatures, MetadataVersion} import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.metrics.KafkaYammerMetrics @@ -59,7 +58,7 @@ import java.security.cert.X509Certificate import java.util import java.util.concurrent.atomic.AtomicInteger import java.util.concurrent._ -import java.util.{Properties, Random} +import java.util.{Collections, Properties, Random} import javax.net.ssl._ import scala.collection.mutable import scala.collection.mutable.ArrayBuffer @@ -85,16 +84,16 @@ class SocketServerTest { TestUtils.clearYammerMetrics() private val apiVersionManager = new SimpleApiVersionManager(ListenerType.BROKER, true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) var server: SocketServer = _ val sockets = new ArrayBuffer[Socket] private val kafkaLogger = LogManager.getLogger("kafka") private var logLevelToRestore: Level = _ - def endpoint: Endpoint = { + def endpoint: EndPoint = { KafkaConfig.fromProps(props, doLog = false).dataPlaneListeners.head } - def listener: String = endpoint.listener + def listener: String = endpoint.listenerName.value val uncaughtExceptions = new AtomicInteger(0) @BeforeEach @@ -840,7 +839,7 @@ class SocketServerTest { // same as SocketServer.createAcceptor, // except the Acceptor overriding a method to inject the exception - override protected def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { + override protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = { new DataPlaneAcceptor(this, endPoint, this.config, nodeId, connectionQuotas, time, false, requestChannel, serverMetrics, this.credentialProvider, new LogContext(), MemoryPool.NONE, this.apiVersionManager) { override protected def configureAcceptedSocketChannel(socketChannel: SocketChannel): Unit = { @@ -1722,7 +1721,7 @@ class SocketServerTest { val testableServer = new TestableSocketServer(KafkaConfig.fromProps(props), connectionQueueSize = 1) testableServer.enableRequestProcessing(Map.empty).get(1, TimeUnit.MINUTES) val testableSelector = testableServer.testableSelector - val errors = new util.HashSet[String]() + val errors = new mutable.HashSet[String] def acceptorStackTraces: scala.collection.Map[Thread, String] = { Thread.getAllStackTraces.asScala.collect { @@ -1746,7 +1745,7 @@ class SocketServerTest { // Block selector until Acceptor is blocked while connections are pending testableSelector.pollCallback = () => { try { - TestUtils.waitUntilTrue(() => !errors.isEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked, + TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount >= numConnections - 1 || acceptorBlocked, "Acceptor not blocked", waitTimeMs = 10000) } catch { case _: Throwable => errors.add(s"Acceptor not blocked: $acceptorStackTraces") @@ -1754,9 +1753,9 @@ class SocketServerTest { } testableSelector.operationCounts.clear() val sockets = (1 to numConnections).map(_ => connect(testableServer)) - TestUtils.waitUntilTrue(() => !errors.isEmpty || registeredConnectionCount == numConnections, + TestUtils.waitUntilTrue(() => errors.nonEmpty || registeredConnectionCount == numConnections, "Connections not registered", waitTimeMs = 15000) - assertEquals(util.Set.of, errors) + assertEquals(Set.empty, errors) testableSelector.waitForOperations(SelectorOperation.Register, numConnections) // In each iteration, SocketServer processes at most connectionQueueSize (1 in this test) @@ -1858,7 +1857,7 @@ class SocketServerTest { val failedFuture = new CompletableFuture[Void]() failedFuture.completeExceptionally(new RuntimeException("authorizer startup failed")) assertThrows(classOf[ExecutionException], () => { - newServer.enableRequestProcessing(Map(endpoint -> failedFuture)).get() + newServer.enableRequestProcessing(Map(endpoint.toJava -> failedFuture)).get() }) } finally { shutdownServerAndMetrics(newServer) @@ -1891,7 +1890,7 @@ class SocketServerTest { val authorizerFuture = new CompletableFuture[Void]() val enableFuture = newServer.enableRequestProcessing( newServer.dataPlaneAcceptors.keys().asScala. - map(k => k -> authorizerFuture).toMap) + map(_.toJava).map(k => k -> authorizerFuture).toMap) assertFalse(authorizerFuture.isDone) assertFalse(enableFuture.isDone) newServer.dataPlaneAcceptors.values().forEach(a => assertNull(a.serverChannel)) @@ -1992,7 +1991,7 @@ class SocketServerTest { } class TestableAcceptor(socketServer: SocketServer, - endPoint: Endpoint, + endPoint: EndPoint, cfg: KafkaConfig, nodeId: Int, connectionQuotas: ConnectionQuotas, @@ -2061,7 +2060,7 @@ class SocketServerTest { private var conn: Option[Socket] = None override protected[network] def createSelector(channelBuilder: ChannelBuilder): Selector = { - new TestableSelector(config, channelBuilder, time, metrics, metricTags) + new TestableSelector(config, channelBuilder, time, metrics, metricTags.asScala) } override private[network] def processException(errorMessage: String, throwable: Throwable): Unit = { @@ -2098,7 +2097,7 @@ class SocketServerTest { connectionDisconnectListeners = connectionDisconnectListeners ) { - override def createDataPlaneAcceptor(endPoint: Endpoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel) : DataPlaneAcceptor = { + override def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel) : DataPlaneAcceptor = { new TestableAcceptor(this, endPoint, this.config, 0, connectionQuotas, time, isPrivilegedListener, requestChannel, this.metrics, this.credentialProvider, new LogContext, MemoryPool.NONE, this.apiVersionManager, connectionQueueSize) } @@ -2159,9 +2158,9 @@ class SocketServerTest { case object CloseSelector extends SelectorOperation } - class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: util.Map[String, String] = new util.HashMap()) + class TestableSelector(config: KafkaConfig, channelBuilder: ChannelBuilder, time: Time, metrics: Metrics, metricTags: mutable.Map[String, String] = mutable.Map.empty) extends Selector(config.socketRequestMaxBytes, config.connectionsMaxIdleMs, config.failedAuthenticationDelayMs, - metrics, time, "socket-server", metricTags, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) { + metrics, time, "socket-server", metricTags.asJava, false, true, channelBuilder, MemoryPool.NONE, new LogContext()) { val failures = mutable.Map[SelectorOperation, Throwable]() val operationCounts = mutable.Map[SelectorOperation, Int]().withDefaultValue(0) diff --git a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala index 117c2b63978d8..5f5f94efc95fc 100644 --- a/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala +++ b/core/src/test/scala/unit/kafka/raft/RaftManagerTest.scala @@ -22,21 +22,20 @@ import java.nio.channels.OverlappingFileLockException import java.nio.file.{Files, Path, StandardOpenOption} import java.util.Properties import java.util.concurrent.CompletableFuture +import kafka.log.LogManager import kafka.server.KafkaConfig import kafka.tools.TestRaftServer.ByteArraySerde import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.Uuid import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.utils.Time -import org.apache.kafka.common.utils.Utils import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.{Endpoints, MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.Endpoints +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.ProcessRole import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} import org.apache.kafka.server.fault.FaultHandler -import org.apache.kafka.storage.internals.log.LogManager import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest @@ -59,7 +58,7 @@ class RaftManagerTest { props.setProperty(ServerLogConfigs.LOG_DIR_CONFIG, value.toString) } metadataDir.foreach { value => - props.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, value.toString) + props.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, value.toString) } props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, processRoles.mkString(",")) props.setProperty(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) @@ -90,7 +89,7 @@ class RaftManagerTest { val endpoints = Endpoints.fromInetSocketAddresses( config.effectiveAdvertisedControllerListeners .map { endpoint => - (ListenerName.normalised(endpoint.listener), InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) + (endpoint.listenerName, InetSocketAddress.createUnresolved(endpoint.host, endpoint.port)) } .toMap .asJava @@ -127,21 +126,17 @@ class RaftManagerTest { val logDir = TestUtils.tempDir() val nodeId = 1 - try { - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - processRolesSet, - nodeId, - Seq(logDir.toPath), - None - ) + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + processRolesSet, + nodeId, + Seq(logDir.toPath), + None ) - assertEquals(nodeId, raftManager.client.nodeId.getAsInt) - raftManager.shutdown() - } finally { - Utils.delete(logDir) - } + ) + assertEquals(nodeId, raftManager.client.nodeId.getAsInt) + raftManager.shutdown() } @ParameterizedTest @@ -160,27 +155,22 @@ class RaftManagerTest { } val nodeId = 1 - try { - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - Set(ProcessRole.ControllerRole), - nodeId, - logDir, - metadataDir - ) + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + Set(ProcessRole.ControllerRole), + nodeId, + logDir, + metadataDir ) + ) - val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME) - assertTrue(fileLocked(lockPath)) + val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LockFileName) + assertTrue(fileLocked(lockPath)) - raftManager.shutdown() + raftManager.shutdown() - assertFalse(fileLocked(lockPath)) - } finally { - logDir.foreach(p => Utils.delete(p.toFile)) - metadataDir.foreach(p => Utils.delete(p.toFile)) - } + assertFalse(fileLocked(lockPath)) } @Test @@ -189,27 +179,22 @@ class RaftManagerTest { val metadataDir = Some(TestUtils.tempDir().toPath) val nodeId = 1 - try { - val raftManager = createRaftManager( - new TopicPartition("__raft_id_test", 0), - createConfig( - Set(ProcessRole.BrokerRole), - nodeId, - logDir, - metadataDir - ) + val raftManager = createRaftManager( + new TopicPartition("__raft_id_test", 0), + createConfig( + Set(ProcessRole.BrokerRole), + nodeId, + logDir, + metadataDir ) + ) - val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LOCK_FILE_NAME) - assertTrue(fileLocked(lockPath)) + val lockPath = metadataDir.getOrElse(logDir.head).resolve(LogManager.LockFileName) + assertTrue(fileLocked(lockPath)) - raftManager.shutdown() + raftManager.shutdown() - assertFalse(fileLocked(lockPath)) - } finally { - logDir.foreach(p => Utils.delete(p.toFile)) - metadataDir.foreach(p => Utils.delete(p.toFile)) - } + assertFalse(fileLocked(lockPath)) } def createMetadataLog(config: KafkaConfig): Unit = { diff --git a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala index 0839990868fdd..833cae0672d67 100644 --- a/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala +++ b/core/src/test/scala/unit/kafka/security/authorizer/AuthorizerTest.scala @@ -23,8 +23,6 @@ import org.apache.kafka.common.acl.AclOperation._ import org.apache.kafka.common.acl.AclPermissionType.{ALLOW, DENY} import org.apache.kafka.common.acl._ import org.apache.kafka.common.errors.ApiException -import org.apache.kafka.common.metrics.{Metrics, PluginMetrics} -import org.apache.kafka.common.metrics.internals.PluginMetricsImpl import org.apache.kafka.common.requests.RequestContext import org.apache.kafka.common.resource.PatternType.{LITERAL, MATCH, PREFIXED} import org.apache.kafka.common.resource.Resource.CLUSTER_NAME @@ -41,15 +39,18 @@ import org.apache.kafka.server.authorizer._ import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.net.InetAddress import java.util -import java.util.{Properties, UUID} +import java.util.{Collections, Properties, UUID} import scala.jdk.CollectionConverters._ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { private final val PLAINTEXT = new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "127.0.0.1", 9020) + private final val KRAFT = "kraft" private val allowReadAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, READ, ALLOW) private val allowWriteAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, WRITE, ALLOW) @@ -81,9 +82,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { val props = properties config = KafkaConfig.fromProps(props) authorizer1 = createAuthorizer() - configureAuthorizer(authorizer1, config.originals, new PluginMetricsImpl(new Metrics(), util.Map.of())) + configureAuthorizer(authorizer1, config.originals) authorizer2 = createAuthorizer() - configureAuthorizer(authorizer2, config.originals, new PluginMetricsImpl(new Metrics(), util.Map.of())) + configureAuthorizer(authorizer2, config.originals) resource = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL) } @@ -101,28 +102,32 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { super.tearDown() } - @Test - def testAuthorizeThrowsOnNonLiteralResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAuthorizeThrowsOnNonLiteralResource(quorum: String): Unit = { assertThrows(classOf[IllegalArgumentException], () => authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "something", PREFIXED))) } - @Test - def testAuthorizeWithEmptyResourceName(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAuthorizeWithEmptyResourceName(quorum: String): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL))) addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(GROUP, WILDCARD_RESOURCE, LITERAL)) assertTrue(authorize(authorizer1, requestContext, READ, new ResourcePattern(GROUP, "", LITERAL))) } // Authorizing the empty resource is not supported because empty resource name is invalid. - @Test - def testEmptyAclThrowsException(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testEmptyAclThrowsException(quorum: String): Unit = { assertThrows(classOf[ApiException], () => addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(GROUP, "", LITERAL))) } - @Test - def testTopicAcl(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testTopicAcl(quorum: String): Unit = { val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "rob") val user3 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "batman") @@ -176,8 +181,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { /** * CustomPrincipals should be compared with their principal type and name */ - @Test - def testAllowAccessWithCustomPrincipal(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAllowAccessWithCustomPrincipal(quorum: String): Unit = { val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val customUserPrincipal = new CustomPrincipal(KafkaPrincipal.USER_TYPE, username) val host1 = InetAddress.getByName("192.168.1.1") @@ -196,8 +202,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, host1Context, READ, resource), "User1 should not have READ access from host1 due to denyAcl") } - @Test - def testDenyTakesPrecedence(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testDenyTakesPrecedence(quorum: String): Unit = { val user = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val host = InetAddress.getByName("192.168.2.1") val session = newRequestContext(user, host) @@ -211,8 +218,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, session, READ, resource), "deny should take precedence over allow.") } - @Test - def testAllowAllAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAllowAllAccess(quorum: String): Unit = { val allowAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, ALLOW) changeAclAndVerify(Set.empty, Set(allowAllAcl), Set.empty) @@ -221,8 +229,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, context, READ, resource), "allow all acl should allow access to all.") } - @Test - def testSuperUserHasAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testSuperUserHasAccess(quorum: String): Unit = { val denyAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, DENY) changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty) @@ -237,8 +246,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { /** * CustomPrincipals should be compared with their principal type and name */ - @Test - def testSuperUserWithCustomPrincipalHasAccess(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testSuperUserWithCustomPrincipalHasAccess(quorum: String): Unit = { val denyAllAcl = new AccessControlEntry(WILDCARD_PRINCIPAL_STRING, WILDCARD_HOST, AclOperation.ALL, DENY) changeAclAndVerify(Set.empty, Set(denyAllAcl), Set.empty) @@ -247,8 +257,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, session, READ, resource), "superuser with custom principal always has access, no matter what acls.") } - @Test - def testWildCardAcls(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testWildCardAcls(quorum: String): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, resource), "when acls = [], authorizer should fail close.") val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) @@ -271,20 +282,22 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, host1Context, WRITE, resource), "User1 should not have WRITE access from host1") } - @Test - def testNoAclFound(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testNoAclFound(quorum: String): Unit = { assertFalse(authorize(authorizer1, requestContext, READ, resource), "when acls = [], authorizer should deny op.") } - @Test - def testNoAclFoundOverride(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testNoAclFoundOverride(quorum: String): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true") val cfg = KafkaConfig.fromProps(props) val testAuthorizer = createAuthorizer() try { - configureAuthorizer(testAuthorizer, cfg.originals, new PluginMetricsImpl(new Metrics(), util.Map.of())) + configureAuthorizer(testAuthorizer, cfg.originals) assertTrue(authorize(testAuthorizer, requestContext, READ, resource), "when acls = null or [], authorizer should allow op with allow.everyone = true.") } finally { @@ -292,8 +305,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } } - @Test - def testAclConfigWithWhitespace(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAclConfigWithWhitespace(quorum: String): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, " true") // replace all property values with leading & trailing whitespaces @@ -301,7 +315,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { val cfg = KafkaConfig.fromProps(props) val testAuthorizer = createAuthorizer() try { - configureAuthorizer(testAuthorizer, cfg.originals, new PluginMetricsImpl(new Metrics(), util.Map.of())) + configureAuthorizer(testAuthorizer, cfg.originals) assertTrue(authorize(testAuthorizer, requestContext, READ, resource), "when acls = null or [], authorizer should allow op with allow.everyone = true.") } finally { @@ -309,8 +323,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } } - @Test - def testAclManagementAPIs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAclManagementAPIs(quorum: String): Unit = { val user1 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, username) val user2 = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "bob") val host1 = "host1" @@ -351,7 +366,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { //test remove all acls for resource removeAcls(authorizer1, Set.empty, resource) - TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], authorizer1, resource, AccessControlEntryFilter.ANY) + TestUtils.waitAndVerifyAcls(Set.empty[AccessControlEntry], authorizer1, resource) acls = changeAclAndVerify(Set.empty, Set(acl1), Set.empty) changeAclAndVerify(acls, Set.empty, acls) @@ -370,18 +385,19 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { addAcls(authorizer1, Set(acl1), commonResource) addAcls(authorizer1, Set(acl2), commonResource) - TestUtils.waitAndVerifyAcls(Set(acl1, acl2), authorizer1, commonResource, AccessControlEntryFilter.ANY) + TestUtils.waitAndVerifyAcls(Set(acl1, acl2), authorizer1, commonResource) } /** * Test ACL inheritance, as described in #{org.apache.kafka.common.acl.AclOperation} */ - @Test - def testAclInheritance(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAclInheritance(quorum: String): Unit = { testImplicationsOfAllow(AclOperation.ALL, Set(READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, - CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, CREATE_TOKENS, DESCRIBE_TOKENS, TWO_PHASE_COMMIT)) + CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, CREATE_TOKENS, DESCRIBE_TOKENS)) testImplicationsOfDeny(AclOperation.ALL, Set(READ, WRITE, CREATE, DELETE, ALTER, DESCRIBE, - CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, CREATE_TOKENS, DESCRIBE_TOKENS, TWO_PHASE_COMMIT)) + CLUSTER_ACTION, DESCRIBE_CONFIGS, ALTER_CONFIGS, IDEMPOTENT_WRITE, CREATE_TOKENS, DESCRIBE_TOKENS)) testImplicationsOfAllow(READ, Set(DESCRIBE)) testImplicationsOfAllow(WRITE, Set(DESCRIBE)) testImplicationsOfAllow(DELETE, Set(DESCRIBE)) @@ -424,15 +440,17 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { removeAcls(authorizer1, acls, clusterResource) } - @Test - def testAccessAllowedIfAllowAclExistsOnWildcardResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAccessAllowedIfAllowAclExistsOnWildcardResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl), wildCardResource) assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @Test - def testDeleteAclOnWildcardResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testDeleteAclOnWildcardResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), wildCardResource) removeAcls(authorizer1, Set(allowReadAcl), wildCardResource) @@ -440,8 +458,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set(allowWriteAcl), getAcls(authorizer1, wildCardResource)) } - @Test - def testDeleteAllAclOnWildcardResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testDeleteAllAclOnWildcardResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl), wildCardResource) removeAcls(authorizer1, Set.empty, wildCardResource) @@ -449,15 +468,17 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1)) } - @Test - def testAccessAllowedIfAllowAclExistsOnPrefixedResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAccessAllowedIfAllowAclExistsOnPrefixedResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl), prefixedResource) assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @Test - def testDeleteAclOnPrefixedResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testDeleteAclOnPrefixedResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) removeAcls(authorizer1, Set(allowReadAcl), prefixedResource) @@ -465,8 +486,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set(allowWriteAcl), getAcls(authorizer1, prefixedResource)) } - @Test - def testDeleteAllAclOnPrefixedResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testDeleteAllAclOnPrefixedResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) removeAcls(authorizer1, Set.empty, prefixedResource) @@ -474,8 +496,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1)) } - @Test - def testAddAclsOnLiteralResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAddAclsOnLiteralResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), resource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), resource) @@ -484,8 +507,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, prefixedResource)) } - @Test - def testAddAclsOnWildcardResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAddAclsOnWildcardResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), wildCardResource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), wildCardResource) @@ -494,8 +518,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, prefixedResource)) } - @Test - def testAddAclsOnPrefixedResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAddAclsOnPrefixedResource(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl, allowWriteAcl), prefixedResource) addAcls(authorizer1, Set(allowWriteAcl, denyReadAcl), prefixedResource) @@ -504,8 +529,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, getAcls(authorizer1, resource)) } - @Test - def testAuthorizeWithPrefixedResource(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAuthorizeWithPrefixedResource(quorum: String): Unit = { addAcls(authorizer1, Set(denyReadAcl), new ResourcePattern(TOPIC, "a_other", LITERAL)) addAcls(authorizer1, Set(denyReadAcl), new ResourcePattern(TOPIC, "a_other", PREFIXED)) addAcls(authorizer1, Set(denyReadAcl), new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), PREFIXED)) @@ -524,8 +550,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertTrue(authorize(authorizer1, requestContext, READ, resource)) } - @Test - def testSingleCharacterResourceAcls(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testSingleCharacterResourceAcls(quorum: String): Unit = { addAcls(authorizer1, Set(allowReadAcl), new ResourcePattern(TOPIC, "f", LITERAL)) assertTrue(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "f", LITERAL))) assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "foo", LITERAL))) @@ -536,8 +563,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertFalse(authorize(authorizer1, requestContext, READ, new ResourcePattern(TOPIC, "foo_", LITERAL))) } - @Test - def testGetAclsPrincipal(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testGetAclsPrincipal(quorum: String): Unit = { val aclOnSpecificPrincipal = new AccessControlEntry(principal.toString, WILDCARD_HOST, WRITE, ALLOW) addAcls(authorizer1, Set(aclOnSpecificPrincipal), resource) @@ -556,8 +584,9 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(0, getAcls(authorizer1, principal).size, "acl on wildcard should not be returned for specific request") } - @Test - def testAclsFilter(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAclsFilter(quorum: String): Unit = { val resource1 = new ResourcePattern(TOPIC, "foo-" + UUID.randomUUID(), LITERAL) val resource2 = new ResourcePattern(TOPIC, "bar-" + UUID.randomUUID(), LITERAL) val prefixedResource = new ResourcePattern(TOPIC, "bar-", PREFIXED) @@ -567,7 +596,7 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { val acl3 = new AclBinding(resource2, new AccessControlEntry(principal.toString, WILDCARD_HOST, DESCRIBE, ALLOW)) val acl4 = new AclBinding(prefixedResource, new AccessControlEntry(wildcardPrincipal.toString, WILDCARD_HOST, READ, ALLOW)) - authorizer1.createAcls(requestContext, util.List.of(acl1, acl2, acl3, acl4)) + authorizer1.createAcls(requestContext, List(acl1, acl2, acl3, acl4).asJava) assertEquals(Set(acl1, acl2, acl3, acl4), authorizer1.acls(AclBindingFilter.ANY).asScala.toSet) assertEquals(Set(acl1, acl2), authorizer1.acls(new AclBindingFilter(resource1.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet) assertEquals(Set(acl4), authorizer1.acls(new AclBindingFilter(prefixedResource.toFilter, AccessControlEntryFilter.ANY)).asScala.toSet) @@ -591,15 +620,16 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { assertEquals(Set.empty, deleteResults(3).aclBindingDeleteResults.asScala.map(_.aclBinding).toSet) } - @Test - def testAuthorizeByResourceTypeNoAclFoundOverride(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array(KRAFT)) + def testAuthorizeByResourceTypeNoAclFoundOverride(quorum: String): Unit = { val props = properties props.put(StandardAuthorizer.ALLOW_EVERYONE_IF_NO_ACL_IS_FOUND_CONFIG, "true") val cfg = KafkaConfig.fromProps(props) val authorizer: Authorizer = createAuthorizer() try { - configureAuthorizer(authorizer, cfg.originals, new PluginMetricsImpl(new Metrics(), util.Map.of())) + configureAuthorizer(authorizer, cfg.originals) assertTrue(authorizeByResourceType(authorizer, requestContext, READ, resource.resourceType()), "If allow.everyone.if.no.acl.found = true, caller should have read access to at least one topic") assertTrue(authorizeByResourceType(authorizer, requestContext, WRITE, resource.resourceType()), @@ -625,14 +655,14 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { acls --= removedAcls } - TestUtils.waitAndVerifyAcls(acls, authorizer1, resource, AccessControlEntryFilter.ANY) + TestUtils.waitAndVerifyAcls(acls, authorizer1, resource) acls } private def authorize(authorizer: Authorizer, requestContext: RequestContext, operation: AclOperation, resource: ResourcePattern): Boolean = { val action = new Action(operation, resource, 1, true, true) - authorizer.authorize(requestContext, util.List.of(action)).asScala.head == AuthorizationResult.ALLOWED + authorizer.authorize(requestContext, List(action).asJava).asScala.head == AuthorizationResult.ALLOWED } private def getAcls(authorizer: Authorizer, resourcePattern: ResourcePattern): Set[AccessControlEntry] = { @@ -659,17 +689,14 @@ class AuthorizerTest extends QuorumTestHarness with BaseAuthorizerTest { } def configureAuthorizer(authorizer: Authorizer, - configs: util.Map[String, AnyRef], - pluginMetrics: PluginMetrics): Unit = { - configureStandardAuthorizer(authorizer.asInstanceOf[StandardAuthorizer], configs, pluginMetrics) + configs: util.Map[String, AnyRef]): Unit = { + configureStandardAuthorizer(authorizer.asInstanceOf[StandardAuthorizer], configs) } def configureStandardAuthorizer(standardAuthorizer: StandardAuthorizer, - configs: util.Map[String, AnyRef], - pluginMetrics: PluginMetrics): Unit = { + configs: util.Map[String, AnyRef]): Unit = { standardAuthorizer.configure(configs) - standardAuthorizer.withPluginMetrics(pluginMetrics) - initializeStandardAuthorizer(standardAuthorizer, new AuthorizerTestServerInfo(util.List.of(PLAINTEXT))) + initializeStandardAuthorizer(standardAuthorizer, new AuthorizerTestServerInfo(Collections.singletonList(PLAINTEXT))) } def initializeStandardAuthorizer(standardAuthorizer: StandardAuthorizer, diff --git a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala index 958c8440c2cf9..dcbfbcb349715 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractApiVersionsRequestTest.scala @@ -25,20 +25,29 @@ import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse, RequestUtils} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.utils.Utils -import org.apache.kafka.server.IntegrationTestUtils -import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, ShareVersion, StreamsVersion, TransactionVersion} +import org.apache.kafka.server.common.{EligibleLeaderReplicasVersion, GroupVersion, MetadataVersion, TransactionVersion} import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Tag import scala.jdk.CollectionConverters._ +import scala.jdk.OptionConverters.RichOptional @Tag("integration") abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { + def sendApiVersionsRequest(request: ApiVersionsRequest, listenerName: ListenerName): ApiVersionsResponse = { + val socket = if (cluster.controllerListenerName().toScala.contains(listenerName)) { + cluster.controllerSocketServers().asScala.head + } else { + cluster.brokerSocketServers().asScala.head + } + IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, socket, listenerName) + } + def sendUnsupportedApiVersionRequest(request: ApiVersionsRequest): ApiVersionsResponse = { val overrideHeader = IntegrationTestUtils.nextRequestHeader(ApiKeys.API_VERSIONS, Short.MaxValue) - val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) + val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) try { val serializedBytes = Utils.toArray( RequestUtils.serialize(overrideHeader.data, overrideHeader.headerVersion, request.data, request.version)) @@ -55,11 +64,11 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { apiVersion: Short = ApiKeys.API_VERSIONS.latestVersion ): Unit = { if (apiVersion >= 3) { - assertEquals(6, apiVersionsResponse.data().finalizedFeatures().size()) + assertEquals(4, apiVersionsResponse.data().finalizedFeatures().size()) assertEquals(MetadataVersion.latestTesting().featureLevel(), apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).minVersionLevel()) assertEquals(MetadataVersion.latestTesting().featureLevel(), apiVersionsResponse.data().finalizedFeatures().find(MetadataVersion.FEATURE_NAME).maxVersionLevel()) - assertEquals(7, apiVersionsResponse.data().supportedFeatures().size()) + assertEquals(5, apiVersionsResponse.data().supportedFeatures().size()) assertEquals(MetadataVersion.MINIMUM_VERSION.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(MetadataVersion.FEATURE_NAME).minVersion()) if (apiVersion < 4) { assertEquals(1, apiVersionsResponse.data().supportedFeatures().find("kraft.version").minVersion()) @@ -76,14 +85,8 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(EligibleLeaderReplicasVersion.FEATURE_NAME).minVersion()) assertEquals(EligibleLeaderReplicasVersion.ELRV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(EligibleLeaderReplicasVersion.FEATURE_NAME).maxVersion()) - - assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(ShareVersion.FEATURE_NAME).minVersion()) - assertEquals(ShareVersion.SV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(ShareVersion.FEATURE_NAME).maxVersion()) - - assertEquals(0, apiVersionsResponse.data().supportedFeatures().find(StreamsVersion.FEATURE_NAME).minVersion()) - assertEquals(StreamsVersion.SV_1.featureLevel(), apiVersionsResponse.data().supportedFeatures().find(StreamsVersion.FEATURE_NAME).maxVersion()) } - val expectedApis = if (cluster.controllerListenerName() == listenerName) { + val expectedApis = if (cluster.controllerListenerName().toScala.contains(listenerName)) { ApiVersionsResponse.collectApis( ApiMessageType.ListenerType.CONTROLLER, ApiKeys.apisForListener(ApiMessageType.ListenerType.CONTROLLER), @@ -101,7 +104,7 @@ abstract class AbstractApiVersionsRequestTest(cluster: ClusterInstance) { assertEquals(expectedApis.size, apiVersionsResponse.data.apiKeys.size, "API keys in ApiVersionsResponse must match API keys supported by broker.") - val defaultApiVersionsResponse = if (cluster.controllerListenerName() == listenerName) { + val defaultApiVersionsResponse = if (cluster.controllerListenerName().toScala.contains(listenerName)) { TestUtils.defaultApiVersionsResponse(0, ListenerType.CONTROLLER, enableUnstableLastVersion) } else { TestUtils.createApiVersionsResponse(0, expectedApis) diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala index d1d6e4a7810ad..7528eefc420ea 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala @@ -17,8 +17,8 @@ package kafka.server import com.yammer.metrics.core.Gauge +import kafka.server.AbstractFetcherThread.{ReplicaFetch, ResultWithPartitions} import kafka.utils.TestUtils -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} import org.apache.kafka.common.message.FetchResponseData.PartitionData import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.requests.FetchRequest @@ -27,21 +27,14 @@ import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.ReplicaFetch -import org.apache.kafka.server.ReplicaState -import org.apache.kafka.server.ResultWithPartitions -import org.apache.kafka.server.PartitionFetchState -import org.apache.kafka.server.LeaderEndPoint import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} import org.mockito.Mockito.{mock, verify, when} -import java.util.Optional import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters._ class AbstractFetcherManagerTest { @@ -67,9 +60,9 @@ class AbstractFetcherManagerTest { val fetchOffset = 10L val leaderEpoch = 15 val tp = new TopicPartition("topic", 0) - val topicId = Uuid.randomUuid() + val topicId = Some(Uuid.randomUuid()) val initialFetchState = InitialFetchState( - topicId = Some(topicId), + topicId = topicId, leader = new BrokerEndPoint(0, "localhost", 9092), currentLeaderEpoch = leaderEpoch, initOffset = fetchOffset) @@ -79,7 +72,7 @@ class AbstractFetcherManagerTest { when(fetcher.addPartitions(Map(tp -> initialFetchState))) .thenReturn(Set(tp)) when(fetcher.fetchState(tp)) - .thenReturn(Some(new PartitionFetchState(Optional.of(topicId), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) + .thenReturn(Some(PartitionFetchState(topicId, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None))) .thenReturn(None) when(fetcher.removePartitions(Set(tp))).thenReturn(Map.empty[TopicPartition, PartitionFetchState]) @@ -129,9 +122,9 @@ class AbstractFetcherManagerTest { val fetchOffset = 10L val leaderEpoch = 15 val tp = new TopicPartition("topic", 0) - val topicId = Uuid.randomUuid() + val topicId = Some(Uuid.randomUuid()) val initialFetchState = InitialFetchState( - topicId = Some(topicId), + topicId = topicId, leader = new BrokerEndPoint(0, "localhost", 9092), currentLeaderEpoch = leaderEpoch, initOffset = fetchOffset) @@ -165,8 +158,8 @@ class AbstractFetcherManagerTest { val tp1 = new TopicPartition("topic1", 0) val tp2 = new TopicPartition("topic2", 0) val unknownTp = new TopicPartition("topic2", 1) - val topicId1 = Uuid.randomUuid() - val topicId2 = Uuid.randomUuid() + val topicId1 = Some(Uuid.randomUuid()) + val topicId2 = Some(Uuid.randomUuid()) // Start out with no topic ID. val initialFetchState1 = InitialFetchState( @@ -191,13 +184,13 @@ class AbstractFetcherManagerTest { .thenReturn(Set(tp2)) when(fetcher.fetchState(tp1)) - .thenReturn(Some(new PartitionFetchState(Optional.empty, fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) - .thenReturn(Some(new PartitionFetchState(Optional.of(topicId1), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) + .thenReturn(Some(PartitionFetchState(None, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None))) + .thenReturn(Some(PartitionFetchState(topicId1, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None))) when(fetcher.fetchState(tp2)) - .thenReturn(Some(new PartitionFetchState(Optional.empty, fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) - .thenReturn(Some(new PartitionFetchState(Optional.of(topicId2), fetchOffset, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.TRUNCATING, Optional.empty))) + .thenReturn(Some(PartitionFetchState(None, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None))) + .thenReturn(Some(PartitionFetchState(topicId2, fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None))) - val topicIds = Map(tp1.topic -> Some(topicId1), tp2.topic -> Some(topicId2)) + val topicIds = Map(tp1.topic -> topicId1, tp2.topic -> topicId2) // When targeting a fetcher that doesn't exist, we will not see fetcher.maybeUpdateTopicIds called. // We will see it for a topic partition that does not exist. @@ -206,7 +199,7 @@ class AbstractFetcherManagerTest { def verifyFetchState(fetchState: Option[PartitionFetchState], expectedTopicId: Option[Uuid]): Unit = { assertTrue(fetchState.isDefined) - assertEquals(expectedTopicId, fetchState.get.topicId.toScala) + assertEquals(expectedTopicId, fetchState.get.topicId) } fetcherManager.addFetcherForPartitions(Map(tp1 -> initialFetchState1, tp2 -> initialFetchState2)) @@ -215,8 +208,8 @@ class AbstractFetcherManagerTest { val partitionsToUpdate = Map(tp1 -> initialFetchState1.leader.id, tp2 -> initialFetchState2.leader.id) fetcherManager.maybeUpdateTopicIds(partitionsToUpdate, topicIds) - verifyFetchState(fetcher.fetchState(tp1), Some(topicId1)) - verifyFetchState(fetcher.fetchState(tp2), Some(topicId2)) + verifyFetchState(fetcher.fetchState(tp1), topicId1) + verifyFetchState(fetcher.fetchState(tp2), topicId2) // Try an invalid fetcher and an invalid topic partition val invalidPartitionsToUpdate = Map(tp1 -> 2, unknownTp -> initialFetchState1.leader.id) @@ -304,15 +297,15 @@ class AbstractFetcherManagerTest { override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = java.util.Map.of() + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = Map.empty override def fetchEarliestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = new OffsetAndEpoch(1L, 0) override def fetchLatestOffset(topicPartition: TopicPartition, currentLeaderEpoch: Int): OffsetAndEpoch = new OffsetAndEpoch(1L, 0) - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = java.util.Map.of() + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = Map.empty - override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = new ResultWithPartitions(java.util.Optional.empty[ReplicaFetch](), java.util.Set.of()) + override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = ResultWithPartitions(None, Set.empty) override val isTruncationOnFetchSupported: Boolean = false @@ -335,24 +328,21 @@ class AbstractFetcherManagerTest { fetchBackOffMs = 0, brokerTopicStats = new BrokerTopicStats) { - override protected def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = None + override protected def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: FetchData): Option[LogAppendInfo] = { + None + } override protected def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = {} override protected def truncateFullyAndStartAt(topicPartition: TopicPartition, offset: Long): Unit = {} - override protected def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = Optional.of(0) + override protected def latestEpoch(topicPartition: TopicPartition): Option[Int] = Some(0) override protected def logStartOffset(topicPartition: TopicPartition): Long = 1 override protected def logEndOffset(topicPartition: TopicPartition): Long = 1 - override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] = Optional.of(new OffsetAndEpoch(1, 0)) + override protected def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = Some(new OffsetAndEpoch(1, 0)) } } diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala index 046ef52a7de90..5f01458ffa7f7 100644 --- a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadTest.scala @@ -28,22 +28,18 @@ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.common.{KafkaException, TopicPartition, Uuid} import org.apache.kafka.storage.internals.log.LogAppendInfo import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Assumptions.assumeTrue import org.junit.jupiter.api.{BeforeEach, Test} import kafka.server.FetcherThreadTestUtils.{initialFetchState, mkBatch} -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} -import org.apache.kafka.server.{PartitionFetchState, ReplicaState} -import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.ValueSource -import java.util.Optional import java.util.concurrent.atomic.AtomicInteger import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Set} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters._ class AbstractFetcherThreadTest { + val truncateOnFetch = true val topicIds = Map("topic1" -> Uuid.randomUuid(), "topic2" -> Uuid.randomUuid()) val version = ApiKeys.FETCH.latestVersion() private val partition1 = new TopicPartition("topic1", 0) @@ -60,7 +56,7 @@ class AbstractFetcherThreadTest { @Test def testMetricsRemovedOnShutdown(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -90,7 +86,7 @@ class AbstractFetcherThreadTest { @Test def testConsumerLagRemovedWithPartition(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -115,7 +111,7 @@ class AbstractFetcherThreadTest { @Test def testSimpleFetch(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -140,8 +136,8 @@ class AbstractFetcherThreadTest { val partition = new TopicPartition("topic", 0) val fetchBackOffMs = 250 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { throw new UnknownTopicIdException("Topic ID was unknown as expected for this test") } } @@ -182,11 +178,11 @@ class AbstractFetcherThreadTest { val partition3 = new TopicPartition("topic3", 0) val fetchBackOffMs = 250 - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { - Map(partition1 -> new FetchResponseData.PartitionData().setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), - partition2 -> new FetchResponseData.PartitionData().setErrorCode(Errors.INCONSISTENT_TOPIC_ID.code), - partition3 -> new FetchResponseData.PartitionData().setErrorCode(Errors.NONE.code)).asJava + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { + Map(partition1 -> new FetchData().setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), + partition2 -> new FetchData().setErrorCode(Errors.INCONSISTENT_TOPIC_ID.code), + partition3 -> new FetchData().setErrorCode(Errors.NONE.code)) } } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) @@ -225,7 +221,7 @@ class AbstractFetcherThreadTest { @Test def testFencedTruncation(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) @@ -253,7 +249,7 @@ class AbstractFetcherThreadTest { @Test def testFencedFetch(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) @@ -286,7 +282,7 @@ class AbstractFetcherThreadTest { @Test def testUnknownLeaderEpochInTruncation(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) @@ -304,7 +300,7 @@ class AbstractFetcherThreadTest { // Not data has been fetched and the follower is still truncating assertEquals(0, replicaState.logEndOffset) - assertEquals(Some(ReplicaState.TRUNCATING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(Truncating), fetcher.fetchState(partition).map(_.state)) // Bump the epoch on the leader fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch += 1 @@ -313,17 +309,17 @@ class AbstractFetcherThreadTest { fetcher.doWork() assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) } @Test def testUnknownLeaderEpochWhileFetching(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - // This test is contrived because it shouldn't be possible to see unknown leader epoch + // This test is contrived because it shouldn't be possible to to see unknown leader epoch // in the Fetching state as the leader must validate the follower's epoch when it checks // the truncation offset. @@ -342,7 +338,7 @@ class AbstractFetcherThreadTest { // We have fetched one batch and gotten out of the truncation phase assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) // Somehow the leader epoch rewinds fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch = 0 @@ -350,19 +346,19 @@ class AbstractFetcherThreadTest { // We are stuck at the current offset fetcher.doWork() assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) // After returning to the right epoch, we can continue fetching fetcher.mockLeader.leaderPartitionState(partition).leaderEpoch = 1 fetcher.doWork() assertEquals(2, replicaState.logEndOffset) - assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Some(Fetching), fetcher.fetchState(partition).map(_.state)) } @Test def testTruncation(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -398,8 +394,8 @@ class AbstractFetcherThreadTest { def testTruncateToHighWatermarkIfLeaderEpochInfoNotAvailable(): Unit = { val highWatermark = 2L val partition = new TopicPartition("topic", 0) - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = throw new UnsupportedOperationException } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) @@ -410,7 +406,7 @@ class AbstractFetcherThreadTest { super.truncate(topicPartition, truncationState) } - override def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = Optional.empty + override def latestEpoch(topicPartition: TopicPartition): Option[Int] = None } val replicaLog = Seq( @@ -435,7 +431,7 @@ class AbstractFetcherThreadTest { val highWatermark = 2L val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) { override def truncateToHighWatermark(partitions: Set[TopicPartition]): Unit = { @@ -443,7 +439,7 @@ class AbstractFetcherThreadTest { super.truncateToHighWatermark(partitions) } - override def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = Optional.empty + override def latestEpoch(topicPartition: TopicPartition): Option[Int] = None } val replicaLog = Seq( @@ -467,7 +463,7 @@ class AbstractFetcherThreadTest { val partition = new TopicPartition("topic", 0) var truncations = 0 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) { override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { @@ -509,9 +505,10 @@ class AbstractFetcherThreadTest { @Test def testTruncationOnFetchSkippedIfPartitionRemoved(): Unit = { + assumeTrue(truncateOnFetch) val partition = new TopicPartition("topic", 0) var truncations = 0 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) { override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { @@ -553,7 +550,7 @@ class AbstractFetcherThreadTest { @Test def testFollowerFetchOutOfRangeHigh(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -578,7 +575,7 @@ class AbstractFetcherThreadTest { // initial truncation and verify that the log end offset is updated fetcher.doWork() assertEquals(3L, replicaState.logEndOffset) - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) // To hit this case, we have to change the leader log without going through the truncation phase leaderState.log.clear() @@ -598,7 +595,7 @@ class AbstractFetcherThreadTest { val partition = new TopicPartition("topic", 0) var fetchedEarliestOffset = false - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { override def fetchEarliestOffset(topicPartition: TopicPartition, leaderEpoch: Int): OffsetAndEpoch = { fetchedEarliestOffset = true throw new FencedLeaderEpochException(s"Epoch $leaderEpoch is fenced") @@ -634,9 +631,8 @@ class AbstractFetcherThreadTest { @Test def testFollowerFetchOutOfRangeLow(): Unit = { - val leaderEpoch = 4 val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) @@ -644,28 +640,25 @@ class AbstractFetcherThreadTest { val replicaLog = Seq( mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes))) - val replicaState = PartitionState(replicaLog, leaderEpoch = leaderEpoch, highWatermark = 0L) + val replicaState = PartitionState(replicaLog, leaderEpoch = 0, highWatermark = 0L) fetcher.setReplicaState(partition, replicaState) - fetcher.addPartitions( - Map( - partition -> initialFetchState(topicIds.get(partition.topic), 3L, leaderEpoch = leaderEpoch) - ) - ) + fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), 3L, leaderEpoch = 0))) val leaderLog = Seq( - mkBatch(baseOffset = 2, leaderEpoch = leaderEpoch, new SimpleRecord("c".getBytes)) - ) + mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes))) - val leaderState = PartitionState(leaderLog, leaderEpoch = leaderEpoch, highWatermark = 2L) + val leaderState = PartitionState(leaderLog, leaderEpoch = 0, highWatermark = 2L) fetcher.mockLeader.setLeaderState(partition, leaderState) fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) // initial truncation and verify that the log start offset is updated fetcher.doWork() - // Second iteration required here since first iteration is required to - // perform initial truncation based on diverging epoch. - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + if (truncateOnFetch) { + // Second iteration required here since first iteration is required to + // perform initial truncation based on diverging epoch. + fetcher.doWork() + } + assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) assertEquals(2, replicaState.logStartOffset) assertEquals(List(), replicaState.log.toList) @@ -681,9 +674,8 @@ class AbstractFetcherThreadTest { @Test def testRetryAfterUnknownLeaderEpochInLatestOffsetFetch(): Unit = { - val leaderEpoch = 4 val partition = new TopicPartition("topic", 0) - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { val tries = new AtomicInteger(0) override def fetchLatestOffset(topicPartition: TopicPartition, leaderEpoch: Int): OffsetAndEpoch = { if (tries.getAndIncrement() == 0) @@ -696,24 +688,22 @@ class AbstractFetcherThreadTest { // The follower begins from an offset which is behind the leader's log start offset val replicaLog = Seq( - mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes)) - ) + mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("a".getBytes))) - val replicaState = PartitionState(replicaLog, leaderEpoch = leaderEpoch, highWatermark = 0L) + val replicaState = PartitionState(replicaLog, leaderEpoch = 0, highWatermark = 0L) fetcher.setReplicaState(partition, replicaState) - fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), 3L, leaderEpoch = leaderEpoch))) + fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), 3L, leaderEpoch = 0))) val leaderLog = Seq( - mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes)) - ) + mkBatch(baseOffset = 2, leaderEpoch = 4, new SimpleRecord("c".getBytes))) - val leaderState = PartitionState(leaderLog, leaderEpoch = leaderEpoch, highWatermark = 2L) + val leaderState = PartitionState(leaderLog, leaderEpoch = 0, highWatermark = 2L) fetcher.mockLeader.setLeaderState(partition, leaderState) fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) // initial truncation and initial error response handling fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(Fetching), fetcher.fetchState(partition).map(_.state)) TestUtils.waitUntilTrue(() => { fetcher.doWork() @@ -725,54 +715,14 @@ class AbstractFetcherThreadTest { assertEquals(leaderState.highWatermark, replicaState.highWatermark) } - @Test - def testReplicateBatchesUpToLeaderEpoch(): Unit = { - val leaderEpoch = 4 - val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) - - val replicaState = PartitionState(Seq(), leaderEpoch = leaderEpoch, highWatermark = 0L) - fetcher.setReplicaState(partition, replicaState) - fetcher.addPartitions( - Map( - partition -> initialFetchState(topicIds.get(partition.topic), 3L, leaderEpoch = leaderEpoch) - ) - ) - - val leaderLog = Seq( - mkBatch(baseOffset = 0, leaderEpoch = leaderEpoch - 1, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 1, leaderEpoch = leaderEpoch, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 2, leaderEpoch = leaderEpoch + 1, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState(leaderLog, leaderEpoch = leaderEpoch, highWatermark = 0L) - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.logStartOffset) - assertEquals(List(), replicaState.log.toList) - - TestUtils.waitUntilTrue(() => { - fetcher.doWork() - fetcher.replicaPartitionState(partition).log == fetcher.mockLeader.leaderPartitionState(partition).log.dropRight(1) - }, "Failed to reconcile leader and follower logs up to the leader epoch") - - assertEquals(leaderState.logStartOffset, replicaState.logStartOffset) - assertEquals(leaderState.logEndOffset - 1, replicaState.logEndOffset) - assertEquals(leaderState.highWatermark, replicaState.highWatermark) - } - @Test def testCorruptMessage(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { var fetchedOnce = false - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { - val fetchedData = super.fetch(fetchRequest).asScala + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { + val fetchedData = super.fetch(fetchRequest) if (!fetchedOnce) { val records = fetchedData.head._2.records.asInstanceOf[MemoryRecords] val buffer = records.buffer() @@ -781,7 +731,7 @@ class AbstractFetcherThreadTest { fetchedOnce = true } fetchedData - }.asJava + } } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) @@ -802,29 +752,33 @@ class AbstractFetcherThreadTest { assertEquals(2L, replicaState.logEndOffset) } - @ParameterizedTest - @ValueSource(ints = Array(0, 1)) - def testParameterizedLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader: Int): Unit = { - // When leaderEpochOnLeader = 1: + @Test + def testLeaderEpochChangeDuringFencedFetchEpochsFromLeader(): Unit = { // The leader is on the new epoch when the OffsetsForLeaderEpoch with old epoch is sent, so it // returns the fence error. Validate that response is ignored if the leader epoch changes on // the follower while OffsetsForLeaderEpoch request is in flight, but able to truncate and fetch // in the next of round of "doWork" + testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader = 1) + } - // When leaderEpochOnLeader = 0: + @Test + def testLeaderEpochChangeDuringSuccessfulFetchEpochsFromLeader(): Unit = { // The leader is on the old epoch when the OffsetsForLeaderEpoch with old epoch is sent // and returns the valid response. Validate that response is ignored if the leader epoch changes // on the follower while OffsetsForLeaderEpoch request is in flight, but able to truncate and // fetch once the leader is on the newer epoch (same as follower) + testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader = 0) + } + private def testLeaderEpochChangeDuringFetchEpochsFromLeader(leaderEpochOnLeader: Int): Unit = { val partition = new TopicPartition("topic", 1) val initialLeaderEpochOnFollower = 0 val nextLeaderEpochOnFollower = initialLeaderEpochOnFollower + 1 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { var fetchEpochsFromLeaderOnce = false - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { val fetchedEpochs = super.fetchEpochEndOffsets(partitions) if (!fetchEpochsFromLeaderOnce) { responseCallback.apply() @@ -857,7 +811,7 @@ class AbstractFetcherThreadTest { // Since leader epoch changed, fetch epochs response is ignored due to partition being in // truncating state with the updated leader epoch - assertEquals(Option(ReplicaState.TRUNCATING), fetcher.fetchState(partition).map(_.state)) + assertEquals(Option(Truncating), fetcher.fetchState(partition).map(_.state)) assertEquals(Option(nextLeaderEpochOnFollower), fetcher.fetchState(partition).map(_.currentLeaderEpoch)) if (leaderEpochOnLeader < nextLeaderEpochOnFollower) { @@ -877,8 +831,8 @@ class AbstractFetcherThreadTest { val initialLeaderEpochOnFollower = 0 val nextLeaderEpochOnFollower = initialLeaderEpochOnFollower + 1 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset]= { + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { val fetchedEpochs = super.fetchEpochEndOffsets(partitions) responseCallback.apply() fetchedEpochs @@ -923,15 +877,15 @@ class AbstractFetcherThreadTest { @Test def testTruncationThrowsExceptionIfLeaderReturnsPartitionsNotRequestedInFetchEpochs(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndPoint = new MockLeaderEndPoint(version = version) { - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { + val mockLeaderEndPoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) { + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { val unrequestedTp = new TopicPartition("topic2", 0) - super.fetchEpochEndOffsets(partitions).asScala + (unrequestedTp -> new EpochEndOffset() + super.fetchEpochEndOffsets(partitions).toMap + (unrequestedTp -> new EpochEndOffset() .setPartition(unrequestedTp.partition) .setErrorCode(Errors.NONE.code) .setLeaderEpoch(0) .setEndOffset(0)) - }.asJava + } } val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndPoint) val fetcher = new MockFetcherThread(mockLeaderEndPoint, mockTierStateMachine) @@ -947,19 +901,14 @@ class AbstractFetcherThreadTest { @Test def testFetcherThreadHandlingPartitionFailureDuringAppending(): Unit = { - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcherForAppend = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) { - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = { + override def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: FetchData): Option[LogAppendInfo] = { if (topicPartition == partition1) { throw new KafkaException() } else { - super.processPartitionData(topicPartition, fetchOffset, partitionLeaderEpoch, partitionData) + super.processPartitionData(topicPartition, fetchOffset, partitionData) } } } @@ -968,7 +917,7 @@ class AbstractFetcherThreadTest { @Test def testFetcherThreadHandlingPartitionFailureDuringTruncation(): Unit = { - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcherForTruncation = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine, failedPartitions = failedPartitions) { override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { @@ -1002,7 +951,7 @@ class AbstractFetcherThreadTest { // make sure the fetcher continues to work with rest of the partitions fetcher.doWork() - assertEquals(Some(ReplicaState.FETCHING), fetcher.fetchState(partition2).map(_.state)) + assertEquals(Some(Fetching), fetcher.fetchState(partition2).map(_.state)) assertFalse(failedPartitions.contains(partition2)) // simulate a leader change @@ -1011,7 +960,7 @@ class AbstractFetcherThreadTest { fetcher.addPartitions(Map(partition1 -> initialFetchState(topicIds.get(partition1.topic), 0L, leaderEpoch = 1)), forceTruncation = true) // partition1 added back - assertEquals(Some(ReplicaState.TRUNCATING), fetcher.fetchState(partition1).map(_.state)) + assertEquals(Some(Truncating), fetcher.fetchState(partition1).map(_.state)) assertFalse(failedPartitions.contains(partition1)) } @@ -1019,7 +968,7 @@ class AbstractFetcherThreadTest { @Test def testDivergingEpochs(): Unit = { val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -1055,20 +1004,18 @@ class AbstractFetcherThreadTest { @Test def testTruncateOnFetchDoesNotProcessPartitionData(): Unit = { + assumeTrue(truncateOnFetch) + val partition = new TopicPartition("topic", 0) + var truncateCalls = 0 var processPartitionDataCalls = 0 - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) { - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = { + override def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: FetchData): Option[LogAppendInfo] = { processPartitionDataCalls += 1 - super.processPartitionData(topicPartition, fetchOffset, partitionLeaderEpoch, partitionData) + super.processPartitionData(topicPartition, fetchOffset, partitionData) } override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { @@ -1131,7 +1078,7 @@ class AbstractFetcherThreadTest { @Test def testMaybeUpdateTopicIds(): Unit = { val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) + val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) @@ -1141,7 +1088,7 @@ class AbstractFetcherThreadTest { def verifyFetchState(fetchState: Option[PartitionFetchState], expectedTopicId: Option[Uuid]): Unit = { assertTrue(fetchState.isDefined) - assertEquals(expectedTopicId, fetchState.get.topicId.toScala) + assertEquals(expectedTopicId, fetchState.get.topicId) } verifyFetchState(fetcher.fetchState(partition), None) @@ -1156,588 +1103,4 @@ class AbstractFetcherThreadTest { assertTrue(fetcher.fetchState(unknownPartition).isEmpty) } - @Test - def testIgnoreFetchResponseWhenLeaderEpochChanged(): Unit = { - val newEpoch = 1 - val initEpoch = 0 - - val partition = new TopicPartition("topic", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - val replicaState = PartitionState(leaderEpoch = newEpoch) - fetcher.setReplicaState(partition, replicaState) - val initFetchState = initialFetchState(topicIds.get(partition.topic), 0L, leaderEpoch = newEpoch) - fetcher.addPartitions(Map(partition -> initFetchState)) - - val batch = mkBatch(baseOffset = 0L, leaderEpoch = initEpoch, new SimpleRecord("a".getBytes)) - val leaderState = PartitionState(Seq(batch), leaderEpoch = initEpoch, highWatermark = 1L) - fetcher.mockLeader.setLeaderState(partition, leaderState) - - val partitionData = Map(partition -> new FetchRequest.PartitionData(Uuid.randomUuid(), 0, 0, 1048576, Optional.of(initEpoch), Optional.of(initEpoch))).asJava - val fetchRequestOpt = FetchRequest.Builder.forReplica(0, 0, initEpoch, 0, Int.MaxValue, partitionData) - - fetcher.processFetchRequest(partitionData, fetchRequestOpt) - assertEquals(0, replicaState.logEndOffset, "FetchResponse should be ignored when leader epoch does not match") - } - - private def emptyReplicaState(rlmEnabled: Boolean, partition: TopicPartition, fetcher: MockFetcherThread): PartitionState = { - // Follower begins with an empty log - val replicaState = PartitionState(Seq(), leaderEpoch = 0, highWatermark = 0L, rlmEnabled = rlmEnabled) - fetcher.setReplicaState(partition, replicaState) - fetcher.addPartitions(Map(partition -> initialFetchState(topicIds.get(partition.topic), fetchOffset = 0, leaderEpoch = 0))) - replicaState - } - - /** - * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset = 0 - * - * Purpose: - * - Simulate a leader with logs starting at offset 0 and validate how the follower - * behaves when TieredStorage is disabled. - * - * Conditions: - * - TieredStorage: **Disabled** - * - Leader LogStartOffset: **0** - * - * Scenario: - * - The leader starts with a log at offset 0, containing three record batches offset at 0, 150, and 199. - * - The follower begins fetching, and we validate the correctness of its replica state as it fetches. - * - * Expected Outcomes: - * 1. The follower fetch state should transition to `FETCHING` initially. - * 2. After the first poll, one record batch is fetched. - * 3. After subsequent polls, the entire leader log is fetched: - * - Replica log size: 3 - * - Replica LogStartOffset: 0 - * - Replica LogEndOffset: 200 - * - Replica HighWatermark: 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetZero(): Unit = { - val rlmEnabled = false - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LogStartOffset = LocalLogStartOffset = 0 - mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled - ) - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(1, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(1, replicaState.logEndOffset) - assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 2) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Disabled and Leader LogStartOffset != 0 - * - * Purpose: - * - Validate follower behavior when the leader's log starts at a non-zero offset (10). - * - * Conditions: - * - TieredStorage: **Disabled** - * - Leader LogStartOffset: **10** - * - * Scenario: - * - The leader log starts at offset 10 with batches at 10, 150, and 199. - * - The follower starts fetching from offset 10. - * - * Expected Outcomes: - * 1. The follower's initial log is empty. - * 2. Replica offsets after polls: - * - LogStartOffset = 10 - * - LogEndOffset = 200 - * - HighWatermark = 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageDisabledLeaderLogStartOffsetNonZero(): Unit = { - val rlmEnabled = false - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LogStartOffset = LocalLogStartOffset = 10 - mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled - ) - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - // Follower gets out-of-range error (no messages received), fetch offset is updated from 0 to 10 - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(10, replicaState.logStartOffset) - assertEquals(10, replicaState.logEndOffset) - assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 3) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(10, replicaState.logStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and No Local Deletions - * - * Purpose: - * - Simulate TieredStorage enabled and validate follower fetching behavior when the leader - * log starts at 0 and no segments have been uploaded or deleted locally. - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **0** - * - Leader LocalLogStartOffset: **0** (No local segments deleted). - * - * Scenario: - * - The leader log contains three record batches at offsets 0, 150, and 199. - * - The follower starts fetching from offset 0. - * - * Expected Outcomes: - * 1. The replica log accurately reflects the leader's log: - * - LogStartOffset = 0 - * - LocalLogStartOffset = 0 - * - LogEndOffset = 200 - * - HighWatermark = 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroNoLocalDeletions(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LogStartOffset = LocalLogStartOffset = 0 - mkBatch(baseOffset = 0, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled - ) - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(1, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(0, replicaState.localLogStartOffset) - assertEquals(1, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - assertEquals(Some(1), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 2) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(0, replicaState.localLogStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset = 0, and Local Deletions - * - * Purpose: - * - Simulate TieredStorage enabled with some segments uploaded and deleted locally, causing - * a difference between the leader's LogStartOffset (0) and LocalLogStartOffset (> 0). - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **0** - * - Leader LocalLogStartOffset: **100** (Some segments deleted locally). - * - * Scenario: - * - The leader log starts at offset 0 but the local leader log starts at offset 100. - * - The follower fetch operation begins from offset 0. - * - * Expected Outcomes: - * 1. After offset adjustments for local deletions: - * - LogStartOffset = 0 - * - LocalLogStartOffset = 100 - * - LogEndOffset = 200 - * - HighWatermark = 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroWithLocalDeletions(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LocalLogStartOffset = 100 - mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled - ) - leaderState.logStartOffset = 0 - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(100, replicaState.localLogStartOffset) - assertEquals(100, replicaState.logEndOffset) - assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 3) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(100, replicaState.localLogStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and No Local Deletions - * - * Purpose: - * - Simulate TieredStorage enabled and validate follower fetch behavior when the leader's log - * starts at a non-zero offset and no local deletions have occurred. - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **10** - * - Leader LocalLogStartOffset: **10** (No deletions). - * - * Scenario: - * - The leader log starts at offset 10 with batches at 10, 150, and 199. - * - The follower starts fetching from offset 10. - * - * Expected Outcomes: - * 1. After fetching, the replica log matches the leader: - * - LogStartOffset = 10 - * - LocalLogStartOffset = 10 - * - LogEndOffset = 200 - * - HighWatermark = 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroNoLocalDeletions(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LogStartOffset = LocalLogStartOffset = 10 - mkBatch(baseOffset = 10, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled, - ) - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(10, replicaState.localLogStartOffset) - assertEquals(10, replicaState.logEndOffset) - assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 3) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(10, replicaState.logStartOffset) - assertEquals(10, replicaState.localLogStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and Local Deletions - * - * Purpose: - * - Validate follower adjustments when the leader has log deletions causing - * LocalLogStartOffset > LogStartOffset. - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **10** - * - Leader LocalLogStartOffset: **100** (All older segments deleted locally). - * - * Scenario: - * - The leader log starts at offset 10 but the local log starts at offset 100. - * - The follower fetch starts at offset 10 but adjusts for local deletions. - * - * Expected Outcomes: - * 1. Initial fetch offset adjustments: - * - First adjustment: LogEndOffset = 10 (after offset-out-of-range error) - * - Second adjustment: LogEndOffset = 100 (after offset-moved-to-tiered-storage error) - * 2. After successful fetches: - * - 3 record batches fetched - * - LogStartOffset = 10 - * - LocalLogStartOffset = 100 - * - LogEndOffset = 200 - * - HighWatermark = 199 - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroWithLocalDeletions(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LocalLogStartOffset = 100 - mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - mkBatch(baseOffset = 199, leaderEpoch = 0, new SimpleRecord("e".getBytes)) - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 199L, - rlmEnabled = rlmEnabled, - ) - leaderState.logStartOffset = 10 - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - // On offset-out-of-range error, fetch offset is updated - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(10, replicaState.localLogStartOffset) - assertEquals(10, replicaState.logEndOffset) - assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) - - fetcher.doWork() - // On offset-moved-to-tiered-storage error, fetch offset is updated - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(100, replicaState.localLogStartOffset) - assertEquals(100, replicaState.logEndOffset) - assertEquals(Some(100), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Only 1 record batch is returned after a poll so calling 'n' number of times to get the desired result. - for (_ <- 1 to 3) fetcher.doWork() - assertEquals(3, replicaState.log.size) - assertEquals(10, replicaState.logStartOffset) - assertEquals(100, replicaState.localLogStartOffset) - assertEquals(200, replicaState.logEndOffset) - assertEquals(199, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, All Local Segments Deleted - * - * Purpose: - * - Handle scenarios where all local segments have been deleted: - * - LocalLogStartOffset > LogStartOffset. - * - LocalLogStartOffset = LogEndOffset. - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **0 or > 0** - * - Leader LocalLogStartOffset: Leader LogEndOffset (all segments deleted locally). - * - * Expected Outcomes: - * 1. Follower state is adjusted to reflect local deletions: - * - LocalLogStartOffset = LogEndOffset. - * - No new data remains to fetch. - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetZeroAllLocalSegmentsDeleted(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LocalLogStartOffset = 100 - mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 151L, - rlmEnabled = rlmEnabled - ) - leaderState.logStartOffset = 0 - // Set Local Log Start Offset to Log End Offset - leaderState.localLogStartOffset = 151 - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - - // On offset-moved-to-tiered-storage error, fetch offset is updated - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(151, replicaState.localLogStartOffset) - assertEquals(151, replicaState.logEndOffset) - assertEquals(151, replicaState.highWatermark) - assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Call once again to see if new data is received - fetcher.doWork() - // No metadata update expected - assertEquals(0, replicaState.log.size) - assertEquals(0, replicaState.logStartOffset) - assertEquals(151, replicaState.localLogStartOffset) - assertEquals(151, replicaState.logEndOffset) - assertEquals(151, replicaState.highWatermark) - } - - /** - * Test: Empty Follower Fetch with TieredStorage Enabled, Leader LogStartOffset != 0, and All Local Segments Deleted - * - * Purpose: - * - Validate follower behavior when TieredStorage is enabled, the leader's log starts at a non-zero offset, - * and all local log segments have been deleted. - * - * Conditions: - * - TieredStorage: **Enabled** - * - Leader LogStartOffset: **10** - * - Leader LocalLogStartOffset: **151** (all older segments deleted locally). - * - * Scenario: - * - The leader log contains record batches from offset 100, but all local segments up to offset 151 are deleted. - * - The follower starts at LogStartOffset = 10 and adjusts for local segment deletions. - * - * Expected Outcomes: - * 1. Follower detects offset adjustments due to local deletions: - * - LogStartOffset remains 10. - * - LocalLogStartOffset updates to 151. - * - LogEndOffset updates to 151. - * 2. HighWatermark aligns with the leader (151). - * 3. No new data is fetched since all relevant segments are deleted. - */ - @Test - def testEmptyFollowerFetchTieredStorageEnabledLeaderLogStartOffsetNonZeroAllLocalSegmentsDeleted(): Unit = { - val rlmEnabled = true - val partition = new TopicPartition("topic1", 0) - val mockLeaderEndpoint = new MockLeaderEndPoint(version = version) - val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) - val fetcher = new MockFetcherThread(mockLeaderEndpoint, mockTierStateMachine) - - val replicaState = emptyReplicaState(rlmEnabled, partition, fetcher) - - val leaderLog = Seq( - // LocalLogStartOffset = 100 - mkBatch(baseOffset = 100, leaderEpoch = 0, new SimpleRecord("c".getBytes)), - mkBatch(baseOffset = 150, leaderEpoch = 0, new SimpleRecord("d".getBytes)), - ) - - val leaderState = PartitionState( - leaderLog, - leaderEpoch = 0, - highWatermark = 151L, - rlmEnabled = rlmEnabled - ) - leaderState.logStartOffset = 10 - // Set Local Log Start Offset to Log End Offset - leaderState.localLogStartOffset = 151 - fetcher.mockLeader.setLeaderState(partition, leaderState) - fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) - - fetcher.doWork() - - // On offset-out-of-range error, fetch offset is updated - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(10, replicaState.localLogStartOffset) - assertEquals(10, replicaState.logEndOffset) - assertEquals(Some(10), fetcher.fetchState(partition).map(_.fetchOffset())) - - // On offset-moved-to-tiered-storage error, fetch offset is updated - fetcher.doWork() - assertEquals(Option(ReplicaState.FETCHING), fetcher.fetchState(partition).map(_.state)) - assertEquals(0, replicaState.log.size) - assertEquals(151, replicaState.localLogStartOffset) - assertEquals(151, replicaState.logEndOffset) - assertEquals(151, replicaState.highWatermark) - assertEquals(Some(151), fetcher.fetchState(partition).map(_.fetchOffset())) - - // Call once again to see if new data is received - fetcher.doWork() - // No metadata update expected - assertEquals(0, replicaState.log.size) - assertEquals(10, replicaState.logStartOffset) - assertEquals(151, replicaState.localLogStartOffset) - assertEquals(151, replicaState.logEndOffset) - assertEquals(151, replicaState.highWatermark) - } -} \ No newline at end of file +} diff --git a/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadWithIbp26Test.scala b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadWithIbp26Test.scala new file mode 100644 index 0000000000000..f2e04a4449877 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/AbstractFetcherThreadWithIbp26Test.scala @@ -0,0 +1,28 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import org.apache.kafka.common.Uuid + +class AbstractFetcherThreadWithIbp26Test extends AbstractFetcherThreadTest { + + override val truncateOnFetch = false + override val version = 11 + override val topicIds = Map.empty[String, Uuid] + +} diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala new file mode 100644 index 0000000000000..a4ea6ceca32d0 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnManagerTest.scala @@ -0,0 +1,463 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import com.yammer.metrics.core.{Histogram, Meter} +import kafka.utils.TestUtils +import org.apache.kafka.clients.{ClientResponse, NetworkClient} +import org.apache.kafka.common.errors.{AuthenticationException, SaslAuthenticationException, UnsupportedVersionException} +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} +import org.apache.kafka.common.message.AddPartitionsToTxnResponseData +import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResultCollection +import org.apache.kafka.common.{Node, TopicPartition} +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests.{AbstractResponse, AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, MetadataResponse} +import org.apache.kafka.common.utils.MockTime +import org.apache.kafka.metadata.LeaderAndIsr +import org.apache.kafka.server.metrics.KafkaMetricsGroup +import org.apache.kafka.server.util.RequestAndCompletionHandler +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource +import org.mockito.ArgumentMatchers +import org.mockito.ArgumentMatchers.{any, anyLong, anyString} +import org.mockito.MockedConstruction.Context +import org.mockito.Mockito.{mock, mockConstruction, times, verify, verifyNoMoreInteractions, when} + +import java.util +import java.util.concurrent.TimeUnit +import scala.collection.mutable +import scala.jdk.CollectionConverters._ + +class AddPartitionsToTxnManagerTest { + private val networkClient: NetworkClient = mock(classOf[NetworkClient]) + private val metadataCache: MetadataCache = mock(classOf[MetadataCache]) + private val partitionFor: String => Int = mock(classOf[String => Int]) + + private val time = new MockTime + + private var addPartitionsToTxnManager: AddPartitionsToTxnManager = _ + + private val topic = "foo" + private val topicPartitions = List(new TopicPartition(topic, 1), new TopicPartition(topic, 2), new TopicPartition(topic, 3)) + + private val node0 = new Node(0, "host1", 0) + private val node1 = new Node(1, "host2", 1) + private val node2 = new Node(2, "host2", 2) + + private val transactionalId1 = "txn1" + private val transactionalId2 = "txn2" + private val transactionalId3 = "txn3" + + private val producerId1 = 0L + private val producerId2 = 1L + private val producerId3 = 2L + + private val authenticationErrorResponse = clientResponse(null, authException = new SaslAuthenticationException("")) + private val versionMismatchResponse = clientResponse(null, mismatchException = new UnsupportedVersionException("")) + private val disconnectedResponse = clientResponse(null, disconnected = true) + private val transactionSupportedOperation = genericErrorSupported + + private val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(1)) + + @BeforeEach + def setup(): Unit = { + addPartitionsToTxnManager = new AddPartitionsToTxnManager( + config, + networkClient, + metadataCache, + partitionFor, + time + ) + } + + @AfterEach + def teardown(): Unit = { + addPartitionsToTxnManager.shutdown() + } + + private def setErrors(errors: mutable.Map[TopicPartition, Errors])(callbackErrors: Map[TopicPartition, Errors]): Unit = { + callbackErrors.foreachEntry(errors.put) + } + + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testAddTxnData(isAddPartition: Boolean): Unit = { + val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported + when(partitionFor.apply(transactionalId1)).thenReturn(0) + when(partitionFor.apply(transactionalId2)).thenReturn(1) + when(partitionFor.apply(transactionalId3)).thenReturn(0) + mockTransactionStateMetadata(0, 0, Some(node0)) + mockTransactionStateMetadata(1, 1, Some(node1)) + + val transaction1Errors = mutable.Map[TopicPartition, Errors]() + val transaction2Errors = mutable.Map[TopicPartition, Errors]() + val transaction3Errors = mutable.Map[TopicPartition, Errors]() + + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transaction3Errors), transactionSupportedOperation) + + // We will try to add transaction1 3 more times (retries). One will have the same epoch, one will have a newer epoch, and one will have an older epoch than the new one we just added. + val transaction1RetryWithSameEpochErrors = mutable.Map[TopicPartition, Errors]() + val transaction1RetryWithNewerEpochErrors = mutable.Map[TopicPartition, Errors]() + val transaction1RetryWithOldEpochErrors = mutable.Map[TopicPartition, Errors]() + + // Trying to add more transactional data for the same transactional ID, producer ID, and epoch should simply replace the old data and send a retriable response. + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithSameEpochErrors), transactionSupportedOperation) + val expectedNetworkErrors = topicPartitions.map(_ -> Errors.NETWORK_EXCEPTION).toMap + assertEquals(expectedNetworkErrors, transaction1Errors) + + // Trying to add more transactional data for the same transactional ID and producer ID, but new epoch should replace the old data and send an error response for it. + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 1, topicPartitions, setErrors(transaction1RetryWithNewerEpochErrors), transactionSupportedOperation) + val expectedEpochErrors = topicPartitions.map(_ -> Errors.INVALID_PRODUCER_EPOCH).toMap + assertEquals(expectedEpochErrors, transaction1RetryWithSameEpochErrors) + + // Trying to add more transactional data for the same transactional ID and producer ID, but an older epoch should immediately return with error and keep the old data queued to send. + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1RetryWithOldEpochErrors), transactionSupportedOperation) + assertEquals(expectedEpochErrors, transaction1RetryWithOldEpochErrors) + + val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala + requestsAndHandlers.foreach { requestAndHandler => + if (requestAndHandler.destination == node0) { + assertEquals(time.milliseconds(), requestAndHandler.creationTimeMs) + assertEquals( + AddPartitionsToTxnRequest.Builder.forBroker( + new AddPartitionsToTxnTransactionCollection(Seq( + transactionData(transactionalId3, producerId3, verifyOnly = !isAddPartition), + transactionData(transactionalId1, producerId1, producerEpoch = 1, verifyOnly = !isAddPartition) + ).iterator.asJava) + ).data, + requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data // insertion order + ) + } else { + verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) + } + } + } + + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testGenerateRequests(isAddPartition: Boolean): Unit = { + when(partitionFor.apply(transactionalId1)).thenReturn(0) + when(partitionFor.apply(transactionalId2)).thenReturn(1) + when(partitionFor.apply(transactionalId3)).thenReturn(2) + mockTransactionStateMetadata(0, 0, Some(node0)) + mockTransactionStateMetadata(1, 1, Some(node1)) + mockTransactionStateMetadata(2, 2, Some(node2)) + val transactionSupportedOperation = if (isAddPartition) addPartition else genericErrorSupported + + val transactionErrors = mutable.Map[TopicPartition, Errors]() + + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + + val requestsAndHandlers = addPartitionsToTxnManager.generateRequests().asScala + assertEquals(2, requestsAndHandlers.size) + // Note: handlers are tested in testAddPartitionsToTxnHandlerErrorHandling + requestsAndHandlers.foreach { requestAndHandler => + if (requestAndHandler.destination == node0) verifyRequest(node0, transactionalId1, producerId1, !isAddPartition, requestAndHandler) + else verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) + } + + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId3, producerId3, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + + // Test creationTimeMs increases too. + time.sleep(10) + + val requestsAndHandlers2 = addPartitionsToTxnManager.generateRequests().asScala + // The request for node1 should not be added because one request is already inflight. + assertEquals(1, requestsAndHandlers2.size) + requestsAndHandlers2.foreach { requestAndHandler => + verifyRequest(node2, transactionalId3, producerId3, !isAddPartition, requestAndHandler) + } + + // Complete the request for node1 so the new one can go through. + requestsAndHandlers.filter(_.destination == node1).head.handler.onComplete(authenticationErrorResponse) + val requestsAndHandlers3 = addPartitionsToTxnManager.generateRequests().asScala + assertEquals(1, requestsAndHandlers3.size) + requestsAndHandlers3.foreach { requestAndHandler => + verifyRequest(node1, transactionalId2, producerId2, !isAddPartition, requestAndHandler) + } + } + + @Test + def testTransactionCoordinatorResolution(): Unit = { + when(partitionFor.apply(transactionalId1)).thenReturn(0) + + def checkError(): Unit = { + val errors = mutable.Map[TopicPartition, Errors]() + + addPartitionsToTxnManager.addOrVerifyTransaction( + transactionalId1, + producerId1, + producerEpoch = 0, + topicPartitions, + setErrors(errors), + transactionSupportedOperation + ) + + assertEquals(topicPartitions.map(tp => tp -> Errors.COORDINATOR_NOT_AVAILABLE).toMap, errors) + } + + // The transaction state topic does not exist. + when(metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, 0)) + .thenReturn(Option.empty) + checkError() + + // The partition has no leader. + mockTransactionStateMetadata(0, -1, Option.empty) + checkError() + + // The leader is not available. + mockTransactionStateMetadata(0, 0, Option.empty) + checkError() + } + + @Test + def testAddPartitionsToTxnHandlerErrorHandling(): Unit = { + when(partitionFor.apply(transactionalId1)).thenReturn(0) + when(partitionFor.apply(transactionalId2)).thenReturn(0) + mockTransactionStateMetadata(0, 0, Some(node0)) + + val transaction1Errors = mutable.Map[TopicPartition, Errors]() + val transaction2Errors = mutable.Map[TopicPartition, Errors]() + + def addTransactionsToVerify(): Unit = { + transaction1Errors.clear() + transaction2Errors.clear() + + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), transactionSupportedOperation) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), transactionSupportedOperation) + } + + def addTransactionsToVerifyRequestVersion(operationExpected: TransactionSupportedOperation): Unit = { + transaction1Errors.clear() + transaction2Errors.clear() + + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transaction1Errors), operationExpected) + addPartitionsToTxnManager.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transaction2Errors), operationExpected) + } + + val expectedAuthErrors = topicPartitions.map(_ -> Errors.SASL_AUTHENTICATION_FAILED).toMap + addTransactionsToVerify() + receiveResponse(authenticationErrorResponse) + assertEquals(expectedAuthErrors, transaction1Errors) + assertEquals(expectedAuthErrors, transaction2Errors) + + // On version mismatch we ignore errors and keep handling. + val expectedVersionMismatchErrors = mutable.HashMap[TopicPartition, Errors]() + addTransactionsToVerify() + receiveResponse(versionMismatchResponse) + assertEquals(expectedVersionMismatchErrors, transaction1Errors) + assertEquals(expectedVersionMismatchErrors, transaction2Errors) + + val expectedDisconnectedErrors = topicPartitions.map(_ -> Errors.NETWORK_EXCEPTION).toMap + addTransactionsToVerify() + receiveResponse(disconnectedResponse) + assertEquals(expectedDisconnectedErrors, transaction1Errors) + assertEquals(expectedDisconnectedErrors, transaction2Errors) + + val expectedTopLevelErrors = topicPartitions.map(_ -> Errors.INVALID_TXN_STATE).toMap + val topLevelErrorAddPartitionsResponse = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData().setErrorCode(Errors.CLUSTER_AUTHORIZATION_FAILED.code())) + val topLevelErrorResponse = clientResponse(topLevelErrorAddPartitionsResponse) + addTransactionsToVerify() + receiveResponse(topLevelErrorResponse) + assertEquals(expectedTopLevelErrors, transaction1Errors) + assertEquals(expectedTopLevelErrors, transaction2Errors) + + val preConvertedTransaction1Errors = topicPartitions.map(_ -> Errors.PRODUCER_FENCED).toMap + val expectedTransaction1Errors = topicPartitions.map(_ -> Errors.INVALID_PRODUCER_EPOCH).toMap + val preConvertedTransaction2Errors = Map(new TopicPartition("foo", 1) -> Errors.NONE, + new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE, + new TopicPartition("foo", 3) -> Errors.NONE) + val expectedTransaction2Errors = Map(new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE) + + val transaction1ErrorResponse = AddPartitionsToTxnResponse.resultForTransaction(transactionalId1, preConvertedTransaction1Errors.asJava) + val transaction2ErrorResponse = AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, preConvertedTransaction2Errors.asJava) + val mixedErrorsAddPartitionsResponse = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData() + .setResultsByTransaction(new AddPartitionsToTxnResultCollection(Seq(transaction1ErrorResponse, transaction2ErrorResponse).iterator.asJava))) + val mixedErrorsResponse = clientResponse(mixedErrorsAddPartitionsResponse) + + addTransactionsToVerify() + receiveResponse(mixedErrorsResponse) + assertEquals(expectedTransaction1Errors, transaction1Errors) + assertEquals(expectedTransaction2Errors, transaction2Errors) + + val preConvertedTransactionAbortableErrorsTxn1 = topicPartitions.map(_ -> Errors.TRANSACTION_ABORTABLE).toMap + val preConvertedTransactionAbortableErrorsTxn2 = Map(new TopicPartition("foo", 1) -> Errors.NONE, + new TopicPartition("foo", 2) -> Errors.TRANSACTION_ABORTABLE, + new TopicPartition("foo", 3) -> Errors.NONE) + val transactionAbortableErrorResponseTxn1 = AddPartitionsToTxnResponse.resultForTransaction(transactionalId1, preConvertedTransactionAbortableErrorsTxn1.asJava) + val transactionAbortableErrorResponseTxn2 = AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, preConvertedTransactionAbortableErrorsTxn2.asJava) + val mixedErrorsAddPartitionsResponseAbortableError = new AddPartitionsToTxnResponse(new AddPartitionsToTxnResponseData() + .setResultsByTransaction(new AddPartitionsToTxnResultCollection(Seq(transactionAbortableErrorResponseTxn1, transactionAbortableErrorResponseTxn2).iterator.asJava))) + val mixedAbortableErrorsResponse = clientResponse(mixedErrorsAddPartitionsResponseAbortableError) + + val expectedTransactionAbortableErrorsTxn1LowerVersion = topicPartitions.map(_ -> Errors.INVALID_TXN_STATE).toMap + val expectedTransactionAbortableErrorsTxn2LowerVersion = Map(new TopicPartition("foo", 2) -> Errors.INVALID_TXN_STATE) + + val expectedTransactionAbortableErrorsTxn1HigherVersion = topicPartitions.map(_ -> Errors.TRANSACTION_ABORTABLE).toMap + val expectedTransactionAbortableErrorsTxn2HigherVersion = Map(new TopicPartition("foo", 2) -> Errors.TRANSACTION_ABORTABLE) + + addTransactionsToVerifyRequestVersion(defaultError) + receiveResponse(mixedAbortableErrorsResponse) + assertEquals(expectedTransactionAbortableErrorsTxn1LowerVersion, transaction1Errors) + assertEquals(expectedTransactionAbortableErrorsTxn2LowerVersion, transaction2Errors) + + addTransactionsToVerifyRequestVersion(genericErrorSupported) + receiveResponse(mixedAbortableErrorsResponse) + assertEquals(expectedTransactionAbortableErrorsTxn1HigherVersion, transaction1Errors) + assertEquals(expectedTransactionAbortableErrorsTxn2HigherVersion, transaction2Errors) + } + + @Test + def testAddPartitionsToTxnManagerMetrics(): Unit = { + val startTime = time.milliseconds() + val transactionErrors = mutable.Map[TopicPartition, Errors]() + + var maxVerificationTime: Long = 0 + val mockVerificationFailureMeter = mock(classOf[Meter]) + val mockVerificationTime = mock(classOf[Histogram]) + + when(partitionFor.apply(transactionalId1)).thenReturn(0) + when(partitionFor.apply(transactionalId2)).thenReturn(1) + mockTransactionStateMetadata(0, 0, Some(node0)) + mockTransactionStateMetadata(1, 1, Some(node1)) + + // Update max verification time when we see a higher verification time. + when(mockVerificationTime.update(anyLong())).thenAnswer { invocation => + val newTime = invocation.getArgument(0).asInstanceOf[Long] + if (newTime > maxVerificationTime) + maxVerificationTime = newTime + } + + val mockMetricsGroupCtor = mockConstruction(classOf[KafkaMetricsGroup], (mock: KafkaMetricsGroup, context: Context) => { + when(mock.newMeter(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationFailureRateMetricName), anyString(), any(classOf[TimeUnit]))).thenReturn(mockVerificationFailureMeter) + when(mock.newHistogram(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationTimeMsMetricName))).thenReturn(mockVerificationTime) + }) + + val addPartitionsManagerWithMockedMetrics = new AddPartitionsToTxnManager( + config, + networkClient, + metadataCache, + partitionFor, + time + ) + + try { + addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId1, producerId1, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + addPartitionsManagerWithMockedMetrics.addOrVerifyTransaction(transactionalId2, producerId2, producerEpoch = 0, topicPartitions, setErrors(transactionErrors), transactionSupportedOperation) + + time.sleep(100) + + val requestsAndHandlers = addPartitionsManagerWithMockedMetrics.generateRequests() + var requestsHandled = 0 + + requestsAndHandlers.forEach { requestAndCompletionHandler => + time.sleep(100) + requestAndCompletionHandler.handler.onComplete(authenticationErrorResponse) + requestsHandled += 1 + verify(mockVerificationTime, times(requestsHandled)).update(anyLong()) + assertEquals(maxVerificationTime, time.milliseconds() - startTime) + verify(mockVerificationFailureMeter, times(requestsHandled)).mark(3) // since there are 3 partitions + } + + // shutdown the manager so that metrics are removed. + addPartitionsManagerWithMockedMetrics.shutdown() + + val mockMetricsGroup = mockMetricsGroupCtor.constructed.get(0) + + verify(mockMetricsGroup).newMeter(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationFailureRateMetricName), anyString(), any(classOf[TimeUnit])) + verify(mockMetricsGroup).newHistogram(ArgumentMatchers.eq(AddPartitionsToTxnManager.VerificationTimeMsMetricName)) + verify(mockMetricsGroup).removeMetric(AddPartitionsToTxnManager.VerificationFailureRateMetricName) + verify(mockMetricsGroup).removeMetric(AddPartitionsToTxnManager.VerificationTimeMsMetricName) + + // assert that we have verified all invocations on the metrics group. + verifyNoMoreInteractions(mockMetricsGroup) + } finally { + if (mockMetricsGroupCtor != null) { + mockMetricsGroupCtor.close() + } + if (addPartitionsManagerWithMockedMetrics.isRunning) { + addPartitionsManagerWithMockedMetrics.shutdown() + } + } + } + + private def mockTransactionStateMetadata(partitionIndex: Int, leaderId: Int, leaderNode: Option[Node]): Unit = { + when(metadataCache.getLeaderAndIsr(Topic.TRANSACTION_STATE_TOPIC_NAME, partitionIndex)) + .thenReturn(Some(new LeaderAndIsr(leaderId, util.Arrays.asList(leaderId)))) + if (leaderId != MetadataResponse.NO_LEADER_ID) { + when(metadataCache.getAliveBrokerNode(leaderId, config.interBrokerListenerName)) + .thenReturn(leaderNode) + } + } + + private def clientResponse( + response: AbstractResponse, + authException: AuthenticationException = null, + mismatchException: UnsupportedVersionException = null, + disconnected: Boolean = false + ): ClientResponse = { + new ClientResponse(null, null, null, 0, 0, disconnected, mismatchException, authException, response) + } + + private def transactionData( + transactionalId: String, + producerId: Long, + producerEpoch: Short = 0, + verifyOnly: Boolean, + ): AddPartitionsToTxnTransaction = { + new AddPartitionsToTxnTransaction() + .setTransactionalId(transactionalId) + .setProducerId(producerId) + .setProducerEpoch(producerEpoch) + .setVerifyOnly(verifyOnly) + .setTopics(new AddPartitionsToTxnTopicCollection( + Seq(new AddPartitionsToTxnTopic() + .setName(topic) + .setPartitions(Seq[Integer](1, 2, 3).asJava)).iterator.asJava)) + } + + private def receiveResponse(response: ClientResponse): Unit = { + addPartitionsToTxnManager.generateRequests().asScala.head.handler.onComplete(response) + } + + private def verifyRequest( + expectedDestination: Node, + transactionalId: String, + producerId: Long, + verifyOnly: Boolean, + requestAndHandler: RequestAndCompletionHandler + ): Unit = { + assertEquals(time.milliseconds(), requestAndHandler.creationTimeMs) + assertEquals(expectedDestination, requestAndHandler.destination) + assertEquals( + AddPartitionsToTxnRequest.Builder.forBroker( + new AddPartitionsToTxnTransactionCollection( + Seq(transactionData(transactionalId, producerId, verifyOnly = verifyOnly)).iterator.asJava + ) + ).data, + requestAndHandler.request.asInstanceOf[AddPartitionsToTxnRequest.Builder].data + ) + } +} diff --git a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala index 406609239a002..1b35f93961946 100644 --- a/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AddPartitionsToTxnRequestServerTest.scala @@ -32,9 +32,9 @@ import org.apache.kafka.common.requests.FindCoordinatorRequest.CoordinatorType import org.apache.kafka.common.requests.{AddPartitionsToTxnRequest, AddPartitionsToTxnResponse, FindCoordinatorRequest, FindCoordinatorResponse, InitProducerIdRequest, InitProducerIdResponse} import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.{Arguments, MethodSource} +import org.junit.jupiter.params.provider.{Arguments, MethodSource, ValueSource} import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -55,7 +55,7 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { @ParameterizedTest @MethodSource(value = Array("parameters")) - def shouldReceiveOperationNotAttemptedWhenOtherPartitionHasError(version: Short): Unit = { + def shouldReceiveOperationNotAttemptedWhenOtherPartitionHasError(quorum: String, version: Short): Unit = { // The basic idea is that we have one unknown topic and one created topic. We should get the 'UNKNOWN_TOPIC_OR_PARTITION' // error for the unknown topic and the 'OPERATION_NOT_ATTEMPTED' error for the known and authorized topic. val nonExistentTopic = new TopicPartition("unknownTopic", 0) @@ -110,8 +110,9 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, errors.get(nonExistentTopic)) } - @Test - def testOneSuccessOneErrorInBatchedRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testOneSuccessOneErrorInBatchedRequest(quorum: String): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId1 = "foobar" val transactionalId2 = "barfoo" // "barfoo" maps to the same transaction coordinator @@ -148,8 +149,9 @@ class AddPartitionsToTxnRequestServerTest extends BaseRequestTest { assertEquals(expectedErrors, errors) } - @Test - def testVerifyOnly(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testVerifyOnly(quorum: String): Unit = { val tp0 = new TopicPartition(topic1, 0) val transactionalId = "foobar" @@ -207,7 +209,7 @@ object AddPartitionsToTxnRequestServerTest { def parameters: JStream[Arguments] = { val arguments = mutable.ListBuffer[Arguments]() ApiKeys.ADD_PARTITIONS_TO_TXN.allVersions().forEach { version => - arguments += Arguments.of(version) + arguments += Arguments.of("kraft", version) } arguments.asJava.stream() } diff --git a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala index 16a82fdca8b30..d54e3227f80c4 100644 --- a/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AllocateProducerIdsRequestTest.scala @@ -17,13 +17,12 @@ package unit.kafka.server import kafka.network.SocketServer -import kafka.server.{BrokerServer, ControllerServer} +import kafka.server.{BrokerServer, ControllerServer, IntegrationTestUtils} import org.apache.kafka.common.test.api.{ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.message.AllocateProducerIdsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.IntegrationTestUtils import org.apache.kafka.server.common.ProducerIdsBlock import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} @@ -34,7 +33,7 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { def testAllocateProducersIdSentToController(): Unit = { val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] - val controllerId = sourceBroker.raftManager.client.leaderAndEpoch.leaderId().getAsInt + val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt val controllerServer = cluster.controllers.values().stream() .filter(_.config.nodeId == controllerId) .findFirst() @@ -50,7 +49,7 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { def testAllocateProducersIdSentToNonController(): Unit = { val sourceBroker = cluster.brokers.values().stream().findFirst().get().asInstanceOf[BrokerServer] - val controllerId = sourceBroker.raftManager.client.leaderAndEpoch.leaderId().getAsInt + val controllerId = sourceBroker.raftManager.leaderAndEpoch.leaderId().getAsInt val controllerServer = cluster.controllers().values().stream() .filter(_.config.nodeId != controllerId) .findFirst() @@ -82,7 +81,9 @@ class AllocateProducerIdsRequestTest(cluster: ClusterInstance) { ): AllocateProducerIdsResponse = { IntegrationTestUtils.connectAndReceive[AllocateProducerIdsResponse]( request, - controllerSocketServer.boundPort(cluster.controllerListenerName()) + controllerSocketServer, + cluster.controllerListenerName.get ) } + } diff --git a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala index 4793723bc6aea..eb2a499db2cba 100644 --- a/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterPartitionManagerTest.scala @@ -139,7 +139,7 @@ class AlterPartitionManagerTest { val failedSubmitFuture = alterPartitionManager.submit(tp0, new LeaderAndIsr(1, 1, List(1, 2).map(Int.box).asJava, LeaderRecoveryState.RECOVERED, 10)) assertTrue(failedSubmitFuture.isCompletedExceptionally) - assertFutureThrows(classOf[OperationNotAttemptedException], failedSubmitFuture) + assertFutureThrows(failedSubmitFuture, classOf[OperationNotAttemptedException]) // Simulate response val alterPartitionResp = partitionResponse() @@ -364,7 +364,7 @@ class AlterPartitionManagerTest { val resp = makeClientResponse(alterPartitionResp, ApiKeys.ALTER_PARTITION.latestVersion) callbackCapture.getValue.onComplete(resp) assertTrue(future.isCompletedExceptionally) - assertFutureThrows(error.exception.getClass, future) + assertFutureThrows(future, error.exception.getClass) alterPartitionManager } @@ -426,7 +426,7 @@ class AlterPartitionManagerTest { response = partitionResponse(tp0, Errors.UNKNOWN_SERVER_ERROR), version = expectedVersion )) - assertFutureThrows(classOf[UnknownServerException], future1) + assertFutureThrows(future1, classOf[UnknownServerException]) assertFalse(future2.isDone) assertFalse(future3.isDone) @@ -439,7 +439,7 @@ class AlterPartitionManagerTest { response = partitionResponse(tp2, Errors.UNKNOWN_SERVER_ERROR), version = expectedVersion )) - assertFutureThrows(classOf[UnknownServerException], future3) + assertFutureThrows(future3, classOf[UnknownServerException]) assertFalse(future2.isDone) // The missing partition should be retried @@ -451,7 +451,7 @@ class AlterPartitionManagerTest { response = partitionResponse(tp1, Errors.UNKNOWN_SERVER_ERROR), version = expectedVersion )) - assertFutureThrows(classOf[UnknownServerException], future2) + assertFutureThrows(future2, classOf[UnknownServerException]) } private def verifySendRequest( @@ -505,7 +505,7 @@ class AlterPartitionManagerTest { null, // Response is serialized and deserialized to ensure that its does // not contain ignorable fields used by other versions. - AlterPartitionResponse.parse(MessageUtil.toByteBufferAccessor(response.data, version), version) + AlterPartitionResponse.parse(MessageUtil.toByteBuffer(response.data, version), version) ) } diff --git a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala index 16cce3ed81a44..8e2698b0842cf 100644 --- a/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterReplicaLogDirsRequestTest.scala @@ -25,9 +25,10 @@ import org.apache.kafka.common.message.AlterReplicaLogDirsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AlterReplicaLogDirsRequest, AlterReplicaLogDirsResponse} import org.apache.kafka.server.config.ServerLogConfigs -import org.apache.kafka.storage.internals.log.{LogConfig, LogFileUtils} +import org.apache.kafka.storage.internals.log.LogFileUtils import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.Properties import scala.jdk.CollectionConverters._ @@ -51,12 +52,13 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { .find(p => p.partitionIndex == tp.partition).get.errorCode) } - @Test - def testAlterReplicaLogDirsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequest(quorum: String): Unit = { val partitionNum = 5 // Alter replica dir before topic creation - val logDir1 = new File(brokers.head.config.logDirs.get(Random.nextInt(logDirCount))).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) @@ -73,7 +75,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { } // Alter replica dir again after topic creation - val logDir2 = new File(brokers.head.config.logDirs.get(Random.nextInt(logDirCount))).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs(Random.nextInt(logDirCount))).getAbsolutePath val partitionDirs2 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir2).toMap val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(partitionDirs2) // The response should succeed for all partitions @@ -86,12 +88,13 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { } } - @Test - def testAlterReplicaLogDirsRequestErrorCode(): Unit = { - val offlineDir = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath - val validDir1 = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath - val validDir2 = new File(brokers.head.config.logDirs.get(2)).getAbsolutePath - val validDir3 = new File(brokers.head.config.logDirs.get(3)).getAbsolutePath + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequestErrorCode(quorum: String): Unit = { + val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath + val validDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath + val validDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath + val validDir3 = new File(brokers.head.config.logDirs(3)).getAbsolutePath // Test AlterReplicaDirRequest before topic creation val partitionDirs1 = mutable.Map.empty[TopicPartition, String] @@ -124,12 +127,13 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { assertEquals(Errors.KAFKA_STORAGE_ERROR, findErrorForPartition(alterReplicaDirResponse3, new TopicPartition(topic, 2))) } - @Test - def testAlterReplicaLogDirsRequestWithRetention(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterReplicaLogDirsRequestWithRetention(quorum: String): Unit = { val partitionNum = 1 // Alter replica dir before topic creation - val logDir1 = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath + val logDir1 = new File(brokers.head.config.logDirs(1)).getAbsolutePath val partitionDirs1 = (0 until partitionNum).map(partition => new TopicPartition(topic, partition) -> logDir1).toMap val alterReplicaLogDirsResponse1 = sendAlterReplicaLogDirsRequest(partitionDirs1) @@ -144,7 +148,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { // We don't want files with `.deleted` suffix are removed too fast, // so we can validate there will be orphan files and orphan files will be removed eventually. topicProperties.put(TopicConfig.FILE_DELETE_DELAY_MS_CONFIG, "10000") - topicProperties.put(LogConfig.INTERNAL_SEGMENT_BYTES_CONFIG, "1024") + topicProperties.put(TopicConfig.SEGMENT_BYTES_CONFIG, "1024") createTopic(topic, partitionNum, 1, topicProperties) assertEquals(logDir1, brokers.head.logManager.getLog(tp).get.dir.getParent) @@ -162,7 +166,7 @@ class AlterReplicaLogDirsRequestTest extends BaseRequestTest { }, "timed out waiting for log segment to retention") // Alter replica dir again after topic creation - val logDir2 = new File(brokers.head.config.logDirs.get(2)).getAbsolutePath + val logDir2 = new File(brokers.head.config.logDirs(2)).getAbsolutePath val alterReplicaLogDirsResponse2 = sendAlterReplicaLogDirsRequest(Map(tp -> logDir2)) // The response should succeed for all partitions assertEquals(Errors.NONE, findErrorForPartition(alterReplicaLogDirsResponse2, tp)) diff --git a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala index 99231470b12b0..7ea3052925be9 100644 --- a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestNotAuthorizedTest.scala @@ -25,7 +25,9 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{AlterUserScramCredentialsRequest, AlterUserScramCredentialsResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{Test, TestInfo} +import org.junit.jupiter.api.TestInfo +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util import java.util.Properties @@ -52,8 +54,9 @@ class AlterUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTest private val user1 = "user1" private val user2 = "user2" - @Test - def testAlterNothingNotAuthorized(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterNothingNotAuthorized(quorum: String): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(new util.ArrayList[AlterUserScramCredentialsRequestData.ScramCredentialDeletion]) @@ -64,8 +67,9 @@ class AlterUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTest assertEquals(0, results.size) } - @Test - def testAlterSomethingNotAuthorized(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterSomethingNotAuthorized(quorum: String): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) diff --git a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala index 16c6203bac579..ced7887351082 100644 --- a/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/AlterUserScramCredentialsRequestTest.scala @@ -34,7 +34,7 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult} import org.apache.kafka.server.common.MetadataVersion import org.apache.kafka.server.config.ServerConfigs -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.ValueSource @@ -79,8 +79,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { private val user3 = "user3@user3.com" private val unknownUser = "unknownUser" - @Test - def testAlterNothing(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterNothing(quorum: String): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(new util.ArrayList[AlterUserScramCredentialsRequestData.ScramCredentialDeletion]) @@ -91,8 +92,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(0, results.size) } - @Test - def testAlterSameThingTwice(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterSameThingTwice(quorum: String): Unit = { val deletion1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val deletion2 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user2).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val upsertion1 = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) @@ -131,8 +133,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { }) } - @Test - def testAlterEmptyUser(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterEmptyUser(quorum: String): Unit = { val deletionEmpty = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName("").setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val upsertionEmpty = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName("").setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) .setIterations(4096).setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -159,8 +162,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { }) } - @Test - def testAlterUnknownMechanism(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterUnknownMechanism(quorum: String): Unit = { val deletionUnknown1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.UNKNOWN.`type`) val deletionValid1 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) val deletionUnknown2 = new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user2).setMechanism(10.toByte) @@ -186,8 +190,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { results.asScala.foreach(result => assertEquals("Unknown SCRAM mechanism", result.errorMessage)) } - @Test - def testAlterTooFewIterations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterTooFewIterations(quorum: String): Unit = { val upsertionTooFewIterations = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1) .setMechanism(ScramMechanism.SCRAM_SHA_256.`type`).setIterations(1) .setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -202,8 +207,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals("Too few iterations", results.get(0).errorMessage) } - @Test - def testAlterTooManyIterations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterTooManyIterations(quorum: String): Unit = { val upsertionTooFewIterations = new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion().setName(user1) .setMechanism(ScramMechanism.SCRAM_SHA_256.`type`).setIterations(Integer.MAX_VALUE) .setSalt(saltBytes).setSaltedPassword(saltedPasswordBytes) @@ -218,8 +224,9 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals("Too many iterations", results.get(0).errorMessage) } - @Test - def testDeleteSomethingThatDoesNotExist(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteSomethingThatDoesNotExist(quorum: String): Unit = { val request = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() .setDeletions(util.Arrays.asList(new AlterUserScramCredentialsRequestData.ScramCredentialDeletion().setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) @@ -231,14 +238,12 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { checkAllErrorsAlteringCredentials(results, Errors.RESOURCE_NOT_FOUND, "when deleting a non-existing credential") } - @Test - def testAlterAndDescribe(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAlterAndDescribe(quorum: String): Unit = { // create a bunch of credentials val request1_0 = new AlterUserScramCredentialsRequest.Builder( new AlterUserScramCredentialsRequestData() - .setDeletions(util.Arrays.asList( - new AlterUserScramCredentialsRequestData.ScramCredentialDeletion() - .setName(user2).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`))) .setUpsertions(util.Arrays.asList( new AlterUserScramCredentialsRequestData.ScramCredentialUpsertion() .setName(user1).setMechanism(ScramMechanism.SCRAM_SHA_256.`type`) @@ -246,15 +251,10 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { .setSalt(saltBytes) .setSaltedPassword(saltedPasswordBytes), ))).build() - assertEquals("AlterUserScramCredentialsRequestData(" + - "deletions=[ScramCredentialDeletion(name='" + user2 + "', mechanism=" + ScramMechanism.SCRAM_SHA_256.`type` + ")], " + - "upsertions=[ScramCredentialUpsertion(name='" + user1 + "', mechanism=" + ScramMechanism.SCRAM_SHA_256.`type` + - ", iterations=4096, salt=[], saltedPassword=[])])", request1_0.toString) val results1_0 = sendAlterUserScramCredentialsRequest(request1_0).data.results - assertEquals(2, results1_0.size) - assertEquals(1, results1_0.asScala.count(_.errorCode == Errors.RESOURCE_NOT_FOUND.code())) + assertEquals(1, results1_0.size) + checkNoErrorsAlteringCredentials(results1_0) checkUserAppearsInAlterResults(results1_0, user1) - checkUserAppearsInAlterResults(results1_0, user2) // When creating credentials, do not update the same user more than once per request val request1_1 = new AlterUserScramCredentialsRequest.Builder( @@ -276,8 +276,6 @@ class AlterUserScramCredentialsRequestTest extends BaseRequestTest { .setSalt(saltBytes) .setSaltedPassword(saltedPasswordBytes), ))).build() - assertFalse(request1_1.toString.contains(saltBytes)) - assertFalse(request1_1.toString.contains(saltedPasswordBytes)) val results1_1 = sendAlterUserScramCredentialsRequest(request1_1).data.results assertEquals(3, results1_1.size) checkNoErrorsAlteringCredentials(results1_1) diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala new file mode 100644 index 0000000000000..19a95ca945080 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/ApiVersionManagerTest.scala @@ -0,0 +1,133 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.server + +import org.apache.kafka.clients.NodeApiVersions +import org.apache.kafka.common.message.ApiMessageType.ListenerType +import org.apache.kafka.common.metadata.FeatureLevelRecord +import org.apache.kafka.common.protocol.ApiKeys +import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} +import org.apache.kafka.server.BrokerFeatures +import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion} +import org.junit.jupiter.api.Test +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.EnumSource +import org.mockito.Mockito + +import scala.jdk.CollectionConverters._ + +class ApiVersionManagerTest { + private val brokerFeatures = BrokerFeatures.createDefault(true) + private val metadataCache = { + val cache = MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.LATEST_PRODUCTION) + val delta = new MetadataDelta(MetadataImage.EMPTY); + delta.replay(new FeatureLevelRecord() + .setName(MetadataVersion.FEATURE_NAME) + .setFeatureLevel(MetadataVersion.latestProduction().featureLevel()) + ) + cache.setImage(delta.apply(MetadataProvenance.EMPTY)) + cache + } + + @ParameterizedTest + @EnumSource(classOf[ListenerType]) + def testApiScope(apiScope: ListenerType): Unit = { + val forwardingManager = Mockito.mock(classOf[ForwardingManager]) + val versionManager = new DefaultApiVersionManager( + listenerType = apiScope, + forwardingManager = forwardingManager, + brokerFeatures = brokerFeatures, + metadataCache = metadataCache, + enableUnstableLastVersion = true + ) + assertEquals(ApiKeys.apisForListener(apiScope).asScala, versionManager.enabledApis) + assertTrue(ApiKeys.apisForListener(apiScope).asScala.forall { apiKey => + apiKey.allVersions.asScala.forall { version => + versionManager.isApiEnabled(apiKey, version) + } + }) + } + + @ParameterizedTest + @EnumSource(classOf[ListenerType]) + def testDisabledApis(apiScope: ListenerType): Unit = { + val forwardingManager = Mockito.mock(classOf[ForwardingManager]) + val versionManager = new DefaultApiVersionManager( + listenerType = apiScope, + forwardingManager = forwardingManager, + brokerFeatures = brokerFeatures, + metadataCache = metadataCache, + enableUnstableLastVersion = false + ) + + ApiKeys.apisForListener(apiScope).forEach { apiKey => + if (apiKey.messageType.latestVersionUnstable()) { + assertFalse(versionManager.isApiEnabled(apiKey, apiKey.latestVersion), + s"$apiKey version ${apiKey.latestVersion} should be disabled.") + } + } + } + + @Test + def testControllerApiIntersection(): Unit = { + val controllerMinVersion: Short = 3 + val controllerMaxVersion: Short = 5 + + val forwardingManager = Mockito.mock(classOf[ForwardingManager]) + + Mockito.when(forwardingManager.controllerApiVersions).thenReturn(Some(NodeApiVersions.create( + ApiKeys.CREATE_TOPICS.id, + controllerMinVersion, + controllerMaxVersion + ))) + + val versionManager = new DefaultApiVersionManager( + listenerType = ListenerType.BROKER, + forwardingManager = forwardingManager, + brokerFeatures = brokerFeatures, + metadataCache = metadataCache, + enableUnstableLastVersion = true + ) + + val apiVersionsResponse = versionManager.apiVersionResponse(throttleTimeMs = 0, false) + val alterConfigVersion = apiVersionsResponse.data.apiKeys.find(ApiKeys.CREATE_TOPICS.id) + assertNotNull(alterConfigVersion) + assertEquals(controllerMinVersion, alterConfigVersion.minVersion) + assertEquals(controllerMaxVersion, alterConfigVersion.maxVersion) + } + + @Test + def testEnvelopeDisabledForKRaftBroker(): Unit = { + val forwardingManager = Mockito.mock(classOf[ForwardingManager]) + Mockito.when(forwardingManager.controllerApiVersions).thenReturn(None) + + val versionManager = new DefaultApiVersionManager( + listenerType = ListenerType.BROKER, + forwardingManager = forwardingManager, + brokerFeatures = brokerFeatures, + metadataCache = metadataCache, + enableUnstableLastVersion = true + ) + assertFalse(versionManager.isApiEnabled(ApiKeys.ENVELOPE, ApiKeys.ENVELOPE.latestVersion)) + assertFalse(versionManager.enabledApis.contains(ApiKeys.ENVELOPE)) + + val apiVersionsResponse = versionManager.apiVersionResponse(throttleTimeMs = 0, false) + val envelopeVersion = apiVersionsResponse.data.apiKeys.find(ApiKeys.ENVELOPE.id) + assertNull(envelopeVersion) + } +} diff --git a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala index 9b58207d2f368..6e32cfc01f8be 100644 --- a/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ApiVersionsRequestTest.scala @@ -19,10 +19,9 @@ package kafka.server import org.apache.kafka.common.message.ApiVersionsRequestData import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse} +import org.apache.kafka.common.requests.ApiVersionsRequest import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Type} -import org.apache.kafka.server.IntegrationTestUtils import org.apache.kafka.server.common.MetadataVersion import org.junit.jupiter.api.Assertions._ @@ -34,7 +33,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequest(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.brokerBoundPorts().get(0)) + val apiVersionsResponse = sendApiVersionsRequest(request, cluster.clientListener()) validateApiVersionsResponse(apiVersionsResponse) } @@ -44,15 +43,15 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequestIncludesUnreleasedApis(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.brokerBoundPorts().get(0)) + val apiVersionsResponse = sendApiVersionsRequest(request, cluster.clientListener()) validateApiVersionsResponse(apiVersionsResponse, enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT)) def testApiVersionsRequestThroughControllerListener(): Unit = { val request = new ApiVersionsRequest.Builder().build() - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](request, cluster.controllerBoundPorts().get(0)) - validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName(), enableUnstableLastVersion = true) + val apiVersionsResponse = sendApiVersionsRequest(request, cluster.controllerListenerName.get()) + validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName.get(), enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT)) @@ -74,7 +73,7 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio )) def testApiVersionsRequestValidationV0(): Unit = { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0.asInstanceOf[Short]) - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.brokerBoundPorts().get(0)) + val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.clientListener()) validateApiVersionsResponse(apiVersionsResponse, apiVersion = 0, enableUnstableLastVersion = !"false".equals( cluster.config().serverProperties().get("unstable.api.versions.enable"))) @@ -83,15 +82,15 @@ class ApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersio @ClusterTest(types = Array(Type.KRAFT)) def testApiVersionsRequestValidationV0ThroughControllerListener(): Unit = { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0.asInstanceOf[Short]) - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.controllerBoundPorts().get(0)) - validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName(), apiVersion = 0, enableUnstableLastVersion = true) + val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.controllerListenerName.get()) + validateApiVersionsResponse(apiVersionsResponse, cluster.controllerListenerName.get(), apiVersion = 0, enableUnstableLastVersion = true) } @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT)) def testApiVersionsRequestValidationV3(): Unit = { // Invalid request because Name and Version are empty by default val apiVersionsRequest = new ApiVersionsRequest(new ApiVersionsRequestData(), 3.asInstanceOf[Short]) - val apiVersionsResponse = IntegrationTestUtils.connectAndReceive[ApiVersionsResponse](apiVersionsRequest, cluster.brokerBoundPorts().get(0)) + val apiVersionsResponse = sendApiVersionsRequest(apiVersionsRequest, cluster.clientListener()) assertEquals(Errors.INVALID_REQUEST.code(), apiVersionsResponse.data.errorCode()) } } diff --git a/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala b/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala index cf4b6a10bb183..d91e146c68524 100644 --- a/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala +++ b/core/src/test/scala/unit/kafka/server/AuthHelperTest.scala @@ -23,7 +23,6 @@ import org.apache.kafka.clients.admin.EndpointType import java.net.InetAddress import java.util import org.apache.kafka.common.acl.AclOperation -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.message.{DescribeClusterRequestData, DescribeClusterResponseData} import org.apache.kafka.common.message.DescribeClusterResponseData.DescribeClusterBrokerCollection import org.apache.kafka.common.network.{ClientInformation, ListenerName} @@ -72,7 +71,6 @@ class AuthHelperTest { @Test def testAuthorize(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") val operation = AclOperation.WRITE val resourceType = ResourceType.TOPIC @@ -90,7 +88,7 @@ class AuthHelperTest { when(authorizer.authorize(requestContext, expectedActions.asJava)) .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) - val result = new AuthHelper(Some(authorizerPlugin)).authorize( + val result = new AuthHelper(Some(authorizer)).authorize( requestContext, operation, resourceType, resourceName) verify(authorizer).authorize(requestContext, expectedActions.asJava) @@ -101,7 +99,6 @@ class AuthHelperTest { @Test def testFilterByAuthorized(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") val operation = AclOperation.WRITE val resourceType = ResourceType.TOPIC @@ -135,7 +132,7 @@ class AuthHelperTest { }.asJava } - val result = new AuthHelper(Some(authorizerPlugin)).filterByAuthorized( + val result = new AuthHelper(Some(authorizer)).filterByAuthorized( requestContext, operation, resourceType, @@ -152,9 +149,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseV1WithUnknownEndpointType(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(123.toByte), 1) val responseData = authHelper.computeDescribeClusterResponse(request, @@ -169,9 +164,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseV0WithUnknownEndpointType(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(123.toByte), 0) val responseData = authHelper.computeDescribeClusterResponse(request, @@ -186,9 +179,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseV1WithUnexpectedEndpointType(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(EndpointType.BROKER.id()), 1) val responseData = authHelper.computeDescribeClusterResponse(request, @@ -203,9 +194,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseV0WithUnexpectedEndpointType(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(EndpointType.BROKER.id()), 0) val responseData = authHelper.computeDescribeClusterResponse(request, @@ -220,9 +209,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseWhereControllerIsNotFound(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(EndpointType.CONTROLLER.id()), 1) val responseData = authHelper.computeDescribeClusterResponse(request, @@ -239,9 +226,7 @@ class AuthHelperTest { @Test def testComputeDescribeClusterResponseSuccess(): Unit = { - val authorizer: Authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - val authHelper = new AuthHelper(Some(authorizerPlugin)) + val authHelper = new AuthHelper(Some(mock(classOf[Authorizer]))) val request = newMockDescribeClusterRequest( new DescribeClusterRequestData().setEndpointType(EndpointType.CONTROLLER.id()), 1) val nodes = new DescribeClusterBrokerCollection( diff --git a/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala index e25733a6f0dc4..7e2aa9ca65f16 100644 --- a/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseClientQuotaManagerTest.scala @@ -31,7 +31,7 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.utils.MockTime import org.apache.kafka.network.Session import org.apache.kafka.network.metrics.RequestChannelMetrics -import org.apache.kafka.server.quota.{ClientQuotaManager, ThrottleCallback} +import org.apache.kafka.server.quota.ThrottleCallback import org.junit.jupiter.api.AfterEach import org.mockito.Mockito.mock @@ -81,6 +81,6 @@ class BaseClientQuotaManagerTest { protected def throttle(quotaManager: ClientQuotaManager, user: String, clientId: String, throttleTimeMs: Int, channelThrottlingCallback: ThrottleCallback): Unit = { val (_, request) = buildRequest(FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, 0, 1000, new util.HashMap[TopicPartition, PartitionData])) - quotaManager.throttle(request.header.clientId(), request.session, channelThrottlingCallback, throttleTimeMs) + quotaManager.throttle(request, channelThrottlingCallback, throttleTimeMs) } } diff --git a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala index 3ace4590aacd5..3a0ffe1b4779f 100644 --- a/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/BaseRequestTest.scala @@ -20,7 +20,7 @@ package kafka.server import kafka.api.IntegrationTestHarness import kafka.network.SocketServer import org.apache.kafka.common.network.ListenerName -import org.apache.kafka.common.protocol.{ApiKeys, ByteBufferAccessor} +import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, RequestHeader, ResponseHeader} import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.BrokerState @@ -96,7 +96,7 @@ abstract class BaseRequestTest extends IntegrationTestHarness { val responseBuffer = ByteBuffer.wrap(responseBytes) ResponseHeader.parse(responseBuffer, apiKey.responseHeaderVersion(version)) - AbstractResponse.parseResponse(apiKey, new ByteBufferAccessor(responseBuffer), version) match { + AbstractResponse.parseResponse(apiKey, responseBuffer, version) match { case response: T => response case response => throw new ClassCastException(s"Expected response with type ${classTag.runtimeClass}, but found ${response.getClass}") diff --git a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala index 5b621671ad6e6..71bfbefa307f4 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerLifecycleManagerTest.scala @@ -59,14 +59,14 @@ class BrokerLifecycleManagerTest { @Test def testCreateAndClose(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "create-and-close-", Set(Uuid.fromString("oFoTeS9QT0aAyCyH41v45A"))) + manager = new BrokerLifecycleManager(context.config, context.time, "create-and-close-", isZkBroker = false, Set(Uuid.fromString("oFoTeS9QT0aAyCyH41v45A"))) manager.close() } @Test def testCreateStartAndClose(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "create-start-and-close-", Set(Uuid.fromString("uiUADXZWTPixVvp6UWFWnw"))) + manager = new BrokerLifecycleManager(context.config, context.time, "create-start-and-close-", isZkBroker = false, Set(Uuid.fromString("uiUADXZWTPixVvp6UWFWnw"))) assertEquals(BrokerState.NOT_RUNNING, manager.state) manager.start(() => context.highestMetadataOffset.get(), context.mockChannelManager, context.clusterId, context.advertisedListeners, @@ -81,7 +81,7 @@ class BrokerLifecycleManagerTest { @Test def testSuccessfulRegistration(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) + manager = new BrokerLifecycleManager(context.config, context.time, "successful-registration-", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) manager.start(() => context.highestMetadataOffset.get(), @@ -103,7 +103,7 @@ class BrokerLifecycleManagerTest { def testRegistrationTimeout(): Unit = { val context = new RegistrationTestContext(configProperties) val controllerNode = new Node(3000, "localhost", 8021) - manager = new BrokerLifecycleManager(context.config, context.time, "registration-timeout-", Set(Uuid.fromString("9XBOAtr4T0Wbx2sbiWh6xg"))) + manager = new BrokerLifecycleManager(context.config, context.time, "registration-timeout-", isZkBroker = false, Set(Uuid.fromString("9XBOAtr4T0Wbx2sbiWh6xg"))) context.controllerNodeProvider.node.set(controllerNode) def newDuplicateRegistrationResponse(): Unit = { context.mockClient.prepareResponseFrom(new BrokerRegistrationResponse( @@ -143,7 +143,7 @@ class BrokerLifecycleManagerTest { @Test def testControlledShutdown(): Unit = { val context = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(context.config, context.time, "controlled-shutdown-", Set(Uuid.fromString("B4RtUz1ySGip3A7ZFYB2dg"))) + manager = new BrokerLifecycleManager(context.config, context.time, "controlled-shutdown-", isZkBroker = false, Set(Uuid.fromString("B4RtUz1ySGip3A7ZFYB2dg"))) val controllerNode = new Node(3000, "localhost", 8021) context.controllerNodeProvider.node.set(controllerNode) context.mockClient.prepareResponseFrom(new BrokerRegistrationResponse( @@ -224,7 +224,7 @@ class BrokerLifecycleManagerTest { @Test def testAlwaysSendsAccumulatedOfflineDirs(): Unit = { val ctx = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "offline-dirs-sent-in-heartbeat-", Set(Uuid.fromString("0IbF1sjhSGG6FNvnrPbqQg"))) + manager = new BrokerLifecycleManager(ctx.config, ctx.time, "offline-dirs-sent-in-heartbeat-", isZkBroker = false, Set(Uuid.fromString("0IbF1sjhSGG6FNvnrPbqQg"))) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -250,7 +250,8 @@ class BrokerLifecycleManagerTest { def testRegistrationIncludesDirs(): Unit = { val logDirs = Set("ad5FLIeCTnaQdai5vOjeng", "ybdzUKmYSLK6oiIpI6CPlw").map(Uuid.fromString) val ctx = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "registration-includes-dirs-", logDirs) + manager = new BrokerLifecycleManager(ctx.config, ctx.time, "registration-includes-dirs-", + isZkBroker = false, logDirs) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -267,7 +268,7 @@ class BrokerLifecycleManagerTest { @Test def testKraftJBODMetadataVersionUpdateEvent(): Unit = { val ctx = new RegistrationTestContext(configProperties) - manager = new BrokerLifecycleManager(ctx.config, ctx.time, "jbod-metadata-version-update", Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) + manager = new BrokerLifecycleManager(ctx.config, ctx.time, "jbod-metadata-version-update", isZkBroker = false, Set(Uuid.fromString("gCpDJgRlS2CBCpxoP2VMsQ"))) val controllerNode = new Node(3000, "localhost", 8021) ctx.controllerNodeProvider.node.set(controllerNode) @@ -288,7 +289,7 @@ class BrokerLifecycleManagerTest { assertEquals(1000L, manager.brokerEpoch) // Trigger JBOD MV update - manager.resendBrokerRegistration() + manager.resendBrokerRegistrationUnlessZkMode() // Accept new registration, response sets epoch to 1200 nextRegistrationRequest(1200L) diff --git a/core/src/test/scala/unit/kafka/server/BrokerMetricNamesTest.scala b/core/src/test/scala/unit/kafka/server/BrokerMetricNamesTest.scala index 300bc2d8d42ec..4f48dedbf9c40 100644 --- a/core/src/test/scala/unit/kafka/server/BrokerMetricNamesTest.scala +++ b/core/src/test/scala/unit/kafka/server/BrokerMetricNamesTest.scala @@ -40,7 +40,12 @@ class BrokerMetricNamesTest(cluster: ClusterInstance) { def checkReplicaManagerMetrics(): Unit = { val metrics = KafkaYammerMetrics.defaultRegistry.allMetrics val expectedPrefix = "kafka.server:type=ReplicaManager,name" - val expectedMetricNames = ReplicaManager.MetricNames + val expectedMetricNames = Set( + "LeaderCount", "PartitionCount", "OfflineReplicaCount", "UnderReplicatedPartitions", + "UnderMinIsrPartitionCount", "AtMinIsrPartitionCount", "ReassigningPartitions", + "IsrExpandsPerSec", "IsrShrinksPerSec", "FailedIsrUpdatesPerSec", + "ProducerIdCount", + ) expectedMetricNames.foreach { metricName => assertEquals(1, metrics.keySet.asScala.count(_.getMBeanName == s"$expectedPrefix=$metricName")) } diff --git a/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala new file mode 100644 index 0000000000000..c7a4bd45f780c --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/BrokerRegistrationRequestTest.scala @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import org.apache.kafka.common.test.api.{ClusterTest, Type} +import org.apache.kafka.clients.ClientResponse +import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic +import org.apache.kafka.common.message.{BrokerRegistrationRequestData, CreateTopicsRequestData} +import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.network.ListenerName +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.requests._ +import org.apache.kafka.common.security.auth.SecurityProtocol +import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.common.utils.Time +import org.apache.kafka.common.{Node, Uuid} +import org.apache.kafka.server.common.{ControllerRequestCompletionHandler, Feature, MetadataVersion, MetadataVersionTestUtils, NodeToControllerChannelManager} +import org.junit.jupiter.api.Assertions.assertEquals + +import java.util +import java.util.Collections +import java.util.concurrent.{CompletableFuture, TimeUnit, TimeoutException} + +/** + * This test simulates a broker registering with the KRaft quorum under different configurations. + */ +class BrokerRegistrationRequestTest { + + def brokerToControllerChannelManager(clusterInstance: ClusterInstance): NodeToControllerChannelManager = { + new NodeToControllerChannelManagerImpl( + new ControllerNodeProvider() { + def node: Option[Node] = Some(new Node( + clusterInstance.anyControllerSocketServer().config.nodeId, + "127.0.0.1", + clusterInstance.anyControllerSocketServer().boundPort(clusterInstance.controllerListenerName().get()), + )) + + def listenerName: ListenerName = clusterInstance.controllerListenerName().get() + + val securityProtocol: SecurityProtocol = SecurityProtocol.PLAINTEXT + + val saslMechanism: String = "" + + override def getControllerInfo(): ControllerInformation = + ControllerInformation(node, listenerName, securityProtocol, saslMechanism) + }, + Time.SYSTEM, + new Metrics(), + clusterInstance.anyControllerSocketServer().config, + "heartbeat", + "test-heartbeat-", + 10000 + ) + } + + def sendAndReceive[T <: AbstractRequest, R <: AbstractResponse]( + channelManager: NodeToControllerChannelManager, + reqBuilder: AbstractRequest.Builder[T], + timeoutMs: Int + ): R = { + val responseFuture = new CompletableFuture[R]() + channelManager.sendRequest(reqBuilder, new ControllerRequestCompletionHandler() { + override def onTimeout(): Unit = responseFuture.completeExceptionally(new TimeoutException()) + + override def onComplete(response: ClientResponse): Unit = + responseFuture.complete(response.responseBody().asInstanceOf[R]) + }) + responseFuture.get(timeoutMs, TimeUnit.MILLISECONDS) + } + + def registerBroker( + channelManager: NodeToControllerChannelManager, + clusterId: String, + brokerId: Int, + zkEpoch: Option[Long], + featureLevelToSend: Option[(Short, Short)] + ): Errors = { + val features = new BrokerRegistrationRequestData.FeatureCollection() + featureLevelToSend.foreach { case (min, max) => + features.add(new BrokerRegistrationRequestData.Feature() + .setName(MetadataVersion.FEATURE_NAME) + .setMinSupportedVersion(min) + .setMaxSupportedVersion(max) + ) + } + Feature.PRODUCTION_FEATURES.stream().filter(_.featureName != MetadataVersion.FEATURE_NAME).forEach { + feature => + features.add(new BrokerRegistrationRequestData.Feature() + .setName(feature.featureName) + .setMinSupportedVersion(feature.minimumProduction()) + .setMaxSupportedVersion(feature.latestTesting())) + } + + val req = new BrokerRegistrationRequestData() + .setBrokerId(brokerId) + .setLogDirs(Collections.singletonList(Uuid.randomUuid())) + .setClusterId(clusterId) + .setIncarnationId(Uuid.randomUuid()) + .setIsMigratingZkBroker(zkEpoch.isDefined) + .setFeatures(features) + .setListeners(new BrokerRegistrationRequestData.ListenerCollection(util.Arrays.asList( + new BrokerRegistrationRequestData.Listener(). + setName("EXTERNAL"). + setHost("example.com"). + setPort(8082). + setSecurityProtocol(SecurityProtocol.PLAINTEXT.id)) + .iterator())) + + val resp = sendAndReceive[BrokerRegistrationRequest, BrokerRegistrationResponse]( + channelManager, new BrokerRegistrationRequest.Builder(req), 30000) + Errors.forCode(resp.data().errorCode()) + } + + + def createTopics(channelManager: NodeToControllerChannelManager, + topicName: String): Errors = { + val createTopics = new CreateTopicsRequestData() + createTopics.setTopics(new CreateTopicsRequestData.CreatableTopicCollection()) + createTopics.topics().add(new CreatableTopic().setName(topicName).setNumPartitions(10).setReplicationFactor(1)) + createTopics.setTimeoutMs(500) + + val req = new CreateTopicsRequest.Builder(createTopics) + val resp = sendAndReceive[CreateTopicsRequest, CreateTopicsResponse](channelManager, req, 3000).data() + Errors.forCode(resp.topics().find(topicName).errorCode()) + } + + @ClusterTest(types = Array(Type.KRAFT), brokers = 0, controllers = 1, metadataVersion = MetadataVersion.IBP_3_3_IV3) + def testRegisterZkWith33Controller(clusterInstance: ClusterInstance): Unit = { + // Verify that a controller running an old metadata.version cannot register a ZK broker + val clusterId = clusterInstance.clusterId() + val channelManager = brokerToControllerChannelManager(clusterInstance) + try { + channelManager.start() + // Invalid registration (isMigratingZkBroker, but MV does not support migrations) + assertEquals( + Errors.BROKER_ID_NOT_REGISTERED, + registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersionTestUtils.IBP_3_3_IV0_FEATURE_LEVEL, MetadataVersion.IBP_3_3_IV3.featureLevel)))) + + // No features (MV) sent with registration, controller can't verify + assertEquals( + Errors.BROKER_ID_NOT_REGISTERED, + registerBroker(channelManager, clusterId, 100, Some(1), None)) + + // Given MV is too high for controller to support + assertEquals( + Errors.BROKER_ID_NOT_REGISTERED, + registerBroker(channelManager, clusterId, 100, Some(1), Some((MetadataVersion.IBP_3_4_IV0.featureLevel, MetadataVersion.IBP_3_4_IV0.featureLevel)))) + + // Controller supports this MV and isMigratingZkBroker is false, so this one works + assertEquals( + Errors.NONE, + registerBroker(channelManager, clusterId, 100, None, Some((MetadataVersion.IBP_3_3_IV3.featureLevel, MetadataVersion.IBP_3_4_IV0.featureLevel)))) + } finally { + channelManager.shutdown() + } + } +} diff --git a/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala index c166eef801221..6c268d3c3fbd9 100644 --- a/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ClientQuotaManagerTest.scala @@ -16,19 +16,17 @@ */ package kafka.server -import org.apache.kafka.common.Cluster +import kafka.server.ClientQuotaManager.BaseUserEntity + import java.net.InetAddress -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.metrics.Quota import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.server.config.ClientQuotaManagerConfig import org.apache.kafka.network.Session -import org.apache.kafka.server.quota.{ClientQuotaCallback, ClientQuotaEntity, ClientQuotaManager, ClientQuotaType, QuotaType} +import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test -import java.util.{Collections, Map, HashMap, Optional} - class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { private val config = new ClientQuotaManagerConfig() @@ -40,12 +38,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Optional.of(new Quota(2000, true)) + Some(new Quota(2000, true)) ) clientQuotaManager.updateQuota( client2.configUser, client2.configClientEntity, - Optional.of(new Quota(4000, true)) + Some(new Quota(4000, true)) ) assertEquals(Long.MaxValue.toDouble, clientQuotaManager.quota(randomClient.user, randomClient.clientId).bound, 0.0, @@ -64,7 +62,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Optional.of(new Quota(3000, true)) + Some(new Quota(3000, true)) ) assertEquals(3000, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the newly overridden value (3000)") @@ -75,7 +73,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Optional.of(new Quota(500, true)) + Some(new Quota(500, true)) ) assertEquals(500, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the default value (500)") @@ -86,12 +84,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { clientQuotaManager.updateQuota( client1.configUser, client1.configClientEntity, - Optional.empty + None ) clientQuotaManager.updateQuota( defaultConfigClient.configUser, defaultConfigClient.configClientEntity, - Optional.of(new Quota(4000, true)) + Some(new Quota(4000, true)) ) assertEquals(4000, clientQuotaManager.quota(client1.user, client1.clientId).bound, 0.0, "Should return the newly overridden value (4000)") @@ -108,10 +106,10 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { */ @Test def testUserQuotaParsingWithDefaultClientIdQuota(): Unit = { - val client1 = UserClient("User1", "p1", Optional.of(new ClientQuotaManager.UserEntity("User1")), Optional.empty) - val client2 = UserClient("User2", "p2", Optional.of(new ClientQuotaManager.UserEntity("User2")), Optional.empty) - val randomClient = UserClient("RandomUser", "random-client-id", Optional.empty, Optional.empty) - val defaultConfigClient = UserClient("", "", Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), Optional.empty) + val client1 = UserClient("User1", "p1", Some(ClientQuotaManager.UserEntity("User1")), None) + val client2 = UserClient("User2", "p2", Some(ClientQuotaManager.UserEntity("User2")), None) + val randomClient = UserClient("RandomUser", "random-client-id", None, None) + val defaultConfigClient = UserClient("", "", Some(ClientQuotaManager.DefaultUserEntity), None) testQuotaParsing(config, client1, client2, randomClient, defaultConfigClient) } @@ -121,7 +119,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val expectedMaxValueInQuotaWindow = if (expectedBound < Long.MaxValue) config.quotaWindowSizeSeconds * (config.numQuotaSamples - 1) * expectedBound.toDouble else Double.MaxValue - assertEquals(expectedMaxValueInQuotaWindow, quotaManager.maxValueInQuotaWindow(session, clientId), 0.01) + assertEquals(expectedMaxValueInQuotaWindow, quotaManager.getMaxValueInQuotaWindow(session, clientId), 0.01) val throttleTimeMs = maybeRecord(quotaManager, user, clientId, value * config.numQuotaSamples) if (expectThrottle) @@ -131,7 +129,7 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { } @Test - def testMaxValueInQuotaWindowWithNonDefaultQuotaWindow(): Unit = { + def testGetMaxValueInQuotaWindowWithNonDefaultQuotaWindow(): Unit = { val numFullQuotaWindows = 3 // 3 seconds window (vs. 10 seconds default) val nonDefaultConfig = new ClientQuotaManagerConfig(numFullQuotaWindows + 1) val clientQuotaManager = new ClientQuotaManager(nonDefaultConfig, metrics, QuotaType.FETCH, time, "") @@ -139,15 +137,15 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { // no quota set - assertEquals(Double.MaxValue, clientQuotaManager.maxValueInQuotaWindow(userSession, "client1"), 0.01) + assertEquals(Double.MaxValue, clientQuotaManager.getMaxValueInQuotaWindow(userSession, "client1"), 0.01) // Set default quota config clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.empty, - Optional.of(new Quota(10, true)) + Some(ClientQuotaManager.DefaultUserEntity), + None, + Some(new Quota(10, true)) ) - assertEquals(10 * numFullQuotaWindows, clientQuotaManager.maxValueInQuotaWindow(userSession, "client1"), 0.01) + assertEquals(10 * numFullQuotaWindows, clientQuotaManager.getMaxValueInQuotaWindow(userSession, "client1"), 0.01) } finally { clientQuotaManager.shutdown() } @@ -165,17 +163,17 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Set default quota config clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.empty, - Optional.of(new Quota(10, true)) + Some(ClientQuotaManager.DefaultUserEntity), + None, + Some(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove default quota config, back to no quotas clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.empty, - Optional.empty + Some(ClientQuotaManager.DefaultUserEntity), + None, + None ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -185,24 +183,24 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testSetAndRemoveUserQuota(): Unit = { - + // quotaTypesEnabled will be QuotaTypes.NoQuotas initially val clientQuotaManager = new ClientQuotaManager(new ClientQuotaManagerConfig(), metrics, QuotaType.PRODUCE, time, "") try { // Set quota config clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.empty, - Optional.of(new Quota(10, true)) + Some(ClientQuotaManager.UserEntity("userA")), + None, + Some(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove quota config, back to no quotas clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.empty, - Optional.empty + Some(ClientQuotaManager.UserEntity("userA")), + None, + None ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -219,17 +217,17 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { // Set quota config clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.of(new Quota(10, true)) + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + Some(new Quota(10, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 10, 1000, expectThrottle = true) // Remove quota config, back to no quotas clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.empty + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + None ) checkQuota(clientQuotaManager, "userA", "client1", Long.MaxValue, 1000, expectThrottle = false) } finally { @@ -244,54 +242,54 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { try { clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.empty, - Optional.of(new Quota(1000, true)) + Some(ClientQuotaManager.DefaultUserEntity), + None, + Some(new Quota(1000, true)) ) clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(2000, true)) + None, + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(2000, true)) ) clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(3000, true)) + Some(ClientQuotaManager.DefaultUserEntity), + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(3000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.empty, - Optional.of(new Quota(4000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + None, + Some(new Quota(4000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.of(new Quota(5000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + Some(new Quota(5000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userB")), - Optional.empty, - Optional.of(new Quota(6000, true)) + Some(ClientQuotaManager.UserEntity("userB")), + None, + Some(new Quota(6000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userB")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.of(new Quota(7000, true)) + Some(ClientQuotaManager.UserEntity("userB")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + Some(new Quota(7000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userB")), - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(8000, true)) + Some(ClientQuotaManager.UserEntity("userB")), + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(8000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userC")), - Optional.empty, - Optional.of(new Quota(10000, true)) + Some(ClientQuotaManager.UserEntity("userC")), + None, + Some(new Quota(10000, true)) ) clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.of(new Quota(9000, true)) + None, + Some(ClientQuotaManager.ClientIdEntity("client1")), + Some(new Quota(9000, true)) ) checkQuota(clientQuotaManager, "userA", "client1", 5000, 4500, expectThrottle = false) // quota takes precedence over @@ -309,9 +307,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Remove default quota config, revert to default clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.empty + Some(ClientQuotaManager.DefaultUserEntity), + Some(ClientQuotaManager.DefaultClientIdEntity), + None ) checkQuota(clientQuotaManager, "userD", "client1", 1000, 0, expectThrottle = false) // Metrics tags changed, restart counter checkQuota(clientQuotaManager, "userE", "client4", 1000, 1500, expectThrottle = true) @@ -320,9 +318,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Remove default quota config, revert to default clientQuotaManager.updateQuota( - Optional.of(ClientQuotaManager.DEFAULT_USER_ENTITY), - Optional.empty, - Optional.empty + Some(ClientQuotaManager.DefaultUserEntity), + None, + None ) checkQuota(clientQuotaManager, "userF", "client4", 2000, 0, expectThrottle = false) // Default quota shared across client-id of all users checkQuota(clientQuotaManager, "userF", "client5", 2000, 0, expectThrottle = false) @@ -331,40 +329,40 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { // Update quotas clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.empty, - Optional.of(new Quota(8000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + None, + Some(new Quota(8000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.of(new Quota(10000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + Some(new Quota(10000, true)) ) checkQuota(clientQuotaManager, "userA", "client2", 8000, 0, expectThrottle = false) checkQuota(clientQuotaManager, "userA", "client2", 8000, 4500, expectThrottle = true) // Throttled due to sum of new and earlier values checkQuota(clientQuotaManager, "userA", "client1", 10000, 0, expectThrottle = false) checkQuota(clientQuotaManager, "userA", "client1", 10000, 6000, expectThrottle = true) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), - Optional.empty + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client1")), + None ) checkQuota(clientQuotaManager, "userA", "client6", 8000, 0, expectThrottle = true) // Throttled due to shared user quota clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client6")), - Optional.of(new Quota(11000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client6")), + Some(new Quota(11000, true)) ) checkQuota(clientQuotaManager, "userA", "client6", 11000, 8500, expectThrottle = false) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(12000, true)) + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(12000, true)) ) clientQuotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity("userA")), - Optional.of(new ClientQuotaManager.ClientIdEntity("client6")), - Optional.empty + Some(ClientQuotaManager.UserEntity("userA")), + Some(ClientQuotaManager.ClientIdEntity("client6")), + None ) checkQuota(clientQuotaManager, "userA", "client6", 12000, 4000, expectThrottle = true) // Throttled due to sum of new and earlier values @@ -379,9 +377,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", "Produce", "")) try { clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(500, true)) + None, + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(500, true)) ) // We have 10 seconds windows. Make sure that there is no quota violation @@ -403,12 +401,12 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { throttle(clientQuotaManager, "ANONYMOUS", "unknown", throttleTime, callback) assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - clientQuotaManager.processThrottledChannelReaperDoWork + clientQuotaManager.throttledChannelReaper.doWork() assertEquals(0, numCallbacks) time.sleep(throttleTime) // Callback can only be triggered after the delay time passes - clientQuotaManager.processThrottledChannelReaperDoWork() + clientQuotaManager.throttledChannelReaper.doWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) @@ -430,9 +428,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.PRODUCE, time, "") try { clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(500, true)) + None, + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", "client1", 100) @@ -455,9 +453,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.PRODUCE, time, "") try { clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(500, true)) + None, + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", "client1", 100) @@ -485,9 +483,9 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { val clientId = "client@#$%" try { clientQuotaManager.updateQuota( - Optional.empty, - Optional.of(ClientQuotaManager.DEFAULT_USER_CLIENT_ID), - Optional.of(new Quota(500, true)) + None, + Some(ClientQuotaManager.DefaultClientIdEntity), + Some(new Quota(500, true)) ) maybeRecord(clientQuotaManager, "ANONYMOUS", clientId, 100) @@ -503,126 +501,10 @@ class ClientQuotaManagerTest extends BaseClientQuotaManagerTest { } } - @Test - def testQuotaTypesEnabledUpdatesWithDefaultCallback(): Unit = { - val clientQuotaManager = new ClientQuotaManager(config, metrics, QuotaType.CONTROLLER_MUTATION, time, "") - try { - assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled()) - assertFalse(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(5, true))) - assertEquals(ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.of(new Quota(5, true))) - assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client2")), Optional.of(new Quota(5, true))) - assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userB")), Optional.empty(), Optional.of(new Quota(5, true))) - assertEquals(ClientQuotaManager.USER_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(10, true))) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.empty()) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.USER_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userB")), Optional.empty(), Optional.empty()) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED | ClientQuotaManager.CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client2")), Optional.empty()) - assertEquals(ClientQuotaManager.USER_CLIENT_ID_QUOTA_ENABLED, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) - assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled) - assertFalse(clientQuotaManager.quotasEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) - assertEquals(ClientQuotaManager.NO_QUOTAS, clientQuotaManager.quotaTypesEnabled) - assertFalse(clientQuotaManager.quotasEnabled) - } finally { - clientQuotaManager.shutdown() - } - } - - @Test - def testQuotaTypesEnabledUpdatesWithCustomCallback(): Unit = { - val customQuotaCallback = new ClientQuotaCallback { - val quotas = new HashMap[ClientQuotaEntity, Quota]() - override def configure(configs: Map[String, _]): Unit = {} - - override def quotaMetricTags(quotaType: ClientQuotaType, principal: KafkaPrincipal, clientId: String): Map[String, String] = Collections.emptyMap() - - override def quotaLimit(quotaType: ClientQuotaType, metricTags: Map[String, String]): java.lang.Double = 1 - override def updateClusterMetadata(cluster: Cluster): Boolean = false - - override def updateQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity, newValue: Double): Unit = { - quotas.put(entity.asInstanceOf[ClientQuotaManager.KafkaQuotaEntity], new Quota(newValue.toLong, true)) - } - - override def removeQuota(quotaType: ClientQuotaType, entity: ClientQuotaEntity): Unit = { - quotas.remove(entity.asInstanceOf[ClientQuotaManager.KafkaQuotaEntity]) - } - - override def quotaResetRequired(quotaType: ClientQuotaType): Boolean = false - - override def close(): Unit = {} - } - val clientQuotaManager = new ClientQuotaManager( - new ClientQuotaManagerConfig(), - metrics, - QuotaType.CONTROLLER_MUTATION, - time, - "", - Optional.of(Plugin.wrapInstance(customQuotaCallback, metrics, "")) - ) - - try { - assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should be true with custom callback") - - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) - assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.of(new Quota(12, true))) - assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled) - assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.of(new Quota(12, true))) - assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") - - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) - clientQuotaManager.updateQuota(Optional.of(new ClientQuotaManager.UserEntity("userA")), Optional.empty(), Optional.empty()) - clientQuotaManager.updateQuota(Optional.empty(), Optional.of(new ClientQuotaManager.ClientIdEntity("client1")), Optional.empty()) - assertEquals(ClientQuotaManager.CUSTOM_QUOTAS, clientQuotaManager.quotaTypesEnabled()) - assertTrue(clientQuotaManager.quotasEnabled, "quotasEnabled should remain true") - } finally { - clientQuotaManager.shutdown() - } - } - private case class UserClient( user: String, clientId: String, - configUser: Optional[ClientQuotaEntity.ConfigEntity] = Optional.empty, - configClientEntity: Optional[ClientQuotaEntity.ConfigEntity] = Optional.empty + configUser: Option[BaseUserEntity] = None, + configClientEntity: Option[ClientQuotaManager.ClientIdEntity] = None ) } diff --git a/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala b/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala new file mode 100644 index 0000000000000..8c30f749427fc --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/ClientQuotasRequestTest.scala @@ -0,0 +1,592 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import java.net.InetAddress +import java.util +import java.util.concurrent.{ExecutionException, TimeUnit} +import org.apache.kafka.common.test.api.ClusterTest +import kafka.utils.TestUtils +import org.apache.kafka.clients.admin.{ScramCredentialInfo, ScramMechanism, UserScramCredentialUpsertion} +import org.apache.kafka.common.errors.{InvalidRequestException, UnsupportedVersionException} +import org.apache.kafka.common.internals.KafkaFutureImpl +import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity, ClientQuotaFilter, ClientQuotaFilterComponent} +import org.apache.kafka.common.requests.{AlterClientQuotasRequest, AlterClientQuotasResponse, DescribeClientQuotasRequest, DescribeClientQuotasResponse} +import org.apache.kafka.common.test.ClusterInstance +import org.apache.kafka.server.config.QuotaConfig +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.Disabled + +import scala.jdk.CollectionConverters._ + +class ClientQuotasRequestTest(cluster: ClusterInstance) { + @ClusterTest + def testAlterClientQuotasRequest(): Unit = { + + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user", ClientQuotaEntity.CLIENT_ID -> "client-id").asJava) + + // Expect an empty configuration. + verifyDescribeEntityQuotas(entity, Map.empty) + + // Add two configuration entries. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + + // Update an existing entry. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(15000.0) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 15000.0, + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + + // Remove an existing configuration entry. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> None + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + + // Remove a non-existent configuration entry. This should make no changes. + alterEntityQuotas(entity, Map( + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + + // Add back a deleted configuration entry. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(5000.0) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 5000.0, + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + + // Perform a mixed update. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> None, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.3) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 12.3 + )) + } + + @ClusterTest + def testAlterClientQuotasRequestValidateOnly(): Unit = { + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) + + // Set up a configuration. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(23.45) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 + )) + + // Validate-only addition. + alterEntityQuotas(entity, Map( + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(50000.0) + ), validateOnly = true) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 + )) + + // Validate-only modification. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0) + ), validateOnly = true) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 + )) + + // Validate-only removal. + alterEntityQuotas(entity, Map( + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None + ), validateOnly = true) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 + )) + + // Validate-only mixed update. + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(50000.0), + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> None + ), validateOnly = true) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> 23.45 + )) + } + + @Disabled("TODO: KAFKA-17630 - Convert ClientQuotasRequestTest#testClientQuotasForScramUsers to kraft") + @ClusterTest + def testClientQuotasForScramUsers(): Unit = { + val userName = "user" + + val admin = cluster.admin() + try { + val results = admin.alterUserScramCredentials(util.Arrays.asList( + new UserScramCredentialUpsertion(userName, new ScramCredentialInfo(ScramMechanism.SCRAM_SHA_256, 4096), "password"))) + results.all.get + + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> userName).asJava) + + verifyDescribeEntityQuotas(entity, Map.empty) + + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.0), + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0) + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 10000.0, + QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0 + )) + } finally { + admin.close() + } + } + + @ClusterTest + def testAlterIpQuotasRequest(): Unit = { + val knownHost = "1.2.3.4" + val unknownHost = "2.3.4.5" + val entity = toIpEntity(Some(knownHost)) + val defaultEntity = toIpEntity(Some(null)) + val entityFilter = ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.IP, knownHost) + val defaultEntityFilter = ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.IP) + val allIpEntityFilter = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP) + + def verifyIpQuotas(entityFilter: ClientQuotaFilterComponent, expectedMatches: Map[ClientQuotaEntity, Double]): Unit = { + TestUtils.tryUntilNoAssertionError() { + val result = describeClientQuotas(ClientQuotaFilter.containsOnly(List(entityFilter).asJava)) + assertEquals(expectedMatches.keySet, result.asScala.keySet) + result.asScala.foreach { case (entity, props) => + assertEquals(Set(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG), props.asScala.keySet) + assertEquals(expectedMatches(entity), props.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG)) + val entityName = entity.entries.get(ClientQuotaEntity.IP) + // ClientQuotaEntity with null name maps to default entity + val entityIp = if (entityName == null) + InetAddress.getByName(unknownHost) + else + InetAddress.getByName(entityName) + var currentServerQuota = 0 + currentServerQuota = cluster.brokerSocketServers().asScala.head.connectionQuotas.connectionRateForIp(entityIp) + assertTrue(Math.abs(expectedMatches(entity) - currentServerQuota) < 0.01, + s"Connection quota of $entity is not ${expectedMatches(entity)} but $currentServerQuota") + } + } + } + + // Expect an empty configuration. + verifyIpQuotas(allIpEntityFilter, Map.empty) + + // Add a configuration entry. + alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(100.0)), validateOnly = false) + verifyIpQuotas(entityFilter, Map(entity -> 100.0)) + + // update existing entry + alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(150.0)), validateOnly = false) + verifyIpQuotas(entityFilter, Map(entity -> 150.0)) + + // update default value + alterEntityQuotas(defaultEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(200.0)), validateOnly = false) + verifyIpQuotas(defaultEntityFilter, Map(defaultEntity -> 200.0)) + + // describe all IP quotas + verifyIpQuotas(allIpEntityFilter, Map(entity -> 150.0, defaultEntity -> 200.0)) + + // remove entry + alterEntityQuotas(entity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> None), validateOnly = false) + verifyIpQuotas(entityFilter, Map.empty) + + // remove default value + alterEntityQuotas(defaultEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> None), validateOnly = false) + verifyIpQuotas(allIpEntityFilter, Map.empty) + } + + @ClusterTest + def testAlterClientQuotasInvalidRequests(): Unit = { + var entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "").asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) + + entity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> "").asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) + + entity = new ClientQuotaEntity(Map("" -> "name").asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), validateOnly = true)) + + entity = new ClientQuotaEntity(Map.empty.asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.5)), validateOnly = true)) + + entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map("bad" -> Some(1.0)), validateOnly = true)) + + entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user").asJava) + assertThrows(classOf[InvalidRequestException], () => alterEntityQuotas(entity, Map(QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(10000.5)), validateOnly = true)) + } + + private def expectInvalidRequestWithMessage(runnable: => Unit, expectedMessage: String): Unit = { + val exception = assertThrows(classOf[InvalidRequestException], () => runnable) + assertTrue(exception.getMessage.contains(expectedMessage), s"Expected message $exception to contain $expectedMessage") + } + + @ClusterTest + def testAlterClientQuotasInvalidEntityCombination(): Unit = { + val userAndIpEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user", ClientQuotaEntity.IP -> "1.2.3.4").asJava) + val clientAndIpEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.CLIENT_ID -> "client", ClientQuotaEntity.IP -> "1.2.3.4").asJava) + val expectedExceptionMessage = "Invalid quota entity combination" + expectInvalidRequestWithMessage(alterEntityQuotas(userAndIpEntity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), + validateOnly = true), expectedExceptionMessage) + expectInvalidRequestWithMessage(alterEntityQuotas(clientAndIpEntity, Map(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG -> Some(12.34)), + validateOnly = true), expectedExceptionMessage) + } + + @ClusterTest + def testAlterClientQuotasBadIp(): Unit = { + val invalidHostPatternEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.IP -> "not a valid host because it has spaces").asJava) + val unresolvableHostEntity = new ClientQuotaEntity(Map(ClientQuotaEntity.IP -> "RFC2606.invalid").asJava) + val expectedExceptionMessage = "not a valid IP" + expectInvalidRequestWithMessage(alterEntityQuotas(invalidHostPatternEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(50.0)), + validateOnly = true), expectedExceptionMessage) + expectInvalidRequestWithMessage(alterEntityQuotas(unresolvableHostEntity, Map(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG -> Some(50.0)), + validateOnly = true), expectedExceptionMessage) + } + + @ClusterTest + def testDescribeClientQuotasInvalidFilterCombination(): Unit = { + val ipFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP) + val userFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER) + val clientIdFilterComponent = ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID) + val expectedExceptionMessage = "Invalid entity filter component combination" + expectInvalidRequestWithMessage(describeClientQuotas(ClientQuotaFilter.contains(List(ipFilterComponent, userFilterComponent).asJava)), + expectedExceptionMessage) + expectInvalidRequestWithMessage(describeClientQuotas(ClientQuotaFilter.contains(List(ipFilterComponent, clientIdFilterComponent).asJava)), + expectedExceptionMessage) + } + + // Entities to be matched against. + private val matchUserClientEntities = List( + (Some("user-1"), Some("client-id-1"), 50.50), + (Some("user-2"), Some("client-id-1"), 51.51), + (Some("user-3"), Some("client-id-2"), 52.52), + (Some(null), Some("client-id-1"), 53.53), + (Some("user-1"), Some(null), 54.54), + (Some("user-3"), Some(null), 55.55), + (Some("user-1"), None, 56.56), + (Some("user-2"), None, 57.57), + (Some("user-3"), None, 58.58), + (Some(null), None, 59.59), + (None, Some("client-id-2"), 60.60) + ).map { case (u, c, v) => (toClientEntity(u, c), v) } + + private val matchIpEntities = List( + (Some("1.2.3.4"), 10.0), + (Some("2.3.4.5"), 20.0) + ).map { case (ip, quota) => (toIpEntity(ip), quota)} + + private def setupDescribeClientQuotasMatchTest(): Unit = { + val userClientQuotas = matchUserClientEntities.map { case (e, v) => + e -> Map((QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, Some(v))) + }.toMap + val ipQuotas = matchIpEntities.map { case (e, v) => + e -> Map((QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG, Some(v))) + }.toMap + val result = alterClientQuotas(userClientQuotas ++ ipQuotas, validateOnly = false) + (matchUserClientEntities ++ matchIpEntities).foreach(e => result(e._1).get(10, TimeUnit.SECONDS)) + } + + @ClusterTest + def testDescribeClientQuotasMatchExact(): Unit = { + setupDescribeClientQuotasMatchTest() + + def matchEntity(entity: ClientQuotaEntity) = { + val components = entity.entries.asScala.map { case (entityType, entityName) => + entityName match { + case null => ClientQuotaFilterComponent.ofDefaultEntity(entityType) + case name => ClientQuotaFilterComponent.ofEntity(entityType, name) + } + } + describeClientQuotas(ClientQuotaFilter.containsOnly(components.toList.asJava)) + } + + // Test exact matches. + matchUserClientEntities.foreach { case (e, v) => + TestUtils.tryUntilNoAssertionError() { + val result = matchEntity(e) + assertEquals(1, result.size) + assertTrue(result.get(e) != null) + val value = result.get(e).get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG) + assertNotNull(value) + assertEquals(value, v, 1e-6) + } + } + + // Entities not contained in `matchEntityList`. + val notMatchEntities = List( + (Some("user-1"), Some("client-id-2")), + (Some("user-3"), Some("client-id-1")), + (Some("user-2"), Some(null)), + (Some("user-4"), None), + (Some(null), Some("client-id-2")), + (None, Some("client-id-1")), + (None, Some("client-id-3")), + ).map { case (u, c) => + new ClientQuotaEntity((u.map((ClientQuotaEntity.USER, _)) ++ + c.map((ClientQuotaEntity.CLIENT_ID, _))).toMap.asJava) + } + + // Verify exact matches of the non-matches returns empty. + notMatchEntities.foreach { e => + val result = matchEntity(e) + assertEquals(0, result.size) + } + } + + @ClusterTest + def testDescribeClientQuotasMatchPartial(): Unit = { + setupDescribeClientQuotasMatchTest() + + def testMatchEntities(filter: ClientQuotaFilter, expectedMatchSize: Int, partition: ClientQuotaEntity => Boolean): Unit = { + TestUtils.tryUntilNoAssertionError() { + val result = describeClientQuotas(filter) + val (expectedMatches, _) = (matchUserClientEntities ++ matchIpEntities).partition(e => partition(e._1)) + assertEquals(expectedMatchSize, expectedMatches.size) // for test verification + assertEquals(expectedMatchSize, result.size, s"Failed to match $expectedMatchSize entities for $filter") + val expectedMatchesMap = expectedMatches.toMap + matchUserClientEntities.foreach { case (entity, expectedValue) => + if (expectedMatchesMap.contains(entity)) { + val config = result.get(entity) + assertNotNull(config) + val value = config.get(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG) + assertNotNull(value) + assertEquals(expectedValue, value, 1e-6) + } else { + assertNull(result.get(entity)) + } + } + matchIpEntities.foreach { case (entity, expectedValue) => + if (expectedMatchesMap.contains(entity)) { + val config = result.get(entity) + assertNotNull(config) + val value = config.get(QuotaConfig.IP_CONNECTION_RATE_OVERRIDE_CONFIG) + assertNotNull(value) + assertEquals(expectedValue, value, 1e-6) + } else { + assertNull(result.get(entity)) + } + } + } + } + + // Match open-ended existing user. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-1")).asJava), 3, + entity => entity.entries.get(ClientQuotaEntity.USER) == "user-1" + ) + + // Match open-ended non-existent user. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "unknown")).asJava), 0, + entity => false + ) + + // Match open-ended existing client ID. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-2")).asJava), 2, + entity => entity.entries.get(ClientQuotaEntity.CLIENT_ID) == "client-id-2" + ) + + // Match open-ended default user. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofDefaultEntity(ClientQuotaEntity.USER)).asJava), 2, + entity => entity.entries.containsKey(ClientQuotaEntity.USER) && entity.entries.get(ClientQuotaEntity.USER) == null + ) + + // Match close-ended existing user. + testMatchEntities( + ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.USER, "user-2")).asJava), 1, + entity => entity.entries.get(ClientQuotaEntity.USER) == "user-2" && !entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) + ) + + // Match close-ended existing client ID that has no matching entity. + testMatchEntities( + ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntity(ClientQuotaEntity.CLIENT_ID, "client-id-1")).asJava), 0, + entity => false + ) + + // Match against all entities with the user type in a close-ended match. + testMatchEntities( + ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER)).asJava), 4, + entity => entity.entries.containsKey(ClientQuotaEntity.USER) && !entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) + ) + + // Match against all entities with the user type in an open-ended match. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.USER)).asJava), 10, + entity => entity.entries.containsKey(ClientQuotaEntity.USER) + ) + + // Match against all entities with the client ID type in a close-ended match. + testMatchEntities( + ClientQuotaFilter.containsOnly(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID)).asJava), 1, + entity => entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) && !entity.entries.containsKey(ClientQuotaEntity.USER) + ) + + // Match against all entities with the client ID type in an open-ended match. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.CLIENT_ID)).asJava), 7, + entity => entity.entries.containsKey(ClientQuotaEntity.CLIENT_ID) + ) + + // Match against all entities with IP type in an open-ended match. + testMatchEntities( + ClientQuotaFilter.contains(List(ClientQuotaFilterComponent.ofEntityType(ClientQuotaEntity.IP)).asJava), 2, + entity => entity.entries.containsKey(ClientQuotaEntity.IP) + ) + + // Match open-ended empty filter list. This should match all entities. + testMatchEntities(ClientQuotaFilter.contains(List.empty.asJava), 13, entity => true) + + // Match close-ended empty filter list. This should match no entities. + testMatchEntities(ClientQuotaFilter.containsOnly(List.empty.asJava), 0, _ => false) + } + + @ClusterTest + def testClientQuotasUnsupportedEntityTypes(): Unit = { + val entity = new ClientQuotaEntity(Map("other" -> "name").asJava) + assertThrows(classOf[UnsupportedVersionException], () => verifyDescribeEntityQuotas(entity, Map.empty)) + } + + @ClusterTest + def testClientQuotasSanitized(): Unit = { + // An entity with name that must be sanitized when writing to Zookeeper. + val entity = new ClientQuotaEntity(Map(ClientQuotaEntity.USER -> "user with spaces").asJava) + + alterEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> Some(20000.0), + ), validateOnly = false) + + verifyDescribeEntityQuotas(entity, Map( + QuotaConfig.PRODUCER_BYTE_RATE_OVERRIDE_CONFIG -> 20000.0, + )) + } + + private def verifyDescribeEntityQuotas(entity: ClientQuotaEntity, quotas: Map[String, Double]): Unit = { + TestUtils.tryUntilNoAssertionError(waitTime = 5000L) { + val components = entity.entries.asScala.map { case (entityType, entityName) => + Option(entityName).map{ name => ClientQuotaFilterComponent.ofEntity(entityType, name)} + .getOrElse(ClientQuotaFilterComponent.ofDefaultEntity(entityType) + ) + } + val describe = describeClientQuotas(ClientQuotaFilter.containsOnly(components.toList.asJava)) + if (quotas.isEmpty) { + assertEquals(0, describe.size) + } else { + assertEquals(1, describe.size) + val configs = describe.get(entity) + assertNotNull(configs) + assertEquals(quotas.size, configs.size) + quotas.foreach { case (k, v) => + val value = configs.get(k) + assertNotNull(value) + assertEquals(v, value, 1e-6) + } + } + } + } + + private def toClientEntity(user: Option[String], clientId: Option[String]) = + new ClientQuotaEntity((user.map(ClientQuotaEntity.USER -> _) ++ clientId.map(ClientQuotaEntity.CLIENT_ID -> _)).toMap.asJava) + + private def toIpEntity(ip: Option[String]) = new ClientQuotaEntity(ip.map(ClientQuotaEntity.IP -> _).toMap.asJava) + + private def describeClientQuotas(filter: ClientQuotaFilter) = { + val result = new KafkaFutureImpl[java.util.Map[ClientQuotaEntity, java.util.Map[String, java.lang.Double]]] + sendDescribeClientQuotasRequest(filter).complete(result) + try result.get catch { + case e: ExecutionException => throw e.getCause + } + } + + private def sendDescribeClientQuotasRequest(filter: ClientQuotaFilter): DescribeClientQuotasResponse = { + val request = new DescribeClientQuotasRequest.Builder(filter).build() + IntegrationTestUtils.connectAndReceive[DescribeClientQuotasResponse](request, + destination = cluster.anyBrokerSocketServer(), + listenerName = cluster.clientListener()) + } + + private def alterEntityQuotas(entity: ClientQuotaEntity, alter: Map[String, Option[Double]], validateOnly: Boolean) = + try alterClientQuotas(Map(entity -> alter), validateOnly)(entity).get(10, TimeUnit.SECONDS) catch { + case e: ExecutionException => throw e.getCause + } + + private def alterClientQuotas(request: Map[ClientQuotaEntity, Map[String, Option[Double]]], validateOnly: Boolean) = { + val entries = request.map { case (entity, alter) => + val ops = alter.map { case (key, value) => + new ClientQuotaAlteration.Op(key, value.map(Double.box).orNull) + }.asJavaCollection + new ClientQuotaAlteration(entity, ops) + } + + val response = request.map(e => e._1 -> new KafkaFutureImpl[Void]).asJava + sendAlterClientQuotasRequest(entries, validateOnly).complete(response) + val result = response.asScala + assertEquals(request.size, result.size) + request.foreach(e => assertTrue(result.contains(e._1))) + result + } + + private def sendAlterClientQuotasRequest(entries: Iterable[ClientQuotaAlteration], validateOnly: Boolean): AlterClientQuotasResponse = { + val request = new AlterClientQuotasRequest.Builder(entries.asJavaCollection, validateOnly).build() + IntegrationTestUtils.connectAndReceive[AlterClientQuotasResponse](request, + destination = cluster.anyBrokerSocketServer(), + listenerName = cluster.clientListener()) + } + +} diff --git a/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala index 9b8e85c44e7ba..368280d235453 100644 --- a/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ClientRequestQuotaManagerTest.scala @@ -18,8 +18,7 @@ package kafka.server import org.apache.kafka.common.metrics.Quota import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} -import org.apache.kafka.server.quota.ClientQuotaEntity +import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -31,13 +30,10 @@ class ClientRequestQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testRequestPercentageQuotaViolation(): Unit = { val clientRequestQuotaManager = new ClientRequestQuotaManager(config, metrics, time, "", Optional.empty()) - val userEntity: ClientQuotaEntity.ConfigEntity = new ClientQuotaManager.UserEntity("ANONYMOUS") - val clientEntity: ClientQuotaEntity.ConfigEntity = new ClientQuotaManager.ClientIdEntity("test-client") - clientRequestQuotaManager.updateQuota( - Optional.of(userEntity), - Optional.of(clientEntity), - Optional.of(Quota.upperBound(1)) + Some(ClientQuotaManager.UserEntity("ANONYMOUS")), + Some(ClientQuotaManager.ClientIdEntity("test-client")), + Some(Quota.upperBound(1)) ) val queueSizeMetric = metrics.metrics().get(metrics.metricName("queue-size", QuotaType.REQUEST.toString, "")) def millisToPercent(millis: Double) = millis * 1000 * 1000 * ClientRequestQuotaManager.NANOS_TO_PERCENTAGE_PER_SECOND @@ -63,12 +59,12 @@ class ClientRequestQuotaManagerTest extends BaseClientQuotaManagerTest { throttle(clientRequestQuotaManager, "ANONYMOUS", "test-client", throttleTime, callback) assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - clientRequestQuotaManager.processThrottledChannelReaperDoWork() + clientRequestQuotaManager.throttledChannelReaper.doWork() assertEquals(0, numCallbacks) time.sleep(throttleTime) // Callback can only be triggered after the delay time passes - clientRequestQuotaManager.processThrottledChannelReaperDoWork() + clientRequestQuotaManager.throttledChannelReaper.doWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) diff --git a/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala b/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala index 6d865219322c9..a4494c5f1e776 100644 --- a/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConfigAdminManagerTest.scala @@ -20,6 +20,7 @@ package kafka.server import java.util import java.util.Collections +import kafka.server.metadata.MockConfigRepository import kafka.utils.TestUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.common.config.ConfigResource.Type.{BROKER, BROKER_LOGGER, TOPIC, UNKNOWN} @@ -38,7 +39,6 @@ import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.{Alte import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.protocol.Errors.{INVALID_REQUEST, NONE} import org.apache.kafka.common.requests.ApiError -import org.apache.kafka.metadata.MockConfigRepository import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertTrue} import org.junit.jupiter.api.{Assertions, Test} import org.slf4j.LoggerFactory diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala index 0f55feccb46a0..f6831ca8e3dd1 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupDescribeRequestTest.scala @@ -37,17 +37,15 @@ import java.lang.{Byte => JByte} import java.util.Collections import scala.jdk.CollectionConverters._ -@ClusterTestDefaults( - types = Array(Type.KRAFT), - brokers = 1, - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT), brokers = 1) class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ), features = Array( new ClusterFeature(feature = Feature.GROUP_VERSION, version = 0) ) @@ -73,7 +71,13 @@ class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo assertEquals(expectedResponse, consumerGroupDescribeResponse.data) } - @ClusterTest + @ClusterTest( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) def testConsumerGroupDescribeWithNewGroupCoordinator(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. @@ -207,27 +211,19 @@ class ConsumerGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCo ) assertEquals(expected, actual) - - val unknownGroupResponse = consumerGroupDescribe( - groupIds = List("grp-unknown"), - includeAuthorizedOperations = true, - version = version.toShort, - ) - assertEquals(Errors.GROUP_ID_NOT_FOUND.code, unknownGroupResponse.head.errorCode()) - - val emptyGroupResponse = consumerGroupDescribe( - groupIds = List(""), - includeAuthorizedOperations = true, - version = version.toShort, - ) - assertEquals(Errors.INVALID_GROUP_ID.code, emptyGroupResponse.head.errorCode()) } } finally { admin.close() } } - @ClusterTest + @ClusterTest( + types = Array(Type.KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) def testConsumerGroupDescribeWithMigrationMember(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. diff --git a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala index 506d0007924bb..e94bcbc56a3fb 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerGroupHeartbeatRequestTest.scala @@ -40,7 +40,7 @@ import scala.jdk.CollectionConverters._ new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") ) ) -class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { +class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) { @ClusterTest( serverProperties = Array( @@ -52,7 +52,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC new ConsumerGroupHeartbeatRequestData() ).build() - val consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + val consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) val expectedResponse = new ConsumerGroupHeartbeatResponseData().setErrorCode(Errors.UNSUPPORTED_VERSION.code) assertEquals(expectedResponse, consumerGroupHeartbeatResponse.data) } @@ -67,7 +67,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC new ConsumerGroupHeartbeatRequestData() ).build() - val consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + val consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) val expectedResponse = new ConsumerGroupHeartbeatResponseData().setErrorCode(Errors.UNSUPPORTED_VERSION.code) assertEquals(expectedResponse, consumerGroupHeartbeatResponse.data) } @@ -101,7 +101,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -134,7 +134,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Heartbeats until the partitions are assigned. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") @@ -151,7 +151,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC .setMemberEpoch(-1) ).build() - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) // Verify the response. assertEquals(-1, consumerGroupHeartbeatResponse.data.memberEpoch) @@ -189,7 +189,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -222,7 +222,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Heartbeats until the partitions are assigned. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") @@ -248,7 +248,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Heartbeats until the partitions are revoked. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions revoked. Last response $consumerGroupHeartbeatResponse.") @@ -290,7 +290,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REGULAR_EXPRESSION.code }, msg = s"Did not receive the expected error. Last response $consumerGroupHeartbeatResponse.") @@ -301,48 +301,6 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC } } - @ClusterTest - def testEmptyConsumerGroupId(): Unit = { - val admin = cluster.admin() - - // Creates the __consumer_offsets topics because it won't be created automatically - // in this test because it does not use FindCoordinator API. - try { - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = cluster.brokers.values().asScala.toSeq, - controllers = cluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group. Note that the member subscribes - // to an nonexistent topic. - val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequest.Builder( - new ConsumerGroupHeartbeatRequestData() - .setGroupId("") - .setMemberId(Uuid.randomUuid().toString) - .setMemberEpoch(0) - .setRebalanceTimeoutMs(5 * 60 * 1000) - .setSubscribedTopicNames(List("foo").asJava) - .setTopicPartitions(List.empty.asJava), - true - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) - consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REQUEST.code - }, msg = s"Did not receive the expected error. Last response $consumerGroupHeartbeatResponse.") - - // Verify the response. - assertEquals(Errors.INVALID_REQUEST.code, consumerGroupHeartbeatResponse.data.errorCode) - assertEquals("GroupId can't be empty.", consumerGroupHeartbeatResponse.data.errorMessage) - } finally { - admin.close() - } - } - @ClusterTest def testConsumerGroupHeartbeatWithEmptySubscription(): Unit = { val admin = cluster.admin() @@ -371,7 +329,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Did not receive the expected successful response. Last response $consumerGroupHeartbeatResponse.") @@ -390,7 +348,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Did not receive the expected successful response. Last response $consumerGroupHeartbeatResponse.") } finally { @@ -428,7 +386,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Static member could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -462,7 +420,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Heartbeats until the partitions are assigned. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Static member could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") @@ -483,7 +441,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC .setMemberEpoch(-2) ).build() - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) // Verify the response. assertEquals(-2, consumerGroupHeartbeatResponse.data.memberEpoch) @@ -500,7 +458,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC .setTopicPartitions(List.empty.asJava) ).build() - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) // Verify the response. assertNotNull(consumerGroupHeartbeatResponse.data.memberId) @@ -549,7 +507,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -583,7 +541,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Heartbeats until the partitions are assigned. consumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $consumerGroupHeartbeatResponse.") @@ -605,14 +563,14 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC ).build() // Validating that trying to join with an in-use instanceId would throw an UnreleasedInstanceIdException. - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) assertEquals(Errors.UNRELEASED_INSTANCE_ID.code, consumerGroupHeartbeatResponse.data.errorCode) // The new static member join group will keep failing with an UnreleasedInstanceIdException // until eventually it gets through because the existing member will be kicked out // because of not sending a heartbeat till session timeout expiry. TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && consumerGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not re-join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -662,7 +620,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // here because the group coordinator is loaded in the background. var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -690,7 +648,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC // Verify the response. The heartbeat interval was updated. TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && newHeartbeatIntervalMs == consumerGroupHeartbeatResponse.data.heartbeatIntervalMs }, msg = s"Dynamic update consumer group config failed. Last response $consumerGroupHeartbeatResponse.") @@ -723,7 +681,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.INVALID_REQUEST.code }, msg = "Should fail due to invalid member id.") } finally { @@ -754,7 +712,7 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC var consumerGroupHeartbeatResponse: ConsumerGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { - consumerGroupHeartbeatResponse = connectAndReceive[ConsumerGroupHeartbeatResponse](consumerGroupHeartbeatRequest) + consumerGroupHeartbeatResponse = connectAndReceive(consumerGroupHeartbeatRequest) consumerGroupHeartbeatResponse.data.errorCode == Errors.NONE.code }, msg = s"Could not join the group successfully. Last response $consumerGroupHeartbeatResponse.") @@ -763,4 +721,12 @@ class ConsumerGroupHeartbeatRequestTest(cluster: ClusterInstance) extends GroupC assertFalse(memberId.isEmpty) admin.close() } + + private def connectAndReceive(request: ConsumerGroupHeartbeatRequest): ConsumerGroupHeartbeatResponse = { + IntegrationTestUtils.connectAndReceive[ConsumerGroupHeartbeatResponse]( + request, + cluster.anyBrokerSocketServer(), + cluster.clientListener() + ) + } } diff --git a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala index 1b1dec69eaf73..0007f32714626 100644 --- a/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ConsumerProtocolMigrationTest.scala @@ -19,7 +19,7 @@ package kafka.server import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchRequestData, OffsetFetchResponseData, SyncGroupResponseData} +import org.apache.kafka.common.message.{JoinGroupResponseData, ListGroupsResponseData, OffsetFetchResponseData, SyncGroupResponseData} import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance @@ -34,16 +34,12 @@ import java.util.Collections import scala.jdk.CollectionConverters._ @Timeout(120) -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT)) class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -53,6 +49,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "upgrade") ) ) @@ -62,6 +60,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "downgrade") ) ) @@ -71,6 +71,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "disabled") ) ) @@ -80,6 +82,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -89,6 +93,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "upgrade") ) ) @@ -98,6 +104,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "downgrade") ) ) @@ -107,6 +115,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "disabled") ) ) @@ -116,6 +126,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -125,6 +137,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "upgrade") ) ) @@ -134,6 +148,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "downgrade") ) ) @@ -143,6 +159,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "disabled") ) ) @@ -152,6 +170,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -161,6 +181,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -170,6 +192,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -179,6 +203,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -188,6 +214,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "upgrade") ) ) @@ -260,6 +288,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "downgrade") ) ) @@ -362,6 +392,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "disabled") ) ) @@ -398,6 +430,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "disabled") ) ) @@ -459,6 +493,8 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord */ @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.CONSUMER_GROUP_MIGRATION_POLICY_CONFIG, value = "bidirectional") ) ) @@ -690,7 +726,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord val topicName = "foo" // Create the topic. - val topicId = createTopic( + createTopic( topic = topicName, numPartitions = 3 ) @@ -702,7 +738,6 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = "member-id", memberEpoch = -1, topic = topicName, - topicId = topicId, partition = 0, offset = 1000L, expectedError = Errors.NONE, @@ -766,7 +801,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -866,7 +901,6 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = memberId1, memberEpoch = 1, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + 10 * version + partitionId, expectedError = Errors.NONE, @@ -883,8 +917,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord .setGroupId(groupId) .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -898,16 +931,14 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).asJava) ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setMemberId(memberId1) - .setMemberEpoch(1) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 2).asJava) - ).asJava), + groupId = groupId, + memberId = memberId1, + memberEpoch = 1, + partitions = List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1), + new TopicPartition("foo", 2) + ), requireStable = false, version = version.toShort ) @@ -1101,7 +1132,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -1169,7 +1200,6 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord memberId = memberId1, memberEpoch = 1, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + 10 * version + partitionId, expectedError = Errors.NONE, @@ -1186,8 +1216,7 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord .setGroupId(groupId) .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -1201,16 +1230,14 @@ class ConsumerProtocolMigrationTest(cluster: ClusterInstance) extends GroupCoord ).asJava) ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setMemberId(memberId1) - .setMemberEpoch(1) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 2).asJava) - ).asJava), + groupId = groupId, + memberId = memberId1, + memberEpoch = 1, + partitions = List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1), + new TopicPartition("foo", 2) + ), requireStable = false, version = version.toShort ) diff --git a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala index 43c7d5aecf464..db0b3d0c599c5 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerApisTest.scala @@ -18,6 +18,7 @@ package kafka.server import kafka.network.RequestChannel +import kafka.raft.RaftManager import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache import org.apache.kafka.clients.admin.AlterConfigOp @@ -25,7 +26,7 @@ import org.apache.kafka.common.Uuid.ZERO_UUID import org.apache.kafka.common.acl.AclOperation import org.apache.kafka.common.config.{ConfigResource, TopicConfig} import org.apache.kafka.common.errors._ -import org.apache.kafka.common.internals.{Plugin, Topic} +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterConfigsResource => OldAlterConfigsResource, AlterConfigsResourceCollection => OldAlterConfigsResourceCollection, AlterableConfig => OldAlterableConfig, AlterableConfigCollection => OldAlterableConfigCollection} import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => OldAlterConfigsResourceResponse} @@ -48,22 +49,17 @@ import org.apache.kafka.common.security.auth.{KafkaPrincipal, SecurityProtocol} import org.apache.kafka.common.test.MockController import org.apache.kafka.common.utils.MockTime import org.apache.kafka.common.{ElectionType, Uuid} -import org.apache.kafka.common.requests.RequestHeader import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT import org.apache.kafka.controller.{Controller, ControllerRequestContext, ResultOrError} import org.apache.kafka.image.publisher.ControllerRegistrationsPublisher import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.network.metrics.RequestChannelMetrics -import org.apache.kafka.network.Session -import org.apache.kafka.raft.{QuorumConfig, RaftManager} -import org.apache.kafka.server.SimpleApiVersionManager +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, AuthorizationResult, Authorizer} import org.apache.kafka.server.common.{ApiMessageAndVersion, FinalizedFeatures, KRaftVersion, MetadataVersion, ProducerIdsBlock, RequestLocal} import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs} -import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager} import org.apache.kafka.server.util.FutureUtils import org.apache.kafka.storage.internals.log.CleanerConfig -import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} import org.junit.jupiter.params.ParameterizedTest @@ -116,23 +112,21 @@ class ControllerApisTest { private val clientRequestQuotaManager: ClientRequestQuotaManager = mock(classOf[ClientRequestQuotaManager]) private val neverThrottlingClientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) when(neverThrottlingClientControllerQuotaManager.newQuotaFor( - any(classOf[Session]), - any(classOf[RequestHeader]), + any(classOf[RequestChannel.Request]), any(classOf[Short]) )).thenReturn( MockControllerMutationQuota(Integer.MAX_VALUE) // never throttles ) private val alwaysThrottlingClientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) when(alwaysThrottlingClientControllerQuotaManager.newQuotaFor( - any(classOf[Session]), - any(classOf[RequestHeader]), + any(classOf[RequestChannel.Request]), any(classOf[Short]) )).thenReturn( MockControllerMutationQuota(0) // always throttles ) private val replicaQuotaManager: ReplicationQuotaManager = mock(classOf[ReplicationQuotaManager]) private val raftManager: RaftManager[ApiMessageAndVersion] = mock(classOf[RaftManager[ApiMessageAndVersion]]) - private val metadataCache: KRaftMetadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + private val metadataCache: KRaftMetadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) private val quotasNeverThrottleControllerMutations = new QuotaManagers( clientQuotaManager, @@ -156,7 +150,7 @@ class ControllerApisTest { private var controllerApis: ControllerApis = _ - private def createControllerApis(authorizer: Option[Plugin[Authorizer]], + private def createControllerApis(authorizer: Option[Authorizer], controller: Controller, props: Properties = new Properties(), throttle: Boolean = false): ControllerApis = { @@ -205,7 +199,7 @@ class ControllerApisTest { requestChannelMetrics) } - def createDenyAllAuthorizer(): Plugin[Authorizer] = { + def createDenyAllAuthorizer(): Authorizer = { val authorizer = mock(classOf[Authorizer]) when(authorizer.authorize( any(classOf[AuthorizableRequestContext]), @@ -213,7 +207,7 @@ class ControllerApisTest { )).thenReturn( singletonList(AuthorizationResult.DENIED) ) - Plugin.wrapInstance(authorizer, null, "authorizer.class.name") + authorizer } @Test @@ -956,18 +950,18 @@ class ControllerApisTest { controllerApis = createControllerApis(None, controller, props) val request = new DeleteTopicsRequestData() request.topics().add(new DeleteTopicState().setName("foo").setTopicId(ZERO_UUID)) - - TestUtils.assertFutureThrows(classOf[TopicDeletionDisabledException], controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, - ApiKeys.DELETE_TOPICS.latestVersion().toInt, - hasClusterAuth = false, - _ => Set("foo", "bar"), - _ => Set("foo", "bar"))) - - TestUtils.assertFutureThrows(classOf[InvalidRequestException], controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, - 1, - hasClusterAuth = false, - _ => Set("foo", "bar"), - _ => Set("foo", "bar"))) + assertThrows(classOf[TopicDeletionDisabledException], + () => controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, + ApiKeys.DELETE_TOPICS.latestVersion().toInt, + hasClusterAuth = false, + _ => Set("foo", "bar"), + _ => Set("foo", "bar"))) + assertThrows(classOf[InvalidRequestException], + () => controllerApis.deleteTopics(ANONYMOUS_CONTEXT, request, + 1, + hasClusterAuth = false, + _ => Set("foo", "bar"), + _ => Set("foo", "bar"))) } @ParameterizedTest @@ -1014,8 +1008,7 @@ class ControllerApisTest { .newInitialTopic("foo", Uuid.fromString("vZKYST0pSA2HO5x_6hoO2Q")) .build() val authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - controllerApis = createControllerApis(Some(authorizerPlugin), controller) + controllerApis = createControllerApis(Some(authorizer), controller) val requestData = new CreatePartitionsRequestData() requestData.topics().add(new CreatePartitionsTopic().setName("foo").setAssignments(null).setCount(2)) requestData.topics().add(new CreatePartitionsTopic().setName("bar").setAssignments(null).setCount(10)) @@ -1075,9 +1068,8 @@ class ControllerApisTest { @Test def testElectLeadersAuthorization(): Unit = { val authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") val controller = mock(classOf[Controller]) - controllerApis = createControllerApis(Some(authorizerPlugin), controller) + controllerApis = createControllerApis(Some(authorizer), controller) val request = new ElectLeadersRequest.Builder( ElectionType.PREFERRED, null, @@ -1220,8 +1212,7 @@ class ControllerApisTest { def testAssignReplicasToDirs(): Unit = { val controller = mock(classOf[Controller]) val authorizer = mock(classOf[Authorizer]) - val authorizerPlugin = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - controllerApis = createControllerApis(Some(authorizerPlugin), controller) + controllerApis = createControllerApis(Some(authorizer), controller) val request = new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()).build() when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(Collections.singletonList(new Action( @@ -1295,22 +1286,20 @@ class ControllerApisTest { @Test def testUnauthorizedControllerRegistrationRequest(): Unit = { - val exception = assertThrows(classOf[ClusterAuthorizationException], () => { + assertThrows(classOf[ClusterAuthorizationException], () => { controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build()) controllerApis.handleControllerRegistration(buildRequest( new ControllerRegistrationRequest(new ControllerRegistrationRequestData(), 0.toShort))) }) - assertTrue(exception.getMessage.contains("needs CLUSTER_ACTION permission")) } @Test def testUnauthorizedDescribeClusterRequest(): Unit = { - val exception = assertThrows(classOf[ClusterAuthorizationException], () => { + assertThrows(classOf[ClusterAuthorizationException], () => { controllerApis = createControllerApis(Some(createDenyAllAuthorizer()), new MockController.Builder().build()) controllerApis.handleDescribeCluster(buildRequest( new DescribeClusterRequest(new DescribeClusterRequestData(), 1.toShort))) }) - assertTrue(exception.getMessage.contains("needs ALTER permission")) } @AfterEach diff --git a/core/src/test/scala/unit/kafka/server/ControllerConfigurationValidatorTest.scala b/core/src/test/scala/unit/kafka/server/ControllerConfigurationValidatorTest.scala index 3056753f53bf4..9f903336d23a7 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerConfigurationValidatorTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerConfigurationValidatorTest.scala @@ -128,9 +128,9 @@ class ControllerConfigurationValidatorTest { @Test def testValidClientMetricsConfig(): Unit = { val config = new util.TreeMap[String, String]() - config.put(ClientMetricsConfigs.INTERVAL_MS_CONFIG, "2000") - config.put(ClientMetricsConfigs.METRICS_CONFIG, "org.apache.kafka.client.producer.partition.queue.,org.apache.kafka.client.producer.partition.latency") - config.put(ClientMetricsConfigs.MATCH_CONFIG, "client_instance_id=b69cc35a-7a54-4790-aa69-cc2bd4ee4538,client_id=1" + + config.put(ClientMetricsConfigs.PUSH_INTERVAL_MS, "2000") + config.put(ClientMetricsConfigs.SUBSCRIPTION_METRICS, "org.apache.kafka.client.producer.partition.queue.,org.apache.kafka.client.producer.partition.latency") + config.put(ClientMetricsConfigs.CLIENT_MATCH_PATTERN, "client_instance_id=b69cc35a-7a54-4790-aa69-cc2bd4ee4538,client_id=1" + ",client_software_name=apache-kafka-java,client_software_version=2.8.0-SNAPSHOT,client_source_address=127.0.0.1," + "client_source_port=1234") validator.validate(new ConfigResource(CLIENT_METRICS, "subscription-1"), config, emptyMap()) @@ -147,12 +147,12 @@ class ControllerConfigurationValidatorTest { @Test def testInvalidIntervalClientMetricsConfig(): Unit = { val config = new util.TreeMap[String, String]() - config.put(ClientMetricsConfigs.INTERVAL_MS_CONFIG, "10") + config.put(ClientMetricsConfigs.PUSH_INTERVAL_MS, "10") assertEquals("Invalid value 10 for interval.ms, interval must be between 100 and 3600000 (1 hour)", assertThrows(classOf[InvalidRequestException], () => validator.validate( new ConfigResource(CLIENT_METRICS, "subscription-1"), config, emptyMap())). getMessage) - config.put(ClientMetricsConfigs.INTERVAL_MS_CONFIG, "3600001") + config.put(ClientMetricsConfigs.PUSH_INTERVAL_MS, "3600001") assertEquals("Invalid value 3600001 for interval.ms, interval must be between 100 and 3600000 (1 hour)", assertThrows(classOf[InvalidRequestException], () => validator.validate( new ConfigResource(CLIENT_METRICS, "subscription-1"), config, emptyMap())). getMessage) @@ -170,7 +170,7 @@ class ControllerConfigurationValidatorTest { @Test def testInvalidMatchClientMetricsConfig(): Unit = { val config = new util.TreeMap[String, String]() - config.put(ClientMetricsConfigs.MATCH_CONFIG, "10") + config.put(ClientMetricsConfigs.CLIENT_MATCH_PATTERN, "10") assertEquals("Illegal client matching pattern: 10", assertThrows(classOf[InvalidConfigurationException], () => validator.validate( new ConfigResource(CLIENT_METRICS, "subscription-1"), config, emptyMap())). getMessage) diff --git a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala index a40087a597376..2a7cfe35a85b0 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaManagerTest.scala @@ -25,14 +25,12 @@ import org.apache.kafka.common.metrics.QuotaViolationException import org.apache.kafka.common.metrics.stats.TokenBucket import org.apache.kafka.common.utils.MockTime import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager, PermissiveControllerMutationQuota, QuotaType, StrictControllerMutationQuota} +import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertFalse import org.junit.jupiter.api.Test -import java.util.Optional - class StrictControllerMutationQuotaTest { @Test def testControllerMutationQuotaViolation(): Unit = { @@ -120,7 +118,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { private val config = new ClientQuotaManagerConfig(10, 1) private def withQuotaManager(f: ControllerMutationQuotaManager => Unit): Unit = { - val quotaManager = new ControllerMutationQuotaManager(config, metrics, time,"", Optional.empty()) + val quotaManager = new ControllerMutationQuotaManager(config, metrics, time,"", None) try { f(quotaManager) } finally { @@ -139,18 +137,18 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { sensor.add(metricName, new TokenBucket) val metric = metrics.metric(metricName) - assertEquals(0, throttleTimeMs(new QuotaViolationException(metric, 0, 10))) - assertEquals(500, throttleTimeMs(new QuotaViolationException(metric, -5, 10))) - assertEquals(1000, throttleTimeMs(new QuotaViolationException(metric, -10, 10))) + assertEquals(0, throttleTimeMs(new QuotaViolationException(metric, 0, 10), time.milliseconds())) + assertEquals(500, throttleTimeMs(new QuotaViolationException(metric, -5, 10), time.milliseconds())) + assertEquals(1000, throttleTimeMs(new QuotaViolationException(metric, -10, 10), time.milliseconds())) } @Test def testControllerMutationQuotaViolation(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity(User)), - Optional.of(new ClientQuotaManager.ClientIdEntity(ClientId)), - Optional.of(Quota.upperBound(10)) + Some(User).map(s => ClientQuotaManager.UserEntity(s)), + Some(ClientQuotaManager.ClientIdEntity(ClientId)), + Some(Quota.upperBound(10)) ) val queueSizeMetric = metrics.metrics().get( metrics.metricName("queue-size", QuotaType.CONTROLLER_MUTATION.toString, "")) @@ -182,12 +180,12 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { assertEquals(1, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) // After a request is delayed, the callback cannot be triggered immediately - quotaManager.processThrottledChannelReaperDoWork() + quotaManager.throttledChannelReaper.doWork() assertEquals(0, numCallbacks) // Callback can only be triggered after the delay time passes time.sleep(throttleTime) - quotaManager.processThrottledChannelReaperDoWork() + quotaManager.throttledChannelReaper.doWork() assertEquals(0, queueSizeMetric.metricValue.asInstanceOf[Double].toInt) assertEquals(1, numCallbacks) @@ -201,7 +199,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testNewStrictQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = { withQuotaManager { quotaManager => - assertEquals(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, + assertEquals(UnboundedControllerMutationQuota, quotaManager.newStrictQuotaFor(buildSession(User), ClientId)) } } @@ -210,9 +208,9 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { def testNewStrictQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity(User)), - Optional.of(new ClientQuotaManager.ClientIdEntity(ClientId)), - Optional.of(Quota.upperBound(10)) + Some(User).map(s => ClientQuotaManager.UserEntity(s)), + Some(ClientQuotaManager.ClientIdEntity(ClientId)), + Some(Quota.upperBound(10)) ) val quota = quotaManager.newStrictQuotaFor(buildSession(User), ClientId) assertTrue(quota.isInstanceOf[StrictControllerMutationQuota]) @@ -223,7 +221,7 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { @Test def testNewPermissiveQuotaForReturnsUnboundedQuotaWhenQuotaIsDisabled(): Unit = { withQuotaManager { quotaManager => - assertEquals(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA, + assertEquals(UnboundedControllerMutationQuota, quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId)) } } @@ -232,9 +230,9 @@ class ControllerMutationQuotaManagerTest extends BaseClientQuotaManagerTest { def testNewPermissiveQuotaForReturnsStrictQuotaWhenQuotaIsEnabled(): Unit = { withQuotaManager { quotaManager => quotaManager.updateQuota( - Optional.of(new ClientQuotaManager.UserEntity(User)), - Optional.of(new ClientQuotaManager.ClientIdEntity(ClientId)), - Optional.of(Quota.upperBound(10)) + Some(User).map(s => ClientQuotaManager.UserEntity(s)), + Some(ClientQuotaManager.ClientIdEntity(ClientId)), + Some(Quota.upperBound(10)) ) val quota = quotaManager.newPermissiveQuotaFor(buildSession(User), ClientId) assertTrue(quota.isInstanceOf[PermissiveControllerMutationQuota]) diff --git a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala index 516b5edd082f3..f63434a256166 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerMutationQuotaTest.scala @@ -16,6 +16,7 @@ package kafka.server import java.util.Properties import java.util.concurrent.ExecutionException import java.util.concurrent.TimeUnit +import kafka.server.ClientQuotaManager.DefaultTags import kafka.utils.TestUtils import org.apache.kafka.common.config.internals.BrokerSecurityConfigs import org.apache.kafka.common.internals.KafkaFutureImpl @@ -42,13 +43,15 @@ import org.apache.kafka.common.security.auth.AuthenticationContext import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuilder import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} -import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType} +import org.apache.kafka.server.quota.QuotaType import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Assertions.assertEquals import org.junit.jupiter.api.Assertions.assertTrue import org.junit.jupiter.api.Assertions.fail -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.collection.Seq import scala.jdk.CollectionConverters._ @@ -123,8 +126,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { waitUserQuota(ThrottledPrincipal.getName, ControllerMutationRate) } - @Test - def testSetUnsetQuota(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSetUnsetQuota(quorum: String): Unit = { val rate = 1.5 val principal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "User") // Default Value @@ -139,8 +143,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { waitUserQuota(principal.getName, Long.MaxValue) } - @Test - def testQuotaMetric(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testQuotaMetric(quorum: String): Unit = { asPrincipal(ThrottledPrincipal) { // Metric is lazily created assertTrue(quotaMetric(principal.getName).isEmpty) @@ -161,8 +166,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testStrictCreateTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testStrictCreateTopicsRequest(quorum: String): Unit = { asPrincipal(ThrottledPrincipal) { // Create two topics worth of 30 partitions each. As we use a strict quota, we // expect one to be created and one to be rejected. @@ -184,8 +190,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testPermissiveCreateTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPermissiveCreateTopicsRequest(quorum: String): Unit = { asPrincipal(ThrottledPrincipal) { // Create two topics worth of 30 partitions each. As we use a permissive quota, we // expect both topics to be created. @@ -197,8 +204,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testUnboundedCreateTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnboundedCreateTopicsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { // Create two topics worth of 30 partitions each. As we use an user without quota, we // expect both topics to be created. The throttle time should be equal to 0. @@ -208,8 +216,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testStrictDeleteTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testStrictDeleteTopicsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) } @@ -235,8 +244,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testPermissiveDeleteTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPermissiveDeleteTopicsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) } @@ -252,8 +262,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testUnboundedDeleteTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnboundedDeleteTopicsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWith30Partitions, StrictCreateTopicsRequestVersion) @@ -265,8 +276,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testStrictCreatePartitionsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testStrictCreatePartitionsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) } @@ -292,8 +304,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testPermissiveCreatePartitionsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPermissiveCreatePartitionsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) } @@ -309,8 +322,9 @@ class ControllerMutationQuotaTest extends BaseRequestTest { } } - @Test - def testUnboundedCreatePartitionsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnboundedCreatePartitionsRequest(quorum: String): Unit = { asPrincipal(UnboundedPrincipal) { createTopics(TopicsWithOnePartition, StrictCreatePartitionsRequestVersion) @@ -388,7 +402,7 @@ class ControllerMutationQuotaTest extends BaseRequestTest { "tokens", QuotaType.CONTROLLER_MUTATION.toString, "Tracking remaining tokens in the token bucket per user/client-id", - java.util.Map.of(ClientQuotaManager.USER_TAG, user, ClientQuotaManager.CLIENT_ID_TAG, "")) + Map(DefaultTags.User -> user, DefaultTags.ClientId -> "").asJava) Option(metrics.metric(metricName)) } diff --git a/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala index 46ea20758e2df..61cc1363027ea 100644 --- a/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ControllerRegistrationManagerTest.scala @@ -76,7 +76,7 @@ class ControllerRegistrationManagerTest { "controller-registration-manager-test-", createSupportedFeatures(MetadataVersion.IBP_3_7_IV0), RecordTestUtils.createTestControllerRegistration(1, false).incarnationId(), - ListenerInfo.create(context.config.controllerListeners.asJava), + ListenerInfo.create(context.config.controllerListeners.map(_.toJava).asJava), new ExponentialBackoff(1, 2, 100, 0.02)) } diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala index df2ef30ee45d0..f4d2916986f36 100644 --- a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestTest.scala @@ -24,14 +24,16 @@ import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopicCol import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.CreateTopicsRequest import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { - @Test - def testValidCreateTopicsRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testValidCreateTopicsRequests(quorum: String): Unit = { // Generated assignments validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic1")))) validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic2", replicationFactor = 3)))) @@ -59,8 +61,9 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { topicReq("topic14", replicationFactor = -1, numPartitions = 2)))) } - @Test - def testErrorCreateTopicsRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testErrorCreateTopicsRequests(quorum: String): Unit = { val existingTopic = "existing-topic" createTopic(existingTopic) // Basic @@ -74,6 +77,9 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-config", config=Map("not.a.property" -> "error")))), Map("error-config" -> error(Errors.INVALID_CONFIG)), checkErrorMessage = false) + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-config-value", + config=Map("message.format.version" -> "invalid-value")))), + Map("error-config-value" -> error(Errors.INVALID_CONFIG)), checkErrorMessage = false) validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-assignment", assignment=Map(0 -> List(0, 1), 1 -> List(0))))), Map("error-assignment" -> error(Errors.INVALID_REPLICA_ASSIGNMENT)), checkErrorMessage = false) @@ -96,8 +102,9 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { validateTopicExists("partial-none") } - @Test - def testInvalidCreateTopicsRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidCreateTopicsRequests(quorum: String): Unit = { // Partitions/ReplicationFactor and ReplicaAssignment validateErrorCreateTopicsRequests(topicsReq(Seq( topicReq("bad-args-topic", numPartitions = 10, replicationFactor = 3, @@ -110,8 +117,9 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { Map("bad-args-topic" -> error(Errors.INVALID_REQUEST)), checkErrorMessage = false) } - @Test - def testCreateTopicsRequestVersions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateTopicsRequestVersions(quorum: String): Unit = { for (version <- ApiKeys.CREATE_TOPICS.oldestVersion to ApiKeys.CREATE_TOPICS.latestVersion) { val topic = s"topic_$version" val data = new CreateTopicsRequestData() @@ -148,8 +156,9 @@ class CreateTopicsRequestTest extends AbstractCreateTopicsRequestTest { } } - @Test - def testCreateClusterMetadataTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateClusterMetadataTopic(quorum: String): Unit = { validateErrorCreateTopicsRequests( topicsReq(Seq(topicReq(Topic.CLUSTER_METADATA_TOPIC_NAME))), Map(Topic.CLUSTER_METADATA_TOPIC_NAME -> diff --git a/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala new file mode 100644 index 0000000000000..96ebfd66683b6 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/CreateTopicsRequestWithPolicyTest.scala @@ -0,0 +1,179 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server + +import java.util +import java.util.Properties +import org.apache.kafka.common.config.TopicConfig +import org.apache.kafka.common.errors.PolicyViolationException +import org.apache.kafka.common.internals.Topic +import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.server.config.ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG +import org.apache.kafka.server.policy.CreateTopicPolicy +import org.apache.kafka.server.policy.CreateTopicPolicy.RequestMetadata +import org.junit.jupiter.api.TestInfo +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource + +import scala.jdk.CollectionConverters._ + +class CreateTopicsRequestWithPolicyTest extends AbstractCreateTopicsRequestTest { + import CreateTopicsRequestWithPolicyTest._ + + override def brokerPropertyOverrides(properties: Properties): Unit = { + super.brokerPropertyOverrides(properties) + properties.put(CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, classOf[Policy].getName) + } + + override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { + val properties = new Properties() + properties.put(CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG, classOf[Policy].getName) + Seq(properties) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testValidCreateTopicsRequests(quorum: String): Unit = { + validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic1", + numPartitions = 5)))) + + validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic2", + numPartitions = 5, replicationFactor = 3)), + validateOnly = true)) + + validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic3", + numPartitions = 11, replicationFactor = 2, + config = Map(TopicConfig.RETENTION_MS_CONFIG -> 4999.toString))), + validateOnly = true)) + + validateValidCreateTopicsRequests(topicsReq(Seq(topicReq("topic4", + assignment = Map(0 -> List(1, 0), 1 -> List(0, 1)))))) + } + + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testErrorCreateTopicsRequests(quorum: String): Unit = { + val existingTopic = "existing-topic" + createTopic(existingTopic, 5) + + // Policy violations + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic1", + numPartitions = 4, replicationFactor = 1))), + Map("policy-topic1" -> error(Errors.POLICY_VIOLATION, Some("Topics should have at least 5 partitions, received 4")))) + + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic2", + numPartitions = 4, replicationFactor = 3)), validateOnly = true), + Map("policy-topic2" -> error(Errors.POLICY_VIOLATION, Some("Topics should have at least 5 partitions, received 4")))) + + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic3", + numPartitions = 11, replicationFactor = 2, + config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), + Map("policy-topic3" -> error(Errors.POLICY_VIOLATION, + Some("RetentionMs should be less than 5000ms if replicationFactor > 5")))) + + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic4", + numPartitions = 11, replicationFactor = 3, + config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), + Map("policy-topic4" -> error(Errors.POLICY_VIOLATION, + Some("RetentionMs should be less than 5000ms if replicationFactor > 5")))) + + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("policy-topic5", + assignment = Map(0 -> List(1), 1 -> List(0)), + config = Map(TopicConfig.RETENTION_MS_CONFIG -> 5001.toString))), validateOnly = true), + Map("policy-topic5" -> error(Errors.POLICY_VIOLATION, + Some("Topic partitions should have at least 2 partitions, received 1 for partition 0")))) + + // Check that basic errors still work + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq(existingTopic, + numPartitions = 5, replicationFactor = 1))), + Map(existingTopic -> error(Errors.TOPIC_ALREADY_EXISTS, + Some("Topic 'existing-topic' already exists.")))) + + var errorMsg = "Unable to replicate the partition 4 time(s): The target replication factor of 4 cannot be reached because only 3 broker(s) are registered." + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication", + numPartitions = 10, replicationFactor = brokerCount + 1)), validateOnly = true), + Map("error-replication" -> error(Errors.INVALID_REPLICATION_FACTOR, + Some(errorMsg)))) + + errorMsg = "Replication factor must be larger than 0, or -1 to use the default value." + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-replication2", + numPartitions = 10, replicationFactor = -2)), validateOnly = true), + Map("error-replication2" -> error(Errors.INVALID_REPLICATION_FACTOR, + Some(errorMsg)))) + + errorMsg = "Number of partitions was set to an invalid non-positive value." + validateErrorCreateTopicsRequests(topicsReq(Seq(topicReq("error-partitions", + numPartitions = -2, replicationFactor = 1)), validateOnly = true), + Map("error-partitions" -> error(Errors.INVALID_PARTITIONS, + Some(errorMsg)))) + } + +} + +object CreateTopicsRequestWithPolicyTest { + + class Policy extends CreateTopicPolicy { + + var configs: Map[String, _] = _ + var closed = false + + def configure(configs: util.Map[String, _]): Unit = { + this.configs = configs.asScala.toMap + } + + def validate(requestMetadata: RequestMetadata): Unit = { + if (Topic.isInternal(requestMetadata.topic())) { + // Do not verify internal topics + return + } + require(!closed, "Policy should not be closed") + require(configs.nonEmpty, "configure should have been called with non empty configs") + + import requestMetadata._ + if (numPartitions != null || replicationFactor != null) { + require(numPartitions != null, s"numPartitions should not be null, but it is $numPartitions") + require(replicationFactor != null, s"replicationFactor should not be null, but it is $replicationFactor") + require(replicasAssignments == null, s"replicaAssignments should be null, but it is $replicasAssignments") + + if (numPartitions < 5) + throw new PolicyViolationException(s"Topics should have at least 5 partitions, received $numPartitions") + + if (numPartitions > 10) { + if (requestMetadata.configs.asScala.get(TopicConfig.RETENTION_MS_CONFIG).fold(true)(_.toInt > 5000)) + throw new PolicyViolationException("RetentionMs should be less than 5000ms if replicationFactor > 5") + } else + require(requestMetadata.configs.isEmpty, s"Topic configs should be empty, but it is ${requestMetadata.configs}") + + } else { + require(numPartitions == null, s"numPartitions should be null, but it is $numPartitions") + require(replicationFactor == null, s"replicationFactor should be null, but it is $replicationFactor") + require(replicasAssignments != null, s"replicaAssignments should not be null, but it is $replicasAssignments") + + replicasAssignments.asScala.toSeq.sortBy { case (tp, _) => tp }.foreach { case (partitionId, assignment) => + if (assignment.size < 2) + throw new PolicyViolationException("Topic partitions should have at least 2 partitions, received " + + s"${assignment.size} for partition $partitionId") + } + } + + } + + def close(): Unit = closed = true + + } +} diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala index 8e20f98aaafd3..b4c8d922dd9b7 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsOnPlainTextTest.scala @@ -21,8 +21,10 @@ import kafka.security.JaasTestUtils import java.util import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.common.errors.UnsupportedByAuthenticationException -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions.assertThrows +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.concurrent.ExecutionException import scala.jdk.javaapi.OptionConverters @@ -46,8 +48,9 @@ class DelegationTokenRequestsOnPlainTextTest extends BaseRequestTest { config } - @Test - def testDelegationTokenRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDelegationTokenRequests(quorum: String): Unit = { adminClient = Admin.create(createAdminConfig) val createResult = adminClient.createDelegationToken() diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala index 38040d0a120a0..2c211eb042a11 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsTest.scala @@ -25,7 +25,9 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.SecurityUtils import org.apache.kafka.server.config.DelegationTokenManagerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util import scala.concurrent.ExecutionException @@ -63,8 +65,9 @@ class DelegationTokenRequestsTest extends IntegrationTestHarness with SaslSetup config } - @Test - def testDelegationTokenRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDelegationTokenRequests(quorum: String): Unit = { adminClient = Admin.create(createAdminConfig) // create token1 with renewer1 diff --git a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala index bb55cf33ffd59..c380816f769fe 100644 --- a/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala +++ b/core/src/test/scala/unit/kafka/server/DelegationTokenRequestsWithDisableTokenFeatureTest.scala @@ -22,7 +22,9 @@ import org.apache.kafka.clients.admin.{Admin, AdminClientConfig} import org.apache.kafka.common.errors.DelegationTokenDisabledException import org.apache.kafka.common.security.auth.SecurityProtocol import org.junit.jupiter.api.Assertions.assertThrows -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util import scala.concurrent.ExecutionException @@ -53,8 +55,9 @@ class DelegationTokenRequestsWithDisableTokenFeatureTest extends BaseRequestTest config } - @Test - def testDelegationTokenRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDelegationTokenRequests(quorum: String): Unit = { adminClient = Admin.create(createAdminConfig) val createResult = adminClient.createDelegationToken() diff --git a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala index d945c02b6f5f5..f9b9e9c946aa2 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteGroupsRequestTest.scala @@ -22,33 +22,54 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.classic.ClassicGroupState -import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Assertions.{assertEquals, fail} -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT)) class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testDeleteGroupsWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testDeleteGroupsWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testDeleteGroups(true) } - @ClusterTest - def testDeleteGroupsWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testDeleteGroupsWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testDeleteGroups(false) + } + + @ClusterTest( + types = Array(Type.KRAFT, Type.CO_KRAFT), + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testDeleteGroupsWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { testDeleteGroups(false) } private def testDeleteGroups(useNewProtocol: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -78,8 +99,8 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator ) deleteGroups( - groupIds = List("grp-non-empty", "grp", ""), - expectedErrors = List(Errors.NON_EMPTY_GROUP, Errors.NONE, Errors.GROUP_ID_NOT_FOUND), + groupIds = List("grp-non-empty", "grp"), + expectedErrors = List(Errors.NON_EMPTY_GROUP, Errors.NONE), version = version.toShort ) @@ -89,7 +110,6 @@ class DeleteGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.GROUP_ID_NOT_FOUND, diff --git a/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala index de2eb967d9718..587fc4e5e626a 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteRecordsRequestTest.scala @@ -25,7 +25,8 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{DeleteRecordsRequest, DeleteRecordsResponse} import org.apache.kafka.common.serialization.StringSerializer import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.Collections import java.util.concurrent.TimeUnit @@ -35,8 +36,9 @@ class DeleteRecordsRequestTest extends BaseRequestTest { private val TIMEOUT_MS = 1000 private val MESSAGES_PRODUCED_PER_PARTITION = 10 - @Test - def testDeleteRecordsHappyCase(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteRecordsHappyCase(quorum: String): Unit = { val (topicPartition: TopicPartition, leaderId: Int) = createTopicAndSendRecords // Create the DeleteRecord request requesting deletion of offset which is not present @@ -59,8 +61,9 @@ class DeleteRecordsRequestTest extends BaseRequestTest { validateLogStartOffsetForTopic(topicPartition, offsetToDelete) } - @Test - def testErrorWhenDeletingRecordsWithInvalidOffset(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testErrorWhenDeletingRecordsWithInvalidOffset(quorum: String): Unit = { val (topicPartition: TopicPartition, leaderId: Int) = createTopicAndSendRecords // Create the DeleteRecord request requesting deletion of offset which is not present @@ -83,8 +86,9 @@ class DeleteRecordsRequestTest extends BaseRequestTest { validateLogStartOffsetForTopic(topicPartition, 0) } - @Test - def testErrorWhenDeletingRecordsWithInvalidTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testErrorWhenDeletingRecordsWithInvalidTopic(quorum: String): Unit = { val invalidTopicPartition = new TopicPartition("invalid-topic", 0) // Create the DeleteRecord request requesting deletion of offset which is not present val offsetToDelete = 1 diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala index d51aba04c8dac..4d89b0ae423d5 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestTest.scala @@ -28,16 +28,18 @@ import org.apache.kafka.common.requests.DeleteTopicsResponse import org.apache.kafka.common.requests.MetadataRequest import org.apache.kafka.common.requests.MetadataResponse import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.collection.Seq import scala.jdk.CollectionConverters._ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { - @Test - def testTopicDeletionClusterHasOfflinePartitions(): Unit = { - // Create two topics with one partition/replica. Make one of them offline. + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTopicDeletionClusterHasOfflinePartitions(quorum: String): Unit = { + // Create a two topics with one partition/replica. Make one of them offline. val offlineTopic = "topic-1" val onlineTopic = "topic-2" createTopicWithAssignment(offlineTopic, Map[Int, Seq[Int]](0 -> Seq(0))) @@ -47,9 +49,9 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { // Ensure one topic partition is offline. TestUtils.waitUntilTrue(() => { - aliveBrokers.head.metadataCache.getLeaderAndIsr(onlineTopic, 0).filter(_.leader() == 1).isPresent() && - aliveBrokers.head.metadataCache.getLeaderAndIsr(offlineTopic, 0).filter(_.leader() == - MetadataResponse.NO_LEADER_ID).isPresent() + aliveBrokers.head.metadataCache.getLeaderAndIsr(onlineTopic, 0).exists(_.leader() == 1) && + aliveBrokers.head.metadataCache.getLeaderAndIsr(offlineTopic, 0).exists(_.leader() == + MetadataResponse.NO_LEADER_ID) }, "Topic partition is not offline") // Delete the newly created topic and topic with offline partition. See the deletion is @@ -68,8 +70,9 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { "The topics are found in the Broker's cache") } - @Test - def testValidDeleteTopicRequests(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testValidDeleteTopicRequests(quorum: String): Unit = { val timeout = 10000 // Single topic createTopic("topic-1") @@ -135,8 +138,9 @@ class DeleteTopicsRequestTest extends BaseRequestTest with Logging { connectAndReceive[DeleteTopicsResponse](request, destination = socketServer) } - @Test - def testDeleteTopicsVersions(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteTopicsVersions(quorum: String): Unit = { val timeout = 10000 for (version <- ApiKeys.DELETE_TOPICS.oldestVersion to ApiKeys.DELETE_TOPICS.latestVersion) { info(s"Creating and deleting tests for version $version") diff --git a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala index d8d654082e9ad..4232030634cb8 100644 --- a/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala +++ b/core/src/test/scala/unit/kafka/server/DeleteTopicsRequestWithDeletionDisabledTest.scala @@ -25,7 +25,9 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{DeleteTopicsRequest, DeleteTopicsResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{Test, TestInfo} +import org.junit.jupiter.api.TestInfo +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { @@ -46,8 +48,9 @@ class DeleteTopicsRequestWithDeletionDisabledTest extends BaseRequestTest { props.map(KafkaConfig.fromProps) } - @Test - def testDeleteRecordsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDeleteRecordsRequest(quorum: String): Unit = { val topic = "topic-1" val request = new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() diff --git a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala index 1d3048cec6ae8..6e43f904c11c7 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeClusterRequestTest.scala @@ -27,7 +27,9 @@ import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.security.authorizer.AclEntry import org.apache.kafka.server.config.{ServerConfigs, ReplicationConfigs} import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.lang.{Byte => JByte} import java.util.Properties @@ -46,13 +48,15 @@ class DescribeClusterRequestTest extends BaseRequestTest { doSetup(testInfo, createOffsetsTopic = false) } - @Test - def testDescribeClusterRequestIncludingClusterAuthorizedOperations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeClusterRequestIncludingClusterAuthorizedOperations(quorum: String): Unit = { testDescribeClusterRequest(true) } - @Test - def testDescribeClusterRequestExcludingClusterAuthorizedOperations(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeClusterRequestExcludingClusterAuthorizedOperations(quorum: String): Unit = { testDescribeClusterRequest(false) } diff --git a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala index 67db0449ffe72..4f1ba4b9b2ca3 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeGroupsRequestTest.scala @@ -26,16 +26,27 @@ import org.junit.jupiter.api.Assertions.assertEquals import scala.jdk.CollectionConverters._ -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +@ClusterTestDefaults(types = Array(Type.KRAFT)) +class DescribeGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest(serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) -class DescribeGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testDescribeGroupsWithOldConsumerGroupProtocol(): Unit = { + )) + def testDescribeGroupsWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testDescribeGroups() + } + + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testDescribeGroupsWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testDescribeGroups() + } + + private def testDescribeGroups(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() @@ -93,15 +104,10 @@ class DescribeGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinat .setGroupId("grp-unknown") .setGroupState(ClassicGroupState.DEAD.toString) // Return DEAD group when the group does not exist. .setErrorCode(if (version >= 6) Errors.GROUP_ID_NOT_FOUND.code() else Errors.NONE.code()) - .setErrorMessage(if (version >= 6) "Group grp-unknown not found." else null), - new DescribedGroup() - .setGroupId("") - .setGroupState(ClassicGroupState.DEAD.toString) // Return DEAD group when the group does not exist. - .setErrorCode(if (version >= 6) Errors.GROUP_ID_NOT_FOUND.code() else Errors.NONE.code()) - .setErrorMessage(if (version >= 6) "Group not found." else null) + .setErrorMessage(if (version >= 6) "Group grp-unknown not found." else null) ), describeGroups( - groupIds = List("grp-1", "grp-2", "grp-unknown", ""), + groupIds = List("grp-1", "grp-2", "grp-unknown"), version = version.toShort ) ) diff --git a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala index ab2ea99782d11..d22b53c0cb67c 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeLogDirsRequestTest.scala @@ -25,7 +25,8 @@ import org.apache.kafka.common.message.DescribeLogDirsRequestData import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests._ import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ @@ -38,10 +39,11 @@ class DescribeLogDirsRequestTest extends BaseRequestTest { val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) - @Test - def testDescribeLogDirsRequest(): Unit = { - val onlineDir = new File(brokers.head.config.logDirs.get(0)).getAbsolutePath - val offlineDir = new File(brokers.head.config.logDirs.get(1)).getAbsolutePath + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeLogDirsRequest(quorum: String): Unit = { + val onlineDir = new File(brokers.head.config.logDirs.head).getAbsolutePath + val offlineDir = new File(brokers.head.config.logDirs.tail.head).getAbsolutePath brokers.head.replicaManager.handleLogDirFailure(offlineDir) createTopic(topic, partitionNum, 1) TestUtils.generateAndProduceMessages(brokers, topic, 10) diff --git a/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala index bc0e768c6f48f..2aa8f5a9e2ca0 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeQuorumRequestTest.scala @@ -19,12 +19,12 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.DescribeQuorumRequest.singletonRequest -import org.apache.kafka.common.requests.{DescribeQuorumRequest, DescribeQuorumResponse} +import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, DescribeQuorumRequest, DescribeQuorumResponse} import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions._ import scala.jdk.CollectionConverters._ +import scala.reflect.ClassTag @ClusterTestDefaults(types = Array(Type.KRAFT)) class DescribeQuorumRequestTest(cluster: ClusterInstance) { @@ -35,7 +35,7 @@ class DescribeQuorumRequestTest(cluster: ClusterInstance) { val request = new DescribeQuorumRequest.Builder( singletonRequest(KafkaRaftServer.MetadataPartition) ).build(version.toShort) - val response = IntegrationTestUtils.connectAndReceive[DescribeQuorumResponse](request, cluster.brokerBoundPorts().get(0)) + val response = connectAndReceive[DescribeQuorumResponse](request) assertEquals(Errors.NONE, Errors.forCode(response.data.errorCode)) assertEquals("", response.data.errorMessage) @@ -81,8 +81,21 @@ class DescribeQuorumRequestTest(cluster: ClusterInstance) { val nodes = response.data.nodes().asScala assertEquals(cluster.controllerIds().asScala, nodes.map(_.nodeId()).toSet) val node = nodes.find(_.nodeId() == cluster.controllers().keySet().asScala.head) - assertEquals(cluster.controllerListenerName().value(), node.get.listeners().asScala.head.name()) + assertEquals(cluster.controllerListenerName().get().value(), node.get.listeners().asScala.head.name()) } } } + + private def connectAndReceive[T <: AbstractResponse]( + request: AbstractRequest + )( + implicit classTag: ClassTag[T] + ): T = { + IntegrationTestUtils.connectAndReceive( + request, + cluster.brokerSocketServers().asScala.head, + cluster.clientListener() + ) + } + } diff --git a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala index 418753e4a6e18..7a4a885d9edd7 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestNotAuthorizedTest.scala @@ -24,7 +24,8 @@ import org.apache.kafka.common.requests.{DescribeUserScramCredentialsRequest, De import org.apache.kafka.metadata.authorizer.StandardAuthorizer import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.Properties @@ -38,8 +39,9 @@ class DescribeUserScramCredentialsRequestNotAuthorizedTest extends BaseRequestTe properties.put(BrokerSecurityConfigs.PRINCIPAL_BUILDER_CLASS_CONFIG, classOf[DescribeCredentialsTest.TestPrincipalBuilderReturningUnauthorized].getName) } - @Test - def testDescribeNotAuthorized(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeNotAuthorized(quorum: String): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData()).build() val response = sendDescribeUserScramCredentialsRequest(request) diff --git a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala index 850cca028e50e..bc8bd5a13ac29 100644 --- a/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/DescribeUserScramCredentialsRequestTest.scala @@ -28,6 +28,8 @@ import org.apache.kafka.common.security.authenticator.DefaultKafkaPrincipalBuild import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ @@ -46,8 +48,9 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { super.setUp(testInfo) } - @Test - def testDescribeNothing(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeNothing(quorum: String): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData()).build() val response = sendDescribeUserScramCredentialsRequest(request) @@ -57,8 +60,9 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(0, response.data.results.size, "Expected no credentials when describing everything and there are no credentials") } - @Test - def testDescribeWithNull(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeWithNull(quorum: String): Unit = { val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData().setUsers(null)).build() val response = sendDescribeUserScramCredentialsRequest(request) @@ -78,8 +82,9 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(Errors.NONE.code, error, "Did not expect controller error when routed to non-controller") } - @Test - def testDescribeSameUserTwice(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDescribeSameUserTwice(quorum: String): Unit = { val user = "user1" val userName = new UserName().setName(user) val request = new DescribeUserScramCredentialsRequest.Builder( @@ -93,8 +98,9 @@ class DescribeUserScramCredentialsRequestTest extends BaseRequestTest { assertEquals(s"Cannot describe SCRAM credentials for the same user twice in a single request: $user", result.errorMessage) } - @Test - def testUnknownUser(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnknownUser(quorum: String): Unit = { val unknownUser = "unknownUser" val request = new DescribeUserScramCredentialsRequest.Builder( new DescribeUserScramCredentialsRequestData().setUsers(List(new UserName().setName(unknownUser)).asJava)).build() diff --git a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala index 141b5138c0753..e8702eacfa318 100755 --- a/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicBrokerConfigTest.scala @@ -22,21 +22,20 @@ import java.util.{Optional, Properties, Map => JMap} import java.util.concurrent.{CompletionStage, TimeUnit} import java.util.concurrent.atomic.AtomicReference import kafka.log.LogManager +import kafka.log.remote.RemoteLogManager import kafka.network.{DataPlaneAcceptor, SocketServer} import kafka.utils.TestUtils import org.apache.kafka.common.{Endpoint, Reconfigurable} import org.apache.kafka.common.acl.{AclBinding, AclBindingFilter} import org.apache.kafka.common.config.{ConfigException, SslConfigs} -import org.apache.kafka.common.internals.Plugin import org.apache.kafka.common.metrics.{JmxReporter, Metrics} import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.server.DynamicThreadPool import org.apache.kafka.server.authorizer._ import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.server.log.remote.storage.{RemoteLogManager, RemoteLogManagerConfig} +import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.{KafkaYammerMetrics, MetricConfigs} import org.apache.kafka.server.util.KafkaScheduler import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, ProducerStateManagerConfig} @@ -175,7 +174,6 @@ class DynamicBrokerConfigTest { assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerCopierThreadPoolSize()) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerExpirationThreadPoolSize()) assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_READER_THREADS, config.remoteLogManagerConfig.remoteLogReaderThreads()) - assertEquals(RemoteLogManagerConfig.DEFAULT_REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE, config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize()) val serverMock = mock(classOf[KafkaBroker]) val remoteLogManager = mock(classOf[RemoteLogManager]) @@ -204,13 +202,6 @@ class DynamicBrokerConfigTest { config.dynamicConfig.updateDefaultConfig(props) assertEquals(6, config.remoteLogManagerConfig.remoteLogReaderThreads()) verify(remoteLogManager).resizeReaderThreadPool(6) - - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "3") - config.dynamicConfig.validate(props, perBrokerConfig = false) - config.dynamicConfig.updateDefaultConfig(props) - assertEquals(3, config.remoteLogManagerConfig.remoteLogManagerFollowerThreadPoolSize()) - verify(remoteLogManager).resizeFollowerThreadPool(3) - props.clear() verifyNoMoreInteractions(remoteLogManager) } @@ -249,33 +240,6 @@ class DynamicBrokerConfigTest { val err3 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = true)) assertTrue(err3.getMessage.contains("Value must be at least 1")) verifyNoMoreInteractions(remoteLogManager) - - val props4 = new Properties() - props4.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "10") - val err4 = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props4, perBrokerConfig = false)) - assertTrue(err4.getMessage.contains("value should not be greater than double the current value")) - verifyNoMoreInteractions(remoteLogManager) - } - - @Test - def testDynamicRemoteLogManagerFollowerThreadPoolSizeConfig(): Unit = { - val origProps = TestUtils.createBrokerConfig(0, port = 9092) - origProps.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP, "10") - val config = KafkaConfig(origProps) - - val serverMock = mock(classOf[KafkaBroker]) - val remoteLogManager = mock(classOf[RemoteLogManager]) - when(serverMock.config).thenReturn(config) - when(serverMock.remoteLogManagerOpt).thenReturn(Some(remoteLogManager)) - - config.dynamicConfig.initialize(None) - config.dynamicConfig.addBrokerReconfigurable(new DynamicRemoteLogConfig(serverMock)) - - val props = new Properties() - props.put(RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP, "2") - val err = assertThrows(classOf[ConfigException], () => config.dynamicConfig.validate(props, perBrokerConfig = false)) - assertTrue(err.getMessage.contains("value should be at least half the current value")) - verifyNoMoreInteractions(remoteLogManager) } @Test @@ -413,7 +377,7 @@ class DynamicBrokerConfigTest { def updateConfig(): Unit = { if (perBrokerConfig) - config.dynamicConfig.updateBrokerConfig(0, props) + config.dynamicConfig.updateBrokerConfig(0, config.dynamicConfig.toPersistentProps(props, perBrokerConfig)) else config.dynamicConfig.updateDefaultConfig(props) } @@ -504,7 +468,7 @@ class DynamicBrokerConfigTest { val metrics: Metrics = mock(classOf[Metrics]) when(kafkaServer.metrics).thenReturn(metrics) val quotaManagers: QuotaFactory.QuotaManagers = mock(classOf[QuotaFactory.QuotaManagers]) - when(quotaManagers.clientQuotaCallbackPlugin).thenReturn(Optional.empty()) + when(quotaManagers.clientQuotaCallback).thenReturn(Optional.empty()) when(kafkaServer.quotaManagers).thenReturn(quotaManagers) val socketServer: SocketServer = mock(classOf[SocketServer]) when(socketServer.reconfigurableConfigs).thenReturn(SocketServer.ReconfigurableConfigs) @@ -515,8 +479,7 @@ class DynamicBrokerConfigTest { when(kafkaServer.logManager).thenReturn(logManager) val authorizer = new TestAuthorizer - val authorizerPlugin: Plugin[Authorizer] = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - when(kafkaServer.authorizerPlugin).thenReturn(Some(authorizerPlugin)) + when(kafkaServer.authorizer).thenReturn(Some(authorizer)) kafkaServer.config.dynamicConfig.addReconfigurables(kafkaServer) props.put("super.users", "User:admin") @@ -551,15 +514,14 @@ class DynamicBrokerConfigTest { val metrics: Metrics = mock(classOf[Metrics]) when(controllerServer.metrics).thenReturn(metrics) val quotaManagers: QuotaFactory.QuotaManagers = mock(classOf[QuotaFactory.QuotaManagers]) - when(quotaManagers.clientQuotaCallbackPlugin).thenReturn(Optional.empty()) + when(quotaManagers.clientQuotaCallback).thenReturn(Optional.empty()) when(controllerServer.quotaManagers).thenReturn(quotaManagers) val socketServer: SocketServer = mock(classOf[SocketServer]) when(socketServer.reconfigurableConfigs).thenReturn(SocketServer.ReconfigurableConfigs) when(controllerServer.socketServer).thenReturn(socketServer) val authorizer = new TestAuthorizer - val authorizerPlugin: Plugin[Authorizer] = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - when(controllerServer.authorizerPlugin).thenReturn(Some(authorizerPlugin)) + when(controllerServer.authorizer).thenReturn(Some(authorizer)) controllerServer.config.dynamicConfig.addReconfigurables(controllerServer) props.put("super.users", "User:admin") @@ -597,15 +559,14 @@ class DynamicBrokerConfigTest { val metrics: Metrics = mock(classOf[Metrics]) when(controllerServer.metrics).thenReturn(metrics) val quotaManagers: QuotaFactory.QuotaManagers = mock(classOf[QuotaFactory.QuotaManagers]) - when(quotaManagers.clientQuotaCallbackPlugin).thenReturn(Optional.empty()) + when(quotaManagers.clientQuotaCallback).thenReturn(Optional.empty()) when(controllerServer.quotaManagers).thenReturn(quotaManagers) val socketServer: SocketServer = mock(classOf[SocketServer]) when(socketServer.reconfigurableConfigs).thenReturn(SocketServer.ReconfigurableConfigs) when(controllerServer.socketServer).thenReturn(socketServer) val authorizer = new TestAuthorizer - val authorizerPlugin: Plugin[Authorizer] = Plugin.wrapInstance(authorizer, null, "authorizer.class.name") - when(controllerServer.authorizerPlugin).thenReturn(Some(authorizerPlugin)) + when(controllerServer.authorizer).thenReturn(Some(authorizer)) controllerServer.config.dynamicConfig.addReconfigurables(controllerServer) props.put("super.users", "User:admin") @@ -632,7 +593,7 @@ class DynamicBrokerConfigTest { config.dynamicConfig.initialize(None) assertEquals(SocketServerConfigs.MAX_CONNECTIONS_DEFAULT, config.maxConnections) - assertEquals(ServerLogConfigs.MAX_MESSAGE_BYTES_DEFAULT, config.messageMaxBytes) + assertEquals(LogConfig.DEFAULT_MAX_MESSAGE_BYTES, config.messageMaxBytes) var newProps = new Properties() newProps.put(SocketServerConfigs.MAX_CONNECTIONS_CONFIG, "9999") @@ -705,6 +666,16 @@ class DynamicBrokerConfigTest { assertTrue(m.currentReporters.isEmpty) } + @Test + def testNonInternalValuesDoesNotExposeInternalConfigs(): Unit = { + val props = TestUtils.createBrokerConfig(0, port = 8181) + props.put(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG, "1024") + val config = new KafkaConfig(props) + assertFalse(config.nonInternalValues.containsKey(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG)) + config.updateCurrentConfig(new KafkaConfig(props)) + assertFalse(config.nonInternalValues.containsKey(KRaftConfigs.METADATA_LOG_SEGMENT_MIN_BYTES_CONFIG)) + } + @Test def testDynamicLogLocalRetentionMsConfig(): Unit = { val props = TestUtils.createBrokerConfig(0, port = 8181) @@ -1061,7 +1032,7 @@ class DynamicBrokerConfigTest { props.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "SASL_PLAINTEXT://localhost:8181") ctx.config.dynamicConfig.updateDefaultConfig(props) ctx.config.effectiveAdvertisedBrokerListeners.foreach(e => - assertEquals(SecurityProtocol.PLAINTEXT.name, e.listener) + assertEquals(SecurityProtocol.PLAINTEXT.name, e.listenerName.value) ) assertFalse(ctx.currentDefaultLogConfig.get().originals().containsKey(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG)) } @@ -1070,7 +1041,7 @@ class DynamicBrokerConfigTest { class TestDynamicThreadPool extends BrokerReconfigurable { override def reconfigurableConfigs: Set[String] = { - DynamicThreadPool.RECONFIGURABLE_CONFIGS.asScala + DynamicThreadPool.ReconfigurableConfigs } override def reconfigure(oldConfig: KafkaConfig, newConfig: KafkaConfig): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala index 519a7d951a381..bd30a1e2557d5 100644 --- a/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala +++ b/core/src/test/scala/unit/kafka/server/DynamicConfigChangeTest.scala @@ -18,6 +18,8 @@ package kafka.server import kafka.cluster.Partition import kafka.integration.KafkaServerTestHarness +import kafka.log.UnifiedLog +import kafka.log.remote.RemoteLogManager import kafka.utils.TestUtils.random import kafka.utils._ import org.apache.kafka.clients.CommonClientConfigs @@ -32,14 +34,13 @@ import org.apache.kafka.common.quota.{ClientQuotaAlteration, ClientQuotaEntity} import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.coordinator.group.GroupConfig -import org.apache.kafka.metadata.MetadataCache import org.apache.kafka.server.config.{QuotaConfig, ServerLogConfigs} -import org.apache.kafka.server.log.remote.TopicPartitionLog -import org.apache.kafka.server.log.remote.storage.RemoteLogManager -import org.apache.kafka.storage.internals.log.{LogConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.LogConfig import org.apache.kafka.test.TestUtils.assertFutureThrows import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{Test, Timeout} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.any import org.mockito.Mockito._ @@ -59,8 +60,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { List(KafkaConfig.fromProps(cfg)) } - @Test - def testConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConfigChange(quorum: String): Unit = { val oldVal: java.lang.Long = 100000L val newVal: java.lang.Long = 200000L val tp = new TopicPartition("test", 0) @@ -92,20 +94,21 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testDynamicTopicConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicTopicConfigChange(quorum: String): Unit = { val tp = new TopicPartition("test", 0) - val oldSegmentSize = 2 * 1024 * 1024 + val oldSegmentSize = 1000 val logProps = new Properties() logProps.put(TopicConfig.SEGMENT_BYTES_CONFIG, oldSegmentSize.toString) createTopic(tp.topic, 1, 1, logProps) TestUtils.retry(10000) { val logOpt = this.brokers.head.logManager.getLog(tp) assertTrue(logOpt.isDefined) - assertEquals(oldSegmentSize, logOpt.get.config.segmentSize()) + assertEquals(oldSegmentSize, logOpt.get.config.segmentSize) } - val newSegmentSize = 2 * 1024 * 1024 + val newSegmentSize = 2000 val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.TOPIC, tp.topic()) @@ -117,7 +120,7 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } val log = brokers.head.logManager.getLog(tp).get TestUtils.retry(10000) { - assertEquals(newSegmentSize, log.config.segmentSize()) + assertEquals(newSegmentSize, log.config.segmentSize) } (1 to 50).foreach(i => TestUtils.produceMessage(brokers, tp.topic, i.toString)) @@ -176,52 +179,59 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testClientIdQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClientIdQuotaConfigChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(CLIENT_ID, "testClient") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testUserQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUserQuotaConfigChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(USER, "ANONYMOUS") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testUserClientIdQuotaChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUserClientIdQuotaChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(USER, "ANONYMOUS") m.put(CLIENT_ID, "testClient") testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testDefaultClientIdQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultClientIdQuotaConfigChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(CLIENT_ID, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testDefaultUserQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultUserQuotaConfigChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(USER, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testDefaultUserClientIdQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultUserClientIdQuotaConfigChange(quorum: String): Unit = { val m = new util.HashMap[String, String] m.put(USER, null) m.put(CLIENT_ID, null) testQuotaConfigChange(new ClientQuotaEntity(m), KafkaPrincipal.ANONYMOUS, "testClient") } - @Test - def testIpQuotaInitialization(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIpQuotaInitialization(quorum: String): Unit = { val broker = brokers.head val admin = createAdminClient() try { @@ -241,8 +251,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testIpQuotaConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIpQuotaConfigChange(quorum: String): Unit = { val admin = createAdminClient() try { val alterations = util.Arrays.asList( @@ -284,8 +295,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { private def tempTopic() : String = "testTopic" + random.nextInt(1000000) - @Test - def testConfigChangeOnNonExistingTopicWithAdminClient(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConfigChangeOnNonExistingTopicWithAdminClient(quorum: String): Unit = { val topic = tempTopic() val admin = createAdminClient() try { @@ -301,14 +313,15 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testIncrementalAlterDefaultTopicConfig(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterDefaultTopicConfig(quorum: String): Unit = { val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.TOPIC, "") val op = new AlterConfigOp(new ConfigEntry(TopicConfig.FLUSH_MESSAGES_INTERVAL_CONFIG, "200000"), OpType.SET) val future = admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all - assertFutureThrows(classOf[InvalidRequestException], future) + assertFutureThrows(future, classOf[InvalidRequestException]) } finally { admin.close() } @@ -332,8 +345,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testBrokerIdConfigChangeAndDelete(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testBrokerIdConfigChangeAndDelete(quorum: String): Unit = { val newValue: Long = 100000L val brokerId: String = this.brokers.head.config.brokerId.toString setBrokerConfigs(brokerId, newValue) @@ -355,8 +369,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testDefaultBrokerIdConfigChangeAndDelete(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultBrokerIdConfigChangeAndDelete(quorum: String): Unit = { val newValue: Long = 100000L val brokerId: String = "" setBrokerConfigs(brokerId, newValue) @@ -377,8 +392,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testDefaultAndBrokerIdConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDefaultAndBrokerIdConfigChange(quorum: String): Unit = { val newValue: Long = 100000L val brokerId: String = this.brokers.head.config.brokerId.toString setBrokerConfigs(brokerId, newValue) @@ -394,8 +410,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { } } - @Test - def testDynamicGroupConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testDynamicGroupConfigChange(quorum: String): Unit = { val newSessionTimeoutMs = 50000 val consumerGroupId = "group-foo" val admin = createAdminClient() @@ -420,8 +437,9 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { assertEquals(newSessionTimeoutMs, groupConfig.consumerSessionTimeoutMs()) } - @Test - def testDynamicShareGroupConfigChange(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft+kip848")) + def testDynamicShareGroupConfigChange(quorum: String): Unit = { val newRecordLockDurationMs = 50000 val shareGroupId = "group-foo" val admin = createAdminClient() @@ -446,14 +464,15 @@ class DynamicConfigChangeTest extends KafkaServerTestHarness { assertEquals(newRecordLockDurationMs, groupConfig.shareRecordLockDurationMs) } - @Test - def testIncrementalAlterDefaultGroupConfig(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIncrementalAlterDefaultGroupConfig(quorum: String): Unit = { val admin = createAdminClient() try { val resource = new ConfigResource(ConfigResource.Type.GROUP, "") val op = new AlterConfigOp(new ConfigEntry(GroupConfig.CONSUMER_SESSION_TIMEOUT_MS_CONFIG, "200000"), OpType.SET) val future = admin.incrementalAlterConfigs(Map(resource -> List(op).asJavaCollection).asJava).all - assertFutureThrows(classOf[InvalidRequestException], future) + assertFutureThrows(future, classOf[InvalidRequestException]) } finally { admin.close() } @@ -499,9 +518,9 @@ class DynamicConfigChangeUnitTest { @Test def shouldParseRegardlessOfWhitespaceAroundValues(): Unit = { def parse(configHandler: TopicConfigHandler, value: String): Seq[Int] = { - val props = new Properties() - props.put(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, value) - configHandler.parseThrottledPartitions(props, 102, QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) + configHandler.parseThrottledPartitions( + CoreUtils.propsWith(QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG, value), + 102, QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG) } val configHandler: TopicConfigHandler = new TopicConfigHandler(null, null, null) assertEquals(ReplicationQuotaManager.ALL_REPLICAS.asScala.map(_.toInt).toSeq, parse(configHandler, "* ")) @@ -555,8 +574,8 @@ class DynamicConfigChangeUnitTest { when(replicaManager.onlinePartition(tp1)).thenReturn(Some(partition1)) when(log1.config).thenReturn(new LogConfig(Collections.emptyMap())) - val leaderPartitionsArg: ArgumentCaptor[util.Set[TopicPartitionLog]] = ArgumentCaptor.forClass(classOf[util.Set[TopicPartitionLog]]) - val followerPartitionsArg: ArgumentCaptor[util.Set[TopicPartitionLog]] = ArgumentCaptor.forClass(classOf[util.Set[TopicPartitionLog]]) + val leaderPartitionsArg: ArgumentCaptor[util.Set[Partition]] = ArgumentCaptor.forClass(classOf[util.Set[Partition]]) + val followerPartitionsArg: ArgumentCaptor[util.Set[Partition]] = ArgumentCaptor.forClass(classOf[util.Set[Partition]]) doNothing().when(rlm).onLeadershipChange(leaderPartitionsArg.capture(), followerPartitionsArg.capture(), any()) val isRemoteLogEnabledBeforeUpdate = false diff --git a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala index ed1f94c319349..85ae9121843a4 100755 --- a/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/EdgeCaseRequestTest.scala @@ -28,15 +28,16 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.message.ProduceRequestData import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.types.Type -import org.apache.kafka.common.protocol.{ApiKeys, ByteBufferAccessor, Errors} +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.{ProduceResponse, ResponseHeader} import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.utils.ByteUtils -import org.apache.kafka.common.{TopicPartition, Uuid, requests} +import org.apache.kafka.common.{TopicPartition, requests} import org.apache.kafka.server.config.ServerLogConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ @@ -117,8 +118,9 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { } } - @Test - def testProduceRequestWithNullClientId(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceRequestWithNullClientId(quorum: String): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val correlationId = -1 @@ -127,12 +129,10 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { val version = ApiKeys.PRODUCE.latestVersion: Short val (serializedBytes, responseHeaderVersion) = { val headerBytes = requestHeaderBytes(ApiKeys.PRODUCE.id, version, "", correlationId) - val topicId = getTopicIds().getOrElse(topicPartition.topic(), Uuid.ZERO_UUID) val request = requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( Collections.singletonList(new ProduceRequestData.TopicProduceData() - .setTopicId(topicId) - .setPartitionData(Collections.singletonList( + .setName(topicPartition.topic()).setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(topicPartition.partition()) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("message".getBytes)))))) @@ -141,7 +141,7 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { .setTimeoutMs(10000) .setTransactionalId(null)) .build() - val bodyBytes = request.serialize.buffer + val bodyBytes = request.serialize val byteBuffer = ByteBuffer.allocate(headerBytes.length + bodyBytes.remaining()) byteBuffer.put(headerBytes) byteBuffer.put(bodyBytes) @@ -152,7 +152,7 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { val responseBuffer = ByteBuffer.wrap(response) val responseHeader = ResponseHeader.parse(responseBuffer, responseHeaderVersion) - val produceResponse = ProduceResponse.parse(new ByteBufferAccessor(responseBuffer), version) + val produceResponse = ProduceResponse.parse(responseBuffer, version) assertEquals(0, responseBuffer.remaining, "The response should parse completely") assertEquals(correlationId, responseHeader.correlationId, "The correlationId should match request") @@ -164,23 +164,27 @@ class EdgeCaseRequestTest extends KafkaServerTestHarness { assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode), "There should be no error") } - @Test - def testHeaderOnlyRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHeaderOnlyRequest(quorum: String): Unit = { verifyDisconnect(requestHeaderBytes(ApiKeys.PRODUCE.id, 1)) } - @Test - def testInvalidApiKeyRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidApiKeyRequest(quorum: String): Unit = { verifyDisconnect(requestHeaderBytes(-1, 0)) } - @Test - def testInvalidApiVersionRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testInvalidApiVersionRequest(quorum: String): Unit = { verifyDisconnect(requestHeaderBytes(ApiKeys.PRODUCE.id, -1)) } - @Test - def testMalformedHeaderRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMalformedHeaderRequest(quorum: String): Unit = { val serializedBytes = { // Only send apiKey and apiVersion val buffer = ByteBuffer.allocate( diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala index 63215defd8f7e..a446bc9036098 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestMaxBytesTest.scala @@ -25,7 +25,9 @@ import org.apache.kafka.common.requests.FetchRequest.PartitionData import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.{Optional, Properties} import scala.jdk.CollectionConverters._ @@ -102,8 +104,9 @@ class FetchRequestMaxBytesTest extends BaseRequestTest { * Note that when a single batch is larger than FetchMaxBytes, it will be * returned in full even if this is larger than FetchMaxBytes. See KIP-74. */ - @Test - def testConsumeMultipleRecords(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsumeMultipleRecords(quorum: String): Unit = { createTopics() expectNextRecords(IndexedSeq(messages(0), messages(1)), 0) diff --git a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala index 5f5c17f50e693..f96b2ceca3159 100644 --- a/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/FetchRequestTest.scala @@ -26,7 +26,8 @@ import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.{IsolationLevel, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.server.record.BrokerCompressionType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util import java.util.Optional @@ -40,8 +41,9 @@ import scala.util.Random */ class FetchRequestTest extends BaseFetchRequestTest { - @Test - def testBrokerRespectsPartitionsOrderAndSizeLimits(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testBrokerRespectsPartitionsOrderAndSizeLimits(quorum: String): Unit = { initProducer() val messagesPerPartition = 9 @@ -142,8 +144,9 @@ class FetchRequestTest extends BaseFetchRequestTest { evaluateResponse4(fetchResponse4V12, 12) } - @Test - def testFetchRequestV4WithReadCommitted(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchRequestV4WithReadCommitted(quorum: String): Unit = { initProducer() val maxPartitionBytes = 200 val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1).head @@ -160,8 +163,9 @@ class FetchRequestTest extends BaseFetchRequestTest { assertTrue(records(partitionData).map(_.sizeInBytes).sum > 0) } - @Test - def testFetchRequestToNonReplica(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchRequestToNonReplica(quorum: String): Unit = { val topic = "topic" val partition = 0 val topicPartition = new TopicPartition(topic, partition) @@ -190,13 +194,15 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, oldPartitionData.errorCode) } - @Test - def testLastFetchedEpochValidation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLastFetchedEpochValidation(quorum: String): Unit = { checkLastFetchedEpochValidation(ApiKeys.FETCH.latestVersion()) } - @Test - def testLastFetchedEpochValidationV12(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testLastFetchedEpochValidationV12(quorum: String): Unit = { checkLastFetchedEpochValidation(12) } @@ -243,13 +249,15 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(firstEpochEndOffset, divergingEpoch.endOffset) } - @Test - def testCurrentEpochValidation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCurrentEpochValidation(quorum: String): Unit = { checkCurrentEpochValidation(ApiKeys.FETCH.latestVersion()) } - @Test - def testCurrentEpochValidationV12(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCurrentEpochValidationV12(quorum: String): Unit = { checkCurrentEpochValidation(12) } @@ -291,13 +299,15 @@ class FetchRequestTest extends BaseFetchRequestTest { assertResponseErrorForEpoch(Errors.FENCED_LEADER_EPOCH, followerId, Optional.of(secondLeaderEpoch - 1)) } - @Test - def testEpochValidationWithinFetchSession(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testEpochValidationWithinFetchSession(quorum: String): Unit = { checkEpochValidationWithinFetchSession(ApiKeys.FETCH.latestVersion()) } - @Test - def testEpochValidationWithinFetchSessionV12(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testEpochValidationWithinFetchSessionV12(quorum: String): Unit = { checkEpochValidationWithinFetchSession(12) } @@ -357,8 +367,9 @@ class FetchRequestTest extends BaseFetchRequestTest { * those partitions are returned in all incremental fetch requests. * This tests using FetchRequests that don't use topic IDs */ - @Test - def testCreateIncrementalFetchWithPartitionsInErrorV12(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCreateIncrementalFetchWithPartitionsInErrorV12(quorum: String): Unit = { def createConsumerFetchRequest(topicPartitions: Seq[TopicPartition], metadata: JFetchMetadata, toForget: Seq[TopicIdPartition]): FetchRequest = @@ -419,8 +430,9 @@ class FetchRequestTest extends BaseFetchRequestTest { /** * Test that when a Fetch Request receives an unknown topic ID, it returns a top level error. */ - @Test - def testFetchWithPartitionsWithIdError(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchWithPartitionsWithIdError(quorum: String): Unit = { def createConsumerFetchRequest(fetchData: util.LinkedHashMap[TopicPartition, FetchRequest.PartitionData], metadata: JFetchMetadata, toForget: Seq[TopicIdPartition]): FetchRequest = { @@ -463,8 +475,9 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_ID.code, responseData1.get(bar0).errorCode) } - @Test - def testZStdCompressedTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testZStdCompressedTopic(quorum: String): Unit = { // ZSTD compressed topic val topicConfig = Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> BrokerCompressionType.ZSTD.name) val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1, configs = topicConfig).head @@ -510,8 +523,9 @@ class FetchRequestTest extends BaseFetchRequestTest { assertEquals(3, records(data2).size) } - @Test - def testZStdCompressedRecords(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testZStdCompressedRecords(quorum: String): Unit = { // Producer compressed topic val topicConfig = Map(TopicConfig.COMPRESSION_TYPE_CONFIG -> BrokerCompressionType.PRODUCER.name) val (topicPartition, leaderId) = createTopics(numTopics = 1, numPartitions = 1, configs = topicConfig).head diff --git a/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala b/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala new file mode 100644 index 0000000000000..2c10decb3ed5c --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/ForwardingManagerMetricsTest.scala @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package unit.kafka.server + +import kafka.server.ForwardingManagerMetrics +import org.apache.kafka.common.MetricName +import org.apache.kafka.common.metrics.Metrics +import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} +import org.junit.jupiter.api.Test + +import java.util.Collections +import scala.jdk.CollectionConverters._ + +final class ForwardingManagerMetricsTest { + @Test + def testMetricsNames(): Unit = { + val metrics = new Metrics() + val expectedGroup = "ForwardingManager" + + val expectedMetrics = Set( + new MetricName("QueueTimeMs.p99", expectedGroup, "", Collections.emptyMap()), + new MetricName("QueueTimeMs.p999", expectedGroup, "", Collections.emptyMap()), + new MetricName("QueueLength", expectedGroup, "", Collections.emptyMap()), + new MetricName("RemoteTimeMs.p99", expectedGroup, "", Collections.emptyMap()), + new MetricName("RemoteTimeMs.p999", expectedGroup, "", Collections.emptyMap()) + ) + + var metricsMap = metrics.metrics().asScala.filter { case (name, _) => name.group == expectedGroup } + assertEquals(0, metricsMap.size) + + ForwardingManagerMetrics(metrics, 1000) + metricsMap = metrics.metrics().asScala.filter { case (name, _) => name.group == expectedGroup } + assertEquals(metricsMap.size, expectedMetrics.size) + metricsMap.foreach { case (name, _) => + assertTrue(expectedMetrics.contains(name)) + } + } + + @Test + def testQueueTimeMs(): Unit = { + val metrics = new Metrics() + + val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) + val queueTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP99Name) + val queueTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) + assertEquals(Double.NaN, queueTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(Double.NaN, queueTimeMsP999.metricValue.asInstanceOf[Double]) + for(i <- 0 to 999) { + forwardingManagerMetrics.queueTimeMsHist.record(i) + } + assertEquals(990.0, queueTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(999.0, queueTimeMsP999.metricValue.asInstanceOf[Double]) + } + + @Test + def testQueueLength(): Unit = { + val metrics = new Metrics() + + val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) + val queueLength = metrics.metrics().get(forwardingManagerMetrics.queueLengthName) + assertEquals(0, queueLength.metricValue.asInstanceOf[Int]) + forwardingManagerMetrics.queueLength.getAndIncrement() + assertEquals(1, queueLength.metricValue.asInstanceOf[Int]) + } + + @Test + def testRemoteTimeMs(): Unit = { + val metrics = new Metrics() + + val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, 1000) + val remoteTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.remoteTimeMsHist.latencyP99Name) + val remoteTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.remoteTimeMsHist.latencyP999Name) + assertEquals(Double.NaN, remoteTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(Double.NaN, remoteTimeMsP999.metricValue.asInstanceOf[Double]) + for (i <- 0 to 999) { + forwardingManagerMetrics.remoteTimeMsHist.record(i) + } + assertEquals(990.0, remoteTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(999.0, remoteTimeMsP999.metricValue.asInstanceOf[Double]) + } + + @Test + def testTimeoutMs(): Unit = { + val metrics = new Metrics() + val timeoutMs = 500 + val forwardingManagerMetrics = ForwardingManagerMetrics(metrics, timeoutMs) + val queueTimeMsP99 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP99Name) + val queueTimeMsP999 = metrics.metrics().get(forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) + assertEquals(Double.NaN, queueTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(Double.NaN, queueTimeMsP999.metricValue.asInstanceOf[Double]) + for(i <- 0 to 99) { + forwardingManagerMetrics.queueTimeMsHist.record(i) + } + forwardingManagerMetrics.queueTimeMsHist.record(1000) + + assertEquals(99, queueTimeMsP99.metricValue.asInstanceOf[Double]) + assertEquals(timeoutMs * 0.999, queueTimeMsP999.metricValue.asInstanceOf[Double]) + } +} diff --git a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala index 16e4b2bcb66f4..d2d8d3e0382c3 100644 --- a/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ForwardingManagerTest.scala @@ -53,7 +53,7 @@ class ForwardingManagerTest { private val forwardingManager = new ForwardingManagerImpl(brokerToController, metrics) private val principalBuilder = new DefaultKafkaPrincipalBuilder(null, null) private val queueTimeMsP999 = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueTimeMsHist.latencyP999Name) - private val queueLength = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueLengthName()) + private val queueLength = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.queueLengthName) private val remoteTimeMsP999 = metrics.metrics().get(forwardingManager.forwardingManagerMetrics.remoteTimeMsHist.latencyP999Name) private def controllerApiVersions: NodeApiVersions = { diff --git a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala index d5ab1356ac938..7f1b7a4d5ff35 100644 --- a/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/GroupCoordinatorBaseRequestTest.scala @@ -19,31 +19,26 @@ package kafka.server import kafka.network.SocketServer import kafka.utils.TestUtils import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord, RecordMetadata} -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.{TopicCollection, TopicIdPartition, TopicPartition, Uuid} +import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.message.DeleteGroupsResponseData.{DeletableGroupResult, DeletableGroupResultCollection} import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity import org.apache.kafka.common.message.LeaveGroupResponseData.MemberResponse import org.apache.kafka.common.message.SyncGroupRequestData.SyncGroupRequestAssignment -import org.apache.kafka.common.message.WriteTxnMarkersRequestData.{WritableTxnMarker, WritableTxnMarkerTopic} -import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData, ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, EndTxnRequestData, HeartbeatRequestData, HeartbeatResponseData, InitProducerIdRequestData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchRequestData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData, WriteTxnMarkersRequestData} +import org.apache.kafka.common.message.{AddOffsetsToTxnRequestData, AddOffsetsToTxnResponseData, ConsumerGroupDescribeRequestData, ConsumerGroupDescribeResponseData, ConsumerGroupHeartbeatRequestData, ConsumerGroupHeartbeatResponseData, DeleteGroupsRequestData, DeleteGroupsResponseData, DescribeGroupsRequestData, DescribeGroupsResponseData, EndTxnRequestData, HeartbeatRequestData, HeartbeatResponseData, InitProducerIdRequestData, JoinGroupRequestData, JoinGroupResponseData, LeaveGroupResponseData, ListGroupsRequestData, ListGroupsResponseData, OffsetCommitRequestData, OffsetCommitResponseData, OffsetDeleteRequestData, OffsetDeleteResponseData, OffsetFetchResponseData, ShareGroupDescribeRequestData, ShareGroupDescribeResponseData, ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData, SyncGroupRequestData, SyncGroupResponseData, TxnOffsetCommitRequestData, TxnOffsetCommitResponseData} import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, AddOffsetsToTxnRequest, AddOffsetsToTxnResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, EndTxnRequest, EndTxnResponse, HeartbeatRequest, HeartbeatResponse, InitProducerIdRequest, InitProducerIdResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse, TxnOffsetCommitRequest, TxnOffsetCommitResponse, WriteTxnMarkersRequest, WriteTxnMarkersResponse} +import org.apache.kafka.common.requests.{AbstractRequest, AbstractResponse, AddOffsetsToTxnRequest, AddOffsetsToTxnResponse, ConsumerGroupDescribeRequest, ConsumerGroupDescribeResponse, ConsumerGroupHeartbeatRequest, ConsumerGroupHeartbeatResponse, DeleteGroupsRequest, DeleteGroupsResponse, DescribeGroupsRequest, DescribeGroupsResponse, EndTxnRequest, EndTxnResponse, HeartbeatRequest, HeartbeatResponse, InitProducerIdRequest, InitProducerIdResponse, JoinGroupRequest, JoinGroupResponse, LeaveGroupRequest, LeaveGroupResponse, ListGroupsRequest, ListGroupsResponse, OffsetCommitRequest, OffsetCommitResponse, OffsetDeleteRequest, OffsetDeleteResponse, OffsetFetchRequest, OffsetFetchResponse, ShareGroupDescribeRequest, ShareGroupDescribeResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, SyncGroupRequest, SyncGroupResponse, TxnOffsetCommitRequest, TxnOffsetCommitResponse} import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.common.utils.ProducerIdAndEpoch import org.apache.kafka.controller.ControllerRequestContextUtil.ANONYMOUS_CONTEXT -import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions.{assertEquals, fail} -import java.net.Socket -import java.util import java.util.{Comparator, Properties} import java.util.stream.Collectors import scala.collection.Seq -import scala.collection.mutable.ListBuffer +import scala.collection.convert.ImplicitConversions.{`collection AsScalaIterable`, `map AsScala`} import scala.jdk.CollectionConverters._ - +import scala.reflect.ClassTag class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { private def brokers(): Seq[KafkaBroker] = cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).asScala.toSeq @@ -52,8 +47,6 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected var producer: KafkaProducer[String, String] = _ - protected var openSockets: ListBuffer[Socket] = ListBuffer[Socket]() - protected def createOffsetsTopic(): Unit = { val admin = cluster.admin() try { @@ -83,7 +76,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { protected def createTopic( topic: String, numPartitions: Int - ): Uuid = { + ): Unit = { val admin = cluster.admin() try { TestUtils.createTopicWithAdmin( @@ -93,26 +86,6 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { topic = topic, numPartitions = numPartitions ) - admin - .describeTopics(TopicCollection.ofTopicNames(List(topic).asJava)) - .allTopicNames() - .get() - .get(topic) - .topicId() - } finally { - admin.close() - } - } - - protected def deleteTopic( - topic: String - ): Unit = { - val admin = cluster.admin() - try { - admin - .deleteTopics(TopicCollection.ofTopicNames(List(topic).asJava)) - .all() - .get() } finally { admin.close() } @@ -135,7 +108,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { replicationFactor = replicationFactor, topicConfig = topicConfig ) - partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds.get(topic), new TopicPartition(topic, partition)) -> leader } + partitionToLeader.map { case (partition, leader) => new TopicIdPartition(getTopicIds(topic), new TopicPartition(topic, partition)) -> leader } } finally { admin.close() } @@ -145,12 +118,16 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { cluster.brokers.values.stream.allMatch(b => b.config.unstableApiVersionsEnabled) } - protected def getTopicIds: util.Map[String, Uuid] = { - cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get() + protected def isNewGroupCoordinatorEnabled: Boolean = { + cluster.brokers.values.stream.allMatch(b => b.config.isNewGroupCoordinatorEnabled) + } + + protected def getTopicIds: Map[String, Uuid] = { + cluster.controllers().get(cluster.controllerIds().iterator().next()).controller.findAllTopicIds(ANONYMOUS_CONTEXT).get().toMap } protected def getBrokers: Seq[KafkaBroker] = { - cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).asScala.toSeq + cluster.brokers.values().stream().collect(Collectors.toList[KafkaBroker]).toSeq } protected def bootstrapServers(): String = { @@ -162,16 +139,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { keySerializer = new StringSerializer, valueSerializer = new StringSerializer) } - protected def closeSockets(): Unit = { - while (openSockets.nonEmpty) { - val socket = openSockets.head - socket.close() - openSockets.remove(0) - } - } - protected def closeProducer(): Unit = { - if(producer != null) + if( producer != null ) producer.close() } @@ -202,24 +171,18 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { memberId: String, memberEpoch: Int, topic: String, - topicId: Uuid, partition: Int, offset: Long, expectedError: Errors, version: Short = ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled) ): Unit = { - if (version >= 10 && topicId == Uuid.ZERO_UUID) { - throw new IllegalArgumentException(s"Cannot call OffsetCommit API version $version without a topic id") - } - - val request = OffsetCommitRequest.Builder.forTopicIdsOrNames( + val request = new OffsetCommitRequest.Builder( new OffsetCommitRequestData() .setGroupId(groupId) .setMemberId(memberId) .setGenerationIdOrMemberEpoch(memberEpoch) .setTopics(List( new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(topicId) .setName(topic) .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() @@ -233,8 +196,7 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { val expectedResponse = new OffsetCommitResponseData() .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(if (version < 10) topic else "") + .setName(topic) .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(partition) @@ -354,71 +316,21 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { assertEquals(expectedError, connectAndReceive[EndTxnResponse](request).error) } - protected def writeTxnMarkers( - producerId: Long, - producerEpoch: Short, - committed: Boolean, - expectedError: Errors = Errors.NONE, - version: Short = ApiKeys.WRITE_TXN_MARKERS.latestVersion(isUnstableApiEnabled) - ): Unit = { - val request = new WriteTxnMarkersRequest.Builder( - new WriteTxnMarkersRequestData() - .setMarkers(List( - new WritableTxnMarker() - .setProducerId(producerId) - .setProducerEpoch(producerEpoch) - .setTransactionResult(committed) - .setTopics(List( - new WritableTxnMarkerTopic() - .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitionIndexes(List[Integer](0).asJava) - ).asJava) - .setCoordinatorEpoch(0) - ).asJava) - ).build(version) - - assertEquals( - expectedError.code, - connectAndReceive[WriteTxnMarkersResponse](request).data.markers.get(0).topics.get(0).partitions.get(0).errorCode - ) - } - - protected def fetchOffsets( - groups: List[OffsetFetchRequestData.OffsetFetchRequestGroup], - requireStable: Boolean, - version: Short - ): List[OffsetFetchResponseData.OffsetFetchResponseGroup] = { - if (version < 8) { - fail(s"OffsetFetch API version $version cannot fetch multiple groups.") - } - - val request = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(groups.asJava), - false, - true - ).build(version) - - val response = connectAndReceive[OffsetFetchResponse](request) - - // Sort topics and partitions within the response as their order is not guaranteed. - response.data.groups.asScala.foreach(sortTopicPartitions) - - response.data.groups.asScala.toList - } - protected def fetchOffsets( - group: OffsetFetchRequestData.OffsetFetchRequestGroup, + groupId: String, + memberId: String, + memberEpoch: Int, + partitions: List[TopicPartition], requireStable: Boolean, version: Short ): OffsetFetchResponseData.OffsetFetchResponseGroup = { - val request = OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(requireStable) - .setGroups(List(group).asJava), - false, - true + val request = new OffsetFetchRequest.Builder( + groupId, + memberId, + memberEpoch, + requireStable, + if (partitions == null) null else partitions.asJava, + false ).build(version) val response = connectAndReceive[OffsetFetchResponse](request) @@ -427,11 +339,11 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { // same format to the caller. val groupResponse = if (version >= 8) { assertEquals(1, response.data.groups.size) - assertEquals(group.groupId, response.data.groups.get(0).groupId) + assertEquals(groupId, response.data.groups.get(0).groupId) response.data.groups.asScala.head } else { new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId(group.groupId) + .setGroupId(groupId) .setErrorCode(response.data.errorCode) .setTopics(response.data.topics.asScala.map { topic => new OffsetFetchResponseData.OffsetFetchResponseTopics() @@ -453,25 +365,27 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { groupResponse } - protected def fetchOffset( - groupId: String, - topic: String, - partition: Int - ): Long = { - val groupIdRecord = fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId(groupId) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(topic) - .setPartitionIndexes(List[Integer](partition).asJava) - ).asJava), - requireStable = true, - version = 9 - ) - val topicRecord = groupIdRecord.topics.asScala.find(_.name == topic).head - val partitionRecord = topicRecord.partitions.asScala.find(_.partitionIndex == partition).head - partitionRecord.committedOffset + protected def fetchOffsets( + groups: Map[String, List[TopicPartition]], + requireStable: Boolean, + version: Short + ): List[OffsetFetchResponseData.OffsetFetchResponseGroup] = { + if (version < 8) { + fail(s"OffsetFetch API version $version cannot fetch multiple groups.") + } + + val request = new OffsetFetchRequest.Builder( + groups.map { case (k, v) => (k, v.asJava) }.asJava, + requireStable, + false + ).build(version) + + val response = connectAndReceive[OffsetFetchResponse](request) + + // Sort topics and partitions within the response as their order is not guaranteed. + response.data.groups.asScala.foreach(sortTopicPartitions) + + response.data.groups.asScala.toList } protected def deleteOffset( @@ -761,7 +675,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { val shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( new ShareGroupDescribeRequestData() .setGroupIds(groupIds.asJava) - .setIncludeAuthorizedOperations(includeAuthorizedOperations) + .setIncludeAuthorizedOperations(includeAuthorizedOperations), + true ).build(version) val shareGroupDescribeResponse = connectAndReceive[ShareGroupDescribeResponse](shareGroupDescribeRequest) @@ -841,7 +756,8 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { .setMemberId(memberId) .setMemberEpoch(memberEpoch) .setRackId(rackId) - .setSubscribedTopicNames(subscribedTopicNames.asJava) + .setSubscribedTopicNames(subscribedTopicNames.asJava), + true ).build() // Send the request until receiving a successful response. There is a delay @@ -953,31 +869,25 @@ class GroupCoordinatorBaseRequestTest(cluster: ClusterInstance) { assertEquals(expectedResponseData.results.asScala.toSet, deleteGroupsResponse.data.results.asScala.toSet) } - protected def connectAny(): Socket = { - val socket: Socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) - openSockets += socket - socket - } - - protected def connect(destination: Int): Socket = { - val socket = IntegrationTestUtils.connect(brokerSocketServer(destination).boundPort(cluster.clientListener())) - openSockets += socket - socket - } - protected def connectAndReceive[T <: AbstractResponse]( request: AbstractRequest - ): T = { - IntegrationTestUtils.connectAndReceive[T](request, cluster.brokerBoundPorts().get(0)) + )(implicit classTag: ClassTag[T]): T = { + IntegrationTestUtils.connectAndReceive[T]( + request, + cluster.anyBrokerSocketServer(), + cluster.clientListener() + ) } protected def connectAndReceive[T <: AbstractResponse]( request: AbstractRequest, destination: Int - ): T = { - val socketServer = brokerSocketServer(destination) - val listenerName = cluster.clientListener() - IntegrationTestUtils.connectAndReceive[T](request, socketServer.boundPort(listenerName)) + )(implicit classTag: ClassTag[T]): T = { + IntegrationTestUtils.connectAndReceive[T]( + request, + brokerSocketServer(destination), + cluster.clientListener() + ) } private def brokerSocketServer(brokerId: Int): SocketServer = { diff --git a/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala index 3026cdecb2754..332c01aeeb53b 100644 --- a/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/HeartbeatRequestTest.scala @@ -30,16 +30,27 @@ import java.util.Collections import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.Future -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +@ClusterTestDefaults(types = Array(Type.KRAFT)) +class HeartbeatRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest(serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) -class HeartbeatRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testHeartbeatWithOldConsumerGroupProtocol(): Unit = { + )) + def testHeartbeatWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testHeartbeat() + } + + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testHeartbeatWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testHeartbeat() + } + + private def testHeartbeat(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() @@ -179,15 +190,6 @@ class HeartbeatRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas expectedError = Errors.UNKNOWN_MEMBER_ID, version = version.toShort ) - - // Heartbeat with empty group id. - heartbeat( - groupId = "", - memberId = leaderMemberId, - generationId = -1, - expectedError = Errors.INVALID_GROUP_ID, - version = version.toShort - ) } } } diff --git a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala index 9ea25f76b4673..f625afa1fa719 100755 --- a/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala +++ b/core/src/test/scala/unit/kafka/server/HighwatermarkPersistenceTest.scala @@ -24,18 +24,13 @@ import org.junit.jupiter.api._ import org.junit.jupiter.api.Assertions._ import kafka.utils.TestUtils import kafka.cluster.Partition -import kafka.server.metadata.KRaftMetadataCache +import kafka.server.metadata.MockConfigRepository import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.record.SimpleRecord -import org.apache.kafka.metadata.MockConfigRepository import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.util.{KafkaScheduler, MockTime} import org.apache.kafka.storage.internals.log.{CleanerConfig, LogDirFailureChannel} -import java.util.Optional - -import scala.jdk.CollectionConverters._ - class HighwatermarkPersistenceTest { val configs = TestUtils.createBrokerConfigs(2).map(KafkaConfig.fromProps) @@ -43,7 +38,7 @@ class HighwatermarkPersistenceTest { val configRepository = new MockConfigRepository() val logManagers = configs map { config => TestUtils.createLogManager( - logDirs = config.logDirs.asScala.map(new File(_)), + logDirs = config.logDirs.map(new File(_)), cleanerConfig = new CleanerConfig(true)) } @@ -66,7 +61,7 @@ class HighwatermarkPersistenceTest { scheduler.startup() val metrics = new Metrics val time = new MockTime - val quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "", "") + val quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "") // create replica manager val replicaManager = new ReplicaManager( metrics = metrics, @@ -75,7 +70,7 @@ class HighwatermarkPersistenceTest { scheduler = scheduler, logManager = logManagers.head, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = logDirFailureChannels.head, alterPartitionManager = alterIsrManager) replicaManager.startup() @@ -86,7 +81,7 @@ class HighwatermarkPersistenceTest { val tp0 = new TopicPartition(topic, 0) val partition0 = replicaManager.createPartition(tp0) // create leader and follower replicas - val log0 = logManagers.head.getOrCreateLog(new TopicPartition(topic, 0), topicId = Optional.empty) + val log0 = logManagers.head.getOrCreateLog(new TopicPartition(topic, 0), topicId = None) partition0.setLog(log0, isFutureLog = false) partition0.updateAssignmentAndIsr( @@ -124,7 +119,7 @@ class HighwatermarkPersistenceTest { scheduler.startup() val metrics = new Metrics val time = new MockTime - val quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "", "") + val quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "") // create replica manager val replicaManager = new ReplicaManager( metrics = metrics, @@ -133,7 +128,7 @@ class HighwatermarkPersistenceTest { scheduler = scheduler, logManager = logManagers.head, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = logDirFailureChannels.head, alterPartitionManager = alterIsrManager) replicaManager.startup() @@ -144,7 +139,7 @@ class HighwatermarkPersistenceTest { val t1p0 = new TopicPartition(topic1, 0) val topic1Partition0 = replicaManager.createPartition(t1p0) // create leader log - val topic1Log0 = logManagers.head.getOrCreateLog(t1p0, topicId = Optional.empty) + val topic1Log0 = logManagers.head.getOrCreateLog(t1p0, topicId = None) // create a local replica for topic1 topic1Partition0.setLog(topic1Log0, isFutureLog = false) replicaManager.checkpointHighWatermarks() @@ -161,7 +156,7 @@ class HighwatermarkPersistenceTest { val t2p0 = new TopicPartition(topic2, 0) val topic2Partition0 = replicaManager.createPartition(t2p0) // create leader log - val topic2Log0 = logManagers.head.getOrCreateLog(t2p0, topicId = Optional.empty) + val topic2Log0 = logManagers.head.getOrCreateLog(t2p0, topicId = None) // create a local replica for topic2 topic2Partition0.setLog(topic2Log0, isFutureLog = false) replicaManager.checkpointHighWatermarks() @@ -193,11 +188,11 @@ class HighwatermarkPersistenceTest { private def append(partition: Partition, count: Int): Unit = { val records = TestUtils.records((0 to count).map(i => new SimpleRecord(s"$i".getBytes))) - partition.localLogOrException.appendAsLeader(records, 0) + partition.localLogOrException.appendAsLeader(records, leaderEpoch = 0) } private def hwmFor(replicaManager: ReplicaManager, topic: String, partition: Int): Long = { - replicaManager.highWatermarkCheckpoints(new File(replicaManager.config.logDirs.get(0)).getAbsolutePath).read().getOrDefault( + replicaManager.highWatermarkCheckpoints(new File(replicaManager.config.logDirs.head).getAbsolutePath).read().getOrDefault( new TopicPartition(topic, partition), 0L) } } diff --git a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala index 5836f3618c181..2f11690bacdc4 100644 --- a/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala +++ b/core/src/test/scala/unit/kafka/server/IsrExpirationTest.scala @@ -19,9 +19,8 @@ package kafka.server import java.io.File import java.util.Properties import kafka.cluster.Partition -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} import kafka.server.QuotaFactory.QuotaManagers -import kafka.server.metadata.KRaftMetadataCache import kafka.utils.TestUtils.MockAlterPartitionManager import kafka.utils._ import org.apache.kafka.common.TopicPartition @@ -31,7 +30,7 @@ import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{LogDirFailureChannel, LogOffsetMetadata, UnifiedLog} +import org.apache.kafka.storage.internals.log.{LogDirFailureChannel, LogOffsetMetadata} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.Mockito.{atLeastOnce, mock, verify, when} @@ -66,7 +65,7 @@ class IsrExpirationTest { when(logManager.liveLogDirs).thenReturn(Array.empty[File]) alterIsrManager = TestUtils.createAlterIsrManager() - quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "", "") + quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "") replicaManager = new ReplicaManager( metrics = metrics, config = configs.head, @@ -74,7 +73,7 @@ class IsrExpirationTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(configs.head.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(configs.head.logDirs.size), alterPartitionManager = alterIsrManager) } @@ -100,11 +99,11 @@ class IsrExpirationTest { // let the follower catch up to the Leader logEndOffset - 1 for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - new LogOffsetMetadata(leaderLogEndOffset - 1), - 0L, - time.milliseconds, - leaderLogEndOffset, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 1), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = leaderLogEndOffset, + brokerEpoch = 1L) var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -150,11 +149,11 @@ class IsrExpirationTest { // Make the remote replica not read to the end of log. It should be not be out of sync for at least 100 ms for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - new LogOffsetMetadata(leaderLogEndOffset - 2), - 0L, - time.milliseconds, - leaderLogEndOffset, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 2), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = leaderLogEndOffset, + brokerEpoch = 1L) // Simulate 2 fetch requests spanning more than 100 ms which do not read to the end of the log. // The replicas will no longer be in ISR. We do 2 fetches because we want to simulate the case where the replica is lagging but is not stuck @@ -165,11 +164,11 @@ class IsrExpirationTest { partition0.remoteReplicas.foreach { r => r.updateFetchStateOrThrow( - new LogOffsetMetadata(leaderLogEndOffset - 1), - 0L, - time.milliseconds, - leaderLogEndOffset, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset - 1), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = leaderLogEndOffset, + brokerEpoch = 1L) } partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -183,11 +182,11 @@ class IsrExpirationTest { // Now actually make a fetch to the end of the log. The replicas should be back in ISR partition0.remoteReplicas.foreach { r => r.updateFetchStateOrThrow( - new LogOffsetMetadata(leaderLogEndOffset), - 0L, - time.milliseconds, - leaderLogEndOffset, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = leaderLogEndOffset, + brokerEpoch = 1L) } partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -208,11 +207,11 @@ class IsrExpirationTest { // let the follower catch up to the Leader logEndOffset for (replica <- partition0.remoteReplicas) replica.updateFetchStateOrThrow( - new LogOffsetMetadata(leaderLogEndOffset), - 0L, - time.milliseconds, - leaderLogEndOffset, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(leaderLogEndOffset), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = leaderLogEndOffset, + brokerEpoch = 1L) var partition0OSR = partition0.getOutOfSyncReplicas(configs.head.replicaLagTimeMaxMs) assertEquals(Set.empty[Int], partition0OSR, "No replica should be out of sync") @@ -245,11 +244,11 @@ class IsrExpirationTest { // set lastCaughtUpTime to current time for (replica <- partition.remoteReplicas) replica.updateFetchStateOrThrow( - new LogOffsetMetadata(0L), - 0L, - time.milliseconds, - 0L, - 1L) + followerFetchOffsetMetadata = new LogOffsetMetadata(0L), + followerStartOffset = 0L, + followerFetchTimeMs= time.milliseconds, + leaderEndOffset = 0L, + brokerEpoch = 1L) // set the leader and its hw and the hw update time partition.leaderReplicaIdOpt = Some(leaderId) diff --git a/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala index 0f2ab3669c90d..f77c2fc1bfadd 100644 --- a/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/JoinGroupRequestTest.scala @@ -16,7 +16,7 @@ */ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Type} import kafka.utils.TestUtils import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor import org.apache.kafka.clients.consumer.internals.ConsumerProtocol @@ -34,17 +34,27 @@ import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} import scala.jdk.CollectionConverters._ -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +class JoinGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest(types = Array(Type.KRAFT), serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") - ) -) -class JoinGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testJoinGroupWithOldConsumerGroupProtocol(): Unit = { + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000"), + )) + def testJoinGroupWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testJoinGroup() + } + + @ClusterTest(serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000"), + )) + def testJoinGroupWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testJoinGroup() + } + + private def testJoinGroup(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() @@ -139,17 +149,6 @@ class JoinGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas ) ) - // Join with an empty group id. - verifyJoinGroupResponseDataEquals( - new JoinGroupResponseData() - .setErrorCode(Errors.INVALID_GROUP_ID.code) - .setProtocolName(if (version >= 7) null else ""), - sendJoinRequest( - groupId = "", - version = version.toShort - ) - ) - // Join with an inconsistent protocolType. verifyJoinGroupResponseDataEquals( new JoinGroupResponseData() diff --git a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala index bdd62291407a5..20a030714f7d2 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaApisTest.scala @@ -19,11 +19,12 @@ package kafka.server import kafka.cluster.Partition import kafka.coordinator.transaction.{InitProducerIdResult, TransactionCoordinator} +import kafka.log.UnifiedLog import kafka.network.RequestChannel import kafka.server.QuotaFactory.QuotaManagers -import kafka.server.metadata.KRaftMetadataCache +import kafka.server.metadata.{ConfigRepository, KRaftMetadataCache, MockConfigRepository} import kafka.server.share.SharePartitionManager -import kafka.utils.{CoreUtils, Logging, TestUtils} +import kafka.utils.{CoreUtils, Log4jController, Logging, TestUtils} import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin.{AlterConfigOp, ConfigEntry} import org.apache.kafka.common._ @@ -32,25 +33,20 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.{BROKER, BROKER_LOGGER} import org.apache.kafka.common.errors.{ClusterAuthorizationException, UnsupportedVersionException} -import org.apache.kafka.common.internals.{Plugin, Topic} -import org.apache.kafka.common.internals.Topic.SHARE_GROUP_STATE_TOPIC_NAME +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.message.AddPartitionsToTxnRequestData.{AddPartitionsToTxnTopic, AddPartitionsToTxnTopicCollection, AddPartitionsToTxnTransaction, AddPartitionsToTxnTransactionCollection} import org.apache.kafka.common.message.AddPartitionsToTxnResponseData.AddPartitionsToTxnResult import org.apache.kafka.common.message.AlterConfigsRequestData.{AlterConfigsResource => LAlterConfigsResource, AlterConfigsResourceCollection => LAlterConfigsResourceCollection, AlterableConfig => LAlterableConfig, AlterableConfigCollection => LAlterableConfigCollection} import org.apache.kafka.common.message.AlterConfigsResponseData.{AlterConfigsResourceResponse => LAlterConfigsResourceResponse} -import org.apache.kafka.common.message.AlterShareGroupOffsetsRequestData.{AlterShareGroupOffsetsRequestPartition, AlterShareGroupOffsetsRequestTopic, AlterShareGroupOffsetsRequestTopicCollection} import org.apache.kafka.common.message.ApiMessageType.ListenerType import org.apache.kafka.common.message.ConsumerGroupDescribeResponseData.{DescribedGroup, TopicPartitions} import org.apache.kafka.common.message.CreateTopicsRequestData.CreatableTopic import org.apache.kafka.common.message.CreateTopicsResponseData.CreatableTopicResult -import org.apache.kafka.common.message.DeleteShareGroupOffsetsRequestData.DeleteShareGroupOffsetsRequestTopic -import org.apache.kafka.common.message.DeleteShareGroupOffsetsResponseData.DeleteShareGroupOffsetsResponseTopic -import org.apache.kafka.common.message.DescribeShareGroupOffsetsRequestData.{DescribeShareGroupOffsetsRequestGroup, DescribeShareGroupOffsetsRequestTopic} -import org.apache.kafka.common.message.DescribeShareGroupOffsetsResponseData.{DescribeShareGroupOffsetsResponseGroup, DescribeShareGroupOffsetsResponsePartition, DescribeShareGroupOffsetsResponseTopic} import org.apache.kafka.common.message.IncrementalAlterConfigsRequestData.{AlterConfigsResource => IAlterConfigsResource, AlterConfigsResourceCollection => IAlterConfigsResourceCollection, AlterableConfig => IAlterableConfig, AlterableConfigCollection => IAlterableConfigCollection} import org.apache.kafka.common.message.IncrementalAlterConfigsResponseData.{AlterConfigsResourceResponse => IAlterConfigsResourceResponse} import org.apache.kafka.common.message.LeaveGroupRequestData.MemberIdentity +import org.apache.kafka.common.message.ListClientMetricsResourcesResponseData.ClientMetricsResource import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} import org.apache.kafka.common.message.MetadataResponseData.MetadataResponseTopic @@ -58,7 +54,8 @@ import org.apache.kafka.common.message.OffsetDeleteRequestData.{OffsetDeleteRequ import org.apache.kafka.common.message.OffsetDeleteResponseData.{OffsetDeleteResponsePartition, OffsetDeleteResponsePartitionCollection, OffsetDeleteResponseTopic, OffsetDeleteResponseTopicCollection} import org.apache.kafka.common.message.ShareFetchRequestData.{AcknowledgementBatch, ForgottenTopic} import org.apache.kafka.common.message.ShareFetchResponseData.{AcquiredRecords, PartitionData, ShareFetchableTopicResponse} -import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionRecord, RegisterBrokerRecord, TopicRecord} +import org.apache.kafka.common.metadata.{TopicRecord, PartitionRecord, RegisterBrokerRecord} +import org.apache.kafka.common.metadata.FeatureLevelRecord import org.apache.kafka.common.metadata.RegisterBrokerRecord.{BrokerEndpoint, BrokerEndpointCollection} import org.apache.kafka.common.protocol.ApiMessage import org.apache.kafka.common.message._ @@ -75,32 +72,28 @@ import org.apache.kafka.common.resource.{PatternType, Resource, ResourcePattern, import org.apache.kafka.common.security.auth.{KafkaPrincipal, KafkaPrincipalSerde, SecurityProtocol} import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource import org.apache.kafka.common.utils.{ImplicitLinkedHashCollection, ProducerIdAndEpoch, SecurityUtils, Utils} -import org.apache.kafka.coordinator.group.GroupConfig.{CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, CONSUMER_SESSION_TIMEOUT_MS_CONFIG, SHARE_AUTO_OFFSET_RESET_CONFIG, SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, SHARE_ISOLATION_LEVEL_CONFIG, SHARE_RECORD_LOCK_DURATION_MS_CONFIG, SHARE_SESSION_TIMEOUT_MS_CONFIG, STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, STREAMS_NUM_STANDBY_REPLICAS_CONFIG, STREAMS_SESSION_TIMEOUT_MS_CONFIG} +import org.apache.kafka.coordinator.group.GroupConfig.{CONSUMER_HEARTBEAT_INTERVAL_MS_CONFIG, CONSUMER_SESSION_TIMEOUT_MS_CONFIG, SHARE_AUTO_OFFSET_RESET_CONFIG, SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, SHARE_RECORD_LOCK_DURATION_MS_CONFIG, SHARE_SESSION_TIMEOUT_MS_CONFIG} import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig -import org.apache.kafka.coordinator.group.{GroupConfig, GroupConfigManager, GroupCoordinator, GroupCoordinatorConfig} -import org.apache.kafka.coordinator.group.streams.StreamsGroupHeartbeatResult +import org.apache.kafka.coordinator.group.{GroupConfig, GroupCoordinator, GroupCoordinatorConfig} import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorTestConfig} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} -import org.apache.kafka.metadata.{ConfigRepository, MetadataCache, MockConfigRepository} -import org.apache.kafka.network.Session import org.apache.kafka.network.metrics.{RequestChannelMetrics, RequestMetrics} import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.{ClientMetricsManager, SimpleApiVersionManager} +import org.apache.kafka.server.{BrokerFeatures, ClientMetricsManager} import org.apache.kafka.server.authorizer.{Action, AuthorizationResult, Authorizer} -import org.apache.kafka.server.common.{FeatureVersion, FinalizedFeatures, GroupVersion, KRaftVersion, MetadataVersion, RequestLocal, ShareVersion, StreamsVersion, TransactionVersion} +import org.apache.kafka.server.common.{FeatureVersion, FinalizedFeatures, GroupVersion, KRaftVersion, MetadataVersion, RequestLocal, TransactionVersion} import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerConfigs, ServerLogConfigs} -import org.apache.kafka.server.logger.LoggingController import org.apache.kafka.server.metrics.ClientMetricsTestUtils -import org.apache.kafka.server.share.{CachedSharePartition, ErroneousAndValidPartitionData, SharePartitionKey} -import org.apache.kafka.server.quota.{ClientQuotaManager, ControllerMutationQuota, ControllerMutationQuotaManager, ThrottleCallback} +import org.apache.kafka.server.share.{CachedSharePartition, ErroneousAndValidPartitionData} +import org.apache.kafka.server.quota.ThrottleCallback import org.apache.kafka.server.share.acknowledge.ShareAcknowledgementBatch import org.apache.kafka.server.share.context.{FinalContext, ShareSessionContext} import org.apache.kafka.server.share.session.{ShareSession, ShareSessionKey} import org.apache.kafka.server.storage.log.{FetchParams, FetchPartitionData} import org.apache.kafka.server.util.{FutureUtils, MockTime} -import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} @@ -116,9 +109,9 @@ import java.nio.ByteBuffer import java.nio.charset.StandardCharsets import java.time.Duration import java.util +import java.util.Arrays.asList import java.util.concurrent.{CompletableFuture, TimeUnit} -import java.util.function.Consumer -import java.util.{Comparator, Optional, OptionalInt, OptionalLong, Properties} +import java.util.{Collections, Comparator, Optional, OptionalInt, OptionalLong, Properties} import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ @@ -138,7 +131,7 @@ class KafkaApisTest extends Logging { } private val metrics = new Metrics() private val brokerId = 1 - private var metadataCache: MetadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + private var metadataCache: MetadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) private val clientQuotaManager: ClientQuotaManager = mock(classOf[ClientQuotaManager]) private val clientRequestQuotaManager: ClientRequestQuotaManager = mock(classOf[ClientRequestQuotaManager]) private val clientControllerQuotaManager: ControllerMutationQuotaManager = mock(classOf[ControllerMutationQuotaManager]) @@ -148,12 +141,12 @@ class KafkaApisTest extends Logging { private val fetchManager: FetchManager = mock(classOf[FetchManager]) private val sharePartitionManager: SharePartitionManager = mock(classOf[SharePartitionManager]) private val clientMetricsManager: ClientMetricsManager = mock(classOf[ClientMetricsManager]) - private val groupConfigManager: GroupConfigManager = mock(classOf[GroupConfigManager]) private val brokerTopicStats = new BrokerTopicStats private val clusterId = "clusterId" private val time = new MockTime private val clientId = "" private var kafkaApis: KafkaApis = _ + private val partitionMaxBytes = 40000 @AfterEach def tearDown(): Unit = { @@ -168,8 +161,7 @@ class KafkaApisTest extends Logging { authorizer: Option[Authorizer] = None, configRepository: ConfigRepository = new MockConfigRepository(), overrideProperties: Map[String, String] = Map.empty, - featureVersions: Seq[FeatureVersion] = Seq.empty, - autoTopicCreationManager: Option[AutoTopicCreationManager] = None + featureVersions: Seq[FeatureVersion] = Seq.empty ): KafkaApis = { val properties = TestUtils.createBrokerConfig(brokerId) @@ -181,11 +173,17 @@ class KafkaApisTest extends Logging { overrideProperties.foreach( p => properties.put(p._1, p._2)) val config = new KafkaConfig(properties) + val listenerType = ListenerType.BROKER + val enabledApis = ApiKeys.apisForListener(listenerType).asScala + val apiVersionManager = new SimpleApiVersionManager( - ListenerType.BROKER, + listenerType, + enabledApis, + BrokerFeatures.defaultSupportedFeatures(true), true, - () => new FinalizedFeatures(MetadataVersion.latestTesting(), util.Map.of[String, java.lang.Short], 0)) + () => new FinalizedFeatures(MetadataVersion.latestTesting(), Collections.emptyMap[String, java.lang.Short], 0, true)) + when(groupCoordinator.isNewGroupCoordinator).thenReturn(config.isNewGroupCoordinatorEnabled) setupFeatures(featureVersions) new KafkaApis( @@ -194,14 +192,14 @@ class KafkaApisTest extends Logging { replicaManager = replicaManager, groupCoordinator = groupCoordinator, txnCoordinator = txnCoordinator, - shareCoordinator = shareCoordinator, - autoTopicCreationManager = autoTopicCreationManager.getOrElse(this.autoTopicCreationManager), + shareCoordinator = Some(shareCoordinator), + autoTopicCreationManager = autoTopicCreationManager, brokerId = brokerId, config = config, configRepository = configRepository, metadataCache = metadataCache, metrics = metrics, - authorizerPlugin = authorizer.map(Plugin.wrapInstance(_, null, "authorizer.class.name")), + authorizer = authorizer, quotas = quotas, fetchManager = fetchManager, sharePartitionManager = sharePartitionManager, @@ -210,43 +208,27 @@ class KafkaApisTest extends Logging { time = time, tokenManager = null, apiVersionManager = apiVersionManager, - clientMetricsManager = clientMetricsManager, - groupConfigManager = groupConfigManager) + clientMetricsManager = clientMetricsManager) } private def setupFeatures(featureVersions: Seq[FeatureVersion]): Unit = { if (featureVersions.isEmpty) return - when(metadataCache.features()).thenReturn { - new FinalizedFeatures( - MetadataVersion.latestTesting, - featureVersions.map { featureVersion => - featureVersion.featureName -> featureVersion.featureLevel.asInstanceOf[java.lang.Short] - }.toMap.asJava, - 0) - } - } + metadataCache match { + case cache: KRaftMetadataCache => + when(cache.features()).thenReturn { + new FinalizedFeatures( + MetadataVersion.latestTesting, + featureVersions.map { featureVersion => + featureVersion.featureName -> featureVersion.featureLevel.asInstanceOf[java.lang.Short] + }.toMap.asJava, + 0, + true + ) + } - def initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups: Boolean = true): MetadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new FeatureLevelRecord() - .setName(MetadataVersion.FEATURE_NAME) - .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) - ) - if (enableShareGroups) { - delta.replay(new FeatureLevelRecord() - .setName(ShareVersion.FEATURE_NAME) - .setFeatureLevel(ShareVersion.SV_1.featureLevel()) - ) - } else { - delta.replay(new FeatureLevelRecord() - .setName(ShareVersion.FEATURE_NAME) - .setFeatureLevel(ShareVersion.SV_0.featureLevel()) - ) + case _ => throw new IllegalStateException("Test must set an instance of KRaftMetadataCache") } - cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - cache } @Test @@ -259,14 +241,14 @@ class KafkaApisTest extends Logging { val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = util.List.of( + val expectedActions = Seq( new Action(operation, new ResourcePattern(resourceType, resourceName, PatternType.LITERAL), 1, true, true) ) // Verify that authorize is only called once - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) val topicConfigs = new Properties() @@ -280,16 +262,16 @@ class KafkaApisTest extends Logging { val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(resourceName) - .setResourceType(ConfigResource.Type.TOPIC.id)))) + .setResourceType(ConfigResource.Type.TOPIC.id)).asJava)) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) kafkaApis = createKafkaApis(authorizer = Some(authorizer), configRepository = configRepository) kafkaApis.handleDescribeConfigsRequest(request) - verify(authorizer).authorize(any(), ArgumentMatchers.eq(expectedActions)) + verify(authorizer).authorize(any(), ArgumentMatchers.eq(expectedActions.asJava)) val response = verifyNoThrottling[DescribeConfigsResponse](request) val results = response.data.results assertEquals(1, results.size) @@ -326,7 +308,7 @@ class KafkaApisTest extends Logging { Seq(resource), "consumer.session.timeout.ms", "45000").build(requestHeader.apiVersion) val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) createKafkaApis(authorizer = Some(authorizer)).handleIncrementalAlterConfigsRequest(request) verify(forwardingManager, times(1)).forwardRequest( any(), @@ -343,13 +325,13 @@ class KafkaApisTest extends Logging { val consumerGroupId = "consumer_group_1" val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = util.List.of( + val expectedActions = Seq( new Action(operation, new ResourcePattern(resourceType, consumerGroupId, PatternType.LITERAL), 1, true, true) ) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) val cgConfigs = new Properties() @@ -359,18 +341,13 @@ class KafkaApisTest extends Logging { cgConfigs.put(SHARE_HEARTBEAT_INTERVAL_MS_CONFIG, GroupCoordinatorConfig.SHARE_GROUP_HEARTBEAT_INTERVAL_MS_DEFAULT.toString) cgConfigs.put(SHARE_RECORD_LOCK_DURATION_MS_CONFIG, ShareGroupConfig.SHARE_GROUP_RECORD_LOCK_DURATION_MS_DEFAULT.toString) cgConfigs.put(SHARE_AUTO_OFFSET_RESET_CONFIG, GroupConfig.SHARE_AUTO_OFFSET_RESET_DEFAULT) - cgConfigs.put(SHARE_ISOLATION_LEVEL_CONFIG, GroupConfig.SHARE_ISOLATION_LEVEL_DEFAULT) - cgConfigs.put(STREAMS_HEARTBEAT_INTERVAL_MS_CONFIG, GroupCoordinatorConfig.STREAMS_GROUP_HEARTBEAT_INTERVAL_MS_DEFAULT.toString) - cgConfigs.put(STREAMS_SESSION_TIMEOUT_MS_CONFIG, GroupCoordinatorConfig.STREAMS_GROUP_SESSION_TIMEOUT_MS_DEFAULT.toString) - cgConfigs.put(STREAMS_NUM_STANDBY_REPLICAS_CONFIG, GroupCoordinatorConfig.STREAMS_GROUP_NUM_STANDBY_REPLICAS_DEFAULT.toString) - when(configRepository.groupConfig(consumerGroupId)).thenReturn(cgConfigs) val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(consumerGroupId) - .setResourceType(ConfigResource.Type.GROUP.id)))) + .setResourceType(ConfigResource.Type.GROUP.id)).asJava)) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) @@ -398,18 +375,18 @@ class KafkaApisTest extends Logging { val subscriptionName = "client_metric_subscription_1" val authorizedResource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName) - val props = ClientMetricsTestUtils.defaultTestProperties + val props = ClientMetricsTestUtils.defaultProperties val configEntries = new util.ArrayList[AlterConfigsRequest.ConfigEntry]() props.forEach((x, y) => configEntries.add(new AlterConfigsRequest.ConfigEntry(x.asInstanceOf[String], y.asInstanceOf[String]))) - val configs = util.Map.of(authorizedResource, new AlterConfigsRequest.Config(configEntries)) + val configs = Map(authorizedResource -> new AlterConfigsRequest.Config(configEntries)) val requestHeader = new RequestHeader(ApiKeys.ALTER_CONFIGS, ApiKeys.ALTER_CONFIGS.latestVersion, clientId, 0) - val apiRequest = new AlterConfigsRequest.Builder(configs, false).build(requestHeader.apiVersion) + val apiRequest = new AlterConfigsRequest.Builder(configs.asJava, false).build(requestHeader.apiVersion) val request = buildRequest(apiRequest) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) verify(forwardingManager, times(1)).forwardRequest( @@ -431,7 +408,7 @@ class KafkaApisTest extends Logging { Seq(resource), "metrics", "foo.bar").build(requestHeader.apiVersion) val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) verify(forwardingManager, times(1)).forwardRequest( @@ -459,17 +436,17 @@ class KafkaApisTest extends Logging { val subscriptionName = "client_metric_subscription_1" val requestHeader = new RequestHeader(ApiKeys.DESCRIBE_CONFIGS, ApiKeys.DESCRIBE_CONFIGS.latestVersion, clientId, 0) - val expectedActions = util.List.of( + val expectedActions = Seq( new Action(operation, new ResourcePattern(resourceType, Resource.CLUSTER_NAME, PatternType.LITERAL), 1, true, true) ) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(expectedActions.asJava))) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) val resource = new ConfigResource(ConfigResource.Type.CLIENT_METRICS, subscriptionName) val configRepository: ConfigRepository = mock(classOf[ConfigRepository]) - val cmConfigs = ClientMetricsTestUtils.defaultTestProperties + val cmConfigs = ClientMetricsTestUtils.defaultProperties when(configRepository.config(resource)).thenReturn(cmConfigs) metadataCache = mock(classOf[KRaftMetadataCache]) @@ -477,9 +454,9 @@ class KafkaApisTest extends Logging { val describeConfigsRequest = new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() .setIncludeSynonyms(true) - .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(List(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceName(subscriptionName) - .setResourceType(ConfigResource.Type.CLIENT_METRICS.id)))) + .setResourceType(ConfigResource.Type.CLIENT_METRICS.id)).asJava)) .build(requestHeader.apiVersion) val request = buildRequest(describeConfigsRequest, requestHeader = Option(requestHeader)) @@ -504,7 +481,7 @@ class KafkaApisTest extends Logging { def testDescribeQuorumForwardedForKRaftClusters(): Unit = { val requestData = DescribeQuorumRequest.singletonRequest(KafkaRaftServer.MetadataPartition) val requestBuilder = new DescribeQuorumRequest.Builder(requestData) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() testForwardableApi(kafkaApis = kafkaApis, ApiKeys.DESCRIBE_QUORUM, @@ -516,7 +493,7 @@ class KafkaApisTest extends Logging { apiKey: ApiKeys, requestBuilder: AbstractRequest.Builder[_ <: AbstractRequest] ): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() testForwardableApi(kafkaApis = kafkaApis, apiKey, @@ -570,8 +547,8 @@ class KafkaApisTest extends Logging { new ResourcePattern(resourceType, resourceName, PatternType.LITERAL), 1, logIfAllowed, logIfDenied) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(util.List.of(expectedAuthorizedAction)))) - .thenReturn(util.List.of(result)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(Seq(expectedAuthorizedAction).asJava))) + .thenReturn(Seq(result).asJava) } @Test @@ -587,7 +564,7 @@ class KafkaApisTest extends Logging { .build(requestHeader.apiVersion) val request = buildRequest(incrementalAlterConfigsRequest, requestHeader = Option(requestHeader)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.LATEST_PRODUCTION) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleIncrementalAlterConfigsRequest(request) @@ -615,7 +592,7 @@ class KafkaApisTest extends Logging { controllerThrottleTimeMs: Int, requestThrottleTimeMs: Int ): Unit = { - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val topicToCreate = new CreatableTopic() .setName("topic") @@ -652,8 +629,7 @@ class KafkaApisTest extends Logging { val expectedThrottleTimeMs = math.max(controllerThrottleTimeMs, requestThrottleTimeMs) verify(clientRequestQuotaManager).throttle( - ArgumentMatchers.eq(request.header.clientId()), - ArgumentMatchers.eq(request.session), + ArgumentMatchers.eq(request), any[ThrottleCallback](), ArgumentMatchers.eq(expectedThrottleTimeMs) ) @@ -761,7 +737,7 @@ class KafkaApisTest extends Logging { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(coordinatorType.id()) - .setCoordinatorKeys(util.List.of(key))) + .setCoordinatorKeys(asList(key))) } else { new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() @@ -772,7 +748,7 @@ class KafkaApisTest extends Logging { when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - val capturedRequest = verifyTopicCreation(topicName, enableAutoTopicCreation = true, isInternal = true, request) + val capturedRequest = verifyTopicCreation(topicName, true, true, request) kafkaApis = createKafkaApis(authorizer = Some(authorizer), overrideProperties = topicConfigOverride) kafkaApis.handleFindCoordinatorRequest(request) @@ -792,58 +768,6 @@ class KafkaApisTest extends Logging { } } - @Test - def testFindCoordinatorWithInvalidSharePartitionKey(): Unit = { - val request = new FindCoordinatorRequestData() - .setKeyType(CoordinatorType.SHARE.id) - .setCoordinatorKeys(util.List.of("")) - - val requestChannelRequest = buildRequest(new FindCoordinatorRequest.Builder(request).build()) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val expectedResponse = new FindCoordinatorResponseData() - .setCoordinators(util.List.of( - new FindCoordinatorResponseData.Coordinator() - .setKey("") - .setErrorCode(Errors.INVALID_REQUEST.code) - .setNodeId(-1) - .setHost("") - .setPort(-1))) - - val response = verifyNoThrottling[FindCoordinatorResponse](requestChannelRequest) - assertEquals(expectedResponse, response.data) - } - - @Test - def testFindCoordinatorWithValidSharePartitionKey(): Unit = { - addTopicToMetadataCache(SHARE_GROUP_STATE_TOPIC_NAME, 10, 3) - val key = SharePartitionKey.getInstance("foo", Uuid.randomUuid(), 10) - - val request = new FindCoordinatorRequestData() - .setKeyType(CoordinatorType.SHARE.id) - .setCoordinatorKeys(util.List.of(key.asCoordinatorKey)) - - val requestChannelRequest = buildRequest(new FindCoordinatorRequest.Builder(request).build()) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - when(shareCoordinator.partitionFor(ArgumentMatchers.eq(key))).thenReturn(10) - - val expectedResponse = new FindCoordinatorResponseData() - .setCoordinators(util.List.of( - new FindCoordinatorResponseData.Coordinator() - .setKey(key.asCoordinatorKey) - .setNodeId(0) - .setHost("broker0") - .setPort(9092))) - - val response = verifyNoThrottling[FindCoordinatorResponse](requestChannelRequest) - assertEquals(expectedResponse, response.data) - } - @Test def testMetadataAutoTopicCreationForOffsetTopic(): Unit = { testMetadataAutoTopicCreation(Topic.GROUP_METADATA_TOPIC_NAME, enableAutoTopicCreation = true, @@ -925,7 +849,7 @@ class KafkaApisTest extends Logging { } val metadataRequest = new MetadataRequest.Builder( - util.List.of(topicName), enableAutoTopicCreation + List(topicName).asJava, enableAutoTopicCreation ).build(requestHeader.apiVersion) val request = buildRequest(metadataRequest) @@ -937,11 +861,11 @@ class KafkaApisTest extends Logging { kafkaApis.handleTopicMetadataRequest(request) val response = verifyNoThrottling[MetadataResponse](request) - val expectedMetadataResponse = util.List.of(new TopicMetadata( + val expectedMetadataResponse = util.Collections.singletonList(new TopicMetadata( expectedError, topicName, isInternal, - util.List.of() + util.Collections.emptyList() )) assertEquals(expectedMetadataResponse, response.topicMetadata()) @@ -958,15 +882,12 @@ class KafkaApisTest extends Logging { request: RequestChannel.Request): ArgumentCaptor[Option[RequestContext]] = { val capturedRequest: ArgumentCaptor[Option[RequestContext]] = ArgumentCaptor.forClass(classOf[Option[RequestContext]]) if (enableAutoTopicCreation) { - - when(clientControllerQuotaManager.newPermissiveQuotaFor( - ArgumentMatchers.eq(request.session), - ArgumentMatchers.eq(request.header.clientId()) - )).thenReturn(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA) + when(clientControllerQuotaManager.newPermissiveQuotaFor(ArgumentMatchers.eq(request))) + .thenReturn(UnboundedControllerMutationQuota) when(autoTopicCreationManager.createTopics( ArgumentMatchers.eq(Set(topicName)), - ArgumentMatchers.eq(ControllerMutationQuota.UNBOUNDED_CONTROLLER_MUTATION_QUOTA), + ArgumentMatchers.eq(UnboundedControllerMutationQuota), capturedRequest.capture())).thenReturn( Seq(new MetadataResponseTopic() .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code()) @@ -997,7 +918,7 @@ class KafkaApisTest extends Logging { val invalidVersions = Set(10, 11) invalidVersions.foreach( version => topics.foreach(topic => { - val metadataRequestData = new MetadataRequestData().setTopics(util.List.of(topic)) + val metadataRequestData = new MetadataRequestData().setTopics(Collections.singletonList(topic)) val request = buildRequest(new MetadataRequest(metadataRequestData, version.toShort)) val kafkaApis = createKafkaApis() try { @@ -1020,43 +941,27 @@ class KafkaApisTest extends Logging { ) } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) - def testHandleOffsetCommitRequest(version: Short): Unit = { - val topicName = "foo" - val topicId = Uuid.randomUuid() - addTopicToMetadataCache(topicName, topicId = topicId, numPartitions = 1) + @Test + def testHandleOffsetCommitRequest(): Unit = { + addTopicToMetadataCache("foo", numPartitions = 1) val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(if (version < 10) topicName else "") - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(10))))) - - val expectedOffsetCommitRequest = new OffsetCommitRequestData() - .setGroupId("group") - .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(topicName) - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10))))) + .setCommittedOffset(10)).asJava)).asJava) - val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build(version)) + val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( requestChannelRequest.context, - expectedOffsetCommitRequest, + offsetCommitRequest, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis() @@ -1067,57 +972,40 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val offsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(if (version < 10) topicName else "") - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) future.complete(offsetCommitResponse) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(offsetCommitResponse, response.data) } - @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_COMMIT) - def testHandleOffsetCommitRequestFutureFailed(version: Short): Unit = { - val topicName = "foo" - val topicId = Uuid.randomUuid() - addTopicToMetadataCache(topicName, topicId = topicId, numPartitions = 1) + @Test + def testHandleOffsetCommitRequestFutureFailed(): Unit = { + addTopicToMetadataCache("foo", numPartitions = 1) val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(if (version < 10) topicName else "") - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(10))))) - - val expectedOffsetCommitRequest = new OffsetCommitRequestData() - .setGroupId("group") - .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(topicName) - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10))))) + .setCommittedOffset(10)).asJava)).asJava) - val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build(version)) + val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( requestChannelRequest.context, - expectedOffsetCommitRequest, + offsetCommitRequest, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) @@ -1128,175 +1016,19 @@ class KafkaApisTest extends Logging { ) val expectedOffsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setName(if (version < 10) topicName else "") - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NOT_COORDINATOR.code))))) + .setErrorCode(Errors.NOT_COORDINATOR.code)).asJava)).asJava) future.completeExceptionally(Errors.NOT_COORDINATOR.exception) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(expectedOffsetCommitResponse, response.data) } - @Test - def testHandleOffsetCommitRequestTopicsAndPartitionsValidationWithTopicIds(): Unit = { - val fooId = Uuid.randomUuid() - val barId = Uuid.randomUuid() - val zarId = Uuid.randomUuid() - val fooName = "foo" - val barName = "bar" - addTopicToMetadataCache(fooName, topicId = fooId, numPartitions = 2) - addTopicToMetadataCache(barName, topicId = barId, numPartitions = 2) - - val offsetCommitRequest = new OffsetCommitRequestData() - .setGroupId("group") - .setMemberId("member") - .setTopics(util.List.of( - // foo exists but only has 2 partitions. - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(fooId) - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(10), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedOffset(20), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(2) - .setCommittedOffset(30))), - // bar exists. - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(barId) - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(40), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedOffset(50))), - // zar does not exist. - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(zarId) - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(60), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedOffset(70))))) - - val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicIdsOrNames(offsetCommitRequest, true).build()) - - // This is the request expected by the group coordinator. - val expectedOffsetCommitRequest = new OffsetCommitRequestData() - .setGroupId("group") - .setMemberId("member") - .setTopics(util.List.of( - // foo exists but only has 2 partitions. - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(fooId) - .setName(fooName) - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(10), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedOffset(20))), - new OffsetCommitRequestData.OffsetCommitRequestTopic() - .setTopicId(barId) - .setName(barName) - .setPartitions(util.List.of( - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(0) - .setCommittedOffset(40), - new OffsetCommitRequestData.OffsetCommitRequestPartition() - .setPartitionIndex(1) - .setCommittedOffset(50))))) - - val future = new CompletableFuture[OffsetCommitResponseData]() - when(groupCoordinator.commitOffsets( - requestChannelRequest.context, - expectedOffsetCommitRequest, - RequestLocal.noCaching.bufferSupplier - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handle( - requestChannelRequest, - RequestLocal.noCaching - ) - - // This is the response returned by the group coordinator. - val offsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(fooId) - .setName(fooName) - .setPartitions(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(barId) - .setName(barName) - .setPartitions(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) - - val expectedOffsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(fooId) - .setPartitions(util.List.of( - // foo-2 is first because partitions failing the validation - // are put in the response first. - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(2) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), - // zar is before bar because topics failing the validation are - // put in the response first. - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(zarId) - .setPartitions(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code))), - new OffsetCommitResponseData.OffsetCommitResponseTopic() - .setTopicId(barId) - .setPartitions(util.List.of( - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code), - new OffsetCommitResponseData.OffsetCommitResponsePartition() - .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) - - future.complete(offsetCommitResponse) - val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) - assertEquals(expectedOffsetCommitResponse, response.data) - } - @Test def testHandleOffsetCommitRequestTopicsAndPartitionsValidation(): Unit = { addTopicToMetadataCache("foo", numPartitions = 2) @@ -1305,11 +1037,11 @@ class KafkaApisTest extends Logging { val offsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( // foo exists but only has 2 partitions. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), @@ -1318,54 +1050,54 @@ class KafkaApisTest extends Logging { .setCommittedOffset(20), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(2) - .setCommittedOffset(30))), + .setCommittedOffset(30)).asJava), // bar exists. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50))), + .setCommittedOffset(50)).asJava), // zar does not exist. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("zar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(60), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(70))))) + .setCommittedOffset(70)).asJava)).asJava) - val requestChannelRequest = buildRequest(OffsetCommitRequest.Builder.forTopicNames(offsetCommitRequest).build()) + val requestChannelRequest = buildRequest(new OffsetCommitRequest.Builder(offsetCommitRequest).build()) // This is the request expected by the group coordinator. val expectedOffsetCommitRequest = new OffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( // foo exists but only has 2 partitions. new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(20))), + .setCommittedOffset(20)).asJava), new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50))))) + .setCommittedOffset(50)).asJava)).asJava) val future = new CompletableFuture[OffsetCommitResponseData]() when(groupCoordinator.commitOffsets( @@ -1381,31 +1113,31 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val offsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), + .setErrorCode(Errors.NONE.code)).asJava), new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) val expectedOffsetCommitResponse = new OffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( // foo-2 is first because partitions failing the validation // are put in the response first. new OffsetCommitResponseData.OffsetCommitResponsePartition() @@ -1416,33 +1148,75 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), + .setErrorCode(Errors.NONE.code)).asJava), // zar is before bar because topics failing the validation are // put in the response first. new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("zar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code))), + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)).asJava), new OffsetCommitResponseData.OffsetCommitResponseTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetCommitResponseData.OffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) future.complete(offsetCommitResponse) val response = verifyNoThrottling[OffsetCommitResponse](requestChannelRequest) assertEquals(expectedOffsetCommitResponse, response.data) } + @Test + def testOffsetCommitWithInvalidPartition(): Unit = { + val topic = "topic" + addTopicToMetadataCache(topic, numPartitions = 1) + + def checkInvalidPartition(invalidPartitionId: Int): Unit = { + reset(replicaManager, clientRequestQuotaManager, requestChannel) + + val offsetCommitRequest = new OffsetCommitRequest.Builder( + new OffsetCommitRequestData() + .setGroupId("groupId") + .setTopics(Collections.singletonList( + new OffsetCommitRequestData.OffsetCommitRequestTopic() + .setName(topic) + .setPartitions(Collections.singletonList( + new OffsetCommitRequestData.OffsetCommitRequestPartition() + .setPartitionIndex(invalidPartitionId) + .setCommittedOffset(15) + .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) + .setCommittedMetadata("")) + ) + ))).build() + + val request = buildRequest(offsetCommitRequest) + when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), + any[Long])).thenReturn(0) + val kafkaApis = createKafkaApis() + try { + kafkaApis.handleOffsetCommitRequest(request, RequestLocal.withThreadConfinedCaching) + + val response = verifyNoThrottling[OffsetCommitResponse](request) + assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, + Errors.forCode(response.data.topics().get(0).partitions().get(0).errorCode)) + } finally { + kafkaApis.close() + } + } + + checkInvalidPartition(-1) + checkInvalidPartition(1) // topic has only one partition + } + @Test def testTxnOffsetCommitWithInvalidPartition(): Unit = { val topic = "topic" @@ -1458,7 +1232,7 @@ class KafkaApisTest extends Logging { "groupId", 15L, 0.toShort, - util.Map.of(invalidTopicPartition, partitionOffsetCommitData), + Map(invalidTopicPartition -> partitionOffsetCommitData).asJava, true ).build() val request = buildRequest(offsetCommitRequest) @@ -1491,13 +1265,13 @@ class KafkaApisTest extends Logging { .setProducerEpoch(30) .setGroupInstanceId("instance-id") .setTransactionalId("transactional-id") - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10))))) + .setCommittedOffset(10)).asJava)).asJava) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1516,13 +1290,13 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val txnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) future.complete(txnOffsetCommitResponse) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1536,13 +1310,13 @@ class KafkaApisTest extends Logging { val txnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) - .setCommittedOffset(10))))) + .setCommittedOffset(10)).asJava)).asJava) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1560,13 +1334,13 @@ class KafkaApisTest extends Logging { ) val expectedTxnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) - .setErrorCode(Errors.NOT_COORDINATOR.code))))) + .setErrorCode(Errors.NOT_COORDINATOR.code)).asJava)).asJava) future.completeExceptionally(Errors.NOT_COORDINATOR.exception) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1581,11 +1355,11 @@ class KafkaApisTest extends Logging { val txnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( // foo exists but only has 2 partitions. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), @@ -1594,27 +1368,27 @@ class KafkaApisTest extends Logging { .setCommittedOffset(20), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(2) - .setCommittedOffset(30))), + .setCommittedOffset(30)).asJava), // bar exists. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50))), + .setCommittedOffset(50)).asJava), // zar does not exist. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("zar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(60), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(70))))) + .setCommittedOffset(70)).asJava)).asJava) val requestChannelRequest = buildRequest(new TxnOffsetCommitRequest.Builder(txnOffsetCommitRequest).build()) @@ -1622,26 +1396,26 @@ class KafkaApisTest extends Logging { val expectedTxnOffsetCommitRequest = new TxnOffsetCommitRequestData() .setGroupId("group") .setMemberId("member") - .setTopics(util.List.of( + .setTopics(List( // foo exists but only has 2 partitions. new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(10), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(20))), + .setCommittedOffset(20)).asJava), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedOffset(40), new TxnOffsetCommitRequestData.TxnOffsetCommitRequestPartition() .setPartitionIndex(1) - .setCommittedOffset(50))))) + .setCommittedOffset(50)).asJava)).asJava) val future = new CompletableFuture[TxnOffsetCommitResponseData]() when(txnCoordinator.partitionFor(expectedTxnOffsetCommitRequest.transactionalId)).thenReturn(0) @@ -1658,31 +1432,31 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val txnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), + .setErrorCode(Errors.NONE.code)).asJava), new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) val expectedTxnOffsetCommitResponse = new TxnOffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( // foo-2 is first because partitions failing the validation // are put in the response first. new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() @@ -1693,27 +1467,27 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))), + .setErrorCode(Errors.NONE.code)).asJava), // zar is before bar because topics failing the validation are // put in the response first. new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("zar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code))), + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code)).asJava), new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code))))) + .setErrorCode(Errors.NONE.code)).asJava)).asJava) future.complete(txnOffsetCommitResponse) val response = verifyNoThrottling[TxnOffsetCommitResponse](requestChannelRequest) @@ -1740,7 +1514,7 @@ class KafkaApisTest extends Logging { groupId, producerId, epoch, - util.Map.of(topicPartition, partitionOffsetCommitData), + Map(topicPartition -> partitionOffsetCommitData).asJava, version >= TxnOffsetCommitRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 ).build(version) val request = buildRequest(offsetCommitRequest) @@ -1755,15 +1529,15 @@ class KafkaApisTest extends Logging { )).thenReturn(future) future.complete(new TxnOffsetCommitResponseData() - .setTopics(util.List.of( + .setTopics(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponseTopic() .setName(topicPartition.topic) - .setPartitions(util.List.of( + .setPartitions(List( new TxnOffsetCommitResponseData.TxnOffsetCommitResponsePartition() .setPartitionIndex(topicPartition.partition) .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code) - )) - ))) + ).asJava) + ).asJava)) kafkaApis = createKafkaApis() kafkaApis.handleTxnOffsetCommitRequest(request, requestLocal) @@ -1810,8 +1584,6 @@ class KafkaApisTest extends Logging { new InitProducerIdRequestData() .setTransactionalId(transactionalId) .setTransactionTimeoutMs(txnTimeoutMs) - .setEnable2Pc(false) - .setKeepPreparedTxn(false) .setProducerId(producerId) .setProducerEpoch(epoch) ).build(version.toShort) @@ -1827,8 +1599,6 @@ class KafkaApisTest extends Logging { when(txnCoordinator.handleInitProducerId( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(txnTimeoutMs), - ArgumentMatchers.eq(false), - ArgumentMatchers.eq(false), ArgumentMatchers.eq(expectedProducerIdAndEpoch), responseCallback.capture(), ArgumentMatchers.eq(requestLocal) @@ -1891,7 +1661,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(util.Set.of(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition))), + ArgumentMatchers.eq(Set(new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, partition))), responseCallback.capture(), ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) @@ -1941,7 +1711,7 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - util.List.of(topicPartition) + Collections.singletonList(topicPartition) ).build(version.toShort) val request = buildRequest(addPartitionsToTxnRequest) @@ -1950,7 +1720,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(util.Set.of(topicPartition)), + ArgumentMatchers.eq(Set(topicPartition)), responseCallback.capture(), ArgumentMatchers.eq(TransactionVersion.TV_0), ArgumentMatchers.eq(requestLocal) @@ -1967,9 +1737,9 @@ class KafkaApisTest extends Logging { val response = capturedResponse.getValue if (version < 2) { - assertEquals(util.Map.of(topicPartition, Errors.INVALID_PRODUCER_EPOCH), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) + assertEquals(Collections.singletonMap(topicPartition, Errors.INVALID_PRODUCER_EPOCH), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) } else { - assertEquals(util.Map.of(topicPartition, Errors.PRODUCER_FENCED), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) + assertEquals(Collections.singletonMap(topicPartition, Errors.PRODUCER_FENCED), response.errors().get(AddPartitionsToTxnResponse.V3_AND_BELOW_TXN_ID)) } } finally { kafkaApis.close() @@ -1978,162 +1748,32 @@ class KafkaApisTest extends Logging { } @Test - def testInitProducerIdWithEnable2PcFailsWithoutTwoPhaseCommitAcl(): Unit = { - val transactionalId = "txnId" - addTopicToMetadataCache("topic", numPartitions = 1) - - val initProducerIdRequest = new InitProducerIdRequest.Builder( - new InitProducerIdRequestData() - .setTransactionalId(transactionalId) - .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt) - .setEnable2Pc(true) - .setProducerId(RecordBatch.NO_PRODUCER_ID) - .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - ).build(6.toShort) // Use version 6 which supports enable2Pc - - val request = buildRequest(initProducerIdRequest) - val requestLocal = RequestLocal.withThreadConfinedCaching - val authorizer: Authorizer = mock(classOf[Authorizer]) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - - // Allow WRITE but deny TWO_PHASE_COMMIT - when(authorizer.authorize( - any(), - ArgumentMatchers.eq(util.List.of(new Action( - AclOperation.WRITE, - new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), - 1, - true, - true))) - )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + def testBatchedAddPartitionsToTxnRequest(): Unit = { + val topic = "topic" + addTopicToMetadataCache(topic, numPartitions = 2) - when(authorizer.authorize( - any(), - ArgumentMatchers.eq(util.List.of(new Action( - AclOperation.TWO_PHASE_COMMIT, - new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), - 1, - true, - true))) - )).thenReturn(util.List.of(AuthorizationResult.DENIED)) + val responseCallback: ArgumentCaptor[Errors => Unit] = ArgumentCaptor.forClass(classOf[Errors => Unit]) + val verifyPartitionsCallback: ArgumentCaptor[AddPartitionsToTxnResult => Unit] = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnResult => Unit]) - val capturedResponse = ArgumentCaptor.forClass(classOf[InitProducerIdResponse]) + val transactionalId1 = "txnId1" + val transactionalId2 = "txnId2" + val producerId = 15L + val epoch = 0.toShort - kafkaApis.handleInitProducerIdRequest(request, requestLocal) - - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - - assertEquals(Errors.TRANSACTIONAL_ID_AUTHORIZATION_FAILED.code, capturedResponse.getValue.data.errorCode) - } - - @Test - def testInitProducerIdWithEnable2PcSucceedsWithTwoPhaseCommitAcl(): Unit = { - val transactionalId = "txnId" - addTopicToMetadataCache("topic", numPartitions = 1) - - val initProducerIdRequest = new InitProducerIdRequest.Builder( - new InitProducerIdRequestData() - .setTransactionalId(transactionalId) - .setTransactionTimeoutMs(TimeUnit.MINUTES.toMillis(15).toInt) - .setEnable2Pc(true) - .setProducerId(RecordBatch.NO_PRODUCER_ID) - .setProducerEpoch(RecordBatch.NO_PRODUCER_EPOCH) - ).build(6.toShort) // Use version 6 which supports enable2Pc - - val request = buildRequest(initProducerIdRequest) - val requestLocal = RequestLocal.withThreadConfinedCaching - val authorizer: Authorizer = mock(classOf[Authorizer]) - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - - // Both permissions are allowed - when(authorizer.authorize( - any(), - ArgumentMatchers.eq(util.List.of(new Action( - AclOperation.WRITE, - new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), - 1, - true, - true))) - )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) - - when(authorizer.authorize( - any(), - ArgumentMatchers.eq(util.List.of(new Action( - AclOperation.TWO_PHASE_COMMIT, - new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL), - 1, - true, - true))) - )).thenReturn(util.List.of(AuthorizationResult.ALLOWED)) - - val responseCallback = ArgumentCaptor.forClass(classOf[InitProducerIdResult => Unit]) - - when(txnCoordinator.handleInitProducerId( - ArgumentMatchers.eq(transactionalId), - anyInt(), - ArgumentMatchers.eq(true), // enable2Pc = true - anyBoolean(), - any(), - responseCallback.capture(), - ArgumentMatchers.eq(requestLocal) - )).thenAnswer(_ => responseCallback.getValue.apply(InitProducerIdResult(15L, 0.toShort, Errors.NONE))) - - kafkaApis.handleInitProducerIdRequest(request, requestLocal) - - // Verify coordinator was called with enable2Pc=true - verify(txnCoordinator).handleInitProducerId( - ArgumentMatchers.eq(transactionalId), - anyInt(), - ArgumentMatchers.eq(true), // enable2Pc = true - anyBoolean(), - any(), - any(), - ArgumentMatchers.eq(requestLocal) - ) - - val capturedResponse = ArgumentCaptor.forClass(classOf[InitProducerIdResponse]) - verify(requestChannel).sendResponse( - ArgumentMatchers.eq(request), - capturedResponse.capture(), - ArgumentMatchers.eq(None) - ) - - assertEquals(Errors.NONE.code, capturedResponse.getValue.data.errorCode) - assertEquals(15L, capturedResponse.getValue.data.producerId) - assertEquals(0, capturedResponse.getValue.data.producerEpoch) - } - - @Test - def testBatchedAddPartitionsToTxnRequest(): Unit = { - val topic = "topic" - addTopicToMetadataCache(topic, numPartitions = 2) - - val responseCallback: ArgumentCaptor[Errors => Unit] = ArgumentCaptor.forClass(classOf[Errors => Unit]) - val verifyPartitionsCallback: ArgumentCaptor[AddPartitionsToTxnResult => Unit] = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnResult => Unit]) - - val transactionalId1 = "txnId1" - val transactionalId2 = "txnId2" - val producerId = 15L - val epoch = 0.toShort - - val tp0 = new TopicPartition(topic, 0) - val tp1 = new TopicPartition(topic, 1) + val tp0 = new TopicPartition(topic, 0) + val tp1 = new TopicPartition(topic, 1) val addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - util.List.of(new AddPartitionsToTxnTransaction() + List(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId1) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(false) .setTopics(new AddPartitionsToTxnTopicCollection( - util.List.of(new AddPartitionsToTxnTopic() + Collections.singletonList(new AddPartitionsToTxnTopic() .setName(tp0.topic) - .setPartitions(util.List.of(tp0.partition)) + .setPartitions(Collections.singletonList(tp0.partition)) ).iterator()) ), new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId2) @@ -2141,12 +1781,12 @@ class KafkaApisTest extends Logging { .setProducerEpoch(epoch) .setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - util.List.of(new AddPartitionsToTxnTopic() + Collections.singletonList(new AddPartitionsToTxnTopic() .setName(tp1.topic) - .setPartitions(util.List.of(tp1.partition)) + .setPartitions(Collections.singletonList(tp1.partition)) ).iterator()) ) - ).iterator() + ).asJava.iterator() ) ).build(4.toShort) val request = buildRequest(addPartitionsToTxnRequest) @@ -2156,7 +1796,7 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId1), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(util.Set.of(tp0)), + ArgumentMatchers.eq(Set(tp0)), responseCallback.capture(), any[TransactionVersion], ArgumentMatchers.eq(requestLocal) @@ -2166,18 +1806,18 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(transactionalId2), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(epoch), - ArgumentMatchers.eq(util.Set.of(tp1)), + ArgumentMatchers.eq(Set(tp1)), verifyPartitionsCallback.capture(), - )).thenAnswer(_ => verifyPartitionsCallback.getValue.apply(AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, util.Map.of(tp1, Errors.PRODUCER_FENCED)))) + )).thenAnswer(_ => verifyPartitionsCallback.getValue.apply(AddPartitionsToTxnResponse.resultForTransaction(transactionalId2, Map(tp1 -> Errors.PRODUCER_FENCED).asJava))) kafkaApis = createKafkaApis() kafkaApis.handleAddPartitionsToTxnRequest(request, requestLocal) val response = verifyNoThrottling[AddPartitionsToTxnResponse](request) - val expectedErrors = util.Map.of( - transactionalId1, util.Map.of(tp0, Errors.NONE), - transactionalId2, util.Map.of(tp1, Errors.PRODUCER_FENCED) - ) + val expectedErrors = Map( + transactionalId1 -> Collections.singletonMap(tp0, Errors.NONE), + transactionalId2 -> Collections.singletonMap(tp1, Errors.PRODUCER_FENCED) + ).asJava assertEquals(expectedErrors, response.errors()) } @@ -2185,7 +1825,7 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.ADD_PARTITIONS_TO_TXN) def testHandleAddPartitionsToTxnAuthorizationFailedAndMetrics(version: Short): Unit = { - val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.ADD_PARTITIONS_TO_TXN)) + val requestMetrics = new RequestChannelMetrics(Collections.singleton(ApiKeys.ADD_PARTITIONS_TO_TXN)) try { val topic = "topic" @@ -2201,27 +1841,27 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - util.List.of(tp)).build(version) + Collections.singletonList(tp)).build(version) else AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - util.List.of(new AddPartitionsToTxnTransaction() + List(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - util.List.of(new AddPartitionsToTxnTopic() + Collections.singletonList(new AddPartitionsToTxnTopic() .setName(tp.topic) - .setPartitions(util.List.of(tp.partition)) + .setPartitions(Collections.singletonList(tp.partition)) ).iterator())) - ).iterator())).build(version) + ).asJava.iterator())).build(version) val requestChannelRequest = buildRequest(addPartitionsToTxnRequest, requestMetrics = requestMetrics) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handle( requestChannelRequest, @@ -2262,21 +1902,21 @@ class KafkaApisTest extends Logging { transactionalId, producerId, epoch, - util.List.of(tp0, tp1)).build(version) + List(tp0, tp1).asJava).build(version) else AddPartitionsToTxnRequest.Builder.forBroker( new AddPartitionsToTxnTransactionCollection( - util.List.of(new AddPartitionsToTxnTransaction() + List(new AddPartitionsToTxnTransaction() .setTransactionalId(transactionalId) .setProducerId(producerId) .setProducerEpoch(epoch) .setVerifyOnly(true) .setTopics(new AddPartitionsToTxnTopicCollection( - util.List.of(new AddPartitionsToTxnTopic() + Collections.singletonList(new AddPartitionsToTxnTopic() .setName(tp0.topic) - .setPartitions(util.List.of[Integer](tp0.partition, tp1.partition())) + .setPartitions(List[Integer](tp0.partition, tp1.partition()).asJava) ).iterator())) - ).iterator())).build(version) + ).asJava.iterator())).build(version) val requestChannelRequest = buildRequest(addPartitionsToTxnRequest) kafkaApis = createKafkaApis() @@ -2363,31 +2003,23 @@ class KafkaApisTest extends Logging { @Test def shouldReplaceProducerFencedWithInvalidProducerEpochInProduceResponse(): Unit = { val topic = "topic" - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") - val tp = new TopicIdPartition(topicId, 0, "topic") - addTopicToMetadataCache(topic, numPartitions = 2, topicId = topicId) + addTopicToMetadataCache(topic, numPartitions = 2) for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - val produceData = new ProduceRequestData.TopicProduceData() - .setPartitionData(util.List.of( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) - - if (version >= 13 ) { - produceData.setTopicId(topicId) - } else { - produceData.setName(tp.topic) - } + val tp = new TopicPartition("topic", 0) val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(produceData) + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) .iterator)) .setAcks(1.toShort) .setTimeoutMs(5000)) @@ -2402,13 +2034,14 @@ class KafkaApisTest extends Logging { responseCallback.capture(), any(), any(), + any(), any() )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.INVALID_PRODUCER_EPOCH)))) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val kafkaApis = createKafkaApis() try { kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) @@ -2426,363 +2059,30 @@ class KafkaApisTest extends Logging { } } - @Test - def testHandleShareFetchRequestQuotaTagsVerification(): Unit = { - val topicName = "foo" - val topicId = Uuid.randomUuid() - val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName, 1, topicId = topicId) - val memberId: Uuid = Uuid.randomUuid() - val groupId = "group" - - // Create test principal and client address to verify quota tags - val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") - val testClientAddress = InetAddress.getByName("192.168.1.100") - val testClientId = "test-client-id" - - // Mock share partition manager responses - val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), - new ShareFetchResponseData.PartitionData() - .setErrorCode(Errors.NONE.code) - .setAcknowledgeErrorCode(Errors.NONE.code) - .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( - new ShareFetchResponseData.AcquiredRecords() - .setFirstOffset(0) - .setLastOffset(9) - .setDeliveryCount(1) - )))))) - - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName))) - ) - - // Create argument captors to verify session information passed to quota managers - val sessionCaptorFetch = ArgumentCaptor.forClass(classOf[Session]) - val clientIdCaptor = ArgumentCaptor.forClass(classOf[String]) - val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) - - // Mock quota manager responses and capture arguments - when(quotas.fetch.maybeRecordAndGetThrottleTimeMs( - sessionCaptorFetch.capture(), clientIdCaptor.capture(), anyDouble, anyLong)).thenReturn(0) - when(quotas.request.maybeRecordAndGetThrottleTimeMs( - requestCaptor.capture(), anyLong)).thenReturn(0) - - // Create ShareFetch request - val shareFetchRequestData = new ShareFetchRequestData() - .setGroupId(groupId) - .setMemberId(memberId.toString) - .setShareSessionEpoch(0) - .setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic() - .setTopicId(topicId) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( - new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex) - ).iterator)) - ).iterator)) - - val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - - // Create request with custom principal and client address to test quota tags - val requestHeader = new RequestHeader(shareFetchRequest.apiKey, shareFetchRequest.version, testClientId, 0) - val request = buildRequest(shareFetchRequest, testPrincipal, testClientAddress, - ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) - - // Test that the request itself contains the proper tags and information - assertEquals(testClientId, request.header.clientId) - assertEquals(testPrincipal, request.context.principal) - assertEquals(testClientAddress, request.context.clientAddress) - assertEquals(ApiKeys.SHARE_FETCH, request.header.apiKey) - assertEquals("1", request.context.connectionId) - - kafkaApis = createKafkaApis() - kafkaApis.handleShareFetchRequest(request) - val response = verifyNoThrottling[ShareFetchResponse](request) - - // Verify response is successful - val responseData = response.data() - assertEquals(Errors.NONE.code, responseData.errorCode) - - // Verify that quota methods were called and captured session information - verify(quotas.fetch, times(1)).maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong) - verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyLong) - - // Verify the Session data passed to fetch quota manager is exactly what was defined in the test - val capturedSession = sessionCaptorFetch.getValue - assertNotNull(capturedSession) - assertNotNull(capturedSession.principal) - assertEquals(KafkaPrincipal.USER_TYPE, capturedSession.principal.getPrincipalType) - assertEquals("test-user", capturedSession.principal.getName) - assertEquals(testClientAddress, capturedSession.clientAddress) - assertEquals("test-user", capturedSession.sanitizedUser) - - // Verify client ID passed to fetch quota manager matches what was defined - val capturedClientId = clientIdCaptor.getValue - assertEquals(testClientId, capturedClientId) - - // Verify the Request data passed to request quota manager is exactly what was defined - val capturedRequest = requestCaptor.getValue - assertNotNull(capturedRequest) - assertEquals(testClientId, capturedRequest.header.clientId) - assertEquals(testPrincipal, capturedRequest.context.principal) - assertEquals(testClientAddress, capturedRequest.context.clientAddress) - assertEquals(ApiKeys.SHARE_FETCH, capturedRequest.header.apiKey) - } - - @Test - def testHandleShareAcknowledgeRequestQuotaTagsVerification(): Unit = { - val topicName = "foo" - val topicId = Uuid.randomUuid() - val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName, 1, topicId = topicId) - val memberId: Uuid = Uuid.randomUuid() - val groupId = "group" - - // Create test principal and client address to verify quota tags - val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") - val testClientAddress = InetAddress.getByName("192.168.1.100") - val testClientId = "test-client-id" - - // Mock share partition manager acknowledge response - when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), - new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partitionIndex) - .setErrorCode(Errors.NONE.code)))) - - // Create argument captors to verify session information passed to quota managers - val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) - - // Mock quota manager responses and capture arguments - // For ShareAcknowledge, we only verify Request quota (not fetch quota) - when(quotas.request.maybeRecordAndGetThrottleTimeMs( - requestCaptor.capture(), anyLong)).thenReturn(0) - - // Create ShareAcknowledge request - val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() - .setGroupId(groupId) - .setMemberId(memberId.toString) - .setShareSessionEpoch(1) - .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection( - util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() - .setTopicId(topicId) - .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection( - util.List.of(new ShareAcknowledgeRequestData.AcknowledgePartition() - .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( - new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) - - val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) - - // Create request with custom principal and client address to test quota tags - val requestHeader = new RequestHeader(shareAcknowledgeRequest.apiKey, shareAcknowledgeRequest.version, testClientId, 0) - val request = buildRequest(shareAcknowledgeRequest, testPrincipal, testClientAddress, - ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) - - // Test that the request itself contains the proper tags and information - assertEquals(testClientId, request.header.clientId) - assertEquals(testPrincipal, request.context.principal) - assertEquals(testClientAddress, request.context.clientAddress) - assertEquals(ApiKeys.SHARE_ACKNOWLEDGE, request.header.apiKey) - assertEquals("1", request.context.connectionId) - - kafkaApis = createKafkaApis() - kafkaApis.handleShareAcknowledgeRequest(request) - val response = verifyNoThrottling[ShareAcknowledgeResponse](request) - - // Verify response is successful - val responseData = response.data() - assertEquals(Errors.NONE.code, responseData.errorCode) - - // Verify that request quota method was called - verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyLong) - - // Verify that fetch quota method was NOT called (ShareAcknowledge only uses request quota) - verify(quotas.fetch, times(0)).maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong) - - // Verify the Request data passed to request quota manager is exactly what was defined - val capturedRequest = requestCaptor.getValue - assertNotNull(capturedRequest) - assertEquals(testClientId, capturedRequest.header.clientId) - assertEquals(testPrincipal, capturedRequest.context.principal) - assertEquals(testClientAddress, capturedRequest.context.clientAddress) - assertEquals(ApiKeys.SHARE_ACKNOWLEDGE, capturedRequest.header.apiKey) - } - - @Test - def testHandleShareFetchWithAcknowledgementQuotaTagsVerification(): Unit = { - val topicName = "foo" - val topicId = Uuid.randomUuid() - val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName, 1, topicId = topicId) - val memberId: Uuid = Uuid.randomUuid() - val groupId = "group" - - // Create test principal and client address to verify quota tags - val testPrincipal = new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "test-user") - val testClientAddress = InetAddress.getByName("192.168.1.100") - val testClientId = "test-client-id" - - // Mock share partition manager responses for both fetch and acknowledge - val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), - new ShareFetchResponseData.PartitionData() - .setErrorCode(Errors.NONE.code) - .setAcknowledgeErrorCode(Errors.NONE.code) - .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( - new ShareFetchResponseData.AcquiredRecords() - .setFirstOffset(0) - .setLastOffset(9) - .setDeliveryCount(1) - )))))) - - when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), - new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(partitionIndex) - .setErrorCode(Errors.NONE.code)))) - - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 1), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName))) - ) - - // Create argument captors to verify session information passed to quota managers - val sessionCaptorFetch = ArgumentCaptor.forClass(classOf[Session]) - val clientIdCaptor = ArgumentCaptor.forClass(classOf[String]) - val requestCaptor = ArgumentCaptor.forClass(classOf[RequestChannel.Request]) - - // Mock quota manager responses and capture arguments - when(quotas.fetch.maybeRecordAndGetThrottleTimeMs( - sessionCaptorFetch.capture(), clientIdCaptor.capture(), anyDouble, anyLong)).thenReturn(0) - when(quotas.request.maybeRecordAndGetThrottleTimeMs( - requestCaptor.capture(), anyLong)).thenReturn(0) - - // Create ShareFetch request with acknowledgement data - val shareFetchRequestData = new ShareFetchRequestData() - .setGroupId(groupId) - .setMemberId(memberId.toString) - .setShareSessionEpoch(1) - .setMaxWaitMs(100) - .setMinBytes(1) - .setMaxBytes(1000000) - .setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic() - .setTopicId(topicId) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( - new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( - new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) - - val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - - // Create request with custom principal and client address to test quota tags - val requestHeader = new RequestHeader(shareFetchRequest.apiKey, shareFetchRequest.version, testClientId, 0) - val request = buildRequest(shareFetchRequest, testPrincipal, testClientAddress, - ListenerName.forSecurityProtocol(SecurityProtocol.SSL), fromPrivilegedListener = false, Some(requestHeader), requestChannelMetrics) - - // Test that the request itself contains the proper tags and information - assertEquals(testClientId, request.header.clientId) - assertEquals(testPrincipal, request.context.principal) - assertEquals(testClientAddress, request.context.clientAddress) - assertEquals(ApiKeys.SHARE_FETCH, request.header.apiKey) - assertEquals("1", request.context.connectionId) - - kafkaApis = createKafkaApis() - kafkaApis.handleShareFetchRequest(request) - val response = verifyNoThrottling[ShareFetchResponse](request) - - // Verify response is successful - val responseData = response.data() - assertEquals(Errors.NONE.code, responseData.errorCode) - - // Verify that quota methods were called exactly once each (not twice despite having acknowledgements) - verify(quotas.fetch, times(1)).maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong) - verify(quotas.request, times(1)).maybeRecordAndGetThrottleTimeMs( - any[RequestChannel.Request](), anyLong) - - // Verify the Session data passed to fetch quota manager is exactly what was defined in the test - val capturedSession = sessionCaptorFetch.getValue - assertNotNull(capturedSession) - assertNotNull(capturedSession.principal) - assertEquals(KafkaPrincipal.USER_TYPE, capturedSession.principal.getPrincipalType) - assertEquals("test-user", capturedSession.principal.getName) - assertEquals(testClientAddress, capturedSession.clientAddress) - assertEquals("test-user", capturedSession.sanitizedUser) - - // Verify client ID passed to fetch quota manager matches what was defined - val capturedClientId = clientIdCaptor.getValue - assertEquals(testClientId, capturedClientId) - - // Verify the Request data passed to request quota manager is exactly what was defined - val capturedRequest = requestCaptor.getValue - assertNotNull(capturedRequest) - assertEquals(testClientId, capturedRequest.header.clientId) - assertEquals(testPrincipal, capturedRequest.context.principal) - assertEquals(testClientAddress, capturedRequest.context.clientAddress) - assertEquals(ApiKeys.SHARE_FETCH, capturedRequest.header.apiKey) - } - @Test def testProduceResponseContainsNewLeaderOnNotLeaderOrFollower(): Unit = { val topic = "topic" - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") - addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3, topicId = topicId) + addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - val tp = new TopicIdPartition(topicId, 0, topic) + val tp = new TopicPartition(topic, 0) val partition = mock(classOf[Partition]) val newLeaderId = 2 val newLeaderEpoch = 5 - val produceData = new ProduceRequestData.TopicProduceData() - .setPartitionData(util.List.of( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) - - if (version >= 13 ) { - produceData.setTopicId(topicId) - } else { - produceData.setName(tp.topic) - } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(produceData).iterator)) + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) + .iterator)) .setAcks(1.toShort) .setTimeoutMs(5000)) .build(version.toShort) @@ -2796,17 +2096,18 @@ class KafkaApisTest extends Logging { responseCallback.capture(), any(), any(), + any(), any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp.topicPartition())).thenAnswer(_ => Right(partition)) + when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Right(partition)) when(partition.leaderReplicaIdOpt).thenAnswer(_ => Some(newLeaderId)) when(partition.getLeaderEpoch).thenAnswer(_ => newLeaderEpoch) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) @@ -2829,31 +2130,24 @@ class KafkaApisTest extends Logging { @Test def testProduceResponseReplicaManagerLookupErrorOnNotLeaderOrFollower(): Unit = { val topic = "topic" - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") - addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3, topicId = topicId) + addTopicToMetadataCache(topic, numPartitions = 2, numBrokers = 3) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - val tp = new TopicIdPartition(topicId, 0, topic) - - val produceData = new ProduceRequestData.TopicProduceData() - .setPartitionData(util.List.of( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes))))) + val tp = new TopicPartition(topic, 0) - if (version >= 13 ) { - produceData.setTopicId(topicId) - } else { - produceData.setName(tp.topic) - } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(produceData).iterator)) + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) + .iterator)) .setAcks(1.toShort) .setTimeoutMs(5000)) .build(version.toShort) @@ -2867,15 +2161,16 @@ class KafkaApisTest extends Logging { responseCallback.capture(), any(), any(), + any(), any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp.topicPartition())).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) @@ -2899,29 +2194,20 @@ class KafkaApisTest extends Logging { @Test def testProduceResponseMetadataLookupErrorOnNotLeaderOrFollower(): Unit = { val topic = "topic" - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") metadataCache = mock(classOf[KRaftMetadataCache]) for (version <- 10 to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) - - val tp = new TopicIdPartition(topicId, 0, topic) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) - val topicProduceData = new ProduceRequestData.TopicProduceData() - - if (version >= 13 ) { - topicProduceData.setTopicId(topicId) - } else { - topicProduceData.setName(tp.topic) - } + val tp = new TopicPartition(topic, 0) val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(topicProduceData - .setPartitionData(util.List.of( + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) @@ -2939,24 +2225,20 @@ class KafkaApisTest extends Logging { responseCallback.capture(), any(), any(), + any(), any()) ).thenAnswer(_ => responseCallback.getValue.apply(Map(tp -> new PartitionResponse(Errors.NOT_LEADER_OR_FOLLOWER)))) - when(replicaManager.getPartitionOrError(tp.topicPartition)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) + when(replicaManager.getPartitionOrError(tp)).thenAnswer(_ => Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) - when(metadataCache.contains(tp.topicPartition())).thenAnswer(_ => true) - when(metadataCache.getLeaderAndIsr(tp.topic(), tp.partition())).thenAnswer(_ => Optional.empty()) - when(metadataCache.getAliveBrokerNode(any(), any())).thenReturn(Optional.empty()) - if (version >= 13) { - when(metadataCache.getTopicName(tp.topicId())).thenReturn(Optional.of(tp.topic())) - } else { - when(metadataCache.getTopicId(tp.topic())).thenReturn(tp.topicId()) - } - val kafkaApis = createKafkaApis() + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) + when(metadataCache.contains(tp)).thenAnswer(_ => true) + when(metadataCache.getLeaderAndIsr(tp.topic(), tp.partition())).thenAnswer(_ => Option.empty) + when(metadataCache.getAliveBrokerNode(any(), any())).thenReturn(Option.empty) + kafkaApis = createKafkaApis() kafkaApis.handleProduceRequest(request, RequestLocal.withThreadConfinedCaching) val response = verifyNoThrottling[ProduceResponse](request) @@ -2977,28 +2259,21 @@ class KafkaApisTest extends Logging { val topic = "topic" val transactionalId = "txn1" - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") - val tp = new TopicIdPartition(topicId, 0, "topic") - addTopicToMetadataCache(topic, numPartitions = 2, topicId = tp.topicId()) + addTopicToMetadataCache(topic, numPartitions = 2) for (version <- ApiKeys.PRODUCE.oldestVersion to ApiKeys.PRODUCE.latestVersion) { reset(replicaManager, clientQuotaManager, clientRequestQuotaManager, requestChannel, txnCoordinator) - val produceData = new ProduceRequestData.TopicProduceData() - .setPartitionData(util.List.of( - new ProduceRequestData.PartitionProduceData() - .setIndex(tp.partition) - .setRecords(MemoryRecords.withTransactionalRecords(Compression.NONE, 0, 0, 0, new SimpleRecord("test".getBytes))))) + val tp = new TopicPartition("topic", 0) - if (version >= 13 ) { - produceData.setTopicId(topicId) - } else { - produceData.setName(tp.topic) - } val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(produceData) + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic).setPartitionData(Collections.singletonList( + new ProduceRequestData.PartitionProduceData() + .setIndex(tp.partition) + .setRecords(MemoryRecords.withTransactionalRecords(Compression.NONE, 0, 0, 0, new SimpleRecord("test".getBytes)))))) .iterator)) .setAcks(1.toShort) .setTransactionalId(transactionalId) @@ -3018,6 +2293,7 @@ class KafkaApisTest extends Logging { any(), any(), any(), + any(), any()) } finally { kafkaApis.close() @@ -3035,7 +2311,7 @@ class KafkaApisTest extends Logging { val invalidTopicPartition = new TopicPartition(topic, invalidPartitionId) val addPartitionsToTxnRequest = AddPartitionsToTxnRequest.Builder.forClient( - "txnlId", 15L, 0.toShort, util.List.of(invalidTopicPartition) + "txnlId", 15L, 0.toShort, List(invalidTopicPartition).asJava ).build() val request = buildRequest(addPartitionsToTxnRequest) @@ -3055,17 +2331,17 @@ class KafkaApisTest extends Logging { checkInvalidPartition(-1) checkInvalidPartition(1) // topic has only one partition } - + @Test def requiredAclsNotPresentWriteTxnMarkersThrowsAuthorizationException(): Unit = { val topicPartition = new TopicPartition("t", 0) - val (_, request) = createWriteTxnMarkersRequest(util.List.of(topicPartition)) + val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) val authorizer: Authorizer = mock(classOf[Authorizer]) val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) - val alterActions = util.List.of(new Action(AclOperation.ALTER, clusterResource, 1, true, false)) - val clusterActions = util.List.of(new Action(AclOperation.CLUSTER_ACTION, clusterResource, 1, true, true)) - val deniedList = util.List.of(AuthorizationResult.DENIED) + val alterActions = Collections.singletonList(new Action(AclOperation.ALTER, clusterResource, 1, true, false)) + val clusterActions = Collections.singletonList(new Action(AclOperation.CLUSTER_ACTION, clusterResource, 1, true, true)) + val deniedList = Collections.singletonList(AuthorizationResult.DENIED) when(authorizer.authorize( request.context, alterActions @@ -3083,8 +2359,8 @@ class KafkaApisTest extends Logging { @Test def shouldRespondWithUnknownTopicWhenPartitionIsNotHosted(): Unit = { val topicPartition = new TopicPartition("t", 0) - val (_, request) = createWriteTxnMarkersRequest(util.List.of(topicPartition)) - val expectedErrors = util.Map.of(topicPartition, Errors.UNKNOWN_TOPIC_OR_PARTITION) + val (_, request) = createWriteTxnMarkersRequest(asList(topicPartition)) + val expectedErrors = Map(topicPartition -> Errors.UNKNOWN_TOPIC_OR_PARTITION).asJava val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) when(replicaManager.onlinePartition(topicPartition)) @@ -3107,15 +2383,17 @@ class KafkaApisTest extends Logging { // with no records. val topicPartition = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - util.List.of( - new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, util.List.of(topicPartition)), - new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, util.List.of(topicPartition)), + asList( + new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), + new TxnMarkerEntry(2, 1.toShort, 0, TransactionResult.COMMIT, asList(topicPartition)), )).build() val request = buildRequest(writeTxnMarkersRequest) val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) when(replicaManager.onlinePartition(any())) .thenReturn(Some(mock(classOf[Partition]))) + when(groupCoordinator.isNewGroupCoordinator) + .thenReturn(true) when(groupCoordinator.completeTransaction( ArgumentMatchers.eq(topicPartition), any(), @@ -3136,17 +2414,16 @@ class KafkaApisTest extends Logging { val markersResponse = capturedResponse.getValue assertEquals(2, markersResponse.errorsByProducerId.size()) } - + @Test def shouldRespondWithUnknownTopicOrPartitionForBadPartitionAndNoErrorsForGoodPartition(): Unit = { val tp1 = new TopicPartition("t", 0) val tp2 = new TopicPartition("t1", 0) - val topicId = Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg") - val (_, request) = createWriteTxnMarkersRequest(util.List.of(tp1, tp2)) - val expectedErrors = util.Map.of(tp1, Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2, Errors.NONE) + val (_, request) = createWriteTxnMarkersRequest(asList(tp1, tp2)) + val expectedErrors = Map(tp1 -> Errors.UNKNOWN_TOPIC_OR_PARTITION, tp2 -> Errors.NONE).asJava val capturedResponse: ArgumentCaptor[WriteTxnMarkersResponse] = ArgumentCaptor.forClass(classOf[WriteTxnMarkersResponse]) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) when(replicaManager.onlinePartition(tp1)) .thenReturn(None) @@ -3161,9 +2438,11 @@ class KafkaApisTest extends Logging { any(), responseCallback.capture(), any(), + any(), ArgumentMatchers.eq(requestLocal), + any(), any() - )).thenAnswer(_ => responseCallback.getValue.apply(Map(new TopicIdPartition(topicId,tp2) -> new PartitionResponse(Errors.NONE)))) + )).thenAnswer(_ => responseCallback.getValue.apply(Map(tp2 -> new PartitionResponse(Errors.NONE)))) kafkaApis = createKafkaApis() kafkaApis.handleWriteTxnMarkersRequest(request, requestLocal) verify(requestChannel).sendResponse( @@ -3180,7 +2459,7 @@ class KafkaApisTest extends Logging { @ValueSource(strings = Array("ALTER", "CLUSTER_ACTION")) def shouldAppendToLogOnWriteTxnMarkersWhenCorrectMagicVersion(allowedAclOperation: String): Unit = { val topicPartition = new TopicPartition("t", 0) - val request = createWriteTxnMarkersRequest(util.List.of(topicPartition))._2 + val request = createWriteTxnMarkersRequest(asList(topicPartition))._2 when(replicaManager.onlinePartition(topicPartition)) .thenReturn(Some(mock(classOf[Partition]))) @@ -3189,15 +2468,15 @@ class KafkaApisTest extends Logging { // Allowing WriteTxnMarkers API with the help of allowedAclOperation parameter. val authorizer: Authorizer = mock(classOf[Authorizer]) val clusterResource = new ResourcePattern(ResourceType.CLUSTER, Resource.CLUSTER_NAME, PatternType.LITERAL) - val allowedAction = util.List.of(new Action( + val allowedAction = Collections.singletonList(new Action( AclOperation.fromString(allowedAclOperation), clusterResource, 1, true, allowedAclOperation.equals("CLUSTER_ACTION") )) - val deniedList = util.List.of(AuthorizationResult.DENIED) - val allowedList = util.List.of(AuthorizationResult.ALLOWED) + val deniedList = Collections.singletonList(AuthorizationResult.DENIED) + val allowedList = Collections.singletonList(AuthorizationResult.ALLOWED) when(authorizer.authorize( ArgumentMatchers.eq(request.context), any() @@ -3216,20 +2495,19 @@ class KafkaApisTest extends Logging { any(), any(), any(), + any(), ArgumentMatchers.eq(requestLocal), + any(), any()) } @Test - def testHandleWriteTxnMarkersRequest(): Unit = { + def testHandleWriteTxnMarkersRequestWithOldGroupCoordinator(): Unit = { val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) val offset1 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 1) val foo0 = new TopicPartition("foo", 0) val foo1 = new TopicPartition("foo", 1) - val topicIds = Map( - Topic.GROUP_METADATA_TOPIC_NAME -> Uuid.fromString("JaTH2JYK2ed2GzUapg8tgg"), - "foo" -> Uuid.fromString("d2Gg8tgzJa2JYK2eTHUapg")) val allPartitions = List( offset0, offset1, @@ -3238,53 +2516,47 @@ class KafkaApisTest extends Logging { ) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - util.List.of( + List( new TxnMarkerEntry( 1L, 1.toShort, 0, TransactionResult.COMMIT, - util.List.of(offset0, foo0) + List(offset0, foo0).asJava ), new TxnMarkerEntry( 2L, 1.toShort, 0, TransactionResult.ABORT, - util.List.of(offset1, foo1) + List(offset1, foo1).asJava ) - ) + ).asJava ).build() val requestChannelRequest = buildRequest(writeTxnMarkersRequest) allPartitions.foreach { tp => - when(replicaManager.onlinePartition(tp)).thenReturn(Some(mock(classOf[Partition]))) - when(replicaManager.topicIdPartition(tp)).thenReturn(new TopicIdPartition(topicIds.get(tp.topic()).getOrElse(Uuid.ZERO_UUID), tp)) + when(replicaManager.onlinePartition(tp)) + .thenReturn(Some(mock(classOf[Partition]))) } - when(groupCoordinator.completeTransaction( - ArgumentMatchers.eq(offset0), + when(groupCoordinator.onTransactionCompleted( ArgumentMatchers.eq(1L), - ArgumentMatchers.eq(1.toShort), - ArgumentMatchers.eq(0), - ArgumentMatchers.eq(TransactionResult.COMMIT), - ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) + ArgumentMatchers.any(), + ArgumentMatchers.eq(TransactionResult.COMMIT) )).thenReturn(CompletableFuture.completedFuture[Void](null)) - when(groupCoordinator.completeTransaction( - ArgumentMatchers.eq(offset1), + when(groupCoordinator.onTransactionCompleted( ArgumentMatchers.eq(2L), - ArgumentMatchers.eq(1.toShort), - ArgumentMatchers.eq(0), - ArgumentMatchers.eq(TransactionResult.ABORT), - ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) - )).thenReturn(CompletableFuture.completedFuture[Void](null)) + ArgumentMatchers.any(), + ArgumentMatchers.eq(TransactionResult.ABORT) + )).thenReturn(FutureUtils.failedFuture[Void](Errors.NOT_CONTROLLER.exception)) - val entriesPerPartition: ArgumentCaptor[Map[TopicIdPartition, MemoryRecords]] = - ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, MemoryRecords]]) - val responseCallback: ArgumentCaptor[Map[TopicIdPartition, PartitionResponse] => Unit] = - ArgumentCaptor.forClass(classOf[Map[TopicIdPartition, PartitionResponse] => Unit]) + val entriesPerPartition: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) when(replicaManager.appendRecords( ArgumentMatchers.eq(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT.toLong), @@ -3294,7 +2566,9 @@ class KafkaApisTest extends Logging { entriesPerPartition.capture(), responseCallback.capture(), any(), - ArgumentMatchers.eq(RequestLocal.noCaching), + any(), + ArgumentMatchers.eq(RequestLocal.noCaching()), + any(), any() )).thenAnswer { _ => responseCallback.getValue.apply( @@ -3303,79 +2577,94 @@ class KafkaApisTest extends Logging { }.toMap ) } - kafkaApis = createKafkaApis() - kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching) + kafkaApis = createKafkaApis(overrideProperties = Map( + GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false" + )) + kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching()) val expectedResponse = new WriteTxnMarkersResponseData() - .setMarkers(util.List.of( + .setMarkers(List( new WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(1L) - .setTopics(util.List.of( + .setTopics(List( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(util.List.of( + .setPartitions(List( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )), + ).asJava), new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) - )), + ).asJava) + ).asJava), new WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(2L) - .setTopics(util.List.of( + .setTopics(List( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(util.List.of( + .setPartitions(List( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(1) - .setErrorCode(Errors.NONE.code) - )), + .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) + ).asJava), new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - )) - )) - )) + ).asJava) + ).asJava) + ).asJava) val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) assertEquals(normalize(expectedResponse), normalize(response.data)) } - @ParameterizedTest - @EnumSource(value = classOf[Errors], names = Array( - "COORDINATOR_NOT_AVAILABLE", - "COORDINATOR_LOAD_IN_PROGRESS", - "NOT_COORDINATOR", - "REQUEST_TIMED_OUT" - )) - def testHandleWriteTxnMarkersRequestErrorTranslation(error: Errors): Unit = { + @Test + def testHandleWriteTxnMarkersRequestWithNewGroupCoordinator(): Unit = { val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) + val offset1 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 1) + val foo0 = new TopicPartition("foo", 0) + val foo1 = new TopicPartition("foo", 1) + + val allPartitions = List( + offset0, + offset1, + foo0, + foo1 + ) val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - util.List.of( + List( new TxnMarkerEntry( 1L, 1.toShort, 0, TransactionResult.COMMIT, - util.List.of(offset0) + List(offset0, foo0).asJava + ), + new TxnMarkerEntry( + 2L, + 1.toShort, + 0, + TransactionResult.ABORT, + List(offset1, foo1).asJava ) - ) + ).asJava ).build() val requestChannelRequest = buildRequest(writeTxnMarkersRequest) - when(replicaManager.onlinePartition(offset0)) - .thenReturn(Some(mock(classOf[Partition]))) + allPartitions.foreach { tp => + when(replicaManager.onlinePartition(tp)) + .thenReturn(Some(mock(classOf[Partition]))) + } when(groupCoordinator.completeTransaction( ArgumentMatchers.eq(offset0), @@ -3384,31 +2673,147 @@ class KafkaApisTest extends Logging { ArgumentMatchers.eq(0), ArgumentMatchers.eq(TransactionResult.COMMIT), ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) - )).thenReturn(FutureUtils.failedFuture[Void](error.exception())) - kafkaApis = createKafkaApis() - kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching) + )).thenReturn(CompletableFuture.completedFuture[Void](null)) - val expectedError = error match { - case Errors.COORDINATOR_NOT_AVAILABLE | Errors.COORDINATOR_LOAD_IN_PROGRESS | Errors.NOT_COORDINATOR => - Errors.NOT_LEADER_OR_FOLLOWER - case error => + when(groupCoordinator.completeTransaction( + ArgumentMatchers.eq(offset1), + ArgumentMatchers.eq(2L), + ArgumentMatchers.eq(1.toShort), + ArgumentMatchers.eq(0), + ArgumentMatchers.eq(TransactionResult.ABORT), + ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) + )).thenReturn(CompletableFuture.completedFuture[Void](null)) + + val entriesPerPartition: ArgumentCaptor[Map[TopicPartition, MemoryRecords]] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, MemoryRecords]]) + val responseCallback: ArgumentCaptor[Map[TopicPartition, PartitionResponse] => Unit] = + ArgumentCaptor.forClass(classOf[Map[TopicPartition, PartitionResponse] => Unit]) + + when(replicaManager.appendRecords( + ArgumentMatchers.eq(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT.toLong), + ArgumentMatchers.eq(-1), + ArgumentMatchers.eq(true), + ArgumentMatchers.eq(AppendOrigin.COORDINATOR), + entriesPerPartition.capture(), + responseCallback.capture(), + any(), + any(), + ArgumentMatchers.eq(RequestLocal.noCaching), + any(), + any() + )).thenAnswer { _ => + responseCallback.getValue.apply( + entriesPerPartition.getValue.keySet.map { tp => + tp -> new PartitionResponse(Errors.NONE) + }.toMap + ) + } + kafkaApis = createKafkaApis() + kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching) + + val expectedResponse = new WriteTxnMarkersResponseData() + .setMarkers(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerResult() + .setProducerId(1L) + .setTopics(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName("foo") + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(0) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerResult() + .setProducerId(2L) + .setTopics(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName(Topic.GROUP_METADATA_TOPIC_NAME) + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code) + ).asJava), + new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() + .setName("foo") + .setPartitions(List( + new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() + .setPartitionIndex(1) + .setErrorCode(Errors.NONE.code) + ).asJava) + ).asJava) + ).asJava) + + val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) + assertEquals(normalize(expectedResponse), normalize(response.data)) + } + + @ParameterizedTest + @EnumSource(value = classOf[Errors], names = Array( + "COORDINATOR_NOT_AVAILABLE", + "COORDINATOR_LOAD_IN_PROGRESS", + "NOT_COORDINATOR", + "REQUEST_TIMED_OUT" + )) + def testHandleWriteTxnMarkersRequestWithNewGroupCoordinatorErrorTranslation(error: Errors): Unit = { + val offset0 = new TopicPartition(Topic.GROUP_METADATA_TOPIC_NAME, 0) + + val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( + List( + new TxnMarkerEntry( + 1L, + 1.toShort, + 0, + TransactionResult.COMMIT, + List(offset0).asJava + ) + ).asJava + ).build() + + val requestChannelRequest = buildRequest(writeTxnMarkersRequest) + + when(replicaManager.onlinePartition(offset0)) + .thenReturn(Some(mock(classOf[Partition]))) + + when(groupCoordinator.completeTransaction( + ArgumentMatchers.eq(offset0), + ArgumentMatchers.eq(1L), + ArgumentMatchers.eq(1.toShort), + ArgumentMatchers.eq(0), + ArgumentMatchers.eq(TransactionResult.COMMIT), + ArgumentMatchers.eq(Duration.ofMillis(ServerConfigs.REQUEST_TIMEOUT_MS_DEFAULT)) + )).thenReturn(FutureUtils.failedFuture[Void](error.exception())) + kafkaApis = createKafkaApis() + kafkaApis.handleWriteTxnMarkersRequest(requestChannelRequest, RequestLocal.noCaching) + + val expectedError = error match { + case Errors.COORDINATOR_NOT_AVAILABLE | Errors.COORDINATOR_LOAD_IN_PROGRESS | Errors.NOT_COORDINATOR => + Errors.NOT_LEADER_OR_FOLLOWER + case error => error } val expectedResponse = new WriteTxnMarkersResponseData() - .setMarkers(util.List.of( + .setMarkers(List( new WriteTxnMarkersResponseData.WritableTxnMarkerResult() .setProducerId(1L) - .setTopics(util.List.of( + .setTopics(List( new WriteTxnMarkersResponseData.WritableTxnMarkerTopicResult() .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setPartitions(util.List.of( + .setPartitions(List( new WriteTxnMarkersResponseData.WritableTxnMarkerPartitionResult() .setPartitionIndex(0) .setErrorCode(expectedError.code) - )) - )) - )) + ).asJava) + ).asJava) + ).asJava) val response = verifyNoThrottling[WriteTxnMarkersResponse](requestChannelRequest) assertEquals(normalize(expectedResponse), normalize(response.data)) @@ -3454,18 +2859,18 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroups(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( "group-1", "group-2", "group-3" - )) + ).asJava) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - util.List.of("group-1", "group-2", "group-3"), + List("group-1", "group-2", "group-3").asJava, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis() @@ -3474,7 +2879,7 @@ class KafkaApisTest extends Logging { RequestLocal.noCaching ) - val results = new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( + val results = new DeleteGroupsResponseData.DeletableGroupResultCollection(List( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") .setErrorCode(Errors.NONE.code), @@ -3484,7 +2889,7 @@ class KafkaApisTest extends Logging { new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code), - ).iterator) + ).iterator.asJava) future.complete(results) @@ -3497,18 +2902,18 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroupsFutureFailed(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( "group-1", "group-2", "group-3" - )) + ).asJava) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - util.List.of("group-1", "group-2", "group-3"), + List("group-1", "group-2", "group-3").asJava, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis() @@ -3520,7 +2925,7 @@ class KafkaApisTest extends Logging { future.completeExceptionally(Errors.NOT_CONTROLLER.exception) val expectedDeleteGroupsResponse = new DeleteGroupsResponseData() - .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( + .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") .setErrorCode(Errors.NOT_CONTROLLER.code), @@ -3530,7 +2935,7 @@ class KafkaApisTest extends Logging { new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.NOT_CONTROLLER.code), - ).iterator)) + ).iterator.asJava)) val response = verifyNoThrottling[DeleteGroupsResponse](requestChannelRequest) assertEquals(expectedDeleteGroupsResponse, response.data) @@ -3538,11 +2943,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDeleteGroupsAuthenticationFailed(): Unit = { - val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(util.List.of( + val deleteGroupsRequest = new DeleteGroupsRequestData().setGroupsNames(List( "group-1", "group-2", "group-3" - )) + ).asJava) val requestChannelRequest = buildRequest(new DeleteGroupsRequest.Builder(deleteGroupsRequest).build()) @@ -3559,15 +2964,15 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } val future = new CompletableFuture[DeleteGroupsResponseData.DeletableGroupResultCollection]() when(groupCoordinator.deleteGroups( requestChannelRequest.context, - util.List.of("group-2", "group-3"), + List("group-2", "group-3").asJava, RequestLocal.noCaching.bufferSupplier )).thenReturn(future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) @@ -3576,17 +2981,17 @@ class KafkaApisTest extends Logging { RequestLocal.noCaching ) - future.complete(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( + future.complete(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-2") .setErrorCode(Errors.NONE.code), new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-3") .setErrorCode(Errors.NONE.code) - ).iterator)) + ).iterator.asJava)) val expectedDeleteGroupsResponse = new DeleteGroupsResponseData() - .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(util.List.of( + .setResults(new DeleteGroupsResponseData.DeletableGroupResultCollection(List( new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-2") .setErrorCode(Errors.NONE.code), @@ -3595,7 +3000,7 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.NONE.code), new DeleteGroupsResponseData.DeletableGroupResult() .setGroupId("group-1") - .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)).iterator)) + .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code)).iterator.asJava)) val response = verifyNoThrottling[DeleteGroupsResponse](requestChannelRequest) assertEquals(expectedDeleteGroupsResponse, response.data) @@ -3603,12 +3008,12 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroups(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( "group-1", "group-2", "group-3", "group-4" - )) + ).asJava) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3620,15 +3025,15 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis() kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - val groupResults = util.List.of( + val groupResults = List( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") .setProtocolType("consumer") .setProtocolData("range") .setGroupState("Stable") - .setMembers(util.List.of( + .setMembers(List( new DescribeGroupsResponseData.DescribedGroupMember() - .setMemberId("member-1"))), + .setMemberId("member-1")).asJava), new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code), @@ -3640,7 +3045,7 @@ class KafkaApisTest extends Logging { .setGroupState("Dead") .setErrorCode(Errors.GROUP_ID_NOT_FOUND.code) .setErrorMessage("Group group-4 is not a classic group.") - ) + ).asJava future.complete(groupResults) @@ -3651,11 +3056,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroupsFutureFailed(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( "group-1", "group-2", "group-3" - )) + ).asJava) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3667,7 +3072,7 @@ class KafkaApisTest extends Logging { kafkaApis = createKafkaApis() kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(util.List.of( + val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(List( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code), @@ -3677,7 +3082,7 @@ class KafkaApisTest extends Logging { new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-3") .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) - )) + ).asJava) future.completeExceptionally(Errors.UNKNOWN_SERVER_ERROR.exception) @@ -3687,11 +3092,11 @@ class KafkaApisTest extends Logging { @Test def testHandleDescribeGroupsAuthenticationFailed(): Unit = { - val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(util.List.of( + val describeGroupsRequest = new DescribeGroupsRequestData().setGroups(List( "group-1", "group-2", "group-3" - )) + ).asJava) val requestChannelRequest = buildRequest(new DescribeGroupsRequest.Builder(describeGroupsRequest).build()) @@ -3708,26 +3113,26 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream(). - map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } val future = new CompletableFuture[util.List[DescribeGroupsResponseData.DescribedGroup]]() when(groupCoordinator.describeGroups( requestChannelRequest.context, - util.List.of("group-2") + List("group-2").asJava )).thenReturn(future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleDescribeGroupsRequest(requestChannelRequest) - future.complete(util.List.of( + future.complete(List( new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code) - )) + ).asJava) - val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(util.List.of( + val expectedDescribeGroupsResponse = new DescribeGroupsResponseData().setGroups(List( // group-1 and group-3 are first because unauthorized are put first into the response. new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-1") @@ -3738,7 +3143,7 @@ class KafkaApisTest extends Logging { new DescribeGroupsResponseData.DescribedGroup() .setGroupId("group-2") .setErrorCode(Errors.NOT_COORDINATOR.code) - )) + ).asJava) val response = verifyNoThrottling[DescribeGroupsResponse](requestChannelRequest) assertEquals(expectedDescribeGroupsResponse, response.data) @@ -3753,14 +3158,14 @@ class KafkaApisTest extends Logging { val topics = new OffsetDeleteRequestTopicCollection() topics.add(new OffsetDeleteRequestTopic() .setName("topic-1") - .setPartitions(util.List.of( + .setPartitions(Seq( new OffsetDeleteRequestPartition().setPartitionIndex(0), - new OffsetDeleteRequestPartition().setPartitionIndex(1)))) + new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava)) topics.add(new OffsetDeleteRequestTopic() .setName("topic-2") - .setPartitions(util.List.of( + .setPartitions(Seq( new OffsetDeleteRequestPartition().setPartitionIndex(0), - new OffsetDeleteRequestPartition().setPartitionIndex(1)))) + new OffsetDeleteRequestPartition().setPartitionIndex(1)).asJava)) val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() @@ -3780,28 +3185,28 @@ class KafkaApisTest extends Logging { kafkaApis.handleOffsetDeleteRequest(request, requestLocal) val offsetDeleteResponseData = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection(util.List.of( + .setTopics(new OffsetDeleteResponseData.OffsetDeleteResponseTopicCollection(List( new OffsetDeleteResponseData.OffsetDeleteResponseTopic() .setName("topic-1") - .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)), + ).asJava.iterator)), new OffsetDeleteResponseData.OffsetDeleteResponseTopic() .setName("topic-2") - .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponseData.OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponseData.OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)) - ).iterator())) + ).asJava.iterator)) + ).asJava.iterator())) future.complete(offsetDeleteResponseData) @@ -3817,30 +3222,30 @@ class KafkaApisTest extends Logging { val offsetDeleteRequest = new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of( + .setTopics(new OffsetDeleteRequestTopicCollection(List( // foo exists but has only 2 partitions. new OffsetDeleteRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1), new OffsetDeleteRequestPartition().setPartitionIndex(2) - )), + ).asJava), // bar exists. new OffsetDeleteRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - )), + ).asJava), // zar does not exist. new OffsetDeleteRequestTopic() .setName("zar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - )), - ).iterator)) + ).asJava), + ).asJava.iterator)) val requestChannelRequest = buildRequest(new OffsetDeleteRequest.Builder(offsetDeleteRequest).build()) @@ -3848,20 +3253,20 @@ class KafkaApisTest extends Logging { // only existing topic-partitions. val expectedOffsetDeleteRequest = new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of( + .setTopics(new OffsetDeleteRequestTopicCollection(List( new OffsetDeleteRequestTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - )), + ).asJava), new OffsetDeleteRequestTopic() .setName("bar") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetDeleteRequestPartition().setPartitionIndex(0), new OffsetDeleteRequestPartition().setPartitionIndex(1) - )) - ).iterator)) + ).asJava) + ).asJava.iterator)) val future = new CompletableFuture[OffsetDeleteResponseData]() when(groupCoordinator.deleteOffsets( @@ -3877,34 +3282,34 @@ class KafkaApisTest extends Logging { // This is the response returned by the group coordinator. val offsetDeleteResponse = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(util.List.of( + .setTopics(new OffsetDeleteResponseTopicCollection(List( new OffsetDeleteResponseTopic() .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)), + ).asJava.iterator)), new OffsetDeleteResponseTopic() .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)), - ).iterator)) + ).asJava.iterator)), + ).asJava.iterator)) val expectedOffsetDeleteResponse = new OffsetDeleteResponseData() - .setTopics(new OffsetDeleteResponseTopicCollection(util.List.of( + .setTopics(new OffsetDeleteResponseTopicCollection(List( new OffsetDeleteResponseTopic() .setName("foo") - .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( // foo-2 is first because partitions failing the validation // are put in the response first. new OffsetDeleteResponsePartition() @@ -3916,30 +3321,30 @@ class KafkaApisTest extends Logging { new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)), + ).asJava.iterator)), // zar is before bar because topics failing the validation are // put in the response first. new OffsetDeleteResponseTopic() .setName("zar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) - ).iterator)), + ).asJava.iterator)), new OffsetDeleteResponseTopic() .setName("bar") - .setPartitions(new OffsetDeleteResponsePartitionCollection(util.List.of( + .setPartitions(new OffsetDeleteResponsePartitionCollection(List( new OffsetDeleteResponsePartition() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), new OffsetDeleteResponsePartition() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ).iterator)), - ).iterator)) + ).asJava.iterator)), + ).asJava.iterator)) future.complete(offsetDeleteResponse) val response = verifyNoThrottling[OffsetDeleteResponse](requestChannelRequest) @@ -3958,7 +3363,7 @@ class KafkaApisTest extends Logging { val topics = new OffsetDeleteRequestTopicCollection() topics.add(new OffsetDeleteRequestTopic() .setName(topic) - .setPartitions(util.List.of( + .setPartitions(Collections.singletonList( new OffsetDeleteRequestPartition().setPartitionIndex(invalidPartitionId)))) val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() @@ -4029,9 +3434,9 @@ class KafkaApisTest extends Logging { val offsetDeleteRequest = new OffsetDeleteRequest.Builder( new OffsetDeleteRequestData() .setGroupId(group) - .setTopics(new OffsetDeleteRequestTopicCollection(util.List.of(new OffsetDeleteRequestTopic() + .setTopics(new OffsetDeleteRequestTopicCollection(Collections.singletonList(new OffsetDeleteRequestTopic() .setName("topic-unknown") - .setPartitions(util.List.of(new OffsetDeleteRequestPartition() + .setPartitions(Collections.singletonList(new OffsetDeleteRequestPartition() .setPartitionIndex(0) )) ).iterator())) @@ -4070,24 +3475,24 @@ class KafkaApisTest extends Logging { ArgumentMatchers.anyInt(), // correlationId ArgumentMatchers.anyShort(), // version ArgumentMatchers.any[(Errors, ListOffsetsPartition) => ListOffsetsPartitionResponse](), - ArgumentMatchers.any[Consumer[util.Collection[ListOffsetsTopicResponse]]], + ArgumentMatchers.any[List[ListOffsetsTopicResponse] => Unit](), ArgumentMatchers.anyInt() // timeoutMs )).thenAnswer(ans => { - val callback = ans.getArgument[Consumer[util.List[ListOffsetsTopicResponse]]](8) + val callback = ans.getArgument[List[ListOffsetsTopicResponse] => Unit](8) val partitionResponse = new ListOffsetsPartitionResponse() .setErrorCode(error.code()) .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) + callback(List(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) }) - val targetTimes = util.List.of(new ListOffsetsTopic() + val targetTimes = List(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(util.List.of(new ListOffsetsPartition() + .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP) - .setCurrentLeaderEpoch(currentLeaderEpoch.get)))) + .setCurrentLeaderEpoch(currentLeaderEpoch.get)).asJava)).asJava val listOffsetRequest = ListOffsetsRequest.Builder.forConsumer(true, isolationLevel) .setTargetTimes(targetTimes).build() val request = buildRequest(listOffsetRequest) @@ -4159,47 +3564,6 @@ class KafkaApisTest extends Logging { assertEquals(Set(0), response.brokers.asScala.map(_.id).toSet) } - - /** - * Metadata request to fetch all topics should not result in the followings: - * 1) Auto topic creation - * 2) UNKNOWN_TOPIC_OR_PARTITION - * - * This case is testing the case that a topic is being deleted from MetadataCache right after - * authorization but before checking in MetadataCache. - */ - @Test - def testGetAllTopicMetadataShouldNotCreateTopicOrReturnUnknownTopicPartition(): Unit = { - // Setup: authorizer authorizes 2 topics, but one got deleted in metadata cache - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.getAliveBrokerNodes(any())).thenReturn(util.List.of(new Node(brokerId,"localhost", 0))) - when(metadataCache.getRandomAliveBrokerId).thenReturn(util.Optional.empty()) - - // 2 topics returned for authorization in during handle - val topicsReturnedFromMetadataCacheForAuthorization = util.Set.of("remaining-topic", "later-deleted-topic") - when(metadataCache.getAllTopics).thenReturn(topicsReturnedFromMetadataCacheForAuthorization) - // 1 topic is deleted from metadata right at the time between authorization and the next getTopicMetadata() call - when(metadataCache.getTopicMetadata( - ArgumentMatchers.eq(topicsReturnedFromMetadataCacheForAuthorization), - any[ListenerName], - anyBoolean, - anyBoolean - )).thenReturn(util.List.of( - new MetadataResponseTopic() - .setErrorCode(Errors.NONE.code) - .setName("remaining-topic") - .setIsInternal(false) - )) - - val response = sendMetadataRequestWithInconsistentListeners(new ListenerName("PLAINTEXT")) - val responseTopics = response.topicMetadata().asScala.map { metadata => metadata.topic() } - - // verify we don't create topic when getAllTopicMetadata - verify(autoTopicCreationManager, never).createTopics(any(), any(), any()) - assertEquals(List("remaining-topic"), responseTopics) - assertTrue(response.topicsByError(Errors.UNKNOWN_TOPIC_OR_PARTITION).isEmpty) - } - @Test def testUnauthorizedTopicMetadataRequest(): Unit = { // 1. Set up broker information @@ -4221,19 +3585,20 @@ class KafkaApisTest extends Logging { val unauthorizedTopic = "unauthorized-topic" val authorizedTopic = "authorized-topic" - val expectedActions = util.List.of( + val expectedActions = Seq( new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, unauthorizedTopic, PatternType.LITERAL), 1, true, true), new Action(AclOperation.DESCRIBE, new ResourcePattern(ResourceType.TOPIC, authorizedTopic, PatternType.LITERAL), 1, true, true) ) - when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions)))) + when(authorizer.authorize(any[RequestContext], argThat((t: java.util.List[Action]) => t.containsAll(expectedActions.asJava)))) .thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - val results = new util.ArrayList[AuthorizationResult]() - actions.forEach { a => - results.add(if (a.resourcePattern.name == authorizedTopic) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED) - } - results + val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]].asScala + actions.map { action => + if (action.resourcePattern().name().equals(authorizedTopic)) + AuthorizationResult.ALLOWED + else + AuthorizationResult.DENIED + }.asJava } // 3. Set up MetadataCache @@ -4248,15 +3613,15 @@ class KafkaApisTest extends Logging { .setPartitionId(0) .setLeader(0) .setLeaderEpoch(0) - .setReplicas(util.List.of(0)) - .setIsr(util.List.of(0)) + .setReplicas(Collections.singletonList(0)) + .setIsr(Collections.singletonList(0)) } val partitionRecords = Seq(authorizedTopicId, unauthorizedTopicId).map(createDummyPartitionRecord) MetadataCacheTest.updateCache(metadataCache, partitionRecords) // 4. Send TopicMetadataReq using topicId - val metadataReqByTopicId = MetadataRequest.Builder.forTopicIds(util.Set.of(authorizedTopicId, unauthorizedTopicId)).build() + val metadataReqByTopicId = new MetadataRequest.Builder(util.Arrays.asList(authorizedTopicId, unauthorizedTopicId)).build() val repByTopicId = buildRequest(metadataReqByTopicId, plaintextListener) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) @@ -4281,7 +3646,7 @@ class KafkaApisTest extends Logging { // 4. Send TopicMetadataReq using topic name reset(clientRequestQuotaManager, requestChannel) - val metadataReqByTopicName = new MetadataRequest.Builder(util.List.of(authorizedTopic, unauthorizedTopic), false).build() + val metadataReqByTopicName = new MetadataRequest.Builder(util.Arrays.asList(authorizedTopic, unauthorizedTopic), false).build() val repByTopicName = buildRequest(metadataReqByTopicName, plaintextListener) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleTopicMetadataRequest(repByTopicName) @@ -4292,7 +3657,7 @@ class KafkaApisTest extends Logging { metadataByTopicName.foreach { case (topicName, metadataResponseTopic) => if (topicName == unauthorizedTopic) { assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), metadataResponseTopic.errorCode()) - // Do not return topicId on unauthorized error + // Do not return topic Id on unauthorized error assertEquals(Uuid.ZERO_UUID, metadataResponseTopic.topicId()) } else { assertEquals(Errors.NONE.code(), metadataResponseTopic.errorCode()) @@ -4328,10 +3693,10 @@ class KafkaApisTest extends Logging { Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))) }) - val fetchData = util.Map.of(tidp, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, - Optional.empty())) - val fetchDataBuilder = util.Map.of(tp, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, - Optional.empty())) + val fetchData = Map(tidp -> new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, + Optional.empty())).asJava + val fetchDataBuilder = Map(tp -> new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, 1000, + Optional.empty())).asJava val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, false, false) @@ -4344,7 +3709,7 @@ class KafkaApisTest extends Logging { any[util.Map[Uuid, String]])).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val fetchRequest = new FetchRequest.Builder(9, 9, -1, -1, 100, 0, fetchDataBuilder) .build() @@ -4380,10 +3745,10 @@ class KafkaApisTest extends Logging { when(replicaManager.getLogConfig(ArgumentMatchers.eq(unresolvedFoo.topicPartition))).thenReturn(None) // Simulate unknown topic ID in the context - val fetchData = util.Map.of(new TopicIdPartition(foo.topicId, new TopicPartition(null, foo.partition)), - new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())) - val fetchDataBuilder = util.Map.of(foo.topicPartition, new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, - Optional.empty())) + val fetchData = Map(new TopicIdPartition(foo.topicId, new TopicPartition(null, foo.partition)) -> + new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())).asJava + val fetchDataBuilder = Map(foo.topicPartition -> new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, + Optional.empty())).asJava val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, true, replicaId >= 0) @@ -4392,13 +3757,13 @@ class KafkaApisTest extends Logging { ApiKeys.FETCH.latestVersion, fetchMetadata, replicaId >= 0, - util.Map.of(foo, new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())), - util.List.of[TopicIdPartition], + Collections.singletonMap(foo, new FetchRequest.PartitionData(foo.topicId, 0, 0, 1000, Optional.empty())), + Collections.emptyList[TopicIdPartition], metadataCache.topicIdsToNames()) ).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) // If replicaId is -1 we will build a consumer request. Any non-negative replicaId will build a follower request. val replicaEpoch = if (replicaId < 0) -1 else 1 @@ -4428,7 +3793,7 @@ class KafkaApisTest extends Logging { addTopicToMetadataCache(tp.topic, numPartitions = 1, numBrokers = 3, topicId) when(replicaManager.getLogConfig(ArgumentMatchers.eq(tp))).thenReturn(Some(LogConfig.fromProps( - util.Map.of(), + Collections.emptyMap(), new Properties() ))) @@ -4447,14 +3812,14 @@ class KafkaApisTest extends Logging { any[Seq[(TopicIdPartition, FetchPartitionData)] => Unit]() )).thenAnswer(invocation => { val callback = invocation.getArgument(3).asInstanceOf[Seq[(TopicIdPartition, FetchPartitionData)] => Unit] - callback(Seq(tidp -> new FetchPartitionData(Errors.NOT_LEADER_OR_FOLLOWER, UnifiedLog.UNKNOWN_OFFSET, UnifiedLog.UNKNOWN_OFFSET, MemoryRecords.EMPTY, + callback(Seq(tidp -> new FetchPartitionData(Errors.NOT_LEADER_OR_FOLLOWER, UnifiedLog.UnknownOffset, UnifiedLog.UnknownOffset, MemoryRecords.EMPTY, Optional.empty(), OptionalLong.empty(), Optional.empty(), OptionalInt.empty(), false))) }) - val fetchData = util.Map.of(tidp, new FetchRequest.PartitionData(topicId, 0, 0, 1000, - Optional.empty())) - val fetchDataBuilder = util.Map.of(tp, new FetchRequest.PartitionData(topicId, 0, 0, 1000, - Optional.empty())) + val fetchData = Map(tidp -> new FetchRequest.PartitionData(topicId, 0, 0, 1000, + Optional.empty())).asJava + val fetchDataBuilder = Map(tp -> new FetchRequest.PartitionData(topicId, 0, 0, 1000, + Optional.empty())).asJava val fetchMetadata = new JFetchMetadata(0, 0) val fetchContext = new FullFetchContext(time, new FetchSessionCacheShard(1000, 100), fetchMetadata, fetchData, true, false) @@ -4467,7 +3832,7 @@ class KafkaApisTest extends Logging { any[util.Map[Uuid, String]])).thenReturn(fetchContext) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val fetchRequest = new FetchRequest.Builder(16, 16, -1, -1, 100, 0, fetchDataBuilder) .build() @@ -4492,7 +3857,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4500,43 +3865,50 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName))) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, shareSessionEpoch), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex)).iterator))).iterator)) + .setPartitionIndex(partitionIndex) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4544,20 +3916,19 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) } @Test def testHandleShareFetchRequestInvalidRequestOnInitialEpoch(): Unit = { val topicName = "foo" val topicId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4566,56 +3937,61 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, partitionIndex, topicName), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenThrow( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenThrow( Errors.INVALID_REQUEST.exception() ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2 + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2 ))) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(partitionMaxBytes) + setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4627,13 +4003,14 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)) - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava) + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4644,13 +4021,12 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) } @Test @@ -4658,7 +4034,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4666,53 +4042,59 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ).thenThrow(Errors.INVALID_REQUEST.exception) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -4720,31 +4102,31 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(-1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(partitionMaxBytes) // partitionMaxBytes are set even on the final fetch request, this is an invalid request + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -4760,36 +4142,42 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4802,7 +4190,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -4810,19 +4198,19 @@ class KafkaApisTest extends Logging { val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( @@ -4831,37 +4219,42 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, partitionIndex, topicName), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(partitionMaxBytes) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4874,13 +4267,13 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) ) @@ -4890,37 +4283,42 @@ class KafkaApisTest extends Logging { val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, partitionIndex, topicName), false)) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false)) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())) .thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(partitionMaxBytes) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4933,44 +4331,50 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val records = MemoryRecords.EMPTY - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.REPLICA_NOT_AVAILABLE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of)) - )) + .setAcquiredRecords(new util.ArrayList(List().asJava)) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -4978,13 +4382,12 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.REPLICA_NOT_AVAILABLE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertTrue(topicResponse.partitions.get(0).acquiredRecords.toArray().isEmpty) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.REPLICA_NOT_AVAILABLE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertTrue(topicResponses.get(0).partitions.get(0).acquiredRecords.toArray().isEmpty) } @Test @@ -4992,51 +4395,57 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val groupId = "group" val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, partitionIndex, topicName), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ).thenThrow(Errors.SHARE_SESSION_NOT_FOUND.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5044,13 +4453,12 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) val memberId2 = Uuid.randomUuid() @@ -5059,11 +4467,12 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId2.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5079,51 +4488,57 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val groupId = "group" val records = memoryRecords(10, 0) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ).thenThrow(Errors.INVALID_SHARE_SESSION_EPOCH.exception) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5131,23 +4546,23 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). // Invalid share session epoch, should have 1 for the second request. - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5158,44 +4573,12 @@ class KafkaApisTest extends Logging { assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, responseData.errorCode) } - @Test - def testHandleShareFetchRequestWhenShareSessionCacheIsFull(): Unit = { - val topicId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache("foo", 1, topicId = topicId) - - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())) - .thenThrow(Errors.SHARE_SESSION_LIMIT_REACHED.exception) - - when(sharePartitionManager.createIdleShareFetchTimerTask(anyLong())) - .thenReturn(CompletableFuture.completedFuture(null)) - - val shareFetchRequestData = new ShareFetchRequestData(). - setGroupId("group"). - setMemberId(Uuid.randomUuid.toString). - setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). - setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( - new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) - - val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() - kafkaApis.handleShareFetchRequest(request) - val response = verifyNoThrottling[ShareFetchResponse](request) - val responseData = response.data() - - assertEquals(Errors.SHARE_SESSION_LIMIT_REACHED.code, responseData.errorCode) - } - @Test def testHandleShareFetchRequestShareSessionSuccessfullyEstablished(): Unit = { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() @@ -5205,98 +4588,104 @@ class KafkaApisTest extends Logging { val records2 = memoryRecords(10, 10) val records3 = memoryRecords(10, 20) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records3) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(20) .setLastOffset(29) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, partitionIndex, topicName), false) + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 3)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 10L, 3)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(partitionIndex)).iterator))).iterator)) + .setPartitionIndex(partitionIndex) + .setPartitionMaxBytes(partitionMaxBytes)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5304,9 +4693,8 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - var topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) compareResponsePartitions( partitionIndex, @@ -5314,23 +4702,24 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records1, expectedAcquiredRecords(0, 9, 1), - topicResponse.partitions.get(0) + topicResponses.get(0).partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setAcknowledgementBatches(util.List.of( + setPartitionMaxBytes(partitionMaxBytes). + setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(0). setLastOffset(9). - setAcknowledgeTypes(util.List.of[java.lang.Byte](1.toByte))))).iterator))).iterator)) + setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava)).asJava)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5342,9 +4731,8 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) compareResponsePartitions( partitionIndex, @@ -5352,23 +4740,24 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records2, expectedAcquiredRecords(10, 19, 1), - topicResponse.partitions.get(0) + topicResponses.get(0).partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition(). setPartitionIndex(partitionIndex). - setAcknowledgementBatches(util.List.of( + setPartitionMaxBytes(partitionMaxBytes). + setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch(). setFirstOffset(10). setLastOffset(19). - setAcknowledgeTypes(util.List.of[java.lang.Byte](1.toByte))))).iterator))).iterator)) + setAcknowledgeTypes(List[java.lang.Byte](1.toByte).asJava)).asJava)).asJava)).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5380,9 +4769,8 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) compareResponsePartitions( partitionIndex, @@ -5390,7 +4778,7 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records3, expectedAcquiredRecords(20, 29, 1), - topicResponse.partitions.get(0) + topicResponses.get(0).partitions.get(0) ) } @@ -5408,7 +4796,7 @@ class KafkaApisTest extends Logging { val topicName4 = "foo4" val topicId4 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 2, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) addTopicToMetadataCache(topicName3, 1, topicId = topicId3) @@ -5428,197 +4816,209 @@ class KafkaApisTest extends Logging { val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))), - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), + ).asJava)), + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p2_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ))), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), + ).asJava)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p1_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(43) .setLastOffset(52) .setDeliveryCount(1) - ))), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), + ).asJava)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p2_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(17) .setLastOffset(26) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t3_p1_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(54) .setLastOffset(73) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t3_p1_2) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(74) .setLastOffset(93) .setDeliveryCount(1) - ))), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), + ).asJava)), + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setRecords(records_t4_p1_1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(24) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) val cachedSharePartitions1 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, 0, topicName1), false + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId1, 1, topicName1), false + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, 0, topicName2), false + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId2, 1, topicName2), false + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), false )) cachedSharePartitions1.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, 0, topicName3), false + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes), false )) val cachedSharePartitions2 = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId3, 0, topicName3), false + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes), false )) cachedSharePartitions2.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId4, 0, topicName4), false + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), new ShareFetchRequest.SharePartitionData(topicId4, partitionMaxBytes), false )) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions1, 0L, 0L, 2)) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 2), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions2, 3)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions2, 0L, 0L, 3)) ).thenReturn(new FinalContext()) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)), + new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)), + new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)), + new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)), + new TopicIdPartition(topicId4, new TopicPartition(topicName4, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - )) + ).asJava) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)) - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava) + ).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -5690,14 +5090,15 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5708,9 +5109,8 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId3, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) + assertEquals(topicId3, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) compareResponsePartitions( 0, @@ -5718,29 +5118,30 @@ class KafkaApisTest extends Logging { Errors.NONE.code, records_t3_p1_1, expectedAcquiredRecords(54, 73, 1), - topicResponse.partitions.get(0) + topicResponses.get(0).partitions.get(0) ) shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(2). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), - ).iterator)) - .setForgottenTopicsData(util.List.of( + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), + ).asJava) + .setForgottenTopicsData(List( new ForgottenTopic() .setTopicId(topicId1) - .setPartitions(util.List.of(Integer.valueOf(0), Integer.valueOf(1))), + .setPartitions(List(Integer.valueOf(0), Integer.valueOf(1)).asJava), new ForgottenTopic() .setTopicId(topicId2) - .setPartitions(util.List.of(Integer.valueOf(0), Integer.valueOf(1))) - )) + .setPartitions(List(Integer.valueOf(0), Integer.valueOf(1)).asJava) + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5784,72 +5185,78 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(-1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(19) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )) - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(43) .setLastOffset(52) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(17) .setLastOffset(26) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )) - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(54) .setLastOffset(93) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )), - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava), + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId4). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(0) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(24) - .setAcknowledgeTypes(util.List.of(1.toByte)), - )), - ).iterator)), - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), + ).asJava), + ).asJava), + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -5869,7 +5276,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -5884,47 +5291,59 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1, + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))), - tp2, + ).asJava)), + tp2 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(14) .setDeliveryCount(1) - ))), - tp3, + ).asJava)), + tp3 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) .setRecords(records_t2_p2) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(19) .setDeliveryCount(1) - ))), - )) + ).asJava)), + ).asJava) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3) + val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() + validPartitions.put( + tp1, + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) + ) + validPartitions.put( + tp2, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) + validPartitions.put( + tp3, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -5937,31 +5356,37 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)), - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, - 0, erroneousAndValidPartitionData, sharePartitionManager, authorizedTopics @@ -6017,7 +5442,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -6030,34 +5455,41 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId1, new TopicPartition(topicName1, 1)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1, + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records_t1_p1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) - val erroneousPartitions = util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( + val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() + erroneousPartitions.put( tp2, new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) - .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code), + .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) + ) + erroneousPartitions.put( tp3, new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_TOPIC_OR_PARTITION.code) ) - val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1) + val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() + validPartitions.put( + tp1, + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) + ) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -6070,31 +5502,37 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), - ).iterator)), - ).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), + ).asJava), + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, - 0, erroneousAndValidPartitionData, sharePartitionManager, authorizedTopics @@ -6144,7 +5582,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -6157,32 +5595,44 @@ class KafkaApisTest extends Logging { val tp2 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 0)) val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp1, + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp1 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(util.List.of)), - tp2, + .setAcquiredRecords(new util.ArrayList(List().asJava)), + tp2 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(util.List.of)), - tp3, + .setAcquiredRecords(new util.ArrayList(List().asJava)), + tp3 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) .setRecords(emptyRecords) - .setAcquiredRecords(new util.ArrayList(util.List.of)) - )) + .setAcquiredRecords(new util.ArrayList(List().asJava)) + ).asJava) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3) + val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() + validPartitions.put( + tp1, + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) + ) + validPartitions.put( + tp2, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) + validPartitions.put( + tp3, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -6195,31 +5645,37 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)), - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) // First share fetch request is to establish the share session with the broker. - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, - 0, erroneousAndValidPartitionData, sharePartitionManager, authorizedTopics @@ -6236,7 +5692,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - util.List.of[AcquiredRecords](), + Collections.emptyList[AcquiredRecords](), partitionData1 ) @@ -6249,7 +5705,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - util.List.of[AcquiredRecords](), + Collections.emptyList[AcquiredRecords](), partitionData2 ) @@ -6262,7 +5718,7 @@ class KafkaApisTest extends Logging { Errors.UNKNOWN_SERVER_ERROR.code, Errors.NONE.code, emptyRecords, - util.List.of[AcquiredRecords](), + Collections.emptyList[AcquiredRecords](), partitionData3 ) } @@ -6277,7 +5733,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val topicId3 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) // topicName3 is not in the metadataCache. @@ -6293,36 +5749,52 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) val tp4 = new TopicIdPartition(topicId3, new TopicPartition(topicName3, 0)) - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - tp2, + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + tp2 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))), - tp3, + ).asJava)), + tp3 -> new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(19) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) val erroneousPartitions: util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = new util.HashMap() - val validPartitions: util.List[TopicIdPartition] = util.List.of(tp1, tp2, tp3, tp4) + val validPartitions: util.Map[TopicIdPartition, ShareFetchRequest.SharePartitionData] = new util.HashMap() + validPartitions.put( + tp1, + new ShareFetchRequest.SharePartitionData(topicId1, partitionMaxBytes) + ) + validPartitions.put( + tp2, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) + validPartitions.put( + tp3, + new ShareFetchRequest.SharePartitionData(topicId2, partitionMaxBytes) + ) + validPartitions.put( + tp4, + new ShareFetchRequest.SharePartitionData(topicId3, partitionMaxBytes) + ) val erroneousAndValidPartitionData: ErroneousAndValidPartitionData = new ErroneousAndValidPartitionData(erroneousPartitions, validPartitions) @@ -6336,36 +5808,43 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(shareSessionEpoch). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0), + .setPartitionIndex(0) + .setPartitionMaxBytes(partitionMaxBytes), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)), + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId3). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - ).iterator)), - ).iterator)) + .setPartitionMaxBytes(partitionMaxBytes) + ).asJava), + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val fetchResult: Map[TopicIdPartition, ShareFetchResponseData.PartitionData] = kafkaApis.handleFetchFromShareFetchRequest( request, - 0, erroneousAndValidPartitionData, sharePartitionManager, authorizedTopics @@ -6381,8 +5860,8 @@ class KafkaApisTest extends Logging { 0, Errors.TOPIC_AUTHORIZATION_FAILED.code, Errors.NONE.code, - MemoryRecords.EMPTY, - util.List.of[AcquiredRecords](), + null, + Collections.emptyList[AcquiredRecords](), partitionData1 ) @@ -6420,8 +5899,8 @@ class KafkaApisTest extends Logging { 0, Errors.UNKNOWN_TOPIC_OR_PARTITION.code, Errors.NONE.code, - MemoryRecords.EMPTY, - util.List.of[AcquiredRecords](), + null, + Collections.emptyList[AcquiredRecords](), partitionData4 ) } @@ -6453,7 +5932,7 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() @@ -6462,74 +5941,80 @@ class KafkaApisTest extends Logging { val groupId = "group" - when(sharePartitionManager.fetchMessages(any(), any(), any(), anyInt(), anyInt(), anyInt(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + when(sharePartitionManager.fetchMessages(any(), any(), any(), any())).thenReturn( + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records1) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(0) .setLastOffset(9) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareFetchResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareFetchResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareFetchResponseData.PartitionData() .setErrorCode(Errors.NONE.code) .setAcknowledgeErrorCode(Errors.NONE.code) .setRecords(records2) - .setAcquiredRecords(new util.ArrayList(util.List.of( + .setAcquiredRecords(new util.ArrayList(List( new ShareFetchResponseData.AcquiredRecords() .setFirstOffset(10) .setLastOffset(19) .setDeliveryCount(1) - ))) - )) + ).asJava)) + ).asJava) ) val cachedSharePartitions = new ImplicitLinkedHashCollection[CachedSharePartition] cachedSharePartitions.mustAdd(new CachedSharePartition( - new TopicIdPartition(topicId, 0, topicName), false + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes), false )) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( - new ShareSessionContext(new ShareRequestMetadata(memberId, 0), util.List.of( - new TopicIdPartition(topicId, partitionIndex, topicName) - )) + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( + new ShareSessionContext(new ShareRequestMetadata(memberId, 0), Map( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> + new ShareFetchRequest.SharePartitionData(topicId, partitionMaxBytes) + ).asJava) ).thenReturn(new ShareSessionContext(new ShareRequestMetadata(memberId, 1), new ShareSession( - new ShareSessionKey(groupId, memberId), cachedSharePartitions, 2)) + new ShareSessionKey(groupId, memberId), cachedSharePartitions, 0L, 0L, 2)) ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - )) + ).asJava) ) var shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(0)).iterator))).iterator)) + .setPartitionIndex(0) + .setPartitionMaxBytes(40000)).asJava)).asJava) var shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) var request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) var response = verifyNoThrottling[ShareFetchResponse](request) var responseData = response.data() @@ -6537,31 +6022,31 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - var topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(records1, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(records1, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(0, 9, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) request = buildRequest(shareFetchRequest) @@ -6572,59 +6057,94 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).acknowledgeErrorCode) - assertEquals(records2, topicResponse.partitions.get(0).records) - assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(), topicResponse.partitions.get(0).acquiredRecords.toArray()) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).acknowledgeErrorCode) + assertEquals(records2, topicResponses.get(0).partitions.get(0).records) + assertArrayEquals(expectedAcquiredRecords(10, 19, 1).toArray(), topicResponses.get(0).partitions.get(0).acquiredRecords.toArray()) } @Test - def testHandleShareFetchShareGroupDisabled(): Unit = { + def testHandleShareFetchNewGroupCoordinatorDisabled(): Unit = { val topicId = Uuid.randomUuid() val memberId: Uuid = Uuid.randomUuid() val groupId = "group" - metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new FeatureLevelRecord() - .setName(MetadataVersion.FEATURE_NAME) - .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) - ) - delta.replay(new FeatureLevelRecord() - .setName(ShareVersion.FEATURE_NAME) - .setFeatureLevel(ShareVersion.SV_0.featureLevel()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + + val shareFetchRequestData = new ShareFetchRequestData(). + setGroupId(groupId). + setMemberId(memberId.toString). + setShareSessionEpoch(1). + setTopics(List(new ShareFetchRequestData.FetchTopic(). + setTopicId(topicId). + setPartitions(List( + new ShareFetchRequestData.FetchPartition() + .setPartitionIndex(0) + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( + new AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) + + val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) + val request = buildRequest(shareFetchRequest) + + kafkaApis = createKafkaApis( + overrideProperties = Map( + GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false", + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), ) - cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - cache - } + kafkaApis.handleShareFetchRequest(request) + + val response = verifyNoThrottling[ShareFetchResponse](request) + val responseData = response.data() + + assertEquals(Errors.UNSUPPORTED_VERSION.code, responseData.errorCode) + } + + @Test + def testHandleShareFetchShareGroupDisabled(): Unit = { + val topicId = Uuid.randomUuid() + val memberId: Uuid = Uuid.randomUuid() + val groupId = "group" + + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) @@ -6638,40 +6158,44 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any(), any())).thenReturn(util.List.of[AuthorizationResult]( + when(authorizer.authorize(any(), any())).thenReturn(List[AuthorizationResult]( AuthorizationResult.DENIED - )) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareFetchRequest) kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), ) kafkaApis.handleShareFetchRequest(request) @@ -6687,22 +6211,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID val groupId = "group" when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - )) + ).asJava) ) - when(sharePartitionManager.newContext(any(), any(), any(), any(), any(), any())).thenReturn( + when(sharePartitionManager.newContext(any(), any(), any(), any(), any())).thenReturn( new FinalContext() ) @@ -6711,28 +6235,32 @@ class KafkaApisTest extends Logging { ) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of(new ShareFetchRequestData.FetchTopic(). + setTopics(List(new ShareFetchRequestData.FetchTopic(). setTopicId(topicId). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava)).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) val request = buildRequest(shareFetchRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareFetchRequest(request) val response = verifyNoThrottling[ShareFetchResponse](request) val responseData = response.data() @@ -6740,14 +6268,13 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).acknowledgeErrorCode) - assertEquals(MemoryRecords.EMPTY, topicResponse.partitions.get(0).records) - assertEquals(0, topicResponse.partitions.get(0).acquiredRecords.toArray().length) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).acknowledgeErrorCode) + assertNull(topicResponses.get(0).partitions.get(0).records) + assertEquals(0, topicResponses.get(0).partitions.get(0).acquiredRecords.toArray().length) } @Test @@ -6755,22 +6282,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -6779,23 +6306,28 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -6803,55 +6335,89 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) } @Test - def testHandleShareAcknowledgeShareGroupDisabled(): Unit = { + def testHandleShareAcknowledgeNewGroupCoordinatorDisabled(): Unit = { val topicId = Uuid.randomUuid() val memberId: Uuid = Uuid.randomUuid() val groupId = "group" - metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new FeatureLevelRecord() - .setName(MetadataVersion.FEATURE_NAME) - .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) - ) - delta.replay(new FeatureLevelRecord() - .setName(ShareVersion.FEATURE_NAME) - .setFeatureLevel(ShareVersion.SV_0.featureLevel()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + + val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() + .setGroupId(groupId) + .setMemberId(memberId.toString) + .setShareSessionEpoch(1) + .setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopicId(topicId) + .setPartitions(List( + new ShareAcknowledgeRequestData.AcknowledgePartition() + .setPartitionIndex(0) + .setAcknowledgementBatches(List( + new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) + + val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) + val request = buildRequest(shareAcknowledgeRequest) + + kafkaApis = createKafkaApis( + overrideProperties = Map( + GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG -> "false", + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), ) - cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - cache - } + kafkaApis.handleShareAcknowledgeRequest(request) + + val response = verifyNoThrottling[ShareAcknowledgeResponse](request) + val responseData = response.data() + + assertEquals(Errors.UNSUPPORTED_VERSION.code, responseData.errorCode) + } + + @Test + def testHandleShareAcknowledgeShareGroupDisabled(): Unit = { + val topicId = Uuid.randomUuid() + val memberId: Uuid = Uuid.randomUuid() + val groupId = "group" + + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData() .setGroupId(groupId) .setMemberId(memberId.toString) .setShareSessionEpoch(1) - .setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic() + .setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic() .setTopicId(topicId) - .setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + .setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "false"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6865,39 +6431,43 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any(), any())).thenReturn(util.List.of[AuthorizationResult]( + when(authorizer.authorize(any(), any())).thenReturn(List[AuthorizationResult]( AuthorizationResult.DENIED - )) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), authorizer = Option(authorizer), ) kafkaApis.handleShareAcknowledgeRequest(request) @@ -6913,12 +6483,12 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledgeSessionUpdate(any(), any())).thenThrow( Errors.INVALID_SHARE_SESSION_EPOCH.exception @@ -6928,24 +6498,29 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -6959,12 +6534,12 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledgeSessionUpdate(any(), any())).thenThrow( Errors.SHARE_SESSION_NOT_FOUND.exception @@ -6974,24 +6549,29 @@ class KafkaApisTest extends Logging { setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(0). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7005,13 +6585,13 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val groupId: String = "group" val memberId: Uuid = Uuid.ZERO_UUID when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -7019,23 +6599,28 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(4) // end offset is less than base offset - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7044,11 +6629,10 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.INVALID_REQUEST.code, topicResponse.partitions.get(0).errorCode) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.INVALID_REQUEST.code, topicResponses.get(0).partitions.get(0).errorCode) } @Test @@ -7058,7 +6642,7 @@ class KafkaApisTest extends Logging { val partitionIndex = 0 val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) val topicPartition = topicIdPartition.topicPartition - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicPartition.topic, numPartitions = 1, numBrokers = 3, topicId) val memberId: Uuid = Uuid.ZERO_UUID @@ -7076,37 +6660,42 @@ class KafkaApisTest extends Logging { any(), any(), any() - )).thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)), + )).thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, partitionIndex)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(partitionIndex) .setErrorCode(Errors.NOT_LEADER_OR_FOLLOWER.code()) - ))) + ).asJava)) when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(20) - .setAcknowledgeTypes(util.List.of(1.toByte,1.toByte,0.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(util.Arrays.asList(1.toByte,1.toByte,0.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte,1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) @@ -7115,13 +6704,12 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, topicResponse.partitions.get(0).errorCode) - assertEquals(newLeaderId, topicResponse.partitions.get(0).currentLeader.leaderId) - assertEquals(newLeaderEpoch, topicResponse.partitions.get(0).currentLeader.leaderEpoch) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, topicResponses.get(0).partitions.get(0).errorCode) + assertEquals(newLeaderId, topicResponses.get(0).partitions.get(0).currentLeader.leaderId) + assertEquals(newLeaderEpoch, topicResponses.get(0).partitions.get(0).currentLeader.leaderEpoch) assertEquals(2, responseData.nodeEndpoints.asScala.head.nodeId) } @@ -7130,14 +6718,14 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( FutureUtils.failedFuture[util.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]](Errors.UNKNOWN_SERVER_ERROR.exception()) @@ -7149,23 +6737,28 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7178,56 +6771,61 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) when(sharePartitionManager.releaseSession(any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7235,11 +6833,10 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) } @Test @@ -7247,22 +6844,22 @@ class KafkaApisTest extends Logging { val topicName = "foo" val topicId = Uuid.randomUuid() val partitionIndex = 0 - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName, 1, topicId = topicId) val memberId: Uuid = Uuid.randomUuid() val groupId = "group" when(clientQuotaManager.maybeRecordAndGetThrottleTimeMs( - any[Session](), anyString, anyDouble, anyLong)).thenReturn(0) + any[RequestChannel.Request](), anyDouble, anyLong)).thenReturn(0) when(sharePartitionManager.acknowledge(any(), any(), any())).thenReturn( - CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId, new TopicPartition(topicName, 0)), + CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId, new TopicPartition(topicName, 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - )) + ).asJava) ) doNothing().when(sharePartitionManager).acknowledgeSessionUpdate(any(), any()) @@ -7275,23 +6872,28 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(ShareRequestMetadata.FINAL_EPOCH). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of(new ShareAcknowledgeRequestData.AcknowledgeTopic(). + setTopics(List(new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(partitionIndex) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator))).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) kafkaApis.handleShareAcknowledgeRequest(request) val response = verifyNoThrottling[ShareAcknowledgeResponse](request) val responseData = response.data() @@ -7299,11 +6901,10 @@ class KafkaApisTest extends Logging { assertEquals(Errors.NONE.code, responseData.errorCode) assertEquals(1, topicResponses.size()) - val topicResponse = topicResponses.stream.findFirst.get - assertEquals(topicId, topicResponse.topicId) - assertEquals(1, topicResponse.partitions.size()) - assertEquals(partitionIndex, topicResponse.partitions.get(0).partitionIndex) - assertEquals(Errors.NONE.code, topicResponse.partitions.get(0).errorCode) + assertEquals(topicId, topicResponses.get(0).topicId) + assertEquals(1, topicResponses.get(0).partitions.size()) + assertEquals(partitionIndex, topicResponses.get(0).partitions.get(0).partitionIndex) + assertEquals(Errors.NONE.code, topicResponses.get(0).partitions.get(0).errorCode) } private def expectedAcquiredRecords(firstOffset: Long, lastOffset: Long, deliveryCount: Int): util.List[AcquiredRecords] = { @@ -7319,56 +6920,66 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareFetchRequest(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(17) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)) - )) - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(2.toByte)) + ).asJava) + ).asJava), new ShareFetchRequestData.FetchTopic(). setTopicId(topicId2). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(util.List.of(3.toByte)) - )), + .setAcknowledgeTypes(Collections.singletonList(3.toByte)) + ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - ).iterator)) - ).iterator)) + .setPartitionMaxBytes(40000) + ).asJava) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicNames = util.Map.of(topicId1, "foo1", topicId2, "foo2") + val topicNames = new util.HashMap[Uuid, String] + topicNames.put(topicId1, "foo1") + topicNames.put(topicId2, "foo2") val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicNames, erroneous) assertEquals(4, acknowledgeBatches.size) @@ -7386,50 +6997,58 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareFetchRequestError(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareFetchRequestData = new ShareFetchRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(new ShareFetchRequestData.FetchTopicCollection(util.List.of( + setTopics(List( new ShareFetchRequestData.FetchTopic(). setTopicId(topicId1). - setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - )), + .setAcknowledgeTypes(Collections.singletonList(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + ).asJava), new ShareFetchRequestData.FetchPartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - )) - ).iterator)), + .setAcknowledgeTypes(Collections.emptyList()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + ).asJava) + ).asJava), new ShareFetchRequestData.FetchTopic() .setTopicId(topicId2) - .setPartitions(new ShareFetchRequestData.FetchPartitionCollection(util.List.of( + .setPartitions(List( new ShareFetchRequestData.FetchPartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setPartitionMaxBytes(40000) + .setAcknowledgementBatches(List( new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(util.List.of(3.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(3.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareFetchRequest = new ShareFetchRequest.Builder(shareFetchRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicIdNames = util.Map.of(topicId1, "foo1") // topicId2 is not present in topicIdNames + val topicIdNames = new util.HashMap[Uuid, String] + topicIdNames.put(topicId1, "foo1") // topicId2 is not present in topicIdNames val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareFetchRequest(shareFetchRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -7448,55 +7067,61 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( + setTopics(List( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)), new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(17) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)) - )) - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(2.toByte)) + ).asJava) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(util.List.of(3.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(3.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicNames = util.Map.of(topicId1, "foo1", topicId2, "foo2") + val topicNames = new util.HashMap[Uuid, String] + topicNames.put(topicId1, "foo1") + topicNames.put(topicId2, "foo2") val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicNames, erroneous) assertEquals(3, acknowledgeBatches.size) @@ -7514,51 +7139,56 @@ class KafkaApisTest extends Logging { def testGetAcknowledgeBatchesFromShareAcknowledgeRequestError(): Unit = { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val shareAcknowledgeRequestData = new ShareAcknowledgeRequestData(). setGroupId("group"). setMemberId(Uuid.randomUuid().toString). setShareSessionEpoch(0). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( + setTopics(List( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - )), + .setAcknowledgeTypes(Collections.singletonList(7.toByte)) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) - )) - ).iterator)), + .setAcknowledgeTypes(Collections.emptyList()) // wrong acknowledgement type here (can only be 0, 1, 2 or 3) + ).asJava) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(24) .setLastOffset(65) - .setAcknowledgeTypes(util.List.of(3.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(3.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData).build(ApiKeys.SHARE_FETCH.latestVersion) - val topicIdNames = util.Map.of(topicId1, "foo1") // topicId2 not present in topicIdNames + val topicIdNames = new util.HashMap[Uuid, String] + topicIdNames.put(topicId1, "foo1") // topicId2 not present in topicIdNames val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val acknowledgeBatches = kafkaApis.getAcknowledgeBatchesFromShareAcknowledgeRequest(shareAcknowledgeRequest, topicIdNames, erroneous) val erroneousTopicIdPartitions = kafkaApis.validateAcknowledgementBatches(acknowledgeBatches, erroneous) @@ -7585,7 +7215,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7594,39 +7224,43 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - tp1, + .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + tp1 -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp2, + tp2 -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp3, + tp3 -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ))) + ).asJava)) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.List.of( - new ShareAcknowledgementBatch(0, 9, util.List.of(1.toByte)), - new ShareAcknowledgementBatch(10, 19, util.List.of(2.toByte)) + acknowledgementData += (tp1 -> util.Arrays.asList( + new ShareAcknowledgementBatch(0, 9, Collections.singletonList(1.toByte)), + new ShareAcknowledgementBatch(10, 19, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp2 -> util.List.of( - new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) + acknowledgementData += (tp2 -> util.Arrays.asList( + new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp3 -> util.List.of( - new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)) + acknowledgementData += (tp3 -> util.Arrays.asList( + new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1, topicName2) val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7660,7 +7294,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7669,39 +7303,43 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)), + .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)), + new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)), + new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ))) + ).asJava)) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.List.of( - new ShareAcknowledgementBatch(39, 24, util.List.of(1.toByte)), // this is an invalid batch because last offset is less than base offset - new ShareAcknowledgementBatch(43, 56, util.List.of(2.toByte)) + acknowledgementData += (tp1 -> util.Arrays.asList( + new ShareAcknowledgementBatch(39, 24, Collections.singletonList(1.toByte)), // this is an invalid batch because last offset is less than base offset + new ShareAcknowledgementBatch(43, 56, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp2 -> util.List.of( - new ShareAcknowledgementBatch(5, 19, util.List.of(0.toByte, 2.toByte)) + acknowledgementData += (tp2 -> util.Arrays.asList( + new ShareAcknowledgementBatch(5, 19, util.Arrays.asList(0.toByte, 2.toByte)) )) - acknowledgementData += (tp3 -> util.List.of( - new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)), - new ShareAcknowledgementBatch(10, 19, util.List.of(1.toByte)) // this is an invalid batch because start is offset is less than previous end offset + acknowledgementData += (tp3 -> util.Arrays.asList( + new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)), + new ShareAcknowledgementBatch(10, 19, Collections.singletonList(1.toByte)) // this is an invalid batch because start is offset is less than previous end offset )) val authorizedTopics: Set[String] = Set(topicName1, topicName2) val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7735,7 +7373,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) // Topic with id topicId1 is not present in Metadata Cache addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7744,40 +7382,44 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)), + .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + new TopicIdPartition(topicId1, new TopicPartition("foo1", 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)), + new TopicIdPartition(topicId2, new TopicPartition("foo2", 0)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)), + new TopicIdPartition(topicId2, new TopicPartition("foo2", 1)) -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code) - ))) + ).asJava)) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.List.of( - new ShareAcknowledgementBatch(24, 39, util.List.of(1.toByte)), - new ShareAcknowledgementBatch(43, 56, util.List.of(2.toByte)) + acknowledgementData += (tp1 -> util.Arrays.asList( + new ShareAcknowledgementBatch(24, 39, Collections.singletonList(1.toByte)), + new ShareAcknowledgementBatch(43, 56, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp2 -> util.List.of( - new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) + acknowledgementData += (tp2 -> util.Arrays.asList( + new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp3 -> util.List.of( - new ShareAcknowledgementBatch(34, 56, util.List.of(1.toByte)), - new ShareAcknowledgementBatch(67, 87, util.List.of(1.toByte)) + acknowledgementData += (tp3 -> util.Arrays.asList( + new ShareAcknowledgementBatch(34, 56, Collections.singletonList(1.toByte)), + new ShareAcknowledgementBatch(67, 87, Collections.singletonList(1.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1) // Topic with topicId2 is not authorized val erroneous = mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]() - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7811,7 +7453,7 @@ class KafkaApisTest extends Logging { val topicId2 = Uuid.randomUuid() val memberId = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) addTopicToMetadataCache(topicName1, 1, topicId = topicId1) addTopicToMetadataCache(topicName2, 2, topicId = topicId2) @@ -7820,25 +7462,25 @@ class KafkaApisTest extends Logging { val tp3 = new TopicIdPartition(topicId2, new TopicPartition(topicName2, 1)) when(sharePartitionManager.acknowledge(any(), any(), any())) - .thenReturn(CompletableFuture.completedFuture(util.Map.of[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( - tp1, + .thenReturn(CompletableFuture.completedFuture(Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData]( + tp1 -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code), - tp2, + tp2 -> new ShareAcknowledgeResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code) - ))) + ).asJava)) val acknowledgementData = mutable.Map[TopicIdPartition, util.List[ShareAcknowledgementBatch]]() - acknowledgementData += (tp1 -> util.List.of( - new ShareAcknowledgementBatch(0, 9, util.List.of(1.toByte)), - new ShareAcknowledgementBatch(10, 19, util.List.of(2.toByte)) + acknowledgementData += (tp1 -> util.Arrays.asList( + new ShareAcknowledgementBatch(0, 9, Collections.singletonList(1.toByte)), + new ShareAcknowledgementBatch(10, 19, Collections.singletonList(2.toByte)) )) - acknowledgementData += (tp2 -> util.List.of( - new ShareAcknowledgementBatch(5, 19, util.List.of(2.toByte)) + acknowledgementData += (tp2 -> util.Arrays.asList( + new ShareAcknowledgementBatch(5, 19, Collections.singletonList(2.toByte)) )) val authorizedTopics: Set[String] = Set(topicName1, topicName2) @@ -7847,7 +7489,11 @@ class KafkaApisTest extends Logging { erroneous += (tp3 -> ShareAcknowledgeResponse.partitionResponse(tp3, Errors.UNKNOWN_TOPIC_ID)) - kafkaApis = createKafkaApis() + kafkaApis = createKafkaApis( + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val ackResult = kafkaApis.handleAcknowledgements( acknowledgementData, erroneous, @@ -7879,7 +7525,7 @@ class KafkaApisTest extends Logging { val topicId1 = Uuid.randomUuid() val topicId2 = Uuid.randomUuid() - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) val responseAcknowledgeData: mutable.Map[TopicIdPartition, ShareAcknowledgeResponseData.PartitionData] = mutable.Map() responseAcknowledgeData += (new TopicIdPartition(topicId1, new TopicPartition("foo", 0)) -> new ShareAcknowledgeResponseData.PartitionData().setPartitionIndex(0).setErrorCode(Errors.NONE.code)) @@ -7894,54 +7540,57 @@ class KafkaApisTest extends Logging { setGroupId(groupId). setMemberId(memberId.toString). setShareSessionEpoch(1). - setTopics(new ShareAcknowledgeRequestData.AcknowledgeTopicCollection(util.List.of( + setTopics(List( new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId1). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgeTopic(). setTopicId(topicId2). - setPartitions(new ShareAcknowledgeRequestData.AcknowledgePartitionCollection(util.List.of( + setPartitions(List( new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(0) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )), + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava), new ShareAcknowledgeRequestData.AcknowledgePartition() .setPartitionIndex(1) - .setAcknowledgementBatches(util.List.of( + .setAcknowledgementBatches(List( new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)) - )) - ).iterator)) - ).iterator)) + .setAcknowledgeTypes(Collections.singletonList(1.toByte)) + ).asJava) + ).asJava) + ).asJava) val shareAcknowledgeRequest = new ShareAcknowledgeRequest.Builder(shareAcknowledgeRequestData) .build(ApiKeys.SHARE_ACKNOWLEDGE.latestVersion) val request = buildRequest(shareAcknowledgeRequest) kafkaApis = createKafkaApis( - ) + overrideProperties = Map( + ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG -> "true", + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + ) val response = kafkaApis.processShareAcknowledgeResponse(responseAcknowledgeData, request) val responseData = response.data() val topicResponses = responseData.responses() @@ -7978,12 +7627,12 @@ class KafkaApisTest extends Logging { private def compareAcknowledgementBatches(baseOffset: Long, endOffset: Long, - acknowledgeType: Byte, + acknowledgementType: Byte, acknowledgementBatch: ShareAcknowledgementBatch ): Boolean = { if (baseOffset == acknowledgementBatch.firstOffset() && endOffset == acknowledgementBatch.lastOffset() - && acknowledgeType == acknowledgementBatch.acknowledgeTypes().get(0)) { + && acknowledgementType == acknowledgementBatch.acknowledgeTypes().get(0)) { return true } false @@ -8098,7 +7747,7 @@ class KafkaApisTest extends Logging { val expectedJoinGroupResponse = new JoinGroupResponseData() .setErrorCode(Errors.INCONSISTENT_GROUP_PROTOCOL.code) .setMemberId("member") - .setProtocolName(if (version >= 7) null else "") + .setProtocolName(if (version >= 7) null else kafka.coordinator.group.GroupCoordinator.NoProtocol) future.complete(joinGroupResponse) val response = verifyNoThrottling[JoinGroupResponse](requestChannelRequest) @@ -8146,7 +7795,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleJoinGroupRequest( requestChannelRequest, @@ -8277,7 +7926,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleSyncGroupRequest( requestChannelRequest, @@ -8395,7 +8044,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleHeartbeatRequest( requestChannelRequest @@ -8411,14 +8060,15 @@ class KafkaApisTest extends Logging { def makeRequest(version: Short): RequestChannel.Request = { buildRequest(new LeaveGroupRequest.Builder( "group", - util.List.of( + List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new MemberIdentity() .setMemberId("member-2") .setGroupInstanceId("instance-2") - )).build(version)) + ).asJava + ).build(version)) } if (version < 3) { @@ -8429,14 +8079,14 @@ class KafkaApisTest extends Logging { val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(util.List.of( + .setMembers(List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new MemberIdentity() .setMemberId("member-2") .setGroupInstanceId("instance-2") - )) + ).asJava) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8448,14 +8098,14 @@ class KafkaApisTest extends Logging { val expectedLeaveResponse = new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(util.List.of( + .setMembers(List( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1"), new LeaveGroupResponseData.MemberResponse() .setMemberId("member-2") .setGroupInstanceId("instance-2"), - )) + ).asJava) future.complete(expectedLeaveResponse) val response = verifyNoThrottling[LeaveGroupResponse](requestChannelRequest) @@ -8468,20 +8118,20 @@ class KafkaApisTest extends Logging { def testHandleLeaveGroupWithSingleMember(version: Short): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - util.List.of( + List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ) + ).asJava ).build(version)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(util.List.of( + .setMembers(List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId(if (version >= 3) "instance-1" else null) - )) + ).asJava) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8493,20 +8143,20 @@ class KafkaApisTest extends Logging { val leaveGroupResponse = new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(util.List.of( + .setMembers(List( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1") - )) + ).asJava) val expectedLeaveResponse = if (version >= 3) { new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) - .setMembers(util.List.of( + .setMembers(List( new LeaveGroupResponseData.MemberResponse() .setMemberId("member-1") .setGroupInstanceId("instance-1") - )) + ).asJava) } else { new LeaveGroupResponseData() .setErrorCode(Errors.NONE.code) @@ -8521,20 +8171,20 @@ class KafkaApisTest extends Logging { def testHandleLeaveGroupFutureFailed(): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - util.List.of( + List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ) + ).asJava ).build(ApiKeys.LEAVE_GROUP.latestVersion)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(util.List.of( + .setMembers(List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - )) + ).asJava) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8553,20 +8203,20 @@ class KafkaApisTest extends Logging { def testHandleLeaveGroupAuthenticationFailed(): Unit = { val requestChannelRequest = buildRequest(new LeaveGroupRequest.Builder( "group", - util.List.of( + List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - ) + ).asJava ).build(ApiKeys.LEAVE_GROUP.latestVersion)) val expectedLeaveGroupRequest = new LeaveGroupRequestData() .setGroupId("group") - .setMembers(util.List.of( + .setMembers(List( new MemberIdentity() .setMemberId("member-1") .setGroupInstanceId("instance-1") - )) + ).asJava) val future = new CompletableFuture[LeaveGroupResponseData]() when(groupCoordinator.leaveGroup( @@ -8576,7 +8226,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleLeaveGroupRequest(requestChannelRequest) @@ -8587,38 +8237,21 @@ class KafkaApisTest extends Logging { @ParameterizedTest @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) def testHandleOffsetFetchWithMultipleGroups(version: Short): Unit = { - val foo = "foo" - val bar = "bar" - val fooId = Uuid.randomUuid() - addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + // Version 0 gets offsets from Zookeeper. We are not interested + // in testing this here. + if (version == 0) return def makeRequest(version: Short): RequestChannel.Request = { - buildRequest( - OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0, 1)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-2") - .setTopics(null), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-3") - .setTopics(null), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-4") - .setTopics(null), - )), - false, - true - ).build(version) - ) + val groups = Map( + "group-1" -> List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1) + ).asJava, + "group-2" -> null, + "group-3" -> null, + "group-4" -> null, + ).asJava + buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) } if (version < 8) { @@ -8632,11 +8265,10 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) .setName("foo") - .setPartitionIndexes(util.List.of[Integer](0, 1)))), + .setPartitionIndexes(List[Integer](0, 1).asJava)).asJava), false )).thenReturn(group1Future) @@ -8667,33 +8299,14 @@ class KafkaApisTest extends Logging { false )).thenReturn(group4Future) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + kafkaApis.handleOffsetFetchRequest(requestChannelRequest) val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(fooId) - .setName(foo) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(200) - .setCommittedLeaderEpoch(2) - )) - )) - - val expectedGroup1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) - .setName(if (version < 10) foo else "") - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8702,15 +8315,15 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - )) - )) + ).asJava) + ).asJava) val group2Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-2") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(bar) - .setPartitions(util.List.of( + .setName("bar") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8723,8 +8336,8 @@ class KafkaApisTest extends Logging { .setPartitionIndex(2) .setCommittedOffset(300) .setCommittedLeaderEpoch(3) - )) - )) + ).asJava) + ).asJava) val group3Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") @@ -8734,7 +8347,7 @@ class KafkaApisTest extends Logging { .setGroupId("group-4") .setErrorCode(Errors.INVALID_GROUP_ID.code) - val expectedGroups = List(expectedGroup1Response, group2Response, group3Response, group4Response) + val expectedGroups = List(group1Response, group2Response, group3Response, group4Response) group1Future.complete(group1Response) group2Future.complete(group2Response) @@ -8742,196 +8355,50 @@ class KafkaApisTest extends Logging { group4Future.complete(group4Response) val response = verifyNoThrottling[OffsetFetchResponse](requestChannelRequest) - assertEquals(expectedGroups.toSet, response.data.groups.asScala.toSet) + assertEquals(expectedGroups.toSet, response.data.groups().asScala.toSet) } } @ParameterizedTest - // We only test with topic ids. - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 10) - def testHandleOffsetFetchWithUnknownTopicIds(version: Short): Unit = { - val foo = "foo" - val bar = "bar" - val fooId = Uuid.randomUuid() - val barId = Uuid.randomUuid() - addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) + def testHandleOffsetFetchWithSingleGroup(version: Short): Unit = { + // Version 0 gets offsets from Zookeeper. We are not interested + // in testing this here. + if (version == 0) return def makeRequest(version: Short): RequestChannel.Request = { - buildRequest( - OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0)), - // bar does not exist so it must return UNKNOWN_TOPIC_ID. - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(barId) - .setPartitionIndexes(util.List.of[Integer](0)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-2") - .setTopics(null) - )), - false, - true - ).build(version) - ) + buildRequest(new OffsetFetchRequest.Builder( + "group-1", + false, + List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1) + ).asJava, + false + ).build(version)) } val requestChannelRequest = makeRequest(version) - val group1Future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() + val future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() when(groupCoordinator.fetchOffsets( requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setTopicId(fooId) - .setName("foo") - .setPartitionIndexes(util.List.of[Integer](0)))), - false - )).thenReturn(group1Future) - - val group2Future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() - when(groupCoordinator.fetchAllOffsets( - requestChannelRequest.context, - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-2") - .setTopics(null), + .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("foo") + .setPartitionIndexes(List[Integer](0, 1).asJava)).asJava), false - )).thenReturn(group2Future) - + )).thenReturn(future) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + kafkaApis.handleOffsetFetchRequest(requestChannelRequest) val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(fooId) - .setName(foo) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1) - )) - )) - - val group2Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-2") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(foo) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1) - )), - // bar does not exist so it must be filtered out. - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(bar) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1) - )) - )) - - val expectedResponse = new OffsetFetchResponseData() - .setGroups(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(fooId) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1) - )), - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(barId) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(-1) - .setErrorCode(Errors.UNKNOWN_TOPIC_ID.code) - )) - )), - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-2") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(fooId) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1) - )) - )) - )) - - group1Future.complete(group1Response) - group2Future.complete(group2Response) - - val response = verifyNoThrottling[OffsetFetchResponse](requestChannelRequest) - assertEquals(expectedResponse, response.data) - } - - @ParameterizedTest - // The single group builder does not support topic ids. - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, toVersion = 9) - def testHandleOffsetFetchWithSingleGroup(version: Short): Unit = { - def makeRequest(version: Short): RequestChannel.Request = { - buildRequest(OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setRequireStable(false) - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setPartitionIndexes(util.List.of[Integer](0, 1)) - )) - )), - false - ).build(version)) - } - - val requestChannelRequest = makeRequest(version) - - val future = new CompletableFuture[OffsetFetchResponseData.OffsetFetchResponseGroup]() - when(groupCoordinator.fetchOffsets( - requestChannelRequest.context, - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setPartitionIndexes(util.List.of[Integer](0, 1)))), - false - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handleOffsetFetchRequest(requestChannelRequest) - - val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8940,18 +8407,18 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - )) - )) + ).asJava) + ).asJava) val expectedOffsetFetchResponse = if (version >= 8) { new OffsetFetchResponseData() - .setGroups(util.List.of(group1Response)) + .setGroups(List(group1Response).asJava) } else { new OffsetFetchResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartition() .setPartitionIndex(0) .setCommittedOffset(100) @@ -8960,8 +8427,8 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(if (version >= 5) 2 else -1) - )) - )) + ).asJava) + ).asJava) } future.complete(group1Response) @@ -8971,24 +8438,18 @@ class KafkaApisTest extends Logging { } @ParameterizedTest - // Version 1 does not support fetching offsets for all topics. - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 2) + @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH) def testHandleOffsetFetchAllOffsetsWithSingleGroup(version: Short): Unit = { - val foo = "foo" - val fooId = Uuid.randomUuid() - addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) + // Version 0 gets offsets from Zookeeper. Version 1 does not support fetching all + // offsets request. We are not interested in testing these here. + if (version < 2) return def makeRequest(version: Short): RequestChannel.Request = { - buildRequest(OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setRequireStable(false) - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(null) // all offsets. - )), + buildRequest(new OffsetFetchRequest.Builder( + "group-1", false, - true + null, // all offsets. + false ).build(version)) } @@ -9007,10 +8468,10 @@ class KafkaApisTest extends Logging { val group1Response = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(foo) - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) @@ -9019,36 +8480,18 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(2) - )) - )) + ).asJava) + ).asJava) val expectedOffsetFetchResponse = if (version >= 8) { new OffsetFetchResponseData() - .setGroups(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) foo else "") - .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) - .setPartitions(util.List.of( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(100) - .setCommittedLeaderEpoch(1), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(200) - .setCommittedLeaderEpoch(2) - )) - )) - )) + .setGroups(List(group1Response).asJava) } else { new OffsetFetchResponseData() - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopic() .setName("foo") - .setPartitions(util.List.of( + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartition() .setPartitionIndex(0) .setCommittedOffset(100) @@ -9057,8 +8500,8 @@ class KafkaApisTest extends Logging { .setPartitionIndex(1) .setCommittedOffset(200) .setCommittedLeaderEpoch(if (version >= 5) 2 else -1) - )) - )) + ).asJava) + ).asJava) } future.complete(group1Response) @@ -9067,60 +8510,25 @@ class KafkaApisTest extends Logging { assertEquals(expectedOffsetFetchResponse, response.data) } - @ParameterizedTest - // We don't test the non batched API. - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) - def testHandleOffsetFetchAuthorization(version: Short): Unit = { - val foo = "foo" - val bar = "bar" - val fooId = Uuid.randomUuid() - val barId = Uuid.randomUuid() - addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) - addTopicToMetadataCache(bar, topicId = barId, numPartitions = 2) - + @Test + def testHandleOffsetFetchAuthorization(): Unit = { def makeRequest(version: Short): RequestChannel.Request = { - buildRequest( - OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(barId) - .setPartitionIndexes(util.List.of[Integer](0)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-2") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(barId) - .setPartitionIndexes(util.List.of[Integer](0)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-3") - .setTopics(null), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-4") - .setTopics(null), - )), - false, - true - ).build(version) - ) + val groups = Map( + "group-1" -> List( + new TopicPartition("foo", 0), + new TopicPartition("bar", 0) + ).asJava, + "group-2" -> List( + new TopicPartition("foo", 0), + new TopicPartition("bar", 0) + ).asJava, + "group-3" -> null, + "group-4" -> null, + ).asJava + buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) } - val requestChannelRequest = makeRequest(version) + val requestChannelRequest = makeRequest(ApiKeys.OFFSET_FETCH.latestVersion) val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -9138,9 +8546,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } // group-1 is allowed and bar is allowed. @@ -9149,10 +8557,9 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) - .setPartitionIndexes(util.List.of[Integer](0)))), + .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("bar") + .setPartitionIndexes(List[Integer](0).asJava)).asJava), false )).thenReturn(group1Future) @@ -9170,67 +8577,62 @@ class KafkaApisTest extends Logging { val group1ResponseFromCoordinator = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(bar) - .setTopicId(barId) - .setPartitions(util.List.of( + .setName("bar") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - )) - )) + ).asJava) + ).asJava) val group3ResponseFromCoordinator = new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") - .setTopics(util.List.of( + .setTopics(List( // foo should be filtered out. new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - )), + ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(bar) - .setTopicId(barId) - .setPartitions(util.List.of( + .setName("bar") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - )) - )) + ).asJava) + ).asJava) val expectedOffsetFetchResponse = new OffsetFetchResponseData() - .setGroups(util.List.of( + .setGroups(List( // group-1 is authorized but foo is not. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) bar else "") - .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) - .setPartitions(util.List.of( + .setName("bar") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - )), + ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) foo else "") - .setTopicId(if (version >= 10) fooId else Uuid.ZERO_UUID) - .setPartitions(util.List.of( + .setName("foo") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) .setCommittedOffset(-1) - )) - )), + ).asJava) + ).asJava), // group-2 is not authorized. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-2") @@ -9238,22 +8640,21 @@ class KafkaApisTest extends Logging { // group-3 is authorized but foo is not. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-3") - .setTopics(util.List.of( + .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) bar else "") - .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) - .setPartitions(util.List.of( + .setName("bar") + .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(100) .setCommittedLeaderEpoch(1) - )) - )), + ).asJava) + ).asJava), // group-4 is not authorized. new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-4") .setErrorCode(Errors.GROUP_AUTHORIZATION_FAILED.code), - )) + ).asJava) group1Future.complete(group1ResponseFromCoordinator) group3Future.complete(group3ResponseFromCoordinator) @@ -9262,54 +8663,23 @@ class KafkaApisTest extends Logging { assertEquals(expectedOffsetFetchResponse, response.data) } - @ParameterizedTest - // We don't test the non batched API. - @ApiKeyVersionsSource(apiKey = ApiKeys.OFFSET_FETCH, fromVersion = 8) - def testHandleOffsetFetchWithUnauthorizedTopicAndTopLevelError(version: Short): Unit = { - val foo = "foo" - val bar = "bar" - val fooId = Uuid.randomUuid() - val barId = Uuid.randomUuid() - addTopicToMetadataCache(foo, topicId = fooId, numPartitions = 2) - addTopicToMetadataCache(bar, topicId = barId, numPartitions = 2) - + @Test + def testHandleOffsetFetchWithUnauthorizedTopicAndTopLevelError(): Unit = { def makeRequest(version: Short): RequestChannel.Request = { - buildRequest( - OffsetFetchRequest.Builder.forTopicIdsOrNames( - new OffsetFetchRequestData() - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-1") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(barId) - .setPartitionIndexes(util.List.of[Integer](0)) - )), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("group-2") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(foo) - .setTopicId(fooId) - .setPartitionIndexes(util.List.of[Integer](0)), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(barId) - .setPartitionIndexes(util.List.of[Integer](0)) - )) - )), - false, - true - ).build(version) - ) + val groups = Map( + "group-1" -> List( + new TopicPartition("foo", 0), + new TopicPartition("bar", 0) + ).asJava, + "group-2" -> List( + new TopicPartition("foo", 0), + new TopicPartition("bar", 0) + ).asJava + ).asJava + buildRequest(new OffsetFetchRequest.Builder(groups, false, false).build(version)) } - val requestChannelRequest = makeRequest(version) + val requestChannelRequest = makeRequest(ApiKeys.OFFSET_FETCH.latestVersion) val authorizer: Authorizer = mock(classOf[Authorizer]) @@ -9325,9 +8695,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } // group-1 and group-2 are allowed and bar is allowed. @@ -9336,10 +8706,9 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-1") - .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) - .setPartitionIndexes(util.List.of[Integer](0)))), + .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("bar") + .setPartitionIndexes(List[Integer](0).asJava)).asJava), false )).thenReturn(group1Future) @@ -9348,10 +8717,9 @@ class KafkaApisTest extends Logging { requestChannelRequest.context, new OffsetFetchRequestData.OffsetFetchRequestGroup() .setGroupId("group-2") - .setTopics(util.List.of(new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(bar) - .setTopicId(if (version >= 10) barId else Uuid.ZERO_UUID) - .setPartitionIndexes(util.List.of[Integer](0)))), + .setTopics(List(new OffsetFetchRequestData.OffsetFetchRequestTopics() + .setName("bar") + .setPartitionIndexes(List[Integer](0).asJava)).asJava), false )).thenReturn(group1Future) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) @@ -9365,12 +8733,12 @@ class KafkaApisTest extends Logging { .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code) val expectedOffsetFetchResponse = new OffsetFetchResponseData() - .setGroups(util.List.of( + .setGroups(List( new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("group-1") .setErrorCode(Errors.COORDINATOR_NOT_AVAILABLE.code), group2ResponseFromCoordinator - )) + ).asJava) group1Future.completeExceptionally(Errors.COORDINATOR_NOT_AVAILABLE.exception) group2Future.complete(group2ResponseFromCoordinator) @@ -9398,8 +8766,8 @@ class KafkaApisTest extends Logging { setupBasicMetadataCache(tp0.topic, numPartitions = 1, 1, topicId) val hw = 3 - val fetchDataBuilder = util.Map.of(tp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) - val fetchData = util.Map.of(tidp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) + val fetchDataBuilder = Collections.singletonMap(tp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) + val fetchData = Collections.singletonMap(tidp0, new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0, 0, Int.MaxValue, Optional.of(leaderEpoch))) val fetchFromFollower = buildRequest(new FetchRequest.Builder( ApiKeys.FETCH.oldestVersion(), ApiKeys.FETCH.latestVersion(), 1, 1, 1000, 0, fetchDataBuilder).build()) @@ -9444,14 +8812,14 @@ class KafkaApisTest extends Logging { @ApiKeyVersionsSource(apiKey = ApiKeys.LIST_GROUPS) def testListGroupsRequest(version: Short): Unit = { val listGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(if (version >= 4) util.List.of("Stable", "Empty") else util.List.of) - .setTypesFilter(if (version >= 5) util.List.of("classic", "consumer") else util.List.of) + .setStatesFilter(if (version >= 4) List("Stable", "Empty").asJava else List.empty.asJava) + .setTypesFilter(if (version >= 5) List("classic", "consumer").asJava else List.empty.asJava) val requestChannelRequest = buildRequest(new ListGroupsRequest.Builder(listGroupsRequest).build(version)) val expectedListGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(if (version >= 4) util.List.of("Stable", "Empty") else util.List.of) - .setTypesFilter(if (version >= 5) util.List.of("classic", "consumer") else util.List.of) + .setStatesFilter(if (version >= 4) List("Stable", "Empty").asJava else List.empty.asJava) + .setTypesFilter(if (version >= 5) List("classic", "consumer").asJava else List.empty.asJava) val future = new CompletableFuture[ListGroupsResponseData]() when(groupCoordinator.listGroups( @@ -9462,7 +8830,7 @@ class KafkaApisTest extends Logging { kafkaApis.handleListGroupsRequest(requestChannelRequest) val expectedListGroupsResponse = new ListGroupsResponseData() - .setGroups(util.List.of( + .setGroups(List( new ListGroupsResponseData.ListedGroup() .setGroupId("group1") .setProtocolType("protocol1") @@ -9478,7 +8846,7 @@ class KafkaApisTest extends Logging { .setProtocolType("protocol3") .setGroupState(if (version >= 4) "Stable" else "") .setGroupType(if (version >= 5) "classic" else ""), - )) + ).asJava) future.complete(expectedListGroupsResponse) val response = verifyNoThrottling[ListGroupsResponse](requestChannelRequest) @@ -9488,14 +8856,14 @@ class KafkaApisTest extends Logging { @Test def testListGroupsRequestFutureFailed(): Unit = { val listGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(util.List.of("Stable", "Empty")) - .setTypesFilter(util.List.of("classic", "consumer")) + .setStatesFilter(List("Stable", "Empty").asJava) + .setTypesFilter(List("classic", "consumer").asJava) val requestChannelRequest = buildRequest(new ListGroupsRequest.Builder(listGroupsRequest).build()) val expectedListGroupsRequest = new ListGroupsRequestData() - .setStatesFilter(util.List.of("Stable", "Empty")) - .setTypesFilter(util.List.of("classic", "consumer")) + .setStatesFilter(List("Stable", "Empty").asJava) + .setTypesFilter(List("classic", "consumer").asJava) val future = new CompletableFuture[ListGroupsResponseData]() when(groupCoordinator.listGroups( @@ -9657,8 +9025,8 @@ class KafkaApisTest extends Logging { assertEquals(clusterId, describeClusterResponse.data.clusterId) assertEquals(8096, describeClusterResponse.data.clusterAuthorizedOperations) - assertEquals(util.Set.copyOf(metadataCache.getAliveBrokerNodes(plaintextListener)), - util.Set.copyOf(describeClusterResponse.nodes.values)) + assertEquals(metadataCache.getAliveBrokerNodes(plaintextListener).toSet, + describeClusterResponse.nodes.asScala.values.toSet) } /** @@ -9712,11 +9080,11 @@ class KafkaApisTest extends Logging { private def testConsumerListOffsetWithUnsupportedVersion(timestamp: Long, version: Short): Unit = { val tp = new TopicPartition("foo", 0) - val targetTimes = util.List.of(new ListOffsetsTopic() + val targetTimes = List(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(util.List.of(new ListOffsetsPartition() + .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) - .setTimestamp(timestamp)))) + .setTimestamp(timestamp)).asJava)).asJava when(replicaManager.fetchOffset( ArgumentMatchers.any[Seq[ListOffsetsTopic]](), @@ -9727,11 +9095,11 @@ class KafkaApisTest extends Logging { ArgumentMatchers.anyInt(), // correlationId ArgumentMatchers.anyShort(), // version ArgumentMatchers.any[(Errors, ListOffsetsPartition) => ListOffsetsPartitionResponse](), - ArgumentMatchers.any[Consumer[util.Collection[ListOffsetsTopicResponse]]], + ArgumentMatchers.any[List[ListOffsetsTopicResponse] => Unit](), ArgumentMatchers.anyInt() // timeoutMs )).thenAnswer(ans => { val version = ans.getArgument[Short](6) - val callback = ans.getArgument[Consumer[util.List[ListOffsetsTopicResponse]]](8) + val callback = ans.getArgument[List[ListOffsetsTopicResponse] => Unit](8) val errorCode = if (ReplicaManager.isListOffsetsTimestampUnsupported(timestamp, version)) Errors.UNSUPPORTED_VERSION.code() else @@ -9741,11 +9109,11 @@ class KafkaApisTest extends Logging { .setOffset(ListOffsetsResponse.UNKNOWN_OFFSET) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) + callback(List(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) }) val data = new ListOffsetsRequestData().setTopics(targetTimes).setReplicaId(ListOffsetsRequest.CONSUMER_REPLICA_ID) - val listOffsetRequest = ListOffsetsRequest.parse(MessageUtil.toByteBufferAccessor(data, version), version) + val listOffsetRequest = ListOffsetsRequest.parse(MessageUtil.toByteBuffer(data, version), version) val request = buildRequest(listOffsetRequest) kafkaApis = createKafkaApis() @@ -9764,11 +9132,11 @@ class KafkaApisTest extends Logging { val tp = new TopicPartition("foo", 0) val latestOffset = 15L - val targetTimes = util.List.of(new ListOffsetsTopic() + val targetTimes = List(new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(util.List.of(new ListOffsetsPartition() + .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) - .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)))) + .setTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP)).asJava)).asJava when(replicaManager.fetchOffset( ArgumentMatchers.eq(targetTimes.asScala.toSeq), @@ -9779,16 +9147,16 @@ class KafkaApisTest extends Logging { ArgumentMatchers.anyInt(), // correlationId ArgumentMatchers.anyShort(), // version ArgumentMatchers.any[(Errors, ListOffsetsPartition) => ListOffsetsPartitionResponse](), - ArgumentMatchers.any[Consumer[util.Collection[ListOffsetsTopicResponse]]], + ArgumentMatchers.any[List[ListOffsetsTopicResponse] => Unit](), ArgumentMatchers.anyInt() // timeoutMs )).thenAnswer(ans => { - val callback = ans.getArgument[Consumer[util.List[ListOffsetsTopicResponse]]](8) + val callback = ans.getArgument[List[ListOffsetsTopicResponse] => Unit](8) val partitionResponse = new ListOffsetsPartitionResponse() .setErrorCode(Errors.NONE.code()) .setOffset(latestOffset) .setTimestamp(ListOffsetsResponse.UNKNOWN_TIMESTAMP) .setPartitionIndex(tp.partition()) - callback.accept(util.List.of(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(util.List.of(partitionResponse)))) + callback(List(new ListOffsetsTopicResponse().setName(tp.topic()).setPartitions(List(partitionResponse).asJava))) }) val listOffsetRequest = ListOffsetsRequest.Builder.forConsumer(true, isolationLevel) @@ -9810,7 +9178,7 @@ class KafkaApisTest extends Logging { private def createWriteTxnMarkersRequest(partitions: util.List[TopicPartition]) = { val writeTxnMarkersRequest = new WriteTxnMarkersRequest.Builder( - util.List.of(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build() + asList(new TxnMarkerEntry(1, 1.toShort, 0, TransactionResult.COMMIT, partitions))).build() (writeTxnMarkersRequest, buildRequest(writeTxnMarkersRequest)) } @@ -9819,17 +9187,6 @@ class KafkaApisTest extends Logging { fromPrivilegedListener: Boolean = false, requestHeader: Option[RequestHeader] = None, requestMetrics: RequestChannelMetrics = requestChannelMetrics): RequestChannel.Request = { - buildRequest(request, new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "Alice"), InetAddress.getLocalHost, listenerName, - fromPrivilegedListener, requestHeader, requestMetrics) - } - - private def buildRequest(request: AbstractRequest, - principal: KafkaPrincipal, - clientAddress: InetAddress, - listenerName: ListenerName, - fromPrivilegedListener: Boolean, - requestHeader: Option[RequestHeader], - requestMetrics: RequestChannelMetrics): RequestChannel.Request = { val buffer = request.serializeWithHeader( requestHeader.getOrElse(new RequestHeader(request.apiKey, request.version, clientId, 0))) @@ -9839,8 +9196,8 @@ class KafkaApisTest extends Logging { // and have a non KafkaPrincipal.ANONYMOUS principal. This test is done before the check // for forwarding because after forwarding the context will have a different context. // We validate the context authenticated failure case in other integration tests. - val context = new RequestContext(header, "1", clientAddress, Optional.empty(), - principal, listenerName, SecurityProtocol.SSL, + val context = new RequestContext(header, "1", InetAddress.getLocalHost, Optional.empty(), + new KafkaPrincipal(KafkaPrincipal.USER_TYPE, "Alice"), listenerName, SecurityProtocol.SSL, ClientInformation.EMPTY, fromPrivilegedListener, Optional.of(kafkaPrincipalSerde)) new RequestChannel.Request(processor = 1, context = context, startTimeNanos = 0, MemoryPool.NONE, buffer, requestMetrics, envelope = None) @@ -9856,13 +9213,13 @@ class KafkaApisTest extends Logging { any() ) val response = capturedResponse.getValue - val readable = MessageUtil.toByteBufferAccessor( + val buffer = MessageUtil.toByteBuffer( response.data, request.context.header.apiVersion ) AbstractResponse.parseResponse( request.context.header.apiKey, - readable, + buffer, request.context.header.apiVersion, ).asInstanceOf[T] } @@ -9877,7 +9234,7 @@ class KafkaApisTest extends Logging { any() ) val response = capturedResponse.getValue - val readable = MessageUtil.toByteBufferAccessor( + val buffer = MessageUtil.toByteBuffer( response.data, request.context.header.apiVersion ) @@ -9893,7 +9250,7 @@ class KafkaApisTest extends Logging { AbstractResponse.parseResponse( request.context.header.apiKey, - readable, + buffer, request.context.header.apiVersion, ).asInstanceOf[T] } @@ -9908,7 +9265,7 @@ class KafkaApisTest extends Logging { val topicRecord = new TopicRecord().setName(topic).setTopicId(topicId) results += topicRecord - val replicas = util.List.of(0.asInstanceOf[Integer]) + val replicas = List(0.asInstanceOf[Integer]).asJava def createPartitionRecord(partition: Int) = new PartitionRecord() .setTopicId(topicId) @@ -9963,7 +9320,7 @@ class KafkaApisTest extends Logging { val data = new AlterReplicaLogDirsRequestData() val dir = new AlterReplicaLogDirsRequestData.AlterReplicaLogDir() .setPath("/foo") - dir.topics().add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic().setName("t0").setPartitions(util.List.of(0, 1, 2))) + dir.topics().add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic().setName("t0").setPartitions(asList(0, 1, 2))) data.dirs().add(dir) val alterReplicaLogDirsRequest = new AlterReplicaLogDirsRequest.Builder( data @@ -9995,9 +9352,9 @@ class KafkaApisTest extends Logging { new TopicPartition(tr.topicName, pr.partitionIndex) -> Errors.forCode(pr.errorCode) } }.toMap) - assertEquals(util.Map.of(Errors.NONE, 1, - Errors.LOG_DIR_NOT_FOUND, 1, - Errors.INVALID_TOPIC_EXCEPTION, 1), response.errorCounts) + assertEquals(Map(Errors.NONE -> 1, + Errors.LOG_DIR_NOT_FOUND -> 1, + Errors.INVALID_TOPIC_EXCEPTION -> 1).asJava, response.errorCounts) } @Test @@ -10019,7 +9376,7 @@ class KafkaApisTest extends Logging { topicIds.put(tp.topicPartition.topic, tp.topicId) topicNames.put(tp.topicId, tp.topicPartition.topic) } - FetchResponse.of(Errors.NONE, 100, 100, responseData, List.empty.asJava) + FetchResponse.of(Errors.NONE, 100, 100, responseData) } val throttledPartition = new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("throttledData", 0)) @@ -10045,45 +9402,45 @@ class KafkaApisTest extends Logging { val tp4 = new TopicPartition("invalid;topic", 1) val authorizer: Authorizer = mock(classOf[Authorizer]) - val data = new DescribeProducersRequestData().setTopics(util.List.of( + val data = new DescribeProducersRequestData().setTopics(List( new DescribeProducersRequestData.TopicRequest() .setName(tp1.topic) - .setPartitionIndexes(util.List.of(Int.box(tp1.partition))), + .setPartitionIndexes(List(Int.box(tp1.partition)).asJava), new DescribeProducersRequestData.TopicRequest() .setName(tp2.topic) - .setPartitionIndexes(util.List.of(Int.box(tp2.partition))), + .setPartitionIndexes(List(Int.box(tp2.partition)).asJava), new DescribeProducersRequestData.TopicRequest() .setName(tp3.topic) - .setPartitionIndexes(util.List.of(Int.box(tp3.partition))), + .setPartitionIndexes(List(Int.box(tp3.partition)).asJava), new DescribeProducersRequestData.TopicRequest() .setName(tp4.topic) - .setPartitionIndexes(util.List.of(Int.box(tp4.partition))) - )) + .setPartitionIndexes(List(Int.box(tp4.partition)).asJava) + ).asJava) def buildExpectedActions(topic: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) val action = new Action(AclOperation.READ, pattern, 1, true, true) - util.List.of(action) + Collections.singletonList(action) } // Topic `foo` is authorized and present in the metadata addTopicToMetadataCache(tp1.topic, 4) // We will only access the first topic when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp1.topic)))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) // Topic `bar` is not authorized when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp2.topic)))) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) // Topic `baz` is authorized, but not present in the metadata when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions(tp3.topic)))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) when(replicaManager.activeProducerState(tp1)) .thenReturn(new DescribeProducersResponseData.PartitionResponse() .setErrorCode(Errors.NONE.code) .setPartitionIndex(tp1.partition) - .setActiveProducers(util.List.of( + .setActiveProducers(List( new DescribeProducersResponseData.ProducerState() .setProducerId(12345L) .setProducerEpoch(15) @@ -10091,7 +9448,7 @@ class KafkaApisTest extends Logging { .setLastTimestamp(time.milliseconds()) .setCurrentTxnStartOffset(-1) .setCoordinatorEpoch(200) - ))) + ).asJava)) val describeProducersRequest = new DescribeProducersRequest.Builder(data).build() val request = buildRequest(describeProducersRequest) @@ -10133,7 +9490,7 @@ class KafkaApisTest extends Logging { def testDescribeTransactions(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) val data = new DescribeTransactionsRequestData() - .setTransactionalIds(util.List.of("foo", "bar")) + .setTransactionalIds(List("foo", "bar").asJava) val describeTransactionsRequest = new DescribeTransactionsRequest.Builder(data).build() val request = buildRequest(describeTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -10142,7 +9499,7 @@ class KafkaApisTest extends Logging { def buildExpectedActions(transactionalId: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - util.List.of(action) + Collections.singletonList(action) } when(txnCoordinator.handleDescribeTransactions("foo")) @@ -10156,10 +9513,10 @@ class KafkaApisTest extends Logging { .setTransactionTimeoutMs(10000)) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("foo")))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("bar")))) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleDescribeTransactionsRequest(request) @@ -10184,7 +9541,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) val transactionalId = "foo" val data = new DescribeTransactionsRequestData() - .setTransactionalIds(util.List.of(transactionalId)) + .setTransactionalIds(List(transactionalId).asJava) val describeTransactionsRequest = new DescribeTransactionsRequest.Builder(data).build() val request = buildRequest(describeTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -10197,10 +9554,10 @@ class KafkaApisTest extends Logging { ): Unit = { val pattern = new ResourcePattern(resourceType, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - val actions = util.List.of(action) + val actions = Collections.singletonList(action) when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(actions))) - .thenReturn(util.List.of(result)) + .thenReturn(Seq(result).asJava) } // Principal is authorized to one of the two topics. The second topic should be @@ -10256,7 +9613,7 @@ class KafkaApisTest extends Logging { when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) - when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L, null)) + when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L)) .thenReturn(new ListTransactionsResponseData() .setErrorCode(Errors.COORDINATOR_LOAD_IN_PROGRESS.code)) kafkaApis = createKafkaApis() @@ -10270,7 +9627,7 @@ class KafkaApisTest extends Logging { @Test def testListTransactionsAuthorization(): Unit = { val authorizer: Authorizer = mock(classOf[Authorizer]) - val data = new ListTransactionsRequestData().setTransactionalIdPattern("my.*") + val data = new ListTransactionsRequestData() val listTransactionsRequest = new ListTransactionsRequest.Builder(data).build() val request = buildRequest(listTransactionsRequest) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), @@ -10278,15 +9635,15 @@ class KafkaApisTest extends Logging { val transactionStates = new util.ArrayList[ListTransactionsResponseData.TransactionState]() transactionStates.add(new ListTransactionsResponseData.TransactionState() - .setTransactionalId("myFoo") + .setTransactionalId("foo") .setProducerId(12345L) .setTransactionState("Ongoing")) transactionStates.add(new ListTransactionsResponseData.TransactionState() - .setTransactionalId("myBar") + .setTransactionalId("bar") .setProducerId(98765) .setTransactionState("PrepareAbort")) - when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L, "my.*")) + when(txnCoordinator.handleListTransactions(Set.empty[Long], Set.empty[String], -1L)) .thenReturn(new ListTransactionsResponseData() .setErrorCode(Errors.NONE.code) .setTransactionStates(transactionStates)) @@ -10294,21 +9651,21 @@ class KafkaApisTest extends Logging { def buildExpectedActions(transactionalId: String): util.List[Action] = { val pattern = new ResourcePattern(ResourceType.TRANSACTIONAL_ID, transactionalId, PatternType.LITERAL) val action = new Action(AclOperation.DESCRIBE, pattern, 1, true, true) - util.List.of(action) + Collections.singletonList(action) } - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("myFoo")))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("foo")))) + .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("myBar")))) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActions("bar")))) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis(authorizer = Some(authorizer)) kafkaApis.handleListTransactionsRequest(request) val response = verifyNoThrottling[ListTransactionsResponse](request) assertEquals(1, response.data.transactionStates.size()) val transactionState = response.data.transactionStates.get(0) - assertEquals("myFoo", transactionState.transactionalId) + assertEquals("foo", transactionState.transactionalId) assertEquals(12345L, transactionState.producerId) assertEquals("Ongoing", transactionState.transactionState) } @@ -10316,7 +9673,7 @@ class KafkaApisTest extends Logging { @Test def testEmptyLegacyAlterConfigsRequestWithKRaft(): Unit = { val request = buildRequest(new AlterConfigsRequest(new AlterConfigsRequestData(), 1.toShort)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) kafkaApis = createKafkaApis() @@ -10329,20 +9686,20 @@ class KafkaApisTest extends Logging { def testInvalidLegacyAlterConfigsRequestWithKRaft(): Unit = { val request = buildRequest(new AlterConfigsRequest(new AlterConfigsRequestData(). setValidateOnly(true). - setResources(new LAlterConfigsResourceCollection(util.List.of( + setResources(new LAlterConfigsResourceCollection(asList( new LAlterConfigsResource(). setResourceName(brokerId.toString). setResourceType(BROKER.id()). - setConfigs(new LAlterableConfigCollection(util.List.of(new LAlterableConfig(). + setConfigs(new LAlterableConfigCollection(asList(new LAlterableConfig(). setName("foo"). setValue(null)).iterator()))).iterator())), 1.toShort)) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleAlterConfigsRequest(request) val response = verifyNoThrottling[AlterConfigsResponse](request) - assertEquals(new AlterConfigsResponseData().setResponses(util.List.of( + assertEquals(new AlterConfigsResponseData().setResponses(asList( new LAlterConfigsResourceResponse(). setErrorCode(Errors.INVALID_REQUEST.code()). setErrorMessage("Null value not supported for : foo"). @@ -10353,13 +9710,8 @@ class KafkaApisTest extends Logging { @Test def testEmptyIncrementalAlterConfigsRequestWithKRaft(): Unit = { - val alterConfigsRequest = new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(), 1.toShort) - assertEquals( - "IncrementalAlterConfigsRequestData(resources=[], validateOnly=false)", - alterConfigsRequest.toString - ) - val request = buildRequest(alterConfigsRequest) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + val request = buildRequest(new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(), 1.toShort)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) kafkaApis = createKafkaApis() @@ -10370,30 +9722,22 @@ class KafkaApisTest extends Logging { @Test def testLog4jIncrementalAlterConfigsRequestWithKRaft(): Unit = { - val alterConfigsRequest = new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(). + val request = buildRequest(new IncrementalAlterConfigsRequest(new IncrementalAlterConfigsRequestData(). setValidateOnly(true). - setResources(new IAlterConfigsResourceCollection(util.List.of(new IAlterConfigsResource(). + setResources(new IAlterConfigsResourceCollection(asList(new IAlterConfigsResource(). setResourceName(brokerId.toString). setResourceType(BROKER_LOGGER.id()). - setConfigs(new IAlterableConfigCollection(util.List.of(new IAlterableConfig(). - setName(LoggingController.ROOT_LOGGER). - setValue("TRACE")).iterator()))).iterator())), 1.toShort) - assertEquals( - "IncrementalAlterConfigsRequestData(resources=[" + - "AlterConfigsResource(resourceType=" + BROKER_LOGGER.id() + ", " + - "resourceName='"+ brokerId + "', " + - "configs=[AlterableConfig(name='" + LoggingController.ROOT_LOGGER + "', configOperation=0, value='REDACTED')])], " + - "validateOnly=true)", - alterConfigsRequest.toString - ) - val request = buildRequest(alterConfigsRequest) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + setConfigs(new IAlterableConfigCollection(asList(new IAlterableConfig(). + setName(Log4jController.ROOT_LOGGER). + setValue("TRACE")).iterator()))).iterator())), + 1.toShort)) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) when(clientRequestQuotaManager.maybeRecordAndGetThrottleTimeMs(any[RequestChannel.Request](), any[Long])).thenReturn(0) kafkaApis = createKafkaApis() kafkaApis.handleIncrementalAlterConfigsRequest(request) val response = verifyNoThrottling[IncrementalAlterConfigsResponse](request) - assertEquals(new IncrementalAlterConfigsResponseData().setResponses(util.List.of( + assertEquals(new IncrementalAlterConfigsResponseData().setResponses(asList( new IAlterConfigsResourceResponse(). setErrorCode(0.toShort). setErrorMessage(null). @@ -10408,8 +9752,8 @@ class KafkaApisTest extends Logging { val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) + val cache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) + val delta = new MetadataDelta(MetadataImage.EMPTY); delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -10485,7 +9829,7 @@ class KafkaApisTest extends Logging { val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) kafkaApis = createKafkaApis( authorizer = Some(authorizer), featureVersions = Seq(GroupVersion.GV_1) @@ -10506,7 +9850,7 @@ class KafkaApisTest extends Logging { val consumerGroupHeartbeatRequest = new ConsumerGroupHeartbeatRequestData() .setGroupId(groupId) - .setSubscribedTopicNames(util.List.of(fooTopicName, barTopicName, zarTopicName)) + .setSubscribedTopicNames(List(fooTopicName, barTopicName, zarTopicName).asJava) val requestChannelRequest = buildRequest(new ConsumerGroupHeartbeatRequest.Builder(consumerGroupHeartbeatRequest).build()) @@ -10521,9 +9865,9 @@ class KafkaApisTest extends Logging { any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } kafkaApis = createKafkaApis( @@ -10536,14 +9880,110 @@ class KafkaApisTest extends Logging { assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.errorCode) } + @ParameterizedTest + @ValueSource(booleans = Array(true, false)) + def testConsumerGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { + val fooTopicName = "foo" + val barTopicName = "bar" + metadataCache = mock(classOf[KRaftMetadataCache]) + + val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + .setIncludeAuthorizedOperations(includeAuthorizedOperations) + consumerGroupDescribeRequestData.groupIds.addAll(groupIds) + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) + + val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() + when(groupCoordinator.consumerGroupDescribe( + any[RequestContext], + any[util.List[String]] + )).thenReturn(future) + kafkaApis = createKafkaApis( + featureVersions = Seq(GroupVersion.GV_1) + ) + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val member0 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member0") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + + val member1 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member1") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName), + new TopicPartitions().setTopicName(barTopicName)).asJava)) + + val member2 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member2") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + + future.complete(List( + new DescribedGroup() + .setGroupId(groupIds.get(0)) + .setMembers(List(member0).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(1)) + .setMembers(List(member0, member1).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(2)) + .setMembers(List(member2).asJava) + ).asJava) + + var authorizedOperationsInt = Int.MinValue + if (includeAuthorizedOperations) { + authorizedOperationsInt = Utils.to32BitField( + AclEntry.supportedOperations(ResourceType.GROUP).asScala + .map(_.code.asInstanceOf[JByte]).asJava) + } + + // Can't reuse the above list here because we would not test the implementation in KafkaApis then + val describedGroups = List( + new DescribedGroup() + .setGroupId(groupIds.get(0)) + .setMembers(List(member0).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(1)) + .setMembers(List(member0, member1).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(2)) + .setMembers(List(member2).asJava) + ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) + val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() + .setGroups(describedGroups.asJava) + + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) + + assertEquals(expectedConsumerGroupDescribeResponseData, response.data) + } + @Test - def testStreamsGroupHeartbeatReturnsUnsupportedVersion(): Unit = { - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + def testConsumerGroupDescribeReturnsUnsupportedVersion(): Unit = { + val groupId = "group0" + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + consumerGroupDescribeRequestData.groupIds.add(groupId) + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val errorCode = Errors.UNSUPPORTED_VERSION.code + val expectedDescribedGroup = new DescribedGroup().setGroupId(groupId).setErrorCode(errorCode) + val expectedResponse = new ConsumerGroupDescribeResponseData() + expectedResponse.groups.add(expectedDescribedGroup) metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) + val cache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) + val delta = new MetadataDelta(MetadataImage.EMPTY); delta.replay(new FeatureLevelRecord() .setName(MetadataVersion.FEATURE_NAME) .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) @@ -10553,2871 +9993,467 @@ class KafkaApisTest extends Logging { } kafkaApis = createKafkaApis() kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - val expectedHeartbeatResponse = new StreamsGroupHeartbeatResponseData() - .setErrorCode(Errors.UNSUPPORTED_VERSION.code) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(expectedHeartbeatResponse, response.data) + assertEquals(expectedResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequest(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - + def testConsumerGroupDescribeAuthorizationFailed(): Unit = { metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + consumerGroupDescribeRequestData.groupIds.add("group-id") + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val authorizer: Authorizer = mock(classOf[Authorizer]) + when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( - requestChannelRequest.context, - streamsGroupHeartbeatRequest + val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() + when(groupCoordinator.consumerGroupDescribe( + any[RequestContext], + any[util.List[String]] )).thenReturn(future) - kafkaApis = createKafkaApis() + future.complete(List().asJava) + kafkaApis = createKafkaApis( + authorizer = Some(authorizer), + featureVersions = Seq(GroupVersion.GV_1) + ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() - .setMemberId("member") - - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, util.Map.of())) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(streamsGroupHeartbeatResponse, response.data) + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.groups.get(0).errorCode) } @Test - def testStreamsGroupHeartbeatRequestWithAuthorizedTopology(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - + def testConsumerGroupDescribeFutureFailed(): Unit = { metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val groupId = "group" - val fooTopicName = "foo" - val barTopicName = "bar" - val zarTopicName = "zar" - val tarTopicName = "tar" - val booTopicName = "boo" - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId(groupId).setTopology( - new StreamsGroupHeartbeatRequestData.Topology() - .setEpoch(3) - .setSubtopologies( - util.List.of( - new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology1") - .setSourceTopics(util.List.of(fooTopicName)) - .setRepartitionSinkTopics(util.List.of(barTopicName)) - .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))), - new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology2") - .setSourceTopics(util.List.of(zarTopicName)) - .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(barTopicName))) - ) - ) - ) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + consumerGroupDescribeRequestData.groupIds.add("group-id") + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - groupId -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.ALLOWED, - zarTopicName -> AuthorizationResult.ALLOWED, - tarTopicName -> AuthorizationResult.ALLOWED, - booTopicName -> AuthorizationResult.ALLOWED - ) - when(authorizer.authorize( + val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() + when(groupCoordinator.consumerGroupDescribe( any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( - requestChannelRequest.context, - streamsGroupHeartbeatRequest + any[util.List[String]] )).thenReturn(future) kafkaApis = createKafkaApis( - authorizer = Some(authorizer) + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() - .setMemberId("member") - - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, util.Map.of())) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(streamsGroupHeartbeatResponse, response.data) - } - - @Test - def testStreamsGroupHeartbeatRequestFutureFailed(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) - - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( - requestChannelRequest.context, - streamsGroupHeartbeatRequest - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.errorCode) - } - - @Test - def testStreamsGroupHeartbeatRequestGroupAuthorizationFailed(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) + assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.groups.get(0).errorCode) } @Test - def testStreamsGroupHeartbeatRequestTopicAuthorizationFailed(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val groupId = "group" + def testConsumerGroupDescribeFilterUnauthorizedTopics(): Unit = { val fooTopicName = "foo" val barTopicName = "bar" - val zarTopicName = "zar" - val tarTopicName = "tar" - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId(groupId).setTopology( - new StreamsGroupHeartbeatRequestData.Topology() - .setEpoch(3) - .setSubtopologies( - util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(util.List.of(fooTopicName)) - .setRepartitionSinkTopics(util.List.of(barTopicName)) - .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(zarTopicName))) - .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName(tarTopicName))) - ) - ) - ) + val errorMessage = "The group has described topic(s) that the client is not authorized to describe." + + metadataCache = mock(classOf[KRaftMetadataCache]) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val groupIds = List("group-id-0", "group-id-1", "group-id-2").asJava + val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() + .setGroupIds(groupIds) + val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) val authorizer: Authorizer = mock(classOf[Authorizer]) val acls = Map( - groupId -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.DENIED, - zarTopicName -> AuthorizationResult.ALLOWED, - tarTopicName -> AuthorizationResult.ALLOWED + groupIds.get(0) -> AuthorizationResult.ALLOWED, + groupIds.get(1) -> AuthorizationResult.ALLOWED, + groupIds.get(2) -> AuthorizationResult.ALLOWED, + fooTopicName -> AuthorizationResult.ALLOWED, + barTopicName -> AuthorizationResult.DENIED, ) when(authorizer.authorize( any[RequestContext], any[util.List[Action]] )).thenAnswer { invocation => val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList + actions.asScala.map { action => + acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED) + }.asJava } + val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() + when(groupCoordinator.consumerGroupDescribe( + any[RequestContext], + any[util.List[String]] + )).thenReturn(future) kafkaApis = createKafkaApis( - authorizer = Some(authorizer) + authorizer = Some(authorizer), + featureVersions = Seq(GroupVersion.GV_1) ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.errorCode) - } + val member0 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member0") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) - @Test - def testStreamsGroupHeartbeatRequestProtocolDisabledViaConfig(): Unit = { - metadataCache = mock(classOf[KRaftMetadataCache]) + val member1 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member1") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName), + new TopicPartitions().setTopicName(barTopicName)).asJava)) - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + val member2 = new ConsumerGroupDescribeResponseData.Member() + .setMemberId("member2") + .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(barTopicName)).asJava)) + .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() + .setTopicPartitions(List( + new TopicPartitions().setTopicName(fooTopicName)).asJava)) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + future.complete(List( + new DescribedGroup() + .setGroupId(groupIds.get(0)) + .setMembers(List(member0).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(1)) + .setMembers(List(member0, member1).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(2)) + .setMembers(List(member2).asJava) + ).asJava) - kafkaApis = createKafkaApis( - overrideProperties = Map(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG -> "classic,consumer") - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() + .setGroups(List( + new DescribedGroup() + .setGroupId(groupIds.get(0)) + .setMembers(List(member0).asJava), + new DescribedGroup() + .setGroupId(groupIds.get(1)) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setErrorMessage(errorMessage), + new DescribedGroup() + .setGroupId(groupIds.get(2)) + .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) + .setErrorMessage(errorMessage) + ).asJava) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.UNSUPPORTED_VERSION.code, response.data.errorCode) + val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) + + assertEquals(expectedConsumerGroupDescribeResponseData, response.data) } @Test - def testStreamsGroupHeartbeatRequestProtocolDisabledViaFeature(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 0.toShort)) + def testGetTelemetrySubscriptions(): Unit = { + val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder( + new GetTelemetrySubscriptionsRequestData(), true).build()) - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) + when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) + when(clientMetricsManager.processGetTelemetrySubscriptionRequest(any[GetTelemetrySubscriptionsRequest](), + any[RequestContext]())).thenReturn(new GetTelemetrySubscriptionsResponse( + new GetTelemetrySubscriptionsResponseData())) + + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + + val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + val expectedResponse = new GetTelemetrySubscriptionsResponseData() + assertEquals(expectedResponse, response.data) + } + + @Test + def testGetTelemetrySubscriptionsWithException(): Unit = { + val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder( + new GetTelemetrySubscriptionsRequestData(), true).build()) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) + when(clientMetricsManager.processGetTelemetrySubscriptionRequest(any[GetTelemetrySubscriptionsRequest](), + any[RequestContext]())).thenThrow(new RuntimeException("test")) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + kafkaApis.handle(request, RequestLocal.noCaching) + + val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.UNSUPPORTED_VERSION.code, response.data.errorCode) + val expectedResponse = new GetTelemetrySubscriptionsResponseData().setErrorCode(Errors.INVALID_REQUEST.code) + assertEquals(expectedResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequestInvalidTopicNames(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group").setTopology( - new StreamsGroupHeartbeatRequestData.Topology() - .setEpoch(3) - .setSubtopologies( - util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(util.List.of("a ")) - .setRepartitionSinkTopics(util.List.of("b?")) - .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("c!"))) - .setStateChangelogTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("d/"))) - ) - ) - ) + def testPushTelemetry(): Unit = { + val request = buildRequest(new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true).build()) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) + when(clientMetricsManager.processPushTelemetryRequest(any[PushTelemetryRequest](), any[RequestContext]())) + .thenReturn(new PushTelemetryResponse(new PushTelemetryResponseData())) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[PushTelemetryResponse](request) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.STREAMS_INVALID_TOPOLOGY.code, response.data.errorCode) - assertEquals("Topic names a ,b?,c!,d/ are not valid topic names.", response.data.errorMessage()) + val expectedResponse = new PushTelemetryResponseData().setErrorCode(Errors.NONE.code) + assertEquals(expectedResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequestInternalTopicNames(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group").setTopology( - new StreamsGroupHeartbeatRequestData.Topology() - .setEpoch(3) - .setSubtopologies( - util.List.of(new StreamsGroupHeartbeatRequestData.Subtopology().setSubtopologyId("subtopology") - .setSourceTopics(util.List.of("__consumer_offsets")) - .setRepartitionSinkTopics(util.List.of("__transaction_state")) - .setRepartitionSourceTopics(util.List.of(new StreamsGroupHeartbeatRequestData.TopicInfo().setName("__share_group_state"))) - ) - ) - ) + def testPushTelemetryWithException(): Unit = { + val request = buildRequest(new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true).build()) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) + when(clientMetricsManager.processPushTelemetryRequest(any[PushTelemetryRequest](), any[RequestContext]())) + .thenThrow(new RuntimeException("test")) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[PushTelemetryResponse](request) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.STREAMS_INVALID_TOPOLOGY.code, response.data.errorCode) - assertEquals("Use of Kafka internal topics __consumer_offsets,__transaction_state,__share_group_state in a Kafka Streams topology is prohibited.", response.data.errorMessage()) + val expectedResponse = new PushTelemetryResponseData().setErrorCode(Errors.INVALID_REQUEST.code) + assertEquals(expectedResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequestWithInternalTopicsToCreate(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + def testListClientMetricsResources(): Unit = { + val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) + val resources = new mutable.HashSet[String] + resources.add("test1") + resources.add("test2") + when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) + val expectedResponse = new ListClientMetricsResourcesResponseData().setClientMetricsResources( + resources.map(resource => new ClientMetricsResource().setName(resource)).toBuffer.asJava) + assertEquals(expectedResponse, response.data) + } - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + @Test + def testListClientMetricsResourcesEmptyResponse(): Unit = { + val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val resources = new mutable.HashSet[String] + when(clientMetricsManager.listClientMetricsResources).thenReturn(resources.asJava) + kafkaApis = createKafkaApis() + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) + val expectedResponse = new ListClientMetricsResourcesResponseData() + assertEquals(expectedResponse, response.data) + } - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( - requestChannelRequest.context, - streamsGroupHeartbeatRequest - )).thenReturn(future) + @Test + def testListClientMetricsResourcesWithException(): Unit = { + val request = buildRequest(new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()).build()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + when(clientMetricsManager.listClientMetricsResources).thenThrow(new RuntimeException("test")) kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val missingTopics = Map("test" -> new CreatableTopic()) - val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() - .setMemberId("member") + kafkaApis.handle(request, RequestLocal.noCaching) + val response = verifyNoThrottling[ListClientMetricsResourcesResponse](request) - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics.asJava)) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(streamsGroupHeartbeatResponse, response.data) - verify(autoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) + val expectedResponse = new ListClientMetricsResourcesResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) + assertEquals(expectedResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequestWithInternalTopicsToCreateMissingCreateACL(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) + def testShareGroupHeartbeatReturnsUnsupportedVersion(): Unit = { + val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + kafkaApis = createKafkaApis() + kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) + + val expectedHeartbeatResponse = new ShareGroupHeartbeatResponseData() + .setErrorCode(Errors.UNSUPPORTED_VERSION.code) + val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) + assertEquals(expectedHeartbeatResponse, response.data) + } - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") + @Test + def testShareGroupHeartbeatRequest(): Unit = { + val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( + val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() + when(groupCoordinator.shareGroupHeartbeat( requestChannelRequest.context, - streamsGroupHeartbeatRequest + shareGroupHeartbeatRequest )).thenReturn(future) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], isNotNull[util.List[Action]])).thenAnswer(invocation => { - val actions = invocation.getArgument(1).asInstanceOf[util.List[Action]] - val results: util.List[AuthorizationResult] = new util.ArrayList[AuthorizationResult](actions.size()) - actions.forEach { action => - val result = if (action.resourcePattern.name == "test" && action.operation == AclOperation.CREATE && action.resourcePattern.resourceType == ResourceType.TOPIC) { - AuthorizationResult.DENIED - } else if (action.operation == AclOperation.CREATE && action.resourcePattern.resourceType == ResourceType.CLUSTER) { - AuthorizationResult.DENIED - } else { - AuthorizationResult.ALLOWED - } - results.add(result) - } - results - }) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( - authorizer = Some(authorizer) + overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val missingTopics = util.Map.of("test", new CreatableTopic()) - val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() + val shareGroupHeartbeatResponse = new ShareGroupHeartbeatResponseData() .setMemberId("member") - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics)) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.NONE.code, response.data.errorCode()) - assertEquals(null, response.data.errorMessage()) - assertEquals( - java.util.List.of( - new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Unauthorized to CREATE on topics test.") - ), - response.data.status() - ) + future.complete(shareGroupHeartbeatResponse) + val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) + assertEquals(shareGroupHeartbeatResponse, response.data) } @Test - def testStreamsGroupHeartbeatRequestWithCachedTopicCreationErrors(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupHeartbeatRequest = new StreamsGroupHeartbeatRequestData().setGroupId("group") - val requestChannelRequest = buildRequest(new StreamsGroupHeartbeatRequest.Builder(streamsGroupHeartbeatRequest, true).build()) - - val future = new CompletableFuture[StreamsGroupHeartbeatResult]() - when(groupCoordinator.streamsGroupHeartbeat( - requestChannelRequest.context, - streamsGroupHeartbeatRequest - )).thenReturn(future) + def testShareGroupHeartbeatRequestAuthorizationFailed(): Unit = { + val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - // Mock AutoTopicCreationManager to return cached errors - val mockAutoTopicCreationManager = mock(classOf[AutoTopicCreationManager]) - when(mockAutoTopicCreationManager.getStreamsInternalTopicCreationErrors(ArgumentMatchers.eq(Set("test-topic")), any())) - .thenReturn(Map("test-topic" -> "INVALID_REPLICATION_FACTOR")) - // Mock the createStreamsInternalTopics method to do nothing (simulate topic creation attempt) - doNothing().when(mockAutoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) - kafkaApis = createKafkaApis(autoTopicCreationManager = Some(mockAutoTopicCreationManager)) + val authorizer: Authorizer = mock(classOf[Authorizer]) + when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) + kafkaApis = createKafkaApis( + overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), + authorizer = Some(authorizer), + ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - // Group coordinator returns MISSING_INTERNAL_TOPICS status and topics to create - val missingTopics = util.Map.of("test-topic", new CreatableTopic()) - val streamsGroupHeartbeatResponse = new StreamsGroupHeartbeatResponseData() - .setMemberId("member") - .setStatus(util.List.of( - new StreamsGroupHeartbeatResponseData.Status() - .setStatusCode(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code()) - .setStatusDetail("Internal topics are missing: [test-topic]") - )) - - future.complete(new StreamsGroupHeartbeatResult(streamsGroupHeartbeatResponse, missingTopics)) - val response = verifyNoThrottling[StreamsGroupHeartbeatResponse](requestChannelRequest) - - assertEquals(Errors.NONE.code, response.data.errorCode()) - assertEquals(null, response.data.errorMessage()) - - // Verify that the cached error was appended to the existing status detail - assertEquals(1, response.data.status().size()) - val status = response.data.status().get(0) - assertEquals(StreamsGroupHeartbeatResponse.Status.MISSING_INTERNAL_TOPICS.code(), status.statusCode()) - assertTrue(status.statusDetail().contains("Internal topics are missing: [test-topic]")) - assertTrue(status.statusDetail().contains("Creation failed: test-topic (INVALID_REPLICATION_FACTOR)")) - - // Verify that createStreamsInternalTopics was called - verify(mockAutoTopicCreationManager).createStreamsInternalTopics(any(), any(), anyLong()) - verify(mockAutoTopicCreationManager).getStreamsInternalTopicCreationErrors(ArgumentMatchers.eq(Set("test-topic")), any()) + val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) } - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testConsumerGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { - val fooTopicName = "foo" - val barTopicName = "bar" - metadataCache = mock(classOf[KRaftMetadataCache]) + @Test + def testShareGroupHeartbeatRequestFutureFailed(): Unit = { + val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") - val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - .setIncludeAuthorizedOperations(includeAuthorizedOperations) - consumerGroupDescribeRequestData.groupIds.addAll(groupIds) - val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) + val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest, true).build()) - val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.consumerGroupDescribe( - any[RequestContext], - any[util.List[String]] + val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() + when(groupCoordinator.shareGroupHeartbeat( + requestChannelRequest.context, + shareGroupHeartbeatRequest )).thenReturn(future) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1) + overrideProperties = Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true"), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val member0 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member0") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - - val member1 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member1") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName), - new TopicPartitions().setTopicName(barTopicName)))) - - val member2 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member2") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(barTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - - future.complete(util.List.of( - new DescribedGroup() - .setGroupId(groupIds.get(0)) - .setMembers(util.List.of(member0)), - new DescribedGroup() - .setGroupId(groupIds.get(1)) - .setMembers(util.List.of(member0, member1)), - new DescribedGroup() - .setGroupId(groupIds.get(2)) - .setMembers(util.List.of(member2)) - )) - - var authorizedOperationsInt = Int.MinValue - if (includeAuthorizedOperations) { - authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) - } - - // Can't reuse the above list here because we would not test the implementation in KafkaApis then - val describedGroups = List( - new DescribedGroup() - .setGroupId(groupIds.get(0)) - .setMembers(util.List.of(member0)), - new DescribedGroup() - .setGroupId(groupIds.get(1)) - .setMembers(util.List.of(member0, member1)), - new DescribedGroup() - .setGroupId(groupIds.get(2)) - .setMembers(util.List.of(member2)) - ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) - val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() - .setGroups(describedGroups.asJava) - - val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - - assertEquals(expectedConsumerGroupDescribeResponseData, response.data) - } - - @Test - def testConsumerGroupDescribeReturnsUnsupportedVersion(): Unit = { - val groupId = "group0" - val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - consumerGroupDescribeRequestData.groupIds.add(groupId) - val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - - val errorCode = Errors.UNSUPPORTED_VERSION.code - val expectedDescribedGroup = new DescribedGroup().setGroupId(groupId).setErrorCode(errorCode) - val expectedResponse = new ConsumerGroupDescribeResponseData() - expectedResponse.groups.add(expectedDescribedGroup) - metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new FeatureLevelRecord() - .setName(MetadataVersion.FEATURE_NAME) - .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) - ) - cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - cache - } - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - - assertEquals(expectedResponse, response.data) - } - - @Test - def testConsumerGroupDescribeAuthorizationFailed(): Unit = { - metadataCache = mock(classOf[KRaftMetadataCache]) - - val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - consumerGroupDescribeRequestData.groupIds.add("group-id") - val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - - val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.consumerGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - future.complete(util.List.of) - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - featureVersions = Seq(GroupVersion.GV_1) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.groups.get(0).errorCode) - } - - @Test - def testConsumerGroupDescribeFutureFailed(): Unit = { - metadataCache = mock(classOf[KRaftMetadataCache]) - - val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - consumerGroupDescribeRequestData.groupIds.add("group-id") - val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - - val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.consumerGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - kafkaApis = createKafkaApis( - featureVersions = Seq(GroupVersion.GV_1) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) - val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.groups.get(0).errorCode) - } - - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testStreamsGroupDescribe(includeAuthorizedOperations: Boolean): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val fooTopicName = "foo" - val barTopicName = "bar" - - val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") - val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() - .setIncludeAuthorizedOperations(includeAuthorizedOperations) - streamsGroupDescribeRequestData.groupIds.addAll(groupIds) - val requestChannelRequest = buildRequest(new StreamsGroupDescribeRequest.Builder(streamsGroupDescribeRequestData, true).build()) - - val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.streamsGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val subtopology0 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology0") - .setSourceTopics(util.List.of(fooTopicName)) - - val subtopology1 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology1") - .setRepartitionSinkTopics(util.List.of(barTopicName)) - - val subtopology2 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology2") - .setSourceTopics(util.List.of(fooTopicName)) - .setRepartitionSinkTopics(util.List.of(barTopicName)) - - future.complete(util.List.of( - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(0)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology0))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(1)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology1))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(2)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology2))) - )) - - var authorizedOperationsInt = Int.MinValue - if (includeAuthorizedOperations) { - authorizedOperationsInt = Utils.to32BitField( - AclEntry.supportedOperations(ResourceType.GROUP).asScala - .map(_.code.asInstanceOf[JByte]).asJava) - } - - // Can't reuse the above list here because we would not test the implementation in KafkaApis then - val describedGroups = List( - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(0)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology0))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(1)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology1))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(2)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology2))) - ).map(group => group.setAuthorizedOperations(authorizedOperationsInt)) - val expectedStreamsGroupDescribeResponseData = new StreamsGroupDescribeResponseData() - .setGroups(describedGroups.asJava) - - val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) - - assertEquals(expectedStreamsGroupDescribeResponseData, response.data) - } - - @Test - def testStreamsGroupDescribeReturnsUnsupportedVersion(): Unit = { - val groupId = "group0" - val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() - streamsGroupDescribeRequestData.groupIds.add(groupId) - val requestChannelRequest = buildRequest(new StreamsGroupDescribeRequest.Builder(streamsGroupDescribeRequestData, true).build()) - - val errorCode = Errors.UNSUPPORTED_VERSION.code - val expectedDescribedGroup = new StreamsGroupDescribeResponseData.DescribedGroup().setGroupId(groupId).setErrorCode(errorCode) - val expectedResponse = new StreamsGroupDescribeResponseData() - expectedResponse.groups.add(expectedDescribedGroup) - metadataCache = { - val cache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_1) - val delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new FeatureLevelRecord() - .setName(MetadataVersion.FEATURE_NAME) - .setFeatureLevel(MetadataVersion.MINIMUM_VERSION.featureLevel()) - ) - cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - cache - } - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) - - assertEquals(expectedResponse, response.data) - } - - @Test - def testStreamsGroupDescribeAuthorizationFailed(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() - streamsGroupDescribeRequestData.groupIds.add("group-id") - val requestChannelRequest = buildRequest(new StreamsGroupDescribeRequest.Builder(streamsGroupDescribeRequestData, true).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - - val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.streamsGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - future.complete(util.List.of) - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.groups.get(0).errorCode) - } - - @Test - def testStreamsGroupDescribeFutureFailed(): Unit = { - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() - streamsGroupDescribeRequestData.groupIds.add("group-id") - val requestChannelRequest = buildRequest(new StreamsGroupDescribeRequest.Builder(streamsGroupDescribeRequestData, true).build()) - - val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.streamsGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) - val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) - assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.groups.get(0).errorCode) - } - - @ParameterizedTest - @ValueSource(booleans = Array(true, false)) - def testStreamsGroupDescribeFilterUnauthorizedTopics(includeAuthorizedOperations: Boolean): Unit = { - val fooTopicName = "foo" - val barTopicName = "bar" - val errorMessage = "The described group uses topics that the client is not authorized to describe." - - val features = mock(classOf[FinalizedFeatures]) - when(features.finalizedFeatures()).thenReturn(util.Map.of(StreamsVersion.FEATURE_NAME, 1.toShort)) - - metadataCache = mock(classOf[KRaftMetadataCache]) - when(metadataCache.features()).thenReturn(features) - - val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") - val streamsGroupDescribeRequestData = new StreamsGroupDescribeRequestData() - .setIncludeAuthorizedOperations(includeAuthorizedOperations) - streamsGroupDescribeRequestData.groupIds.addAll(groupIds) - val requestChannelRequest = buildRequest(new StreamsGroupDescribeRequest.Builder(streamsGroupDescribeRequestData, true).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - groupIds.get(0) -> AuthorizationResult.ALLOWED, - groupIds.get(1) -> AuthorizationResult.ALLOWED, - groupIds.get(2) -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.DENIED, - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - - val future = new CompletableFuture[util.List[StreamsGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.streamsGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val subtopology0 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology0") - .setSourceTopics(util.List.of(fooTopicName)) - - val subtopology1 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology1") - .setRepartitionSinkTopics(util.List.of(barTopicName)) - - val subtopology2 = new StreamsGroupDescribeResponseData.Subtopology() - .setSubtopologyId("subtopology2") - .setSourceTopics(util.List.of(fooTopicName)) - .setRepartitionSinkTopics(util.List.of(barTopicName)) - - future.complete(util.List.of( - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(0)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology0))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(1)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology1))), - new StreamsGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(2)) - .setTopology(new StreamsGroupDescribeResponseData.Topology() - .setSubtopologies(util.List.of(subtopology2))) - )) - - val response = verifyNoThrottling[StreamsGroupDescribeResponse](requestChannelRequest) - assertNotNull(response.data) - assertEquals(3, response.data.groups.size) - assertEquals(Errors.NONE.code(), response.data.groups.get(0).errorCode()) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), response.data.groups.get(1).errorCode()) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), response.data.groups.get(2).errorCode()) - assertEquals(errorMessage, response.data.groups.get(1).errorMessage()) - assertEquals(errorMessage, response.data.groups.get(2).errorMessage()) - } - - @Test - def testConsumerGroupDescribeFilterUnauthorizedTopics(): Unit = { - val fooTopicName = "foo" - val barTopicName = "bar" - val errorMessage = "The group has described topic(s) that the client is not authorized to describe." - - metadataCache = mock(classOf[KRaftMetadataCache]) - - val groupIds = util.List.of("group-id-0", "group-id-1", "group-id-2") - val consumerGroupDescribeRequestData = new ConsumerGroupDescribeRequestData() - .setGroupIds(groupIds) - val requestChannelRequest = buildRequest(new ConsumerGroupDescribeRequest.Builder(consumerGroupDescribeRequestData, true).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - groupIds.get(0) -> AuthorizationResult.ALLOWED, - groupIds.get(1) -> AuthorizationResult.ALLOWED, - groupIds.get(2) -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.DENIED, - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - - val future = new CompletableFuture[util.List[ConsumerGroupDescribeResponseData.DescribedGroup]]() - when(groupCoordinator.consumerGroupDescribe( - any[RequestContext], - any[util.List[String]] - )).thenReturn(future) - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - featureVersions = Seq(GroupVersion.GV_1) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val member0 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member0") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - - val member1 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member1") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName), - new TopicPartitions().setTopicName(barTopicName)))) - - val member2 = new ConsumerGroupDescribeResponseData.Member() - .setMemberId("member2") - .setAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(barTopicName)))) - .setTargetAssignment(new ConsumerGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new TopicPartitions().setTopicName(fooTopicName)))) - - future.complete(util.List.of( - new DescribedGroup() - .setGroupId(groupIds.get(0)) - .setMembers(util.List.of(member0)), - new DescribedGroup() - .setGroupId(groupIds.get(1)) - .setMembers(util.List.of(member0, member1)), - new DescribedGroup() - .setGroupId(groupIds.get(2)) - .setMembers(util.List.of(member2)) - )) - - val expectedConsumerGroupDescribeResponseData = new ConsumerGroupDescribeResponseData() - .setGroups(util.List.of( - new DescribedGroup() - .setGroupId(groupIds.get(0)) - .setMembers(util.List.of(member0)), - new DescribedGroup() - .setGroupId(groupIds.get(1)) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(errorMessage), - new DescribedGroup() - .setGroupId(groupIds.get(2)) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(errorMessage) - )) - - val response = verifyNoThrottling[ConsumerGroupDescribeResponse](requestChannelRequest) - - assertEquals(expectedConsumerGroupDescribeResponseData, response.data) - } - - @Test - def testGetTelemetrySubscriptions(): Unit = { - val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder( - new GetTelemetrySubscriptionsRequestData(), true).build()) - - when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) - when(clientMetricsManager.processGetTelemetrySubscriptionRequest(any[GetTelemetrySubscriptionsRequest](), - any[RequestContext]())).thenReturn(new GetTelemetrySubscriptionsResponse( - new GetTelemetrySubscriptionsResponseData())) - - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - - val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) - - val expectedResponse = new GetTelemetrySubscriptionsResponseData() - assertEquals(expectedResponse, response.data) - } - - @Test - def testGetTelemetrySubscriptionsWithException(): Unit = { - val request = buildRequest(new GetTelemetrySubscriptionsRequest.Builder( - new GetTelemetrySubscriptionsRequestData(), true).build()) - - when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) - when(clientMetricsManager.processGetTelemetrySubscriptionRequest(any[GetTelemetrySubscriptionsRequest](), - any[RequestContext]())).thenThrow(new RuntimeException("test")) - - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - - val response = verifyNoThrottling[GetTelemetrySubscriptionsResponse](request) - - val expectedResponse = new GetTelemetrySubscriptionsResponseData().setErrorCode(Errors.INVALID_REQUEST.code) - assertEquals(expectedResponse, response.data) - } - - @Test - def testPushTelemetry(): Unit = { - val request = buildRequest(new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true).build()) - - when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) - when(clientMetricsManager.processPushTelemetryRequest(any[PushTelemetryRequest](), any[RequestContext]())) - .thenReturn(new PushTelemetryResponse(new PushTelemetryResponseData())) - - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[PushTelemetryResponse](request) - - val expectedResponse = new PushTelemetryResponseData().setErrorCode(Errors.NONE.code) - assertEquals(expectedResponse, response.data) - } - - @Test - def testPushTelemetryWithException(): Unit = { - val request = buildRequest(new PushTelemetryRequest.Builder(new PushTelemetryRequestData(), true).build()) - - when(clientMetricsManager.isTelemetryReceiverConfigured).thenReturn(true) - when(clientMetricsManager.processPushTelemetryRequest(any[PushTelemetryRequest](), any[RequestContext]())) - .thenThrow(new RuntimeException("test")) - - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[PushTelemetryResponse](request) - - val expectedResponse = new PushTelemetryResponseData().setErrorCode(Errors.INVALID_REQUEST.code) - assertEquals(expectedResponse, response.data) - } - - @Test - def testListConfigResourcesV0(): Unit = { - val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.LIST_CONFIG_RESOURCES)) - try { - val request = buildRequest(new ListConfigResourcesRequest.Builder( - new ListConfigResourcesRequestData().setResourceTypes(util.List.of(ConfigResource.Type.CLIENT_METRICS.id))).build(0), - requestMetrics = requestMetrics) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val resources = util.Set.of("client-metric1", "client-metric2") - when(clientMetricsManager.listClientMetricsResources).thenReturn(resources) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottlingAndUpdateMetrics[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - resources.stream.map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource) - ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(metadataCache, never).getAllTopics - verify(groupConfigManager, never).groupIds - verify(metadataCache, never).getBrokerNodes(any) - assertTrue(requestMetrics.apply(ApiKeys.LIST_CONFIG_RESOURCES.name).requestQueueTimeHist.count > 0) - assertTrue(requestMetrics.apply(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME).requestQueueTimeHist.count > 0) - } finally { - requestMetrics.close() - } - } - - @Test - def testListConfigResourcesV1WithEmptyResourceTypes(): Unit = { - val requestMetrics = new RequestChannelMetrics(util.Set.of(ApiKeys.LIST_CONFIG_RESOURCES)) - try { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build(1), - requestMetrics = requestMetrics) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val clientMetrics = util.Set.of("client-metric1", "client-metric2") - val topics = util.Set.of("topic1", "topic2") - val groupIds = util.List.of("group1", "group2") - val nodeIds = util.List.of(1, 2) - when(clientMetricsManager.listClientMetricsResources).thenReturn(clientMetrics) - when(metadataCache.getAllTopics).thenReturn(topics) - when(groupConfigManager.groupIds).thenReturn(groupIds) - when(metadataCache.getBrokerNodes(any())).thenReturn( - nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottlingAndUpdateMetrics[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - util.stream.Stream.of( - groupIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.GROUP.id) - ).toList, - clientMetrics.stream.map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.CLIENT_METRICS.id) - ).toList, - nodeIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id) - ).toList, - nodeIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER.id) - ).toList, - topics.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.TOPIC.id) - ).toList - ).flatMap(s => s.stream).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - assertTrue(requestMetrics.apply(ApiKeys.LIST_CONFIG_RESOURCES.name).requestQueueTimeHist.count > 0) - assertEquals(0, requestMetrics.apply(RequestMetrics.LIST_CLIENT_METRICS_RESOURCES_METRIC_NAME).requestQueueTimeHist.count) - } finally { - requestMetrics.close() - } - } - - @Test - def testListConfigResourcesV1WithGroup(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.GROUP.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val groupIds = util.List.of("group1", "group2") - when(groupConfigManager.groupIds).thenReturn(groupIds) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - groupIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.GROUP.id) - ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(metadataCache, never).getAllTopics - verify(clientMetricsManager, never).listClientMetricsResources - verify(metadataCache, never).getBrokerNodes(any) - } - - @Test - def testListConfigResourcesV1WithClientMetrics(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.CLIENT_METRICS.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val clientMetrics = util.Set.of("client-metric1", "client-metric2") - when(clientMetricsManager.listClientMetricsResources).thenReturn(clientMetrics) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - clientMetrics.stream.map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.CLIENT_METRICS.id) - ).collect(util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(metadataCache, never).getAllTopics - verify(groupConfigManager, never).groupIds - verify(metadataCache, never).getBrokerNodes(any) - } - - @Test - def testListConfigResourcesV1WithBrokerLogger(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.BROKER_LOGGER.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val nodeIds = util.List.of(1, 2) - when(metadataCache.getBrokerNodes(any())).thenReturn( - nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - nodeIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER_LOGGER.id) - ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(metadataCache, never).getAllTopics - verify(groupConfigManager, never).groupIds - verify(clientMetricsManager, never).listClientMetricsResources - } - - @Test - def testListConfigResourcesV1WithBroker(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.BROKER.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val nodeIds = util.List.of(1, 2) - when(metadataCache.getBrokerNodes(any())).thenReturn( - nodeIds.stream().map(id => new Node(id, "localhost", 1234)).collect(java.util.stream.Collectors.toList())) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - nodeIds.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource.toString).setResourceType(ConfigResource.Type.BROKER.id) - ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(metadataCache, never).getAllTopics - verify(groupConfigManager, never).groupIds - verify(clientMetricsManager, never).listClientMetricsResources - } - - @Test - def testListConfigResourcesV1WithTopic(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.TOPIC.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - val topics = util.Set.of("topic1", "topic2") - when(metadataCache.getAllTopics).thenReturn(topics) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponseData = new ListConfigResourcesResponseData() - .setConfigResources( - topics.stream().map(resource => - new ListConfigResourcesResponseData.ConfigResource().setResourceName(resource).setResourceType(ConfigResource.Type.TOPIC.id) - ).collect(java.util.stream.Collectors.toList[ListConfigResourcesResponseData.ConfigResource])) - assertEquals(expectedResponseData, response.data) - - verify(groupConfigManager, never).groupIds - verify(clientMetricsManager, never).listClientMetricsResources - verify(metadataCache, never).getBrokerNodes(any) - } - - @Test - def testListConfigResourcesEmptyResponse(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build()) - metadataCache = mock(classOf[KRaftMetadataCache]) - - when(clientMetricsManager.listClientMetricsResources).thenReturn(util.Set.of) - when(metadataCache.getAllTopics).thenReturn(util.Set.of) - when(groupConfigManager.groupIds).thenReturn(util.List.of) - when(metadataCache.getBrokerNodes(any())).thenReturn(util.List.of) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - val expectedResponse = new ListConfigResourcesResponseData() - assertEquals(expectedResponse, response.data) - } - - @Test - def testListConfigResourcesV1WithUnknown(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData() - .setResourceTypes(util.List.of(ConfigResource.Type.UNKNOWN.id))).build(1)) - metadataCache = mock(classOf[KRaftMetadataCache]) - - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - assertEquals(Errors.UNSUPPORTED_VERSION.code(), response.data.errorCode()) - - verify(metadataCache, never).getAllTopics - verify(groupConfigManager, never).groupIds - verify(clientMetricsManager, never).listClientMetricsResources - verify(metadataCache, never).getBrokerNodes(any) - } - - @Test - def testListConfigResourcesWithException(): Unit = { - val request = buildRequest(new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - - when(clientMetricsManager.listClientMetricsResources).thenThrow(new RuntimeException("test")) - kafkaApis = createKafkaApis() - kafkaApis.handle(request, RequestLocal.noCaching) - val response = verifyNoThrottling[ListConfigResourcesResponse](request) - - val expectedResponse = new ListConfigResourcesResponseData().setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code) - assertEquals(expectedResponse, response.data) - } - - @Test - def testShareGroupHeartbeatReturnsUnsupportedVersion(): Unit = { - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) - metadataCache = mock(classOf[KRaftMetadataCache]) - kafkaApis = createKafkaApis( - featureVersions = Seq(ShareVersion.SV_0) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val expectedHeartbeatResponse = new ShareGroupHeartbeatResponseData() - .setErrorCode(Errors.UNSUPPORTED_VERSION.code) - val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) - assertEquals(expectedHeartbeatResponse, response.data) - } - - @Test - def testShareGroupHeartbeatRequest(): Unit = { - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) - - val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() - when(groupCoordinator.shareGroupHeartbeat( - requestChannelRequest.context, - shareGroupHeartbeatRequest - )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val shareGroupHeartbeatResponse = new ShareGroupHeartbeatResponseData() - .setMemberId("member") - - future.complete(shareGroupHeartbeatResponse) - val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) - assertEquals(shareGroupHeartbeatResponse, response.data) - } - - @Test - def testShareGroupHeartbeatRequestGroupAuthorizationFailed(): Unit = { - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) - } - - @Test - def testShareGroupHeartbeatRequestTopicAuthorizationFailed(): Unit = { - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - val groupId = "group" - val fooTopicName = "foo" - val barTopicName = "bar" - val zarTopicName = "zar" - - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData() - .setGroupId(groupId) - .setSubscribedTopicNames(util.List.of(fooTopicName, barTopicName, zarTopicName)) - - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - groupId -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.DENIED, - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code, response.data.errorCode) - } - - @Test - def testShareGroupHeartbeatRequestFutureFailed(): Unit = { - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequestData().setGroupId("group") - - val requestChannelRequest = buildRequest(new ShareGroupHeartbeatRequest.Builder(shareGroupHeartbeatRequest).build()) - - val future = new CompletableFuture[ShareGroupHeartbeatResponseData]() - when(groupCoordinator.shareGroupHeartbeat( - requestChannelRequest.context, - shareGroupHeartbeatRequest - )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) - val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) - assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.errorCode) - } - - @Test - def testShareGroupDescribeSuccess(): Unit = { - val fooTopicName = "foo" - val barTopicName = "bar" - - val groupIds = util.List.of("share-group-id-0", "share-group-id-1", "share-group_id-2") - - val member0 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member0") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)))) - - val member1 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member1") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName), - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - - val member2 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member2") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)).setMembers(util.List.of(member0)), - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)).setMembers(util.List.of(member1)), - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(2)).setMembers(util.List.of(member2)) - ) - getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = true, null, describedGroups) - } - - @Test - def testShareGroupDescribeReturnsUnsupportedVersion(): Unit = { - val groupIds = util.List.of("share-group-id-0", "share-group-id-1") - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)), - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) - ) - val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = false, verifyNoErr = false, null, describedGroups) - assertNotNull(response.data) - assertEquals(2, response.data.groups.size) - response.data.groups.forEach(group => assertEquals(Errors.UNSUPPORTED_VERSION.code(), group.errorCode())) - } - - @Test - def testShareGroupDescribeRequestAuthorizationFailed(): Unit = { - val groupIds = util.List.of("share-group-id-0", "share-group-id-1") - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, describedGroups) - assertNotNull(response.data) - assertEquals(2, response.data.groups.size) - response.data.groups.forEach(group => assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code(), group.errorCode())) - } - - @Test - def testShareGroupDescribeRequestAuthorizationFailedForOneGroup(): Unit = { - val groupIds = util.List.of("share-group-id-fail-0", "share-group-id-1") - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( - new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) - ) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - - val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, describedGroups) - - assertNotNull(response.data) - assertEquals(2, response.data.groups.size) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code(), response.data.groups.get(0).errorCode()) - assertEquals(Errors.NONE.code(), response.data.groups.get(1).errorCode()) - } - - @Test - def testShareGroupDescribeFilterUnauthorizedTopics(): Unit = { - val fooTopicName = "foo" - val barTopicName = "bar" - val errorMessage = "The group has described topic(s) that the client is not authorized to describe." - - val groupIds = util.List.of("share-group-id-0", "share-group-id-1", "share-group_id-2") - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - groupIds.get(0) -> AuthorizationResult.ALLOWED, - groupIds.get(1) -> AuthorizationResult.ALLOWED, - groupIds.get(2) -> AuthorizationResult.ALLOWED, - fooTopicName -> AuthorizationResult.ALLOWED, - barTopicName -> AuthorizationResult.DENIED, - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - val member0 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member0") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName)))) - - val member1 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member1") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(fooTopicName), - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - - val member2 = new ShareGroupDescribeResponseData.Member() - .setMemberId("member2") - .setAssignment(new ShareGroupDescribeResponseData.Assignment() - .setTopicPartitions(util.List.of( - new ShareGroupDescribeResponseData.TopicPartitions().setTopicName(barTopicName)))) - - val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = util.List.of( - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(0)) - .setMembers(util.List.of(member0)), - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(1)) - .setMembers(util.List.of(member1)), - new ShareGroupDescribeResponseData.DescribedGroup() - .setGroupId(groupIds.get(2)) - .setMembers(util.List.of(member2))) - - val response = getShareGroupDescribeResponse(groupIds, enableShareGroups = true, verifyNoErr = false, authorizer, describedGroups) - - assertNotNull(response.data) - assertEquals(3, response.data.groups.size) - assertEquals(Errors.NONE.code(), response.data.groups.get(0).errorCode()) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), response.data.groups.get(1).errorCode()) - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), response.data.groups.get(2).errorCode()) - assertEquals(errorMessage, response.data.groups.get(1).errorMessage()) - assertEquals(errorMessage, response.data.groups.get(2).errorMessage()) - } - - @Test - def testReadShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid() - val readRequestData = new ReadShareGroupStateRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - )) - )) - - val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = util.List.of( - new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - .setStateEpoch(1) - .setStartOffset(10) - .setStateBatches(util.List.of( - new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(11) - .setLastOffset(15) - .setDeliveryState(0) - .setDeliveryCount(1) - )) - )) - ) - - val response = getReadShareGroupStateResponse( - readRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = true, - null, - readStateResultData - ) - - assertNotNull(response.data) - assertEquals(1, response.data.results.size) - } - - @Test - def testReadShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid() - val readRequestData = new ReadShareGroupStateRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new ReadShareGroupStateRequestData.ReadStateData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - )) - )) - - val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = util.List.of( - new ReadShareGroupStateResponseData.ReadStateResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - .setStateEpoch(1) - .setStartOffset(10) - .setStateBatches(util.List.of( - new ReadShareGroupStateResponseData.StateBatch() - .setFirstOffset(11) - .setLastOffset(15) - .setDeliveryState(0) - .setDeliveryCount(1) - )) - )) - ) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - - val response = getReadShareGroupStateResponse( - readRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = false, - authorizer, - readStateResultData - ) - - assertNotNull(response.data) - assertEquals(1, response.data.results.size) - response.data.results.forEach(readResult => { - assertEquals(1, readResult.partitions.size) - assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), readResult.partitions.get(0).errorCode()) - }) - } - - @Test - def testReadShareGroupStateSummarySuccess(): Unit = { - val topicId = Uuid.randomUuid() - val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateSummaryRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - )) - )) - - val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = util.List.of( - new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - .setStateEpoch(1) - .setStartOffset(10) - )) - ) - - val response = getReadShareGroupStateSummaryResponse( - readSummaryRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = true, - null, - readStateSummaryResultData - ) - - assertNotNull(response.data) - assertEquals(1, response.data.results.size) - } - - @Test - def testReadShareGroupStateSummaryAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid() - val readSummaryRequestData = new ReadShareGroupStateSummaryRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new ReadShareGroupStateSummaryRequestData.ReadStateSummaryData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateSummaryRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - )) - )) - - val readStateSummaryResultData: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult] = util.List.of( - new ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new ReadShareGroupStateSummaryResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - .setStateEpoch(1) - .setStartOffset(10) - )) - ) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) - - val response = getReadShareGroupStateSummaryResponse( - readSummaryRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = false, - authorizer, - readStateSummaryResultData - ) - - assertNotNull(response.data) - assertEquals(1, response.data.results.size) - response.data.results.forEach(readResult => { - assertEquals(1, readResult.partitions.size) - assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), readResult.partitions.get(0).errorCode()) - }) - } - - @Test - def testDescribeShareGroupOffsetsReturnsUnsupportedVersion(): Unit = { - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData().setGroups( - util.List.of(new DescribeShareGroupOffsetsRequestGroup().setGroupId("group").setTopics( - util.List.of(new DescribeShareGroupOffsetsRequestTopic().setTopicName("topic-1").setPartitions(util.List.of(1))) - )) - ) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - response.data.groups.forEach(group => group.topics.forEach(topic => topic.partitions.forEach(partition => assertEquals(Errors.UNSUPPORTED_VERSION.code, partition.errorCode)))) - } - - @Test - def testDescribeShareGroupOffsetsRequestGroupAuthorizationFailed(): Unit = { - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData().setGroups( - util.List.of(new DescribeShareGroupOffsetsRequestGroup().setGroupId("group").setTopics( - util.List.of(new DescribeShareGroupOffsetsRequestTopic().setTopicName("topic-1").setPartitions(util.List.of(1))) - )) - ) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - response.data.groups.forEach( - group => group.topics.forEach( - topic => topic.partitions.forEach( - partition => assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, partition.errorCode) - ) - ) - ) - } - - @Test - def testDescribeShareGroupAllOffsetsRequestGroupAuthorizationFailed(): Unit = { - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData().setGroups( - util.List.of(new DescribeShareGroupOffsetsRequestGroup().setGroupId("group").setTopics(null)) - ) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - response.data.groups.forEach( - group => group.topics.forEach( - topic => topic.partitions.forEach( - partition => assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, partition.errorCode) - ) - ) - ) - } - - @Test - def testDescribeShareGroupOffsetsRequestSuccess(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - val topicName3 = "topic-3" - val topicId3 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 1, topicId = topicId2) - addTopicToMetadataCache(topicName3, 1, topicId = topicId3) - - val describeShareGroupOffsetsRequestGroup1 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics( - util.List.of( - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName1).setPartitions(util.List.of(1, 2, 3)), - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName2).setPartitions(util.List.of(10, 20)), - ) - ) - - val describeShareGroupOffsetsRequestGroup2 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics( - util.List.of( - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName3).setPartitions(util.List.of(0)), - ) - ) - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() - .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupOffsets( - requestChannelRequest.context, - describeShareGroupOffsetsRequestGroup1 - )).thenReturn(futureGroup1) - val futureGroup2 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupOffsets( - requestChannelRequest.context, - describeShareGroupOffsetsRequestGroup2 - )).thenReturn(futureGroup2) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )), - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(10) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(20) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName3) - .setTopicId(topicId3) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData() - .setGroups(util.List.of(describeShareGroupOffsetsResponseGroup1, describeShareGroupOffsetsResponseGroup2)) - - futureGroup1.complete(describeShareGroupOffsetsResponseGroup1) - futureGroup2.complete(describeShareGroupOffsetsResponseGroup2) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDescribeShareGroupOffsetsRequestTopicAuthorizationFailed(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - val topicName3 = "topic-3" - val topicId3 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 1, topicId = topicId2) - addTopicToMetadataCache(topicName3, 1, topicId = topicId3) - - val describeShareGroupOffsetsRequestGroup1 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics( - util.List.of( - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName1).setPartitions(util.List.of(1, 2, 3)), - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName2).setPartitions(util.List.of(10, 20)), - ) - ) - - val describeShareGroupOffsetsRequestGroup2 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics( - util.List.of( - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName3).setPartitions(util.List.of(0)), - ) - ) - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() - .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - // The group coordinator will only be asked for information about topics which are authorized - val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupOffsets( - requestChannelRequest.context, - new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics( - util.List.of( - new DescribeShareGroupOffsetsRequestTopic().setTopicName(topicName1).setPartitions(util.List.of(1, 2, 3)), - ) - ) - )).thenReturn(futureGroup1) - - val futureGroup2 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupOffsets( - requestChannelRequest.context, - new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics( - util.List.of( - ) - ) - )).thenReturn(futureGroup2) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - "group1" -> AuthorizationResult.ALLOWED, - "group2" -> AuthorizationResult.ALLOWED, - topicName1 -> AuthorizationResult.ALLOWED, - topicName2 -> AuthorizationResult.DENIED, - topicName3 -> AuthorizationResult.DENIED - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - // These are the responses to the KafkaApis request, complete with authorization errors - val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )), - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(Uuid.ZERO_UUID) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(10) - .setStartOffset(-1) - .setLeaderEpoch(0) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(20) - .setStartOffset(-1) - .setLeaderEpoch(0) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - )) - )) - - val describeShareGroupOffsetsResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName3) - .setTopicId(Uuid.ZERO_UUID) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setStartOffset(-1) - .setLeaderEpoch(0) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - )) - )) - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData() - .setGroups(util.List.of(describeShareGroupOffsetsResponseGroup1, describeShareGroupOffsetsResponseGroup2)) - - // And these are the responses to the topics which were authorized - val describeShareGroupOffsetsGroupCoordinatorResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsGroupCoordinatorResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of()) - - futureGroup1.complete(describeShareGroupOffsetsGroupCoordinatorResponseGroup1) - futureGroup2.complete(describeShareGroupOffsetsGroupCoordinatorResponseGroup2) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDescribeShareGroupAllOffsetsRequestTopicAuthorizationFailed(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - val topicName3 = "topic-3" - val topicId3 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 1, topicId = topicId2) - addTopicToMetadataCache(topicName3, 1, topicId = topicId3) - - val describeShareGroupOffsetsRequestGroup1 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics(null) - - val describeShareGroupOffsetsRequestGroup2 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics(null) - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() - .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - // The group coordinator is being asked for information about all topics, not just those which are authorized - val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupAllOffsets( - requestChannelRequest.context, - new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics(null) - )).thenReturn(futureGroup1) - - val futureGroup2 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupAllOffsets( - requestChannelRequest.context, - new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics(null) - )).thenReturn(futureGroup2) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - val acls = Map( - "group1" -> AuthorizationResult.ALLOWED, - "group2" -> AuthorizationResult.ALLOWED, - topicName1 -> AuthorizationResult.ALLOWED, - topicName2 -> AuthorizationResult.DENIED, - topicName3 -> AuthorizationResult.DENIED - ) - when(authorizer.authorize( - any[RequestContext], - any[util.List[Action]] - )).thenAnswer { invocation => - val actions = invocation.getArgument(1, classOf[util.List[Action]]) - actions.stream() - .map(action => acls.getOrElse(action.resourcePattern.name, AuthorizationResult.DENIED)) - .toList - } - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - // These are the responses to the KafkaApis request, with unauthorized topics filtered out - val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of()) - - // And these are the responses from the group coordinator for all topics, even those which are not authorized - val describeShareGroupOffsetsGroupCoordinatorResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )), - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(10) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(20) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsGroupCoordinatorResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName3) - .setTopicId(topicId3) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData() - .setGroups(util.List.of(describeShareGroupOffsetsResponseGroup1, describeShareGroupOffsetsResponseGroup2)) - - futureGroup1.complete(describeShareGroupOffsetsGroupCoordinatorResponseGroup1) - futureGroup2.complete(describeShareGroupOffsetsGroupCoordinatorResponseGroup2) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDescribeShareGroupAllOffsetsRequestSuccess(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - val topicName3 = "topic-3" - val topicId3 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 1, topicId = topicId2) - addTopicToMetadataCache(topicName3, 1, topicId = topicId3) - - val describeShareGroupOffsetsRequestGroup1 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group1").setTopics(null) - - val describeShareGroupOffsetsRequestGroup2 = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group2").setTopics(null) - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData() - .setGroups(util.List.of(describeShareGroupOffsetsRequestGroup1, describeShareGroupOffsetsRequestGroup2)) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val futureGroup1 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupAllOffsets( - requestChannelRequest.context, - describeShareGroupOffsetsRequestGroup1 - )).thenReturn(futureGroup1) - val futureGroup2 = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupAllOffsets( - requestChannelRequest.context, - describeShareGroupOffsetsRequestGroup2 - )).thenReturn(futureGroup2) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val describeShareGroupOffsetsResponseGroup1 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group1") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(1) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(2) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(3) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )), - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(10) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0), - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(20) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponseGroup2 = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group2") - .setTopics(util.List.of( - new DescribeShareGroupOffsetsResponseTopic() - .setTopicName(topicName3) - .setTopicId(topicId3) - .setPartitions(util.List.of( - new DescribeShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setStartOffset(0) - .setLeaderEpoch(1) - .setErrorMessage(null) - .setErrorCode(0) - )) - )) - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData() - .setGroups(util.List.of(describeShareGroupOffsetsResponseGroup1, describeShareGroupOffsetsResponseGroup2)) - - futureGroup1.complete(describeShareGroupOffsetsResponseGroup1) - futureGroup2.complete(describeShareGroupOffsetsResponseGroup2) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDescribeShareGroupOffsetsRequestEmptyGroupsSuccess(): Unit = { - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val future = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val describeShareGroupOffsetsResponseGroup = new DescribeShareGroupOffsetsResponseGroup() - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData() - - future.complete(describeShareGroupOffsetsResponseGroup) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDescribeShareGroupOffsetsRequestEmptyTopicsSuccess(): Unit = { - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - - val describeShareGroupOffsetsRequestGroup = new DescribeShareGroupOffsetsRequestGroup().setGroupId("group") - - val describeShareGroupOffsetsRequest = new DescribeShareGroupOffsetsRequestData().setGroups(util.List.of(describeShareGroupOffsetsRequestGroup)) - - val requestChannelRequest = buildRequest(new DescribeShareGroupOffsetsRequest.Builder(describeShareGroupOffsetsRequest).build) - - val future = new CompletableFuture[DescribeShareGroupOffsetsResponseData.DescribeShareGroupOffsetsResponseGroup] - when(groupCoordinator.describeShareGroupOffsets( - requestChannelRequest.context, - describeShareGroupOffsetsRequestGroup - )).thenReturn(future) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val describeShareGroupOffsetsResponseGroup = new DescribeShareGroupOffsetsResponseGroup() - .setGroupId("group") - .setTopics(util.List.of()) - - val describeShareGroupOffsetsResponse = new DescribeShareGroupOffsetsResponseData().setGroups(util.List.of(describeShareGroupOffsetsResponseGroup)) - - future.complete(describeShareGroupOffsetsResponseGroup) - val response = verifyNoThrottling[DescribeShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(describeShareGroupOffsetsResponse, response.data) - } - - @Test - def testDeleteShareGroupOffsetsReturnsUnsupportedVersion(): Unit = { - val deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1"))) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest).build()) - metadataCache = new KRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - response.data.responses.forEach(topic => assertEquals(Errors.UNSUPPORTED_VERSION.code, topic.errorCode)) - } - - @Test - def testDeleteShareGroupOffsetsRequestsGroupAuthorizationFailed(): Unit = { - val deleteShareGroupOffsetsRequest = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(util.List.of(new DeleteShareGroupOffsetsRequestTopic().setTopicName("topic-1"))) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequest).build) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - authorizer = Some(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code, response.data.errorCode) - } - - @Test - def testDeleteShareGroupOffsetsRequestsTopicAuthorizationFailed(): Unit = { - - def buildExpectedActionsTopic(topic: String): util.List[Action] = { - val pattern = new ResourcePattern(ResourceType.TOPIC, topic, PatternType.LITERAL) - val action = new Action(AclOperation.READ, pattern, 1, true, true) - util.List.of(action) - } - - def buildExpectedActionsGroup(topic: String): util.List[Action] = { - val pattern = new ResourcePattern(ResourceType.GROUP, topic, PatternType.LITERAL) - val action = new Action(AclOperation.DELETE, pattern, 1, true, true) - util.List.of(action) - } - - val groupId = "group" - - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 2, topicId = topicId1) - addTopicToMetadataCache(topicName2, 2, topicId = topicId2) - - val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - - val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName2) - - val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2)) - - val deleteShareGroupOffsetsGroupCoordinatorRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic2)) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) - - val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] - when(groupCoordinator.deleteShareGroupOffsets( - requestChannelRequest.context, - deleteShareGroupOffsetsGroupCoordinatorRequestData - )).thenReturn(resultFuture) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActionsGroup(groupId)))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActionsTopic(topicName1)))) - .thenReturn(util.List.of(AuthorizationResult.DENIED)) - when(authorizer.authorize(any[RequestContext], ArgumentMatchers.eq(buildExpectedActionsTopic(topicName2)))) - .thenReturn(util.List.of(AuthorizationResult.ALLOWED)) - - kafkaApis = createKafkaApis( - authorizer = Some(authorizer) - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - .setResponses(util.List.of( - new DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - ) - ) - - val expectedResponseTopics: util.List[DeleteShareGroupOffsetsResponseTopic] = new util.ArrayList[DeleteShareGroupOffsetsResponseTopic]() - - expectedResponseTopics.add( - new DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setErrorCode(Errors.TOPIC_AUTHORIZATION_FAILED.code) - .setErrorMessage(Errors.TOPIC_AUTHORIZATION_FAILED.message()) - ) - - deleteShareGroupOffsetsResponseData.responses.forEach{ topic => { - expectedResponseTopics.add(topic) - }} - - val expectedResponseData: DeleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - .setResponses(expectedResponseTopics) - - resultFuture.complete(deleteShareGroupOffsetsResponseData) - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(expectedResponseData, response.data) - } - - @Test - def testDeleteShareGroupOffsetsRequestSuccess(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - val topicName3 = "topic-3" - val topicId3 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 2, topicId = topicId2) - addTopicToMetadataCache(topicName3, 3, topicId = topicId3) - - val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - - val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName2) - - val deleteShareGroupOffsetsRequestTopic3 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName3) - - val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2, deleteShareGroupOffsetsRequestTopic3)) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) - - val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] - when(groupCoordinator.deleteShareGroupOffsets( - requestChannelRequest.context, - deleteShareGroupOffsetsRequestData - )).thenReturn(resultFuture) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()) - .setResponses(util.List.of( - new DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topicName1) - .setTopicId(topicId1) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - new DeleteShareGroupOffsetsResponseTopic() - .setTopicName(topicName3) - .setTopicId(topicId3) - .setErrorMessage(null) - .setErrorCode(Errors.NONE.code()), - )) - - resultFuture.complete(deleteShareGroupOffsetsResponseData) - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(deleteShareGroupOffsetsResponseData, response.data) - } - - @Test - def testDeleteShareGroupOffsetsRequestGroupCoordinatorThrowsError(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 2, topicId = topicId2) - - val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - - val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName2) - - val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2)) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) - - when(groupCoordinator.deleteShareGroupOffsets( - requestChannelRequest.context, - deleteShareGroupOffsetsRequestData - )).thenReturn(CompletableFuture.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception)) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorMessage(Errors.UNKNOWN_SERVER_ERROR.message()) - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(deleteShareGroupOffsetsResponseData, response.data) - } - - @Test - def testDeleteShareGroupOffsetsRequestGroupCoordinatorErrorResponse(): Unit = { - val topicName1 = "topic-1" - val topicId1 = Uuid.randomUuid - val topicName2 = "topic-2" - val topicId2 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 1, topicId = topicId1) - addTopicToMetadataCache(topicName2, 2, topicId = topicId2) - - val deleteShareGroupOffsetsRequestTopic1 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - - val deleteShareGroupOffsetsRequestTopic2 = new DeleteShareGroupOffsetsRequestTopic() - .setTopicName(topicName2) - - val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics(util.List.of(deleteShareGroupOffsetsRequestTopic1, deleteShareGroupOffsetsRequestTopic2)) - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) - - val groupCoordinatorResponse: DeleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - .setErrorMessage(Errors.UNKNOWN_SERVER_ERROR.message()) - - when(groupCoordinator.deleteShareGroupOffsets( - requestChannelRequest.context, - deleteShareGroupOffsetsRequestData - )).thenReturn(CompletableFuture.completedFuture(groupCoordinatorResponse)) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val deleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorMessage(Errors.UNKNOWN_SERVER_ERROR.message()) - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(deleteShareGroupOffsetsResponseData, response.data) - } + future.completeExceptionally(Errors.FENCED_MEMBER_EPOCH.exception) + val response = verifyNoThrottling[ShareGroupHeartbeatResponse](requestChannelRequest) + assertEquals(Errors.FENCED_MEMBER_EPOCH.code, response.data.errorCode) + } @Test - def testDeleteShareGroupOffsetsRequestEmptyTopicsSuccess(): Unit = { - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - - val deleteShareGroupOffsetsRequestData = new DeleteShareGroupOffsetsRequestData() - .setGroupId("group") - - val requestChannelRequest = buildRequest(new DeleteShareGroupOffsetsRequest.Builder(deleteShareGroupOffsetsRequestData).build) - - val groupCoordinatorResponse: DeleteShareGroupOffsetsResponseData = new DeleteShareGroupOffsetsResponseData() - .setErrorCode(Errors.NONE.code()) - - when(groupCoordinator.deleteShareGroupOffsets( - requestChannelRequest.context, - deleteShareGroupOffsetsRequestData - )).thenReturn(CompletableFuture.completedFuture(groupCoordinatorResponse)) - - val resultFuture = new CompletableFuture[DeleteShareGroupOffsetsResponseData] - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val deleteShareGroupOffsetsResponse = new DeleteShareGroupOffsetsResponseData() - - resultFuture.complete(deleteShareGroupOffsetsResponse) - val response = verifyNoThrottling[DeleteShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(deleteShareGroupOffsetsResponse, response.data) + def testShareGroupDescribeSuccess(): Unit = { + val groupIds = List("share-group-id-0", "share-group-id-1").asJava + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)), + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) + ).asJava + getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") + , true, null, describedGroups) } @Test - def testWriteShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid() - val writeRequestData = new WriteShareGroupStateRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new WriteShareGroupStateRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - .setStateEpoch(2) - .setStartOffset(10) - .setStateBatches(util.List.of( - new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(11) - .setLastOffset(15) - .setDeliveryCount(1) - .setDeliveryState(0) - )) - )) - )) - - val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = util.List.of( - new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - )) - ) - - val response = getWriteShareGroupStateResponse( - writeRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = true, - null, - writeStateResultData - ) - + def testShareGroupDescribeReturnsUnsupportedVersion(): Unit = { + val groupIds = List("share-group-id-0", "share-group-id-1").asJava + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(0)), + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) + ).asJava + val response = getShareGroupDescribeResponse(groupIds, Map.empty, false, null, describedGroups) assertNotNull(response.data) - assertEquals(1, response.data.results.size) - } - - @Test - def testWriteShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid() - val writeRequestData = new WriteShareGroupStateRequestData() - .setGroupId("group1") - .setTopics(util.List.of( - new WriteShareGroupStateRequestData.WriteStateData() - .setTopicId(topicId) - .setPartitions(util.List.of( - new WriteShareGroupStateRequestData.PartitionData() - .setPartition(1) - .setLeaderEpoch(1) - .setStateEpoch(2) - .setStartOffset(10) - .setStateBatches(util.List.of( - new WriteShareGroupStateRequestData.StateBatch() - .setFirstOffset(11) - .setLastOffset(15) - .setDeliveryCount(1) - .setDeliveryState(0) - )) - )) - )) + assertEquals(2, response.data.groups.size) + response.data.groups.forEach(group => assertEquals(Errors.UNSUPPORTED_VERSION.code(), group.errorCode())) + } - val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = util.List.of( - new WriteShareGroupStateResponseData.WriteStateResult() - .setTopicId(topicId) - .setPartitions(util.List.of( - new WriteShareGroupStateResponseData.PartitionResult() - .setPartition(1) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(null) - )) - ) + @Test + def testShareGroupDescribeRequestAuthorizationFailed(): Unit = { + val groupIds = List("share-group-id-0", "share-group-id-1").asJava + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List().asJava + val authorizer: Authorizer = mock(classOf[Authorizer]) + when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava) + val response = getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") + , false, authorizer, describedGroups) + assertNotNull(response.data) + assertEquals(2, response.data.groups.size) + response.data.groups.forEach(group => assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code(), group.errorCode())) + } + + @Test + def testShareGroupDescribeRequestAuthorizationFailedForOneGroup(): Unit = { + val groupIds = List("share-group-id-fail-0", "share-group-id-1").asJava + val describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup] = List( + new ShareGroupDescribeResponseData.DescribedGroup().setGroupId(groupIds.get(1)) + ).asJava val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - val response = getWriteShareGroupStateResponse( - writeRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, - verifyNoErr = false, - authorizer, - writeStateResultData - ) + val response = getShareGroupDescribeResponse(groupIds, Map(ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true") + , false, authorizer, describedGroups) assertNotNull(response.data) - assertEquals(1, response.data.results.size) - response.data.results.forEach(writeResult => { - assertEquals(1, writeResult.partitions.size) - assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), writeResult.partitions.get(0).errorCode()) - }) + assertEquals(2, response.data.groups.size) + assertEquals(Errors.GROUP_AUTHORIZATION_FAILED.code(), response.data.groups.get(0).errorCode()) + assertEquals(Errors.NONE.code(), response.data.groups.get(1).errorCode()) } @Test - def testDeleteShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid() - val deleteRequestData = new DeleteShareGroupStateRequestData() + def testReadShareGroupStateSuccess(): Unit = { + val topicId = Uuid.randomUuid(); + val readRequestData = new ReadShareGroupStateRequestData() .setGroupId("group1") - .setTopics(util.List.of( - new DeleteShareGroupStateRequestData.DeleteStateData() + .setTopics(List( + new ReadShareGroupStateRequestData.ReadStateData() .setTopicId(topicId) - .setPartitions(util.List.of( - new DeleteShareGroupStateRequestData.PartitionData() + .setPartitions(List( + new ReadShareGroupStateRequestData.PartitionData() .setPartition(1) - )) - )) + .setLeaderEpoch(1) + ).asJava) + ).asJava) - val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = util.List.of( - new DeleteShareGroupStateResponseData.DeleteStateResult() + val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = List( + new ReadShareGroupStateResponseData.ReadStateResult() .setTopicId(topicId) - .setPartitions(util.List.of( - new DeleteShareGroupStateResponseData.PartitionResult() + .setPartitions(List( + new ReadShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - )) + .setStateEpoch(1) + .setStartOffset(10) + .setStateBatches(List( + new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(11) + .setLastOffset(15) + .setDeliveryState(0) + .setDeliveryCount(1) + ).asJava) + ).asJava) + ).asJava + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", ) - val response = getDeleteShareGroupStateResponse( - deleteRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, + val response = getReadShareGroupResponse( + readRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, - deleteStateResultData + readStateResultData ) assertNotNull(response.data) @@ -13425,82 +10461,109 @@ class KafkaApisTest extends Logging { } @Test - def testDeleteShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid() - val deleteRequestData = new DeleteShareGroupStateRequestData() + def testReadShareGroupStateAuthorizationFailed(): Unit = { + val topicId = Uuid.randomUuid(); + val readRequestData = new ReadShareGroupStateRequestData() .setGroupId("group1") - .setTopics(util.List.of( - new DeleteShareGroupStateRequestData.DeleteStateData() + .setTopics(List( + new ReadShareGroupStateRequestData.ReadStateData() .setTopicId(topicId) - .setPartitions(util.List.of( - new DeleteShareGroupStateRequestData.PartitionData() + .setPartitions(List( + new ReadShareGroupStateRequestData.PartitionData() .setPartition(1) - )) - )) + .setLeaderEpoch(1) + ).asJava) + ).asJava) - val deleteStateResultData: util.List[DeleteShareGroupStateResponseData.DeleteStateResult] = util.List.of( - new DeleteShareGroupStateResponseData.DeleteStateResult() + val readStateResultData: util.List[ReadShareGroupStateResponseData.ReadStateResult] = List( + new ReadShareGroupStateResponseData.ReadStateResult() .setTopicId(topicId) - .setPartitions(util.List.of( - new DeleteShareGroupStateResponseData.PartitionResult() + .setPartitions(List( + new ReadShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - )) - ) + .setStateEpoch(1) + .setStartOffset(10) + .setStateBatches(List( + new ReadShareGroupStateResponseData.StateBatch() + .setFirstOffset(11) + .setLastOffset(15) + .setDeliveryState(0) + .setDeliveryCount(1) + ).asJava) + ).asJava) + ).asJava val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", + ) - val response = getDeleteShareGroupStateResponse( - deleteRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, + val response = getReadShareGroupResponse( + readRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, - deleteStateResultData + readStateResultData ) assertNotNull(response.data) assertEquals(1, response.data.results.size) - response.data.results.forEach(deleteResult => { - assertEquals(1, deleteResult.partitions.size) - assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), deleteResult.partitions.get(0).errorCode()) + response.data.results.forEach(readResult => { + assertEquals(1, readResult.partitions.size) + assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), readResult.partitions.get(0).errorCode()) }) } @Test - def testInitializeShareGroupStateSuccess(): Unit = { - val topicId = Uuid.randomUuid() - val initRequestData = new InitializeShareGroupStateRequestData() + def testWriteShareGroupStateSuccess(): Unit = { + val topicId = Uuid.randomUuid(); + val writeRequestData = new WriteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(util.List.of( - new InitializeShareGroupStateRequestData.InitializeStateData() + .setTopics(List( + new WriteShareGroupStateRequestData.WriteStateData() .setTopicId(topicId) - .setPartitions(util.List.of( - new InitializeShareGroupStateRequestData.PartitionData() + .setPartitions(List( + new WriteShareGroupStateRequestData.PartitionData() .setPartition(1) - .setStateEpoch(0) - )) - )) + .setLeaderEpoch(1) + .setStateEpoch(2) + .setStartOffset(10) + .setStateBatches(List( + new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(11) + .setLastOffset(15) + .setDeliveryCount(1) + .setDeliveryState(0) + ).asJava) + ).asJava) + ).asJava) - val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = util.List.of( - new InitializeShareGroupStateResponseData.InitializeStateResult() + val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = List( + new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId) - .setPartitions(util.List.of( - new InitializeShareGroupStateResponseData.PartitionResult() + .setPartitions(List( + new WriteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - )) + ).asJava) + ).asJava + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", ) - val response = getInitializeShareGroupStateResponse( - initRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, + val response = getWriteShareGroupResponse( + writeRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = true, null, - initStateResultData + writeStateResultData ) assertNotNull(response.data) @@ -13508,273 +10571,79 @@ class KafkaApisTest extends Logging { } @Test - def testInitializeShareGroupStateAuthorizationFailed(): Unit = { - val topicId = Uuid.randomUuid() - val initRequestData = new InitializeShareGroupStateRequestData() + def testWriteShareGroupStateAuthorizationFailed(): Unit = { + val topicId = Uuid.randomUuid(); + val writeRequestData = new WriteShareGroupStateRequestData() .setGroupId("group1") - .setTopics(util.List.of( - new InitializeShareGroupStateRequestData.InitializeStateData() + .setTopics(List( + new WriteShareGroupStateRequestData.WriteStateData() .setTopicId(topicId) - .setPartitions(util.List.of( - new InitializeShareGroupStateRequestData.PartitionData() + .setPartitions(List( + new WriteShareGroupStateRequestData.PartitionData() .setPartition(1) - .setStateEpoch(0) - )) - )) + .setLeaderEpoch(1) + .setStateEpoch(2) + .setStartOffset(10) + .setStateBatches(List( + new WriteShareGroupStateRequestData.StateBatch() + .setFirstOffset(11) + .setLastOffset(15) + .setDeliveryCount(1) + .setDeliveryState(0) + ).asJava) + ).asJava) + ).asJava) - val initStateResultData: util.List[InitializeShareGroupStateResponseData.InitializeStateResult] = util.List.of( - new InitializeShareGroupStateResponseData.InitializeStateResult() + val writeStateResultData: util.List[WriteShareGroupStateResponseData.WriteStateResult] = List( + new WriteShareGroupStateResponseData.WriteStateResult() .setTopicId(topicId) - .setPartitions(util.List.of( - new InitializeShareGroupStateResponseData.PartitionResult() + .setPartitions(List( + new WriteShareGroupStateResponseData.PartitionResult() .setPartition(1) .setErrorCode(Errors.NONE.code()) .setErrorMessage(null) - )) + ).asJava) + ).asJava + + val config = Map( + ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG -> "true", ) val authorizer: Authorizer = mock(classOf[Authorizer]) when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(util.List.of(AuthorizationResult.DENIED), util.List.of(AuthorizationResult.ALLOWED)) + .thenReturn(Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - val response = getInitializeShareGroupStateResponse( - initRequestData, - ShareCoordinatorTestConfig.testConfigMap().asScala, + val response = getWriteShareGroupResponse( + writeRequestData, + config ++ ShareCoordinatorTestConfig.testConfigMap().asScala, verifyNoErr = false, authorizer, - initStateResultData + writeStateResultData ) assertNotNull(response.data) assertEquals(1, response.data.results.size) - response.data.results.forEach(deleteResult => { - assertEquals(1, deleteResult.partitions.size) - assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), deleteResult.partitions.get(0).errorCode()) - }) - } - - @Test - def testAlterShareGroupOffsetsReturnsUnsupportedVersion(): Unit = { - val alterShareGroupOffsetsRequest = new AlterShareGroupOffsetsRequestData() - .setGroupId("group") - .setTopics( - new AlterShareGroupOffsetsRequestTopicCollection( - util.List.of( - new AlterShareGroupOffsetsRequestTopic() - .setTopicName("topic-1") - .setPartitions(util.List.of( - new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(0).setStartOffset(0), - new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(1).setStartOffset(0)) - ), - new AlterShareGroupOffsetsRequestTopic() - .setTopicName("topic-2") - .setPartitions(util.List.of( - new AlterShareGroupOffsetsRequestPartition().setPartitionIndex(0).setStartOffset(0)) - ) - ).iterator() - ) - ) - - val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterShareGroupOffsetsRequest).build()) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups = false) - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) - response.data.responses.forEach(topic => { - topic.partitions().forEach(partition => assertEquals(Errors.UNSUPPORTED_VERSION.code, partition.errorCode)) - }) - } - - @Test - def testAlterShareGroupOffsetsSuccess(): Unit = { - val groupId = "group" - val topicName1 = "foo" - val topicId1 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 2, topicId = topicId1) - val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection(); - topicCollection.addAll(util.List.of( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - .setPartitions(List( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0L), - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(1) - .setStartOffset(0L) - ).asJava))) - - val alterRequestData = new AlterShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(topicCollection) - - val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build) - val resultFuture = new CompletableFuture[AlterShareGroupOffsetsResponseData] - when(groupCoordinator.alterShareGroupOffsets( - any(), - ArgumentMatchers.eq[String](groupId), - ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData]) - )).thenReturn(resultFuture) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val alterShareGroupOffsetsResponse = new AlterShareGroupOffsetsResponseData() - resultFuture.complete(alterShareGroupOffsetsResponse) - val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(alterShareGroupOffsetsResponse, response.data) - } - - @Test - def testAlterShareGroupOffsetsAuthorizationFailed(): Unit = { - val groupId = "group" - val topicName1 = "foo" - val topicId1 = Uuid.randomUuid - val topicName2 = "bar" - val topicId2 = Uuid.randomUuid - val topicName3 = "zoo" - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 2, topicId = topicId1) - addTopicToMetadataCache(topicName2, 1, topicId = topicId2) - val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection(); - topicCollection.addAll(util.List.of( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - .setPartitions(List( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0L), - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(1) - .setStartOffset(0L) - ).asJava), - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topicName2) - .setPartitions(List( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0L) - ).asJava), - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topicName3) - setPartitions(List( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0L) - ).asJava)) - ) - - val authorizer: Authorizer = mock(classOf[Authorizer]) - when(authorizer.authorize(any[RequestContext], any[util.List[Action]])) - .thenReturn(Seq(AuthorizationResult.ALLOWED).asJava, Seq(AuthorizationResult.DENIED).asJava, Seq(AuthorizationResult.ALLOWED).asJava, Seq(AuthorizationResult.ALLOWED).asJava) - - val alterRequestData = new AlterShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(topicCollection) - - val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build) - val resultFuture = new CompletableFuture[AlterShareGroupOffsetsResponseData] - when(groupCoordinator.alterShareGroupOffsets( - any(), - ArgumentMatchers.eq[String](groupId), - ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData]) - )).thenReturn(resultFuture) - - kafkaApis = createKafkaApis(authorizer = Some(authorizer)) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val alterShareGroupOffsetsResponse = new AlterShareGroupOffsetsResponseData() - .setResponses(new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopicCollection(util.List.of( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponseTopic() - .setTopicName(topicName2) - .setTopicId(topicId2) - .setPartitions(List( - new AlterShareGroupOffsetsResponseData.AlterShareGroupOffsetsResponsePartition() - .setPartitionIndex(0) - .setErrorCode(Errors.NONE.code()) - .setErrorMessage(Errors.NONE.message()) - ).asJava) - ).iterator)) - resultFuture.complete(alterShareGroupOffsetsResponse) - val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) - - assertNotNull(response.data) - assertEquals(1, response.errorCounts().get(Errors.UNKNOWN_TOPIC_OR_PARTITION)) - assertEquals(2, response.errorCounts().get(Errors.TOPIC_AUTHORIZATION_FAILED)) - assertEquals(3, response.data().responses().size()) - - val bar = response.data().responses().find("bar") - val foo = response.data().responses().find("foo") - val zoo = response.data().responses().find("zoo") - assertEquals(topicName1, foo.topicName()) - assertEquals(topicId1, foo.topicId()) - assertEquals(topicName2, bar.topicName()) - assertEquals(topicId2, bar.topicId()) - assertEquals(topicName3, zoo.topicName()) - assertEquals(Uuid.ZERO_UUID, zoo.topicId()) - foo.partitions().forEach(partition => { - assertEquals(Errors.TOPIC_AUTHORIZATION_FAILED.code(), partition.errorCode()) + response.data.results.forEach(writeResult => { + assertEquals(1, writeResult.partitions.size) + assertEquals(Errors.CLUSTER_AUTHORIZATION_FAILED.code(), writeResult.partitions.get(0).errorCode()) }) } - @Test - def testAlterShareGroupOffsetsRequestGroupCoordinatorThrowsError(): Unit = { - val groupId = "group" - val topicName1 = "foo" - val topicId1 = Uuid.randomUuid - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - addTopicToMetadataCache(topicName1, 2, topicId = topicId1) - val topicCollection = new AlterShareGroupOffsetsRequestTopicCollection(); - topicCollection.addAll(util.List.of( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestTopic() - .setTopicName(topicName1) - .setPartitions(List( - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(0) - .setStartOffset(0L), - new AlterShareGroupOffsetsRequestData.AlterShareGroupOffsetsRequestPartition() - .setPartitionIndex(1) - .setStartOffset(0L) - ).asJava))) - - val alterRequestData = new AlterShareGroupOffsetsRequestData() - .setGroupId(groupId) - .setTopics(topicCollection) - - val requestChannelRequest = buildRequest(new AlterShareGroupOffsetsRequest.Builder(alterRequestData).build) - when(groupCoordinator.alterShareGroupOffsets( - any(), - ArgumentMatchers.eq[String](groupId), - ArgumentMatchers.any(classOf[AlterShareGroupOffsetsRequestData]) - )).thenReturn(CompletableFuture.failedFuture(Errors.UNKNOWN_SERVER_ERROR.exception)) - - kafkaApis = createKafkaApis() - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) - - val alterShareGroupOffsetsResponseData = new AlterShareGroupOffsetsResponseData() - .setErrorMessage(Errors.UNKNOWN_SERVER_ERROR.message()) - .setErrorCode(Errors.UNKNOWN_SERVER_ERROR.code()) - - val response = verifyNoThrottling[AlterShareGroupOffsetsResponse](requestChannelRequest) - assertEquals(alterShareGroupOffsetsResponseData, response.data) - } - - def getShareGroupDescribeResponse(groupIds: util.List[String], enableShareGroups: Boolean = true, + def getShareGroupDescribeResponse(groupIds: util.List[String], configOverrides: Map[String, String] = Map.empty, verifyNoErr: Boolean = true, authorizer: Authorizer = null, describedGroups: util.List[ShareGroupDescribeResponseData.DescribedGroup]): ShareGroupDescribeResponse = { val shareGroupDescribeRequestData = new ShareGroupDescribeRequestData() shareGroupDescribeRequestData.groupIds.addAll(groupIds) - val requestChannelRequest = buildRequest(new ShareGroupDescribeRequest.Builder(shareGroupDescribeRequestData).build()) + val requestChannelRequest = buildRequest(new ShareGroupDescribeRequest.Builder(shareGroupDescribeRequestData, true).build()) val future = new CompletableFuture[util.List[ShareGroupDescribeResponseData.DescribedGroup]]() when(groupCoordinator.shareGroupDescribe( any[RequestContext], any[util.List[String]] )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled(enableShareGroups) + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( + overrideProperties = configOverrides, authorizer = Option(authorizer), ) kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching) @@ -13790,17 +10659,17 @@ class KafkaApisTest extends Logging { response } - def getReadShareGroupStateResponse(requestData: ReadShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - readStateResult: util.List[ReadShareGroupStateResponseData.ReadStateResult]): ReadShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new ReadShareGroupStateRequest.Builder(requestData).build()) + def getReadShareGroupResponse(requestData: ReadShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + readStateResult: util.List[ReadShareGroupStateResponseData.ReadStateResult]): ReadShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new ReadShareGroupStateRequest.Builder(requestData, true).build()) val future = new CompletableFuture[ReadShareGroupStateResponseData]() when(shareCoordinator.readState( any[RequestContext], any[ReadShareGroupStateRequestData] )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -13819,46 +10688,17 @@ class KafkaApisTest extends Logging { response } - def getReadShareGroupStateSummaryResponse(requestData: ReadShareGroupStateSummaryRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - readStateSummaryResult: util.List[ReadShareGroupStateSummaryResponseData.ReadStateSummaryResult]): ReadShareGroupStateSummaryResponse = { - val requestChannelRequest = buildRequest(new ReadShareGroupStateSummaryRequest.Builder(requestData).build()) - - val future = new CompletableFuture[ReadShareGroupStateSummaryResponseData]() - when(shareCoordinator.readStateSummary( - any[RequestContext], - any[ReadShareGroupStateSummaryRequestData] - )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - overrideProperties = configOverrides, - authorizer = Option(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) - - future.complete(new ReadShareGroupStateSummaryResponseData() - .setResults(readStateSummaryResult)) - - val response = verifyNoThrottling[ReadShareGroupStateSummaryResponse](requestChannelRequest) - if (verifyNoErr) { - val expectedReadShareGroupStateSummaryResponseData = new ReadShareGroupStateSummaryResponseData() - .setResults(readStateSummaryResult) - assertEquals(expectedReadShareGroupStateSummaryResponseData, response.data) - } - response - } - - def getWriteShareGroupStateResponse(requestData: WriteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - writeStateResult: util.List[WriteShareGroupStateResponseData.WriteStateResult]): WriteShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new WriteShareGroupStateRequest.Builder(requestData).build()) + def getWriteShareGroupResponse(requestData: WriteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, + verifyNoErr: Boolean = true, authorizer: Authorizer = null, + writeStateResult: util.List[WriteShareGroupStateResponseData.WriteStateResult]): WriteShareGroupStateResponse = { + val requestChannelRequest = buildRequest(new WriteShareGroupStateRequest.Builder(requestData, true).build()) val future = new CompletableFuture[WriteShareGroupStateResponseData]() when(shareCoordinator.writeState( any[RequestContext], any[WriteShareGroupStateRequestData] )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() + metadataCache = MetadataCache.kRaftMetadataCache(brokerId, () => KRaftVersion.KRAFT_VERSION_0) kafkaApis = createKafkaApis( overrideProperties = configOverrides, authorizer = Option(authorizer), @@ -13876,62 +10716,4 @@ class KafkaApisTest extends Logging { } response } - - def getDeleteShareGroupStateResponse(requestData: DeleteShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - deleteStateResult: util.List[DeleteShareGroupStateResponseData.DeleteStateResult]): DeleteShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new DeleteShareGroupStateRequest.Builder(requestData).build()) - - val future = new CompletableFuture[DeleteShareGroupStateResponseData]() - when(shareCoordinator.deleteState( - any[RequestContext], - any[DeleteShareGroupStateRequestData] - )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - overrideProperties = configOverrides, - authorizer = Option(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) - - future.complete(new DeleteShareGroupStateResponseData() - .setResults(deleteStateResult)) - - val response = verifyNoThrottling[DeleteShareGroupStateResponse](requestChannelRequest) - if (verifyNoErr) { - val expectedDeleteShareGroupStateResponseData = new DeleteShareGroupStateResponseData() - .setResults(deleteStateResult) - assertEquals(expectedDeleteShareGroupStateResponseData, response.data) - } - response - } - - def getInitializeShareGroupStateResponse(requestData: InitializeShareGroupStateRequestData, configOverrides: Map[String, String] = Map.empty, - verifyNoErr: Boolean = true, authorizer: Authorizer = null, - initStateResult: util.List[InitializeShareGroupStateResponseData.InitializeStateResult]): InitializeShareGroupStateResponse = { - val requestChannelRequest = buildRequest(new InitializeShareGroupStateRequest.Builder(requestData).build()) - - val future = new CompletableFuture[InitializeShareGroupStateResponseData]() - when(shareCoordinator.initializeState( - any[RequestContext], - any[InitializeShareGroupStateRequestData] - )).thenReturn(future) - metadataCache = initializeMetadataCacheWithShareGroupsEnabled() - kafkaApis = createKafkaApis( - overrideProperties = configOverrides, - authorizer = Option(authorizer), - ) - kafkaApis.handle(requestChannelRequest, RequestLocal.noCaching()) - - future.complete(new InitializeShareGroupStateResponseData() - .setResults(initStateResult)) - - val response = verifyNoThrottling[InitializeShareGroupStateResponse](requestChannelRequest) - if (verifyNoErr) { - val expectedInitShareGroupStateResponseData = new InitializeShareGroupStateResponseData() - .setResults(initStateResult) - assertEquals(expectedInitShareGroupStateResponseData, response.data) - } - response - } } diff --git a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala index dc24a36951527..70111b5fde883 100755 --- a/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala @@ -20,34 +20,32 @@ package kafka.server import java.net.InetSocketAddress import java.util import java.util.{Arrays, Collections, Properties} +import kafka.cluster.EndPoint import kafka.utils.TestUtils.assertBadConfigContainingMessage import kafka.utils.{CoreUtils, TestUtils} -import org.apache.kafka.common.{Endpoint, Node} -import org.apache.kafka.common.config.{AbstractConfig, ConfigException, SaslConfigs, SecurityConfig, SslConfigs, TopicConfig} +import org.apache.kafka.common.Node +import org.apache.kafka.common.config.{ConfigException, SaslConfigs, SecurityConfig, SslConfigs, TopicConfig} import org.apache.kafka.common.metrics.Sensor import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.record.{CompressionType, Records} import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.config.internals.BrokerSecurityConfigs -import org.apache.kafka.common.utils.LogCaptureAppender import org.apache.kafka.coordinator.group.ConsumerGroupMigrationPolicy import org.apache.kafka.coordinator.group.Group.GroupType import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.coordinator.transaction.{TransactionLogConfig, TransactionStateManagerConfig} import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfigs, QuotaConfig, ReplicationConfigs, ServerConfigs, ServerLogConfigs, ServerTopicConfigSynonyms} import org.apache.kafka.server.log.remote.storage.RemoteLogManagerConfig import org.apache.kafka.server.metrics.MetricConfigs import org.apache.kafka.storage.internals.log.CleanerConfig -import org.apache.logging.log4j.Level import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import org.junit.jupiter.api.function.Executable import scala.jdk.CollectionConverters._ -import scala.util.Using class KafkaConfigTest { @@ -223,7 +221,7 @@ class KafkaConfigTest { // but not duplicate names props.setProperty(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, "HOST://localhost:9091,HOST://localhost:9091") - assertBadConfigContainingMessage(props, "Configuration 'advertised.listeners' values must not be duplicated.") + assertBadConfigContainingMessage(props, "Each listener must have a different name") } @Test @@ -248,8 +246,8 @@ class KafkaConfigTest { assertTrue(caught.getMessage.contains("If you have two listeners on the same port then one needs to be IPv4 and the other IPv6")) props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092,PLAINTEXT://127.0.0.1:9092") - val exception = assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)) - assertTrue(exception.getMessage.contains("values must not be duplicated.")) + caught = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) + assertTrue(caught.getMessage.contains("Each listener must have a different name")) props.put(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://127.0.0.1:9092,SSL://127.0.0.1:9092,SASL_SSL://127.0.0.1:9092") caught = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)) @@ -301,8 +299,7 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") - assertBadConfigContainingMessage(props, - "Missing required configuration \"controller.listener.names\" which has no default value.") + assertBadConfigContainingMessage(props, "The listeners config must only contain KRaft controller listeners from controller.listener.names when process.roles=controller") props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") KafkaConfig.fromProps(props) @@ -322,8 +319,7 @@ class KafkaConfigTest { props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") assertFalse(isValidKafkaConfig(props)) - assertBadConfigContainingMessage(props, - "Missing required configuration \"controller.listener.names\" which has no default value.") + assertBadConfigContainingMessage(props, "controller.listener.names must contain at least one value when running KRaft with just the broker role") props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") KafkaConfig.fromProps(props) @@ -346,7 +342,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( - Seq(new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "lb1.example.com", 9000)), + Seq(EndPoint("lb1.example.com", 9000, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT)), config.effectiveAdvertisedControllerListeners ) } @@ -362,7 +358,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( - Seq(new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "localhost", 9093)), + Seq(EndPoint("localhost", 9093, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT)), config.effectiveAdvertisedControllerListeners ) } @@ -380,8 +376,8 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals( Seq( - new Endpoint("CONTROLLER", SecurityProtocol.PLAINTEXT, "lb1.example.com", 9000), - new Endpoint("CONTROLLER_NEW", SecurityProtocol.PLAINTEXT, "localhost", 9094) + EndPoint("lb1.example.com", 9000, ListenerName.normalised("CONTROLLER"), SecurityProtocol.PLAINTEXT), + EndPoint("localhost", 9094, ListenerName.normalised("CONTROLLER_NEW"), SecurityProtocol.PLAINTEXT) ), config.effectiveAdvertisedControllerListeners ) @@ -445,7 +441,7 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") val controllerListenerName = new ListenerName("CONTROLLER") - assertEquals(SecurityProtocol.PLAINTEXT, + assertEquals(Some(SecurityProtocol.PLAINTEXT), KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(controllerListenerName)) // ensure we don't map it to PLAINTEXT when there is a SSL or SASL controller listener props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER,SSL") @@ -458,7 +454,7 @@ class KafkaConfigTest { props.remove(SocketServerConfigs.LISTENERS_CONFIG) // ensure we don't map it to PLAINTEXT when it is explicitly mapped otherwise props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,CONTROLLER:SSL") - assertEquals(SecurityProtocol.SSL, + assertEquals(Some(SecurityProtocol.SSL), KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(controllerListenerName)) // ensure we don't map it to PLAINTEXT when anything is explicitly given // (i.e. it is only part of the default value, even with KRaft) @@ -467,7 +463,7 @@ class KafkaConfigTest { // ensure we can map it to a non-PLAINTEXT security protocol by default (i.e. when nothing is given) props.remove(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG) props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - assertEquals(SecurityProtocol.SSL, + assertEquals(Some(SecurityProtocol.SSL), KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("SSL"))) } @@ -479,9 +475,9 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER1,CONTROLLER2") props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "1@localhost:9092") - assertEquals(SecurityProtocol.PLAINTEXT, + assertEquals(Some(SecurityProtocol.PLAINTEXT), KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER1"))) - assertEquals(SecurityProtocol.PLAINTEXT, + assertEquals(Some(SecurityProtocol.PLAINTEXT), KafkaConfig.fromProps(props).effectiveListenerSecurityProtocolMap.get(new ListenerName("CONTROLLER2"))) } @@ -510,16 +506,16 @@ class KafkaConfigTest { props.setProperty(ReplicationConfigs.INTER_BROKER_LISTENER_NAME_CONFIG, "REPLICATION") val config = KafkaConfig.fromProps(props) val expectedListeners = Seq( - new Endpoint("CLIENT", SecurityProtocol.SSL, "localhost", 9091), - new Endpoint("REPLICATION", SecurityProtocol.SSL, "localhost", 9092), - new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093)) + EndPoint("localhost", 9091, new ListenerName("CLIENT"), SecurityProtocol.SSL), + EndPoint("localhost", 9092, new ListenerName("REPLICATION"), SecurityProtocol.SSL), + EndPoint("localhost", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT)) assertEquals(expectedListeners, config.listeners) assertEquals(expectedListeners, config.effectiveAdvertisedBrokerListeners) - val expectedSecurityProtocolMap = util.Map.of( - new ListenerName("CLIENT"), SecurityProtocol.SSL, - new ListenerName("REPLICATION"), SecurityProtocol.SSL, - new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT, - new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT + val expectedSecurityProtocolMap = Map( + new ListenerName("CLIENT") -> SecurityProtocol.SSL, + new ListenerName("REPLICATION") -> SecurityProtocol.SSL, + new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -539,21 +535,21 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) val expectedListeners = Seq( - new Endpoint("EXTERNAL", SecurityProtocol.SSL, "localhost", 9091), - new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "localhost", 9093) + EndPoint("localhost", 9091, new ListenerName("EXTERNAL"), SecurityProtocol.SSL), + EndPoint("localhost", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT) ) assertEquals(expectedListeners, config.listeners) val expectedAdvertisedListeners = Seq( - new Endpoint("EXTERNAL", SecurityProtocol.SSL, "lb1.example.com", 9000), - new Endpoint("INTERNAL", SecurityProtocol.PLAINTEXT, "host1", 9093) + EndPoint("lb1.example.com", 9000, new ListenerName("EXTERNAL"), SecurityProtocol.SSL), + EndPoint("host1", 9093, new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT) ) assertEquals(expectedAdvertisedListeners, config.effectiveAdvertisedBrokerListeners) - val expectedSecurityProtocolMap = util.Map.of( - new ListenerName("EXTERNAL"), SecurityProtocol.SSL, - new ListenerName("INTERNAL"), SecurityProtocol.PLAINTEXT, - new ListenerName("CONTROLLER"), SecurityProtocol.PLAINTEXT + val expectedSecurityProtocolMap = Map( + new ListenerName("EXTERNAL") -> SecurityProtocol.SSL, + new ListenerName("INTERNAL") -> SecurityProtocol.PLAINTEXT, + new ListenerName("CONTROLLER") -> SecurityProtocol.PLAINTEXT ) assertEquals(expectedSecurityProtocolMap, config.effectiveListenerSecurityProtocolMap) } @@ -596,21 +592,12 @@ class KafkaConfigTest { props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "plaintext://localhost:9091,SsL://localhost:9092") props.setProperty(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, "PLAINTEXT:PLAINTEXT,SSL:SSL,CONTROLLER:PLAINTEXT") val config = KafkaConfig.fromProps(props) - assertEndpointsEqual(new Endpoint("SSL", SecurityProtocol.SSL, "localhost", 9092), - config.listeners.find(_.listener == "SSL").getOrElse(fail("SSL endpoint not found"))) - assertEndpointsEqual( new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "localhost", 9091), - config.listeners.find(_.listener == "PLAINTEXT").getOrElse(fail("PLAINTEXT endpoint not found"))) + assertEquals(Some("SSL://localhost:9092"), config.listeners.find(_.listenerName.value == "SSL").map(_.connectionString)) + assertEquals(Some("PLAINTEXT://localhost:9091"), config.listeners.find(_.listenerName.value == "PLAINTEXT").map(_.connectionString)) } - private def assertEndpointsEqual(expected: Endpoint, actual: Endpoint): Unit = { - assertEquals(expected.host(), actual.host(), "Host mismatch") - assertEquals(expected.port(), actual.port(), "Port mismatch") - assertEquals(expected.listener(), actual.listener(), "Listener mismatch") - assertEquals(expected.securityProtocol(), actual.securityProtocol(), "Security protocol mismatch") - } - - private def listenerListToEndPoints(listenerList: java.util.List[String], - securityProtocolMap: util.Map[ListenerName, SecurityProtocol] = SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO) = + private def listenerListToEndPoints(listenerList: String, + securityProtocolMap: collection.Map[ListenerName, SecurityProtocol] = SocketServerConfigs.DEFAULT_NAME_TO_SECURITY_PROTO.asScala) = CoreUtils.listenerListToEndPoints(listenerList, securityProtocolMap) @Test @@ -623,9 +610,9 @@ class KafkaConfigTest { // configuration with no listeners val conf = KafkaConfig.fromProps(props) - assertEquals(listenerListToEndPoints(util.List.of("PLAINTEXT://:9092")), conf.listeners) + assertEquals(listenerListToEndPoints("PLAINTEXT://:9092"), conf.listeners) assertNull(conf.listeners.find(_.securityProtocol == SecurityProtocol.PLAINTEXT).get.host) - assertEquals(conf.effectiveAdvertisedBrokerListeners, listenerListToEndPoints(util.List.of("PLAINTEXT://:9092"))) + assertEquals(conf.effectiveAdvertisedBrokerListeners, listenerListToEndPoints("PLAINTEXT://:9092")) } private def isValidKafkaConfig(props: Properties): Boolean = { @@ -787,7 +774,6 @@ class KafkaConfigTest { KafkaConfig.configNames.foreach { name => name match { - case AbstractConfig.CONFIG_PROVIDERS_CONFIG => // ignore string case ServerConfigs.BROKER_ID_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerConfigs.NUM_IO_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") case ServerConfigs.BACKGROUND_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") @@ -802,17 +788,13 @@ class KafkaConfigTest { case KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case KRaftConfigs.NODE_ID_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case MetadataLogConfig.METADATA_LOG_DIR_CONFIG => // ignore string - case MetadataLogConfig.METADATA_LOG_SEGMENT_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG => // no op - case MetadataLogConfig.INTERNAL_METADATA_MAX_BATCH_SIZE_IN_BYTES_CONFIG => // no op - case MetadataLogConfig.INTERNAL_METADATA_MAX_FETCH_SIZE_IN_BYTES_CONFIG => // no op - case MetadataLogConfig.INTERNAL_METADATA_DELETE_DELAY_MILLIS_CONFIG => // no op + case KRaftConfigs.METADATA_LOG_DIR_CONFIG => // ignore string + case KRaftConfigs.METADATA_LOG_SEGMENT_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") + case KRaftConfigs.METADATA_LOG_SEGMENT_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") + case KRaftConfigs.METADATA_MAX_RETENTION_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") + case KRaftConfigs.METADATA_MAX_RETENTION_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG => // ignore string - case MetadataLogConfig.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") + case KRaftConfigs.METADATA_MAX_IDLE_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ServerConfigs.AUTHORIZER_CLASS_NAME_CONFIG => //ignore string case ServerLogConfigs.CREATE_TOPIC_POLICY_CLASS_NAME_CONFIG => //ignore string @@ -829,8 +811,8 @@ class KafkaConfigTest { case SocketServerConfigs.NUM_NETWORK_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") case ServerLogConfigs.NUM_PARTITIONS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") - case ServerLogConfigs.LOG_DIRS_CONFIG => assertPropertyInvalid(baseProperties, name, "") - case ServerLogConfigs.LOG_DIR_CONFIG => assertPropertyInvalid(baseProperties, name, "") + case ServerLogConfigs.LOG_DIRS_CONFIG => // ignore string + case ServerLogConfigs.LOG_DIR_CONFIG => // ignore string case ServerLogConfigs.LOG_SEGMENT_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", Records.LOG_OVERHEAD - 1) case ServerLogConfigs.LOG_ROLL_TIME_MILLIS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0") @@ -871,7 +853,7 @@ class KafkaConfigTest { case ReplicationConfigs.REPLICA_FETCH_MIN_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.REPLICA_FETCH_RESPONSE_MAX_BYTES_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG => // Ignore string - case ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", "0", "-1") + case ReplicationConfigs.NUM_REPLICA_FETCHERS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.REPLICA_HIGH_WATERMARK_CHECKPOINT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") case ReplicationConfigs.PRODUCER_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") @@ -960,33 +942,16 @@ class KafkaConfigTest { case SaslConfigs.SASL_LOGIN_READ_TIMEOUT_MS => case SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MAX_MS => case SaslConfigs.SASL_LOGIN_RETRY_BACKOFF_MS => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_ALGORITHM => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_AUD => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_EXP_SECONDS => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_ISS => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_JTI_INCLUDE => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_NBF_SECONDS => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_CLAIM_SUB => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_FILE => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_FILE => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_PRIVATE_KEY_PASSPHRASE => - case SaslConfigs.SASL_OAUTHBEARER_ASSERTION_TEMPLATE_FILE => - case SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_ID => - case SaslConfigs.SASL_OAUTHBEARER_CLIENT_CREDENTIALS_CLIENT_SECRET => - case SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS => - case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE => - case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER => - case SaslConfigs.SASL_OAUTHBEARER_HEADER_URLENCODE => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS => - case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL => - case SaslConfigs.SASL_OAUTHBEARER_JWT_RETRIEVER_CLASS => - case SaslConfigs.SASL_OAUTHBEARER_JWT_VALIDATOR_CLASS => - case SaslConfigs.SASL_OAUTHBEARER_SCOPE => case SaslConfigs.SASL_OAUTHBEARER_SCOPE_CLAIM_NAME => case SaslConfigs.SASL_OAUTHBEARER_SUB_CLAIM_NAME => case SaslConfigs.SASL_OAUTHBEARER_TOKEN_ENDPOINT_URL => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_URL => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_REFRESH_MS => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MAX_MS => + case SaslConfigs.SASL_OAUTHBEARER_JWKS_ENDPOINT_RETRY_BACKOFF_MS => + case SaslConfigs.SASL_OAUTHBEARER_CLOCK_SKEW_SECONDS => + case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_AUDIENCE => + case SaslConfigs.SASL_OAUTHBEARER_EXPECTED_ISSUER => // Security config case SecurityConfig.SECURITY_PROVIDERS_CONFIG => @@ -1024,7 +989,6 @@ class KafkaConfigTest { case RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_LISTENER_NAME_PROP => // ignore string case RemoteLogManagerConfig.REMOTE_LOG_INDEX_FILE_CACHE_TOTAL_SIZE_BYTES_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_FOLLOWER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_COPIER_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_EXPIRATION_THREAD_POOL_SIZE_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1, -2) case RemoteLogManagerConfig.REMOTE_LOG_MANAGER_TASK_INTERVAL_MS_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) @@ -1037,6 +1001,7 @@ class KafkaConfigTest { case RemoteLogManagerConfig.LOG_LOCAL_RETENTION_BYTES_PROP => assertPropertyInvalid(baseProperties, name, "not_a_number", -3) /** New group coordinator configs */ + case GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG => // ignore case GroupCoordinatorConfig.GROUP_COORDINATOR_NUM_THREADS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) /** Consumer groups configs */ @@ -1061,21 +1026,12 @@ class KafkaConfigTest { case ShareGroupConfig.SHARE_GROUP_MIN_RECORD_LOCK_DURATION_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_GROUP_MAX_RECORD_LOCK_DURATION_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_GROUP_PARTITION_MAX_RECORD_LOCKS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) + case ShareGroupConfig.SHARE_GROUP_MAX_GROUPS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case GroupCoordinatorConfig.SHARE_GROUP_MAX_SIZE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_FETCH_PURGATORY_PURGE_INTERVAL_REQUESTS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - case ShareGroupConfig.SHARE_GROUP_MAX_SHARE_SESSIONS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) case ShareGroupConfig.SHARE_GROUP_PERSISTER_CLASS_NAME_CONFIG => //ignore string + case ShareGroupConfig.SHARE_FETCH_MAX_FETCH_RECORDS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number") - /** Streams groups configs */ - case GroupCoordinatorConfig.STREAMS_GROUP_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MIN_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MAX_SESSION_TIMEOUT_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_HEARTBEAT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MIN_HEARTBEAT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MAX_HEARTBEAT_INTERVAL_MS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MAX_SIZE_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", 0, -1) - case GroupCoordinatorConfig.STREAMS_GROUP_NUM_STANDBY_REPLICAS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", -1) - case GroupCoordinatorConfig.STREAMS_GROUP_MAX_STANDBY_REPLICAS_CONFIG => assertPropertyInvalid(baseProperties, name, "not_a_number", -1) case _ => assertPropertyInvalid(baseProperties, name, "not_a_number", "-1") } @@ -1171,8 +1127,6 @@ class KafkaConfigTest { // topic only config case QuotaConfig.LEADER_REPLICATION_THROTTLED_REPLICAS_CONFIG => // topic only config - case "internal.segment.bytes" => - // topic internal config case prop => fail(prop + " must be explicitly checked for dynamic updatability. Note that LogConfig(s) require that KafkaConfig value lookups are dynamic and not static values.") } @@ -1200,16 +1154,23 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(defaults) assertEquals(1, config.brokerId) - assertEndpointsEqual(new Endpoint("PLAINTEXT", SecurityProtocol.PLAINTEXT, "127.0.0.1", 1122), - config.effectiveAdvertisedBrokerListeners.head) + assertEquals(Seq("PLAINTEXT://127.0.0.1:1122"), config.effectiveAdvertisedBrokerListeners.map(_.connectionString)) assertEquals(Map("127.0.0.1" -> 2, "127.0.0.2" -> 3), config.maxConnectionsPerIpOverrides) - assertEquals(util.List.of("/tmp1", "/tmp2"), config.logDirs) + assertEquals(List("/tmp1", "/tmp2"), config.logDirs) assertEquals(12 * 60L * 1000L * 60, config.logRollTimeMillis) assertEquals(11 * 60L * 1000L * 60, config.logRollTimeJitterMillis) assertEquals(10 * 60L * 1000L * 60, config.logRetentionTimeMillis) assertEquals(123L, config.logFlushIntervalMs) assertEquals(CompressionType.SNAPPY, config.groupCoordinatorConfig.offsetTopicCompressionType) assertEquals(Sensor.RecordingLevel.DEBUG.toString, config.metricRecordingLevel) + assertEquals(false, config.tokenAuthEnabled) + assertEquals(7 * 24 * 60L * 60L * 1000L, config.delegationTokenMaxLifeMs) + assertEquals(24 * 60L * 60L * 1000L, config.delegationTokenExpiryTimeMs) + assertEquals(1 * 60L * 1000L * 60, config.delegationTokenExpiryCheckIntervalMs) + + defaults.setProperty(DelegationTokenManagerConfigs.DELEGATION_TOKEN_SECRET_KEY_CONFIG, "1234567890") + val config1 = KafkaConfig.fromProps(defaults) + assertEquals(true, config1.tokenAuthEnabled) } @Test @@ -1494,18 +1455,6 @@ class KafkaConfigTest { assertEquals(expected, addresses) } - @Test - def testInvalidQuorumAutoJoinForKRaftBroker(): Unit = { - val props = TestUtils.createBrokerConfig(0) - props.setProperty(QuorumConfig.QUORUM_AUTO_JOIN_ENABLE_CONFIG, String.valueOf(true)) - assertEquals( - "requirement failed: controller.quorum.auto.join.enable is only " + - "supported when process.roles contains the 'controller' role.", - assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage - ) - - } - @Test def testAcceptsLargeId(): Unit = { val largeBrokerId = 2000 @@ -1542,7 +1491,7 @@ class KafkaConfigTest { val props = new Properties() props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") - props.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, metadataDir) + props.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, metadataDir) props.setProperty(ServerLogConfigs.LOG_DIR_CONFIG, dataDir) props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "1") props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093") @@ -1550,7 +1499,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals(metadataDir, config.metadataLogDir) - assertEquals(util.List.of(dataDir), config.logDirs) + assertEquals(Seq(dataDir), config.logDirs) } @Test @@ -1568,7 +1517,7 @@ class KafkaConfigTest { val config = KafkaConfig.fromProps(props) assertEquals(dataDir1, config.metadataLogDir) - assertEquals(util.List.of(dataDir1, dataDir2), config.logDirs) + assertEquals(Seq(dataDir1, dataDir2), config.logDirs) } @Test @@ -1602,7 +1551,6 @@ class KafkaConfigTest { props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") props.setProperty(ServerConfigs.BROKER_ID_CONFIG, "1") props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2") - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "CONTROLLER") assertEquals("You must set `node.id` to the same value as `broker.id`.", assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage()) } @@ -1723,12 +1671,12 @@ class KafkaConfigTest { val validValue = 100 val props = new Properties() props.putAll(kraftProps()) - props.setProperty(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, validValue.toString) + props.setProperty(KRaftConfigs.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, validValue.toString) val config = KafkaConfig.fromProps(props) assertEquals(validValue, config.metadataSnapshotMaxIntervalMs) - props.setProperty(MetadataLogConfig.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, "-1") + props.setProperty(KRaftConfigs.METADATA_SNAPSHOT_MAX_INTERVAL_MS_CONFIG, "-1") val errorMessage = assertThrows(classOf[ConfigException], () => KafkaConfig.fromProps(props)).getMessage assertEquals( @@ -1794,15 +1742,15 @@ class KafkaConfigTest { props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer") var config = KafkaConfig.fromProps(props) assertEquals(Set(GroupType.CLASSIC, GroupType.CONSUMER), config.groupCoordinatorRebalanceProtocols) + assertTrue(config.isNewGroupCoordinatorEnabled) + assertFalse(config.shareGroupConfig.isShareGroupEnabled) // This is OK. props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,consumer,share") config = KafkaConfig.fromProps(props) assertEquals(Set(GroupType.CLASSIC, GroupType.CONSUMER, GroupType.SHARE), config.groupCoordinatorRebalanceProtocols) - - props.put(GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, "classic,streams") - val config2 = KafkaConfig.fromProps(props) - assertEquals(Set(GroupType.CLASSIC, GroupType.STREAMS), config2.groupCoordinatorRebalanceProtocols) + assertTrue(config.isNewGroupCoordinatorEnabled) + assertTrue(config.shareGroupConfig.isShareGroupEnabled) } @Test @@ -1897,27 +1845,4 @@ class KafkaConfigTest { props.put(ShareGroupConfig.SHARE_GROUP_RECORD_LOCK_DURATION_MS_CONFIG, "30000") assertDoesNotThrow(() => KafkaConfig.fromProps(props)) } - - @Test - def testLowercaseControllerListenerNames(): Unit = { - val props = createDefaultConfig() - props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "controller") - val message = assertThrows(classOf[IllegalArgumentException], () => KafkaConfig.fromProps(props)).getMessage - assertEquals("requirement failed: controller.listener.names must contain at least one value appearing in the 'listeners' configuration when running the KRaft controller role", message) - } - - @Test - def testLogBrokerHeartbeatIntervalMsShouldBeLowerThanHalfOfBrokerSessionTimeoutMs(): Unit = { - val props = createDefaultConfig() - Using.resource(LogCaptureAppender.createAndRegister) { appender => - appender.setClassLogger(KafkaConfig.getClass, Level.ERROR) - props.setProperty(KRaftConfigs.BROKER_HEARTBEAT_INTERVAL_MS_CONFIG, "4500") - props.setProperty(KRaftConfigs.BROKER_SESSION_TIMEOUT_MS_CONFIG, "8999") - KafkaConfig.fromProps(props) - assertTrue(appender.getMessages.contains("broker.heartbeat.interval.ms (4500 ms) must be less than or equal to half of the broker.session.timeout.ms (8999 ms). " + - "The broker.session.timeout.ms is configured on controller. The broker.heartbeat.interval.ms is configured on broker. " + - "If a broker doesn't send heartbeat request within broker.session.timeout.ms, it loses broker lease. " + - "Please increase broker.session.timeout.ms or decrease broker.heartbeat.interval.ms.")) - } - } } diff --git a/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala b/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala index 51c5d192c6d3f..3d4ea198753b8 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaMetricReporterExceptionHandlingTest.scala @@ -24,7 +24,9 @@ import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.metrics.MetricConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.net.Socket import java.util.concurrent.atomic.AtomicInteger @@ -61,8 +63,9 @@ class KafkaMetricReporterExceptionHandlingTest extends BaseRequestTest { super.tearDown() } - @Test - def testBothReportersAreInvoked(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testBothReportersAreInvoked(quorum: String): Unit = { val port = anySocketServer.boundPort(ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) val socket = new Socket("localhost", port) socket.setSoTimeout(10000) diff --git a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala index c8692661134ee..686cea80d0bfe 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaMetricsReporterTest.scala @@ -22,9 +22,10 @@ import kafka.utils.{CoreUtils, TestUtils} import org.apache.kafka.common.metrics.{KafkaMetric, MetricsContext, MetricsReporter} import org.apache.kafka.server.config.ServerConfigs import org.apache.kafka.server.metrics.MetricConfigs -import org.apache.kafka.test.{TestUtils => JTestUtils} -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource object KafkaMetricsReporterTest { @@ -76,14 +77,15 @@ class KafkaMetricsReporterTest extends QuorumTestHarness { broker.startup() } - @Test - def testMetricsContextNamespacePresent(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testMetricsContextNamespacePresent(quorum: String): Unit = { assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.CLUSTERID.get()) assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.NODEID.get()) assertNotNull(KafkaMetricsReporterTest.MockMetricsReporter.JMXPREFIX.get()) broker.shutdown() - JTestUtils.assertNoLeakedThreadsWithNameAndDaemonStatus(this.getClass.getName, true) + TestUtils.assertNoNonDaemonThreads(this.getClass.getName) } @AfterEach diff --git a/core/src/test/scala/unit/kafka/server/KafkaRaftServerTest.scala b/core/src/test/scala/unit/kafka/server/KafkaRaftServerTest.scala index aa558dca7f310..7f9d7ad46644c 100644 --- a/core/src/test/scala/unit/kafka/server/KafkaRaftServerTest.scala +++ b/core/src/test/scala/unit/kafka/server/KafkaRaftServerTest.scala @@ -19,15 +19,15 @@ package kafka.server import java.io.File import java.nio.file.Files import java.util.{Optional, Properties} +import kafka.log.UnifiedLog import org.apache.kafka.common.{KafkaException, Uuid} import org.apache.kafka.common.utils.Utils import org.apache.kafka.metadata.bootstrap.{BootstrapDirectory, BootstrapMetadata} import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} import org.apache.kafka.server.common.MetadataVersion -import org.apache.kafka.storage.internals.log.UnifiedLog import org.apache.kafka.test.TestUtils import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -159,7 +159,7 @@ class KafkaRaftServerTest { configProperties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") configProperties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"${nodeId + 1}@localhost:9092") configProperties.put(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) - configProperties.put(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, invalidDir.getAbsolutePath) + configProperties.put(KRaftConfigs.METADATA_LOG_DIR_CONFIG, invalidDir.getAbsolutePath) configProperties.put(ServerLogConfigs.LOG_DIR_CONFIG, validDir.getAbsolutePath) configProperties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") val config = KafkaConfig.fromProps(configProperties) @@ -189,7 +189,7 @@ class KafkaRaftServerTest { configProperties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") configProperties.put(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) configProperties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"${nodeId + 1}@localhost:9092") - configProperties.put(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, validDir.getAbsolutePath) + configProperties.put(KRaftConfigs.METADATA_LOG_DIR_CONFIG, validDir.getAbsolutePath) configProperties.put(ServerLogConfigs.LOG_DIR_CONFIG, invalidDir.getAbsolutePath) configProperties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") val config = KafkaConfig.fromProps(configProperties) @@ -225,7 +225,7 @@ class KafkaRaftServerTest { configProperties.put(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker") configProperties.put(KRaftConfigs.NODE_ID_CONFIG, nodeId.toString) configProperties.put(QuorumConfig.QUORUM_VOTERS_CONFIG, s"${nodeId + 1}@localhost:9092") - configProperties.put(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, metadataDir.getAbsolutePath) + configProperties.put(KRaftConfigs.METADATA_LOG_DIR_CONFIG, metadataDir.getAbsolutePath) configProperties.put(ServerLogConfigs.LOG_DIR_CONFIG, dataDir.getAbsolutePath) configProperties.put(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL") val config = KafkaConfig.fromProps(configProperties) diff --git a/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala index 5e074880e7cba..4ff454d1d2de2 100644 --- a/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/LeaveGroupRequestTest.scala @@ -29,13 +29,10 @@ import org.junit.jupiter.api.Assertions.assertEquals import scala.jdk.CollectionConverters._ -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") +)) class LeaveGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest def testLeaveGroupWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/ListGroupsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ListGroupsRequestTest.scala index 86cf887f2b2d3..3961c725ed445 100644 --- a/core/src/test/scala/unit/kafka/server/ListGroupsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ListGroupsRequestTest.scala @@ -23,28 +23,46 @@ import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.classic.ClassicGroupState import org.apache.kafka.coordinator.group.modern.consumer.ConsumerGroup.ConsumerGroupState import org.apache.kafka.coordinator.group.{Group, GroupCoordinatorConfig} -import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Assertions.{assertEquals, fail} -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +@ClusterTestDefaults(types = Array(Type.KRAFT)) +class ListGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") + ) + ) + def testListGroupsWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testListGroups(true) + } + + @ClusterTest(serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") - ) -) -class ListGroupsRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testListGroupsWithNewConsumerGroupProtocol(): Unit = { - testListGroups(true) + )) + def testListGroupsWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testListGroups(false) } - @ClusterTest - def testListGroupsWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") + )) + def testListGroupsWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { testListGroups(false) } private def testListGroups(useNewProtocol: Boolean): Unit = { + if (!isNewGroupCoordinatorEnabled && useNewProtocol) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() diff --git a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala index 304e63602a3d6..5ba6ef34603a3 100644 --- a/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ListOffsetsRequestTest.scala @@ -21,12 +21,11 @@ import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartit import org.apache.kafka.common.message.ListOffsetsResponseData.ListOffsetsPartitionResponse import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.requests.{ListOffsetsRequest, ListOffsetsResponse} -import org.apache.kafka.common.utils.annotation.ApiKeyVersionsSource import org.apache.kafka.common.{IsolationLevel, TopicPartition} import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.util.{Optional, Properties} import scala.collection.Seq @@ -44,8 +43,9 @@ class ListOffsetsRequestTest extends BaseRequestTest { } } - @Test - def testListOffsetsErrorCodes(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testListOffsetsErrorCodes(quorum: String): Unit = { val targetTimes = List(new ListOffsetsTopic() .setName(topic) .setPartitions(List(new ListOffsetsPartition() @@ -108,8 +108,9 @@ class ListOffsetsRequestTest extends BaseRequestTest { assertResponseError(error, brokerId, request) } - @Test - def testCurrentEpochValidation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCurrentEpochValidation(quorum: String): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) @@ -167,8 +168,9 @@ class ListOffsetsRequestTest extends BaseRequestTest { (partitionData.offset, partitionData.leaderEpoch, partitionData.errorCode()) } - @Test - def testResponseIncludesLeaderEpoch(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testResponseIncludesLeaderEpoch(quorum: String): Unit = { val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) val firstLeaderId = partitionToLeader(partition.partition) @@ -208,38 +210,46 @@ class ListOffsetsRequestTest extends BaseRequestTest { } @ParameterizedTest - @ApiKeyVersionsSource(apiKey = ApiKeys.LIST_OFFSETS) - def testResponseDefaultOffsetAndLeaderEpochForAllVersions(version: Short): Unit = { + @ValueSource(strings = Array("kraft")) + def testResponseDefaultOffsetAndLeaderEpochForAllVersions(quorum: String): Unit = { val partitionToLeader = createTopic(numPartitions = 1, replicationFactor = 3) val firstLeaderId = partitionToLeader(partition.partition) TestUtils.generateAndProduceMessages(brokers, topic, 9) TestUtils.produceMessage(brokers, topic, "test-10", System.currentTimeMillis() + 10L) - if (version >= 1 && version <= 3) { - assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) - assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) - assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) - } else if (version >= 4 && version <= 6) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) - } else if (version == 7) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) - assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) - assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) - } else if (version >= 8) { - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version)) - assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version)) - assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version)) - assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version)) + for (version <- ApiKeys.LIST_OFFSETS.oldestVersion to ApiKeys.LIST_OFFSETS.latestVersion) { + if (version == 0) { + assertEquals((-1L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) + assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) + assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) + } else if (version >= 1 && version <= 3) { + assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) + assertEquals((0L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) + assertEquals((10L, -1), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) + } else if (version >= 4 && version <= 6) { + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) + } else if (version == 7) { + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) + assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) + assertEquals((-1L, -1, Errors.UNSUPPORTED_VERSION.code()), fetchOffsetAndEpochWithError(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) + } else if (version >= 8) { + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, 0L, version.toShort)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_TIMESTAMP, version.toShort)) + assertEquals((10L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.LATEST_TIMESTAMP, version.toShort)) + assertEquals((9L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.MAX_TIMESTAMP, version.toShort)) + assertEquals((0L, 0), fetchOffsetAndEpoch(firstLeaderId, ListOffsetsRequest.EARLIEST_LOCAL_TIMESTAMP, version.toShort)) + } } } diff --git a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala index cbc6df7180e6e..aa2e634e9bfaf 100644 --- a/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogDirFailureTest.scala @@ -17,7 +17,6 @@ package kafka.server import java.io.File -import java.util import java.util.Collections import java.util.concurrent.{ExecutionException, TimeUnit} import kafka.api.IntegrationTestHarness @@ -62,15 +61,15 @@ class LogDirFailureTest extends IntegrationTestHarness { ensureConsistentKRaftMetadata() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceErrorFromFailureOnLogRoll(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceErrorFromFailureOnLogRoll(quorum: String, groupProtocol: String): Unit = { testProduceErrorsFromLogDirFailureOnLeader(Roll) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testLogDirNotificationTimeout(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testLogDirNotificationTimeout(quorum: String, groupProtocol: String): Unit = { // Disable retries to allow exception to bubble up for validation this.producerConfig.setProperty(ProducerConfig.RETRIES_CONFIG, "0") this.producerConfig.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false") @@ -93,27 +92,27 @@ class LogDirFailureTest extends IntegrationTestHarness { leaderServer.awaitShutdown() } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testIOExceptionDuringLogRoll(groupProtocol: String): Unit = { - testProduceAfterLogDirFailureOnLeader(Roll) + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testIOExceptionDuringLogRoll(quorum: String, groupProtocol: String): Unit = { + testProduceAfterLogDirFailureOnLeader(Roll, quorum) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testProduceErrorFromFailureOnCheckpoint(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testProduceErrorFromFailureOnCheckpoint(quorum: String, groupProtocol: String): Unit = { testProduceErrorsFromLogDirFailureOnLeader(Checkpoint) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testIOExceptionDuringCheckpoint(groupProtocol: String): Unit = { - testProduceAfterLogDirFailureOnLeader(Checkpoint) + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testIOExceptionDuringCheckpoint(quorum: String, groupProtocol: String): Unit = { + testProduceAfterLogDirFailureOnLeader(Checkpoint, quorum) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testReplicaFetcherThreadAfterLogDirFailureOnFollower(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testReplicaFetcherThreadAfterLogDirFailureOnFollower(quorum: String, groupProtocol: String): Unit = { this.producerConfig.setProperty(ProducerConfig.RETRIES_CONFIG, "0") this.producerConfig.setProperty(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, "false") val producer = createProducer() @@ -165,7 +164,7 @@ class LogDirFailureTest extends IntegrationTestHarness { e.getCause.isInstanceOf[NotLeaderOrFollowerException]) } - def testProduceAfterLogDirFailureOnLeader(failureType: LogDirFailureType): Unit = { + def testProduceAfterLogDirFailureOnLeader(failureType: LogDirFailureType, quorum: String): Unit = { val consumer = createConsumer() subscribeAndWaitForAssignment(topic, consumer) @@ -203,10 +202,8 @@ class LogDirFailureTest extends IntegrationTestHarness { // check if the broker has the offline replica hasOfflineDir && brokerWithDirFail.exists(broker => broker.replicaManager.metadataCache - .getTopicMetadata(util.Set.of(topic), broker.config.interBrokerListenerName, false, false) - .stream() - .flatMap(t => t.partitions().stream()) - .anyMatch(p => p.partitionIndex() == 0 && p.offlineReplicas().contains(originalLeaderServerId))) + .getClusterMetadata(broker.clusterId, broker.config.interBrokerListenerName) + .partition(new TopicPartition(topic, 0)).offlineReplicas().map(_.id()).contains(originalLeaderServerId)) }, "Expected to find an offline log dir") } diff --git a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala index fae2b32b86bb7..efb057bd1cb3b 100755 --- a/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogOffsetTest.scala @@ -17,6 +17,7 @@ package kafka.server +import kafka.log.UnifiedLog import kafka.utils.TestUtils import org.apache.kafka.common.message.ListOffsetsRequestData.{ListOffsetsPartition, ListOffsetsTopic} import org.apache.kafka.common.message.ListOffsetsResponseData.{ListOffsetsPartitionResponse, ListOffsetsTopicResponse} @@ -24,11 +25,19 @@ import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.FileRecords import org.apache.kafka.common.requests.{FetchRequest, FetchResponse, ListOffsetsRequest, ListOffsetsResponse} import org.apache.kafka.common.{IsolationLevel, TopicPartition} -import org.apache.kafka.storage.internals.log.{LogStartOffsetIncrementReason, OffsetResultHolder, UnifiedLog} +import org.apache.kafka.storage.internals.log.{LogSegment, LogStartOffsetIncrementReason, OffsetResultHolder} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{Test, Timeout} +import org.junit.jupiter.api.Timeout +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource +import org.mockito.Mockito.{mock, when} +import org.mockito.invocation.InvocationOnMock +import org.mockito.stubbing.Answer import java.io.File +import java.util +import java.util.Arrays.asList +import java.util.concurrent.atomic.AtomicInteger import java.util.{Optional, Properties, Random} import scala.collection.mutable import scala.jdk.CollectionConverters._ @@ -45,8 +54,9 @@ class LogOffsetTest extends BaseRequestTest { props.put("log.retention.check.interval.ms", (5 * 1000 * 60).toString) } - @Test - def testGetOffsetsForUnknownTopic(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testGetOffsetsForUnknownTopic(quorum: String): Unit = { val topicPartition = new TopicPartition("foo", 0) val request = ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) .setTargetTimes(buildTargetTimes(topicPartition, ListOffsetsRequest.LATEST_TIMESTAMP).asJava).build(1) @@ -55,21 +65,22 @@ class LogOffsetTest extends BaseRequestTest { } @deprecated("ListOffsetsRequest V0", since = "") - @Test - def testGetOffsetsAfterDeleteRecords(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testGetOffsetsAfterDeleteRecords(quorum: String): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) for (_ <- 0 until 20) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) log.flush(false) log.updateHighWatermark(log.logEndOffset) log.maybeIncrementLogStartOffset(3, LogStartOffsetIncrementReason.ClientRecordDeletion) log.deleteOldSegments() - val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.empty).timestampAndOffsetOpt.map(_.offset) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) assertEquals(Optional.of(20L), offset) TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, topicPartition.partition, broker), @@ -80,47 +91,50 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(20L, consumerOffset) } - @Test - def testFetchOffsetByTimestampForMaxTimestampAfterTruncate(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchOffsetByTimestampForMaxTimestampAfterTruncate(quorum: String): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) for (timestamp <- 0 until 20) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes(), timestamp = timestamp.toLong), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes(), timestamp = timestamp.toLong), leaderEpoch = 0) log.flush(false) log.updateHighWatermark(log.logEndOffset) - val firstOffset = log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty).timestampAndOffsetOpt + val firstOffset = log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP).timestampAndOffsetOpt assertEquals(19L, firstOffset.get.offset) assertEquals(19L, firstOffset.get.timestamp) log.truncateTo(0) - assertEquals(Optional.empty, log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty).timestampAndOffsetOpt) + assertEquals(Optional.empty, log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP).timestampAndOffsetOpt) } - @Test - def testFetchOffsetByTimestampForMaxTimestampWithUnorderedTimestamps(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchOffsetByTimestampForMaxTimestampWithUnorderedTimestamps(quorum: String): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) for (timestamp <- List(0L, 1L, 2L, 3L, 4L, 6L, 5L)) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes(), timestamp = timestamp), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes(), timestamp = timestamp), leaderEpoch = 0) log.flush(false) log.updateHighWatermark(log.logEndOffset) - val maxTimestampOffset = log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty).timestampAndOffsetOpt + val maxTimestampOffset = log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP).timestampAndOffsetOpt assertEquals(7L, log.logEndOffset) assertEquals(5L, maxTimestampOffset.get.offset) assertEquals(6L, maxTimestampOffset.get.timestamp) } - @Test - def testGetOffsetsBeforeLatestTime(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testGetOffsetsBeforeLatestTime(quorum: String): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -130,10 +144,10 @@ class LogOffsetTest extends BaseRequestTest { val topicId = topicIds.get(topic) for (_ <- 0 until 20) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) log.flush(false) - val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP, Optional.empty).timestampAndOffsetOpt.map(_.offset) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.LATEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) assertEquals(Optional.of(20L), offset) TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, 0, broker), @@ -151,8 +165,9 @@ class LogOffsetTest extends BaseRequestTest { assertFalse(FetchResponse.recordsOrFail(fetchResponse.responseData(topicNames, ApiKeys.FETCH.latestVersion).get(topicPartition)).batches.iterator.hasNext) } - @Test - def testEmptyLogsGetOffsets(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testEmptyLogsGetOffsets(quorum: String): Unit = { val random = new Random val topic = "kafka-" val topicPartition = new TopicPartition(topic, random.nextInt(10)) @@ -174,8 +189,9 @@ class LogOffsetTest extends BaseRequestTest { assertFalse(offsetChanged) } - @Test - def testFetchOffsetByTimestampForMaxTimestampWithEmptyLog(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchOffsetByTimestampForMaxTimestampWithEmptyLog(quorum: String): Unit = { val topic = "kafka-" val topicPartition = new TopicPartition(topic, 0) val log = createTopicAndGetLog(topic, topicPartition) @@ -183,11 +199,12 @@ class LogOffsetTest extends BaseRequestTest { log.updateHighWatermark(log.logEndOffset) assertEquals(0L, log.logEndOffset) - assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP, Optional.empty)) + assertEquals(new OffsetResultHolder(Optional.empty[FileRecords.TimestampAndOffset]()), log.fetchOffsetByTimestamp(ListOffsetsRequest.MAX_TIMESTAMP)) } - @Test - def testGetOffsetsBeforeEarliestTime(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testGetOffsetsBeforeEarliestTime(quorum: String): Unit = { val random = new Random val topic = "kafka-" val topicPartition = new TopicPartition(topic, random.nextInt(3)) @@ -195,12 +212,12 @@ class LogOffsetTest extends BaseRequestTest { createTopic(topic, 3) val logManager = broker.logManager - val log = logManager.getOrCreateLog(topicPartition, topicId = Optional.empty) + val log = logManager.getOrCreateLog(topicPartition, topicId = None) for (_ <- 0 until 20) - log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), 0) + log.appendAsLeader(TestUtils.singletonRecords(value = Integer.toString(42).getBytes()), leaderEpoch = 0) log.flush(false) - val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP, Optional.empty).timestampAndOffsetOpt.map(_.offset) + val offset = log.fetchOffsetByTimestamp(ListOffsetsRequest.EARLIEST_TIMESTAMP).timestampAndOffsetOpt.map(_.offset) assertEquals(Optional.of(0L), offset) TestUtils.waitUntilTrue(() => isLeaderLocalOnBroker(topic, topicPartition.partition, broker), @@ -211,6 +228,38 @@ class LogOffsetTest extends BaseRequestTest { assertEquals(0L, offsetFromResponse) } + /* We test that `fetchOffsetsBefore` works correctly if `LogSegment.size` changes after each invocation (simulating + * a race condition) */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchOffsetsBeforeWithChangingSegmentSize(quorum: String): Unit = { + val log: UnifiedLog = mock(classOf[UnifiedLog]) + val logSegment: LogSegment = mock(classOf[LogSegment]) + when(logSegment.size).thenAnswer(new Answer[Int] { + private[this] val value = new AtomicInteger(0) + override def answer(invocation: InvocationOnMock): Int = value.getAndIncrement() + }) + val logSegments = Seq(logSegment).asJava + when(log.logSegments).thenReturn(logSegments) + log.legacyFetchOffsetsBefore(System.currentTimeMillis, 100) + } + + /* We test that `fetchOffsetsBefore` works correctly if `Log.logSegments` content and size are + * different (simulating a race condition) */ + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testFetchOffsetsBeforeWithChangingSegments(quorum: String): Unit = { + val log: UnifiedLog = mock(classOf[UnifiedLog]) + val logSegment: LogSegment = mock(classOf[LogSegment]) + when(log.logSegments).thenReturn( + new util.AbstractCollection[LogSegment] { + override def size = 2 + override def iterator = asList(logSegment).iterator + } + ) + log.legacyFetchOffsetsBefore(System.currentTimeMillis, 100) + } + private def broker: KafkaBroker = brokers.head private def sendListOffsetsRequest(request: ListOffsetsRequest): ListOffsetsResponse = { diff --git a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala index 5db1e0873a3d2..f9970d2967afa 100755 --- a/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala +++ b/core/src/test/scala/unit/kafka/server/LogRecoveryTest.scala @@ -27,7 +27,9 @@ import org.apache.kafka.common.serialization.{IntegerSerializer, StringSerialize import org.apache.kafka.server.config.ReplicationConfigs import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.io.File import java.util.Properties @@ -60,20 +62,10 @@ class LogRecoveryTest extends QuorumTestHarness { var admin: Admin = _ var producer: KafkaProducer[Integer, String] = _ - def hwFile1 = new OffsetCheckpointFile(new File(configProps1.logDirs.get(0), ReplicaManager.HighWatermarkFilename), null) - def hwFile2 = new OffsetCheckpointFile(new File(configProps2.logDirs.get(0), ReplicaManager.HighWatermarkFilename), null) + def hwFile1 = new OffsetCheckpointFile(new File(configProps1.logDirs.head, ReplicaManager.HighWatermarkFilename), null) + def hwFile2 = new OffsetCheckpointFile(new File(configProps2.logDirs.head, ReplicaManager.HighWatermarkFilename), null) var servers = Seq.empty[KafkaBroker] - // testHWCheckpointWithFailuresMultipleLogSegments simulates broker failures that can leave the only available replica out of the - // ISR. By enabling unclean leader election, we ensure that the test can proceed and elect - // the out-of-sync replica as the new leader, which is necessary to validate the log - // recovery and high-watermark checkpointing logic under these specific failure conditions. - override def kraftControllerConfigs(testInfo: TestInfo): Seq[Properties] = { - val properties = new Properties() - properties.put(ReplicationConfigs.UNCLEAN_LEADER_ELECTION_ENABLE_CONFIG, "true") - Seq(properties) - } - // Some tests restart the brokers then produce more data. But since test brokers use random ports, we need // to use a new producer that knows the new ports def updateProducer(): Unit = { @@ -112,8 +104,9 @@ class LogRecoveryTest extends QuorumTestHarness { super.tearDown() } - @Test - def testHWCheckpointNoFailuresSingleLogSegment(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHWCheckpointNoFailuresSingleLogSegment(quorum: String): Unit = { val numMessages = 2L sendMessages(numMessages.toInt) @@ -129,8 +122,9 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(numMessages, followerHW) } - @Test - def testHWCheckpointWithFailuresSingleLogSegment(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHWCheckpointWithFailuresSingleLogSegment(quorum: String): Unit = { var leader = getLeaderIdForPartition(servers, topicPartition) assertEquals(0L, hwFile1.read().getOrDefault(topicPartition, 0L)) @@ -189,8 +183,9 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(hw, hwFile2.read().getOrDefault(topicPartition, 0L)) } - @Test - def testHWCheckpointNoFailuresMultipleLogSegments(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHWCheckpointNoFailuresMultipleLogSegments(quorum: String): Unit = { sendMessages(20) val hw = 20L // give some time for follower 1 to record leader HW of 600 @@ -205,8 +200,9 @@ class LogRecoveryTest extends QuorumTestHarness { assertEquals(hw, followerHW) } - @Test - def testHWCheckpointWithFailuresMultipleLogSegments(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testHWCheckpointWithFailuresMultipleLogSegments(quorum: String): Unit = { var leader = getLeaderIdForPartition(servers, topicPartition) sendMessages(2) @@ -225,7 +221,7 @@ class LogRecoveryTest extends QuorumTestHarness { server2.startup() updateProducer() // check if leader moves to the other server - leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader), timeout = 30000L) + leader = awaitLeaderChange(servers, topicPartition, oldLeaderOpt = Some(leader)) assertEquals(1, leader, "Leader must move to broker 1") assertEquals(hw, hwFile1.read().getOrDefault(topicPartition, 0L)) diff --git a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala index 78a857a202f59..285a5dded31c6 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataCacheTest.scala @@ -26,7 +26,7 @@ import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.{DirectoryId, TopicPartition, Uuid} import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataProvenance} -import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache} +import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.common.KRaftVersion import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -43,7 +43,7 @@ import scala.jdk.CollectionConverters._ object MetadataCacheTest { def cacheProvider(): util.stream.Stream[MetadataCache] = util.stream.Stream.of[MetadataCache]( - new KRaftMetadataCache(1, () => KRaftVersion.KRAFT_VERSION_0) + MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.KRAFT_VERSION_0) ) def updateCache(cache: MetadataCache, records: Seq[ApiMessage]): Unit = { @@ -77,7 +77,7 @@ class MetadataCacheTest { @MethodSource(Array("cacheProvider")) def getTopicMetadataNonExistingTopics(cache: MetadataCache): Unit = { val topic = "topic" - val topicMetadata = cache.getTopicMetadata(util.Set.of(topic), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT), false, false) + val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)) assertTrue(topicMetadata.isEmpty) } @@ -145,7 +145,7 @@ class MetadataCacheTest { val listenerName = ListenerName.forSecurityProtocol(securityProtocol) def checkTopicMetadata(topic: String): Unit = { - val topicMetadatas = cache.getTopicMetadata(util.Set.of(topic), listenerName, false, false).asScala + val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName) assertEquals(1, topicMetadatas.size) val topicMetadata = topicMetadatas.head @@ -265,7 +265,7 @@ class MetadataCacheTest { .setReplicas(asList(0))) MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) - val topicMetadatas = cache.getTopicMetadata(util.Set.of(topic), listenerName, false, errorUnavailableListeners).asScala + val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableListeners = errorUnavailableListeners) assertEquals(1, topicMetadatas.size) val topicMetadata = topicMetadatas.head @@ -323,7 +323,7 @@ class MetadataCacheTest { MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) // Validate errorUnavailableEndpoints = false - val topicMetadatas = cache.getTopicMetadata(util.Set.of(topic), listenerName, false, false).asScala + val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false) assertEquals(1, topicMetadatas.size) val topicMetadata = topicMetadatas.head @@ -339,7 +339,7 @@ class MetadataCacheTest { assertEquals(Set(0), partitionMetadata.isrNodes.asScala.toSet) // Validate errorUnavailableEndpoints = true - val topicMetadatasWithError = cache.getTopicMetadata(util.Set.of(topic), listenerName, true, false).asScala + val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true) assertEquals(1, topicMetadatasWithError.size) val topicMetadataWithError = topicMetadatasWithError.head @@ -397,7 +397,7 @@ class MetadataCacheTest { MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) // Validate errorUnavailableEndpoints = false - val topicMetadatas = cache.getTopicMetadata(util.Set.of(topic), listenerName, false, false).asScala + val topicMetadatas = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = false) assertEquals(1, topicMetadatas.size) val topicMetadata = topicMetadatas.head @@ -413,7 +413,7 @@ class MetadataCacheTest { assertEquals(Set(0, 1), partitionMetadata.isrNodes.asScala.toSet) // Validate errorUnavailableEndpoints = true - val topicMetadatasWithError = cache.getTopicMetadata(util.Set.of(topic), listenerName, true, false).asScala + val topicMetadatasWithError = cache.getTopicMetadata(Set(topic), listenerName, errorUnavailableEndpoints = true) assertEquals(1, topicMetadatasWithError.size) val topicMetadataWithError = topicMetadatasWithError.head @@ -461,7 +461,7 @@ class MetadataCacheTest { .setReplicas(replicas)) MetadataCacheTest.updateCache(cache, Seq(brokers, topicRecord) ++ partitionStates) - val topicMetadata = cache.getTopicMetadata(util.Set.of(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL), false, false).asScala + val topicMetadata = cache.getTopicMetadata(Set(topic), ListenerName.forSecurityProtocol(SecurityProtocol.SSL)) assertEquals(1, topicMetadata.size) assertEquals(1, topicMetadata.head.partitions.size) assertEquals(RecordBatch.NO_PARTITION_LEADER_EPOCH, topicMetadata.head.partitions.get(0).leaderId) @@ -506,11 +506,10 @@ class MetadataCacheTest { val initialBrokerIds = (0 to 2) updateCache(initialBrokerIds) + val aliveBrokersFromCache = cache.getAliveBrokers() // This should not change `aliveBrokersFromCache` updateCache((0 to 3)) - initialBrokerIds.foreach { brokerId => - assertTrue(cache.hasAliveBroker(brokerId)) - } + assertEquals(initialBrokerIds.toSet, aliveBrokersFromCache.map(_.id).toSet) } @ParameterizedTest @@ -563,7 +562,7 @@ class MetadataCacheTest { (0 until numPartitions).foreach { partitionId => val tp = new TopicPartition(topic, partitionId) - val brokerIdToNodeMap = cache.getPartitionReplicaEndpoints(tp, listenerName).asScala + val brokerIdToNodeMap = cache.getPartitionReplicaEndpoints(tp, listenerName) val replicaSet = brokerIdToNodeMap.keySet val expectedReplicaSet = partitionRecords(partitionId).replicas().asScala.toSet // Verify that we have endpoints for exactly the non-fenced brokers of the replica set @@ -610,7 +609,7 @@ class MetadataCacheTest { @Test def testIsBrokerFenced(): Unit = { - val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) val delta = new MetadataDelta.Builder().build() delta.replay(new RegisterBrokerRecord() @@ -630,9 +629,45 @@ class MetadataCacheTest { assertTrue(metadataCache.isBrokerFenced(0)) } + @Test + def testGetAliveBrokersWithBrokerFenced(): Unit = { + val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + val listenerName = "listener" + val endpoints = new BrokerEndpointCollection() + endpoints.add(new BrokerEndpoint(). + setName(listenerName). + setHost("foo"). + setPort(123). + setSecurityProtocol(0)) + val delta = new MetadataDelta.Builder().build() + delta.replay(new RegisterBrokerRecord() + .setBrokerId(0) + .setFenced(false) + .setEndPoints(endpoints)) + delta.replay(new RegisterBrokerRecord() + .setBrokerId(1) + .setFenced(false) + .setEndPoints(endpoints)) + delta.replay(new BrokerRegistrationChangeRecord() + .setBrokerId(1) + .setFenced(1.toByte)) + + val metadataImage = delta.apply(MetadataProvenance.EMPTY) + + metadataCache.setImage(metadataImage) + assertFalse(metadataCache.isBrokerFenced(0)) + assertTrue(metadataCache.isBrokerFenced(1)) + + val aliveBrokers = metadataCache.getAliveBrokers().map(_.id).toSet + metadataImage.cluster().brokers().forEach { (brokerId, registration) => + assertEquals(!registration.fenced(), aliveBrokers.contains(brokerId)) + assertEquals(aliveBrokers.contains(brokerId), metadataCache.getAliveBrokerNode(brokerId, new ListenerName(listenerName)).isDefined) + } + } + @Test def testIsBrokerInControlledShutdown(): Unit = { - val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) val delta = new MetadataDelta.Builder().build() delta.replay(new RegisterBrokerRecord() @@ -654,7 +689,7 @@ class MetadataCacheTest { @Test def testGetLiveBrokerEpoch(): Unit = { - val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) val delta = new MetadataDelta.Builder().build() delta.replay(new RegisterBrokerRecord() @@ -669,13 +704,13 @@ class MetadataCacheTest { metadataCache.setImage(delta.apply(MetadataProvenance.EMPTY)) - assertEquals(100L, metadataCache.getAliveBrokerEpoch(0).orElse(-1L)) - assertEquals(-1L, metadataCache.getAliveBrokerEpoch(1).orElse(-1L)) + assertEquals(100L, metadataCache.getAliveBrokerEpoch(0).getOrElse(-1L)) + assertEquals(-1L, metadataCache.getAliveBrokerEpoch(1).getOrElse(-1L)) } @Test - def testDescribeTopicResponse(): Unit = { - val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) + def testGetTopicMetadataForDescribeTopicPartitionsResponse(): Unit = { + val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_0) val securityProtocol = SecurityProtocol.PLAINTEXT val listenerName = ListenerName.forSecurityProtocol(securityProtocol) @@ -775,7 +810,7 @@ class MetadataCacheTest { } // Basic test - var result = metadataCache.describeTopicResponse(util.List.of(topic0, topic1).iterator, listenerName, _ => 0, 10, false).topics().asScala.toList + var result = metadataCache.getTopicMetadataForDescribeTopicResponse(Seq(topic0, topic1).iterator, listenerName, _ => 0, 10, false).topics().asScala.toList assertEquals(2, result.size) var resultTopic = result(0) assertEquals(topic0, resultTopic.name()) @@ -792,7 +827,7 @@ class MetadataCacheTest { checkTopicMetadata(topic1, Set(0), resultTopic.partitions().asScala) // Quota reached - var response = metadataCache.describeTopicResponse(util.List.of(topic0, topic1).iterator, listenerName, _ => 0, 2, false) + var response = metadataCache.getTopicMetadataForDescribeTopicResponse(Seq(topic0, topic1).iterator, listenerName, _ => 0, 2, false) result = response.topics().asScala.toList assertEquals(1, result.size) resultTopic = result(0) @@ -805,7 +840,7 @@ class MetadataCacheTest { assertEquals(2, response.nextCursor().partitionIndex()) // With start index - result = metadataCache.describeTopicResponse(util.List.of(topic0).iterator, listenerName, t => if (t.equals(topic0)) 1 else 0, 10, false).topics().asScala.toList + result = metadataCache.getTopicMetadataForDescribeTopicResponse(Seq(topic0).iterator, listenerName, t => if (t.equals(topic0)) 1 else 0, 10, false).topics().asScala.toList assertEquals(1, result.size) resultTopic = result(0) assertEquals(topic0, resultTopic.name()) @@ -815,7 +850,7 @@ class MetadataCacheTest { checkTopicMetadata(topic0, Set(1, 2), resultTopic.partitions().asScala) // With start index and quota reached - response = metadataCache.describeTopicResponse(util.List.of(topic0, topic1).iterator, listenerName, t => if (t.equals(topic0)) 2 else 0, 1, false) + response = metadataCache.getTopicMetadataForDescribeTopicResponse(Seq(topic0, topic1).iterator, listenerName, t => if (t.equals(topic0)) 2 else 0, 1, false) result = response.topics().asScala.toList assertEquals(1, result.size) @@ -829,7 +864,7 @@ class MetadataCacheTest { assertEquals(0, response.nextCursor().partitionIndex()) // When the first topic does not exist - result = metadataCache.describeTopicResponse(util.List.of("Non-exist", topic0).iterator, listenerName, t => if (t.equals("Non-exist")) 1 else 0, 1, false).topics().asScala.toList + result = metadataCache.getTopicMetadataForDescribeTopicResponse(Seq("Non-exist", topic0).iterator, listenerName, t => if (t.equals("Non-exist")) 1 else 0, 1, false).topics().asScala.toList assertEquals(2, result.size) resultTopic = result(0) assertEquals("Non-exist", resultTopic.name()) @@ -881,11 +916,11 @@ class MetadataCacheTest { MetadataCacheTest.updateCache(cache, brokers ++ topicRecords ++ partitionStates) val leaderAndIsr = cache.getLeaderAndIsr(topic, partitionIndex) - assertEquals(util.Optional.of(leader), leaderAndIsr.map(_.leader())) - assertEquals(util.Optional.of(leaderEpoch), leaderAndIsr.map(_.leaderEpoch())) - assertEquals(util.Optional.of(isr), leaderAndIsr.map(_.isr())) - assertEquals(util.Optional.of(-1), leaderAndIsr.map(_.partitionEpoch())) - assertEquals(util.Optional.of(LeaderRecoveryState.RECOVERED), leaderAndIsr.map(_.leaderRecoveryState())) + assertEquals(Some(leader), leaderAndIsr.map(_.leader())) + assertEquals(Some(leaderEpoch), leaderAndIsr.map(_.leaderEpoch())) + assertEquals(Some(isr), leaderAndIsr.map(_.isr())) + assertEquals(Some(-1), leaderAndIsr.map(_.partitionEpoch())) + assertEquals(Some(LeaderRecoveryState.RECOVERED), leaderAndIsr.map(_.leaderRecoveryState())) } @Test @@ -907,9 +942,9 @@ class MetadataCacheTest { new PartitionRecord().setTopicId(topicId).setPartitionId(partition.id). setReplicas(partition.replicas).setDirectories(partition.dirs). setLeader(partition.replicas.get(0)).setIsr(partition.replicas))) - val cache = new KRaftMetadataCache(1, () => KRaftVersion.KRAFT_VERSION_0) + val cache = MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.KRAFT_VERSION_0) cache.setImage(delta.apply(MetadataProvenance.EMPTY)) - val topicMetadata = cache.getTopicMetadata(util.Set.of("foo"), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).asScala.head + val topicMetadata = cache.getTopicMetadata(Set("foo"), ListenerName.forSecurityProtocol(SecurityProtocol.PLAINTEXT)).head topicMetadata.partitions().asScala.map(p => (p.partitionIndex(), p.offlineReplicas())).toMap } diff --git a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala index 181fd2f644c66..2b2250ff95de4 100644 --- a/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/MetadataRequestTest.scala @@ -27,7 +27,9 @@ import org.apache.kafka.common.requests.{MetadataRequest, MetadataResponse} import org.apache.kafka.metadata.BrokerState import org.apache.kafka.test.TestUtils.isValidClusterId import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.collection.Seq import scala.jdk.CollectionConverters._ @@ -39,21 +41,24 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { doSetup(testInfo, createOffsetsTopic = false) } - @Test - def testClusterIdWithRequestVersion1(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterIdWithRequestVersion1(quorum: String): Unit = { val v1MetadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(1.toShort)) val v1ClusterId = v1MetadataResponse.clusterId assertNull(v1ClusterId, s"v1 clusterId should be null") } - @Test - def testClusterIdIsValid(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testClusterIdIsValid(quorum: String): Unit = { val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) isValidClusterId(metadataResponse.clusterId) } - @Test - def testRack(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testRack(quorum: String): Unit = { val metadataResponse = sendMetadataRequest(MetadataRequest.Builder.allTopics.build(4.toShort)) // Validate rack matches what's set in generateConfigs() above metadataResponse.brokers.forEach { broker => @@ -61,8 +66,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { } } - @Test - def testIsInternal(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIsInternal(quorum: String): Unit = { val internalTopic = Topic.GROUP_METADATA_TOPIC_NAME val notInternalTopic = "notInternal" // create the topics @@ -82,8 +88,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(Set(internalTopic).asJava, metadataResponse.buildCluster().internalTopics) } - @Test - def testNoTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testNoTopicsRequest(quorum: String): Unit = { // create some topics createTopic("t1", 3, 2) createTopic("t2", 3, 2) @@ -93,8 +100,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertTrue(metadataResponse.topicMetadata.isEmpty, "Response should have no topics") } - @Test - def testAutoTopicCreation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAutoTopicCreation(quorum: String): Unit = { val topic1 = "t1" val topic2 = "t2" val topic3 = "t3" @@ -120,8 +128,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, response3.errors.get(topic5)) } - @Test - def testAutoCreateTopicWithInvalidReplicationFactor(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAutoCreateTopicWithInvalidReplicationFactor(quorum: String): Unit = { // Shutdown all but one broker so that the number of brokers is less than the default replication factor brokers.tail.foreach(_.shutdown()) brokers.tail.foreach(_.awaitShutdown()) @@ -135,8 +144,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(0, topicMetadata.partitionMetadata.size) } - @Test - def testAllTopicsRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAllTopicsRequest(quorum: String): Unit = { // create some topics createTopic("t1", 3, 2) createTopic("t2", 3, 2) @@ -152,8 +162,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(2, metadataResponseV1.topicMetadata.size(), "V1 Response should have 2 (all) topics") } - @Test - def testTopicIdsInResponse(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testTopicIdsInResponse(quorum: String): Unit = { val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1)) val topic1 = "topic1" val topic2 = "topic2" @@ -181,8 +192,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { /** * Preferred replica should be the first item in the replicas list */ - @Test - def testPreferredReplica(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testPreferredReplica(quorum: String): Unit = { val replicaAssignment = Map(0 -> Seq(1, 2, 0), 1 -> Seq(2, 0, 1)) createTopicWithAssignment("t1", replicaAssignment) // Test metadata on two different brokers to ensure that metadata propagation works correctly @@ -204,8 +216,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { } } - @Test - def testReplicaDownResponse(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReplicaDownResponse(quorum: String): Unit = { val replicaDownTopic = "replicaDown" val replicaCount = 3 @@ -249,8 +262,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { assertEquals(replicaCount, v1PartitionMetadata.replicaIds.size, s"Response should have $replicaCount replicas") } - @Test - def testIsrAfterBrokerShutDownAndJoinsBack(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testIsrAfterBrokerShutDownAndJoinsBack(quorum: String): Unit = { def checkIsr[B <: KafkaBroker]( brokers: Seq[B], topic: String @@ -286,8 +300,9 @@ class MetadataRequestTest extends AbstractMetadataRequestTest { checkIsr(brokers, topic) } - @Test - def testAliveBrokersWithNoTopics(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testAliveBrokersWithNoTopics(quorum: String): Unit = { def checkMetadata[B <: KafkaBroker]( brokers: Seq[B], expectedBrokersCount: Int diff --git a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala index 5165debe66cd7..2754685b8f443 100644 --- a/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala +++ b/core/src/test/scala/unit/kafka/server/MockFetcherThread.scala @@ -22,16 +22,14 @@ import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.UNDEFINED_ import org.apache.kafka.common.requests.FetchResponse import org.apache.kafka.common.utils.Time import org.apache.kafka.server.common.OffsetAndEpoch -import org.apache.kafka.server.ReplicaState import org.apache.kafka.common.TopicPartition import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ -import java.util.Optional +import java.util.OptionalInt import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOption class MockFetcherThread(val mockLeader: MockLeaderEndPoint, val mockTierStateMachine: MockTierStateMachine, @@ -48,7 +46,7 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, brokerTopicStats = new BrokerTopicStats) { private val replicaPartitionStates = mutable.Map[TopicPartition, PartitionState]() - private var latestEpochDefault: Optional[Integer] = Optional.of(0) + private var latestEpochDefault: Option[Int] = Some(0) mockTierStateMachine.setFetcher(this) @@ -62,18 +60,15 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, } def addPartitions(initialFetchStates: Map[TopicPartition, InitialFetchState], forceTruncation: Boolean): Set[TopicPartition] = { - latestEpochDefault = if (forceTruncation) Optional.empty else Optional.of(0) + latestEpochDefault = if (forceTruncation) None else Some(0) val partitions = super.addPartitions(initialFetchStates) - latestEpochDefault = Optional.of(0) + latestEpochDefault = Some(0) partitions } - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - leaderEpochForReplica: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = { + override def processPartitionData(topicPartition: TopicPartition, + fetchOffset: Long, + partitionData: FetchData): Option[LogAppendInfo] = { val state = replicaPartitionState(topicPartition) if (leader.isTruncationOnFetchSupported && FetchResponse.isDivergingEpoch(partitionData)) { @@ -91,25 +86,18 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, var maxTimestamp = RecordBatch.NO_TIMESTAMP var shallowOffsetOfMaxTimestamp = -1L var lastOffset = state.logEndOffset - var lastEpoch: Optional[Integer] = Optional.empty() - var skipRemainingBatches = false + var lastEpoch: OptionalInt = OptionalInt.empty() for (batch <- batches) { batch.ensureValid() - - skipRemainingBatches = skipRemainingBatches || hasHigherPartitionLeaderEpoch(batch, leaderEpochForReplica) - if (skipRemainingBatches) { - info(s"Skipping batch $batch because leader epoch is $leaderEpochForReplica") - } else { - if (batch.maxTimestamp > maxTimestamp) { - maxTimestamp = batch.maxTimestamp - shallowOffsetOfMaxTimestamp = batch.baseOffset - } - state.log.append(batch) - state.logEndOffset = batch.nextOffset - lastOffset = batch.lastOffset - lastEpoch = Optional.of(batch.partitionLeaderEpoch) + if (batch.maxTimestamp > maxTimestamp) { + maxTimestamp = batch.maxTimestamp + shallowOffsetOfMaxTimestamp = batch.baseOffset } + state.log.append(batch) + state.logEndOffset = batch.nextOffset + lastOffset = batch.lastOffset + lastEpoch = OptionalInt.of(batch.partitionLeaderEpoch) } state.logStartOffset = partitionData.logStartOffset @@ -119,6 +107,7 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, lastOffset, lastEpoch, maxTimestamp, + shallowOffsetOfMaxTimestamp, Time.SYSTEM.milliseconds(), state.logStartOffset, RecordValidationStats.EMPTY, @@ -127,11 +116,6 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, batches.headOption.map(_.lastOffset).getOrElse(-1))) } - private def hasHigherPartitionLeaderEpoch(batch: RecordBatch, leaderEpoch: Int): Boolean = { - batch.partitionLeaderEpoch() != RecordBatch.NO_PARTITION_LEADER_EPOCH && - batch.partitionLeaderEpoch() > leaderEpoch - } - override def truncate(topicPartition: TopicPartition, truncationState: OffsetTruncationState): Unit = { val state = replicaPartitionState(topicPartition) state.log = state.log.takeWhile { batch => @@ -153,34 +137,30 @@ class MockFetcherThread(val mockLeader: MockLeaderEndPoint, state.highWatermark = offset } - override def latestEpoch(topicPartition: TopicPartition): Optional[Integer] = { + override def latestEpoch(topicPartition: TopicPartition): Option[Int] = { val state = replicaPartitionState(topicPartition) - val partitionLeaderEpoch: Optional[Integer] = state.log.lastOption.toJava.map(_.partitionLeaderEpoch) - if (partitionLeaderEpoch.isPresent) - partitionLeaderEpoch - else - latestEpochDefault + state.log.lastOption.map(_.partitionLeaderEpoch).orElse(latestEpochDefault) } override def logStartOffset(topicPartition: TopicPartition): Long = replicaPartitionState(topicPartition).logStartOffset override def logEndOffset(topicPartition: TopicPartition): Long = replicaPartitionState(topicPartition).logEndOffset - override def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Optional[OffsetAndEpoch] = { + override def endOffsetForEpoch(topicPartition: TopicPartition, epoch: Int): Option[OffsetAndEpoch] = { val epochData = new EpochData() .setPartition(topicPartition.partition) .setLeaderEpoch(epoch) val result = mockLeader.lookupEndOffsetForEpoch(topicPartition, epochData, replicaPartitionState(topicPartition)) if (result.endOffset == UNDEFINED_EPOCH_OFFSET) - Optional.empty + None else - Optional.of(new OffsetAndEpoch(result.endOffset, result.leaderEpoch)) + Some(new OffsetAndEpoch(result.endOffset, result.leaderEpoch)) } def verifyLastFetchedEpoch(partition: TopicPartition, expectedEpoch: Option[Int]): Unit = { if (leader.isTruncationOnFetchSupported) { - assertEquals(Some(ReplicaState.FETCHING), fetchState(partition).map(_.state)) - assertEquals(expectedEpoch, fetchState(partition).map(_.lastFetchedEpoch.get())) + assertEquals(Some(Fetching), fetchState(partition).map(_.state)) + assertEquals(expectedEpoch, fetchState(partition).flatMap(_.lastFetchedEpoch)) } } } diff --git a/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala b/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala index 96e43955d9ea7..25de039232905 100644 --- a/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala +++ b/core/src/test/scala/unit/kafka/server/MockLeaderEndPoint.scala @@ -17,7 +17,9 @@ package kafka.server -import org.apache.kafka.common.message.{FetchResponseData, OffsetForLeaderEpochRequestData} +import kafka.server.AbstractFetcherThread.ReplicaFetch +import kafka.server.AbstractFetcherThread.ResultWithPartitions +import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record._ @@ -26,13 +28,12 @@ import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.server.common.OffsetAndEpoch import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.{LeaderEndPoint, PartitionFetchState, ReplicaFetch, ResultWithPartitions} import java.nio.ByteBuffer import java.util.Optional -import scala.collection.{mutable} +import scala.collection.{Map, Set, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.RichOptional +import scala.jdk.OptionConverters.{RichOption, RichOptional} import scala.util.Random class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "localhost", Random.nextInt()), @@ -75,7 +76,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l override def brokerEndPoint(): BrokerEndPoint = sourceBroker - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { fetchRequest.fetchData.asScala.map { case (partition, fetchData) => val leaderState = leaderPartitionState(partition) val epochCheckError = checkExpectedLeaderEpoch(fetchData.currentLeaderEpoch, leaderState) @@ -104,7 +105,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l (Errors.NONE, records) } - val partitionData = new FetchResponseData.PartitionData() + val partitionData = new FetchData() .setPartitionIndex(partition.partition) .setErrorCode(error.code) .setHighWatermark(leaderState.highWatermark) @@ -114,7 +115,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l divergingEpoch.foreach(partitionData.setDivergingEpoch) (partition, partitionData) - }.toMap.asJava + }.toMap } override def fetchEarliestOffset(topicPartition: TopicPartition, leaderEpoch: Int): OffsetAndEpoch = { @@ -135,9 +136,9 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l new OffsetAndEpoch(leaderState.localLogStartOffset, leaderState.leaderEpoch) } - override def fetchEpochEndOffsets(partitions: java.util.Map[TopicPartition, OffsetForLeaderEpochRequestData.OffsetForLeaderPartition]): java.util.Map[TopicPartition, EpochEndOffset] = { - val endOffsets = new java.util.HashMap[TopicPartition, EpochEndOffset]() - partitions.forEach { (partition, epochData) => + override def fetchEpochEndOffsets(partitions: Map[TopicPartition, EpochData]): Map[TopicPartition, EpochEndOffset] = { + val endOffsets = mutable.Map[TopicPartition, EpochEndOffset]() + partitions.foreachEntry { (partition, epochData) => assert(partition.partition == epochData.partition, "Partition must be consistent between TopicPartition and EpochData") val leaderState = leaderPartitionState(partition) @@ -147,27 +148,27 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l endOffsets } - override def buildFetch(partitions: java.util.Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[java.util.Optional[ReplicaFetch]] = { + override def buildFetch(partitionMap: Map[TopicPartition, PartitionFetchState]): ResultWithPartitions[Option[ReplicaFetch]] = { val fetchData = mutable.Map.empty[TopicPartition, FetchRequest.PartitionData] - partitions.forEach { case (partition, state) => + partitionMap.foreach { case (partition, state) => if (state.isReadyForFetch) { val replicaState = replicaPartitionStateCallback(partition).getOrElse(throw new IllegalArgumentException(s"Unknown partition $partition")) val lastFetchedEpoch = if (isTruncationOnFetchSupported) - state.lastFetchedEpoch + state.lastFetchedEpoch.map(_.asInstanceOf[Integer]).toJava else Optional.empty[Integer] fetchData.put(partition, - new FetchRequest.PartitionData(state.topicId.orElse(Uuid.ZERO_UUID), state.fetchOffset, replicaState.logStartOffset, + new FetchRequest.PartitionData(state.topicId.getOrElse(Uuid.ZERO_UUID), state.fetchOffset, replicaState.logStartOffset, 1024 * 1024, Optional.of[Integer](state.currentLeaderEpoch), lastFetchedEpoch)) } } val fetchRequest = FetchRequest.Builder.forReplica(version, replicaId, 1, 0, 1, fetchData.asJava) val fetchRequestOpt = if (fetchData.isEmpty) - java.util.Optional.empty[ReplicaFetch]() + None else - Optional.of(new ReplicaFetch(fetchData.asJava, fetchRequest)) - new ResultWithPartitions(fetchRequestOpt, java.util.Collections.emptySet()) + Some(ReplicaFetch(fetchData.asJava, fetchRequest)) + ResultWithPartitions(fetchRequestOpt, Set.empty) } private def checkLeaderEpochAndThrow(expectedEpoch: Int, partitionState: PartitionState): Unit = { @@ -205,9 +206,9 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l partitionState: PartitionState): Option[FetchResponseData.EpochEndOffset] = { lastFetchedEpoch.toScala.flatMap { fetchEpoch => val epochEndOffset = fetchEpochEndOffsets( - java.util.Map.of(topicPartition, new OffsetForLeaderEpochRequestData.OffsetForLeaderPartition() + Map(topicPartition -> new EpochData() .setPartition(topicPartition.partition) - .setLeaderEpoch(fetchEpoch))).get(topicPartition) + .setLeaderEpoch(fetchEpoch)))(topicPartition) if (partitionState.log.isEmpty || epochEndOffset.endOffset == UNDEFINED_EPOCH_OFFSET @@ -223,7 +224,7 @@ class MockLeaderEndPoint(sourceBroker: BrokerEndPoint = new BrokerEndPoint(1, "l } def lookupEndOffsetForEpoch(topicPartition: TopicPartition, - epochData: OffsetForLeaderEpochRequestData.OffsetForLeaderPartition, + epochData: EpochData, partitionState: PartitionState): EpochEndOffset = { checkExpectedLeaderEpoch(epochData.currentLeaderEpoch, partitionState).foreach { error => return new EpochEndOffset() diff --git a/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala b/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala index ca37d9a3f19f8..86df92d77daf9 100644 --- a/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala +++ b/core/src/test/scala/unit/kafka/server/MockTierStateMachine.scala @@ -19,11 +19,6 @@ package kafka.server import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.message.FetchResponseData -import org.apache.kafka.server.LeaderEndPoint -import org.apache.kafka.server.PartitionFetchState -import org.apache.kafka.server.ReplicaState - -import java.util.Optional class MockTierStateMachine(leader: LeaderEndPoint) extends TierStateMachine(leader, null, false) { @@ -36,10 +31,8 @@ class MockTierStateMachine(leader: LeaderEndPoint) extends TierStateMachine(lead val offsetToFetch = leader.fetchEarliestLocalOffset(topicPartition, currentFetchState.currentLeaderEpoch).offset val initialLag = leaderEndOffset - offsetToFetch fetcher.truncateFullyAndStartAt(topicPartition, offsetToFetch) - new PartitionFetchState(currentFetchState.topicId, offsetToFetch, Optional.of(initialLag), - currentFetchState.currentLeaderEpoch, Optional.empty(), ReplicaState.FETCHING, - Optional.of(currentFetchState.currentLeaderEpoch) - ) + PartitionFetchState(currentFetchState.topicId, offsetToFetch, Option.apply(initialLag), currentFetchState.currentLeaderEpoch, + Fetching, Some(currentFetchState.currentLeaderEpoch)) } def setFetcher(mockFetcherThread: MockFetcherThread): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala index eceb21a407787..577007123792b 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetCommitRequestTest.scala @@ -16,38 +16,56 @@ */ package kafka.server -import org.apache.kafka.common.Uuid import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.junit.jupiter.api.Assertions.fail -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT)) class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testOffsetCommitWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testOffsetCommitWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testOffsetCommit(true) } - @ClusterTest - def testOffsetCommitWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testOffsetCommitWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testOffsetCommit(false) + } + + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testOffsetCommitWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { testOffsetCommit(false) } private def testOffsetCommit(useNewProtocol: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -56,6 +74,7 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator // a session long enough for the duration of the test. val (memberId, memberEpoch) = joinConsumerGroup("grp", useNewProtocol) + // Start from version 1 because version 0 goes to ZK. for (version <- ApiKeys.OFFSET_COMMIT.oldestVersion to ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { // Commit offset. commitOffset( @@ -63,7 +82,6 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = if (useNewProtocol && version < 9) Errors.UNSUPPORTED_VERSION else Errors.NONE, @@ -76,11 +94,10 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = - if (version >= 9) Errors.GROUP_ID_NOT_FOUND + if (isNewGroupCoordinatorEnabled && version >= 9) Errors.GROUP_ID_NOT_FOUND else Errors.ILLEGAL_GENERATION, version = version.toShort ) @@ -91,11 +108,10 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = - if (version >= 9) Errors.GROUP_ID_NOT_FOUND + if (isNewGroupCoordinatorEnabled && version >= 9) Errors.GROUP_ID_NOT_FOUND else Errors.ILLEGAL_GENERATION, version = version.toShort ) @@ -106,7 +122,6 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = "", memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.UNKNOWN_MEMBER_ID, @@ -119,7 +134,6 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch + 1, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = @@ -136,27 +150,11 @@ class OffsetCommitRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = "", memberEpoch = -1, topic = "foo", - topicId = topicId, partition = 0, offset = 100L, expectedError = Errors.NONE, version = version.toShort ) - - // Commit offset to a group with an unknown topic id. - if (version >= 10) { - commitOffset( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch, - topic = "bar", - topicId = Uuid.randomUuid(), - partition = 0, - offset = 100L, - expectedError = Errors.UNKNOWN_TOPIC_ID, - version = version.toShort - ) - } } } } diff --git a/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala index 0fc414e24c99e..0a808f6c868f4 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetDeleteRequestTest.scala @@ -20,32 +20,49 @@ import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, Clu import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig +import org.junit.jupiter.api.Assertions.fail -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT)) class OffsetDeleteRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testOffsetDeleteWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testOffsetDeleteWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testOffsetDelete(true) } - @ClusterTest - def testOffsetDeleteWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest(serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testOffsetDeleteWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testOffsetDelete(false) + } + + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testOffsetDeleteWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { testOffsetDelete(false) } private def testOffsetDelete(useNewProtocol: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -65,7 +82,6 @@ class OffsetDeleteRequestTest(cluster: ClusterInstance) extends GroupCoordinator memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, diff --git a/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala index 75bf82ef155d7..a504ecdeea0f8 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetFetchRequestTest.scala @@ -17,63 +17,115 @@ package kafka.server import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} -import org.apache.kafka.common.Uuid -import org.apache.kafka.common.message.{OffsetFetchRequestData, OffsetFetchResponseData} +import org.apache.kafka.common.TopicPartition +import org.apache.kafka.common.message.OffsetFetchResponseData import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.test.ClusterInstance import org.apache.kafka.coordinator.group.GroupCoordinatorConfig -import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Assertions.{assertEquals, fail} import scala.jdk.CollectionConverters._ -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), - new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") - ) -) +@ClusterTestDefaults(types = Array(Type.KRAFT)) class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testSingleGroupOffsetFetchWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testSingleGroupOffsetFetchWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testSingleGroupOffsetFetch(useNewProtocol = true, requireStable = true) } - @ClusterTest - def testSingleGroupOffsetFetchWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testSingleGroupOffsetFetchWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testSingleGroupOffsetFetch(useNewProtocol = false, requireStable = false) } - @ClusterTest - def testSingleGroupAllOffsetFetchWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testSingleGroupOffsetFetchWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testSingleGroupOffsetFetch(useNewProtocol = false, requireStable = true) + } + + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testSingleGroupAllOffsetFetchWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testSingleGroupAllOffsetFetch(useNewProtocol = true, requireStable = true) } - @ClusterTest - def testSingleGroupAllOffsetFetchWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest(serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testSingleGroupAllOffsetFetchWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testSingleGroupAllOffsetFetch(useNewProtocol = false, requireStable = false) } - @ClusterTest - def testMultiGroupsOffsetFetchWithNewConsumerGroupProtocol(): Unit = { + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testSingleGroupAllOffsetFetchWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testSingleGroupAllOffsetFetch(useNewProtocol = false, requireStable = true) + } + + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + ) + ) + def testMultiGroupsOffsetFetchWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testMultipleGroupsOffsetFetch(useNewProtocol = true, requireStable = true) } - @ClusterTest - def testMultiGroupsOffsetFetchWithOldConsumerGroupProtocol(): Unit = { + @ClusterTest(serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testMultiGroupsOffsetFetchWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testMultipleGroupsOffsetFetch(useNewProtocol = false, requireStable = false) } + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1") + )) + def testMultiGroupsOffsetFetchWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testMultipleGroupsOffsetFetch(useNewProtocol = false, requireStable = true) + } + private def testSingleGroupOffsetFetch(useNewProtocol: Boolean, requireStable: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() - val unknownTopicId = Uuid.randomUuid() - // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -89,7 +141,6 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -97,6 +148,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ) } + // Start from version 1 because version 0 goes to ZK. for (version <- 1 to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { // Fetch with partitions. assertEquals( @@ -104,8 +156,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -119,16 +170,14 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. - ).asJava), + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch, + partitions = List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1), + new TopicPartition("foo", 5) // This one does not exist. + ), requireStable = requireStable, version = version.toShort ) @@ -140,8 +189,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("unknown") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -155,16 +203,14 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("unknown") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. - ).asJava), + groupId = "unknown", + memberId = memberId, + memberEpoch = memberEpoch, + partitions = List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1), + new TopicPartition("foo", 5) // This one does not exist. + ), requireStable = requireStable, version = version.toShort ) @@ -176,8 +222,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("unknown") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -187,30 +232,22 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setCommittedOffset(-1L) ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo-unknown" else "") - .setTopicId(if (version >= 10) unknownTopicId else Uuid.ZERO_UUID) + .setName("foo-unknown") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(1) .setCommittedOffset(-1L) - .setErrorCode(if (version >= 10) Errors.UNKNOWN_TOPIC_ID.code else Errors.NONE.code) ).asJava), ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("unknown") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 5).asJava), // 5 does not exist. - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo-unknown") - .setTopicId(unknownTopicId) - .setPartitionIndexes(List[Integer](1).asJava) // 5 does not exist. - ).asJava), + groupId = "unknown", + memberId = memberId, + memberEpoch = memberEpoch, + partitions = List( + new TopicPartition("foo", 0), + new TopicPartition("foo-unknown", 1), + new TopicPartition("foo", 5) // This one does not exist. + ), requireStable = requireStable, version = version.toShort ) @@ -223,47 +260,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId("") - .setMemberEpoch(memberEpoch) - .setTopics(List.empty.asJava), - requireStable = requireStable, - version = version.toShort - ) - ) - - // Fetch with empty group id. - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("") - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(-1L), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(-1L), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(5) - .setCommittedOffset(-1L) - ).asJava) - ).asJava), - fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. - ).asJava), + groupId = "grp", + memberId = "", + memberEpoch = memberEpoch, + partitions = List.empty, requireStable = requireStable, version = version.toShort ) @@ -275,11 +275,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.STALE_MEMBER_EPOCH.code), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch + 1) - .setTopics(List.empty.asJava), + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch + 1, + partitions = List.empty, requireStable = requireStable, version = version.toShort ) @@ -289,12 +288,16 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB } private def testSingleGroupAllOffsetFetch(useNewProtocol: Boolean, requireStable: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -310,7 +313,6 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -327,8 +329,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -342,11 +343,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ).asJava) ).asJava), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(null), + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch, + partitions = null, requireStable = requireStable, version = version.toShort ) @@ -357,11 +357,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB new OffsetFetchResponseData.OffsetFetchResponseGroup() .setGroupId("unknown"), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("unknown") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(null), + groupId = "unknown", + memberId = memberId, + memberEpoch = memberEpoch, + partitions = null, requireStable = requireStable, version = version.toShort ) @@ -374,11 +373,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.UNKNOWN_MEMBER_ID.code), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId("") - .setMemberEpoch(memberEpoch) - .setTopics(null), + groupId = "grp", + memberId = "", + memberEpoch = memberEpoch, + partitions = null, requireStable = requireStable, version = version.toShort ) @@ -390,11 +388,10 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp") .setErrorCode(Errors.STALE_MEMBER_EPOCH.code), fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch + 1) - .setTopics(null), + groupId = "grp", + memberId = memberId, + memberEpoch = memberEpoch + 1, + partitions = null, requireStable = requireStable, version = version.toShort ) @@ -404,14 +401,16 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB } private def testMultipleGroupsOffsetFetch(useNewProtocol: Boolean, requireStable: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() - val unknownTopicId = Uuid.randomUuid() - // Create the topic. - val topicId = createTopic( + createTopic( topic = "foo", numPartitions = 3 ) @@ -428,7 +427,6 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB memberId = memberId, memberEpoch = memberEpoch, topic = "foo", - topicId = topicId, partition = partitionId, offset = 100L + partitionId, expectedError = Errors.NONE, @@ -447,8 +445,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-0") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -466,8 +463,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-1") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -489,8 +485,7 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-3") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) @@ -502,60 +497,37 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB .setGroupId("grp-4") .setTopics(List( new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo" else "") - .setTopicId(if (version >= 10) topicId else Uuid.ZERO_UUID) + .setName("foo") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(5) .setCommittedOffset(-1L) ).asJava), new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName(if (version < 10) "foo-unknown" else "") - .setTopicId(if (version >= 10) unknownTopicId else Uuid.ZERO_UUID) + .setName("foo-unknown") .setPartitions(List( new OffsetFetchResponseData.OffsetFetchResponsePartitions() .setPartitionIndex(0) .setCommittedOffset(-1L) - .setErrorCode(if (version >= 10) Errors.UNKNOWN_TOPIC_ID.code else Errors.NONE.code) ).asJava) ).asJava), ).toSet, fetchOffsets( - groups = List( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp-0") - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 5).asJava) // 5 does not exist. - ).asJava), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp-1") - .setTopics(null), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp-2") - .setTopics(List.empty.asJava), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp-3") - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0).asJava) - ).asJava), - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp-4") - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo-unknown") // Unknown topic - .setTopicId(unknownTopicId) - .setPartitionIndexes(List[Integer](0).asJava), - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](5).asJava) // 5 does not exist. - ).asJava), + groups = Map( + "grp-0" -> List( + new TopicPartition("foo", 0), + new TopicPartition("foo", 1), + new TopicPartition("foo", 5) // This one does not exist. + ), + "grp-1" -> null, + "grp-2" -> List.empty, + "grp-3" -> List( + new TopicPartition("foo", 0) + ), + "grp-4" -> List( + new TopicPartition("foo-unknown", 0), // unknown topic id + new TopicPartition("foo", 5) // The partition doesn't exist. + ), ), requireStable = requireStable, version = version.toShort @@ -563,137 +535,4 @@ class OffsetFetchRequestTest(cluster: ClusterInstance) extends GroupCoordinatorB ) } } - - @ClusterTest - def testFetchOffsetWithRecreatedTopic(): Unit = { - // There are two ways to ensure that committed of recreated topics are not returned. - // 1) When a topic is deleted, GroupCoordinatorService#onPartitionsDeleted is called to - // delete all its committed offsets. - // 2) Since version 10 of the OffsetCommit API, the topic id is stored alongside the - // committed offset. When it is queried, it is only returned iff the topic id of - // committed offset matches the requested one. - // The test tests both conditions but not in a deterministic way as they race - // against each others. - - createOffsetsTopic() - - // Create the topic. - var topicId = createTopic( - topic = "foo", - numPartitions = 3 - ) - - // Join the consumer group. Note that we don't heartbeat here so we must use - // a session long enough for the duration of the test. - val (memberId, memberEpoch) = joinConsumerGroup("grp", true) - - // Commit offsets. - for (partitionId <- 0 to 2) { - commitOffset( - groupId = "grp", - memberId = memberId, - memberEpoch = memberEpoch, - topic = "foo", - topicId = topicId, - partition = partitionId, - offset = 100L + partitionId, - expectedError = Errors.NONE, - version = ApiKeys.OFFSET_COMMIT.latestVersion(isUnstableApiEnabled) - ) - } - - // Delete topic. - deleteTopic("foo") - - // Recreate topic. - topicId = createTopic( - topic = "foo", - numPartitions = 3 - ) - - // Start from version 10 because fetching topic id is not supported before. - for (version <- 10 to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { - assertEquals( - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("grp") - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setTopicId(topicId) - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setCommittedOffset(-1L), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(1) - .setCommittedOffset(-1L), - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(2) - .setCommittedOffset(-1L) - ).asJava) - ).asJava), - fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(memberEpoch) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0, 1, 2).asJava) - ).asJava), - requireStable = true, - version = version.toShort - ) - ) - } - } - - @ClusterTest - def testGroupErrors(): Unit = { - val topicId = createTopic( - topic = "foo", - numPartitions = 3 - ) - - for (version <- ApiKeys.OFFSET_FETCH.oldestVersion() to ApiKeys.OFFSET_FETCH.latestVersion(isUnstableApiEnabled)) { - assertEquals( - if (version >= 2) { - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("unknown") - .setErrorCode(Errors.NOT_COORDINATOR.code) - } else { - // Version 1 does not support group level errors. Hence, the error is - // returned at the partition level. - new OffsetFetchResponseData.OffsetFetchResponseGroup() - .setGroupId("unknown") - .setTopics(List( - new OffsetFetchResponseData.OffsetFetchResponseTopics() - .setName("foo") - .setPartitions(List( - new OffsetFetchResponseData.OffsetFetchResponsePartitions() - .setPartitionIndex(0) - .setErrorCode(Errors.NOT_COORDINATOR.code) - .setCommittedOffset(-1) - .setCommittedLeaderEpoch(-1) - .setMetadata("") - ).asJava) - ).asJava) - }, - fetchOffsets( - group = new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("unknown") - .setMemberId("") - .setMemberEpoch(0) - .setTopics(List( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName("foo") - .setTopicId(topicId) - .setPartitionIndexes(List[Integer](0).asJava) - ).asJava), - requireStable = false, - version = version.toShort - ) - ) - } - } } diff --git a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala index 1b13674685d45..fc06a9eeeb759 100644 --- a/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/OffsetsForLeaderEpochRequestTest.scala @@ -26,14 +26,16 @@ import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.{OffsetsForLeaderEpochRequest, OffsetsForLeaderEpochResponse} import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { - @Test - def testOffsetsForLeaderEpochErrorCodes(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testOffsetsForLeaderEpochErrorCodes(quorum: String): Unit = { val topic = "topic" val partition = new TopicPartition(topic, 0) val epochs = offsetForLeaderTopicCollectionFor(partition, 0, RecordBatch.NO_PARTITION_LEADER_EPOCH) @@ -55,8 +57,9 @@ class OffsetsForLeaderEpochRequestTest extends BaseRequestTest { assertResponseError(Errors.NOT_LEADER_OR_FOLLOWER, nonReplica, request) } - @Test - def testCurrentEpochValidation(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCurrentEpochValidation(quorum: String): Unit = { val topic = "topic" val topicPartition = new TopicPartition(topic, 0) val partitionToLeader = createTopic(topic, replicationFactor = 3) diff --git a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala index 57545c7ba2b00..019bf14c79362 100644 --- a/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ProduceRequestTest.scala @@ -21,7 +21,7 @@ import java.nio.ByteBuffer import java.util.{Collections, Properties} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.{Admin, TopicDescription} -import org.apache.kafka.common.{TopicIdPartition, TopicPartition} +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.message.ProduceRequestData @@ -32,9 +32,9 @@ import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.record.BrokerCompressionType import org.apache.kafka.storage.log.metrics.BrokerTopicMetrics import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.Test import org.junit.jupiter.params.ParameterizedTest import org.junit.jupiter.params.provider.{Arguments, MethodSource} +import org.junit.jupiter.params.provider.ValueSource import java.util.concurrent.TimeUnit import scala.jdk.CollectionConverters._ @@ -47,18 +47,19 @@ class ProduceRequestTest extends BaseRequestTest { val metricsKeySet = KafkaYammerMetrics.defaultRegistry.allMetrics.keySet.asScala - @Test - def testSimpleProduceRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testSimpleProduceRequest(quorum: String): Unit = { val (partition, leader) = createTopicAndFindPartitionWithLeader("topic") def sendAndCheck(memoryRecords: MemoryRecords, expectedOffset: Long): Unit = { - val topicId = getTopicIds().get("topic").get + val topicPartition = new TopicPartition("topic", partition) val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(topicId) + .setName(topicPartition.topic()) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(partition) + .setIndex(topicPartition.partition()) .setRecords(memoryRecords)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -69,8 +70,8 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - assertEquals(topicId, topicProduceResponse.topicId()) - assertEquals(partition, partitionProduceResponse.index()) + val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) + assertEquals(topicPartition, tp) assertEquals(Errors.NONE, Errors.forCode(partitionProduceResponse.errorCode)) assertEquals(expectedOffset, partitionProduceResponse.baseOffset) assertEquals(-1, partitionProduceResponse.logAppendTimeMs) @@ -102,7 +103,7 @@ class ProduceRequestTest extends BaseRequestTest { }).toMap } - @ParameterizedTest + @ParameterizedTest(name = "quorum=kraft") @MethodSource(Array("timestampConfigProvider")) def testProduceWithInvalidTimestamp(messageTimeStampConfig: String, recordTimestamp: Long): Unit = { val topic = "topic" @@ -121,7 +122,6 @@ class ProduceRequestTest extends BaseRequestTest { ) val partitionToLeader = getPartitionToLeader(admin, topic) val leader = partitionToLeader(partition) - val topicDescription = TestUtils.describeTopic(createAdminClient(), topic) def createRecords(magicValue: Byte, timestamp: Long, codec: Compression): MemoryRecords = { val buf = ByteBuffer.allocate(512) @@ -133,11 +133,11 @@ class ProduceRequestTest extends BaseRequestTest { } val records = createRecords(RecordBatch.MAGIC_VALUE_V2, recordTimestamp, Compression.gzip().build()) - val topicPartition = new TopicIdPartition(topicDescription.topicId(), partition, "topic") + val topicPartition = new TopicPartition("topic", partition) val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(topicPartition.topicId()) + .setName(topicPartition.topic()) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() .setIndex(topicPartition.partition()) .setRecords(records)))).iterator)) @@ -149,9 +149,7 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - val tp = new TopicIdPartition(topicProduceResponse.topicId(), - partitionProduceResponse.index, - getTopicNames().get(topicProduceResponse.topicId()).getOrElse("")) + val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) assertEquals(topicPartition, tp) assertEquals(Errors.INVALID_TIMESTAMP, Errors.forCode(partitionProduceResponse.errorCode)) // there are 3 records with InvalidTimestampException created from inner function createRecords @@ -162,8 +160,9 @@ class ProduceRequestTest extends BaseRequestTest { assertEquals("One or more records have been rejected due to invalid timestamp", partitionProduceResponse.errorMessage) } - @Test - def testProduceToNonReplica(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testProduceToNonReplica(quorum: String): Unit = { val topic = "topic" val partition = 0 @@ -183,12 +182,13 @@ class ProduceRequestTest extends BaseRequestTest { // Send the produce request to the non-replica val records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("key".getBytes, "value".getBytes)) + val topicPartition = new TopicPartition("topic", partition) val produceRequest = ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(getTopicIds().get("topic").get) + .setName(topicPartition.topic()) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(partition) + .setIndex(topicPartition.partition()) .setRecords(records)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -210,22 +210,23 @@ class ProduceRequestTest extends BaseRequestTest { }.getOrElse(throw new AssertionError(s"No leader elected for topic $topic")) } - @Test - def testCorruptLz4ProduceRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCorruptLz4ProduceRequest(quorum: String): Unit = { val (partition, leader) = createTopicAndFindPartitionWithLeader("topic") - val topicId = getTopicIds().get("topic").get val timestamp = 1000000 val memoryRecords = MemoryRecords.withRecords(Compression.lz4().build(), new SimpleRecord(timestamp, "key".getBytes, "value".getBytes)) // Change the lz4 checksum value (not the kafka record crc) so that it doesn't match the contents val lz4ChecksumOffset = 6 memoryRecords.buffer.array.update(DefaultRecordBatch.RECORD_BATCH_OVERHEAD + lz4ChecksumOffset, 0) + val topicPartition = new TopicPartition("topic", partition) val produceResponse = sendProduceRequest(leader, ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setTopicId(topicId) + .setName(topicPartition.topic()) .setPartitionData(Collections.singletonList(new ProduceRequestData.PartitionProduceData() - .setIndex(partition) + .setIndex(topicPartition.partition()) .setRecords(memoryRecords)))).iterator)) .setAcks((-1).toShort) .setTimeoutMs(3000) @@ -235,8 +236,8 @@ class ProduceRequestTest extends BaseRequestTest { val topicProduceResponse = produceResponse.data.responses.asScala.head assertEquals(1, topicProduceResponse.partitionResponses.size) val partitionProduceResponse = topicProduceResponse.partitionResponses.asScala.head - assertEquals(topicId, topicProduceResponse.topicId()) - assertEquals(partition, partitionProduceResponse.index()) + val tp = new TopicPartition(topicProduceResponse.name, partitionProduceResponse.index) + assertEquals(topicPartition, tp) assertEquals(Errors.CORRUPT_MESSAGE, Errors.forCode(partitionProduceResponse.errorCode)) assertEquals(-1, partitionProduceResponse.baseOffset) assertEquals(-1, partitionProduceResponse.logAppendTimeMs) @@ -244,8 +245,9 @@ class ProduceRequestTest extends BaseRequestTest { assertTrue(TestUtils.meterCount(s"${BrokerTopicMetrics.INVALID_MESSAGE_CRC_RECORDS_PER_SEC}") > 0) } - @Test - def testZSTDProduceRequest(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testZSTDProduceRequest(quorum: String): Unit = { val topic = "topic" val partition = 0 @@ -260,8 +262,7 @@ class ProduceRequestTest extends BaseRequestTest { val partitionRecords = new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection(Collections.singletonList( new ProduceRequestData.TopicProduceData() - .setName("topic") // This test case is testing producer v.7, no need to use topic id - .setPartitionData(Collections.singletonList( + .setName("topic").setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(partition) .setRecords(memoryRecords)))) diff --git a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala index dd5968055e0f9..9bf4d4d7e001f 100644 --- a/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala +++ b/core/src/test/scala/unit/kafka/server/RegistrationTestContext.scala @@ -66,7 +66,7 @@ class RegistrationTestContext( val controllerEpoch = new AtomicInteger(123) config.effectiveAdvertisedBrokerListeners.foreach { ep => advertisedListeners.add(new Listener().setHost(ep.host). - setName(ep.listener). + setName(ep.listenerName.value()). setPort(ep.port.shortValue()). setSecurityProtocol(ep.securityProtocol.id)) } diff --git a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala index 5c04e473d447e..f60d0f0e3fd11 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaAlterLogDirsThreadTest.scala @@ -17,23 +17,22 @@ package kafka.server import kafka.cluster.Partition -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} +import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.ReplicaAlterLogDirsThread.ReassignmentState -import kafka.server.metadata.KRaftMetadataCache import kafka.utils.TestUtils import org.apache.kafka.common.errors.KafkaStorageException import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.OffsetForLeaderPartition import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.MemoryRecords import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.server.{PartitionFetchState, ReplicaState, common} +import org.apache.kafka.server.common import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} -import org.apache.kafka.storage.internals.log.UnifiedLog import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test @@ -53,7 +52,7 @@ class ReplicaAlterLogDirsThreadTest { private val topicNames = collection.immutable.Map(topicId -> "topic1") private val tid1p0 = new TopicIdPartition(topicId, t1p0) private val failedPartitions = new FailedPartitions - private val metadataCache = new KRaftMetadataCache(1, () => KRaftVersion.LATEST_PRODUCTION) + private val metadataCache = MetadataCache.kRaftMetadataCache(1, () => KRaftVersion.LATEST_PRODUCTION) private def initialFetchState(fetchOffset: Long, leaderEpoch: Int = 1): InitialFetchState = { InitialFetchState(topicId = Some(topicId), leader = new BrokerEndPoint(0, "localhost", 9092), @@ -123,7 +122,7 @@ class ReplicaAlterLogDirsThreadTest { when(futureLog.logStartOffset).thenReturn(0L) when(futureLog.logEndOffset).thenReturn(0L) - when(futureLog.latestEpoch).thenReturn(Optional.empty) + when(futureLog.latestEpoch).thenReturn(None) val fencedRequestData = new FetchRequest.PartitionData(topicId, 0L, 0L, config.replicaFetchMaxBytes, Optional.of(leaderEpoch - 1)) @@ -223,7 +222,7 @@ class ReplicaAlterLogDirsThreadTest { when(futureLog.logStartOffset).thenReturn(0L) when(futureLog.logEndOffset).thenReturn(0L) - when(futureLog.latestEpoch).thenReturn(Optional.empty) + when(futureLog.latestEpoch).thenReturn(None) val requestData = new FetchRequest.PartitionData(topicId, 0L, 0L, config.replicaFetchMaxBytes, Optional.of(leaderEpoch)) @@ -309,7 +308,7 @@ class ReplicaAlterLogDirsThreadTest { when(futureLog.logStartOffset).thenReturn(0L) when(futureLog.logEndOffset).thenReturn(0L) - when(futureLog.latestEpoch).thenReturn(Optional.empty) + when(futureLog.latestEpoch).thenReturn(None) val requestData = new FetchRequest.PartitionData(topicId, 0L, 0L, config.replicaFetchMaxBytes, Optional.of(leaderEpoch)) @@ -404,7 +403,7 @@ class ReplicaAlterLogDirsThreadTest { when(futureLog.logStartOffset).thenReturn(0L) when(futureLog.logEndOffset).thenReturn(0L) - when(futureLog.latestEpoch).thenReturn(Optional.empty) + when(futureLog.latestEpoch).thenReturn(None) val requestData = new FetchRequest.PartitionData(topicId, 0L, 0L, config.replicaFetchMaxBytes, Optional.of(leaderEpoch)) @@ -504,6 +503,7 @@ class ReplicaAlterLogDirsThreadTest { ArgumentCaptor.forClass(classOf[Seq[(TopicIdPartition, FetchPartitionData)] => Unit]) val expectedFetchParams = new FetchParams( + ApiKeys.FETCH.latestVersion, FetchRequest.FUTURE_LOCAL_REPLICA_ID, -1, 0L, @@ -573,13 +573,13 @@ class ReplicaAlterLogDirsThreadTest { null, config.replicaFetchBackoffMs) - val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( - t1p0, new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(Map( + t1p0 -> new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(leaderEpochT1p0), - t1p1, new OffsetForLeaderPartition() + t1p1 -> new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(leaderEpochT1p1))).asScala + .setLeaderEpoch(leaderEpochT1p1))) val expected = Map( t1p0 -> new EpochEndOffset() @@ -635,13 +635,13 @@ class ReplicaAlterLogDirsThreadTest { null, config.replicaFetchBackoffMs) - val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( - t1p0, new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(Map( + t1p0 -> new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(leaderEpoch), - t1p1, new OffsetForLeaderPartition() + t1p1 -> new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(leaderEpoch))).asScala + .setLeaderEpoch(leaderEpoch))) val expected = Map( t1p0 -> new EpochEndOffset() @@ -702,9 +702,9 @@ class ReplicaAlterLogDirsThreadTest { when(futureLogT1p0.logEndOffset).thenReturn(futureReplicaLEO) when(futureLogT1p1.logEndOffset).thenReturn(futureReplicaLEO) - when(futureLogT1p0.latestEpoch).thenReturn(Optional.of(leaderEpoch)) + when(futureLogT1p0.latestEpoch).thenReturn(Some(leaderEpoch)) when(futureLogT1p0.endOffsetForEpoch(leaderEpoch)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) + Some(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) when(partitionT1p0.lastOffsetForLeaderEpoch(Optional.of(1), leaderEpoch, fetchOnlyFromLeader = false)) .thenReturn(new EpochEndOffset() .setPartition(partitionT1p0Id) @@ -712,9 +712,9 @@ class ReplicaAlterLogDirsThreadTest { .setLeaderEpoch(leaderEpoch) .setEndOffset(replicaT1p0LEO)) - when(futureLogT1p1.latestEpoch).thenReturn(Optional.of(leaderEpoch)) + when(futureLogT1p1.latestEpoch).thenReturn(Some(leaderEpoch)) when(futureLogT1p1.endOffsetForEpoch(leaderEpoch)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) + Some(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) when(partitionT1p1.lastOffsetForLeaderEpoch(Optional.of(1), leaderEpoch, fetchOnlyFromLeader = false)) .thenReturn(new EpochEndOffset() .setPartition(partitionT1p1Id) @@ -785,8 +785,8 @@ class ReplicaAlterLogDirsThreadTest { when(futureLog.logEndOffset).thenReturn(futureReplicaLEO) when(futureLog.latestEpoch) - .thenReturn(Optional.of(leaderEpoch)) - .thenReturn(Optional.of(leaderEpoch - 2)) + .thenReturn(Some(leaderEpoch)) + .thenReturn(Some(leaderEpoch - 2)) // leader replica truncated and fetched new offsets with new leader epoch when(partition.lastOffsetForLeaderEpoch(Optional.of(1), leaderEpoch, fetchOnlyFromLeader = false)) @@ -797,7 +797,7 @@ class ReplicaAlterLogDirsThreadTest { .setEndOffset(replicaLEO)) // but future replica does not know about this leader epoch, so returns a smaller leader epoch when(futureLog.endOffsetForEpoch(leaderEpoch - 1)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch - 2))) + Some(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch - 2))) // finally, the leader replica knows about the leader epoch and returns end offset when(partition.lastOffsetForLeaderEpoch(Optional.of(1), leaderEpoch - 2, fetchOnlyFromLeader = false)) .thenReturn(new EpochEndOffset() @@ -806,7 +806,7 @@ class ReplicaAlterLogDirsThreadTest { .setLeaderEpoch(leaderEpoch - 2) .setEndOffset(replicaEpochEndOffset)) when(futureLog.endOffsetForEpoch(leaderEpoch - 2)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaEpochEndOffset, leaderEpoch - 2))) + Some(new OffsetAndEpoch(futureReplicaEpochEndOffset, leaderEpoch - 2))) when(partition.logDirectoryId()).thenReturn(Some(Uuid.fromString("n6WOe2zPScqZLIreCWN6Ug"))) when(replicaManager.logManager).thenReturn(logManager) @@ -865,7 +865,7 @@ class ReplicaAlterLogDirsThreadTest { when(partition.logDirectoryId()).thenReturn(Some(Uuid.fromString("b2e1ihvGQiu6A504oKoddQ"))) // pretend this is a completely new future replica, with no leader epochs recorded - when(futureLog.latestEpoch).thenReturn(Optional.empty) + when(futureLog.latestEpoch).thenReturn(None) stubWithFetchMessages(log, null, futureLog, partition, replicaManager, responseCallback) @@ -923,9 +923,9 @@ class ReplicaAlterLogDirsThreadTest { when(replicaManager.futureLocalLogOrException(t1p0)).thenReturn(futureLog) when(replicaManager.futureLogExists(t1p0)).thenReturn(true) when(futureLog.logEndOffset).thenReturn(futureReplicaLEO) - when(futureLog.latestEpoch).thenReturn(Optional.of(futureReplicaLeaderEpoch)) + when(futureLog.latestEpoch).thenReturn(Some(futureReplicaLeaderEpoch)) when(futureLog.endOffsetForEpoch(futureReplicaLeaderEpoch)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaLEO, futureReplicaLeaderEpoch))) + Some(new OffsetAndEpoch(futureReplicaLEO, futureReplicaLeaderEpoch))) when(replicaManager.localLog(t1p0)).thenReturn(Some(log)) // this will cause fetchEpochsFromLeader return an error with undefined offset @@ -1016,10 +1016,10 @@ class ReplicaAlterLogDirsThreadTest { when(replicaManager.futureLocalLogOrException(t1p0)).thenReturn(futureLog) when(replicaManager.futureLogExists(t1p0)).thenReturn(true) - when(futureLog.latestEpoch).thenReturn(Optional.of(leaderEpoch)) + when(futureLog.latestEpoch).thenReturn(Some(leaderEpoch)) when(futureLog.logEndOffset).thenReturn(futureReplicaLEO) when(futureLog.endOffsetForEpoch(leaderEpoch)).thenReturn( - Optional.of(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) + Some(new OffsetAndEpoch(futureReplicaLEO, leaderEpoch))) when(replicaManager.logManager).thenReturn(logManager) stubWithFetchMessages(log, null, futureLog, partition, replicaManager, responseCallback) @@ -1082,15 +1082,14 @@ class ReplicaAlterLogDirsThreadTest { t1p0 -> initialFetchState(0L, leaderEpoch), t1p1 -> initialFetchState(0L, leaderEpoch))) - val result = thread.leader.buildFetch(java.util.Map.of( - t1p0, new PartitionFetchState(Optional.of(topicId), 150, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), - t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty))) - val fetchRequestOpt = result.result - val partitionsWithError = result.partitionsWithError - assertTrue(fetchRequestOpt.isPresent) + val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = thread.leader.buildFetch(Map( + t1p0 -> PartitionFetchState(Some(topicId), 150, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None))) + + assertTrue(fetchRequestOpt.isDefined) val fetchRequest = fetchRequestOpt.get.fetchRequest assertFalse(fetchRequest.fetchData.isEmpty) - assertTrue(partitionsWithError.isEmpty) + assertFalse(partitionsWithError.nonEmpty) val request = fetchRequest.build() assertEquals(0, request.minBytes) val fetchInfos = request.fetchData(topicNames.asJava).asScala.toSeq @@ -1138,54 +1137,39 @@ class ReplicaAlterLogDirsThreadTest { t1p1 -> initialFetchState(0L, leaderEpoch))) // one partition is ready and one is truncating - val result1 = thread.leader.buildFetch(java.util.Map.of( - t1p0, new PartitionFetchState(Optional.of(topicId), 150, Optional.empty(), leaderEpoch, Optional.empty(), - ReplicaState.FETCHING, Optional.empty()), - t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.empty(), - ReplicaState.TRUNCATING, Optional.empty()) - )) - val fetchRequestOpt1 = result1.result - val partitionsWithError1 = result1.partitionsWithError - - assertTrue(fetchRequestOpt1.isPresent) - val fetchRequest = fetchRequestOpt1.get - assertFalse(fetchRequest.fetchRequest.fetchData.isEmpty) - assertTrue(partitionsWithError1.isEmpty) + val ResultWithPartitions(fetchRequestOpt, partitionsWithError) = thread.leader.buildFetch(Map( + t1p0 -> PartitionFetchState(Some(topicId), 150, None, leaderEpoch, state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, state = Truncating, lastFetchedEpoch = None))) + + assertTrue(fetchRequestOpt.isDefined) + val fetchRequest = fetchRequestOpt.get + assertFalse(fetchRequest.partitionData.isEmpty) + assertFalse(partitionsWithError.nonEmpty) val fetchInfos = fetchRequest.fetchRequest.build().fetchData(topicNames.asJava).asScala.toSeq assertEquals(1, fetchInfos.length) assertEquals(t1p0, fetchInfos.head._1.topicPartition, "Expected fetch request for non-truncating partition") assertEquals(150, fetchInfos.head._2.fetchOffset) // one partition is ready and one is delayed - val result2 = thread.leader.buildFetch(java.util.Map.of( - t1p0, new PartitionFetchState(Optional.of(topicId), 140, Optional.empty(), leaderEpoch, Optional.empty(), - ReplicaState.FETCHING, Optional.empty()), - t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.of(5000L), - ReplicaState.FETCHING, Optional.empty()) - )) - val fetchRequest2Opt = result2.result - val partitionsWithError2 = result2.partitionsWithError - - assertTrue(fetchRequest2Opt.isPresent) + val ResultWithPartitions(fetchRequest2Opt, partitionsWithError2) = thread.leader.buildFetch(Map( + t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None))) + + assertTrue(fetchRequest2Opt.isDefined) val fetchRequest2 = fetchRequest2Opt.get - assertFalse(fetchRequest2.fetchRequest.fetchData().isEmpty) - assertTrue(partitionsWithError2.isEmpty()) + assertFalse(fetchRequest2.partitionData.isEmpty) + assertFalse(partitionsWithError2.nonEmpty) val fetchInfos2 = fetchRequest2.fetchRequest.build().fetchData(topicNames.asJava).asScala.toSeq assertEquals(1, fetchInfos2.length) assertEquals(t1p0, fetchInfos2.head._1.topicPartition, "Expected fetch request for non-delayed partition") assertEquals(140, fetchInfos2.head._2.fetchOffset) // both partitions are delayed - val result3 = thread.leader.buildFetch(java.util.Map.of( - t1p0, new PartitionFetchState(Optional.of(topicId), 140, Optional.empty(), leaderEpoch, Optional.of(5000L), - ReplicaState.FETCHING, Optional.empty()), - t1p1, new PartitionFetchState(Optional.of(topicId), 160, Optional.empty(), leaderEpoch, Optional.of(5000L), - ReplicaState.FETCHING, Optional.empty()) - )) - val fetchRequest3Opt = result3.result - val partitionsWithError3 = result3.partitionsWithError + val ResultWithPartitions(fetchRequest3Opt, partitionsWithError3) = thread.leader.buildFetch(Map( + t1p0 -> PartitionFetchState(Some(topicId), 140, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId), 160, None, leaderEpoch, delay = Some(5000), state = Fetching, lastFetchedEpoch = None))) assertTrue(fetchRequest3Opt.isEmpty, "Expected no fetch requests since all partitions are delayed") - assertTrue(partitionsWithError3.isEmpty()) + assertFalse(partitionsWithError3.nonEmpty) } def stub(logT1p0: UnifiedLog, logT1p1: UnifiedLog, futureLog: UnifiedLog, partition: Partition, diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala index 66b41c0aaf134..d86cc54ca939a 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetchTest.scala @@ -17,13 +17,15 @@ package kafka.server -import org.junit.jupiter.api.{AfterEach, Test} +import org.junit.jupiter.api.AfterEach import kafka.utils.TestUtils import TestUtils._ import kafka.api.IntegrationTestHarness import org.apache.kafka.clients.producer.ProducerRecord import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.serialization.StringSerializer +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource class ReplicaFetchTest extends IntegrationTestHarness { val topic1 = "foo" @@ -37,8 +39,9 @@ class ReplicaFetchTest extends IntegrationTestHarness { override def brokerCount: Int = 2 - @Test - def testReplicaFetcherThread(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testReplicaFetcherThread(quorum: String): Unit = { val partition = 0 val testMessageList1 = List("test1", "test2", "test3", "test4") val testMessageList2 = List("test5", "test6", "test7", "test8") diff --git a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala index 91aa1d5c97821..ff556f586c482 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaFetcherThreadTest.scala @@ -17,11 +17,10 @@ package kafka.server import kafka.cluster.Partition -import kafka.log.LogManager - +import kafka.log.{LogManager, UnifiedLog} +import kafka.server.AbstractFetcherThread.ResultWithPartitions import kafka.server.QuotaFactory.UNBOUNDED_QUOTA import kafka.server.epoch.util.MockBlockingSender -import kafka.server.metadata.KRaftMetadataCache import kafka.utils.TestUtils import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.compress.Compression @@ -36,9 +35,7 @@ import org.apache.kafka.common.requests.{FetchRequest, FetchResponse} import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, OffsetAndEpoch} import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.ReplicaState -import org.apache.kafka.server.PartitionFetchState -import org.apache.kafka.storage.internals.log.{LogAppendInfo, UnifiedLog} +import org.apache.kafka.storage.internals.log.LogAppendInfo import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} @@ -48,13 +45,11 @@ import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.{any, anyBoolean, anyLong} import org.mockito.Mockito.{mock, times, verify, when} -import java.lang.{Long => JLong} import java.nio.charset.StandardCharsets import java.util -import java.util.{Collections, Optional} +import java.util.{Collections, Optional, OptionalInt} import scala.collection.mutable import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters._ class ReplicaFetcherThreadTest { @@ -70,7 +65,7 @@ class ReplicaFetcherThreadTest { private val brokerEndPoint = new BrokerEndPoint(0, "localhost", 1000) private val failedPartitions = new FailedPartitions - private val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION) + private val metadataCache = MetadataCache.kRaftMetadataCache(0, () => KRaftVersion.LATEST_PRODUCTION) private def initialFetchState(topicId: Option[Uuid], fetchOffset: Long, leaderEpoch: Int = 1): InitialFetchState = { InitialFetchState(topicId = topicId, leader = new BrokerEndPoint(0, "localhost", 9092), @@ -102,7 +97,8 @@ class ReplicaFetcherThreadTest { failedPartitions, replicaMgr, quota, - logContext.logPrefix) + logContext.logPrefix, + () => metadataVersion) } @Test @@ -163,13 +159,13 @@ class ReplicaFetcherThreadTest { mockBlockingSend ) - val result = thread.leader.fetchEpochEndOffsets(java.util.Map.of( - t1p0, new OffsetForLeaderPartition() + val result = thread.leader.fetchEpochEndOffsets(Map( + t1p0 -> new OffsetForLeaderPartition() .setPartition(t1p0.partition) .setLeaderEpoch(0), - t1p1, new OffsetForLeaderPartition() + t1p1 -> new OffsetForLeaderPartition() .setPartition(t1p1.partition) - .setLeaderEpoch(0))).asScala + .setLeaderEpoch(0))) val expected = Map( t1p0 -> newOffsetForLeaderPartitionResult(t1p0, Errors.UNKNOWN_SERVER_ERROR, UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET), @@ -201,9 +197,9 @@ class ReplicaFetcherThreadTest { //Stubs when(partition.localLogOrException).thenReturn(log) when(log.highWatermark).thenReturn(0) - when(log.latestEpoch).thenReturn(Optional.of(leaderEpoch)) + when(log.latestEpoch).thenReturn(Some(leaderEpoch)) when(log.endOffsetForEpoch(leaderEpoch)).thenReturn( - Optional.of(new OffsetAndEpoch(0, leaderEpoch))) + Some(new OffsetAndEpoch(0, leaderEpoch))) when(replicaManager.metadataCache).thenReturn(metadataCache) when(replicaManager.logManager).thenReturn(logManager) when(replicaManager.replicaAlterLogDirsManager).thenReturn(replicaAlterLogDirsManager) @@ -211,9 +207,9 @@ class ReplicaFetcherThreadTest { stub(partition, replicaManager, log) //Define the offsets for the OffsetsForLeaderEpochResponse - val offsets = java.util.Map.of( - t1p0, newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), - t1p1, newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)) + val offsets = Map( + t1p0 -> newOffsetForLeaderPartitionResult(t1p0, leaderEpoch, 1), + t1p1 -> newOffsetForLeaderPartitionResult(t1p1, leaderEpoch, 1)).asJava //Create the fetcher thread val mockNetwork = new MockBlockingSender(offsets, brokerEndPoint, Time.SYSTEM) @@ -262,15 +258,15 @@ class ReplicaFetcherThreadTest { val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) val initialLEO = 200 - var latestLogEpoch: Optional[Int] = Optional.of(5) + var latestLogEpoch: Option[Int] = Some(5) // Stubs when(partition.localLogOrException).thenReturn(log) when(log.highWatermark).thenReturn(115) when(log.latestEpoch).thenAnswer(_ => latestLogEpoch) - when(log.endOffsetForEpoch(4)).thenReturn(Optional.of(new OffsetAndEpoch(149, 4))) - when(log.endOffsetForEpoch(3)).thenReturn(Optional.of(new OffsetAndEpoch(129, 2))) - when(log.endOffsetForEpoch(2)).thenReturn(Optional.of(new OffsetAndEpoch(119, 1))) + when(log.endOffsetForEpoch(4)).thenReturn(Some(new OffsetAndEpoch(149, 4))) + when(log.endOffsetForEpoch(3)).thenReturn(Some(new OffsetAndEpoch(129, 2))) + when(log.endOffsetForEpoch(2)).thenReturn(Some(new OffsetAndEpoch(119, 1))) when(log.logEndOffset).thenReturn(initialLEO) when(replicaManager.metadataCache).thenReturn(metadataCache) when(replicaManager.localLogOrException(any[TopicPartition])).thenReturn(log) @@ -285,21 +281,9 @@ class ReplicaFetcherThreadTest { val fetchSessionHandler = new FetchSessionHandler(logContext, brokerEndPoint.id) val leader = new RemoteLeaderEndPoint(logContext.logPrefix, mockNetwork, fetchSessionHandler, config, replicaManager, quota, () => MetadataVersion.MINIMUM_VERSION, () => 1) - val thread = new ReplicaFetcherThread( - "bob", - leader, - config, - failedPartitions, - replicaManager, - quota, - logContext.logPrefix - ) { - override def processPartitionData( - topicPartition: TopicPartition, - fetchOffset: Long, - partitionLeaderEpoch: Int, - partitionData: FetchData - ): Option[LogAppendInfo] = None + val thread = new ReplicaFetcherThread("bob", leader, config, failedPartitions, + replicaManager, quota, logContext.logPrefix, () => MetadataVersion.MINIMUM_VERSION) { + override def processPartitionData(topicPartition: TopicPartition, fetchOffset: Long, partitionData: FetchData): Option[LogAppendInfo] = None } thread.addPartitions(Map(t1p0 -> initialFetchState(Some(topicId1), initialLEO), t1p1 -> initialFetchState(Some(topicId1), initialLEO))) val partitions = Set(t1p0, t1p1) @@ -309,7 +293,7 @@ class ReplicaFetcherThreadTest { thread.doWork() assertEquals(0, mockNetwork.epochFetchCount) assertEquals(1, mockNetwork.fetchCount) - partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } def partitionData(partition: Int, divergingEpoch: FetchResponseData.EpochEndOffset): FetchResponseData.PartitionData = { new FetchResponseData.PartitionData() @@ -325,7 +309,7 @@ class ReplicaFetcherThreadTest { t1p1 -> partitionData(t1p1.partition, new FetchResponseData.EpochEndOffset().setEpoch(4).setEndOffset(141)) )) mockNetwork.setIdsForNextResponse(topicIds) - latestLogEpoch = Optional.of(4) + latestLogEpoch = Some(4) thread.doWork() assertEquals(0, mockNetwork.epochFetchCount) assertEquals(2, mockNetwork.fetchCount) @@ -334,7 +318,7 @@ class ReplicaFetcherThreadTest { "Expected " + t1p0 + " to truncate to offset 140 (truncation offsets: " + truncateToCapture.getAllValues + ")") assertTrue(truncateToCapture.getAllValues.asScala.contains(141), "Expected " + t1p1 + " to truncate to offset 141 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } // Loop 3 should truncate because of diverging epoch. Offset truncation is not complete // because divergent epoch is not known to follower. We truncate and stay in Fetching state. @@ -349,7 +333,7 @@ class ReplicaFetcherThreadTest { verify(partition, times(4)).truncateTo(truncateToCapture.capture(), anyBoolean()) assertTrue(truncateToCapture.getAllValues.asScala.contains(129), "Expected to truncate to offset 129 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } // Loop 4 should truncate because of diverging epoch. Offset truncation is not complete // because divergent epoch is not known to follower. Last fetched epoch cannot be determined @@ -359,14 +343,14 @@ class ReplicaFetcherThreadTest { t1p1 -> partitionData(t1p1.partition, new FetchResponseData.EpochEndOffset().setEpoch(2).setEndOffset(121)) )) mockNetwork.setIdsForNextResponse(topicIds) - latestLogEpoch = Optional.empty + latestLogEpoch = None thread.doWork() assertEquals(0, mockNetwork.epochFetchCount) assertEquals(4, mockNetwork.fetchCount) verify(partition, times(6)).truncateTo(truncateToCapture.capture(), anyBoolean()) assertTrue(truncateToCapture.getAllValues.asScala.contains(119), "Expected to truncate to offset 119 (truncation offsets: " + truncateToCapture.getAllValues + ")") - partitions.foreach { tp => assertEquals(ReplicaState.FETCHING, thread.fetchState(tp).get.state) } + partitions.foreach { tp => assertEquals(Fetching, thread.fetchState(tp).get.state) } } @Test @@ -383,8 +367,8 @@ class ReplicaFetcherThreadTest { val highWatermark = 130 when(log.highWatermark).thenReturn(highWatermark) - when(log.latestEpoch).thenReturn(Optional.of(5)) - when(log.endOffsetForEpoch(4)).thenReturn(Optional.of(new OffsetAndEpoch(149, 4))) + when(log.latestEpoch).thenReturn(Some(5)) + when(log.endOffsetForEpoch(4)).thenReturn(Some(new OffsetAndEpoch(149, 4))) when(log.logEndOffset).thenReturn(logEndOffset) when(replicaManager.metadataCache).thenReturn(metadataCache) @@ -395,7 +379,7 @@ class ReplicaFetcherThreadTest { when(replicaManager.getPartitionOrException(t1p0)).thenReturn(partition) when(partition.localLogOrException).thenReturn(log) - when(partition.appendRecordsToFollowerOrFutureReplica(any(), any(), any())).thenReturn(None) + when(partition.appendRecordsToFollowerOrFutureReplica(any(), any())).thenReturn(None) val logContext = new LogContext(s"[ReplicaFetcher replicaId=${config.brokerId}, leaderId=${brokerEndPoint.id}, fetcherId=0] ") @@ -423,7 +407,8 @@ class ReplicaFetcherThreadTest { failedPartitions, replicaManager, quota, - logContext.logPrefix + logContext.logPrefix, + () => MetadataVersion.MINIMUM_VERSION ) thread.addPartitions(Map( @@ -463,10 +448,10 @@ class ReplicaFetcherThreadTest { val lastFetchedEpoch = 2 when(log.highWatermark).thenReturn(0) - when(log.latestEpoch).thenReturn(Optional.of(lastFetchedEpoch)) - when(log.endOffsetForEpoch(0)).thenReturn(Optional.of(new OffsetAndEpoch(0, 0))) + when(log.latestEpoch).thenReturn(Some(lastFetchedEpoch)) + when(log.endOffsetForEpoch(0)).thenReturn(Some(new OffsetAndEpoch(0, 0))) when(log.logEndOffset).thenReturn(0) - when(log.maybeUpdateHighWatermark(0)).thenReturn(Optional.empty) + when(log.maybeUpdateHighWatermark(0)).thenReturn(None) when(replicaManager.metadataCache).thenReturn(metadataCache) when(replicaManager.logManager).thenReturn(logManager) @@ -475,11 +460,12 @@ class ReplicaFetcherThreadTest { when(replicaManager.brokerTopicStats).thenReturn(mock(classOf[BrokerTopicStats])) when(partition.localLogOrException).thenReturn(log) - when(partition.appendRecordsToFollowerOrFutureReplica(any(), any(), any())).thenReturn(Some(new LogAppendInfo( + when(partition.appendRecordsToFollowerOrFutureReplica(any(), any())).thenReturn(Some(new LogAppendInfo( -1, 0, - Optional.empty, + OptionalInt.empty, RecordBatch.NO_TIMESTAMP, + -1L, RecordBatch.NO_TIMESTAMP, -1L, RecordValidationStats.EMPTY, @@ -514,7 +500,8 @@ class ReplicaFetcherThreadTest { failedPartitions, replicaManager, quota, - logContext.logPrefix + logContext.logPrefix, + () => MetadataVersion.MINIMUM_VERSION ) thread.addPartitions(Map( @@ -523,7 +510,7 @@ class ReplicaFetcherThreadTest { // Lag is initialized to None when the partition fetch // state is created. - assertEquals(None, thread.fetchState(t1p0).flatMap(_.lag.toScala)) + assertEquals(None, thread.fetchState(t1p0).flatMap(_.lag)) // Prepare the fetch response data. mockNetwork.setFetchPartitionDataForNextResponse(Map( @@ -541,8 +528,8 @@ class ReplicaFetcherThreadTest { assertEquals(1, mockNetwork.fetchCount) // Lag is set to Some(0). - assertEquals(Some(0), thread.fetchState(t1p0).flatMap(_.lag.toScala)) - assertEquals(Optional.of(lastFetchedEpoch), thread.fetchState(t1p0).toJava.flatMap(_.lastFetchedEpoch)) + assertEquals(Some(0), thread.fetchState(t1p0).flatMap(_.lag)) + assertEquals(Some(lastFetchedEpoch), thread.fetchState(t1p0).flatMap(_.lastFetchedEpoch)) } @Test @@ -618,19 +605,19 @@ class ReplicaFetcherThreadTest { failedPartitions, replicaManager, replicaQuota, - logContext.logPrefix) + logContext.logPrefix, + () => MetadataVersion.MINIMUM_VERSION) val leaderEpoch = 1 val partitionMap = Map( - t1p0 -> new PartitionFetchState(Optional.of(topicId1), 150, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), - t1p1 -> new PartitionFetchState(Optional.of(topicId1), 155, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), - t2p1 -> new PartitionFetchState(Optional.of(topicId2), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty)) + t1p0 -> PartitionFetchState(Some(topicId1), 150, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None), + t1p1 -> PartitionFetchState(Some(topicId1), 155, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None), + t2p1 -> PartitionFetchState(Some(topicId2), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None)) - val result = thread.leader.buildFetch(partitionMap.asJava) - val fetchRequestOpt = result.result + val ResultWithPartitions(fetchRequestOpt, _) = thread.leader.buildFetch(partitionMap) - assertTrue(fetchRequestOpt.isPresent) + assertTrue(fetchRequestOpt.isDefined) val fetchRequestBuilder = fetchRequestOpt.get.fetchRequest val partitionDataMap = partitionMap.map { case (tp, state) => @@ -646,17 +633,16 @@ class ReplicaFetcherThreadTest { responseData.put(tid1p0, new FetchResponseData.PartitionData()) responseData.put(tid1p1, new FetchResponseData.PartitionData()) responseData.put(tid2p1, new FetchResponseData.PartitionData()) - val fetchResponse = FetchResponse.of(Errors.NONE, 0, 123, responseData, List.empty.asJava) + val fetchResponse = FetchResponse.of(Errors.NONE, 0, 123, responseData) leader.fetchSessionHandler.handleResponse(fetchResponse, ApiKeys.FETCH.latestVersion()) // Remove t1p0, change the ID for t2p1, and keep t1p1 the same val newTopicId = Uuid.randomUuid() val partitionMap2 = Map( - t1p1 -> new PartitionFetchState(Optional.of(topicId1), 155, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty), - t2p1 -> new PartitionFetchState(Optional.of(newTopicId), 160, Optional.empty, leaderEpoch, Optional.empty, ReplicaState.FETCHING, Optional.empty)) - val result2 = thread.leader.buildFetch(partitionMap2.asJava) - val fetchRequestOpt2 = result2.result + t1p1 -> PartitionFetchState(Some(topicId1), 155, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None), + t2p1 -> PartitionFetchState(Some(newTopicId), 160, None, leaderEpoch, None, state = Fetching, lastFetchedEpoch = None)) + val ResultWithPartitions(fetchRequestOpt2, _) = thread.leader.buildFetch(partitionMap2) // Since t1p1 didn't change, we drop that one val partitionDataMap2 = partitionMap2.drop(1).map { case (tp, state) => @@ -664,7 +650,7 @@ class ReplicaFetcherThreadTest { config.replicaFetchMaxBytes, Optional.of(state.currentLeaderEpoch), Optional.empty())) } - assertTrue(fetchRequestOpt2.isPresent) + assertTrue(fetchRequestOpt2.isDefined) val fetchRequestBuilder2 = fetchRequestOpt2.get.fetchRequest assertEquals(partitionDataMap2.asJava, fetchRequestBuilder2.fetchData()) assertEquals(Collections.singletonList(tid2p1), fetchRequestBuilder2.replaced()) @@ -681,10 +667,10 @@ class ReplicaFetcherThreadTest { val mockBlockingSend: BlockingSend = mock(classOf[BlockingSend]) when(mockBlockingSend.brokerEndPoint()).thenReturn(brokerEndPoint) - val maybeNewHighWatermark: Optional[JLong] = if (highWatermarkUpdated) { - Optional.of(highWatermarkReceivedFromLeader) + val maybeNewHighWatermark = if (highWatermarkUpdated) { + Some(highWatermarkReceivedFromLeader) } else { - Optional.empty + None } val log: UnifiedLog = mock(classOf[UnifiedLog]) when(log.maybeUpdateHighWatermark(highWatermarkReceivedFromLeader)) @@ -694,7 +680,7 @@ class ReplicaFetcherThreadTest { val partition: Partition = mock(classOf[Partition]) when(partition.localLogOrException).thenReturn(log) - when(partition.appendRecordsToFollowerOrFutureReplica(any[MemoryRecords], any[Boolean], any[Int])).thenReturn(appendInfo) + when(partition.appendRecordsToFollowerOrFutureReplica(any[MemoryRecords], any[Boolean])).thenReturn(appendInfo) // Capture the argument at the time of invocation. val completeDelayedFetchRequestsArgument = mutable.Buffer.empty[TopicPartition] @@ -725,8 +711,8 @@ class ReplicaFetcherThreadTest { .setRecords(records) .setHighWatermark(highWatermarkReceivedFromLeader) - thread.processPartitionData(tp0, 0, Int.MaxValue, partitionData.setPartitionIndex(0)) - thread.processPartitionData(tp1, 0, Int.MaxValue, partitionData.setPartitionIndex(1)) + thread.processPartitionData(tp0, 0, partitionData.setPartitionIndex(0)) + thread.processPartitionData(tp1, 0, partitionData.setPartitionIndex(1)) verify(replicaManager, times(0)).completeDelayedFetchRequests(any[Seq[TopicPartition]]) thread.doWork() @@ -770,13 +756,13 @@ class ReplicaFetcherThreadTest { val log: UnifiedLog = mock(classOf[UnifiedLog]) val records = MemoryRecords.withRecords(Compression.NONE, new SimpleRecord(1000, "foo".getBytes(StandardCharsets.UTF_8))) - when(log.maybeUpdateHighWatermark(0)).thenReturn(Optional.empty) + when(log.maybeUpdateHighWatermark(hw = 0)).thenReturn(None) val partition: Partition = mock(classOf[Partition]) when(partition.localLogOrException).thenReturn(log) when(partition.isReassigning).thenReturn(isReassigning) when(partition.isAddingLocalReplica).thenReturn(isReassigning) - when(partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false, Int.MaxValue)).thenReturn(None) + when(partition.appendRecordsToFollowerOrFutureReplica(records, isFuture = false)).thenReturn(None) val replicaManager: ReplicaManager = mock(classOf[ReplicaManager]) when(replicaManager.getPartitionOrException(any[TopicPartition])).thenReturn(partition) @@ -800,7 +786,7 @@ class ReplicaFetcherThreadTest { .setLastStableOffset(0) .setLogStartOffset(0) .setRecords(records) - thread.processPartitionData(t1p0, 0, Int.MaxValue, partitionData) + thread.processPartitionData(t1p0, 0, partitionData) if (isReassigning) assertEquals(records.sizeInBytes(), brokerTopicStats.allTopicsStats.reassignmentBytesInPerSec.get.count()) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala index 52dd464e5c3e0..fd6590ca05504 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerConcurrencyTest.scala @@ -22,12 +22,12 @@ import java.util.concurrent.{CompletableFuture, Executors, LinkedBlockingQueue, import java.util.{Optional, Properties} import kafka.server.QuotaFactory.QuotaManagers import kafka.server.metadata.KRaftMetadataCache +import kafka.server.metadata.MockConfigRepository import kafka.utils.TestUtils.waitUntilTrue import kafka.utils.{CoreUtils, Logging, TestUtils} -import org.apache.kafka.common import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionChangeRecord, PartitionRecord, RegisterBrokerRecord, TopicRecord} import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.protocol.Errors +import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record.SimpleRecord import org.apache.kafka.common.replica.ClientMetadata.DefaultClientMetadata import org.apache.kafka.common.requests.{FetchRequest, ProduceResponse} @@ -35,7 +35,7 @@ import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.{Time, Utils} import org.apache.kafka.common.{DirectoryId, IsolationLevel, TopicPartition, Uuid} import org.apache.kafka.image.{MetadataDelta, MetadataImage} -import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState, MetadataCache, MockConfigRepository} +import org.apache.kafka.metadata.{LeaderAndIsr, LeaderRecoveryState} import org.apache.kafka.metadata.PartitionRegistration import org.apache.kafka.metadata.storage.Formatter import org.apache.kafka.raft.QuorumConfig @@ -83,7 +83,7 @@ class ReplicaManagerConcurrencyTest extends Logging { def testIsrExpandAndShrinkWithConcurrentProduce(): Unit = { val localId = 0 val remoteId = 1 - val metadataCache = new KRaftMetadataCache(localId, () => KRaftVersion.KRAFT_VERSION_0) + val metadataCache = MetadataCache.kRaftMetadataCache(localId, () => KRaftVersion.KRAFT_VERSION_0) channel = new ControllerChannel replicaManager = buildReplicaManager(localId, channel, metadataCache) @@ -184,7 +184,7 @@ class ReplicaManagerConcurrencyTest extends Logging { time = time ) - quotaManagers = QuotaFactory.instantiate(config, metrics, time, "", "") + quotaManagers = QuotaFactory.instantiate(config, metrics, time, "") new ReplicaManager( metrics = metrics, @@ -200,6 +200,7 @@ class ReplicaManagerConcurrencyTest extends Logging { override def createReplicaFetcherManager( metrics: Metrics, time: Time, + threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager ): ReplicaFetcherManager = { Mockito.mock(classOf[ReplicaFetcherManager]) @@ -253,6 +254,7 @@ class ReplicaManagerConcurrencyTest extends Logging { } val fetchParams = new FetchParams( + ApiKeys.FETCH.latestVersion, replicaId, defaultBrokerEpoch(replicaId), random.nextInt(100), @@ -292,13 +294,11 @@ class ReplicaManagerConcurrencyTest extends Logging { } val future = new CompletableFuture[ProduceResponse.PartitionResponse]() - val topicIdPartition: common.TopicIdPartition = replicaManager.topicIdPartition(topicPartition) - - def produceCallback(results: collection.Map[common.TopicIdPartition, ProduceResponse.PartitionResponse]): Unit = { + def produceCallback(results: collection.Map[TopicPartition, ProduceResponse.PartitionResponse]): Unit = { try { assertEquals(1, results.size) val (topicPartition, result) = results.head - assertEquals(topicIdPartition, topicPartition) + assertEquals(this.topicPartition, topicPartition) assertEquals(Errors.NONE, result.error) future.complete(result) } catch { @@ -311,7 +311,7 @@ class ReplicaManagerConcurrencyTest extends Logging { requiredAcks = (-1).toShort, internalTopicsAllowed = false, origin = AppendOrigin.CLIENT, - entriesPerPartition = collection.Map(topicIdPartition -> TestUtils.records(records)), + entriesPerPartition = collection.Map(topicPartition -> TestUtils.records(records)), responseCallback = produceCallback ) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala index a7948ae901f14..f0a4be811bb3f 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerQuotasTest.scala @@ -19,12 +19,12 @@ package kafka.server import java.io.File import java.util.{Collections, Optional, Properties} import kafka.cluster.{Partition, PartitionTest} -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} import kafka.server.QuotaFactory.QuotaManagers -import kafka.server.metadata.KRaftMetadataCache import kafka.utils._ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.metrics.Metrics +import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.record.{MemoryRecords, SimpleRecord} import org.apache.kafka.common.requests.FetchRequest import org.apache.kafka.common.requests.FetchRequest.PartitionData @@ -33,7 +33,7 @@ import org.apache.kafka.metadata.LeaderRecoveryState import org.apache.kafka.server.common.KRaftVersion import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams} import org.apache.kafka.server.util.{KafkaScheduler, MockTime} -import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogOffsetSnapshot, UnifiedLog} +import org.apache.kafka.storage.internals.log.{FetchDataInfo, LogConfig, LogDirFailureChannel, LogOffsetMetadata, LogOffsetSnapshot} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt, anyLong} @@ -175,6 +175,7 @@ class ReplicaManagerQuotasTest { new LogOffsetMetadata(50L, 0L, 250), new PartitionData(Uuid.ZERO_UUID, 50, 0, 1, Optional.empty())) val fetchParams = new FetchParams( + ApiKeys.FETCH.latestVersion, 1, 1, 600, @@ -226,6 +227,7 @@ class ReplicaManagerQuotasTest { new LogOffsetMetadata(50L, 0L, 250), new PartitionData(Uuid.ZERO_UUID, 50, 0, 1, Optional.empty())) val fetchParams = new FetchParams( + ApiKeys.FETCH.latestVersion, FetchRequest.CONSUMER_REPLICA_ID, -1, 600L, @@ -261,14 +263,14 @@ class ReplicaManagerQuotasTest { when(log.highWatermark).thenReturn(5) when(log.lastStableOffset).thenReturn(5) when(log.logEndOffsetMetadata).thenReturn(new LogOffsetMetadata(20L)) - when(log.topicId).thenReturn(Optional.of(topicId)) + when(log.topicId).thenReturn(Some(topicId)) when(log.config).thenReturn(new LogConfig(Collections.emptyMap())) //if we ask for len 1 return a message when(log.read(anyLong, - AdditionalMatchers.geq(1), - any[FetchIsolation], - anyBoolean)).thenReturn( + maxLength = AdditionalMatchers.geq(1), + isolation = any[FetchIsolation], + minOneMessage = anyBoolean)).thenReturn( new FetchDataInfo( new LogOffsetMetadata(0L, 0L, 0), MemoryRecords.withRecords(Compression.NONE, record) @@ -276,9 +278,9 @@ class ReplicaManagerQuotasTest { //if we ask for len = 0, return 0 messages when(log.read(anyLong, - ArgumentMatchers.eq(0), - any[FetchIsolation], - anyBoolean)).thenReturn( + maxLength = ArgumentMatchers.eq(0), + isolation = any[FetchIsolation], + minOneMessage = anyBoolean)).thenReturn( new FetchDataInfo( new LogOffsetMetadata(0L, 0L, 0), MemoryRecords.EMPTY @@ -286,7 +288,7 @@ class ReplicaManagerQuotasTest { when(log.maybeIncrementHighWatermark( any[LogOffsetMetadata] - )).thenReturn(Optional.empty) + )).thenReturn(None) //Create log manager val logManager: LogManager = mock(classOf[LogManager]) @@ -298,7 +300,7 @@ class ReplicaManagerQuotasTest { val alterIsrManager: AlterPartitionManager = mock(classOf[AlterPartitionManager]) val leaderBrokerId = configs.head.brokerId - quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "", "") + quotaManager = QuotaFactory.instantiate(configs.head, metrics, time, "") replicaManager = new ReplicaManager( metrics = metrics, config = configs.head, @@ -306,7 +308,7 @@ class ReplicaManagerQuotasTest { scheduler = scheduler, logManager = logManager, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(leaderBrokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(leaderBrokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(configs.head.logDirs.size), alterPartitionManager = alterIsrManager) diff --git a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala index 2483a1f85c04f..5c2b61adfbc5b 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicaManagerTest.scala @@ -20,27 +20,25 @@ package kafka.server import com.yammer.metrics.core.{Gauge, Meter, Timer} import kafka.cluster.PartitionTest.MockPartitionListener import kafka.cluster.Partition -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} +import kafka.log.remote.RemoteLogManager import org.apache.kafka.server.log.remote.quota.RLMQuotaManagerConfig.INACTIVE_SENSOR_EXPIRATION_TIME_SECONDS import org.apache.kafka.server.log.remote.quota.RLMQuotaMetrics import kafka.server.QuotaFactory.{QuotaManagers, UNBOUNDED_QUOTA} import kafka.server.epoch.util.MockBlockingSender -import kafka.server.metadata.KRaftMetadataCache -import kafka.server.share.{DelayedShareFetch, SharePartition} +import kafka.server.share.DelayedShareFetch import kafka.utils.TestUtils.waitUntilTrue -import kafka.utils.TestUtils +import kafka.utils.{Pool, TestUtils} import org.apache.kafka.clients.FetchSessionHandler import org.apache.kafka.common.{DirectoryId, IsolationLevel, Node, TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.errors.InvalidPidMappingException import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.message.{DeleteRecordsResponseData, FetchResponseData, ShareFetchResponseData} +import org.apache.kafka.common.message.DeleteRecordsResponseData import org.apache.kafka.common.message.OffsetForLeaderEpochResponseData.EpochEndOffset import org.apache.kafka.common.metadata.{PartitionChangeRecord, PartitionRecord, RemoveTopicRecord, TopicRecord} import org.apache.kafka.common.metrics.Metrics -import org.apache.kafka.common.metrics.Monitorable -import org.apache.kafka.common.metrics.PluginMetrics import org.apache.kafka.common.network.ListenerName import org.apache.kafka.common.protocol.{ApiKeys, Errors} import org.apache.kafka.common.record._ @@ -52,31 +50,23 @@ import org.apache.kafka.common.requests.ProduceResponse.PartitionResponse import org.apache.kafka.common.requests._ import org.apache.kafka.common.security.auth.KafkaPrincipal import org.apache.kafka.common.utils.{LogContext, Time, Utils} -import org.apache.kafka.coordinator.transaction.{AddPartitionsToTxnConfig, TransactionLogConfig} +import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.image._ import org.apache.kafka.metadata.LeaderConstants.NO_LEADER -import org.apache.kafka.metadata.{LeaderRecoveryState, MetadataCache, PartitionRegistration} +import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.metadata.properties.{MetaProperties, MetaPropertiesEnsemble, MetaPropertiesVersion, PropertiesUtils} -import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, MetadataVersion, OffsetAndEpoch, RequestLocal, StopPartition} +import org.apache.kafka.server.common.{DirectoryEventHandler, KRaftVersion, MetadataVersion, OffsetAndEpoch, StopPartition} import org.apache.kafka.server.config.{KRaftConfigs, ReplicationConfigs, ServerLogConfigs} -import org.apache.kafka.server.log.remote.TopicPartitionLog import org.apache.kafka.server.log.remote.storage._ import org.apache.kafka.server.metrics.{KafkaMetricsGroup, KafkaYammerMetrics} import org.apache.kafka.server.network.BrokerEndPoint -import org.apache.kafka.server.{LogReadResult, PartitionFetchState} -import org.apache.kafka.server.purgatory.{DelayedDeleteRecords, DelayedOperationPurgatory, DelayedRemoteListOffsets} -import org.apache.kafka.server.share.SharePartitionKey -import org.apache.kafka.server.share.fetch.{DelayedShareFetchGroupKey, DelayedShareFetchKey, ShareFetch} -import org.apache.kafka.server.share.metrics.ShareGroupMetrics +import org.apache.kafka.server.purgatory.DelayedOperationPurgatory import org.apache.kafka.server.storage.log.{FetchIsolation, FetchParams, FetchPartitionData} -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation -import org.apache.kafka.server.transaction.AddPartitionsToTxnManager.TransactionSupportedOperation.{ADD_PARTITION, GENERIC_ERROR_SUPPORTED} import org.apache.kafka.server.util.timer.MockTimer import org.apache.kafka.server.util.{MockScheduler, MockTime, Scheduler} import org.apache.kafka.storage.internals.checkpoint.LazyOffsetCheckpoints import org.apache.kafka.storage.internals.epoch.LeaderEpochFileCache -import org.apache.kafka.storage.internals.log.{AppendOrigin, CleanerConfig, FetchDataInfo, LocalLog, LogAppendInfo, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetsListener, LogOffsetSnapshot, LogSegments, ProducerStateManager, ProducerStateManagerConfig, RemoteLogReadResult, RemoteStorageFetchInfo, UnifiedLog, VerificationGuard} +import org.apache.kafka.storage.internals.log.{AppendOrigin, FetchDataInfo, LocalLog, LogConfig, LogDirFailureChannel, LogLoader, LogOffsetMetadata, LogOffsetSnapshot, LogSegments, ProducerStateManager, ProducerStateManagerConfig, RemoteStorageFetchInfo, VerificationGuard} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterAll, AfterEach, BeforeEach, Test} @@ -92,14 +82,13 @@ import java.io.{ByteArrayInputStream, File} import java.net.InetAddress import java.nio.file.{Files, Paths} import java.util -import java.util.concurrent.atomic.{AtomicLong, AtomicReference} -import java.util.concurrent.{Callable, CompletableFuture, ConcurrentHashMap, CountDownLatch, Future, TimeUnit} -import java.util.function.{BiConsumer, Consumer} +import java.util.concurrent.atomic.{AtomicBoolean, AtomicLong, AtomicReference} +import java.util.concurrent.{Callable, ConcurrentHashMap, CountDownLatch, TimeUnit} import java.util.stream.IntStream import java.util.{Collections, Optional, OptionalLong, Properties} -import scala.collection.{mutable, Map, Seq} +import scala.collection.{Map, Seq, mutable} import scala.jdk.CollectionConverters._ -import scala.jdk.OptionConverters.{RichOption, RichOptional} +import scala.jdk.OptionConverters.RichOption object ReplicaManagerTest { @AfterAll @@ -111,12 +100,9 @@ object ReplicaManagerTest { class ReplicaManagerTest { private val topic = "test-topic" - private val topic2 = "test-topic2" - private val topicId = Uuid.fromString("YK2ed2GaTH2JpgzUaJ8tgg") - private val topicId2 = Uuid.randomUuid() + private val topicId = Uuid.randomUuid() private val topicIds = scala.Predef.Map("test-topic" -> topicId) - private val topicNames = topicIds.map(_.swap) - private val topicPartition = new TopicPartition(topic, 0) + private val topicNames = scala.Predef.Map(topicId -> "test-topic") private val transactionalId = "txn" private val time = new MockTime private val metrics = new Metrics @@ -129,12 +115,13 @@ class ReplicaManagerTest { private var mockRemoteLogManager: RemoteLogManager = _ private var addPartitionsToTxnManager: AddPartitionsToTxnManager = _ private var brokerTopicStats: BrokerTopicStats = _ - private val metadataCache: KRaftMetadataCache = mock(classOf[KRaftMetadataCache]) private val quotaExceededThrottleTime = 1000 private val quotaAvailableThrottleTime = 0 // Constants defined for readability - private val partitionEpoch = 0 + private val zkVersion = 0 + private val correlationId = 0 + private val controllerEpoch = 0 private val brokerEpoch = 0L // These metrics are static and once we remove them after each test, they won't be created and verified anymore @@ -145,7 +132,7 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(1) config = KafkaConfig.fromProps(props) alterPartitionManager = mock(classOf[AlterPartitionManager]) - quotaManager = QuotaFactory.instantiate(config, metrics, time, "", "") + quotaManager = QuotaFactory.instantiate(config, metrics, time, "") mockRemoteLogManager = mock(classOf[RemoteLogManager]) when(mockRemoteLogManager.fetchThrottleTimeSensor()).thenReturn( new RLMQuotaMetrics(metrics, @@ -159,23 +146,7 @@ class ReplicaManagerTest { // Anytime we try to verify, just automatically run the callback as though the transaction was verified. when(addPartitionsToTxnManager.addOrVerifyTransaction(any(), any(), any(), any(), any(), any())).thenAnswer { invocationOnMock => val callback = invocationOnMock.getArgument(4, classOf[AddPartitionsToTxnManager.AppendCallback]) - callback.complete(util.Map.of()) - } - // make sure metadataCache can map between topic name and id - setupMetadataCacheWithTopicIds(topicIds, metadataCache) - } - - private def setupMetadataCacheWithTopicIds(topicIds: Map[String, Uuid], metadataCache:MetadataCache): Unit = { - val topicNames = topicIds.map(_.swap) - topicNames.foreach { - case (id, name) => - when(metadataCache.getTopicName(id)).thenReturn(Optional.of(name)) - when(metadataCache.getTopicId(name)).thenReturn(id) - } - when(metadataCache.topicIdsToNames()).thenReturn(topicNames.asJava) - - topicIds.foreach { case (topicName, topicId) => - when(metadataCache.getTopicId(topicName)).thenReturn(topicId) + callback(Map.empty[TopicPartition, Errors].toMap) } } @@ -191,7 +162,7 @@ class ReplicaManagerTest { @Test def testHighWaterMarkDirectoryMapping(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -199,7 +170,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) try { @@ -207,8 +178,8 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) rm.checkpointHighWatermarks() - config.logDirs.stream().map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) - .forEach(checkpointFile => assertTrue(Files.exists(checkpointFile), + config.logDirs.map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) + .foreach(checkpointFile => assertTrue(Files.exists(checkpointFile), s"checkpoint file does not exist at $checkpointFile")) } finally { rm.shutdown(checkpointHW = false) @@ -220,7 +191,7 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(1) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -228,7 +199,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) try { @@ -236,8 +207,8 @@ class ReplicaManagerTest { partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) rm.checkpointHighWatermarks() - config.logDirs.stream().map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) - .forEach(checkpointFile => assertTrue(Files.exists(checkpointFile), + config.logDirs.map(s => Paths.get(s, ReplicaManager.HighWatermarkFilename)) + .foreach(checkpointFile => assertTrue(Files.exists(checkpointFile), s"checkpoint file does not exist at $checkpointFile")) } finally { rm.shutdown(checkpointHW = false) @@ -246,7 +217,7 @@ class ReplicaManagerTest { @Test def testIllegalRequiredAcks(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val rm = new ReplicaManager( metrics = metrics, config = config, @@ -254,11 +225,12 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - alterPartitionManager = alterPartitionManager) + alterPartitionManager = alterPartitionManager, + threadNamePrefix = Option(this.getClass.getName)) try { - def callback(responseStatus: Map[TopicIdPartition, PartitionResponse]): Unit = { + def callback(responseStatus: Map[TopicPartition, PartitionResponse]): Unit = { assert(responseStatus.values.head.error == Errors.INVALID_REQUIRED_ACKS) } rm.appendRecords( @@ -266,7 +238,7 @@ class ReplicaManagerTest { requiredAcks = 3, internalTopicsAllowed = false, origin = AppendOrigin.CLIENT, - entriesPerPartition = Map(new TopicIdPartition(Uuid.randomUuid(), 0, "test1") -> MemoryRecords.withRecords(Compression.NONE, + entriesPerPartition = Map(new TopicPartition("test1", 0) -> MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("first message".getBytes))), responseCallback = callback) } finally { @@ -281,12 +253,12 @@ class ReplicaManagerTest { } }) when(cache.getAliveBrokerNode(anyInt, any[ListenerName])). - thenAnswer(new Answer[Optional[Node]]() { - override def answer(invocation: InvocationOnMock): Optional[Node] = { - Optional.of(aliveBrokers.find(node => node.id == invocation.getArgument(0).asInstanceOf[Integer]).get) + thenAnswer(new Answer[Option[Node]]() { + override def answer(invocation: InvocationOnMock): Option[Node] = { + aliveBrokers.find(node => node.id == invocation.getArgument(0).asInstanceOf[Integer]) } }) - when(cache.getAliveBrokerNodes(any[ListenerName])).thenReturn(aliveBrokers.asJava) + when(cache.getAliveBrokerNodes(any[ListenerName])).thenReturn(aliveBrokers) } @Test @@ -296,7 +268,8 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) - val logManager = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(new Properties())) + val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) + val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, Seq(new Node(0, "host0", 0))) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) val rm = new ReplicaManager( @@ -311,26 +284,38 @@ class ReplicaManagerTest { alterPartitionManager = alterPartitionManager) try { - val delta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), topicName = topic, topicId = topicIds(topic)) - val image = imageFromTopics(delta.apply()) - rm.applyDelta(delta, image) - val partition = rm.getPartitionOrException(topicPartition) + val partition = rm.createPartition(new TopicPartition(topic, 0)) + partition.createLogIfNotExists(isNew = false, isFutureReplica = false, + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) - appendRecords(rm, topicPartition, + rm.becomeLeaderOrFollower(0, new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(Seq[Integer](0).asJava) + .setPartitionEpoch(0) + .setReplicas(Seq[Integer](0).asJava) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, Uuid.randomUuid()), + Set(new Node(0, "host1", 0)).asJava).build(), (_, _) => ()) + appendRecords(rm, new TopicPartition(topic, 0), MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("first message".getBytes()), new SimpleRecord("second message".getBytes()))) - logManager.maybeUpdatePreferredLogDir(topicPartition, dir2.getAbsolutePath) + logManager.maybeUpdatePreferredLogDir(new TopicPartition(topic, 0), dir2.getAbsolutePath) partition.createLogIfNotExists(isNew = true, isFutureReplica = true, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // this method should use hw of future log to create log dir fetcher. Otherwise, it causes offset mismatch error rm.maybeAddLogDirFetchers(Set(partition), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), _ => None) - rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.fetchState(topicPartition).foreach(s => assertEquals(0L, s.fetchOffset))) + rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.fetchState(new TopicPartition(topic, 0)).foreach(s => assertEquals(0L, s.fetchOffset))) // make sure alter log dir thread has processed the data rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => t.doWork()) assertEquals(Set.empty, rm.replicaAlterLogDirsManager.failedPartitions.partitions()) // the future log becomes the current log, so the partition state should get removed - rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => assertEquals(None, t.fetchState(topicPartition))) + rm.replicaAlterLogDirsManager.fetcherThreadMap.values.foreach(t => assertEquals(None, t.fetchState(new TopicPartition(topic, 0)))) } finally { rm.shutdown(checkpointHW = false) } @@ -344,11 +329,13 @@ class ReplicaManagerTest { val props = TestUtils.createBrokerConfig(0) props.put("log.dirs", dir1.getAbsolutePath + "," + dir2.getAbsolutePath) val config = KafkaConfig.fromProps(props) - val logManager = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(new Properties())) + val logManager = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(new Properties())) val spyLogManager = spy(logManager) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, Seq(new Node(0, "host0", 0))) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) + val tp0 = new TopicPartition(topic, 0) + val uuid = Uuid.randomUuid() val rm = new ReplicaManager( metrics = metrics, config = config, @@ -361,13 +348,28 @@ class ReplicaManagerTest { alterPartitionManager = alterPartitionManager) try { - val delta = topicsCreateDelta(startId = 0, isStartIdLeader = true, - partitions = List(0), topicName = topic, topicId = topicId) - val image = imageFromTopics(delta.apply()) - rm.applyDelta(delta, image) - val partition = rm.getPartitionOrException(topicPartition) - - spyLogManager.maybeUpdatePreferredLogDir(topicPartition, dir2.getAbsolutePath) + val partition = rm.createPartition(tp0) + partition.createLogIfNotExists(isNew = false, isFutureReplica = false, + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), Option.apply(uuid)) + + val response = rm.becomeLeaderOrFollower(0, new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(Seq[Integer](0).asJava) + .setPartitionEpoch(0) + .setReplicas(Seq[Integer](0).asJava) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, uuid), + Set(new Node(0, "host1", 0)).asJava).build(), (_, _) => ()) + // expect the errorCounts only has 1 entry with Errors.NONE + val errorCounts = response.errorCounts() + assertEquals(1, response.errorCounts().size()) + assertNotNull(errorCounts.get(Errors.NONE)) + spyLogManager.maybeUpdatePreferredLogDir(tp0, dir2.getAbsolutePath) if (futureLogCreated) { // create future log before maybeAddLogDirFetchers invoked @@ -375,12 +377,12 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) } else { val mockLog = mock(classOf[UnifiedLog]) - when(spyLogManager.getLog(topicPartition, isFuture = true)).thenReturn(Option.apply(mockLog)) - when(mockLog.topicId).thenReturn(Optional.of(topicId)) + when(spyLogManager.getLog(tp0, isFuture = true)).thenReturn(Option.apply(mockLog)) + when(mockLog.topicId).thenReturn(Option.apply(uuid)) when(mockLog.parentDir).thenReturn(dir2.getAbsolutePath) } - val topicIdMap: Map[String, Option[Uuid]] = Map(topic -> Option.apply(topicId)) + val topicIdMap: Map[String, Option[Uuid]] = Map(topic -> Option.apply(uuid)) rm.maybeAddLogDirFetchers(Set(partition), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), topicIdMap) if (futureLogCreated) { // since the futureLog is already created, we don't have to abort and pause the cleaning @@ -400,8 +402,9 @@ class ReplicaManagerTest { props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) val config = KafkaConfig.fromProps(props) val logProps = new Properties() - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), new LogConfig(logProps)) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(logProps)) val aliveBrokers = Seq(new Node(0, "host0", 0), new Node(1, "host1", 1)) + val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, aliveBrokers) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) val rm = new ReplicaManager( @@ -417,15 +420,26 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava + val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) - val topicPartition = new TopicPartition(topic, 0) - val partition = rm.createPartition(topicPartition) + val partition = rm.createPartition(new TopicPartition(topic, 0)) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val delta = createLeaderDelta(topicId, topicPartition, brokerList.get(0), brokerList, brokerList) - val leaderMetadataImage = imageFromTopics(delta.apply()) - rm.applyDelta(delta, leaderMetadataImage) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) rm.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -435,9 +449,20 @@ class ReplicaManagerTest { } // Make this replica the follower - val delta1 = createLeaderDelta(topicId, topicPartition, brokerList.get(1), brokerList, brokerList, 1) - val followerMetadataImage = imageFromTopics(delta1.apply()) - rm.applyDelta(delta1, followerMetadataImage) + val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(1) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + rm.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ()) assertTrue(appendResult.hasFired) } finally { @@ -459,9 +484,10 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - alterPartitionManager = alterPartitionManager) + alterPartitionManager = alterPartitionManager, + threadNamePrefix = Option(this.getClass.getName)) // shutdown ReplicaManager so that metrics are removed rm.shutdown(checkpointHW = false) @@ -482,21 +508,37 @@ class ReplicaManagerTest { } } - @ParameterizedTest - @ValueSource(ints = Array(0, 1, 10)) - def testFencedErrorCausedByBecomeLeader(loopEpochChange: Int): Unit = { - val localId = 0 + @Test + def testFencedErrorCausedByBecomeLeader(): Unit = { + testFencedErrorCausedByBecomeLeader(0) + testFencedErrorCausedByBecomeLeader(1) + testFencedErrorCausedByBecomeLeader(10) + } + + private[this] def testFencedErrorCausedByBecomeLeader(loopEpochChange: Int): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { + val brokerList = Seq[Integer](0, 1).asJava val topicPartition = new TopicPartition(topic, 0) replicaManager.createPartition(topicPartition) .createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) - + def leaderAndIsrRequest(epoch: Int): LeaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(epoch) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0), (_, _) => ()) val partition = replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) assertEquals(1, replicaManager.logManager.liveLogDirs.filterNot(_ == partition.log.get.dir.getParentFile).size) @@ -508,12 +550,7 @@ class ReplicaManagerTest { // make sure the future log is created replicaManager.futureLocalLogOrException(topicPartition) assertEquals(1, replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.size) - (1 to loopEpochChange).foreach( - epoch => { - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = epoch) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) - } - ) + (1 to loopEpochChange).foreach(epoch => replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(epoch), (_, _) => ())) // wait for the ReplicaAlterLogDirsThread to complete TestUtils.waitUntilTrue(() => { replicaManager.replicaAlterLogDirsManager.shutdownIdleFetcherThreads() @@ -544,16 +581,25 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava - val tp = new TopicPartition(topic, 0) - val partition = replicaManager.createPartition(tp) + val partition = replicaManager.createPartition(new TopicPartition(topic, 0)) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val delta = createLeaderDelta(topicId, tp, 0, brokerList, brokerList) - val leaderMetadataImage = imageFromTopics(delta.apply()) - - replicaManager.applyDelta(delta, leaderMetadataImage) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + Collections.singletonMap(topic, Uuid.randomUuid()), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -594,28 +640,35 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1).asJava - val tp0 = new TopicPartition(topic, 0) - val tp1 = new TopicPartition(topic, 1) // Create a couple partition for the topic. - val partition0 = replicaManager.createPartition(tp0) + val partition0 = replicaManager.createPartition(new TopicPartition(topic, 0)) partition0.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) - val partition1 = replicaManager.createPartition(tp1) + val partition1 = replicaManager.createPartition(new TopicPartition(topic, 1)) partition1.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader for the partitions. - Seq(tp0, tp1).foreach { tp => - val delta = createLeaderDelta( - topicId = topicId, - partition = tp, - leaderId = 0, - replicas = brokerList, - isr = brokerList - ) - replicaManager.applyDelta(delta, imageFromTopics(delta.apply())) - replicaManager.getPartitionOrException(tp) + Seq(0, 1).foreach { partition => + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + Collections.singletonMap(topic, Uuid.randomUuid()), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava, + LeaderAndIsrRequest.Type.UNKNOWN + ).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + replicaManager.getPartitionOrException(new TopicPartition(topic, partition)) + .localLogOrException } def appendRecord(pid: Long, sequence: Int, partition: Int): Unit = { @@ -669,7 +722,6 @@ class ReplicaManagerTest { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) val topicPartition = new TopicPartition(topic, 0) - setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) def assertLateTransactionCount(expectedCount: Option[Int]): Unit = { assertEquals(expectedCount, yammerGaugeValue[Int]("PartitionsWithLateTransactionsCount")) @@ -684,14 +736,20 @@ class ReplicaManagerTest { // Make this replica the leader. val brokerList = Seq[Integer](0, 1, 2).asJava - val leaderDelta = createLeaderDelta( - topicId = topicId, - partition = topicPartition, - leaderId = 0, - replicas = brokerList, - isr = brokerList, - ) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) // Start a transaction val producerId = 234L @@ -729,7 +787,6 @@ class ReplicaManagerTest { def testReadCommittedFetchLimitedAtLSO(): Unit = { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) - setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { val brokerList = Seq[Integer](0, 1).asJava @@ -739,10 +796,20 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val delta = createLeaderDelta(topicId, new TopicPartition(topic, 0), 0, brokerList, brokerList) - val leaderMetadataImage = imageFromTopics(delta.apply()) - - replicaManager.applyDelta(delta, leaderMetadataImage) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -842,7 +909,6 @@ class ReplicaManagerTest { def testDelayedFetchIncludesAbortedTransactions(): Unit = { val timer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(timer) - setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { val brokerList = Seq[Integer](0, 1).asJava @@ -851,9 +917,20 @@ class ReplicaManagerTest { new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val delta = topicsCreateDelta(brokerList.get(0), isStartIdLeader = true, partitions = List(0), List.empty, topic, topicIds(topic)) - val leaderMetadataImage = imageFromTopics(delta.apply()) - replicaManager.applyDelta(delta, leaderMetadataImage) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) replicaManager.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -920,16 +997,25 @@ class ReplicaManagerTest { try { val brokerList = Seq[Integer](0, 1, 2).asJava - val tp = new TopicPartition(topic, 0) - val partition = rm.createPartition(tp) + val partition = rm.createPartition(new TopicPartition(topic, 0)) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) // Make this replica the leader. - val leaderDelta = createLeaderDelta(topicId, tp, leaderId = 0, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - rm.applyDelta(leaderDelta, leaderMetadataImage) - + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1), new Node(2, "host2", 2)).asJava).build() + rm.becomeLeaderOrFollower(0, leaderAndIsrRequest1, (_, _) => ()) rm.getPartitionOrException(new TopicPartition(topic, 0)) .localLogOrException @@ -967,7 +1053,6 @@ class ReplicaManagerTest { @Test def testFollowerStateNotUpdatedIfLogReadFails(): Unit = { - val localId = 0 val maxFetchBytes = 1024 * 1024 val aliveBrokersIds = Seq(0, 1) val leaderEpoch = 5 @@ -976,11 +1061,25 @@ class ReplicaManagerTest { try { val tp = new TopicPartition(topic, 0) val tidp = new TopicIdPartition(topicId, tp) + val replicas = aliveBrokersIds.toList.map(Int.box).asJava // Broker 0 becomes leader of the partition - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas) + .setPartitionEpoch(0) + .setReplicas(replicas) + .setIsNew(true) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(leaderAndIsrPartitionState).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + assertEquals(Errors.NONE, leaderAndIsrResponse.error) // Follower replica state is initialized, but initial state is not known assertTrue(replicaManager.onlinePartition(tp).isDefined) @@ -1053,7 +1152,6 @@ class ReplicaManagerTest { @Test def testFetchMessagesWithInconsistentTopicId(): Unit = { - val localId = 0 val maxFetchBytes = 1024 * 1024 val aliveBrokersIds = Seq(0, 1) val leaderEpoch = 5 @@ -1062,11 +1160,25 @@ class ReplicaManagerTest { try { val tp = new TopicPartition(topic, 0) val tidp = new TopicIdPartition(topicId, tp) + val replicas = aliveBrokersIds.toList.map(Int.box).asJava // Broker 0 becomes leader of the partition - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas) + .setPartitionEpoch(0) + .setReplicas(replicas) + .setIsNew(true) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(leaderAndIsrPartitionState).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + assertEquals(Errors.NONE, leaderAndIsrResponse.error) assertEquals(Some(topicId), replicaManager.getPartitionOrException(tp).topicId) @@ -1106,6 +1218,54 @@ class ReplicaManagerTest { val fetch2 = successfulFetch.headOption.filter(_._1 == zeroTidp).map(_._2) assertTrue(fetch2.isDefined) assertEquals(Errors.NONE, fetch2.get.error) + + // Next create a topic without a topic ID written in the log. + val tp2 = new TopicPartition("noIdTopic", 0) + val tidp2 = new TopicIdPartition(Uuid.randomUuid(), tp2) + + // Broker 0 becomes leader of the partition + val leaderAndIsrPartitionState2 = new LeaderAndIsrRequest.PartitionState() + .setTopicName("noIdTopic") + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas) + .setPartitionEpoch(0) + .setReplicas(replicas) + .setIsNew(true) + val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(leaderAndIsrPartitionState2).asJava, + Collections.emptyMap(), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + val leaderAndIsrResponse2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest2, (_, _) => ()) + assertEquals(Errors.NONE, leaderAndIsrResponse2.error) + + assertEquals(None, replicaManager.getPartitionOrException(tp2).topicId) + + // Fetch messages simulating the request containing a topic ID. We should not have an error. + fetchPartitions( + replicaManager, + replicaId = 1, + fetchInfos = Seq(tidp2 -> validFetchPartitionData), + responseCallback = callback + ) + val fetch3 = successfulFetch.headOption.filter(_._1 == tidp2).map(_._2) + assertTrue(fetch3.isDefined) + assertEquals(Errors.NONE, fetch3.get.error) + + // Fetch messages simulating the request not containing a topic ID. We should not have an error. + val zeroTidp2 = new TopicIdPartition(Uuid.ZERO_UUID, tidp2.topicPartition) + fetchPartitions( + replicaManager, + replicaId = 1, + fetchInfos = Seq(zeroTidp2 -> validFetchPartitionData), + responseCallback = callback + ) + val fetch4 = successfulFetch.headOption.filter(_._1 == zeroTidp2).map(_._2) + assertTrue(fetch4.isDefined) + assertEquals(Errors.NONE, fetch4.get.error) + } finally { replicaManager.shutdown(checkpointHW = false) } @@ -1120,10 +1280,10 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2)) try { - val leaderEpoch = 0 // Create 2 partitions, assign replica 0 as the leader for both a different follower (1 and 2) for each val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) + val topicId = Uuid.randomUuid() val tidp0 = new TopicIdPartition(topicId, tp0) val tidp1 = new TopicIdPartition(topicId, tp1) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) @@ -1131,14 +1291,34 @@ class ReplicaManagerTest { replicaManager.createPartition(tp1).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava val partition1Replicas = Seq[Integer](0, 2).asJava - - val leaderDelta0 = createLeaderDelta(topicIds(topic), tp0, 0, partition0Replicas, partition0Replicas) - val leaderMetadataImage0 = imageFromTopics(leaderDelta0.apply()) - replicaManager.applyDelta(leaderDelta0, leaderMetadataImage0) - - val leaderDelta1 = createLeaderDelta(topicIds(topic), tp1, 0, partition1Replicas, partition1Replicas) - val leaderMetadataImage1 = imageFromTopics(leaderDelta1.apply()) - replicaManager.applyDelta(leaderDelta1, leaderMetadataImage1) + val topicIds = Map(tp0.topic -> topicId, tp1.topic -> topicId).asJava + val leaderEpoch = 0 + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(leaderEpoch) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true), + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp1.topic) + .setPartitionIndex(tp1.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(partition1Replicas) + .setPartitionEpoch(0) + .setReplicas(partition1Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Append a couple of messages. for (i <- 1 to 2) { @@ -1195,51 +1375,66 @@ class ReplicaManagerTest { } } + @Test + def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(): Unit = { + verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(new Properties, expectTruncation = false) + } + /** * If a partition becomes a follower and the leader is unchanged it should check for truncation * if the epoch has increased by more than one (which suggests it has missed an update). For * IBP version 2.7 onwards, we don't require this since we can truncate at any time based * on diverging epochs returned in fetch responses. - * This test assumes IBP >= 2.7 behavior, so `expectTruncation` is set to false and truncation is not expected. */ - @Test - def testBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(): Unit = { - val extraProps = new Properties + private def verifyBecomeFollowerWhenLeaderIsUnchangedButMissedLeaderUpdate(extraProps: Properties, + expectTruncation: Boolean): Unit = { + val topicPartition = 0 + val topicId = Uuid.randomUuid() val followerBrokerId = 0 val leaderBrokerId = 1 + val controllerId = 0 + val controllerEpoch = 0 var leaderEpoch = 1 val leaderEpochIncrement = 2 + val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) val countDownLatch = new CountDownLatch(1) val offsetFromLeader = 5 + // Prepare the mocked components for the test val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), - topicPartition.partition(), leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Optional.of(10), offsetFromLeader = offsetFromLeader, extraProps = extraProps, topicId = Optional.of(topicId)) + topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, + expectTruncation = expectTruncation, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, extraProps = extraProps, topicId = Some(topicId)) try { // Initialize partition state to follower, with leader = 1, leaderEpoch = 1 - val partition = replicaManager.createPartition(topicPartition) + val tp = new TopicPartition(topic, topicPartition) + val partition = replicaManager.createPartition(tp) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - val followerDelta = topicsCreateDelta(startId = followerBrokerId, isStartIdLeader = false, partitions = List(topicPartition.partition()), List.empty, topic, topicIds(topic), leaderEpoch) - replicaManager.applyDelta(followerDelta, imageFromTopics(followerDelta.apply())) - - // Verify log created and partition is hosted - val localLog = replicaManager.localLog(topicPartition) - assertTrue(localLog.isDefined, "Log should be created for follower after applyDelta") - val hostedPartition = replicaManager.getPartition(topicPartition) - assertTrue(hostedPartition.isInstanceOf[HostedPartition.Online]) + partition.makeFollower( + leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds), + offsetCheckpoints, + None) // Make local partition a follower - because epoch increased by more than 1, truncation should // trigger even though leader does not change leaderEpoch += leaderEpochIncrement - val epochJumpDelta = topicsCreateDelta(startId = followerBrokerId, isStartIdLeader = false, partitions = List(topicPartition.partition()), List.empty, topic, topicIds(topic), leaderEpoch) - replicaManager.applyDelta(epochJumpDelta, imageFromTopics(epochJumpDelta.apply())) - + val leaderAndIsrRequest0 = new LeaderAndIsrRequest.Builder( + controllerId, controllerEpoch, brokerEpoch, + Seq(leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(followerBrokerId, "host1", 0), + new Node(leaderBrokerId, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest0, + (_, followers) => assertEquals(followerBrokerId, followers.head.partitionId)) assertTrue(countDownLatch.await(1000L, TimeUnit.MILLISECONDS)) + // Truncation should have happened once + if (expectTruncation) { + verify(mockLogMgr).truncateTo(Map(tp -> offsetFromLeader), isFuture = false) + } - verify(mockLogMgr).finishedInitializingLog(ArgumentMatchers.eq(topicPartition), any()) + verify(mockLogMgr).finishedInitializingLog(ArgumentMatchers.eq(tp), any()) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -1252,7 +1447,7 @@ class ReplicaManagerTest { val leaderBrokerId = 1 val leaderEpoch = 1 val leaderEpochIncrement = 2 - val aliveBrokerIds = Array(followerBrokerId, leaderBrokerId) + val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) val countDownLatch = new CountDownLatch(1) // Prepare the mocked components for the test @@ -1266,8 +1461,8 @@ class ReplicaManagerTest { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) partition.createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - partition.makeLeader(partitionRegistration(leaderBrokerId, leaderEpoch, aliveBrokerIds, partitionEpoch, aliveBrokerIds), - isNew = false, + partition.makeLeader( + leaderAndIsrPartitionState(tp, leaderEpoch, leaderBrokerId, aliveBrokerIds), offsetCheckpoints, None) @@ -1286,6 +1481,7 @@ class ReplicaManagerTest { @Test def testPreferredReplicaAsFollower(): Unit = { val topicPartition = 0 + val topicId = Uuid.randomUuid() val followerBrokerId = 0 val leaderBrokerId = 1 val leaderEpoch = 1 @@ -1295,22 +1491,29 @@ class ReplicaManagerTest { // Prepare the mocked components for the test val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time), topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, - leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Optional.of(topicId)) + leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId)) try { + val brokerList = Seq[Integer](0, 1).asJava val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) // Make this replica the follower - val followerDelta = createFollowerDelta( - topicId = topicId, - partition = tp0, - followerId = 0, - leaderId = 1, - leaderEpoch = 1, - ) - replicaManager.applyDelta(followerDelta, imageFromTopics(followerDelta.apply())) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(1) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) val metadata: ClientMetadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default") @@ -1332,6 +1535,7 @@ class ReplicaManagerTest { @Test def testPreferredReplicaAsLeader(): Unit = { val topicPartition = 0 + val topicId = Uuid.randomUuid() val followerBrokerId = 0 val leaderBrokerId = 1 val leaderEpoch = 1 @@ -1341,7 +1545,7 @@ class ReplicaManagerTest { // Prepare the mocked components for the test val (replicaManager, _) = prepareReplicaManagerAndLogManager(new MockTimer(time), topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, - leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Optional.of(topicId)) + leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId)) try { val brokerList = Seq[Integer](0, 1).asJava @@ -1349,19 +1553,21 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) - val partition = replicaManager.createPartition(tp0) - partition.createLogIfNotExists(isNew = false, isFutureReplica = false, - new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) // Make this replica the leader - val leaderDelta = createLeaderDelta( - topicId = topicId, - partition = tp0, - leaderId = 0, - replicas = brokerList, - isr = brokerList, - leaderEpoch = 1 - ) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) val metadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getByName("localhost"), KafkaPrincipal.ANONYMOUS, "default") @@ -1391,27 +1597,37 @@ class ReplicaManagerTest { val leaderNode = new Node(leaderBrokerId, "host1", 0, "rack-a") val followerNode = new Node(followerBrokerId, "host2", 1, "rack-b") val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava + val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) when(replicaManager.metadataCache.getPartitionReplicaEndpoints( tp0, new ListenerName("default") - )).thenReturn(util.Map.of( - leaderBrokerId, leaderNode, - followerBrokerId, followerNode - )) + )).thenReturn(Map( + leaderBrokerId -> leaderNode, + followerBrokerId -> followerNode + ).toMap) // Make this replica the leader and remove follower from ISR. - val leaderDelta = createLeaderDelta( - topicId = topicId, - partition = tp0, - leaderId = leaderBrokerId, - replicas = brokerList, - isr = util.Arrays.asList(leaderBrokerId), - leaderEpoch = 1 - ) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder( + 0, + 0, + brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(leaderBrokerId) + .setLeaderEpoch(1) + .setIsr(Seq[Integer](leaderBrokerId).asJava) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(leaderNode, followerNode).asJava).build() + + replicaManager.becomeLeaderOrFollower(2, leaderAndIsrRequest, (_, _) => ()) appendRecords(replicaManager, tp0, TestUtils.singletonRecords(s"message".getBytes)).onFire { response => assertEquals(Errors.NONE, response.error) @@ -1439,7 +1655,7 @@ class ReplicaManagerTest { // PartitionView passed to ReplicaSelector should not contain the follower as it's not in the ISR val expectedReplicaViews = Set(new DefaultReplicaView(leaderNode, 1, 0)) - val partitionView = replicaManager.replicaSelectorPlugin.get.get + val partitionView = replicaManager.replicaSelectorOpt.get .asInstanceOf[MockReplicaSelector].getPartitionViewArgument assertTrue(partitionView.isDefined) @@ -1454,13 +1670,28 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), propsModifier = props => props.put(ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG, classOf[MockReplicaSelector].getName)) try { + val leaderBrokerId = 0 + val followerBrokerId = 1 + val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava + val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) // Make this replica the follower - val followerDelta = createFollowerDelta(topicId, tp0, 0, 1, 1) - val followerMetadataImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerMetadataImage) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(1) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) val metadata = new DefaultClientMetadata("rack-a", "client-id", InetAddress.getLocalHost, KafkaPrincipal.ANONYMOUS, "default") @@ -1473,7 +1704,7 @@ class ReplicaManagerTest { assertTrue(consumerResult.hasFired) // Expect not run the preferred read replica selection - assertEquals(0, replicaManager.replicaSelectorPlugin.get.get.asInstanceOf[MockReplicaSelector].getSelectionCount) + assertEquals(0, replicaManager.replicaSelectorOpt.get.asInstanceOf[MockReplicaSelector].getSelectionCount) // Only leader will compute preferred replica assertTrue(!consumerResult.assertFired.preferredReadReplica.isPresent) @@ -1485,29 +1716,41 @@ class ReplicaManagerTest { @Test def testFetchShouldReturnImmediatelyWhenPreferredReadReplicaIsDefined(): Unit = { - val localId = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), propsModifier = props => props.put(ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG, "org.apache.kafka.common.replica.RackAwareReplicaSelector")) try { val leaderBrokerId = 0 val followerBrokerId = 1 + val brokerList = Seq[Integer](leaderBrokerId, followerBrokerId).asJava + val topicId = Uuid.randomUuid() val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) when(replicaManager.metadataCache.getPartitionReplicaEndpoints( tp0, new ListenerName("default") - )).thenReturn(util.Map.of( - leaderBrokerId, new Node(leaderBrokerId, "host1", 9092, "rack-a"), - followerBrokerId, new Node(followerBrokerId, "host2", 9092, "rack-b") - )) + )).thenReturn(Map( + leaderBrokerId -> new Node(leaderBrokerId, "host1", 9092, "rack-a"), + followerBrokerId -> new Node(followerBrokerId, "host2", 9092, "rack-b") + ).toMap) // Make this replica the leader val leaderEpoch = 1 - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = leaderEpoch) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest, (_, _) => ()) // The leader must record the follower's fetch offset to make it eligible for follower fetch selection val followerFetchData = new PartitionData(topicId, 0L, 0L, Int.MaxValue, Optional.of(Int.box(leaderEpoch)), Optional.empty[Integer]) @@ -1552,17 +1795,29 @@ class ReplicaManagerTest { // Prepare the mocked components for the test val (replicaManager, _) = prepareReplicaManagerAndLogManager(timer, topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, - leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Optional.of(topicId)) + leaderBrokerId, countDownLatch, expectTruncation = true, topicId = Some(topicId)) try { + + val brokerList = Seq[Integer](0, 1).asJava + val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) - val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) // Make this replica the follower - val followerDelta = createFollowerDelta(topicId, tp0, 1, 0, 1) - val followerImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerImage) + val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(false)).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, leaderAndIsrRequest2, (_, _) => ()) val simpleRecords = Seq(new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes)) val appendResult = appendRecords(replicaManager, tp0, @@ -1633,7 +1888,7 @@ class ReplicaManagerTest { topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, expectTruncation = true) try { - assertFalse(replicaManager.replicaSelectorPlugin.isDefined) + assertFalse(replicaManager.replicaSelectorOpt.isDefined) } finally { replicaManager.shutdown(checkpointHW = false) } @@ -1648,10 +1903,21 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) - - val followerDelta = createFollowerDelta(topicId, tp0, 0, 1) - val followerImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerImage) + val partition0Replicas = Seq[Integer](0, 1).asJava + val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) // Fetch from follower, with non-empty ClientMetadata (FetchRequest v11+) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") @@ -1673,7 +1939,6 @@ class ReplicaManagerTest { @Test def testFetchRequestRateMetrics(): Unit = { - val localId = 0 val mockTimer = new MockTimer(time) val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer, aliveBrokerIds = Seq(0, 1)) @@ -1682,10 +1947,22 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) def assertMetricCount(expected: Int): Unit = { assertEquals(expected, replicaManager.brokerTopicStats.allTopicsStats.totalFetchRequestRate.count) @@ -1719,10 +1996,22 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) val partitionData = new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, Optional.empty()) @@ -1730,9 +2019,20 @@ class ReplicaManagerTest { assertFalse(fetchResult.hasFired) // Become a follower and ensure that the delayed fetch returns immediately - val followerDelta = createFollowerDelta(topicId, tp0, followerId = 0, leaderId = 1, leaderEpoch = 2) - val followerMetadataImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerMetadataImage) + val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(2) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, fetchResult.assertFired.error) } finally { replicaManager.shutdown(checkpointHW = false) @@ -1751,9 +2051,20 @@ class ReplicaManagerTest { replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = 1, replicas = partition0Replicas, isr = partition0Replicas) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") val partitionData = new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, @@ -1768,9 +2079,20 @@ class ReplicaManagerTest { assertFalse(fetchResult.hasFired) // Become a follower and ensure that the delayed fetch returns immediately - val followerDelta = createFollowerDelta(topicId, tp0, followerId = 0, leaderId = 1, leaderEpoch = 2) - val followerMetadataImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerMetadataImage) + val becomeFollowerRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(2) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, becomeFollowerRequest, (_, _) => ()) assertEquals(Errors.FENCED_LEADER_EPOCH, fetchResult.assertFired.error) } finally { replicaManager.shutdown(checkpointHW = false) @@ -1779,7 +2101,6 @@ class ReplicaManagerTest { @Test def testFetchFromLeaderAlwaysAllowed(): Unit = { - val localId = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1)) try { @@ -1787,10 +2108,22 @@ class ReplicaManagerTest { val tidp0 = new TopicIdPartition(topicId, tp0) val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val becomeLeaderRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(1) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true)).asJava, + topicIds.asJava, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) val clientMetadata = new DefaultClientMetadata("", "", null, KafkaPrincipal.ANONYMOUS, "") var partitionData = new FetchRequest.PartitionData(Uuid.ZERO_UUID, 0L, 0L, 100, @@ -1815,16 +2148,16 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 0 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) + val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0, tp1)) - val brokerList = Seq[Integer](0, 1).asJava try { - val leaderDelta0 = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderDelta1 = createLeaderDelta(topicId, tp1, leaderId = 1, replicas = brokerList, isr = brokerList) - val image0 = imageFromTopics(leaderDelta0.apply()) - replicaManager.applyDelta(leaderDelta0, image0) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) - val image1 = imageFromTopics(leaderDelta1.apply()) - replicaManager.applyDelta(leaderDelta1, image1) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) // If we supply no transactional ID and idempotent records, we do not verify. val idempotentRecords = MemoryRecords.withIdempotentRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -1844,7 +2177,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), any[AddPartitionsToTxnManager.AppendCallback](), any() ) @@ -1863,13 +2196,12 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) - val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -1882,7 +2214,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) @@ -1890,8 +2222,8 @@ class ReplicaManagerTest { assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) // Confirm we did not write to the log and instead returned error. - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, Errors.INVALID_TXN_STATE)) + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> Errors.INVALID_TXN_STATE).toMap) assertEquals(Errors.INVALID_TXN_STATE, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) @@ -1902,14 +2234,14 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback2.capture(), any() ) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) - val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue - callback2.complete(util.Map.of()) + val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue() + callback2(Map.empty[TopicPartition, Errors].toMap) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } finally { @@ -1929,16 +2261,15 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 0 + val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) val scheduler = new MockScheduler(time) - val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0), scheduler = scheduler) try { - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -1946,13 +2277,13 @@ class ReplicaManagerTest { // We should add these partitions to the manager to verify. val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, origin = AppendOrigin.CLIENT, - transactionalId = transactionalId, transactionSupportedOperation = ADD_PARTITION) + transactionalId = transactionalId, transactionSupportedOperation = addPartition) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) @@ -1960,8 +2291,8 @@ class ReplicaManagerTest { assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) // Confirm we did not write to the log and instead returned error. - var callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, error)) + var callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> error).toMap) if (error != Errors.CONCURRENT_TRANSACTIONS) { // NOT_COORDINATOR is converted to NOT_ENOUGH_REPLICAS @@ -1971,19 +2302,19 @@ class ReplicaManagerTest { assertFalse(result.hasFired) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) - time.sleep(new AddPartitionsToTxnConfig(config).addPartitionsToTxnRetryBackoffMs + 1) + time.sleep(config.addPartitionsToTxnConfig.addPartitionsToTxnRetryBackoffMs + 1) scheduler.tick() verify(addPartitionsToTxnManager, times(2)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) - callback = appendCallback.getValue - callback.complete(util.Map.of()) + callback = appendCallback.getValue() + callback(Map.empty[TopicPartition, Errors].toMap) assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) } @@ -1997,17 +2328,16 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 0 + val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) - val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) - // Start with sequence 0 + // Start with sequence 6 val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) @@ -2018,7 +2348,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) @@ -2026,12 +2356,12 @@ class ReplicaManagerTest { assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) // Confirm we did not write to the log and instead returned error. - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, Errors.INVALID_PRODUCER_ID_MAPPING)) + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> Errors.INVALID_PRODUCER_ID_MAPPING).toMap) assertEquals(Errors.INVALID_PRODUCER_ID_MAPPING, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) - // Try to append a higher sequence (1) after the first one failed with a retriable error. + // Try to append a higher sequence (7) after the first one failed with a retriable error. val transactionalRecords2 = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence + 1, new SimpleRecord("message".getBytes)) @@ -2041,15 +2371,15 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback2.capture(), any() ) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) // Verification should succeed, but we expect to fail with OutOfOrderSequence and for the VerificationGuard to remain. - val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue - callback2.complete(util.Map.of()) + val callback2: AddPartitionsToTxnManager.AppendCallback = appendCallback2.getValue() + callback2(Map.empty[TopicPartition, Errors].toMap) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) assertEquals(Errors.OUT_OF_ORDER_SEQUENCE_NUMBER, result2.assertFired.error) } finally { @@ -2057,79 +2387,8 @@ class ReplicaManagerTest { } } - @Test - def testTransactionVerificationRejectsLowerProducerEpoch(): Unit = { - val tp0 = new TopicPartition(topic, 0) - val producerId = 24L - val producerEpoch = 5.toShort - val lowerProducerEpoch= 4.toShort - val sequence = 6 - val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) - val brokerList = Seq[Integer](0, 1).asJava - - val replicaManager = - setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) - - try { - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) - - // first append with epoch 5 - val transactionalRecords = MemoryRecords.withTransactionalRecords( - Compression.NONE, - producerId, - producerEpoch, - sequence, - new SimpleRecord("message".getBytes) - ) - - handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId) - - val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) - verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( - ArgumentMatchers.eq(transactionalId), - ArgumentMatchers.eq(producerId), - ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), - appendCallback.capture(), - any() - ) - - val verificationGuard = getVerificationGuard(replicaManager, tp0, producerId) - assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) - - // simulate successful verification - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of()) - - assertEquals(VerificationGuard.SENTINEL, getVerificationGuard(replicaManager, tp0, producerId)) - assertTrue(replicaManager.localLog(tp0).get.hasOngoingTransaction(producerId, producerEpoch)) - - // append lower epoch 4 - val transactionalRecords2 = MemoryRecords.withTransactionalRecords( - Compression.NONE, - producerId, - lowerProducerEpoch, - sequence + 1, - new SimpleRecord("message".getBytes) - ) - - val result2 = handleProduceAppend(replicaManager, tp0, transactionalRecords2, transactionalId = transactionalId) - - // no extra call to the txn‑manager should have been made - verifyNoMoreInteractions(addPartitionsToTxnManager) - - // broker returns the fencing error - assertEquals(Errors.INVALID_PRODUCER_EPOCH, result2.assertFired.error) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - @Test def testTransactionVerificationGuardOnMultiplePartitions(): Unit = { - val localId = 0 val mockTimer = new MockTimer(time) val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) @@ -2138,11 +2397,14 @@ class ReplicaManagerTest { val sequence = 0 val replicaManager = setupReplicaManagerWithMockedPurgatories(mockTimer) - setupMetadataCacheWithTopicIds(topicIds, replicaManager.metadataCache) try { - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) + + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord(s"message $sequence".getBytes)) @@ -2159,7 +2421,6 @@ class ReplicaManagerTest { @Test def testExceptionWhenUnverifiedTransactionHasMultipleProducerIds(): Unit = { - val localId = 1 val tp0 = new TopicPartition(topic, 0) val tp1 = new TopicPartition(topic, 1) val transactionalId = "txn1" @@ -2172,9 +2433,13 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0, tp1)) try { - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) + + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp1.topic), tp1, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) // Append some transactional records with different producer IDs val transactionalRecords = mutable.Map[TopicPartition, MemoryRecords]() @@ -2197,7 +2462,7 @@ class ReplicaManagerTest { val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort - val sequence = 0 + val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) @@ -2233,9 +2498,8 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp), config = config) try { - val delta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), List.empty, topic, topicIds(topic)) - val leaderMetadataImage = imageFromTopics(delta.apply()) - replicaManager.applyDelta(delta, leaderMetadataImage) + val becomeLeaderRequest = makeLeaderAndIsrRequest(topicIds(tp.topic), tp, Seq(0, 1), new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava)) + replicaManager.becomeLeaderOrFollower(1, becomeLeaderRequest, (_, _) => ()) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord(s"message $sequence".getBytes)) @@ -2252,8 +2516,7 @@ class ReplicaManagerTest { val props = new Properties() props.put(TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG, "true") config.dynamicConfig.updateBrokerConfig(config.brokerId, props) - val transactionLogConfig = new TransactionLogConfig(config) - TestUtils.waitUntilTrue(() => transactionLogConfig.transactionPartitionVerificationEnable, "Config did not dynamically update.") + TestUtils.waitUntilTrue(() => config.transactionLogConfig.transactionPartitionVerificationEnable == true, "Config did not dynamically update.") // Try to append more records. We don't need to send a request since the transaction is already ongoing. val moreTransactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence + 1, @@ -2275,13 +2538,12 @@ class ReplicaManagerTest { val producerEpoch = 0.toShort val sequence = 6 val addPartitionsToTxnManager = mock(classOf[AddPartitionsToTxnManager]) - val brokerList = Seq[Integer](0, 1).asJava val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = brokerList, isr = brokerList) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) // Append some transactional records. val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, @@ -2294,7 +2556,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) @@ -2306,12 +2568,11 @@ class ReplicaManagerTest { val props = new Properties() props.put(TransactionLogConfig.TRANSACTION_PARTITION_VERIFICATION_ENABLE_CONFIG, "false") config.dynamicConfig.updateBrokerConfig(config.brokerId, props) - val transactionLogConfig = new TransactionLogConfig(config) - TestUtils.waitUntilTrue(() => !transactionLogConfig.transactionPartitionVerificationEnable, "Config did not dynamically update.") + TestUtils.waitUntilTrue(() => config.transactionLogConfig.transactionPartitionVerificationEnable == false, "Config did not dynamically update.") // Confirm we did not write to the log and instead returned error. - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, Errors.INVALID_TXN_STATE)) + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> Errors.INVALID_TXN_STATE).toMap) assertEquals(Errors.INVALID_TXN_STATE, result.assertFired.error) assertEquals(verificationGuard, getVerificationGuard(replicaManager, tp0, producerId)) @@ -2336,7 +2597,6 @@ class ReplicaManagerTest { ) ) def testVerificationErrorConversionsTV2(error: Errors): Unit = { - val localId = 1 val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort @@ -2345,28 +2605,29 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) // Start verification and return the coordinator related errors. val expectedMessage = s"Unable to verify the partition has been added to the transaction. Underlying error: ${error.toString}" - val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId, transactionSupportedOperation = ADD_PARTITION) + val result = handleProduceAppend(replicaManager, tp0, transactionalRecords, transactionalId = transactionalId, transactionSupportedOperation = addPartition) val appendCallback = ArgumentCaptor.forClass(classOf[AddPartitionsToTxnManager.AppendCallback]) verify(addPartitionsToTxnManager, times(1)).addOrVerifyTransaction( ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) // Confirm we did not write to the log and instead returned the converted error with the correct error message. - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, error)) + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> error).toMap) assertEquals(Errors.NOT_ENOUGH_REPLICAS, result.assertFired.error) assertEquals(expectedMessage, result.assertFired.errorMessage) } finally { @@ -2386,7 +2647,6 @@ class ReplicaManagerTest { ) ) def testVerificationErrorConversionsTV1(error: Errors): Unit = { - val localId = 1 val tp0 = new TopicPartition(topic, 0) val producerId = 24L val producerEpoch = 0.toShort @@ -2395,8 +2655,9 @@ class ReplicaManagerTest { val replicaManager = setUpReplicaManagerWithMockedAddPartitionsToTxnManager(addPartitionsToTxnManager, List(tp0)) try { - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + replicaManager.becomeLeaderOrFollower(1, + makeLeaderAndIsrRequest(topicIds(tp0.topic), tp0, Seq(0, 1), new LeaderAndIsr(1, List(0, 1).map(Int.box).asJava)), + (_, _) => ()) val transactionalRecords = MemoryRecords.withTransactionalRecords(Compression.NONE, producerId, producerEpoch, sequence, new SimpleRecord("message".getBytes)) @@ -2409,14 +2670,14 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) // Confirm we did not write to the log and instead returned the converted error with the correct error message. - val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue - callback.complete(util.Map.of(tp0, error)) + val callback: AddPartitionsToTxnManager.AppendCallback = appendCallback.getValue() + callback(Map(tp0 -> error).toMap) assertEquals(Errors.NOT_ENOUGH_REPLICAS, result.assertFired.error) assertEquals(expectedMessage, result.assertFired.errorMessage) } finally { @@ -2440,7 +2701,7 @@ class ReplicaManagerTest { ArgumentMatchers.eq(transactionalId), ArgumentMatchers.eq(producerId), ArgumentMatchers.eq(producerEpoch), - ArgumentMatchers.eq(util.List.of(tp0)), + ArgumentMatchers.eq(Seq(tp0)), appendCallback.capture(), any() ) @@ -2452,11 +2713,11 @@ class ReplicaManagerTest { private def sendProducerAppend( replicaManager: ReplicaManager, - topicPartition: TopicIdPartition, + topicPartition: TopicPartition, numOfRecords: Int ): AtomicReference[PartitionResponse] = { val produceResult = new AtomicReference[PartitionResponse]() - def callback(response: Map[TopicIdPartition, PartitionResponse]): Unit = { + def callback(response: Map[TopicPartition, PartitionResponse]): Unit = { produceResult.set(response(topicPartition)) } @@ -2479,6 +2740,11 @@ class ReplicaManagerTest { produceResult } + /** + * This method assumes that the test using created ReplicaManager calls + * ReplicaManager.becomeLeaderOrFollower() once with LeaderAndIsrRequest containing + * 'leaderEpochInLeaderAndIsr' leader epoch for partition 'topicPartition'. + */ private def prepareReplicaManagerAndLogManager(timer: MockTimer, topicPartition: Int, leaderEpochInLeaderAndIsr: Int, @@ -2486,17 +2752,17 @@ class ReplicaManagerTest { leaderBrokerId: Int, countDownLatch: CountDownLatch, expectTruncation: Boolean, - localLogOffset: Optional[Long] = Optional.empty, + localLogOffset: Option[Long] = None, offsetFromLeader: Long = 5, leaderEpochFromLeader: Int = 3, extraProps: Properties = new Properties(), - topicId: Optional[Uuid] = Optional.empty): (ReplicaManager, LogManager) = { + topicId: Option[Uuid] = None): (ReplicaManager, LogManager) = { val props = TestUtils.createBrokerConfig(0) props.put("log.dir", TestUtils.tempRelativeDir("data").getAbsolutePath) props.asScala ++= extraProps.asScala val config = KafkaConfig.fromProps(props) val logConfig = new LogConfig(new Properties) - val logDir = new File(new File(config.logDirs.get(0)), s"$topic-$topicPartition") + val logDir = new File(new File(config.logDirs.head), s"$topic-$topicPartition") Files.createDirectories(logDir.toPath) val mockScheduler = new MockScheduler(time) val mockBrokerTopicStats = new BrokerTopicStats @@ -2506,7 +2772,7 @@ class ReplicaManagerTest { val maxProducerIdExpirationMs = 30000 val segments = new LogSegments(tp) val leaderEpochCache = UnifiedLog.createLeaderEpochCache( - logDir, tp, mockLogDirFailureChannel, Optional.empty, time.scheduler) + logDir, tp, mockLogDirFailureChannel, None, time.scheduler) val producerStateManager = new ProducerStateManager(tp, logDir, maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, true), time) val offsets = new LogLoader( @@ -2528,55 +2794,52 @@ class ReplicaManagerTest { val localLog = new LocalLog(logDir, logConfig, segments, offsets.recoveryPoint, offsets.nextOffsetMetadata, mockScheduler, time, tp, mockLogDirFailureChannel) val mockLog = new UnifiedLog( - offsets.logStartOffset, - localLog, - mockBrokerTopicStats, - 30000, - leaderEpochCache, - producerStateManager, - Optional.empty, - false, - LogOffsetsListener.NO_OP_OFFSETS_LISTENER) { + logStartOffset = offsets.logStartOffset, + localLog = localLog, + brokerTopicStats = mockBrokerTopicStats, + producerIdExpirationCheckIntervalMs = 30000, + leaderEpochCache = leaderEpochCache, + producerStateManager = producerStateManager, + _topicId = topicId, + keepPartitionMetadataFile = true) { - override def endOffsetForEpoch(leaderEpoch: Int): Optional[OffsetAndEpoch] = { + override def endOffsetForEpoch(leaderEpoch: Int): Option[OffsetAndEpoch] = { assertEquals(leaderEpoch, leaderEpochFromLeader) - localLogOffset.toScala.map { logOffset => - Optional.of(new OffsetAndEpoch(logOffset, leaderEpochFromLeader)) + localLogOffset.map { logOffset => + Some(new OffsetAndEpoch(logOffset, leaderEpochFromLeader)) }.getOrElse(super.endOffsetForEpoch(leaderEpoch)) } - override def latestEpoch: Optional[Integer] = Optional.of(leaderEpochFromLeader) + override def latestEpoch: Option[Int] = Some(leaderEpochFromLeader) - override def logEndOffsetMetadata: LogOffsetMetadata = { - localLogOffset.toScala.map { new LogOffsetMetadata(_) }.getOrElse(super.logEndOffsetMetadata) - } + override def logEndOffsetMetadata: LogOffsetMetadata = + localLogOffset.map(new LogOffsetMetadata(_)).getOrElse(super.logEndOffsetMetadata) - override def logEndOffset: Long = localLogOffset.orElse(super.logEndOffset) + override def logEndOffset: Long = localLogOffset.getOrElse(super.logEndOffset) } // Expect to call LogManager.truncateTo exactly once val topicPartitionObj = new TopicPartition(topic, topicPartition) val mockLogMgr: LogManager = mock(classOf[LogManager]) - when(mockLogMgr.liveLogDirs).thenReturn(config.logDirs.asScala.map(new File(_).getAbsoluteFile)) + when(mockLogMgr.liveLogDirs).thenReturn(config.logDirs.map(new File(_).getAbsoluteFile)) when(mockLogMgr.getOrCreateLog(ArgumentMatchers.eq(topicPartitionObj), ArgumentMatchers.eq(false), ArgumentMatchers.eq(false), any(), any())).thenReturn(mockLog) when(mockLogMgr.getLog(topicPartitionObj, isFuture = false)).thenReturn(Some(mockLog)) when(mockLogMgr.getLog(topicPartitionObj, isFuture = true)).thenReturn(None) - val allLogs = new ConcurrentHashMap[TopicPartition, UnifiedLog]() + val allLogs = new Pool[TopicPartition, UnifiedLog]() allLogs.put(topicPartitionObj, mockLog) - when(mockLogMgr.allLogs).thenReturn(allLogs.values.asScala) + when(mockLogMgr.allLogs).thenReturn(allLogs.values) when(mockLogMgr.isLogDirOnline(anyString)).thenReturn(true) - when(mockLogMgr.directoryId(anyString)).thenReturn(None) val aliveBrokerIds = Seq[Integer](followerBrokerId, leaderBrokerId) val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId)) + val metadataCache: MetadataCache = mock(classOf[MetadataCache]) mockGetAliveBrokerFunctions(metadataCache, aliveBrokers) when(metadataCache.getPartitionReplicaEndpoints( any[TopicPartition], any[ListenerName])). - thenReturn(util.Map.of(leaderBrokerId, new Node(leaderBrokerId, "host1", 9092, "rack-a"), - followerBrokerId, new Node(followerBrokerId, "host2", 9092, "rack-b"))) + thenReturn(Map(leaderBrokerId -> new Node(leaderBrokerId, "host1", 9092, "rack-a"), + followerBrokerId -> new Node(followerBrokerId, "host2", 9092, "rack-b")).toMap) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) - when(metadataCache.getAliveBrokerEpoch(leaderBrokerId)).thenReturn(util.Optional.of(brokerEpoch)) val mockProducePurgatory = new DelayedOperationPurgatory[DelayedProduce]( "Produce", timer, 0, false) val mockFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch]( @@ -2614,13 +2877,15 @@ class ReplicaManagerTest { delayedDeleteRecordsPurgatoryParam = Some(mockDeleteRecordsPurgatory), delayedRemoteFetchPurgatoryParam = Some(mockRemoteFetchPurgatory), delayedRemoteListOffsetsPurgatoryParam = Some(mockRemoteListOffsetsPurgatory), - delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory)) { + delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory), + threadNamePrefix = Option(this.getClass.getName)) { override protected def createReplicaFetcherManager(metrics: Metrics, time: Time, + threadNamePrefix: Option[String], replicationQuotaManager: ReplicationQuotaManager): ReplicaFetcherManager = { val rm = this - new ReplicaFetcherManager(this.config, rm, metrics, time, replicationQuotaManager, () => this.metadataCache.metadataVersion(), () => 1) { + new ReplicaFetcherManager(this.config, rm, metrics, time, threadNamePrefix, replicationQuotaManager, () => this.metadataCache.metadataVersion(), () => 1) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { val logContext = new LogContext(s"[ReplicaFetcher replicaId=${rm.config.brokerId}, leaderId=${sourceBroker.id}, " + @@ -2629,12 +2894,12 @@ class ReplicaManagerTest { val leader = new RemoteLeaderEndPoint(logContext.logPrefix, blockingSend, fetchSessionHandler, rm.config, rm, quotaManager.follower, () => MetadataVersion.MINIMUM_VERSION, () => 1) new ReplicaFetcherThread(s"ReplicaFetcherThread-$fetcherId", leader, rm.config, failedPartitions, rm, - quotaManager.follower, logContext.logPrefix) { + quotaManager.follower, logContext.logPrefix, () => MetadataVersion.MINIMUM_VERSION) { override def doWork(): Unit = { // In case the thread starts before the partition is added by AbstractFetcherManager, // add it here (it's a no-op if already added) val initialOffset = InitialFetchState( - topicId = topicId.toScala, + topicId = topicId, leader = new BrokerEndPoint(0, "localhost", 9092), initOffset = 0L, currentLeaderEpoch = leaderEpochInLeaderAndIsr) addPartitions(Map(new TopicPartition(topic, topicPartition) -> initialOffset)) @@ -2653,21 +2918,21 @@ class ReplicaManagerTest { (replicaManager, mockLogMgr) } - private def partitionRegistration(leader: Int, - leaderEpoch: Int, - isr: Array[Int], - partitionEpoch: Int, - replicas: Array[Int]): PartitionRegistration = { - new PartitionRegistration.Builder() - .setLeader(leader) - .setLeaderRecoveryState(LeaderRecoveryState.RECOVERED) + private def leaderAndIsrPartitionState(topicPartition: TopicPartition, + leaderEpoch: Int, + leaderBrokerId: Int, + aliveBrokerIds: Seq[Integer], + isNew: Boolean = false): LeaderAndIsrRequest.PartitionState = { + new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(topicPartition.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(leaderBrokerId) .setLeaderEpoch(leaderEpoch) - .setIsr(isr) - .setPartitionEpoch(partitionEpoch) - .setReplicas(replicas) - .setDirectories(DirectoryId.unassignedArray(replicas.length)) - .build() - + .setIsr(aliveBrokerIds.asJava) + .setPartitionEpoch(zkVersion) + .setReplicas(aliveBrokerIds.asJava) + .setIsNew(isNew) } private class CallbackResult[T] { @@ -2701,9 +2966,8 @@ class ReplicaManagerTest { origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - val topicIdPartition = new TopicIdPartition(topicId, partition) - def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { - val response = responses.get(topicIdPartition) + def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { + val response = responses.get(partition) assertTrue(response.isDefined) result.fire(response.get) } @@ -2713,7 +2977,7 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, origin = origin, - entriesPerPartition = Map(new TopicIdPartition(topicId, partition) -> records), + entriesPerPartition = Map(partition -> records), responseCallback = appendCallback, ) @@ -2723,11 +2987,12 @@ class ReplicaManagerTest { private def handleProduceAppendToMultipleTopics(replicaManager: ReplicaManager, entriesToAppend: Map[TopicPartition, MemoryRecords], transactionalId: String, + origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1, - transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED - ): CallbackResult[Map[TopicIdPartition, PartitionResponse]] = { - val result = new CallbackResult[Map[TopicIdPartition, PartitionResponse]]() - def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { + transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported + ): CallbackResult[Map[TopicPartition, PartitionResponse]] = { + val result = new CallbackResult[Map[TopicPartition, PartitionResponse]]() + def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { responses.foreach( response => assertTrue(responses.get(response._1).isDefined)) result.fire(responses) } @@ -2737,7 +3002,7 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, transactionalId = transactionalId, - entriesPerPartition = entriesToAppend.map { case(tp, memoryRecords) => replicaManager.topicIdPartition(tp) -> memoryRecords }, + entriesPerPartition = entriesToAppend, responseCallback = appendCallback, transactionSupportedOperation = transactionSupportedOperation ) @@ -2751,13 +3016,12 @@ class ReplicaManagerTest { origin: AppendOrigin = AppendOrigin.CLIENT, requiredAcks: Short = -1, transactionalId: String, - transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED + transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported ): CallbackResult[PartitionResponse] = { val result = new CallbackResult[PartitionResponse]() - val topicIdPartition = new TopicIdPartition(topicIds.get(partition.topic()).getOrElse(Uuid.ZERO_UUID), partition) - def appendCallback(responses: Map[TopicIdPartition, PartitionResponse]): Unit = { - val response = responses.get(topicIdPartition) + def appendCallback(responses: Map[TopicPartition, PartitionResponse]): Unit = { + val response = responses.get(partition) assertTrue(response.isDefined) result.fire(response.get) } @@ -2768,9 +3032,7 @@ class ReplicaManagerTest { requiredAcks = requiredAcks, internalTopicsAllowed = false, transactionalId = transactionalId, - entriesPerPartition = entriesPerPartition.map { - case (topicPartition, records) => replicaManager.topicIdPartition(topicPartition) -> records - }, + entriesPerPartition = entriesPerPartition, responseCallback = appendCallback, transactionSupportedOperation = transactionSupportedOperation ) @@ -2784,7 +3046,7 @@ class ReplicaManagerTest { producerId: Long, producerEpoch: Short, baseSequence: Int = 0, - transactionSupportedOperation: TransactionSupportedOperation = GENERIC_ERROR_SUPPORTED + transactionSupportedOperation: TransactionSupportedOperation = genericErrorSupported ): CallbackResult[Either[Errors, VerificationGuard]] = { val result = new CallbackResult[Either[Errors, VerificationGuard]]() def postVerificationCallback(errorAndGuard: (Errors, VerificationGuard)): Unit = { @@ -2903,6 +3165,7 @@ class ReplicaManagerTest { clientMetadata: Option[ClientMetadata] = None ): Unit = { val params = new FetchParams( + requestVersion, replicaId, 1, maxWaitMs, @@ -2930,7 +3193,9 @@ class ReplicaManagerTest { transactionalTopicPartitions: List[TopicPartition], config: KafkaConfig = config, scheduler: Scheduler = new MockScheduler(time)): ReplicaManager = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) + val metadataCache = mock(classOf[MetadataCache]) + val replicaManager = new ReplicaManager( metrics = metrics, config = config, @@ -2957,6 +3222,7 @@ class ReplicaManagerTest { propsModifier: Properties => Unit = _ => {}, mockReplicaFetcherManager: Option[ReplicaFetcherManager] = None, mockReplicaAlterLogDirsManager: Option[ReplicaAlterLogDirsManager] = None, + isShuttingDown: AtomicBoolean = new AtomicBoolean(false), enableRemoteStorage: Boolean = false, shouldMockLog: Boolean = false, remoteLogManager: Option[RemoteLogManager] = None, @@ -2977,11 +3243,10 @@ class ReplicaManagerTest { if (enableRemoteStorage && defaultTopicRemoteLogStorageEnable) { logProps.put(TopicConfig.REMOTE_LOG_STORAGE_ENABLE_CONFIG, "true") } - val logConfig = new LogConfig(logProps) - val mockLogFn = (topicPartition: TopicPartition, topicId: Option[Uuid]) => setupMockLog(path1, logConfig, enableRemoteStorage, topicPartition, topicId) + val mockLog = setupMockLog(path1) if (setupLogDirMetaProperties) { // add meta.properties file in each dir - config.logDirs.stream().forEach(dir => { + config.logDirs.foreach(dir => { val metaProps = new MetaProperties.Builder(). setVersion(MetaPropertiesVersion.V0). setClusterId("clusterId"). @@ -2992,15 +3257,20 @@ class ReplicaManagerTest { new File(new File(dir), MetaPropertiesEnsemble.META_PROPERTIES_NAME).getAbsolutePath, false) }) } - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), logConfig, logFn = if (shouldMockLog) Some(mockLogFn) else None, remoteStorageSystemEnable = enableRemoteStorage) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_)), new LogConfig(logProps), log = if (shouldMockLog) Some(mockLog) else None, remoteStorageSystemEnable = enableRemoteStorage) + val logConfig = new LogConfig(logProps) + when(mockLog.config).thenReturn(logConfig) + when(mockLog.remoteLogEnabled()).thenReturn(enableRemoteStorage) + when(mockLog.remoteStorageSystemEnable).thenReturn(enableRemoteStorage) val aliveBrokers = aliveBrokerIds.map(brokerId => new Node(brokerId, s"host$brokerId", brokerId)) brokerTopicStats = new BrokerTopicStats(KafkaConfig.fromProps(props).remoteLogManagerConfig.isRemoteStorageSystemEnabled) val metadataCache: MetadataCache = mock(classOf[MetadataCache]) + when(metadataCache.topicIdInfo()).thenReturn((topicIds.asJava, topicNames.asJava)) + when(metadataCache.topicNamesToIds()).thenReturn(topicIds.asJava) when(metadataCache.topicIdsToNames()).thenReturn(topicNames.asJava) when(metadataCache.metadataVersion()).thenReturn(MetadataVersion.MINIMUM_VERSION) mockGetAliveBrokerFunctions(metadataCache, aliveBrokers) - when(metadataCache.getAliveBrokerEpoch(brokerId+1)).thenReturn(util.Optional.of(brokerEpoch)) val mockProducePurgatory = new DelayedOperationPurgatory[DelayedProduce]( "Produce", timer, 0, false) val mockFetchPurgatory = new DelayedOperationPurgatory[DelayedFetch]( @@ -3019,9 +3289,9 @@ class ReplicaManagerTest { if (remoteFetchQuotaExceeded.isDefined) { assertFalse(remoteLogManager.isDefined) if (remoteFetchQuotaExceeded.get) { - when(mockRemoteLogManager.getFetchThrottleTimeMs).thenReturn(quotaExceededThrottleTime) + when(mockRemoteLogManager.getFetchThrottleTimeMs()).thenReturn(quotaExceededThrottleTime) } else { - when(mockRemoteLogManager.getFetchThrottleTimeMs).thenReturn(quotaAvailableThrottleTime) + when(mockRemoteLogManager.getFetchThrottleTimeMs()).thenReturn(quotaAvailableThrottleTime) } } @@ -3039,12 +3309,14 @@ class ReplicaManagerTest { logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager, brokerTopicStats = brokerTopicStats, + isShuttingDown = isShuttingDown, delayedProducePurgatoryParam = Some(mockProducePurgatory), delayedFetchPurgatoryParam = Some(mockFetchPurgatory), delayedDeleteRecordsPurgatoryParam = Some(mockDeleteRecordsPurgatory), delayedRemoteFetchPurgatoryParam = Some(mockDelayedRemoteFetchPurgatory), delayedRemoteListOffsetsPurgatoryParam = Some(mockDelayedRemoteListOffsetsPurgatory), delayedShareFetchPurgatoryParam = Some(mockDelayedShareFetchPurgatory), + threadNamePrefix = Option(this.getClass.getName), addPartitionsToTxnManager = Some(addPartitionsToTxnManager), directoryEventHandler = directoryEventHandler, remoteLogManager = if (enableRemoteStorage) { @@ -3057,6 +3329,7 @@ class ReplicaManagerTest { override protected def createReplicaFetcherManager( metrics: Metrics, time: Time, + threadNamePrefix: Option[String], quotaManager: ReplicationQuotaManager ): ReplicaFetcherManager = { mockReplicaFetcherManager.getOrElse { @@ -3064,25 +3337,27 @@ class ReplicaManagerTest { super.createReplicaFetcherManager( metrics, time, + threadNamePrefix, quotaManager ) val config = this.config val metadataCache = this.metadataCache - new ReplicaFetcherManager(config, this, metrics, time, quotaManager, () => metadataCache.metadataVersion(), () => 1) { + new ReplicaFetcherManager(config, this, metrics, time, threadNamePrefix, quotaManager, () => metadataCache.metadataVersion(), () => 1) { override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): ReplicaFetcherThread = { - val threadName = s"ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" + val prefix = threadNamePrefix.map(tp => s"$tp:").getOrElse("") + val threadName = s"${prefix}ReplicaFetcherThread-$fetcherId-${sourceBroker.id}" val tp = new TopicPartition(topic, 0) val leader = new MockLeaderEndPoint() { - override def fetch(fetchRequest: FetchRequest.Builder): java.util.Map[TopicPartition, FetchResponseData.PartitionData] = { - Map(tp -> new FetchResponseData.PartitionData().setErrorCode(Errors.OFFSET_MOVED_TO_TIERED_STORAGE.code)) - }.asJava + override def fetch(fetchRequest: FetchRequest.Builder): Map[TopicPartition, FetchData] = { + Map(tp -> new FetchData().setErrorCode(Errors.OFFSET_MOVED_TO_TIERED_STORAGE.code)) + } } leader.setLeaderState(tp, PartitionState(leaderEpoch = 0)) - leader.setReplicaPartitionStateCallback(_ => PartitionState(leaderEpoch = 0)) + leader.setReplicaPartitionStateCallback(tp => PartitionState(leaderEpoch = 0)) val fetcher = new ReplicaFetcherThread(threadName, leader, config, failedPartitions, replicaManager, - quotaManager, "") + quotaManager, "", () => MetadataVersion.MINIMUM_VERSION) val initialFetchState = InitialFetchState( topicId = Some(Uuid.randomUuid()), @@ -3099,6 +3374,7 @@ class ReplicaManagerTest { super.createReplicaFetcherManager( metrics, time, + threadNamePrefix, quotaManager ) } @@ -3121,8 +3397,11 @@ class ReplicaManagerTest { @Test def testOldLeaderLosesMetricsWhenReassignPartitions(): Unit = { + val controllerEpoch = 0 val leaderEpoch = 0 val leaderEpochIncrement = 1 + val correlationId = 0 + val controllerId = 0 val mockTopicStats1: BrokerTopicStats = mock(classOf[BrokerTopicStats]) val (rm0, rm1) = prepareDifferentReplicaManagers(mock(classOf[BrokerTopicStats]), mockTopicStats1) @@ -3135,34 +3414,64 @@ class ReplicaManagerTest { val partition1Replicas = Seq[Integer](1, 0).asJava val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava - val delta1 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch) - delta1.replay(new PartitionRecord() - .setPartitionId(tp1.partition) - .setTopicId(topicIds.get(topic)) - .setIsr(partition1Replicas) - .setReplicas(partition1Replicas) - .setLeader(partition1Replicas.get(0)) - .setLeaderEpoch(leaderEpoch) - .setPartitionEpoch(0) - ) - val leaderMetadataImage1 = imageFromTopics(delta1.apply()) - rm0.applyDelta(delta1, leaderMetadataImage1) - rm1.applyDelta(delta1, leaderMetadataImage1) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(controllerId, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true), + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp1.topic) + .setPartitionIndex(tp1.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(1) + .setLeaderEpoch(leaderEpoch) + .setIsr(partition1Replicas) + .setPartitionEpoch(0) + .setReplicas(partition1Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() + + rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) + rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) // make broker 0 the leader of partition 1 so broker 1 loses its leadership position - val delta2 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch + leaderEpochIncrement) - delta2.replay(new PartitionRecord() - .setPartitionId(tp1.partition) - .setTopicId(topicIds.get(topic)) - .setIsr(partition1Replicas) - .setReplicas(partition1Replicas) - .setLeader(partition1Replicas.get(1)) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setPartitionEpoch(0) - ) - val leaderMetadataImage2 = imageFromTopics(delta2.apply()) - rm0.applyDelta(delta2, leaderMetadataImage2) - rm1.applyDelta(delta2, leaderMetadataImage2) + val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder( controllerId, controllerEpoch, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(0) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true), + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp1.topic) + .setPartitionIndex(tp1.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(0) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setIsr(partition1Replicas) + .setPartitionEpoch(0) + .setReplicas(partition1Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() + + rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) + rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) } finally { Utils.tryAll(util.Arrays.asList[Callable[Void]]( () => { @@ -3182,8 +3491,11 @@ class ReplicaManagerTest { @Test def testOldFollowerLosesMetricsWhenReassignPartitions(): Unit = { + val controllerEpoch = 0 val leaderEpoch = 0 val leaderEpochIncrement = 1 + val correlationId = 0 + val controllerId = 0 val mockTopicStats1: BrokerTopicStats = mock(classOf[BrokerTopicStats]) val (rm0, rm1) = prepareDifferentReplicaManagers(mock(classOf[BrokerTopicStats]), mockTopicStats1) @@ -3196,34 +3508,65 @@ class ReplicaManagerTest { val partition1Replicas = Seq[Integer](1, 0).asJava val topicIds = Map(tp0.topic -> Uuid.randomUuid(), tp1.topic -> Uuid.randomUuid()).asJava - val delta = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas, leaderEpoch) - delta.replay(new PartitionRecord() - .setPartitionId(tp1.partition) - .setTopicId(topicIds.get(topic)) - .setIsr(partition1Replicas) - .setReplicas(partition1Replicas) - .setLeader(partition1Replicas.get(0)) - .setLeaderEpoch(leaderEpoch) - .setPartitionEpoch(0) - ) - val leaderMetadataImage = imageFromTopics(delta.apply()) - rm0.applyDelta(delta, leaderMetadataImage) - rm1.applyDelta(delta, leaderMetadataImage) + val leaderAndIsrRequest1 = new LeaderAndIsrRequest.Builder(controllerId, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(1) + .setLeaderEpoch(leaderEpoch) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true), + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp1.topic) + .setPartitionIndex(tp1.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(1) + .setLeaderEpoch(leaderEpoch) + .setIsr(partition1Replicas) + .setPartitionEpoch(0) + .setReplicas(partition1Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() + + rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) + rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest1, (_, _) => ()) // make broker 0 the leader of partition 1 so broker 1 loses its leadership position - val delta2 = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(1), partition0Replicas, partition0Replicas, leaderEpoch + leaderEpochIncrement) - delta2.replay(new PartitionRecord() - .setPartitionId(tp1.partition) - .setTopicId(topicIds.get(topic)) - .setIsr(partition1Replicas) - .setReplicas(partition1Replicas) - .setLeader(partition1Replicas.get(1)) - .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) - .setPartitionEpoch(0) - ) - val leaderMetadataImage2 = imageFromTopics(delta2.apply()) - rm0.applyDelta(delta2, leaderMetadataImage2) - rm1.applyDelta(delta2, leaderMetadataImage2) + val leaderAndIsrRequest2 = new LeaderAndIsrRequest.Builder(controllerId, + controllerEpoch, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(0) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true), + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp1.topic) + .setPartitionIndex(tp1.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(0) + .setLeaderEpoch(leaderEpoch + leaderEpochIncrement) + .setIsr(partition1Replicas) + .setPartitionEpoch(0) + .setReplicas(partition1Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host0", 0), new Node(1, "host1", 1)).asJava).build() + + rm0.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) + rm1.becomeLeaderOrFollower(correlationId, leaderAndIsrRequest2, (_, _) => ()) } finally { Utils.tryAll(util.Arrays.asList[Callable[Void]]( () => { @@ -3239,6 +3582,7 @@ class ReplicaManagerTest { // verify that broker 1 did remove its metrics when no longer being the leader of partition 1 verify(mockTopicStats1).removeOldLeaderMetrics(topic) + verify(mockTopicStats1).removeOldFollowerMetrics(topic) } private def prepareDifferentReplicaManagers(brokerTopicStats1: BrokerTopicStats, @@ -3252,8 +3596,8 @@ class ReplicaManagerTest { val config0 = KafkaConfig.fromProps(props0) val config1 = KafkaConfig.fromProps(props1) - val mockLogMgr0 = TestUtils.createLogManager(config0.logDirs.asScala.map(new File(_))) - val mockLogMgr1 = TestUtils.createLogManager(config1.logDirs.asScala.map(new File(_))) + val mockLogMgr0 = TestUtils.createLogManager(config0.logDirs.map(new File(_))) + val mockLogMgr1 = TestUtils.createLogManager(config1.logDirs.map(new File(_))) val metadataCache0: MetadataCache = mock(classOf[MetadataCache]) val metadataCache1: MetadataCache = mock(classOf[MetadataCache]) @@ -3294,53 +3638,51 @@ class ReplicaManagerTest { @ValueSource(booleans = Array(true, false)) def testOffsetOutOfRangeExceptionWhenReadFromLog(isFromFollower: Boolean): Unit = { val replicaId = if (isFromFollower) 1 else -1 - val fetchMaxBytes = 150 - val partitionMaxBytes = 100 val tp0 = new TopicPartition(topic, 0) - val tp02 = new TopicPartition(topic2, 0) val tidp0 = new TopicIdPartition(topicId, tp0) - val tidp02 = new TopicIdPartition(topicId2, tp02) // create a replicaManager with remoteLog enabled val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteFetchQuotaExceeded = Some(false)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - replicaManager.createPartition(tp02).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId2)) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava - val topicIds = Map(tp0.topic -> topicId, tp02.topic -> topicId2).asJava + val topicIds = Map(tp0.topic -> topicId).asJava val leaderEpoch = 0 - val delta = createLeaderDelta(topicIds.get(topic), tp0, partition0Replicas.get(0), partition0Replicas, partition0Replicas) - val delta2 = createLeaderDelta(topicIds.get(topic2), tp02, partition0Replicas.get(0), partition0Replicas, partition0Replicas) - val leaderMetadataImage = imageFromTopics(delta.apply()) - val leaderMetadataImage2 = imageFromTopics(delta2.apply()) - replicaManager.applyDelta(delta, leaderMetadataImage) - replicaManager.applyDelta(delta2, leaderMetadataImage2) - - val params = new FetchParams(replicaId, 1, 100, 0, fetchMaxBytes, FetchIsolation.LOG_END, Optional.empty) - // when reading logs from 2 partitions, they'll throw OffsetOutOfRangeException, which will be handled separately - val results = replicaManager.readFromLog(params, Seq( - tidp0 -> new PartitionData(topicId, 1, 0, partitionMaxBytes, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)), - tidp02 -> new PartitionData(topicId2, 1, 0, partitionMaxBytes, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch))), UNBOUNDED_QUOTA, false) - - results.foreach { case (tidp, partitionData) => - assertEquals(startOffset, partitionData.leaderLogStartOffset) - assertEquals(endOffset, partitionData.leaderLogEndOffset) - assertEquals(highHW, partitionData.highWatermark) - if (isFromFollower) { - // expect OFFSET_MOVED_TO_TIERED_STORAGE error returned if it's from follower, since the data is already available in remote log - assertEquals(Errors.OFFSET_MOVED_TO_TIERED_STORAGE, partitionData.error) - assertFalse(partitionData.info.delayedRemoteStorageFetch.isPresent) - } else { - assertEquals(Errors.NONE, partitionData.error) - // for consumer fetch, we should return a delayedRemoteStorageFetch to wait for remote fetch - assertTrue(partitionData.info.delayedRemoteStorageFetch.isPresent) - // verify the 1st partition will set the fetchMaxBytes to partitionMaxBytes, - // and the 2nd one will set to the remaining (fetchMaxBytes - partitionMaxBytes) to meet the "fetch.max.bytes" config. - if (tidp.topic == topic) - assertEquals(partitionMaxBytes, partitionData.info.delayedRemoteStorageFetch.get().fetchMaxBytes) - else - assertEquals(fetchMaxBytes - partitionMaxBytes, partitionData.info.delayedRemoteStorageFetch.get().fetchMaxBytes) - } + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(leaderEpoch) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + + val params = new FetchParams(ApiKeys.FETCH.latestVersion, replicaId, 1, 1000, 0, 100, FetchIsolation.LOG_END, None.asJava) + // when reading log, it'll throw OffsetOutOfRangeException, which will be handled separately + val result = replicaManager.readFromLog(params, Seq(tidp0 -> new PartitionData(topicId, 1, 0, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch))), UNBOUNDED_QUOTA, false) + + if (isFromFollower) { + // expect OFFSET_MOVED_TO_TIERED_STORAGE error returned if it's from follower, since the data is already available in remote log + assertEquals(Errors.OFFSET_MOVED_TO_TIERED_STORAGE, result.head._2.error) + } else { + assertEquals(Errors.NONE, result.head._2.error) + } + assertEquals(startOffset, result.head._2.leaderLogStartOffset) + assertEquals(endOffset, result.head._2.leaderLogEndOffset) + assertEquals(highHW, result.head._2.highWatermark) + if (isFromFollower) { + assertFalse(result.head._2.info.delayedRemoteStorageFetch.isPresent) + } else { + // for consumer fetch, we should return a delayedRemoteStorageFetch to wait for remote fetch + assertTrue(result.head._2.info.delayedRemoteStorageFetch.isPresent) } } finally { replicaManager.shutdown(checkpointHW = false) @@ -3350,7 +3692,6 @@ class ReplicaManagerTest { @ParameterizedTest @ValueSource(booleans = Array(true, false)) def testOffsetOutOfRangeExceptionWhenFetchMessages(isFromFollower: Boolean): Unit = { - val brokerList = Seq[Integer](0, 1).asJava val replicaId = if (isFromFollower) 1 else -1 val tp0 = new TopicPartition(topic, 0) val tidp0 = new TopicIdPartition(topicId, tp0) @@ -3358,14 +3699,28 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog= true, remoteFetchQuotaExceeded = Some(false)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava val topicIds = Map(tp0.topic -> topicId).asJava val leaderEpoch = 0 - val delta = createLeaderDelta(topicIds.get(topic), tp0, brokerList.get(0), brokerList, brokerList) - val leaderMetadataImage = imageFromTopics(delta.apply()) - replicaManager.applyDelta(delta, leaderMetadataImage) - - val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(leaderEpoch) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + + val params = new FetchParams(ApiKeys.FETCH.latestVersion, replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, None.asJava) val fetchOffset = 1 def fetchCallback(responseStatus: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { @@ -3388,7 +3743,7 @@ class ReplicaManagerTest { } else { verify(mockRemoteLogManager).asyncRead(remoteStorageFetchInfoArg.capture(), any()) val remoteStorageFetchInfo = remoteStorageFetchInfoArg.getValue - assertEquals(tp0, remoteStorageFetchInfo.topicIdPartition.topicPartition) + assertEquals(tp0, remoteStorageFetchInfo.topicPartition) assertEquals(fetchOffset, remoteStorageFetchInfo.fetchInfo.fetchOffset) assertEquals(topicId, remoteStorageFetchInfo.fetchInfo.topicId) assertEquals(startOffset, remoteStorageFetchInfo.fetchInfo.logStartOffset) @@ -3419,7 +3774,7 @@ class ReplicaManagerTest { props.put(RemoteLogManagerConfig.REMOTE_LOG_READER_THREADS_PROP, 2.toString) val config = KafkaConfig.fromProps(props) val mockLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + val brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) val remoteLogManager = new RemoteLogManager( config.remoteLogManagerConfig, 0, @@ -3429,21 +3784,35 @@ class ReplicaManagerTest { _ => Optional.of(mockLog), (TopicPartition, Long) => {}, brokerTopicStats, - metrics, - Optional.empty) + metrics) + remoteLogManager.startup() val spyRLM = spy(remoteLogManager) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(spyRLM)) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp0.topic -> topicId).asJava val leaderEpoch = 0 - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) - - val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(leaderEpoch) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + + val params = new FetchParams(ApiKeys.FETCH.latestVersion, replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, None.asJava) val fetchOffset = 1 val responseLatch = new CountDownLatch(5) @@ -3516,7 +3885,7 @@ class ReplicaManagerTest { props.put(RemoteLogManagerConfig.REMOTE_LOG_METADATA_MANAGER_CLASS_NAME_PROP, classOf[NoOpRemoteLogMetadataManager].getName) val config = KafkaConfig.fromProps(props) val dummyLog = mock(classOf[UnifiedLog]) - val brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled) + val brokerTopicStats = new BrokerTopicStats(config.remoteLogManagerConfig.isRemoteStorageSystemEnabled()) val remoteLogManager = new RemoteLogManager( config.remoteLogManagerConfig, 0, @@ -3526,8 +3895,8 @@ class ReplicaManagerTest { _ => Optional.of(dummyLog), (TopicPartition, Long) => {}, brokerTopicStats, - metrics, - Optional.empty) + metrics) + remoteLogManager.startup() val spyRLM = spy(remoteLogManager) val timer = new MockTimer(time) @@ -3535,15 +3904,29 @@ class ReplicaManagerTest { try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp0.topic -> topicId).asJava val leaderEpoch = 0 - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(leaderEpoch) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) val mockLog = replicaManager.getPartitionOrException(tp0).log.get - when(mockLog.endOffsetForEpoch(anyInt())).thenReturn(Optional.of(new OffsetAndEpoch(1, 1))) + when(mockLog.endOffsetForEpoch(anyInt())).thenReturn(Some(new OffsetAndEpoch(1, 1))) when(mockLog.read(anyLong(), anyInt(), any(), anyBoolean())).thenReturn(new FetchDataInfo( new LogOffsetMetadata(0L, 0L, 0), MemoryRecords.EMPTY @@ -3555,7 +3938,7 @@ class ReplicaManagerTest { endOffsetMetadata, endOffsetMetadata)) - val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) + val params = new FetchParams(ApiKeys.FETCH.latestVersion, replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, None.asJava) val fetchOffset = 1 def fetchCallback(responseStatus: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { @@ -3592,109 +3975,6 @@ class ReplicaManagerTest { } } - @Test - def testMultipleRemoteFetchesInOneFetchRequest(): Unit = { - val replicaId = -1 - val tp0 = new TopicPartition(topic, 0) - val tp1 = new TopicPartition(topic, 1) - val tidp0 = new TopicIdPartition(topicId, tp0) - val tidp1 = new TopicIdPartition(topicId, tp1) - - val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteFetchQuotaExceeded = Some(false)) - - try { - val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - replicaManager.createPartition(tp1).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) - - val leaderEpoch = 0 - val leaderDelta0 = createLeaderDelta(topicId, tp0, leaderId = 0, leaderEpoch = leaderEpoch) - val leaderDelta1 = createLeaderDelta(topicId, tp1, leaderId = 0, leaderEpoch = leaderEpoch) - val leaderMetadataImage0 = imageFromTopics(leaderDelta0.apply()) - val leaderMetadataImage1 = imageFromTopics(leaderDelta1.apply()) - replicaManager.applyDelta(leaderDelta0, leaderMetadataImage0) - replicaManager.applyDelta(leaderDelta1, leaderMetadataImage1) - - val params = new FetchParams(replicaId, 1, 1000, 10, 100, FetchIsolation.LOG_END, Optional.empty) - val fetchOffsetTp0 = 1 - val fetchOffsetTp1 = 2 - - val responseSeq = new AtomicReference[Seq[(TopicIdPartition, FetchPartitionData)]]() - val responseLatch = new CountDownLatch(1) - - def fetchCallback(responseStatus: Seq[(TopicIdPartition, FetchPartitionData)]): Unit = { - responseSeq.set(responseStatus) - responseLatch.countDown() - } - - val callbacks: util.Set[Consumer[RemoteLogReadResult]] = new util.HashSet[Consumer[RemoteLogReadResult]]() - when(mockRemoteLogManager.asyncRead(any(), any())).thenAnswer(ans => { - callbacks.add(ans.getArgument(1, classOf[Consumer[RemoteLogReadResult]])) - mock(classOf[Future[Void]]) - }) - - // Start the fetch request for both partitions - this should trigger remote fetches since - // the default mocked log behavior throws OffsetOutOfRangeException - replicaManager.fetchMessages(params, Seq( - tidp0 -> new PartitionData(topicId, fetchOffsetTp0, startOffset, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)), - tidp1 -> new PartitionData(topicId, fetchOffsetTp1, startOffset, 100000, Optional.of[Integer](leaderEpoch), Optional.of[Integer](leaderEpoch)) - ), UNBOUNDED_QUOTA, fetchCallback) - - // Verify that exactly two asyncRead calls were made (one for each partition) - val remoteStorageFetchInfoArg: ArgumentCaptor[RemoteStorageFetchInfo] = ArgumentCaptor.forClass(classOf[RemoteStorageFetchInfo]) - verify(mockRemoteLogManager, times(2)).asyncRead(remoteStorageFetchInfoArg.capture(), any()) - - // Verify that remote fetch operations were properly set up for both partitions - assertTrue(replicaManager.delayedRemoteFetchPurgatory.watched == 2, "DelayedRemoteFetch purgatory should have operations") - - // Verify both partitions were captured in the remote fetch requests - val capturedFetchInfos = remoteStorageFetchInfoArg.getAllValues.asScala - assertEquals(2, capturedFetchInfos.size, "Should have 2 remote storage fetch info calls") - - val capturedTopicPartitions = capturedFetchInfos.map(_.topicIdPartition.topicPartition).toSet - assertTrue(capturedTopicPartitions.contains(tp0), "Should contain " + tp0) - assertTrue(capturedTopicPartitions.contains(tp1), "Should contain " + tp1) - - // Verify the fetch info details are correct for both partitions - capturedFetchInfos.foreach { fetchInfo => - assertEquals(topicId, fetchInfo.fetchInfo.topicId) - assertEquals(startOffset, fetchInfo.fetchInfo.logStartOffset) - assertEquals(leaderEpoch, fetchInfo.fetchInfo.currentLeaderEpoch.get()) - if (fetchInfo.topicIdPartition.topicPartition == tp0) { - assertEquals(fetchOffsetTp0, fetchInfo.fetchInfo.fetchOffset) - } else { - assertEquals(fetchOffsetTp1, fetchInfo.fetchInfo.fetchOffset) - } - } - - // Complete the 2 asyncRead tasks - callbacks.forEach(callback => callback.accept(buildRemoteReadResult(Errors.NONE))) - - // Wait for the fetch callback to complete and verify responseSeq content - assertTrue(responseLatch.await(5, TimeUnit.SECONDS), "Fetch callback should complete") - - val responseData = responseSeq.get() - assertNotNull(responseData, "Response sequence should not be null") - assertEquals(2, responseData.size, "Response should contain data for both partitions") - - // Verify that response contains both tidp0 and tidp1 and have no errors - val responseTopicIdPartitions = responseData.map(_._1).toSet - assertTrue(responseTopicIdPartitions.contains(tidp0), "Response should contain " + tidp0) - assertTrue(responseTopicIdPartitions.contains(tidp1), "Response should contain " + tidp1) - responseData.foreach { case (_, fetchPartitionData) => - assertEquals(Errors.NONE, fetchPartitionData.error) - } - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - private def buildRemoteReadResult(error: Errors): RemoteLogReadResult = { - new RemoteLogReadResult( - Optional.of(new FetchDataInfo(LogOffsetMetadata.UNKNOWN_OFFSET_METADATA, MemoryRecords.EMPTY)), - if (error != Errors.NONE) Optional.of[Throwable](error.exception) else Optional.empty[Throwable]()) - } - private def yammerMetricValue(name: String): Any = { val allMetrics = KafkaYammerMetrics.defaultRegistry.allMetrics.asScala val (_, metric) = allMetrics.find { case (n, _) => n.getMBeanName.endsWith(name) } @@ -3709,6 +3989,8 @@ class ReplicaManagerTest { @Test def testSuccessfulBuildRemoteLogAuxStateMetrics(): Unit = { + val tp0 = new TopicPartition(topic, 0) + val remoteLogManager = mock(classOf[RemoteLogManager]) val remoteLogSegmentMetadata = mock(classOf[RemoteLogSegmentMetadata]) when(remoteLogManager.fetchRemoteLogSegmentMetadata(any(), anyInt(), anyLong())).thenReturn( @@ -3717,29 +3999,43 @@ class ReplicaManagerTest { val storageManager = mock(classOf[RemoteStorageManager]) when(storageManager.fetchIndex(any(), any())).thenReturn(new ByteArrayInputStream("0".getBytes())) when(remoteLogManager.storageManager()).thenReturn(storageManager) - when(remoteLogManager.isPartitionReady(any())).thenReturn(true) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { + val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(topicPartition).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp0.topic -> topicId).asJava + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() // Verify the metrics for build remote log state and for failures is zero before replicas start to fetch - assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count) - assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).failedBuildRemoteLogAuxStateRate.count) + assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) + assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).failedBuildRemoteLogAuxStateRate.count) // Verify aggregate metrics assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - val leaderDelta = createLeaderDelta(topicId, topicPartition, leaderId = 1, replicas = partition0Replicas, isr = partition0Replicas) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing - waitUntilTrue(() => brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count > 0, - "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(topicPartition.topic()).buildRemoteLogAuxStateRequestRate.count) - assertEquals(0, brokerTopicStats.topicStats(topicPartition.topic()).failedBuildRemoteLogAuxStateRate.count) + waitUntilTrue(() => brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0, + "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) + assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).failedBuildRemoteLogAuxStateRate.count) // Verify aggregate metrics waitUntilTrue(() => brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count > 0, "Should have all topic buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) @@ -3761,7 +4057,24 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) + val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp0.topic -> topicId).asJava + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() // Verify the metrics for build remote log state and for failures is zero before replicas start to fetch assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -3770,13 +4083,10 @@ class ReplicaManagerTest { assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - val brokerList = Seq[Integer](0, 1).asJava - val delta = createLeaderDelta(topicId, new TopicPartition(topic, 0), brokerList.get(1), brokerList, brokerList) - val leaderMetadataImage = imageFromTopics(delta.apply()) - replicaManager.applyDelta(delta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing - // We expect failedBuildRemoteLogAuxStateRate to increase because the RemoteLogManager is not ready for the tp0 + // We expect failedBuildRemoteLogAuxStateRate to increase because there is no remoteLogSegmentMetadata // when attempting to build log aux state TestUtils.waitUntilTrue(() => brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count > 0, "Should have buildRemoteLogAuxStateRequestRate count > 0, but got:" + brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -3806,8 +4116,24 @@ class ReplicaManagerTest { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true, remoteLogManager = Some(remoteLogManager), buildRemoteLogAuxState = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp0).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp0.topic -> topicId).asJava + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp0.topic) + .setPartitionIndex(tp0.partition) + .setControllerEpoch(0) + .setLeader(1) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() // Verify the metrics for build remote log state and for failures is zero before replicas start to fetch assertEquals(0, brokerTopicStats.topicStats(tp0.topic()).buildRemoteLogAuxStateRequestRate.count) @@ -3816,9 +4142,7 @@ class ReplicaManagerTest { assertEquals(0, brokerTopicStats.allTopicsStats.buildRemoteLogAuxStateRequestRate.count) assertEquals(0, brokerTopicStats.allTopicsStats.failedBuildRemoteLogAuxStateRate.count) - val leaderDelta = createLeaderDelta(topicId, tp0, leaderId = 1, replicas = partition0Replicas, isr = partition0Replicas) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Replicas fetch from the leader periodically, therefore we check that the metric value is increasing // We expect failedBuildRemoteLogAuxStateRate to increase because fetchRemoteLogSegmentMetadata returns RemoteStorageException @@ -3836,29 +4160,27 @@ class ReplicaManagerTest { } } - private def setupMockLog(path: String, logConfig: LogConfig, enableRemoteStorage: Boolean, topicPartition: TopicPartition, topicId: Option[Uuid]): UnifiedLog = { + private def setupMockLog(path: String): UnifiedLog = { val mockLog = mock(classOf[UnifiedLog]) - val partitionDir = new File(path, s"$topicPartition") + val partitionDir = new File(path, s"$topic-0") partitionDir.mkdir() when(mockLog.dir).thenReturn(partitionDir) when(mockLog.parentDir).thenReturn(path) - when(mockLog.topicId).thenReturn(topicId.toJava) - when(mockLog.topicPartition).thenReturn(topicPartition) + when(mockLog.topicId).thenReturn(Some(topicId)) + when(mockLog.topicPartition).thenReturn(new TopicPartition(topic, 0)) when(mockLog.highWatermark).thenReturn(highHW) when(mockLog.updateHighWatermark(anyLong())).thenReturn(0L) when(mockLog.logEndOffsetMetadata).thenReturn(new LogOffsetMetadata(10)) - when(mockLog.maybeIncrementHighWatermark(any(classOf[LogOffsetMetadata]))).thenReturn(Optional.empty) - when(mockLog.endOffsetForEpoch(anyInt())).thenReturn(Optional.empty) + when(mockLog.maybeIncrementHighWatermark(any(classOf[LogOffsetMetadata]))).thenReturn(None) + when(mockLog.endOffsetForEpoch(anyInt())).thenReturn(None) // try to return a high start offset to cause OffsetOutOfRangeException at the 1st time when(mockLog.logStartOffset).thenReturn(endOffset).thenReturn(startOffset) when(mockLog.logEndOffset).thenReturn(endOffset) when(mockLog.localLogStartOffset()).thenReturn(endOffset - 10) when(mockLog.leaderEpochCache).thenReturn(mock(classOf[LeaderEpochFileCache])) - when(mockLog.latestEpoch).thenReturn(Optional.of(0)) + when(mockLog.latestEpoch).thenReturn(Some(0)) val producerStateManager = mock(classOf[ProducerStateManager]) when(mockLog.producerStateManager).thenReturn(producerStateManager) - when(mockLog.config).thenReturn(logConfig) - when(mockLog.remoteLogEnabled()).thenReturn(enableRemoteStorage) mockLog } @@ -3869,7 +4191,7 @@ class ReplicaManagerTest { def createReplicaManager(): ReplicaManager = { val props = TestUtils.createBrokerConfig(1) val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) new ReplicaManager( metrics = metrics, config = config, @@ -3877,7 +4199,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager) { override def getPartitionOrException(topicPartition: TopicPartition): Partition = { @@ -3901,14 +4223,30 @@ class ReplicaManagerTest { def testPartitionMetadataFile(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { - val leaderDelta = topicsCreateDelta(0, isStartIdLeader = true, partitions = List(0), - topicName = topic, topicId = topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) - - assertTrue(replicaManager.getPartition(topicPartition).isInstanceOf[HostedPartition.Online]) + val brokerList = Seq[Integer](0, 1).asJava + val topicPartition = new TopicPartition(topic, 0) + val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) + val topicNames = topicIds.asScala.map(_.swap).asJava + + def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest = + new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(epoch) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + + val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ()) + assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) assertFalse(replicaManager.localLog(topicPartition).isEmpty) - val id = topicIds(topicPartition.topic) + val id = topicIds.get(topicPartition.topic()) val log = replicaManager.localLog(topicPartition).get assertTrue(log.partitionMetadataFile.get.exists()) val partitionMetadata = log.partitionMetadataFile.get.read() @@ -3925,60 +4263,84 @@ class ReplicaManagerTest { def testInconsistentIdReturnsError(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { - val invalidTopicId = Uuid.randomUuid() - - val initialDelta = topicsCreateDelta(0, isStartIdLeader = true, - partitions = List(0), topicName = topic, topicId = topicIds(topic)) - val initialImage = imageFromTopics(initialDelta.apply()) - replicaManager.applyDelta(initialDelta, initialImage) - - val updateDelta = topicsCreateDelta(0, isStartIdLeader = true, - partitions = List(0), topicName = topic, topicId = topicIds(topic), leaderEpoch = 1) - val updateImage = imageFromTopics(updateDelta.apply()) - replicaManager.applyDelta(updateDelta, updateImage) + val brokerList = Seq[Integer](0, 1).asJava + val topicPartition = new TopicPartition(topic, 0) + val topicIds = Collections.singletonMap(topic, Uuid.randomUuid()) + val topicNames = topicIds.asScala.map(_.swap).asJava + + val invalidTopicIds = Collections.singletonMap(topic, Uuid.randomUuid()) + val invalidTopicNames = invalidTopicIds.asScala.map(_.swap).asJava + + def leaderAndIsrRequest(epoch: Int, topicIds: java.util.Map[String, Uuid]): LeaderAndIsrRequest = + new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(epoch) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + + val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topicIds), (_, _) => ()) + assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) + + val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, topicIds), (_, _) => ()) + assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition)) // Send request with inconsistent ID. - val inconsistentDelta1 = topicsCreateDelta(0, isStartIdLeader = true, - partitions = List(0), topicName = topic, topicId = invalidTopicId, leaderEpoch = 1) - val inconsistentImage1 = imageFromTopics(inconsistentDelta1.apply()) - val exception1 = assertThrows(classOf[IllegalStateException], () => { - replicaManager.applyDelta(inconsistentDelta1, inconsistentImage1) - }) - assertEquals(s"Topic ${topic}-0 exists, but its ID is ${topicId}, not ${invalidTopicId} as expected", exception1.getMessage) - - val inconsistentDelta2 = topicsCreateDelta(0, isStartIdLeader = true, - partitions = List(0), topicName = topic, topicId = invalidTopicId, leaderEpoch = 2) - val inconsistentImage2 = imageFromTopics(inconsistentDelta2.apply()) - val exception2 = assertThrows(classOf[IllegalStateException], () => { - replicaManager.applyDelta(inconsistentDelta2, inconsistentImage2) - }) - assertEquals(s"Topic ${topic}-0 exists, but its ID is ${topicId}, not ${invalidTopicId} as expected", exception2.getMessage) + val response3 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(1, invalidTopicIds), (_, _) => ()) + assertEquals(Errors.INCONSISTENT_TOPIC_ID, response3.partitionErrors(invalidTopicNames).get(topicPartition)) + val response4 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(2, invalidTopicIds), (_, _) => ()) + assertEquals(Errors.INCONSISTENT_TOPIC_ID, response4.partitionErrors(invalidTopicNames).get(topicPartition)) } finally { replicaManager.shutdown(checkpointHW = false) } } @Test - def testPartitionMetadataFileCreated(): Unit = { + def testPartitionMetadataFileNotCreated(): Unit = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time)) try { val brokerList = Seq[Integer](0, 1).asJava val topicPartition = new TopicPartition(topic, 0) - - val leaderDelta = createLeaderDelta( - topicId = Uuid.ZERO_UUID, - partition = topicPartition, - leaderId = 0, - replicas = brokerList, - isr = brokerList, - ) - - // The file exists if the topic has the default UUID. - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + val topicPartitionFake = new TopicPartition("fakeTopic", 0) + val topicIds = Map(topic -> Uuid.ZERO_UUID, "foo" -> Uuid.randomUuid()).asJava + val topicNames = topicIds.asScala.map(_.swap).asJava + + def leaderAndIsrRequest(epoch: Int, name: String): LeaderAndIsrRequest = + new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(new LeaderAndIsrRequest.PartitionState() + .setTopicName(name) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(epoch) + .setIsr(brokerList) + .setPartitionEpoch(0) + .setReplicas(brokerList) + .setIsNew(true)).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + + // There is no file if the topic does not have an associated topic ID. + val response = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, "fakeTopic"), (_, _) => ()) + assertTrue(replicaManager.localLog(topicPartitionFake).isDefined) + val log = replicaManager.localLog(topicPartitionFake).get + assertFalse(log.partitionMetadataFile.get.exists()) + assertEquals(Errors.NONE, response.partitionErrors(topicNames).get(topicPartition)) + + // There is no file if the topic has the default UUID. + val response2 = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest(0, topic), (_, _) => ()) assertTrue(replicaManager.localLog(topicPartition).isDefined) - val log = replicaManager.localLog(topicPartition).get - assertTrue(log.partitionMetadataFile.get.exists()) + val log2 = replicaManager.localLog(topicPartition).get + assertFalse(log2.partitionMetadataFile.get.exists()) + assertEquals(Errors.NONE, response2.partitionErrors(topicNames).get(topicPartition)) } finally { replicaManager.shutdown(checkpointHW = false) @@ -3999,22 +4361,21 @@ class ReplicaManagerTest { // Delete the data directory to trigger a storage exception Utils.delete(dataDir) - val leaderDelta = createLeaderDelta( - topicId = topicId, - partition = topicPartition, - leaderId = if (becomeLeader) 0 else 1, - replicas = util.Arrays.asList(0 , 1), - isr = util.Arrays.asList(0, 1), + val request = makeLeaderAndIsrRequest( + topicId = Uuid.randomUuid(), + topicPartition = topicPartition, + replicas = Seq(0, 1), + leaderAndIsr = new LeaderAndIsr(if (becomeLeader) 0 else 1, List(0, 1).map(Int.box).asJava) ) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + replicaManager.becomeLeaderOrFollower(0, request, (_, _) => ()) val hostedPartition = replicaManager.getPartition(topicPartition) assertEquals( classOf[HostedPartition.Offline], hostedPartition.getClass ) assertEquals( - topicId, + request.topicIds().get(topicPartition.topic()), hostedPartition.asInstanceOf[HostedPartition.Offline].partition.flatMap(p => p.topicId).get ) } finally { @@ -4022,6 +4383,43 @@ class ReplicaManagerTest { } } + private def makeLeaderAndIsrRequest( + topicId: Uuid, + topicPartition: TopicPartition, + replicas: Seq[Int], + leaderAndIsr: LeaderAndIsr, + isNew: Boolean = true, + brokerEpoch: Int = 0, + controllerId: Int = 0, + controllerEpoch: Int = 0 + ): LeaderAndIsrRequest = { + val partitionState = new LeaderAndIsrRequest.PartitionState() + .setTopicName(topicPartition.topic) + .setPartitionIndex(topicPartition.partition) + .setControllerEpoch(controllerEpoch) + .setLeader(leaderAndIsr.leader) + .setLeaderEpoch(leaderAndIsr.leaderEpoch) + .setIsr(leaderAndIsr.isr) + .setPartitionEpoch(leaderAndIsr.partitionEpoch) + .setReplicas(replicas.map(Int.box).asJava) + .setIsNew(isNew) + + def mkNode(replicaId: Int): Node = { + new Node(replicaId, s"host-$replicaId", 9092) + } + + val nodes = Set(mkNode(controllerId)) ++ replicas.map(mkNode).toSet + + new LeaderAndIsrRequest.Builder( + controllerId, + controllerEpoch, + brokerEpoch, + Seq(partitionState).asJava, + Map(topicPartition.topic -> topicId).asJava, + nodes.asJava + ).build() + } + @Test def testActiveProducerState(): Unit = { val brokerId = 0 @@ -4037,87 +4435,35 @@ class ReplicaManagerTest { val oofProducerState = replicaManager.activeProducerState(oofPartition) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, Errors.forCode(oofProducerState.errorCode)) - val barPartition = new TopicPartition("bar", 0) - val barTopicId = Uuid.randomUuid() - - val leaderDelta = createLeaderDelta(barTopicId, barPartition, brokerId) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) + // This API is supported by both leaders and followers + val barPartition = new TopicPartition("bar", 0) + val barLeaderAndIsrRequest = makeLeaderAndIsrRequest( + topicId = Uuid.randomUuid(), + topicPartition = barPartition, + replicas = Seq(brokerId), + leaderAndIsr = new LeaderAndIsr(brokerId, List(brokerId).map(Int.box).asJava) + ) + replicaManager.becomeLeaderOrFollower(0, barLeaderAndIsrRequest, (_, _) => ()) val barProducerState = replicaManager.activeProducerState(barPartition) assertEquals(Errors.NONE, Errors.forCode(barProducerState.errorCode)) - val bazPartition = new TopicPartition("baz", 0) - val bazTopicId = Uuid.randomUuid() val otherBrokerId = 1 - - val followerDelta = createFollowerDelta(bazTopicId, bazPartition, brokerId, otherBrokerId) - val followerMetadataImage = imageFromTopics(followerDelta.apply()) - replicaManager.applyDelta(followerDelta, followerMetadataImage) - + val bazPartition = new TopicPartition("baz", 0) + val bazLeaderAndIsrRequest = makeLeaderAndIsrRequest( + topicId = Uuid.randomUuid(), + topicPartition = bazPartition, + replicas = Seq(brokerId, otherBrokerId), + leaderAndIsr = new LeaderAndIsr(otherBrokerId, List(brokerId, otherBrokerId).map(Int.box).asJava) + ) + replicaManager.becomeLeaderOrFollower(0, bazLeaderAndIsrRequest, (_, _) => ()) val bazProducerState = replicaManager.activeProducerState(bazPartition) assertEquals(Errors.NONE, Errors.forCode(bazProducerState.errorCode)) - } finally { replicaManager.shutdown(checkpointHW = false) } } - private def createLeaderDelta( - topicId: Uuid, - partition: TopicPartition, - leaderId: Integer, - replicas: util.List[Integer] = null, - isr: util.List[Integer] = null, - leaderEpoch: Int = 0): TopicsDelta = { - val delta = new TopicsDelta(TopicsImage.EMPTY) - val effectiveReplicas = Option(replicas).getOrElse(java.util.List.of(leaderId)) - val effectiveIsr = Option(isr).getOrElse(java.util.List.of(leaderId)) - - delta.replay(new TopicRecord() - .setName(partition.topic) - .setTopicId(topicId) - ) - - delta.replay(new PartitionRecord() - .setPartitionId(partition.partition) - .setTopicId(topicId) - .setReplicas(effectiveReplicas) - .setIsr(effectiveIsr) - .setLeader(leaderId) - .setLeaderEpoch(leaderEpoch) - .setPartitionEpoch(0) - ) - - delta - } - - private def createFollowerDelta( - topicId: Uuid, - partition: TopicPartition, - followerId: Int, - leaderId: Int, - leaderEpoch: Int = 0): TopicsDelta = { - val delta = new TopicsDelta(TopicsImage.EMPTY) - - delta.replay(new TopicRecord() - .setName(partition.topic) - .setTopicId(topicId) - ) - - delta.replay(new PartitionRecord() - .setPartitionId(partition.partition) - .setTopicId(topicId) - .setReplicas(util.Arrays.asList(followerId, leaderId)) - .setIsr(util.Arrays.asList(followerId, leaderId)) - .setLeader(leaderId) - .setLeaderEpoch(leaderEpoch) - .setPartitionEpoch(0) - ) - - delta - } - val FOO_UUID = Uuid.fromString("fFJBx0OmQG-UqeaT6YaSwA") val BAR_UUID = Uuid.fromString("vApAP6y7Qx23VOfKBzbOBQ") @@ -4143,7 +4489,7 @@ class ReplicaManagerTest { val mockLog = mock(classOf[UnifiedLog]) when(replicaManager.logManager.getLog(bar1)).thenReturn(Some(mockLog)) - when(mockLog.topicId).thenReturn(Optional.of(BAR_UUID)) + when(mockLog.topicId).thenReturn(Some(BAR_UUID)) replicaManager.markPartitionOffline(bar1) assertTrue(replicaManager.getOrCreatePartition(bar1, emptyDelta, BAR_UUID).isEmpty) @@ -4157,16 +4503,14 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, "foo") val directoryEventHandler = mock(classOf[DirectoryEventHandler]) - val aliveBrokerIds = Array(1, 2) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) try { val directoryIds = replicaManager.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) - val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = directoryIds) + val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = directoryIds) val (partition: Partition, isNewWhenCreatedForFirstTime: Boolean) = replicaManager.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), - isNew = false, + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava), None) @@ -4183,8 +4527,8 @@ class ReplicaManagerTest { } private def verifyRLMOnLeadershipChange(leaderPartitions: util.Set[Partition], followerPartitions: util.Set[Partition]): Unit = { - val leaderCapture: ArgumentCaptor[util.Set[TopicPartitionLog]] = ArgumentCaptor.forClass(classOf[util.Set[TopicPartitionLog]]) - val followerCapture: ArgumentCaptor[util.Set[TopicPartitionLog]] = ArgumentCaptor.forClass(classOf[util.Set[TopicPartitionLog]]) + val leaderCapture: ArgumentCaptor[util.Set[Partition]] = ArgumentCaptor.forClass(classOf[util.Set[Partition]]) + val followerCapture: ArgumentCaptor[util.Set[Partition]] = ArgumentCaptor.forClass(classOf[util.Set[Partition]]) val topicIdsCapture: ArgumentCaptor[util.Map[String, Uuid]] = ArgumentCaptor.forClass(classOf[util.Map[String, Uuid]]) verify(mockRemoteLogManager).onLeadershipChange(leaderCapture.capture(), followerCapture.capture(), topicIdsCapture.capture()) @@ -4211,7 +4555,7 @@ class ReplicaManagerTest { // Test applying delta as leader val directoryIds = replicaManager.logManager.directoryIdsSet.toList // Make the local replica the leader - val leaderTopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = directoryIds) + val leaderTopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = directoryIds) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) @@ -4221,7 +4565,7 @@ class ReplicaManagerTest { assertEquals(directoryIds.head, logDirIdHostingPartition0) // Test applying delta as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = directoryIds) + val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = directoryIds) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4249,7 +4593,7 @@ class ReplicaManagerTest { try { // Make the local replica the leader - val leaderTopicsDelta = topicsCreateDelta(localId, true, partitions = List(0), directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) + val leaderTopicsDelta = topicsCreateDelta(localId, true, partition = 0, directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id val topicIdPartition0 = new TopicIdPartition(topicId, topicPartition0) @@ -4257,7 +4601,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) // Make the local replica the as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) + val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = List(DirectoryId.UNASSIGNED, DirectoryId.UNASSIGNED)) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4303,7 +4647,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) // Make the local replica the as follower - val followerTopicsDelta = topicsCreateDelta(localId, false, partitions = List(1), directoryIds = List(DirectoryId.LOST, DirectoryId.LOST)) + val followerTopicsDelta = topicsCreateDelta(localId, false, partition = 1, directoryIds = List(DirectoryId.LOST, DirectoryId.LOST)) val followerMetadataImage = imageFromTopics(followerTopicsDelta.apply()) replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) @@ -4342,7 +4686,8 @@ class ReplicaManagerTest { // Make the local replica the leader val leaderTopicsDelta = topicsCreateDelta(localId, true) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) - val topicIdPartition = new TopicIdPartition(FOO_UUID, topicPartition) + val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id + val topicIdPartition = new TopicIdPartition(topicId, topicPartition) replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) @@ -4360,7 +4705,7 @@ class ReplicaManagerTest { } // Send a produce request and advance the highwatermark - val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) fetchPartitionAsFollower( replicaManager, topicIdPartition, @@ -4375,7 +4720,7 @@ class ReplicaManagerTest { replicaManager.applyDelta(followerTopicsDelta, followerMetadataImage) // Append on a follower should fail - val followerResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) + val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error) // Check the state of that partition and fetcher @@ -4425,19 +4770,18 @@ class ReplicaManagerTest { assertEquals(Some(new BrokerEndPoint(otherId, otherEndpoint.host(), otherEndpoint.port())), fetcher.map(_.leader.brokerEndPoint())) // Append on a follower should fail - val followerResponse = sendProducerAppend(replicaManager, - new TopicIdPartition(followerMetadataImage.topics().topicsByName().get("foo").id, topicPartition), - numOfRecords) + val followerResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER, followerResponse.get.error) // Change the local replica to leader val leaderTopicsDelta = topicsChangeDelta(followerMetadataImage.topics(), localId, true) val leaderMetadataImage = imageFromTopics(leaderTopicsDelta.apply()) + val topicId = leaderMetadataImage.topics().topicsByName.get("foo").id + val topicIdPartition = new TopicIdPartition(topicId, topicPartition) replicaManager.applyDelta(leaderTopicsDelta, leaderMetadataImage) - val topicIdPartition = new TopicIdPartition(leaderMetadataImage.topics().topicsByName().get("foo").id, topicPartition) // Send a produce request and advance the highwatermark - val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) fetchPartitionAsFollower( replicaManager, topicIdPartition, @@ -4703,8 +5047,7 @@ class ReplicaManagerTest { val localId = 1 val otherId = localId + 1 val numOfRecords = 3 - val topicIdPartition = new TopicIdPartition(FOO_UUID, 0, "foo") - val topicPartition = topicIdPartition.topicPartition() + val topicPartition = new TopicPartition("foo", 0) val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, enableRemoteStorage = enableRemoteStorage) try { @@ -4727,7 +5070,7 @@ class ReplicaManagerTest { assertEquals(None, replicaManager.replicaFetcherManager.getFetcher(topicPartition)) // Send a produce request - val leaderResponse = sendProducerAppend(replicaManager, topicIdPartition, numOfRecords) + val leaderResponse = sendProducerAppend(replicaManager, topicPartition, numOfRecords) // Change the local replica to follower val followerTopicsDelta = topicsChangeDelta(leaderMetadataImage.topics(), localId, false) @@ -4905,12 +5248,9 @@ class ReplicaManagerTest { replicaManager.getPartition(topicPartition) match { case HostedPartition.Online(partition) => partition.appendRecordsToFollowerOrFutureReplica( - records = MemoryRecords.withRecords( - Compression.NONE, 0, - new SimpleRecord("first message".getBytes) - ), - isFuture = false, - partitionLeaderEpoch = 0 + records = MemoryRecords.withRecords(Compression.NONE, 0, + new SimpleRecord("first message".getBytes)), + isFuture = false ) case _ => @@ -5069,10 +5409,12 @@ class ReplicaManagerTest { val foo2 = new TopicPartition("foo", 2) val mockReplicaFetcherManager = mock(classOf[ReplicaFetcherManager]) + val isShuttingDown = new AtomicBoolean(false) val replicaManager = setupReplicaManagerWithMockedPurgatories( timer = new MockTimer(time), brokerId = localId, mockReplicaFetcherManager = Some(mockReplicaFetcherManager), + isShuttingDown = isShuttingDown, enableRemoteStorage = enableRemoteStorage ) @@ -5150,6 +5492,10 @@ class ReplicaManagerTest { reset(mockReplicaFetcherManager) + // The broker transitions to SHUTTING_DOWN state. This should not have + // any impact in KRaft mode. + isShuttingDown.set(true) + // The replica begins the controlled shutdown. replicaManager.beginControlledShutdown() @@ -5225,15 +5571,22 @@ class ReplicaManagerTest { assertFalse(replicaManager.maybeAddListener(tp, listener)) // Broker 0 becomes leader of the partition - val leaderDelta = createLeaderDelta( - topicId = topicId, - partition = tp, - leaderId = 0, - replicas = replicas, - isr = replicas, - leaderEpoch = leaderEpoch - ) - replicaManager.applyDelta(leaderDelta, imageFromTopics(leaderDelta.apply())) + val leaderAndIsrPartitionState = new LeaderAndIsrRequest.PartitionState() + .setTopicName(topic) + .setPartitionIndex(0) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(leaderEpoch) + .setIsr(replicas) + .setPartitionEpoch(0) + .setReplicas(replicas) + .setIsNew(true) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq(leaderAndIsrPartitionState).asJava, + Collections.singletonMap(topic, topicId), + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + val leaderAndIsrResponse = replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + assertEquals(Errors.NONE, leaderAndIsrResponse.error) // Registering it should succeed now. assertTrue(replicaManager.maybeAddListener(tp, listener)) @@ -5287,30 +5640,29 @@ class ReplicaManagerTest { } } - private def topicsCreateDelta(startId: Int, isStartIdLeader: Boolean, partitions:List[Int] = List(0), directoryIds: List[Uuid] = List.empty, topicName: String = "foo", topicId: Uuid = FOO_UUID, leaderEpoch: Int = 0): TopicsDelta = { + private def topicsCreateDelta(startId: Int, isStartIdLeader: Boolean, partition:Int = 0, directoryIds: List[Uuid] = List.empty): TopicsDelta = { val leader = if (isStartIdLeader) startId else startId + 1 val delta = new TopicsDelta(TopicsImage.EMPTY) - delta.replay(new TopicRecord().setName(topicName).setTopicId(topicId)) - - partitions.foreach { partition => - val record = partitionRecord(startId, leader, partition, topicId, leaderEpoch) - if (directoryIds.nonEmpty) { - record.setDirectories(directoryIds.asJava) - } - delta.replay(record) + delta.replay(new TopicRecord().setName("foo").setTopicId(FOO_UUID)) + val record = partitionRecord(startId, leader, partition) + if (!directoryIds.isEmpty) { + record.setDirectories(directoryIds.asJava) } + delta.replay(record) delta } - private def partitionRecord(startId: Int, leader: Int, partition: Int = 0, topicId: Uuid = FOO_UUID, leaderEpoch: Int = 0) = { + private def partitionRecord(startId: Int, leader: Int, partition: Int = 0) = { new PartitionRecord() .setPartitionId(partition) - .setTopicId(topicId) + .setTopicId(FOO_UUID) .setReplicas(util.Arrays.asList(startId, startId + 1)) .setIsr(util.Arrays.asList(startId, startId + 1)) + .setRemovingReplicas(Collections.emptyList()) + .setAddingReplicas(Collections.emptyList()) .setLeader(leader) - .setLeaderEpoch(leaderEpoch) + .setLeaderEpoch(0) .setPartitionEpoch(0) } @@ -5363,74 +5715,8 @@ class ReplicaManagerTest { assertEquals(expectedTopicId, fetchState.get.topicId) } - @Test - def testReplicaAlterLogDirsMultipleReassignmentDoesNotBlockLogCleaner(): Unit = { - val localId = 0 - val tp = new TopicPartition(topic, 0) - val tpId = new TopicIdPartition(topicId, tp) - - val props = TestUtils.createBrokerConfig(localId) - val path1 = TestUtils.tempRelativeDir("data").getAbsolutePath - val path2 = TestUtils.tempRelativeDir("data2").getAbsolutePath - val path3 = TestUtils.tempRelativeDir("data3").getAbsolutePath - props.put("log.dirs", Seq(path1, path2, path3).mkString(",")) - val config = KafkaConfig.fromProps(props) - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_)), cleanerConfig = new CleanerConfig(true)) - mockLogMgr.startup(Set()) - val replicaManager = new ReplicaManager( - metrics = metrics, - config = config, - time = time, - scheduler = new MockScheduler(time), - logManager = mockLogMgr, - quotaManagers = quotaManager, - metadataCache = metadataCache, - logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - alterPartitionManager = alterPartitionManager, - addPartitionsToTxnManager = Some(addPartitionsToTxnManager)) - - try { - val spiedPartition = spy(Partition(tpId, time, replicaManager)) - replicaManager.addOnlinePartition(tp, spiedPartition) - - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) - - // Move the replica to the second log directory. - val partition = replicaManager.getPartitionOrException(tp) - val firstLogDir = partition.log.get.dir.getParentFile - val newReplicaFolder = replicaManager.logManager.liveLogDirs.filterNot(_ == firstLogDir).head - replicaManager.alterReplicaLogDirs(Map(tp -> newReplicaFolder.getAbsolutePath)) - - // Prevent promotion of future replica - doReturn(false).when(spiedPartition).maybeReplaceCurrentWithFutureReplica() - - // Make sure the future log is created with the correct topic ID. - val futureLog = replicaManager.futureLocalLogOrException(tp) - assertEquals(Optional.of(topicId), futureLog.topicId) - - // Move the replica to the third log directory - val finalReplicaFolder = replicaManager.logManager.liveLogDirs.filterNot(it => it == firstLogDir || it == newReplicaFolder).head - replicaManager.alterReplicaLogDirs(Map(tp -> finalReplicaFolder.getAbsolutePath)) - - reset(spiedPartition) - - TestUtils.waitUntilTrue(() => { - replicaManager.replicaAlterLogDirsManager.shutdownIdleFetcherThreads() - replicaManager.replicaAlterLogDirsManager.fetcherThreadMap.isEmpty - }, s"ReplicaAlterLogDirsThread should be gone", waitTimeMs = 60_000) - - verify(replicaManager.logManager.cleaner, times(2)).resumeCleaning(Set(tp).asJava) - } finally { - replicaManager.shutdown(checkpointHW = false) - mockLogMgr.shutdown() - } - } - @Test def testReplicaAlterLogDirs(): Unit = { - val localId = 0 val tp = new TopicPartition(topic, 0) val mockReplicaAlterLogDirsManager = mock(classOf[ReplicaAlterLogDirsManager]) @@ -5447,9 +5733,13 @@ class ReplicaManagerTest { topicId = None ) - val leaderDelta = topicsCreateDelta(localId, isStartIdLeader = true, partitions = List(0, 1), List.empty, topic, topicIds(topic)) - val leaderImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderImage) + val leaderAndIsrRequest = makeLeaderAndIsrRequest( + topicId = topicId, + topicPartition = tp, + replicas = Seq(0, 1), + leaderAndIsr = new LeaderAndIsr(0, List(0, 1).map(Int.box).asJava), + ) + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) // Move the replica to the second log directory. val partition = replicaManager.getPartitionOrException(tp) @@ -5458,7 +5748,7 @@ class ReplicaManagerTest { // Make sure the future log is created with the correct topic ID. val futureLog = replicaManager.futureLocalLogOrException(tp) - assertEquals(Optional.of(topicId), futureLog.topicId) + assertEquals(Some(topicId), futureLog.topicId) // Verify that addFetcherForPartitions was called with the correct topic ID. verify(mockReplicaAlterLogDirsManager, times(1)).addFetcherForPartitions(Map(tp -> InitialFetchState( @@ -5486,12 +5776,12 @@ class ReplicaManagerTest { // Prepare the mocked components for the test val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Optional.of(10), offsetFromLeader = offsetFromLeader, topicId = Optional.of(topicId)) + expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) try { val responses = replicaManager.describeLogDirs(Set(new TopicPartition(topic, topicPartition))) assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.forEach { response => + responses.foreach { response => assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) @@ -5518,12 +5808,12 @@ class ReplicaManagerTest { // Prepare the mocked components for the test val (replicaManager, mockLogMgr) = prepareReplicaManagerAndLogManager(new MockTimer(time), topicPartition, leaderEpoch + leaderEpochIncrement, followerBrokerId, leaderBrokerId, countDownLatch, - expectTruncation = false, localLogOffset = Optional.of(10), offsetFromLeader = offsetFromLeader, topicId = Optional.of(topicId)) + expectTruncation = false, localLogOffset = Some(10), offsetFromLeader = offsetFromLeader, topicId = Some(topicId)) try { val responses = replicaManager.describeLogDirs(Set(new TopicPartition(noneTopic, topicPartition))) assertEquals(mockLogMgr.liveLogDirs.size, responses.size) - responses.forEach { response => + responses.foreach { response => assertEquals(Errors.NONE.code, response.errorCode) assertTrue(response.totalBytes > 0) assertTrue(response.usableBytes >= 0) @@ -5536,7 +5826,7 @@ class ReplicaManagerTest { @Test def testCheckpointHwOnShutdown(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) + val mockLogMgr = TestUtils.createLogManager(config.logDirs.map(new File(_))) val spyRm = spy(new ReplicaManager( metrics = metrics, config = config, @@ -5544,7 +5834,7 @@ class ReplicaManagerTest { scheduler = new MockScheduler(time), logManager = mockLogMgr, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterPartitionManager)) @@ -5608,7 +5898,7 @@ class ReplicaManagerTest { val hostedPartition = partition.asInstanceOf[HostedPartition.Online] assertTrue(hostedPartition.partition.log.isDefined, s"Expected ${topicIdPartition} to have a log set in ReplicaManager, but it did not.") - assertTrue(hostedPartition.partition.log.get.topicId.isPresent, + assertTrue(hostedPartition.partition.log.get.topicId.isDefined, s"Expected the log for ${topicIdPartition} to topic ID set in LogManager, but it did not.") assertEquals(topicIdPartition.topicId(), hostedPartition.partition.log.get.topicId.get) assertEquals(topicIdPartition.topicPartition(), hostedPartition.partition.topicPartition) @@ -5658,7 +5948,7 @@ class ReplicaManagerTest { assertEquals(tpId0, fetch.head._1) val fetchInfo = fetch.head._2.info assertEquals(1L, fetchInfo.fetchOffsetMetadata.messageOffset) - assertEquals(UnifiedLog.UNKNOWN_OFFSET, fetchInfo.fetchOffsetMetadata.segmentBaseOffset) + assertEquals(UnifiedLog.UnknownOffset, fetchInfo.fetchOffsetMetadata.segmentBaseOffset) assertEquals(-1, fetchInfo.fetchOffsetMetadata.relativePositionInSegment) assertEquals(MemoryRecords.EMPTY, fetchInfo.records) assertTrue(fetchInfo.delayedRemoteStorageFetch.isPresent) @@ -5718,15 +6008,13 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) val directoryEventHandler = mock(classOf[DirectoryEventHandler]) - val aliveBrokerIds = Array(1, 2) val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) val directoryIds = rm.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), - isNew = false, + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), None) @@ -5747,17 +6035,15 @@ class ReplicaManagerTest { val localId = 1 val topicPartition0 = new TopicIdPartition(FOO_UUID, 0, Topic.GROUP_METADATA_TOPIC_NAME) val directoryEventHandler = mock(classOf[DirectoryEventHandler]) - val aliveBrokerIds = Array(1, 2) val rm = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), localId, setupLogDirMetaProperties = true, directoryEventHandler = directoryEventHandler) val directoryIds = rm.logManager.directoryIdsSet.toList assertEquals(directoryIds.size, 2) val leaderTopicsDelta: TopicsDelta = topicsCreateDelta(localId, isStartIdLeader = true, directoryIds = directoryIds) val (partition: Partition, _) = rm.getOrCreatePartition(topicPartition0.topicPartition(), leaderTopicsDelta, FOO_UUID).get - partition.makeLeader(partitionRegistration(localId, 1, aliveBrokerIds, partitionEpoch, aliveBrokerIds), - isNew = false, - new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), - None) + partition.makeLeader(leaderAndIsrPartitionState(topicPartition0.topicPartition(), 1, localId, Seq(1, 2)), + new LazyOffsetCheckpoints(rm.highWatermarkCheckpoints.asJava), + None) def callback(responseStatus: Map[TopicPartition, DeleteRecordsResponseData.DeleteRecordsPartitionResult]): Unit = { assert(responseStatus.values.head.errorCode == Errors.NONE.code) @@ -5772,162 +6058,32 @@ class ReplicaManagerTest { ) } - @Test - def testDelayedShareFetchPurgatoryOperationExpiration(): Unit = { - val mockLogMgr = TestUtils.createLogManager(config.logDirs.asScala.map(new File(_))) - val rm = new ReplicaManager( - metrics = metrics, - config = config, - time = time, - scheduler = new MockScheduler(time), - logManager = mockLogMgr, - quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), - logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), - alterPartitionManager = alterPartitionManager) - - try { - val groupId = "grp" - val tp1 = new TopicIdPartition(Uuid.randomUuid, new TopicPartition("foo1", 0)) - val topicPartitions = util.List.of(tp1) - - val sp1 = mock(classOf[SharePartition]) - val sharePartitions = new util.LinkedHashMap[TopicIdPartition, SharePartition] - sharePartitions.put(tp1, sp1) - - val future = new CompletableFuture[util.Map[TopicIdPartition, ShareFetchResponseData.PartitionData]] - val shareFetch = new ShareFetch( - new FetchParams(FetchRequest.ORDINARY_CONSUMER_ID, -1, 500, 1, 1024 * 1024, FetchIsolation.HIGH_WATERMARK, Optional.empty, true), - groupId, - Uuid.randomUuid.toString, - future, - topicPartitions, - 500, - 100, - brokerTopicStats) - - val delayedShareFetch = spy(new DelayedShareFetch( - shareFetch, - rm, - mock(classOf[BiConsumer[SharePartitionKey, Throwable]]), - sharePartitions, - mock(classOf[ShareGroupMetrics]), - time, - 500)) - - val delayedShareFetchWatchKeys : util.List[DelayedShareFetchKey] = new util.ArrayList[DelayedShareFetchKey] - topicPartitions.forEach((topicIdPartition: TopicIdPartition) => delayedShareFetchWatchKeys.add(new DelayedShareFetchGroupKey(groupId, topicIdPartition.topicId, topicIdPartition.partition))) - - // You cannot acquire records for sp1, so request will be stored in purgatory waiting for timeout. - when(sp1.maybeAcquireFetchLock(any())).thenReturn(false) - - rm.addDelayedShareFetchRequest(delayedShareFetch = delayedShareFetch, delayedShareFetchKeys = delayedShareFetchWatchKeys) - verify(delayedShareFetch, times(0)).forceComplete() - assertEquals(1, rm.delayedShareFetchPurgatory.watched) - - // Future is not complete initially. - assertFalse(future.isDone) - // Post timeout, share fetch request will timeout and the future should complete. The timeout is set at 500ms but - // kept a buffer of additional 500ms so the task can always timeout. - waitUntilTrue(() => future.isDone, "Processing in delayed share fetch purgatory never ended.", 1000) - verify(delayedShareFetch, times(1)).forceComplete() - assertFalse(future.isCompletedExceptionally) - // Since no partition could be acquired, the future should be empty. - assertEquals(0, future.join.size) - } finally { - rm.shutdown() - } - - } - - @Test - def testAppendRecordsToLeader(): Unit = { - val localId = 0 - val foo = new TopicIdPartition(Uuid.randomUuid, 0, "foo") - val bar = new TopicIdPartition(Uuid.randomUuid, 0, "bar") - - val replicaManager = setupReplicaManagerWithMockedPurgatories( - timer = new MockTimer(time), - brokerId = localId - ) - - try { - val topicDelta = new TopicsDelta(TopicsImage.EMPTY) - topicDelta.replay(new TopicRecord() - .setName(foo.topic) - .setTopicId(foo.topicId) - ) - topicDelta.replay(new PartitionRecord() - .setTopicId(foo.topicId) - .setPartitionId(foo.partition) - .setLeader(localId) - .setLeaderEpoch(0) - .setPartitionEpoch(0) - .setReplicas(List[Integer](localId).asJava) - .setIsr(List[Integer](localId).asJava) - ) - - val metadataImage = imageFromTopics(topicDelta.apply()) - replicaManager.applyDelta(topicDelta, metadataImage) - - // Create test records. - val records = TestUtils.singletonRecords( - value = "test".getBytes, - timestamp = time.milliseconds - ) - - // Append records to both foo and bar. - val result = replicaManager.appendRecordsToLeader( - requiredAcks = 1, - internalTopicsAllowed = true, - origin = AppendOrigin.CLIENT, - entriesPerPartition = Map( - foo -> records, - bar -> records - ), - requestLocal = RequestLocal.noCaching - ) - - assertNotNull(result) - assertEquals(2, result.size) - - val fooResult = result(foo) - assertEquals(Errors.NONE, fooResult.error) - assertEquals(0, fooResult.info.logStartOffset) - assertEquals(0, fooResult.info.firstOffset) - assertEquals(0, fooResult.info.lastOffset) - - val barResult = result(bar) - assertEquals(Errors.UNKNOWN_TOPIC_OR_PARTITION, barResult.error) - assertEquals(LogAppendInfo.UNKNOWN_LOG_APPEND_INFO, barResult.info) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - - @Test - def testMonitorableReplicaSelector(): Unit = { - val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), - propsModifier = props => props.put(ReplicationConfigs.REPLICA_SELECTOR_CLASS_CONFIG, classOf[MonitorableReplicaSelector].getName)) - try { - assertTrue(replicaManager.replicaSelectorPlugin.get.get.asInstanceOf[MonitorableReplicaSelector].pluginMetrics) - } finally { - replicaManager.shutdown(checkpointHW = false) - } - } - private def readFromLogWithOffsetOutOfRange(tp: TopicPartition): Seq[(TopicIdPartition, LogReadResult)] = { val replicaManager = setupReplicaManagerWithMockedPurgatories(new MockTimer(time), aliveBrokerIds = Seq(0, 1, 2), enableRemoteStorage = true, shouldMockLog = true) try { val offsetCheckpoints = new LazyOffsetCheckpoints(replicaManager.highWatermarkCheckpoints.asJava) - replicaManager.createPartition(tp).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints = offsetCheckpoints, Some(topicId)) + replicaManager.createPartition(tp).createLogIfNotExists(isNew = false, isFutureReplica = false, offsetCheckpoints = offsetCheckpoints, None) val partition0Replicas = Seq[Integer](0, 1).asJava + val topicIds = Map(tp.topic -> topicId).asJava val leaderEpoch = 0 - val leaderDelta = createLeaderDelta(topicId, tp, leaderId = 0, leaderEpoch = leaderEpoch, replicas = partition0Replicas, isr = partition0Replicas) - val leaderMetadataImage = imageFromTopics(leaderDelta.apply()) - replicaManager.applyDelta(leaderDelta, leaderMetadataImage) - - val params = new FetchParams(-1, 1, 1000, 0, 100, FetchIsolation.HIGH_WATERMARK, Optional.empty) + val leaderAndIsrRequest = new LeaderAndIsrRequest.Builder(0, 0, brokerEpoch, + Seq( + new LeaderAndIsrRequest.PartitionState() + .setTopicName(tp.topic) + .setPartitionIndex(tp.partition) + .setControllerEpoch(0) + .setLeader(0) + .setLeaderEpoch(0) + .setIsr(partition0Replicas) + .setPartitionEpoch(0) + .setReplicas(partition0Replicas) + .setIsNew(true) + ).asJava, + topicIds, + Set(new Node(0, "host1", 0), new Node(1, "host2", 1)).asJava).build() + replicaManager.becomeLeaderOrFollower(0, leaderAndIsrRequest, (_, _) => ()) + + val params = new FetchParams(ApiKeys.FETCH.latestVersion, -1, 1, 1000, 0, 100, FetchIsolation.HIGH_WATERMARK, None.asJava) replicaManager.readFromLog( params, Seq(new TopicIdPartition(topicId, 0, topic) -> new PartitionData(topicId, 1, 0, 100000, Optional.of[Integer](leaderEpoch), Optional.of(leaderEpoch))), @@ -5981,12 +6137,3 @@ class MockReplicaSelector extends ReplicaSelector { Optional.of(partitionView.leader) } } - - -class MonitorableReplicaSelector extends MockReplicaSelector with Monitorable { - var pluginMetrics = false - - override def withPluginMetrics(metrics: PluginMetrics): Unit = { - pluginMetrics = true - } -} diff --git a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala index 42a5e8accc9c3..ae1e0a1f5e871 100644 --- a/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala +++ b/core/src/test/scala/unit/kafka/server/ReplicationQuotasTest.scala @@ -37,7 +37,9 @@ import org.apache.kafka.server.common.{Feature, MetadataVersion} import org.apache.kafka.server.config.QuotaConfig import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, Test} +import org.junit.jupiter.api.AfterEach +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.jdk.CollectionConverters._ import scala.util.Using @@ -65,13 +67,15 @@ class ReplicationQuotasTest extends QuorumTestHarness { super.tearDown() } - @Test - def shouldBootstrapTwoBrokersWithLeaderThrottle(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldBootstrapTwoBrokersWithLeaderThrottle(quorum: String): Unit = { shouldMatchQuotaReplicatingThroughAnAsymmetricTopology(true) } - @Test - def shouldBootstrapTwoBrokersWithFollowerThrottle(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldBootstrapTwoBrokersWithFollowerThrottle(quorum: String): Unit = { shouldMatchQuotaReplicatingThroughAnAsymmetricTopology(false) } @@ -190,8 +194,9 @@ class ReplicationQuotasTest extends QuorumTestHarness { def tp(partition: Int): TopicPartition = new TopicPartition(topic, partition) - @Test - def shouldThrottleOldSegments(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldThrottleOldSegments(quorum: String): Unit = { /** * Simple test which ensures throttled replication works when the dataset spans many segments */ diff --git a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala index e188889556b7c..0df8fcb70926a 100644 --- a/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala +++ b/core/src/test/scala/unit/kafka/server/RequestQuotaTest.scala @@ -43,12 +43,14 @@ import org.apache.kafka.server.authorizer.{Action, AuthorizableRequestContext, A import org.apache.kafka.server.config.{QuotaConfig, ServerConfigs} import org.apache.kafka.server.quota.QuotaType import org.junit.jupiter.api.Assertions._ -import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, TestInfo} +import org.junit.jupiter.api.{AfterEach, BeforeEach, TestInfo} +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import java.net.InetAddress import java.util import java.util.concurrent.{Executors, Future, TimeUnit} -import java.util.{Optional, Properties} +import java.util.{Collections, Optional, Properties} import scala.collection.mutable.ListBuffer import scala.jdk.CollectionConverters._ @@ -131,74 +133,32 @@ class RequestQuotaTest extends BaseRequestTest { finally super.tearDown() } - @Test - def testResponseThrottleTime(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testResponseThrottleTime(quorum: String): Unit = { for (apiKey <- clientActions ++ clusterActionsWithThrottleForBroker) submitTest(apiKey, () => checkRequestThrottleTime(apiKey)) waitAndCheckResults() } - @Test - def testResponseThrottleTimeWhenBothProduceAndRequestQuotasViolated(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testResponseThrottleTimeWhenBothProduceAndRequestQuotasViolated(quorum: String): Unit = { submitTest(ApiKeys.PRODUCE, () => checkSmallQuotaProducerRequestThrottleTime()) waitAndCheckResults() } - @Test - def testResponseThrottleTimeWhenBothFetchAndRequestQuotasViolated(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testResponseThrottleTimeWhenBothFetchAndRequestQuotasViolated(quorum: String): Unit = { submitTest(ApiKeys.FETCH, () => checkSmallQuotaConsumerRequestThrottleTime()) waitAndCheckResults() } - @Test - def testResponseThrottleTimeWhenBothShareFetchAndRequestQuotasViolated(): Unit = { - submitTest(ApiKeys.SHARE_FETCH, () => checkSmallQuotaShareFetchRequestThrottleTime()) - waitAndCheckResults() - } - - - @Test - def testShareFetchUsesSameFetchSensor(): Unit = { - // This test verifies that ShareFetch and Fetch use the same FETCH quota sensor per KIP-932 - val testClientId = "same-sensor-test-client" - - val quotaProps = new Properties() - quotaProps.put(QuotaConfig.CONSUMER_BYTE_RATE_OVERRIDE_CONFIG, "1") // Very small quota - quotaProps.put(QuotaConfig.REQUEST_PERCENTAGE_OVERRIDE_CONFIG, "0.01") // Very small request quota - changeClientIdConfig(Sanitizer.sanitize(testClientId), quotaProps) - - TestUtils.retry(20000) { - val consumeQuotaManager = brokers.head.dataPlaneRequestProcessor.quotas.fetch - assertEquals(Quota.upperBound(1), consumeQuotaManager.quota("some-user", testClientId), - s"Consumer quota override not set") - } - - // First, make a Fetch request and verify it uses FETCH quota - val fetchClient = Client(testClientId, ApiKeys.FETCH) - val fetchThrottled = fetchClient.runUntil(_.throttleTimeMs > 0) - assertTrue(fetchThrottled, "Fetch should be throttled") - - // Check quota types to verify which one is being used - val fetchThrottleTimeAfterFetch = throttleTimeMetricValueForQuotaType(testClientId, QuotaType.FETCH) - - // Now make a ShareFetch request and verify it ALSO uses FETCH quota sensor - val shareFetchClient = Client(testClientId, ApiKeys.SHARE_FETCH) - val shareFetchThrottled = shareFetchClient.runUntil(_.throttleTimeMs > 0) - assertTrue(shareFetchThrottled, "ShareFetch should be throttled") - - // Check quota types after ShareFetch - val fetchThrottleTimeAfterShareFetch = throttleTimeMetricValueForQuotaType(testClientId, QuotaType.FETCH) - - // Verify both requests use FETCH quota (not REQUEST quota) - assertTrue(!fetchThrottleTimeAfterFetch.isNaN && fetchThrottleTimeAfterFetch > 0, - s"Fetch should use FETCH quota sensor: $fetchThrottleTimeAfterFetch") - assertTrue(!fetchThrottleTimeAfterShareFetch.isNaN && fetchThrottleTimeAfterShareFetch > 0, - s"ShareFetch should use FETCH quota sensor: $fetchThrottleTimeAfterShareFetch") - } - - @Test - def testUnthrottledClient(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnthrottledClient(quorum: String): Unit = { for (apiKey <- clientActions) { submitTest(apiKey, () => checkUnthrottledClient(apiKey)) } @@ -206,8 +166,9 @@ class RequestQuotaTest extends BaseRequestTest { waitAndCheckResults() } - @Test - def testExemptRequestTime(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testExemptRequestTime(quorum: String): Unit = { // Exclude `DESCRIBE_QUORUM`, maybe it shouldn't be a cluster action val actions = clusterActions -- clusterActionsWithThrottleForBroker -- RequestQuotaTest.Envelope -- RequestQuotaTest.ShareGroupState - ApiKeys.DESCRIBE_QUORUM for (apiKey <- actions) { @@ -217,8 +178,9 @@ class RequestQuotaTest extends BaseRequestTest { waitAndCheckResults() } - @Test - def testUnauthorizedThrottle(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testUnauthorizedThrottle(quorum: String): Unit = { RequestQuotaTest.principal = RequestQuotaTest.UnauthorizedPrincipal val apiKeys = ApiKeys.brokerApis @@ -280,8 +242,8 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.PRODUCE => requests.ProduceRequest.builder(new ProduceRequestData() .setTopicData(new ProduceRequestData.TopicProduceDataCollection( - util.List.of(new ProduceRequestData.TopicProduceData() - .setTopicId(getTopicIds().get(tp.topic()).get).setPartitionData(util.List.of( + Collections.singletonList(new ProduceRequestData.TopicProduceData() + .setName(tp.topic()).setPartitionData(Collections.singletonList( new ProduceRequestData.PartitionProduceData() .setIndex(tp.partition()) .setRecords(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord("test".getBytes)))))) @@ -295,30 +257,30 @@ class RequestQuotaTest extends BaseRequestTest { FetchRequest.Builder.forConsumer(ApiKeys.FETCH.latestVersion, 0, 0, partitionMap) case ApiKeys.METADATA => - new MetadataRequest.Builder(util.List.of(topic), true) + new MetadataRequest.Builder(List(topic).asJava, true) case ApiKeys.LIST_OFFSETS => val topic = new ListOffsetsTopic() .setName(tp.topic) - .setPartitions(util.List.of(new ListOffsetsPartition() + .setPartitions(List(new ListOffsetsPartition() .setPartitionIndex(tp.partition) .setTimestamp(0L) - .setCurrentLeaderEpoch(15))) + .setCurrentLeaderEpoch(15)).asJava) ListOffsetsRequest.Builder.forConsumer(false, IsolationLevel.READ_UNCOMMITTED) - .setTargetTimes(util.List.of(topic)) + .setTargetTimes(List(topic).asJava) case ApiKeys.OFFSET_COMMIT => - OffsetCommitRequest.Builder.forTopicNames( + new OffsetCommitRequest.Builder( new OffsetCommitRequestData() .setGroupId("test-group") .setGenerationIdOrMemberEpoch(1) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) .setTopics( - util.List.of( + Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestTopic() .setName(topic) .setPartitions( - util.List.of( + Collections.singletonList( new OffsetCommitRequestData.OffsetCommitRequestPartition() .setPartitionIndex(0) .setCommittedLeaderEpoch(RecordBatch.NO_PARTITION_LEADER_EPOCH) @@ -330,25 +292,13 @@ class RequestQuotaTest extends BaseRequestTest { ) ) case ApiKeys.OFFSET_FETCH => - OffsetFetchRequest.Builder.forTopicNames( - new OffsetFetchRequestData() - .setGroups(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestGroup() - .setGroupId("test-group") - .setTopics(util.List.of( - new OffsetFetchRequestData.OffsetFetchRequestTopics() - .setName(tp.topic) - .setPartitionIndexes(util.List.of[Integer](tp.partition)) - )) - )), - false - ) + new OffsetFetchRequest.Builder(Map("test-group"-> List(tp).asJava).asJava, false, false) case ApiKeys.FIND_COORDINATOR => new FindCoordinatorRequest.Builder( new FindCoordinatorRequestData() .setKeyType(FindCoordinatorRequest.CoordinatorType.GROUP.id) - .setCoordinatorKeys(util.List.of("test-group"))) + .setCoordinatorKeys(Collections.singletonList("test-group"))) case ApiKeys.JOIN_GROUP => new JoinGroupRequest.Builder( @@ -360,7 +310,7 @@ class RequestQuotaTest extends BaseRequestTest { .setProtocolType("consumer") .setProtocols( new JoinGroupRequestProtocolCollection( - util.List.of(new JoinGroupRequestData.JoinGroupRequestProtocol() + Collections.singletonList(new JoinGroupRequestData.JoinGroupRequestProtocol() .setName("consumer-range") .setMetadata("test".getBytes())).iterator() ) @@ -379,7 +329,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.LEAVE_GROUP => new LeaveGroupRequest.Builder( "test-leave-group", - util.List.of( + Collections.singletonList( new MemberIdentity() .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID)) ) @@ -390,11 +340,11 @@ class RequestQuotaTest extends BaseRequestTest { .setGroupId("test-sync-group") .setGenerationId(1) .setMemberId(JoinGroupRequest.UNKNOWN_MEMBER_ID) - .setAssignments(util.List.of) + .setAssignments(Collections.emptyList()) ) case ApiKeys.DESCRIBE_GROUPS => - new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(util.List.of("test-group"))) + new DescribeGroupsRequest.Builder(new DescribeGroupsRequestData().setGroups(List("test-group").asJava)) case ApiKeys.LIST_GROUPS => new ListGroupsRequest.Builder(new ListGroupsRequestData()) @@ -411,23 +361,23 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.CREATE_TOPICS => new CreateTopicsRequest.Builder( new CreateTopicsRequestData().setTopics( - new CreatableTopicCollection(util.Set.of( + new CreatableTopicCollection(Collections.singleton( new CreatableTopic().setName("topic-2").setNumPartitions(1). setReplicationFactor(1.toShort)).iterator()))) case ApiKeys.DELETE_TOPICS => new DeleteTopicsRequest.Builder( new DeleteTopicsRequestData() - .setTopicNames(util.List.of("topic-2")) + .setTopicNames(Collections.singletonList("topic-2")) .setTimeoutMs(5000)) case ApiKeys.DELETE_RECORDS => new DeleteRecordsRequest.Builder( new DeleteRecordsRequestData() .setTimeoutMs(5000) - .setTopics(util.List.of(new DeleteRecordsRequestData.DeleteRecordsTopic() + .setTopics(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsTopic() .setName(tp.topic()) - .setPartitions(util.List.of(new DeleteRecordsRequestData.DeleteRecordsPartition() + .setPartitions(Collections.singletonList(new DeleteRecordsRequestData.DeleteRecordsPartition() .setPartitionIndex(tp.partition()) .setOffset(0L)))))) @@ -441,14 +391,14 @@ class RequestQuotaTest extends BaseRequestTest { val epochs = new OffsetForLeaderTopicCollection() epochs.add(new OffsetForLeaderTopic() .setTopic(tp.topic()) - .setPartitions(util.List.of(new OffsetForLeaderPartition() + .setPartitions(List(new OffsetForLeaderPartition() .setPartition(tp.partition()) .setLeaderEpoch(0) - .setCurrentLeaderEpoch(15)))) + .setCurrentLeaderEpoch(15)).asJava)) OffsetsForLeaderEpochRequest.Builder.forConsumer(epochs) case ApiKeys.ADD_PARTITIONS_TO_TXN => - AddPartitionsToTxnRequest.Builder.forClient("test-transactional-id", 1, 0, util.List.of(tp)) + AddPartitionsToTxnRequest.Builder.forClient("test-transactional-id", 1, 0, List(tp).asJava) case ApiKeys.ADD_OFFSETS_TO_TXN => new AddOffsetsToTxnRequest.Builder(new AddOffsetsToTxnRequestData() @@ -476,7 +426,7 @@ class RequestQuotaTest extends BaseRequestTest { "test-txn-group", 2, 0, - util.Map.of[TopicPartition, TxnOffsetCommitRequest.CommittedOffset], + Map.empty[TopicPartition, TxnOffsetCommitRequest.CommittedOffset].asJava, true ) @@ -484,7 +434,7 @@ class RequestQuotaTest extends BaseRequestTest { new DescribeAclsRequest.Builder(AclBindingFilter.ANY) case ApiKeys.CREATE_ACLS => - new CreateAclsRequest.Builder(new CreateAclsRequestData().setCreations(util.List.of( + new CreateAclsRequest.Builder(new CreateAclsRequestData().setCreations(Collections.singletonList( new CreateAclsRequestData.AclCreation() .setResourceType(AdminResourceType.TOPIC.code) .setResourceName("mytopic") @@ -494,7 +444,7 @@ class RequestQuotaTest extends BaseRequestTest { .setOperation(AclOperation.WRITE.code) .setPermissionType(AclPermissionType.DENY.code)))) case ApiKeys.DELETE_ACLS => - new DeleteAclsRequest.Builder(new DeleteAclsRequestData().setFilters(util.List.of( + new DeleteAclsRequest.Builder(new DeleteAclsRequestData().setFilters(Collections.singletonList( new DeleteAclsRequestData.DeleteAclsFilter() .setResourceTypeFilter(AdminResourceType.TOPIC.code) .setResourceNameFilter(null) @@ -505,14 +455,14 @@ class RequestQuotaTest extends BaseRequestTest { .setPermissionType(AclPermissionType.DENY.code)))) case ApiKeys.DESCRIBE_CONFIGS => new DescribeConfigsRequest.Builder(new DescribeConfigsRequestData() - .setResources(util.List.of(new DescribeConfigsRequestData.DescribeConfigsResource() + .setResources(Collections.singletonList(new DescribeConfigsRequestData.DescribeConfigsResource() .setResourceType(ConfigResource.Type.TOPIC.id) .setResourceName(tp.topic)))) case ApiKeys.ALTER_CONFIGS => new AlterConfigsRequest.Builder( - util.Map.of(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), - new AlterConfigsRequest.Config(util.Set.of( + Collections.singletonMap(new ConfigResource(ConfigResource.Type.TOPIC, tp.topic), + new AlterConfigsRequest.Config(Collections.singleton( new AlterConfigsRequest.ConfigEntry(TopicConfig.MAX_MESSAGE_BYTES_CONFIG, "1000000") ))), true) @@ -521,7 +471,7 @@ class RequestQuotaTest extends BaseRequestTest { .setPath(logDir) dir.topics.add(new AlterReplicaLogDirsRequestData.AlterReplicaLogDirTopic() .setName(tp.topic) - .setPartitions(util.List.of(tp.partition))) + .setPartitions(Collections.singletonList(tp.partition))) val data = new AlterReplicaLogDirsRequestData() data.dirs.add(dir) new AlterReplicaLogDirsRequest.Builder(data) @@ -530,7 +480,7 @@ class RequestQuotaTest extends BaseRequestTest { val data = new DescribeLogDirsRequestData() data.topics.add(new DescribeLogDirsRequestData.DescribableLogDirTopic() .setTopic(tp.topic) - .setPartitions(util.List.of(tp.partition))) + .setPartitions(Collections.singletonList(tp.partition))) new DescribeLogDirsRequest.Builder(data) case ApiKeys.CREATE_PARTITIONS => @@ -543,7 +493,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.CREATE_DELEGATION_TOKEN => new CreateDelegationTokenRequest.Builder( new CreateDelegationTokenRequestData() - .setRenewers(util.List.of(new CreateDelegationTokenRequestData.CreatableRenewers() + .setRenewers(Collections.singletonList(new CreateDelegationTokenRequestData.CreatableRenewers() .setPrincipalType("User") .setPrincipalName("test"))) .setMaxLifetimeMs(1000) @@ -556,7 +506,7 @@ class RequestQuotaTest extends BaseRequestTest { .setExpiryTimePeriodMs(1000L)) case ApiKeys.DESCRIBE_DELEGATION_TOKEN => - new DescribeDelegationTokenRequest.Builder(util.List.of(SecurityUtils.parseKafkaPrincipal("User:test"))) + new DescribeDelegationTokenRequest.Builder(Collections.singletonList(SecurityUtils.parseKafkaPrincipal("User:test"))) case ApiKeys.RENEW_DELEGATION_TOKEN => new RenewDelegationTokenRequest.Builder( @@ -566,12 +516,12 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DELETE_GROUPS => new DeleteGroupsRequest.Builder(new DeleteGroupsRequestData() - .setGroupsNames(util.List.of("test-group"))) + .setGroupsNames(Collections.singletonList("test-group"))) case ApiKeys.ELECT_LEADERS => new ElectLeadersRequest.Builder( ElectionType.PREFERRED, - util.List.of(new TopicPartition("my_topic", 0)), + Collections.singletonList(new TopicPartition("my_topic", 0)), 0 ) @@ -594,9 +544,9 @@ class RequestQuotaTest extends BaseRequestTest { new OffsetDeleteRequestData() .setGroupId("test-group") .setTopics(new OffsetDeleteRequestData.OffsetDeleteRequestTopicCollection( - util.List.of(new OffsetDeleteRequestData.OffsetDeleteRequestTopic() + Collections.singletonList(new OffsetDeleteRequestData.OffsetDeleteRequestTopic() .setName("test-topic") - .setPartitions(util.List.of( + .setPartitions(Collections.singletonList( new OffsetDeleteRequestData.OffsetDeleteRequestPartition() .setPartitionIndex(0)))).iterator()))) @@ -604,7 +554,7 @@ class RequestQuotaTest extends BaseRequestTest { new DescribeClientQuotasRequest.Builder(ClientQuotaFilter.all()) case ApiKeys.ALTER_CLIENT_QUOTAS => - new AlterClientQuotasRequest.Builder(util.List.of, false) + new AlterClientQuotasRequest.Builder(List.empty.asJava, false) case ApiKeys.DESCRIBE_USER_SCRAM_CREDENTIALS => new DescribeUserScramCredentialsRequest.Builder(new DescribeUserScramCredentialsRequestData()) @@ -620,7 +570,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.END_QUORUM_EPOCH => new EndQuorumEpochRequest.Builder(EndQuorumEpochRequest.singletonRequest( - tp, 10, 5, util.List.of(3))) + tp, 10, 5, Collections.singletonList(3))) case ApiKeys.DESCRIBE_QUORUM => new DescribeQuorumRequest.Builder(DescribeQuorumRequest.singletonRequest( @@ -639,7 +589,7 @@ class RequestQuotaTest extends BaseRequestTest { "client-id", 0 ) - val embedRequestData = new AlterClientQuotasRequest.Builder(util.List.of, false).build() + val embedRequestData = new AlterClientQuotasRequest.Builder(List.empty.asJava, false).build() .serializeWithHeader(requestHeader) new EnvelopeRequest.Builder(embedRequestData, new Array[Byte](0), InetAddress.getByName("192.168.1.1").getAddress) @@ -649,9 +599,9 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_PRODUCERS => new DescribeProducersRequest.Builder(new DescribeProducersRequestData() - .setTopics(util.List.of(new DescribeProducersRequestData.TopicRequest() + .setTopics(List(new DescribeProducersRequestData.TopicRequest() .setName("test-topic") - .setPartitionIndexes(util.List.of[Integer](1, 2, 3))))) + .setPartitionIndexes(List(1, 2, 3).map(Int.box).asJava)).asJava)) case ApiKeys.BROKER_REGISTRATION => new BrokerRegistrationRequest.Builder(new BrokerRegistrationRequestData()) @@ -664,7 +614,7 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.DESCRIBE_TRANSACTIONS => new DescribeTransactionsRequest.Builder(new DescribeTransactionsRequestData() - .setTransactionalIds(util.List.of("test-transactional-id"))) + .setTransactionalIds(List("test-transactional-id").asJava)) case ApiKeys.LIST_TRANSACTIONS => new ListTransactionsRequest.Builder(new ListTransactionsRequestData()) @@ -687,39 +637,23 @@ class RequestQuotaTest extends BaseRequestTest { case ApiKeys.ASSIGN_REPLICAS_TO_DIRS => new AssignReplicasToDirsRequest.Builder(new AssignReplicasToDirsRequestData()) - case ApiKeys.LIST_CONFIG_RESOURCES => - new ListConfigResourcesRequest.Builder(new ListConfigResourcesRequestData()) + case ApiKeys.LIST_CLIENT_METRICS_RESOURCES => + new ListClientMetricsResourcesRequest.Builder(new ListClientMetricsResourcesRequestData()) case ApiKeys.DESCRIBE_TOPIC_PARTITIONS => new DescribeTopicPartitionsRequest.Builder(new DescribeTopicPartitionsRequestData()) case ApiKeys.SHARE_GROUP_HEARTBEAT => - new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData()) + new ShareGroupHeartbeatRequest.Builder(new ShareGroupHeartbeatRequestData(), true) case ApiKeys.SHARE_GROUP_DESCRIBE => - new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData()) + new ShareGroupDescribeRequest.Builder(new ShareGroupDescribeRequestData(), true) case ApiKeys.SHARE_FETCH => - new ShareFetchRequest.Builder( - new ShareFetchRequestData() - .setGroupId("test-share-group") - .setMemberId(Uuid.randomUuid().toString) - .setShareSessionEpoch(0) - .setMaxWaitMs(0) - .setMinBytes(1) - .setMaxBytes(1000000) - .setTopics(new ShareFetchRequestData.FetchTopicCollection( - util.List.of(new ShareFetchRequestData.FetchTopic() - .setTopicId(getTopicIds().getOrElse(tp.topic, Uuid.ZERO_UUID)) - .setPartitions( - new ShareFetchRequestData.FetchPartitionCollection( - util.List.of(new ShareFetchRequestData.FetchPartition() - .setPartitionIndex(tp.partition) - ).iterator)) - ).iterator))) + new ShareFetchRequest.Builder(new ShareFetchRequestData(), true) case ApiKeys.SHARE_ACKNOWLEDGE => - new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData()) + new ShareAcknowledgeRequest.Builder(new ShareAcknowledgeRequestData(), true) case ApiKeys.ADD_RAFT_VOTER => new AddRaftVoterRequest.Builder(new AddRaftVoterRequestData()) @@ -731,34 +665,19 @@ class RequestQuotaTest extends BaseRequestTest { new UpdateRaftVoterRequest.Builder(new UpdateRaftVoterRequestData()) case ApiKeys.INITIALIZE_SHARE_GROUP_STATE => - new InitializeShareGroupStateRequest.Builder(new InitializeShareGroupStateRequestData()) + new InitializeShareGroupStateRequest.Builder(new InitializeShareGroupStateRequestData(), true) case ApiKeys.READ_SHARE_GROUP_STATE => - new ReadShareGroupStateRequest.Builder(new ReadShareGroupStateRequestData()) + new ReadShareGroupStateRequest.Builder(new ReadShareGroupStateRequestData(), true) case ApiKeys.WRITE_SHARE_GROUP_STATE => - new WriteShareGroupStateRequest.Builder(new WriteShareGroupStateRequestData()) + new WriteShareGroupStateRequest.Builder(new WriteShareGroupStateRequestData(), true) case ApiKeys.DELETE_SHARE_GROUP_STATE => - new DeleteShareGroupStateRequest.Builder(new DeleteShareGroupStateRequestData()) + new DeleteShareGroupStateRequest.Builder(new DeleteShareGroupStateRequestData(), true) case ApiKeys.READ_SHARE_GROUP_STATE_SUMMARY => - new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData()) - - case ApiKeys.STREAMS_GROUP_HEARTBEAT => - new StreamsGroupHeartbeatRequest.Builder(new StreamsGroupHeartbeatRequestData(), true) - - case ApiKeys.STREAMS_GROUP_DESCRIBE => - new StreamsGroupDescribeRequest.Builder(new StreamsGroupDescribeRequestData(), true) - - case ApiKeys.DESCRIBE_SHARE_GROUP_OFFSETS => - new DescribeShareGroupOffsetsRequest.Builder(new DescribeShareGroupOffsetsRequestData()) - - case ApiKeys.ALTER_SHARE_GROUP_OFFSETS => - new AlterShareGroupOffsetsRequest.Builder(new AlterShareGroupOffsetsRequestData()) - - case ApiKeys.DELETE_SHARE_GROUP_OFFSETS => - new DeleteShareGroupOffsetsRequest.Builder(new DeleteShareGroupOffsetsRequestData()) + new ReadShareGroupStateSummaryRequest.Builder(new ReadShareGroupStateSummaryRequestData(), true) case _ => throw new IllegalArgumentException("Unsupported API key " + apiKey) @@ -851,21 +770,6 @@ class RequestQuotaTest extends BaseRequestTest { s"Throttle time metrics for request quota updated: $smallQuotaConsumerClient") } - private def checkSmallQuotaShareFetchRequestThrottleTime(): Unit = { - // Request until throttled using client-id with default small consumer quota - // This test verifies ShareFetch is throttled similarly to Fetch (KIP-932) - val smallQuotaShareFetchClient = Client(smallQuotaConsumerClientId, ApiKeys.SHARE_FETCH) - val throttled = smallQuotaShareFetchClient.runUntil(_.throttleTimeMs > 0) - - assertTrue(throttled, s"ShareFetch response not throttled: $smallQuotaShareFetchClient") - // KIP-932: ShareFetch should use the same quota and sensors as Fetch - // Since the implementation uses the same quota mechanisms, we verify throttling occurs - assertTrue(throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.FETCH) > 0, - s"ShareFetch should be throttled using FETCH quota sensors: $smallQuotaShareFetchClient") - assertTrue(throttleTimeMetricValueForQuotaType(smallQuotaConsumerClientId, QuotaType.REQUEST).isNaN, - s"Throttle time metrics for request quota updated: $smallQuotaShareFetchClient") - } - private def checkUnthrottledClient(apiKey: ApiKeys): Unit = { // Test that request from client with large quota is not throttled @@ -906,16 +810,13 @@ object RequestQuotaTest { class KraftTestAuthorizer extends StandardAuthorizer { override def authorize(requestContext: AuthorizableRequestContext, actions: util.List[Action]): util.List[AuthorizationResult] = { - val results = new util.ArrayList[AuthorizationResult]() - actions.forEach(_ => { - val result = if (requestContext.principal != UnauthorizedPrincipal) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED - results.add(result) - }) - results + actions.asScala.map { _ => + if (requestContext.principal != UnauthorizedPrincipal) AuthorizationResult.ALLOWED else AuthorizationResult.DENIED + }.asJava } } - class TestPrincipalBuilder extends KafkaPrincipalBuilder { + class TestPrincipalBuilder extends KafkaPrincipalBuilder with KafkaPrincipalSerde { override def build(context: AuthenticationContext): KafkaPrincipal = { principal } diff --git a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala index 580b4a71f09b6..7f1f8b6aae358 100644 --- a/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala @@ -22,12 +22,15 @@ import org.apache.kafka.common.requests.{ApiVersionsRequest, ApiVersionsResponse import org.apache.kafka.common.security.auth.SecurityProtocol import org.apache.kafka.common.test.api.{ClusterTest, Type} import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.IntegrationTestUtils +import org.apache.kafka.common.test.junit.ClusterTestExtensions import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.extension.ExtendWith import java.net.Socket import java.util.Collections +import scala.jdk.CollectionConverters._ +@ExtendWith(value = Array(classOf[ClusterTestExtensions])) class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVersionsRequestTest(cluster) { @ClusterTest(types = Array(Type.KRAFT), @@ -35,7 +38,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestBeforeSaslHandshakeRequest(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) + val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) try { val apiVersionsResponse = IntegrationTestUtils.sendAndReceive[ApiVersionsResponse]( new ApiVersionsRequest.Builder().build(0), socket) @@ -56,7 +59,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestAfterSaslHandshakeRequest(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) + val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) try { sendSaslHandshakeRequestValidateResponse(socket) val response = IntegrationTestUtils.sendAndReceive[ApiVersionsResponse]( @@ -72,7 +75,7 @@ class SaslApiVersionsRequestTest(cluster: ClusterInstance) extends AbstractApiVe controllerSecurityProtocol = SecurityProtocol.SASL_PLAINTEXT ) def testApiVersionsRequestWithUnsupportedVersion(): Unit = { - val socket = IntegrationTestUtils.connect(cluster.brokerBoundPorts().get(0)) + val socket = IntegrationTestUtils.connect(cluster.brokerSocketServers().asScala.head, cluster.clientListener()) try { val apiVersionsRequest = new ApiVersionsRequest.Builder().build(0) val apiVersionsResponse = sendUnsupportedApiVersionRequest(apiVersionsRequest) diff --git a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala index ec1679dd6324e..cb3ebc93785b7 100644 --- a/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala +++ b/core/src/test/scala/unit/kafka/server/ServerShutdownTest.scala @@ -21,6 +21,7 @@ import kafka.utils.{CoreUtils, TestInfoUtils, TestUtils} import java.io.File import java.util.concurrent.CancellationException import kafka.integration.KafkaServerTestHarness +import kafka.log.LogManager import org.apache.kafka.clients.consumer.Consumer import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord} import org.apache.kafka.common.security.auth.SecurityProtocol @@ -28,12 +29,11 @@ import org.apache.kafka.common.serialization.{IntegerDeserializer, IntegerSerial import org.apache.kafka.common.utils.Exit import org.apache.kafka.metadata.BrokerState import org.apache.kafka.server.config.{KRaftConfigs, ServerLogConfigs} -import org.apache.kafka.storage.internals.log.LogManager -import org.junit.jupiter.api.{BeforeEach, Test, TestInfo, Timeout} +import org.junit.jupiter.api.{BeforeEach, TestInfo, Timeout} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.function.Executable import org.junit.jupiter.params.ParameterizedTest -import org.junit.jupiter.params.provider.MethodSource +import org.junit.jupiter.params.provider.{MethodSource, ValueSource} import java.time.Duration import java.util.Properties @@ -74,9 +74,9 @@ class ServerShutdownTest extends KafkaServerTestHarness { super.setUp(testInfo) } - @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedGroupProtocolNames) - @MethodSource(Array("getTestGroupProtocolParametersAll")) - def testCleanShutdown(groupProtocol: String): Unit = { + @ParameterizedTest(name = TestInfoUtils.TestWithParameterizedQuorumAndGroupProtocolNames) + @MethodSource(Array("getTestQuorumAndGroupProtocolParametersAll")) + def testCleanShutdown(quorum: String, groupProtocol: String): Unit = { def createProducer(): KafkaProducer[Integer, String] = TestUtils.createProducer( @@ -104,8 +104,8 @@ class ServerShutdownTest extends KafkaServerTestHarness { // do a clean shutdown and check that offset checkpoint file exists shutdownBroker() - for (logDir <- config.logDirs.asScala) { - val OffsetCheckpointFile = new File(logDir, LogManager.RECOVERY_POINT_CHECKPOINT_FILE) + for (logDir <- config.logDirs) { + val OffsetCheckpointFile = new File(logDir, LogManager.RecoveryPointCheckpointFile) assertTrue(OffsetCheckpointFile.exists) assertTrue(OffsetCheckpointFile.length() > 0) } @@ -134,19 +134,21 @@ class ServerShutdownTest extends KafkaServerTestHarness { producer.close() } - @Test - def testCleanShutdownAfterFailedStartup(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testCleanShutdownAfterFailedStartup(quorum: String): Unit = { propsToChangeUponRestart.setProperty(KRaftConfigs.INITIAL_BROKER_REGISTRATION_TIMEOUT_MS_CONFIG, "1000") shutdownBroker() shutdownKRaftController() verifyCleanShutdownAfterFailedStartup[CancellationException] } - @Test - def testNoCleanShutdownAfterFailedStartupDueToCorruptLogs(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testNoCleanShutdownAfterFailedStartupDueToCorruptLogs(quorum: String): Unit = { createTopic(topic) shutdownBroker() - config.logDirs.forEach { dirName => + config.logDirs.foreach { dirName => val partitionDir = new File(dirName, s"$topic-0") partitionDir.listFiles.foreach(f => TestUtils.appendNonsenseToFile(f, TestUtils.random.nextInt(1024) + 1)) } @@ -172,8 +174,9 @@ class ServerShutdownTest extends KafkaServerTestHarness { } } - @Test - def testShutdownWithKRaftControllerUnavailable(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testShutdownWithKRaftControllerUnavailable(quorum: String): Unit = { shutdownKRaftController() killBroker(0, Duration.ofSeconds(1)) CoreUtils.delete(broker.config.logDirs) @@ -217,8 +220,9 @@ class ServerShutdownTest extends KafkaServerTestHarness { .count(isNonDaemonKafkaThread)) } - @Test - def testConsecutiveShutdown(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def testConsecutiveShutdown(quorum: String): Unit = { shutdownBroker() brokers.head.shutdown() } diff --git a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala index 27938114517a9..9cae66ffbff50 100644 --- a/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala @@ -17,81 +17,76 @@ package kafka.server import kafka.utils.TestUtils -import org.apache.kafka.clients.admin.DescribeShareGroupsOptions -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, ClusterTests, Type} +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, ClusterTests, Type} import org.apache.kafka.common.message.ShareFetchResponseData.AcquiredRecords -import org.apache.kafka.common.message.{FindCoordinatorRequestData, ShareAcknowledgeRequestData, ShareAcknowledgeResponseData, ShareFetchRequestData, ShareFetchResponseData, ShareGroupHeartbeatRequestData} +import org.apache.kafka.common.message.{ShareAcknowledgeRequestData, ShareAcknowledgeResponseData, ShareFetchRequestData, ShareFetchResponseData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} -import org.apache.kafka.common.requests.{FindCoordinatorRequest, FindCoordinatorResponse, ShareAcknowledgeRequest, ShareAcknowledgeResponse, ShareFetchRequest, ShareFetchResponse, ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse, ShareRequestMetadata} +import org.apache.kafka.common.requests.{ShareAcknowledgeRequest, ShareAcknowledgeResponse, ShareFetchRequest, ShareFetchResponse, ShareRequestMetadata} import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.common.Feature -import org.apache.kafka.server.IntegrationTestUtils import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue} -import org.junit.jupiter.api.{AfterEach, Timeout} +import org.junit.jupiter.api.{AfterEach, Tag, Timeout} -import java.net.Socket import java.util +import java.util.Collections +import scala.collection.convert.ImplicitConversions.`list asScalaBuffer` +import scala.jdk.CollectionConverters._ @Timeout(1200) @ClusterTestDefaults(types = Array(Type.KRAFT), brokers = 1, serverProperties = Array( new ClusterConfigProperty(key = "group.share.persister.class.name", value = "") )) -class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - +@Tag("integration") +class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster){ + + private final val MAX_PARTITION_BYTES = 10000 private final val MAX_WAIT_MS = 5000 - private final val GROUP_ID = "group" - private final val TOPIC = "topic" - private final val PARTITION = 0 - private final val MEMBER_ID = Uuid.randomUuid() @AfterEach def tearDown(): Unit = { - closeProducer() - closeSockets() + closeProducer } @ClusterTest( - features = Array( - new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) + serverProperties = Array( + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ) def testShareFetchRequestIsInAccessibleWhenConfigsDisabled(): Unit = { + val groupId: String = "group" val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - val send = util.List.of( + val send: Seq[TopicIdPartition] = Seq( new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 0)), new TopicIdPartition(Uuid.randomUuid(), new TopicPartition("topic1", 1)) ) - val socket: Socket = connectAny() - - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) - assertEquals(Errors.UNSUPPORTED_VERSION.code, shareFetchResponse.data.errorCode) - assertEquals(0, shareFetchResponse.data.acquisitionLockTimeoutMs) + assertEquals(Errors.UNSUPPORTED_VERSION.code(), shareFetchResponse.data().errorCode()) } @ClusterTest( - features = Array( - new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) + serverProperties = Array( + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ) def testShareAcknowledgeRequestIsInAccessibleWhenConfigsDisabled(): Unit = { + val groupId: String = "group" val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) - val socket: Socket = connectAny() - - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, util.Map.of) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, Map.empty) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) - assertEquals(Errors.UNSUPPORTED_VERSION.code, shareAcknowledgeResponse.data.errorCode) + assertEquals(Errors.UNSUPPORTED_VERSION.code(), shareAcknowledgeResponse.data().errorCode()) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ), @@ -99,45 +94,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ), brokers = 2 ), ) ) def testShareFetchRequestToNonLeaderReplica(): Unit = { - val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) + val groupId: String = "group" + val metadata: ShareRequestMetadata = new ShareRequestMetadata(Uuid.randomUuid(), ShareRequestMetadata.INITIAL_EPOCH) + + val topic = "topic" + val partition = 0 // Create a single-partition topic and find a broker which is not the leader - val partitionToLeader = createTopicAndReturnLeaders(TOPIC) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - val topicNames = { - val map = new java.util.LinkedHashMap[Uuid, String]() - topicIds.forEach((k, v) => map.put(v, k)) // swap key and value - map - } + val partitionToLeader = createTopicAndReturnLeaders(topic) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + val topicNames = topicIds.asScala.map(_.swap).asJava val leader = partitionToLeader(topicIdPartition) val nonReplicaOpt = getBrokers.find(_.config.brokerId != leader) assertTrue(nonReplicaOpt.isDefined) - val nonReplicaId = nonReplicaOpt.get.config.brokerId - - val send = util.List.of(topicIdPartition) - - val socket: Socket = connect(nonReplicaId) + val nonReplicaId = nonReplicaOpt.get.config.brokerId - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 1)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the share fetch request to the non-replica and verify the error code - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) - assertEquals(30000, shareFetchResponse.data.acquisitionLockTimeoutMs) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest, nonReplicaId) val partitionData = shareFetchResponse.responseData(topicNames).get(topicIdPartition) assertEquals(Errors.NOT_LEADER_OR_FOLLOWER.code, partitionData.errorCode) assertEquals(leader, partitionData.currentLeader().leaderId()) @@ -147,61 +139,65 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ) ) ) def testShareFetchRequestSuccess(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) + val groupId: String = "group" + val memberId = Uuid.randomUuid() + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } @@ -209,38 +205,43 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ) ) ) def testShareFetchRequestSuccessMultiplePartitions(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 0)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 1)) - val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 2)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition1, topicIdPartition2, topicIdPartition3) + val topic = "topic" - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, 0)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, 1)) + val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(topic, 2)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition1, topicIdPartition2, topicIdPartition3) // Send the first share fetch request to initialize the share partitions - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic partitions created above @@ -249,23 +250,22 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo produceData(topicIdPartition3, 10) // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) + val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. var responses = Seq[ShareFetchResponseData.PartitionData]() TestUtils.waitUntilTrue(() => { - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - val partitionsCount = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() + val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() if (partitionsCount > 0) { - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - shareFetchResponseData.responses().stream().findFirst().get().partitions().forEach(partitionData => { + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + shareFetchResponseData.responses().get(0).partitions().foreach(partitionData => { if (!partitionData.acquiredRecords().isEmpty) { responses = responses :+ partitionData } @@ -278,19 +278,19 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) val expectedPartitionData2 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) val expectedPartitionData3 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) responses.foreach(partitionData => { partitionData.partitionIndex() match { @@ -305,6 +305,8 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ), @@ -312,51 +314,52 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ), brokers = 3 ), ) ) def testShareFetchRequestSuccessMultiplePartitionsMultipleBrokers(): Unit = { - val partitionToLeaders = createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 0)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 1)) - val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, 2)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() + + val topic = "topic" + + val partitionToLeaders = createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, 0)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, 1)) + val topicIdPartition3 = new TopicIdPartition(topicId, new TopicPartition(topic, 2)) val leader1 = partitionToLeaders(topicIdPartition1) val leader2 = partitionToLeaders(topicIdPartition2) val leader3 = partitionToLeaders(topicIdPartition3) - val send1 = util.List.of(topicIdPartition1) - val send2 = util.List.of(topicIdPartition2) - val send3 = util.List.of(topicIdPartition3) - - val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - - val socket1: Socket = connect(leader1) - val socket2: Socket = connect(leader2) - val socket3: Socket = connect(leader3) + val send1: Seq[TopicIdPartition] = Seq(topicIdPartition1) + val send2: Seq[TopicIdPartition] = Seq(topicIdPartition2) + val send3: Seq[TopicIdPartition] = Seq(topicIdPartition3) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty // Send the first share fetch request to initialize the share partitions // Create different share fetch requests for different partitions as they may have leaders on separate brokers - var shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata, send1, util.List.of, acknowledgementsMap) - var shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata, send2, util.List.of, acknowledgementsMap) - var shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata, send3, util.List.of, acknowledgementsMap) + var shareFetchRequest1 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send1, Seq.empty, acknowledgementsMap) + var shareFetchRequest2 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send2, Seq.empty, acknowledgementsMap) + var shareFetchRequest3 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send3, Seq.empty, acknowledgementsMap) - var shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) - var shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) - var shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) + var shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) + var shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) + var shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3, destination = leader3) initProducer() // Producing 10 records to the topic partitions created above @@ -366,54 +369,52 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above // Create different share fetch requests for different partitions as they may have leaders on separate brokers - shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata, send1, util.List.of, acknowledgementsMap) - shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata, send2, util.List.of, acknowledgementsMap) - shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata, send3, util.List.of, acknowledgementsMap) + shareFetchRequest1 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send1, Seq.empty, acknowledgementsMap) + shareFetchRequest2 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send2, Seq.empty, acknowledgementsMap) + shareFetchRequest3 = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send3, Seq.empty, acknowledgementsMap) - shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) - shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) - shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) + shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1, destination = leader1) + shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2, destination = leader2) + shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3, destination = leader3) val shareFetchResponseData1 = shareFetchResponse1.data() assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) - assertEquals(30000, shareFetchResponseData1.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData1.responses().size()) - assertEquals(topicId, shareFetchResponseData1.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData1.responses().stream().findFirst().get().partitions().size()) - val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) + assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) + val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) val shareFetchResponseData2 = shareFetchResponse2.data() assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) - assertEquals(30000, shareFetchResponseData2.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData2.responses().size()) - assertEquals(topicId, shareFetchResponseData2.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData2.responses().stream().findFirst().get().partitions().size()) - val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) + assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) + val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) val shareFetchResponseData3 = shareFetchResponse3.data() assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) assertEquals(1, shareFetchResponseData3.responses().size()) - assertEquals(topicId, shareFetchResponseData3.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData3.responses().stream().findFirst().get().partitions().size()) - val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) + assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData3.responses().get(0).partitions().size()) + val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) val expectedPartitionData1 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(0) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) val expectedPartitionData2 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(1) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) val expectedPartitionData3 = new ShareFetchResponseData.PartitionData() .setPartitionIndex(2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) compareFetchResponsePartitions(expectedPartitionData1, partitionData1) compareFetchResponsePartitions(expectedPartitionData2, partitionData2) @@ -424,36 +425,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeRequestSuccessAccept(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize share partitions - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -461,49 +468,48 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 more records to the topic @@ -511,24 +517,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -536,38 +541,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000") + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "group.share.record.lock.duration.ms", value = "15000") + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestPiggybackedAccept(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket, 15000) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -575,25 +584,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch: Int = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -601,28 +609,27 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic @@ -630,24 +637,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(15000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(20), util.List.of(29), util.List.of(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been acknowledged before and 10 to 19 are currently acquired + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(20), Collections.singletonList(29), Collections.singletonList(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been acknowledged before and 10 to 19 are currently acquired - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -655,36 +661,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeRequestSuccessRelease(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partiion - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -692,71 +704,69 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -764,36 +774,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestPiggybackedRelease(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -801,64 +817,62 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above produceData(topicIdPartition, 10) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0L, 10L), util.List.of(9L, 19L), util.List.of(2, 1))) + .setAcquiredRecords(expectedAcquiredRecords(List(0L, 10L).asJava, List(9L, 19L).asJava, List(2, 1).asJava)) - val acquiredRecords = new util.ArrayList[AcquiredRecords]() + val acquiredRecords : util.List[AcquiredRecords] = new util.ArrayList[AcquiredRecords]() var releaseAcknowledgementSent = false TestUtils.waitUntilTrue(() => { shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) if (releaseAcknowledgementSent) { // For fourth share fetch request onwards - acknowledgementsMapForFetch = util.Map.of + acknowledgementsMapForFetch = Map.empty } else { // Send a third Share Fetch request with piggybacked acknowledgements - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records + .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records releaseAcknowledgementSent = true } - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - val responseSize = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + val responseSize = shareFetchResponseData.responses().get(0).partitions().size() if (responseSize > 0) { - acquiredRecords.addAll(shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0).acquiredRecords()) + acquiredRecords.addAll(shareFetchResponseData.responses().get(0).partitions().get(0).acquiredRecords()) } // There should be 2 acquired record batches finally - // 1. batch containing 0-9 offsets which were initially acknowledged as RELEASED. @@ -877,36 +891,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeRequestSuccessReject(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -914,49 +934,48 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(3.toByte)))) // Reject the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(3.toByte))).asJava) // Reject the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 more records to the topic @@ -964,24 +983,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been rejected + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only the records from offset 10 onwards should be fetched because records at offsets 0-9 have been rejected - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -989,36 +1007,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestPiggybackedReject(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1026,25 +1050,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1052,28 +1075,27 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(3.toByte)))) // Reject the records - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(3.toByte))).asJava) // Reject the records + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been rejected + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been rejected - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic @@ -1081,24 +1103,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to confirm if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(20), util.List.of(29), util.List.of(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been rejected before and 10 to 19 are currently acquired + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(20), Collections.singletonList(29), Collections.singletonList(1))) // Only the records from offset 20 onwards should be fetched because records at offsets 0-9 have been rejected before and 10 to 19 are currently acquired - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -1106,6 +1127,8 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.delivery.count.limit", value = "2") // Setting max delivery count config to 2 @@ -1113,31 +1136,35 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true"), new ClusterConfigProperty(key = "group.share.delivery.count.limit", value = "2") // Setting max delivery count config to 2 ) ), ) ) def testShareAcknowledgeRequestMaxDeliveryAttemptExhausted(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the shar partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1145,94 +1172,92 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records - var shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - var shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records + var shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + var shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) var shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) var expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - var acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + var acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Sending a third share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(2))) // Records at offsets 0 to 9 should be fetched again because they were released with delivery count as 2 - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Send a Share Acknowledge request to acknowledge the fetched records shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForAcknowledge = util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(2.toByte)))) // Release the records again - shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForAcknowledge = Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(2.toByte))).asJava) // Release the records again + shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) // Producing 10 new records to the topic @@ -1240,24 +1265,23 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending a fourth share fetch request to check if acknowledgements were done successfully shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // Only new records from offset 10 to 19 will be fetched, records at offsets 0 to 9 have been archived because delivery count limit has been exceeded + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // Only new records from offset 10 to 19 will be fetched, records at offsets 0 to 9 have been archived because delivery count limit has been exceeded - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) } @@ -1265,108 +1289,192 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") + ) + ), + ) + ) + def testShareFetchBrokerRespectsPartitionsSizeLimit(): Unit = { + val groupId: String = "group" + val memberId = Uuid.randomUuid() + + val topic = "topic" + val partition = 0 + + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) + + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) + + // Send the first share fetch request to initialize the share partition + sendFirstShareFetchRequest(memberId, groupId, send) + + initProducer() + // Producing 3 large messages to the topic created above + produceData(topicIdPartition, 10) + produceData(topicIdPartition, "large message 1", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) + produceData(topicIdPartition, "large message 2", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) + produceData(topicIdPartition, "large message 3", new String(new Array[Byte](MAX_PARTITION_BYTES/3))) + + // Send the second share fetch request to fetch the records produced above + val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) + + val shareFetchResponseData = shareFetchResponse.data() + assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) + assertEquals(1, shareFetchResponseData.responses().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + + val expectedPartitionData = new ShareFetchResponseData.PartitionData() + .setPartitionIndex(partition) + .setErrorCode(Errors.NONE.code()) + .setAcknowledgeErrorCode(Errors.NONE.code()) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(11), Collections.singletonList(1))) + // The first 10 records will be consumed as it is. For the last 3 records, each of size MAX_PARTITION_BYTES/3, + // only 2 of then will be consumed (offsets 10 and 11) because the inclusion of the third last record will exceed + // the max partition bytes limit + + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) + compareFetchResponsePartitions(expectedPartitionData, partitionData) + } + + @ClusterTests( + Array( + new ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), + new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") + ) + ), + new ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), + new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), + new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestSuccessfulSharingBetweenMultipleConsumers(): Unit = { + val groupId: String = "group" + + val memberId = Uuid.randomUuid() val memberId1 = Uuid.randomUuid() val memberId2 = Uuid.randomUuid() val memberId3 = Uuid.randomUuid() - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket1: Socket = connectAny() - val socket2: Socket = connectAny() - val socket3: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(memberId1, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) - shareHeartbeat(memberId2, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) - shareHeartbeat(memberId3, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Sending a dummy share fetch request to initialize the share partition - sendFirstShareFetchRequest(memberId1, GROUP_ID, send, socket1) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() - // Producing 1 record to the topic created above - produceData(topicIdPartition, 1) + // Producing 10000 records to the topic created above + produceData(topicIdPartition, 10000) - // Sending a share Fetch Request + // Sending 3 share Fetch Requests with same groupId to the same topicPartition but with different memberIds, + // mocking the behaviour of multiple share consumers from the same share group val metadata1: ShareRequestMetadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap1 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest1 = createShareFetchRequest(GROUP_ID, metadata1, send, util.List.of, acknowledgementsMap1, minBytes = 100, maxBytes = 1500) - val shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) + val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest1 = createShareFetchRequest(groupId, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1) + + val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest2 = createShareFetchRequest(groupId, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2) + + val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest3 = createShareFetchRequest(groupId, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3) + + val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) + val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) + val shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3) + + val shareFetchResponseData1 = shareFetchResponse1.data() - val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) + assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) + assertEquals(1, shareFetchResponseData1.responses().size()) + assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) - // Producing 1 record to the topic created above - produceData(topicIdPartition, 1) + val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) - // Sending another share Fetch Request with same groupId to the same topicPartition but with different memberId, - // mocking the behaviour of multiple share consumers from the same share group - val metadata2: ShareRequestMetadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap2 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest2 = createShareFetchRequest(GROUP_ID, metadata2, send, util.List.of, acknowledgementsMap2, minBytes = 100, maxBytes = 1500) - val shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) val shareFetchResponseData2 = shareFetchResponse2.data() - val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) + assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) + assertEquals(1, shareFetchResponseData2.responses().size()) + assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) - // Producing 1 record to the topic created above - produceData(topicIdPartition, 1) + val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) - // Sending another share Fetch Request with same groupId to the same topicPartition but with different memberId, - // mocking the behaviour of multiple share consumers from the same share group - val metadata3: ShareRequestMetadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap3 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest3 = createShareFetchRequest(GROUP_ID, metadata3, send, util.List.of, acknowledgementsMap3, minBytes = 100, maxBytes = 1500) - val shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) val shareFetchResponseData3 = shareFetchResponse3.data() - val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) - - // Each consumer should have received 1 record and any record should only be consumed by 1 consumer - assertEquals(partitionData1.acquiredRecords().get(0).firstOffset(), partitionData1.acquiredRecords().get(0).lastOffset()) - assertEquals(partitionData1.acquiredRecords().get(0).firstOffset(), 0) - - assertEquals(partitionData2.acquiredRecords().get(0).firstOffset(), partitionData2.acquiredRecords().get(0).lastOffset()) - assertEquals(partitionData2.acquiredRecords().get(0).firstOffset(), 1) + assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) + assertEquals(1, shareFetchResponseData3.responses().size()) + assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData3.responses().get(0).partitions().size()) - assertEquals(partitionData3.acquiredRecords().get(0).firstOffset(), partitionData3.acquiredRecords().get(0).lastOffset()) - assertEquals(partitionData3.acquiredRecords().get(0).firstOffset(), 2) + val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) + // There should be no common records between the 3 consumers as they are part of the same group + assertTrue(partitionData1.acquiredRecords().get(0).lastOffset() < partitionData2.acquiredRecords().get(0).firstOffset()) + assertTrue(partitionData2.acquiredRecords().get(0).lastOffset() < partitionData3.acquiredRecords().get(0).firstOffset()) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) @@ -1380,26 +1488,20 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo val memberId2 = Uuid.randomUuid() val memberId3 = Uuid.randomUuid() - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket1: Socket = connectAny() - val socket2: Socket = connectAny() - val socket3: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(memberId1, groupId1, util.Map.of[String, Int](TOPIC, 3)) - shareHeartbeat(memberId2, groupId2, util.Map.of[String, Int](TOPIC, 3)) - shareHeartbeat(memberId3, groupId3, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) - // Sending 3 dummy share Fetch Requests with to initialize the share partitions for each share group\ - sendFirstShareFetchRequest(memberId1, groupId1, send, socket1) - sendFirstShareFetchRequest(memberId2, groupId2, send, socket2) - sendFirstShareFetchRequest(memberId3, groupId3, send, socket3) + // Sending 3 dummy share Fetch Requests with to inititlaize the share partitions for each share group\ + sendFirstShareFetchRequest(memberId1, groupId1, send) + sendFirstShareFetchRequest(memberId2, groupId2, send) + sendFirstShareFetchRequest(memberId3, groupId3, send) initProducer() // Producing 10 records to the topic created above @@ -1408,86 +1510,92 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Sending 3 share Fetch Requests with different groupId and different memberIds to the same topicPartition, // mocking the behaviour of 3 different share groups val metadata1 = new ShareRequestMetadata(memberId1, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap1 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, send, util.List.of, acknowledgementsMap1) + val acknowledgementsMap1: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest1 = createShareFetchRequest(groupId1, metadata1, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap1) val metadata2 = new ShareRequestMetadata(memberId2, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap2 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, send, util.List.of, acknowledgementsMap2) + val acknowledgementsMap2: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest2 = createShareFetchRequest(groupId2, metadata2, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap2) val metadata3 = new ShareRequestMetadata(memberId3, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap3 = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, send, util.List.of, acknowledgementsMap3) + val acknowledgementsMap3: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + val shareFetchRequest3 = createShareFetchRequest(groupId3, metadata3, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap3) - val shareFetchResponse1 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest1, socket1) - val shareFetchResponse2 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest2, socket2) - val shareFetchResponse3 = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest3, socket3) + val shareFetchResponse1 = connectAndReceive[ShareFetchResponse](shareFetchRequest1) + val shareFetchResponse2 = connectAndReceive[ShareFetchResponse](shareFetchRequest2) + val shareFetchResponse3 = connectAndReceive[ShareFetchResponse](shareFetchRequest3) val shareFetchResponseData1 = shareFetchResponse1.data() assertEquals(Errors.NONE.code, shareFetchResponseData1.errorCode) assertEquals(1, shareFetchResponseData1.responses().size()) - assertEquals(topicId, shareFetchResponseData1.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData1.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData1.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData1.responses().get(0).partitions().size()) - val partitionData1 = shareFetchResponseData1.responses().stream().findFirst().get().partitions().get(0) + val partitionData1 = shareFetchResponseData1.responses().get(0).partitions().get(0) val shareFetchResponseData2 = shareFetchResponse2.data() assertEquals(Errors.NONE.code, shareFetchResponseData2.errorCode) assertEquals(1, shareFetchResponseData2.responses().size()) - assertEquals(topicId, shareFetchResponseData2.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData2.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData2.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData2.responses().get(0).partitions().size()) - val partitionData2 = shareFetchResponseData2.responses().stream().findFirst().get().partitions().get(0) + val partitionData2 = shareFetchResponseData2.responses().get(0).partitions().get(0) val shareFetchResponseData3 = shareFetchResponse3.data() assertEquals(Errors.NONE.code, shareFetchResponseData3.errorCode) assertEquals(1, shareFetchResponseData3.responses().size()) - assertEquals(topicId, shareFetchResponseData3.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData3.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData3.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData3.responses().get(0).partitions().size()) - val partitionData3 = shareFetchResponseData3.responses().stream().findFirst().get().partitions().get(0) + val partitionData3 = shareFetchResponseData3.responses().get(0).partitions().get(0) // All the consumers should consume all the records since they are part of different groups - assertEquals(partitionData1.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - assertEquals(partitionData2.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) - assertEquals(partitionData3.acquiredRecords(), expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + assertEquals(partitionData1.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + assertEquals(partitionData2.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) + assertEquals(partitionData3.acquiredRecords(), expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareSessionCloseWithShareFetch(): Unit = { - createTopicAndReturnLeaders(TOPIC) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1495,25 +1603,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1521,43 +1628,41 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Sending a final fetch request to close the session shareSessionEpoch = ShareRequestMetadata.FINAL_EPOCH - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(10) .setLastOffset(19) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + shareFetchRequest = createShareFetchRequest(groupId, metadata, 0, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(0, shareFetchResponseData.responses().size()) // responses list will be empty because there are no responses for the final fetch request } @@ -1565,36 +1670,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareSessionCloseWithShareAcknowledge(): Unit = { - createTopicAndReturnLeaders(TOPIC) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1602,25 +1713,24 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var acknowledgementsMapForFetch = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var acknowledgementsMapForFetch: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) var expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - var fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + var fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Producing 10 more records to the topic created above @@ -1628,52 +1738,51 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send a third Share Fetch request with piggybacked acknowledgements shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - acknowledgementsMapForFetch = util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + acknowledgementsMapForFetch = Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMapForFetch) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMapForFetch) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) expectedFetchPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) // The records at offsets 0 to 9 will not be re fetched because they have been acknowledged - fetchPartitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + fetchPartitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedFetchPartitionData, fetchPartitionData) // Sending a Share Acknowledge request to close the session shareSessionEpoch = ShareRequestMetadata.FINAL_EPOCH - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMapForAcknowledge: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(10) - .setLastOffset(19) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Accept the records - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMapForAcknowledge) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMapForAcknowledge: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(10) + .setLastOffset(19) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Accept the records + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMapForAcknowledge) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.NONE.code, shareAcknowledgeResponseData.errorCode) assertEquals(1, shareAcknowledgeResponseData.responses().size()) - assertEquals(topicId, shareAcknowledgeResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareAcknowledgeResponseData.responses().get(0).topicId()) + assertEquals(1, shareAcknowledgeResponseData.responses().get(0).partitions().size()) val expectedAcknowledgePartitionData = new ShareAcknowledgeResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) - val acknowledgePartitionData = shareAcknowledgeResponseData.responses().stream().findFirst().get().partitions().get(0) + val acknowledgePartitionData = shareAcknowledgeResponseData.responses().get(0).partitions().get(0) compareAcknowledgeResponsePartitions(expectedAcknowledgePartitionData, acknowledgePartitionData) } @@ -1681,87 +1790,104 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchInitialEpochWithAcknowledgements(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() + + val topic = "topic" + val partition = 0 + + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) initProducer() // Producing 10 records to the topic created above produceData(topicIdPartition, 10) - val send = util.List.of(topicIdPartition) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) - val socket: Socket = connectAny() - - val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareFetchRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) // Acknowledgements in the Initial Fetch Request - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareFetchRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) // Acknowledgements in the Initial Fetch Request + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() // The response will have a top level error code because this is an Initial Fetch request with acknowledgement data present assertEquals(Errors.INVALID_REQUEST.code(), shareFetchResponseData.errorCode) - assertEquals(0, shareFetchResponse.data.acquisitionLockTimeoutMs) } @ClusterTests( Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeInitialRequestError(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val socket: Socket = connectAny() + val topic = "topic" + val partition = 0 + + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) // Send the share fetch request to fetch the records produced above - val metadata: ShareRequestMetadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.INITIAL_EPOCH) - val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, - util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + val metadata: ShareRequestMetadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> + List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareAcknowledgeResponseData.errorCode) @@ -1771,36 +1897,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestInvalidShareSessionEpoch(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1808,31 +1940,30 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) // Sending a thord Share Fetch request with invalid share session epoch shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(shareSessionEpoch)) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareFetchResponseData.errorCode) @@ -1842,36 +1973,42 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeRequestInvalidShareSessionEpoch(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val groupId: String = "group" + val memberId = Uuid.randomUuid() - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1879,36 +2016,35 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) // Sending Share Acknowledge request with invalid share session epoch shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.nextEpoch(shareSessionEpoch)) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() - .setFirstOffset(0) - .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() + .setFirstOffset(0) + .setLastOffset(9) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.INVALID_SHARE_SESSION_EPOCH.code, shareAcknowledgeResponseData.errorCode) @@ -1918,38 +2054,43 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestShareSessionNotFound(): Unit = { + val groupId: String = "group" + val memberId = Uuid.randomUuid() val wrongMemberId = Uuid.randomUuid() - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) + val topic = "topic" + val partition = 0 - val send = util.List.of(topicIdPartition) + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - val socket: Socket = connectAny() - - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -1957,30 +2098,30 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - var shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + var shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) var shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) + val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) - // Sending a third Share Fetch request with wrong memberId + // Sending a third Share Fetch request with wrong member Id shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(wrongMemberId, shareSessionEpoch) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.SHARE_SESSION_NOT_FOUND.code, shareFetchResponseData.errorCode) @@ -1990,122 +2131,43 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.max.share.sessions", value = "2"), - new ClusterConfigProperty(key = "group.share.max.size", value = "2") - ) - ), - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "group.share.max.share.sessions", value = "2"), - new ClusterConfigProperty(key = "group.share.max.size", value = "2") - ) - ), - ) - ) - def testShareSessionEvictedOnConnectionDrop(): Unit = { - val memberId1 = Uuid.randomUuid() - val memberId2 = Uuid.randomUuid() - val memberId3 = Uuid.randomUuid() - - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) - - val socket1: Socket = connectAny() - val socket2: Socket = connectAny() - val socket3: Socket = connectAny() - - // member1 sends share fetch request to register its share session. Note it does not close the socket connection after. - TestUtils.waitUntilTrue(() => { - val metadata = new ShareRequestMetadata(memberId1, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket1) - val shareFetchResponseData = shareFetchResponse.data() - shareFetchResponseData.errorCode == Errors.NONE.code - }, "Share fetch request failed", 5000) - - // member2 sends share fetch request to register its share session. Note it does not close the socket connection after. - TestUtils.waitUntilTrue(() => { - val metadata = new ShareRequestMetadata(memberId2, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket2) - val shareFetchResponseData = shareFetchResponse.data() - shareFetchResponseData.errorCode == Errors.NONE.code - }, "Share fetch request failed", 5000) - - // member3 sends share fetch request to register its share session. Since the maximum number of share sessions that could - // exist in the share session cache is 2 (group.share.max.share.sessions), the attempt to register a third - // share session with the ShareSessionCache would throw SHARE_SESSION_LIMIT_REACHED - TestUtils.waitUntilTrue(() => { - val metadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket3) - val shareFetchResponseData = shareFetchResponse.data() - shareFetchResponseData.errorCode == Errors.SHARE_SESSION_LIMIT_REACHED.code - }, "Share fetch request failed", 5000) - - // Now we will close the socket connections for the members, mimicking a client disconnection - closeSockets() - - val socket4: Socket = connectAny() - - // Since one of the socket connections was closed before, the corresponding share session was dropped from the ShareSessionCache - // on the broker. Now, since the cache is not full, new share sessions can be registered - TestUtils.waitUntilTrue(() => { - val metadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket4) - val shareFetchResponseData = shareFetchResponse.data() - shareFetchResponseData.errorCode == Errors.NONE.code - }, "Share fetch request failed", 5000) - } - - @ClusterTests( - Array( - new ClusterTest( - serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareAcknowledgeRequestShareSessionNotFound(): Unit = { + val groupId: String = "group" + val memberId = Uuid.randomUuid() val wrongMemberId = Uuid.randomUuid() - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) + val topic = "topic" + val partition = 0 - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(topic, partition)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic created above @@ -2113,36 +2175,35 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) + .setPartitionIndex(partition) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(9), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(0), Collections.singletonList(9), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) - // Sending a Share Acknowledge request with wrong memberId + // Sending a Share Acknowledge request with wrong member Id shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) metadata = new ShareRequestMetadata(wrongMemberId, shareSessionEpoch) - val acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = - util.Map.of(topicIdPartition, util.List.of(new ShareAcknowledgeRequestData.AcknowledgementBatch() + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] = + Map(topicIdPartition -> List(new ShareAcknowledgeRequestData.AcknowledgementBatch() .setFirstOffset(0) .setLastOffset(9) - .setAcknowledgeTypes(util.List.of(1.toByte)))) - val shareAcknowledgeRequest = createShareAcknowledgeRequest(GROUP_ID, metadata, acknowledgementsMap) - val shareAcknowledgeResponse = IntegrationTestUtils.sendAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest, socket) + .setAcknowledgeTypes(Collections.singletonList(1.toByte))).asJava) + val shareAcknowledgeRequest = createShareAcknowledgeRequest(groupId, metadata, acknowledgementsMap) + val shareAcknowledgeResponse = connectAndReceive[ShareAcknowledgeResponse](shareAcknowledgeRequest) val shareAcknowledgeResponseData = shareAcknowledgeResponse.data() assertEquals(Errors.SHARE_SESSION_NOT_FOUND.code, shareAcknowledgeResponseData.errorCode) @@ -2152,40 +2213,44 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo Array( new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") ) ), new ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.rebalance.protocols", value = "classic,consumer,share"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") + new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1"), + new ClusterConfigProperty(key = "unstable.api.versions.enable", value = "true") ) ), ) ) def testShareFetchRequestForgetTopicPartitions(): Unit = { + val groupId: String = "group" + val memberId = Uuid.randomUuid() + + val topic = "topic1" val partition1 = 0 val partition2 = 1 - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, partition1)) - val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(TOPIC, partition2)) - - val send = util.List.of(topicIdPartition1, topicIdPartition2) - - val socket: Socket = connectAny() + createTopicAndReturnLeaders(topic, numPartitions = 3) + val topicIds = getTopicIds.asJava + val topicId = topicIds.get(topic) + val topicIdPartition1 = new TopicIdPartition(topicId, new TopicPartition(topic, partition1)) + val topicIdPartition2 = new TopicIdPartition(topicId, new TopicPartition(topic, partition2)) - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) + val send: Seq[TopicIdPartition] = Seq(topicIdPartition1, topicIdPartition2) // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) + sendFirstShareFetchRequest(memberId, groupId, send) initProducer() // Producing 10 records to the topic partitions created above @@ -2194,23 +2259,22 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send the second share fetch request to fetch the records produced above var shareSessionEpoch = ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH) - var metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - var shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap) + var metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] = Map.empty + var shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, send, Seq.empty, acknowledgementsMap) // For the multi partition fetch request, the response may not be available in the first attempt // as the share partitions might not be initialized yet. So, we retry until we get the response. var responses = Seq[ShareFetchResponseData.PartitionData]() TestUtils.waitUntilTrue(() => { - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - val partitionsCount = shareFetchResponseData.responses().stream().findFirst().get().partitions().size() + val partitionsCount = shareFetchResponseData.responses().get(0).partitions().size() if (partitionsCount > 0) { - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - shareFetchResponseData.responses().stream().findFirst().get().partitions().forEach(partitionData => { + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + shareFetchResponseData.responses().get(0).partitions().foreach(partitionData => { if (!partitionData.acquiredRecords().isEmpty) { responses = responses :+ partitionData } @@ -2225,165 +2289,39 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo // Send another share fetch request with forget list populated with topicIdPartition2 shareSessionEpoch = ShareRequestMetadata.nextEpoch(shareSessionEpoch) - metadata = new ShareRequestMetadata(MEMBER_ID, shareSessionEpoch) - val forget = util.List.of(topicIdPartition1) - shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, util.List.of, forget, acknowledgementsMap) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + metadata = new ShareRequestMetadata(memberId, shareSessionEpoch) + val forget: Seq[TopicIdPartition] = Seq(topicIdPartition1) + shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, Seq.empty, forget, acknowledgementsMap) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) assertEquals(1, shareFetchResponseData.responses().size()) - assertEquals(topicId, shareFetchResponseData.responses().stream().findFirst().get().topicId()) - assertEquals(1, shareFetchResponseData.responses().stream().findFirst().get().partitions().size()) + assertEquals(topicId, shareFetchResponseData.responses().get(0).topicId()) + assertEquals(1, shareFetchResponseData.responses().get(0).partitions().size()) val expectedPartitionData = new ShareFetchResponseData.PartitionData() .setPartitionIndex(partition2) .setErrorCode(Errors.NONE.code()) .setAcknowledgeErrorCode(Errors.NONE.code()) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(10), util.List.of(19), util.List.of(1))) + .setAcquiredRecords(expectedAcquiredRecords(Collections.singletonList(10), Collections.singletonList(19), Collections.singletonList(1))) - val partitionData = shareFetchResponseData.responses().stream().findFirst().get().partitions().get(0) - compareFetchResponsePartitions(expectedPartitionData, partitionData) - } - - @ClusterTests( - Array( - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") - ) - ), - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") - ) - ) - ) - ) - def testShareFetchRequestWithMaxRecordsAndBatchSize(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) - - val socket: Socket = connectAny() - - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) - - // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) - - initProducer() - // Producing 10 records to the topic created above - produceData(topicIdPartition, 10) - - // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap, maxRecords = 1, batchSize = 1) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) - - val shareFetchResponseData = shareFetchResponse.data - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) - assertEquals(1, shareFetchResponseData.responses.size) - assertEquals(topicId, shareFetchResponseData.responses.stream().findFirst().get().topicId) - assertEquals(1, shareFetchResponseData.responses.stream().findFirst().get().partitions.size) - - val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) - .setErrorCode(Errors.NONE.code) - .setAcknowledgeErrorCode(Errors.NONE.code) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0), util.List.of(0), util.List.of(1))) - - val partitionData = shareFetchResponseData.responses.stream().findFirst().get().partitions.get(0) - compareFetchResponsePartitions(expectedPartitionData, partitionData) - } - - @ClusterTests( - Array( - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") - ) - ), - new ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.persister.class.name", value = "org.apache.kafka.server.share.persister.DefaultStatePersister"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "share.coordinator.state.topic.num.partitions", value = "1") - ) - ) - ) - ) - def testShareFetchRequestMultipleBatchesWithMaxRecordsAndBatchSize(): Unit = { - createTopicAndReturnLeaders(TOPIC, numPartitions = 3) - val topicIds = getTopicIds - val topicId = topicIds.get(TOPIC) - val topicIdPartition = new TopicIdPartition(topicId, new TopicPartition(TOPIC, PARTITION)) - - val send = util.List.of(topicIdPartition) - - val socket: Socket = connectAny() - - createOffsetsTopic() - shareHeartbeat(MEMBER_ID, GROUP_ID, util.Map.of[String, Int](TOPIC, 3)) - - // Send the first share fetch request to initialize the share partition - sendFirstShareFetchRequest(MEMBER_ID, GROUP_ID, send, socket) - - initProducer() - // Producing 10 records to the topic created above - produceData(topicIdPartition, 10) - - // Send the second share fetch request to fetch the records produced above - val metadata = new ShareRequestMetadata(MEMBER_ID, ShareRequestMetadata.nextEpoch(ShareRequestMetadata.INITIAL_EPOCH)) - val acknowledgementsMap = util.Map.of[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]] - val shareFetchRequest = createShareFetchRequest(GROUP_ID, metadata, send, util.List.of, acknowledgementsMap, maxRecords = 5, batchSize = 1) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) - - val shareFetchResponseData = shareFetchResponse.data - assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(30000, shareFetchResponseData.acquisitionLockTimeoutMs) - assertEquals(1, shareFetchResponseData.responses.size) - assertEquals(topicId, shareFetchResponseData.responses.stream().findFirst().get().topicId) - assertEquals(1, shareFetchResponseData.responses.stream().findFirst().get().partitions.size) - - val expectedPartitionData = new ShareFetchResponseData.PartitionData() - .setPartitionIndex(PARTITION) - .setErrorCode(Errors.NONE.code) - .setAcknowledgeErrorCode(Errors.NONE.code) - .setAcquiredRecords(expectedAcquiredRecords(util.List.of(0, 1, 2, 3, 4), util.List.of(0, 1, 2, 3, 4), util.List.of(1, 1, 1, 1, 1))) - - val partitionData = shareFetchResponseData.responses.stream().findFirst().get().partitions.get(0) + val partitionData = shareFetchResponseData.responses().get(0).partitions().get(0) compareFetchResponsePartitions(expectedPartitionData, partitionData) } // For initial fetch request, the response may not be available in the first attempt when the share // partition is not initialized yet. Hence, wait for response from all partitions before proceeding. - private def sendFirstShareFetchRequest(memberId: Uuid, groupId: String, topicIdPartitions: util.List[TopicIdPartition], socket: Socket, lockTimeout: Int = 30000): Unit = { + private def sendFirstShareFetchRequest(memberId: Uuid, groupId: String, topicIdPartitions: Seq[TopicIdPartition]): Unit = { val partitions: util.Set[Integer] = new util.HashSet() TestUtils.waitUntilTrue(() => { val metadata = new ShareRequestMetadata(memberId, ShareRequestMetadata.INITIAL_EPOCH) - val shareFetchRequest = createShareFetchRequest(groupId, metadata, topicIdPartitions, util.List.of, util.Map.of) - val shareFetchResponse = IntegrationTestUtils.sendAndReceive[ShareFetchResponse](shareFetchRequest, socket) + val shareFetchRequest = createShareFetchRequest(groupId, metadata, MAX_PARTITION_BYTES, topicIdPartitions, Seq.empty, Map.empty) + val shareFetchResponse = connectAndReceive[ShareFetchResponse](shareFetchRequest) val shareFetchResponseData = shareFetchResponse.data() assertEquals(Errors.NONE.code, shareFetchResponseData.errorCode) - assertEquals(lockTimeout, shareFetchResponseData.acquisitionLockTimeoutMs) - shareFetchResponseData.responses().forEach(response => { + shareFetchResponseData.responses().foreach(response => { if (!response.partitions().isEmpty) { response.partitions().forEach(partitionData => partitions.add(partitionData.partitionIndex)) } @@ -2393,52 +2331,9 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo }, "Share fetch request failed", 5000) } - private def shareHeartbeat(memberId: Uuid, groupId: String, topics: util.Map[String, Int]): Unit = { - val coordResp = connectAndReceive[FindCoordinatorResponse](new FindCoordinatorRequest.Builder(new FindCoordinatorRequestData() - .setKey(groupId) - .setKeyType(0.toByte) - ).build(0) - ) - - val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setMemberId(memberId.toString) - .setGroupId(groupId) - .setMemberEpoch(0) - .setSubscribedTopicNames(new java.util.ArrayList[String](topics.keySet())) - ).build() - - TestUtils.waitUntilTrue(() => { - val resp = connectAndReceive[ShareGroupHeartbeatResponse](shareGroupHeartbeatRequest, coordResp.node().id()) - resp.data().errorCode() == Errors.NONE.code() && assignment(memberId.toString, groupId) - }, "Heartbeat failed") - } - - private def assignment(memberId: String, groupId: String): Boolean = { - val admin = cluster.admin() - - val members = admin - .describeShareGroups(util.List.of(groupId), new DescribeShareGroupsOptions().includeAuthorizedOperations(true)) - .describedGroups() - .get(groupId) - .get() - .members() - - var isAssigned = false - val iter = members.iterator() - while (iter.hasNext && !isAssigned) { - val desc = iter.next() - if (desc.consumerId() == memberId && !desc.assignment().topicPartitions().isEmpty) - isAssigned = true - } - - admin.close() - isAssigned - } - private def expectedAcquiredRecords(firstOffsets: util.List[Long], lastOffsets: util.List[Long], deliveryCounts: util.List[Int]): util.List[AcquiredRecords] = { val acquiredRecordsList: util.List[AcquiredRecords] = new util.ArrayList() - for (i <- 0 until firstOffsets.size()) { + for (i <- firstOffsets.indices) { acquiredRecordsList.add(new AcquiredRecords() .setFirstOffset(firstOffsets.get(i)) .setLastOffset(lastOffsets.get(i)) @@ -2451,7 +2346,7 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo actualResponse: ShareFetchResponseData.PartitionData): Unit = { assertEquals(expectedResponse.partitionIndex, actualResponse.partitionIndex) assertEquals(expectedResponse.errorCode, actualResponse.errorCode) - assertEquals(expectedResponse.errorMessage, actualResponse.errorMessage) + assertEquals(expectedResponse.errorCode, actualResponse.errorCode) assertEquals(expectedResponse.acknowledgeErrorCode, actualResponse.acknowledgeErrorCode) assertEquals(expectedResponse.acquiredRecords, actualResponse.acquiredRecords) } @@ -2464,23 +2359,21 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo private def createShareFetchRequest(groupId: String, metadata: ShareRequestMetadata, - send: util.List[TopicIdPartition], - forget: util.List[TopicIdPartition], - acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], + maxPartitionBytes: Int, + send: Seq[TopicIdPartition], + forget: Seq[TopicIdPartition], + acknowledgementsMap: Map[TopicIdPartition, util.List[ShareFetchRequestData.AcknowledgementBatch]], maxWaitMs: Int = MAX_WAIT_MS, minBytes: Int = 0, - maxBytes: Int = Int.MaxValue, - maxRecords: Int = 500, - batchSize: Int = 500): ShareFetchRequest = { - ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxRecords, batchSize, send, forget, acknowledgementsMap) + maxBytes: Int = Int.MaxValue): ShareFetchRequest = { + ShareFetchRequest.Builder.forConsumer(groupId, metadata, maxWaitMs, minBytes, maxBytes, maxPartitionBytes, send.asJava, forget.asJava, acknowledgementsMap.asJava) .build() } - - private def createShareAcknowledgeRequest(groupId: String, + + private def createShareAcknowledgeRequest(groupId: String, metadata: ShareRequestMetadata, - acknowledgementsMap: util.Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]] - ): ShareAcknowledgeRequest = { - ShareAcknowledgeRequest.Builder.forConsumer(groupId, metadata, acknowledgementsMap) + acknowledgementsMap: Map[TopicIdPartition, util.List[ShareAcknowledgeRequestData.AcknowledgementBatch]]): ShareAcknowledgeRequest = { + ShareAcknowledgeRequest.Builder.forConsumer(groupId, metadata, acknowledgementsMap.asJava) .build() } } diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala index 408f31db8d15f..cf297198e39e2 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupDescribeRequestTest.scala @@ -29,9 +29,9 @@ import org.apache.kafka.common.utils.Utils import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.group.modern.share.ShareGroupConfig import org.apache.kafka.security.authorizer.AclEntry -import org.apache.kafka.server.common.Feature +import org.apache.kafka.server.config.ServerConfigs import org.junit.jupiter.api.Assertions.assertEquals -import org.junit.jupiter.api.Timeout +import org.junit.jupiter.api.{Tag, Timeout} import java.lang.{Byte => JByte} import scala.jdk.CollectionConverters._ @@ -40,16 +40,18 @@ import scala.jdk.CollectionConverters._ @ClusterTestDefaults(types = Array(Type.KRAFT), brokers = 1, serverProperties = Array( new ClusterConfigProperty(key = ShareGroupConfig.SHARE_GROUP_PERSISTER_CLASS_NAME_CONFIG, value = "") )) +@Tag("integration") class ShareGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest( - features = Array( - new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) + serverProperties = Array( + new ClusterConfigProperty(key = ServerConfigs.UNSTABLE_API_VERSIONS_ENABLE_CONFIG, value = "true") ) ) def testShareGroupDescribeIsInAccessibleWhenConfigsDisabled(): Unit = { val shareGroupDescribeRequest = new ShareGroupDescribeRequest.Builder( - new ShareGroupDescribeRequestData().setGroupIds(List("grp-1", "grp-2").asJava) + new ShareGroupDescribeRequestData().setGroupIds(List("grp-1", "grp-2").asJava), + true ).build(ApiKeys.SHARE_GROUP_DESCRIBE.latestVersion(isUnstableApiEnabled)) val shareGroupDescribeResponse = connectAndReceive[ShareGroupDescribeResponse](shareGroupDescribeRequest) @@ -70,8 +72,10 @@ class ShareGroupDescribeRequestTest(cluster: ClusterInstance) extends GroupCoord @ClusterTest( serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic,consumer,share"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = ShareGroupConfig.SHARE_GROUP_ENABLE_CONFIG, value = "true"), ) ) def testShareGroupDescribe(): Unit = { diff --git a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala index b05a97fe119c2..07c7b959ab81c 100644 --- a/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/ShareGroupHeartbeatRequestTest.scala @@ -16,37 +16,33 @@ */ package kafka.server -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterFeature, ClusterTest, ClusterTestDefaults, Type} +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import kafka.utils.TestUtils +import kafka.utils.TestUtils.waitForAllPartitionsMetadata import org.apache.kafka.clients.admin.{Admin, NewPartitions} -import org.apache.kafka.common.Uuid +import org.apache.kafka.common.{TopicPartition, Uuid} import org.apache.kafka.common.message.{ShareGroupHeartbeatRequestData, ShareGroupHeartbeatResponseData} import org.apache.kafka.common.protocol.Errors import org.apache.kafka.common.requests.{ShareGroupHeartbeatRequest, ShareGroupHeartbeatResponse} import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.server.common.Feature -import org.apache.kafka.server.IntegrationTestUtils; import org.junit.jupiter.api.Assertions.{assertEquals, assertNotEquals, assertNotNull, assertNull, assertTrue} -import org.junit.jupiter.api.Timeout +import org.junit.jupiter.api.{Tag, Timeout} - -import java.util import scala.jdk.CollectionConverters._ @Timeout(120) @ClusterTestDefaults(types = Array(Type.KRAFT), brokers = 1, serverProperties = Array( new ClusterConfigProperty(key = "group.share.persister.class.name", value = "") )) +@Tag("integration") class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { @ClusterTest( - features = Array( - new ClusterFeature(feature = Feature.SHARE_VERSION, version = 0) - ) + types = Array(Type.KRAFT) ) def testShareGroupHeartbeatIsInAccessibleWhenConfigsDisabled(): Unit = { val shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() + new ShareGroupHeartbeatRequestData(), true ).build() val shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -55,7 +51,10 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( + types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -78,7 +77,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay @@ -106,7 +106,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch) + .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), + true ).build() // This is the expected assignment. here @@ -124,7 +125,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) // Leave the group. @@ -132,7 +133,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(shareGroupHeartbeatResponse.data.memberId) - .setMemberEpoch(-1) + .setMemberEpoch(-1), + true ).build() shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -145,7 +147,10 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( + types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -168,7 +173,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay @@ -191,7 +197,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the second member request until receiving a successful response. @@ -209,70 +216,57 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { assertNotEquals(memberId1, memberId2) // Create the topic. - TestUtils.createTopicWithAdminRaw( + val topicId = TestUtils.createTopicWithAdminRaw( admin = admin, topic = "foo", numPartitions = 3 ) + // This is the expected assignment. + val expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() + .setTopicPartitions(List(new ShareGroupHeartbeatResponseData.TopicPartitions() + .setTopicId(topicId) + .setPartitions(List[Integer](0, 1, 2).asJava)).asJava) + // Prepare the next heartbeat for member 1. shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId1) - .setMemberEpoch(1) + .setMemberEpoch(1), + true ).build() // Heartbeats until the partitions are assigned for member 1. shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - if (shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && shareGroupHeartbeatResponse.data().assignment() != null) { - true - } else { - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId1) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch()) - ).build() - false - } + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - val topicPartitionsAssignedToMember1 = shareGroupHeartbeatResponse.data.assignment.topicPartitions() // Verify the response. - assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) // Prepare the next heartbeat for member 2. shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId2) - .setMemberEpoch(2) + .setMemberEpoch(2), + true ).build() // Heartbeats until the partitions are assigned for member 2. shareGroupHeartbeatResponse = null TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && shareGroupHeartbeatResponse.data.assignment != null + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") - val topicPartitionsAssignedToMember2 = shareGroupHeartbeatResponse.data.assignment.topicPartitions() // Verify the response. - assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) - - val partitionsAssigned: util.Set[Integer] = new util.HashSet[Integer]() - topicPartitionsAssignedToMember1.forEach(topicPartition => { - partitionsAssigned.addAll(topicPartition.partitions()) - }) - topicPartitionsAssignedToMember2.forEach(topicPartition => { - partitionsAssigned.addAll(topicPartition.partitions()) - }) - // Verify all the 3 topic partitions for "foo" have been assigned to at least 1 member. - assertEquals(util.Set.of(0, 1, 2), partitionsAssigned) + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) // Verify the assignments are not changed for member 1. // Prepare another heartbeat for member 1 with latest received epoch 3 for member 1. @@ -280,7 +274,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId1) - .setMemberEpoch(3) + .setMemberEpoch(3), + true ).build() // Heartbeats until the response for no change of assignment occurs for member 1 with same epoch. @@ -292,14 +287,17 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) } finally { admin.close() } } @ClusterTest( + types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -322,7 +320,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay @@ -357,7 +356,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1) + .setMemberEpoch(1), + true ).build() TestUtils.waitUntilTrue(() => { @@ -367,14 +367,15 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) // Member leaves the group. shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberEpoch(-1) - .setMemberId(memberId) + .setMemberId(memberId), + true ).build() // Send the member request until receiving a successful response. @@ -392,13 +393,14 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberEpoch(0) .setMemberId(memberId) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) // Verify the response for member 1. - assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) assertEquals(memberId, shareGroupHeartbeatResponse.data.memberId) // Partition assignment remains intact on rejoining. assertEquals(expectedAssignment, shareGroupHeartbeatResponse.data.assignment) @@ -408,7 +410,10 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( + types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -429,7 +434,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar", "baz").asJava) + .setSubscribedTopicNames(List("foo", "bar", "baz").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay // here because the group coordinator is loaded in the background. @@ -469,11 +475,12 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1) + .setMemberEpoch(1), + true ).build() - cluster.waitTopicCreation("foo", 2) - cluster.waitTopicCreation("bar", 3) + cluster.waitForTopic("foo", 2) + cluster.waitForTopic("bar", 3) TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) @@ -483,7 +490,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) }, msg = s"Could not get partitions for topic foo and bar assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) // Create the topic baz. val bazTopicId = TestUtils.createTopicWithAdminRaw( admin = admin, @@ -507,7 +514,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(3) + .setMemberEpoch(2), + true ).build() TestUtils.waitUntilTrue(() => { @@ -518,9 +526,9 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) }, msg = s"Could not get partitions for topic baz assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) // Increasing the partitions of topic bar which is already being consumed in the share group. - increasePartitions(admin, "bar", 6) + increasePartitions(admin, "bar", 6, Seq.empty) expectedAssignment = new ShareGroupHeartbeatResponseData.Assignment() .setTopicPartitions(List( @@ -538,7 +546,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(5) + .setMemberEpoch(3), + true ).build() TestUtils.waitUntilTrue(() => { @@ -549,7 +558,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) }, msg = s"Could not update partitions assignment for topic bar. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(7, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(4, shareGroupHeartbeatResponse.data.memberEpoch) // Delete the topic foo. TestUtils.deleteTopicWithAdmin( admin = admin, @@ -571,7 +580,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(7) + .setMemberEpoch(4), + true ).build() TestUtils.waitUntilTrue(() => { @@ -582,80 +592,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) }, msg = s"Could not update partitions assignment for topic foo. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(8, shareGroupHeartbeatResponse.data.memberEpoch) - } finally { - admin.close() - } - } - - @ClusterTest( - serverProperties = Array( - new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), - new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), - new ClusterConfigProperty(key = "group.share.max.size", value = "2") - )) - def testShareGroupMaxSizeConfigExceeded(): Unit = { - val groupId: String = "group" - val memberId1 = Uuid.randomUuid() - val memberId2 = Uuid.randomUuid() - val memberId3 = Uuid.randomUuid() - - val admin = cluster.admin() - - // Creates the __consumer_offsets topics because it won't be created automatically - // in this test because it does not use FindCoordinator API. - try { - TestUtils.createOffsetsTopicWithAdmin( - admin = admin, - brokers = cluster.brokers.values().asScala.toSeq, - controllers = cluster.controllers().values().asScala.toSeq - ) - - // Heartbeat request to join the group by the first member (memberId1). - var shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(memberId1.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) - ).build() - - // Send the request until receiving a successful response. There is a delay - // here because the group coordinator is loaded in the background. - var shareGroupHeartbeatResponse: ShareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Heartbeat request to join the group by the second member (memberId2). - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(memberId2.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) - ).build() - - // Send the request until receiving a successful response - TestUtils.waitUntilTrue(() => { - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code - }, msg = s"Could not join the group successfully. Last response $shareGroupHeartbeatResponse.") - - // Heartbeat request to join the group by the third member (memberId3). - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId(groupId) - .setMemberId(memberId3.toString) - .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) - ).build() - - shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - // Since the group.share.max.size config is set to 2, a third member cannot join the same group. - assertEquals(shareGroupHeartbeatResponse.data.errorCode, Errors.GROUP_MAX_SIZE_REACHED.code) - + assertEquals(5, shareGroupHeartbeatResponse.data.memberEpoch) } finally { admin.close() } @@ -664,6 +601,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { @ClusterTest( types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1"), new ClusterConfigProperty(key = "group.share.heartbeat.interval.ms", value = "500"), @@ -690,7 +629,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay @@ -726,7 +666,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch) + .setMemberEpoch(memberEpoch), + true ).build() TestUtils.waitUntilTrue(() => { @@ -745,7 +686,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(memberId) .setMemberEpoch(memberEpoch) - .setSubscribedTopicNames(List("foo", "bar").asJava) + .setSubscribedTopicNames(List("foo", "bar").asJava), + true ).build() val barId = TestUtils.createTopicWithAdminRaw( @@ -762,23 +704,12 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setTopicId(barId) .setPartitions(List[Integer](0).asJava)).asJava) - shareGroupHeartbeatResponse = null - TestUtils.waitUntilTrue(() => { shareGroupHeartbeatResponse = connectAndReceive(shareGroupHeartbeatRequest) - if (shareGroupHeartbeatResponse.data.assignment != null && + shareGroupHeartbeatResponse.data.errorCode == Errors.NONE.code && + shareGroupHeartbeatResponse.data.assignment != null && expectedAssignment.topicPartitions.containsAll(shareGroupHeartbeatResponse.data.assignment.topicPartitions) && - shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions)) { - true - } else { - shareGroupHeartbeatRequest = new ShareGroupHeartbeatRequest.Builder( - new ShareGroupHeartbeatRequestData() - .setGroupId("grp") - .setMemberId(memberId) - .setMemberEpoch(shareGroupHeartbeatResponse.data.memberEpoch), - ).build() - false - } + shareGroupHeartbeatResponse.data.assignment.topicPartitions.containsAll(expectedAssignment.topicPartitions) }, msg = s"Could not get bar partitions assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response, the epoch should have been bumped. @@ -790,7 +721,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch) + .setMemberEpoch(memberEpoch), + true ).build() TestUtils.waitUntilTrue(() => { @@ -809,7 +741,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(memberEpoch) + .setMemberEpoch(memberEpoch), + true ).build() TestUtils.waitUntilTrue(() => { @@ -823,7 +756,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(memberId) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo", "bar").asJava) + .setSubscribedTopicNames(List("foo", "bar").asJava), + true ).build() TestUtils.waitUntilTrue(() => { @@ -842,7 +776,10 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } @ClusterTest( + types = Array(Type.KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = "group.coordinator.new.enable", value = "true"), + new ClusterConfigProperty(key = "group.share.enable", value = "true"), new ClusterConfigProperty(key = "offsets.topic.num.partitions", value = "1"), new ClusterConfigProperty(key = "offsets.topic.replication.factor", value = "1") )) @@ -863,7 +800,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { .setGroupId("grp") .setMemberId(Uuid.randomUuid.toString) .setMemberEpoch(0) - .setSubscribedTopicNames(List("foo").asJava) + .setSubscribedTopicNames(List("foo").asJava), + true ).build() // Send the request until receiving a successful response. There is a delay // here because the group coordinator is loaded in the background. @@ -893,7 +831,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(1) + .setMemberEpoch(1), + true ).build() TestUtils.waitUntilTrue(() => { @@ -902,7 +841,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { shareGroupHeartbeatResponse.data.assignment == expectedAssignment }, msg = s"Could not get partitions assigned. Last response $shareGroupHeartbeatResponse.") // Verify the response. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) // Restart the only running broker. val broker = cluster.brokers().values().iterator().next() @@ -914,7 +853,8 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { new ShareGroupHeartbeatRequestData() .setGroupId("grp") .setMemberId(memberId) - .setMemberEpoch(2) + .setMemberEpoch(2), + true ).build() // Should receive no error and no assignment changes. @@ -925,7 +865,7 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { // Verify the response. Epoch should not have changed and null assignments determines that no // change in old assignment. - assertEquals(3, shareGroupHeartbeatResponse.data.memberEpoch) + assertEquals(2, shareGroupHeartbeatResponse.data.memberEpoch) assertNull(shareGroupHeartbeatResponse.data.assignment) } finally { admin.close() @@ -933,14 +873,29 @@ class ShareGroupHeartbeatRequestTest(cluster: ClusterInstance) { } private def connectAndReceive(request: ShareGroupHeartbeatRequest): ShareGroupHeartbeatResponse = { - IntegrationTestUtils.connectAndReceive[ShareGroupHeartbeatResponse](request, cluster.brokerBoundPorts().get(0)) + IntegrationTestUtils.connectAndReceive[ShareGroupHeartbeatResponse]( + request, + cluster.anyBrokerSocketServer(), + cluster.clientListener() + ) } private def increasePartitions[B <: KafkaBroker](admin: Admin, topic: String, - totalPartitionCount: Int + totalPartitionCount: Int, + brokersToValidate: Seq[B] ): Unit = { val newPartitionSet: Map[String, NewPartitions] = Map.apply(topic -> NewPartitions.increaseTo(totalPartitionCount)) admin.createPartitions(newPartitionSet.asJava) + + if (brokersToValidate.nonEmpty) { + // wait until we've propagated all partitions metadata to all brokers + val allPartitionsMetadata = waitForAllPartitionsMetadata(brokersToValidate, topic, totalPartitionCount) + (0 until totalPartitionCount - 1).foreach(i => { + allPartitionsMetadata.get(new TopicPartition(topic, i)).foreach { partitionMetadata => + assertEquals(totalPartitionCount, partitionMetadata.isr.size) + } + }) + } } } diff --git a/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala b/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala index 09ed807db8ecb..3a53fbf144aba 100644 --- a/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/SyncGroupRequestTest.scala @@ -31,17 +31,29 @@ import scala.concurrent.ExecutionContext.Implicits.global import scala.concurrent.duration.Duration import scala.concurrent.{Await, Future} -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +@ClusterTestDefaults(types = Array(Type.KRAFT)) +class SyncGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { + @ClusterTest(serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") - ) -) -class SyncGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { - @ClusterTest - def testSyncGroupWithOldConsumerGroupProtocol(): Unit = { + )) + def testSyncGroupWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { + testSyncGroup() + } + + @ClusterTest(types = Array(Type.KRAFT, Type.CO_KRAFT), serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_INITIAL_REBALANCE_DELAY_MS_CONFIG, value = "1000") + )) + def testSyncGroupWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testSyncGroup() + } + + private def testSyncGroup(): Unit = { // Creates the __consumer_offsets topics because it won't be created automatically // in this test because it does not use FindCoordinator API. createOffsetsTopic() @@ -64,17 +76,6 @@ class SyncGroupRequestTest(cluster: ClusterInstance) extends GroupCoordinatorBas version = version.toShort ) - // Sync with empty group id. - verifySyncGroupWithOldProtocol( - groupId = "", - memberId = "member-id", - generationId = -1, - expectedProtocolType = null, - expectedProtocolName = null, - expectedError = Errors.INVALID_GROUP_ID, - version = version.toShort - ) - val metadata = ConsumerProtocol.serializeSubscription( new ConsumerPartitionAssignor.Subscription(Collections.singletonList("foo")) ).array diff --git a/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala b/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala index 2e2b32ee5ba3a..6d8adef1f8bf9 100644 --- a/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala +++ b/core/src/test/scala/unit/kafka/server/ThrottledChannelExpirationTest.scala @@ -23,7 +23,7 @@ import java.util.concurrent.DelayQueue import org.apache.kafka.common.metrics.MetricConfig import org.apache.kafka.common.utils.MockTime import org.apache.kafka.server.config.ClientQuotaManagerConfig -import org.apache.kafka.server.quota.{ClientQuotaManager, QuotaType, ThrottleCallback, ThrottledChannel} +import org.apache.kafka.server.quota.{QuotaType, ThrottleCallback, ThrottledChannel} import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{BeforeEach, Test} diff --git a/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala b/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala index f9314e5d409aa..139aeb053ffea 100644 --- a/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala +++ b/core/src/test/scala/unit/kafka/server/TierStateMachineTest.scala @@ -22,7 +22,6 @@ import org.apache.kafka.common.message.FetchResponseData import org.apache.kafka.common.protocol.ApiKeys import org.apache.kafka.common.record._ import org.apache.kafka.common.{TopicPartition, Uuid} -import org.apache.kafka.server.{PartitionFetchState, ReplicaState} import org.junit.jupiter.api.Assertions._ import kafka.server.FetcherThreadTestUtils.{initialFetchState, mkBatch} import org.junit.jupiter.params.ParameterizedTest @@ -68,7 +67,7 @@ class TierStateMachineTest { fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) assertEquals(3L, replicaState.logEndOffset) - val expectedState = if (truncateOnFetch) Option(ReplicaState.FETCHING) else Option(ReplicaState.TRUNCATING) + val expectedState = if (truncateOnFetch) Option(Fetching) else Option(Truncating) assertEquals(expectedState, fetcher.fetchState(partition).map(_.state)) fetcher.doWork() @@ -129,7 +128,7 @@ class TierStateMachineTest { fetcher.mockLeader.setReplicaPartitionStateCallback(fetcher.replicaPartitionState) assertEquals(3L, replicaState.logEndOffset) - val expectedState = if (truncateOnFetch) Option(ReplicaState.FETCHING) else Option(ReplicaState.TRUNCATING) + val expectedState = if (truncateOnFetch) Option(Fetching) else Option(Truncating) assertEquals(expectedState, fetcher.fetchState(partition).map(_.state)) fetcher.doWork() @@ -163,9 +162,7 @@ class TierStateMachineTest { var isErrorHandled = false val mockLeaderEndpoint = new MockLeaderEndPoint(truncateOnFetch = truncateOnFetch, version = version) val mockTierStateMachine = new MockTierStateMachine(mockLeaderEndpoint) { - override def start(topicPartition: TopicPartition, - currentFetchState: PartitionFetchState, - fetchPartitionData: FetchResponseData.PartitionData): PartitionFetchState = { + override def start(topicPartition: TopicPartition, currentFetchState: PartitionFetchState, fetchPartitionData: FetchResponseData.PartitionData): PartitionFetchState = { isErrorHandled = true throw new FencedLeaderEpochException(s"Epoch ${currentFetchState.currentLeaderEpoch} is fenced") } diff --git a/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala index aef40390d8596..b2cd44bbd9222 100644 --- a/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala +++ b/core/src/test/scala/unit/kafka/server/TxnOffsetCommitRequestTest.scala @@ -16,20 +16,21 @@ */ package kafka.server +import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import kafka.utils.TestUtils +import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.UnsupportedVersionException import org.apache.kafka.common.protocol.{ApiKeys, Errors} -import org.apache.kafka.common.requests.{EndTxnRequest, JoinGroupRequest} +import org.apache.kafka.common.requests.JoinGroupRequest import org.apache.kafka.common.test.ClusterInstance -import org.apache.kafka.common.test.api.{ClusterConfigProperty, ClusterTest, ClusterTestDefaults, Type} import org.apache.kafka.common.utils.ProducerIdAndEpoch import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.junit.jupiter.api.Assertions.{assertNotEquals, assertThrows} +import org.junit.jupiter.api.Assertions.{assertThrows, assertTrue, fail} -@ClusterTestDefaults( - types = Array(Type.KRAFT), - serverProperties = Array( +import scala.jdk.CollectionConverters.IterableHasAsScala + +@ClusterTestDefaults(types = Array(Type.KRAFT), serverProperties = Array( new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_PARTITIONS_CONFIG, value = "1"), new ClusterConfigProperty(key = GroupCoordinatorConfig.OFFSETS_TOPIC_REPLICATION_FACTOR_CONFIG, value = "1"), new ClusterConfigProperty(key = TransactionLogConfig.TRANSACTIONS_TOPIC_PARTITIONS_CONFIG, value = "1"), @@ -39,26 +40,30 @@ import org.junit.jupiter.api.Assertions.{assertNotEquals, assertThrows} class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinatorBaseRequestTest(cluster) { @ClusterTest - def testTxnOffsetCommitWithNewConsumerGroupProtocol(): Unit = { + def testTxnOffsetCommitWithNewConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testTxnOffsetCommit(true) } @ClusterTest - def testTxnOffsetCommitWithOldConsumerGroupProtocol(): Unit = { + def testTxnOffsetCommitWithOldConsumerGroupProtocolAndNewGroupCoordinator(): Unit = { testTxnOffsetCommit(false) } - @ClusterTest - def testDelayedTxnOffsetCommitWithBumpedEpochIsRejectedWithNewConsumerGroupProtocol(): Unit = { - testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(true) - } - - @ClusterTest - def testDelayedTxnOffsetCommitWithBumpedEpochIsRejectedWithOldConsumerGroupProtocol(): Unit = { - testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(false) + @ClusterTest( + serverProperties = Array( + new ClusterConfigProperty(key = GroupCoordinatorConfig.NEW_GROUP_COORDINATOR_ENABLE_CONFIG, value = "false"), + new ClusterConfigProperty(key = GroupCoordinatorConfig.GROUP_COORDINATOR_REBALANCE_PROTOCOLS_CONFIG, value = "classic"), + ) + ) + def testTxnOffsetCommitWithOldConsumerGroupProtocolAndOldGroupCoordinator(): Unit = { + testTxnOffsetCommit(false) } private def testTxnOffsetCommit(useNewProtocol: Boolean): Unit = { + if (useNewProtocol && !isNewGroupCoordinatorEnabled) { + fail("Cannot use the new protocol with the old group coordinator.") + } + val topic = "topic" val partition = 0 val transactionalId = "txn" @@ -72,8 +77,8 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat // Join the consumer group. Note that we don't heartbeat here so we must use // a session long enough for the duration of the test. val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) - assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, memberId) - assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, memberEpoch) + assertTrue(memberId != JoinGroupRequest.UNKNOWN_MEMBER_ID) + assertTrue(memberEpoch != JoinGroupRequest.UNKNOWN_GENERATION_ID) createTopic(topic, 1) @@ -185,7 +190,7 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat transactionalId = transactionalId ) - val originalOffset = fetchOffset(groupId, topic, partition) + val originalOffset = fetchOffset(topic, partition, groupId) commitTxnOffset( groupId = groupId, @@ -214,107 +219,26 @@ class TxnOffsetCommitRequestTest(cluster:ClusterInstance) extends GroupCoordinat TestUtils.waitUntilTrue(() => try { - fetchOffset(groupId, topic, partition) == expectedOffset + fetchOffset(topic, partition, groupId) == expectedOffset } catch { case _: Throwable => false }, "txn commit offset validation failed" ) } - private def testDelayedTxnOffsetCommitWithBumpedEpochIsRejected(useNewProtocol: Boolean): Unit = { - val topic = "topic" - val partition = 0 - val transactionalId = "txn" - val groupId = "group" - val offset = 100L - - // Creates the __consumer_offsets and __transaction_state topics because it won't be created automatically - // in this test because it does not use FindCoordinator API. - createOffsetsTopic() - createTransactionStateTopic() - - // Join the consumer group. Note that we don't heartbeat here so we must use - // a session long enough for the duration of the test. - val (memberId: String, memberEpoch: Int) = joinConsumerGroup(groupId, useNewProtocol) - assertNotEquals(JoinGroupRequest.UNKNOWN_MEMBER_ID, memberId) - assertNotEquals(JoinGroupRequest.UNKNOWN_GENERATION_ID, memberEpoch) - - createTopic(topic, 1) - - for (version <- ApiKeys.TXN_OFFSET_COMMIT.oldestVersion to ApiKeys.TXN_OFFSET_COMMIT.latestVersion(isUnstableApiEnabled)) { - val useTV2 = version > EndTxnRequest.LAST_STABLE_VERSION_BEFORE_TRANSACTION_V2 - - // Initialize producer. Wait until the coordinator finishes loading. - var producerIdAndEpoch: ProducerIdAndEpoch = null - TestUtils.waitUntilTrue(() => - try { - producerIdAndEpoch = initProducerId( - transactionalId = transactionalId, - producerIdAndEpoch = ProducerIdAndEpoch.NONE, - expectedError = Errors.NONE - ) - true - } catch { - case _: Throwable => false - }, "initProducerId request failed" - ) - - addOffsetsToTxn( - groupId = groupId, - producerId = producerIdAndEpoch.producerId, - producerEpoch = producerIdAndEpoch.epoch, - transactionalId = transactionalId - ) - - // Complete the transaction. - endTxn( - producerId = producerIdAndEpoch.producerId, - producerEpoch = producerIdAndEpoch.epoch, - transactionalId = transactionalId, - isTransactionV2Enabled = useTV2, - committed = true, - expectedError = Errors.NONE - ) - - // Start a new transaction. Wait for the previous transaction to complete. - TestUtils.waitUntilTrue(() => - try { - addOffsetsToTxn( - groupId = groupId, - producerId = producerIdAndEpoch.producerId, - producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, - transactionalId = transactionalId - ) - true - } catch { - case _: Throwable => false - }, "addOffsetsToTxn request failed" - ) - - // Committing offset with old epoch succeeds for TV1 and fails for TV2. - commitTxnOffset( - groupId = groupId, - memberId = if (version >= 3) memberId else JoinGroupRequest.UNKNOWN_MEMBER_ID, - generationId = if (version >= 3) 1 else JoinGroupRequest.UNKNOWN_GENERATION_ID, - producerId = producerIdAndEpoch.producerId, - producerEpoch = producerIdAndEpoch.epoch, - transactionalId = transactionalId, - topic = topic, - partition = partition, - offset = offset, - expectedError = if (useTV2) Errors.INVALID_PRODUCER_EPOCH else Errors.NONE, - version = version.toShort - ) - - // Complete the transaction. - endTxn( - producerId = producerIdAndEpoch.producerId, - producerEpoch = if (useTV2) (producerIdAndEpoch.epoch + 1).toShort else producerIdAndEpoch.epoch, - transactionalId = transactionalId, - isTransactionV2Enabled = useTV2, - committed = true, - expectedError = Errors.NONE - ) - } + private def fetchOffset( + topic: String, + partition: Int, + groupId: String + ): Long = { + val fetchOffsetsResp = fetchOffsets( + groups = Map(groupId -> List(new TopicPartition(topic, partition))), + requireStable = true, + version = ApiKeys.OFFSET_FETCH.latestVersion + ) + val groupIdRecord = fetchOffsetsResp.find(_.groupId == groupId).head + val topicRecord = groupIdRecord.topics.asScala.find(_.name == topic).head + val partitionRecord = topicRecord.partitions.asScala.find(_.partitionIndex == partition).head + partitionRecord.committedOffset } } diff --git a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala index 7d123cea9d365..6f6d0bdbda58d 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochFileCacheTest.scala @@ -65,7 +65,7 @@ class LeaderEpochFileCacheTest { val logEndOffset = 11 //Then - assertEquals(Optional.of(2), cache.latestEpoch) + assertEquals(OptionalInt.of(2), cache.latestEpoch) assertEquals(new EpochEntry(2, 10), cache.epochEntries().get(0)) assertEquals((2, logEndOffset), toTuple(cache.endOffsetFor(2, logEndOffset))) //should match logEndOffset } @@ -271,7 +271,7 @@ class LeaderEpochFileCacheTest { logEndOffset = 8 //Then later epochs will be removed - assertEquals(Optional.of(1), cache.latestEpoch) + assertEquals(OptionalInt.of(1), cache.latestEpoch) //Then end offset for epoch 1 will have changed assertEquals((1, 8), toTuple(cache.endOffsetFor(1, logEndOffset))) @@ -306,7 +306,7 @@ class LeaderEpochFileCacheTest { cache.assign(1, 0) //logEndOffset=0 //Then epoch should go up - assertEquals(Optional.of(1), cache.latestEpoch) + assertEquals(OptionalInt.of(1), cache.latestEpoch) //offset for 1 should still be 0 assertEquals((1, 0), toTuple(cache.endOffsetFor(1, logEndOffset))) //offset for epoch 0 should still be 0 @@ -346,7 +346,7 @@ class LeaderEpochFileCacheTest { logEndOffset = 3 //Then epoch should stay, offsets should grow - assertEquals(Optional.of(0), cache.latestEpoch) + assertEquals(OptionalInt.of(0), cache.latestEpoch) assertEquals((0, logEndOffset), toTuple(cache.endOffsetFor(0, logEndOffset))) //When messages arrive with greater epoch @@ -357,7 +357,7 @@ class LeaderEpochFileCacheTest { cache.assign(1, 5) logEndOffset = 6 - assertEquals(Optional.of(1), cache.latestEpoch) + assertEquals(OptionalInt.of(1), cache.latestEpoch) assertEquals((1, logEndOffset), toTuple(cache.endOffsetFor(1, logEndOffset))) //When @@ -368,7 +368,7 @@ class LeaderEpochFileCacheTest { cache.assign(2, 8) logEndOffset = 9 - assertEquals(Optional.of(2), cache.latestEpoch) + assertEquals(OptionalInt.of(2), cache.latestEpoch) assertEquals((2, logEndOffset), toTuple(cache.endOffsetFor(2, logEndOffset))) //Older epochs should return the start offset of the first message in the subsequent epoch. @@ -514,7 +514,7 @@ class LeaderEpochFileCacheTest { cache.truncateFromEndAsyncFlush( 9) //Then should keep the preceding epochs - assertEquals(Optional.of(3), cache.latestEpoch) + assertEquals(OptionalInt.of(3), cache.latestEpoch) assertEquals(java.util.Arrays.asList(new EpochEntry(2, 6), new EpochEntry(3, 8)), cache.epochEntries) } @@ -563,7 +563,7 @@ class LeaderEpochFileCacheTest { @Test def shouldFetchLatestEpochOfEmptyCache(): Unit = { //Then - assertEquals(Optional.empty(), cache.latestEpoch) + assertEquals(OptionalInt.empty(), cache.latestEpoch) } @Test @@ -598,7 +598,7 @@ class LeaderEpochFileCacheTest { assertEquals(OptionalInt.of(4), cache.previousEpoch(10)) cache.truncateFromEndAsyncFlush(18) - assertEquals(OptionalInt.of(2), cache.previousEpoch(cache.latestEpoch.get)) + assertEquals(OptionalInt.of(2), cache.previousEpoch(cache.latestEpoch.getAsInt)) } @Test @@ -615,7 +615,7 @@ class LeaderEpochFileCacheTest { assertEquals(Optional.of(new EpochEntry(4, 15)), cache.previousEntry(10)) cache.truncateFromEndAsyncFlush(18) - assertEquals(Optional.of(new EpochEntry(2, 10)), cache.previousEntry(cache.latestEpoch.get)) + assertEquals(Optional.of(new EpochEntry(2, 10)), cache.previousEntry(cache.latestEpoch.getAsInt)) } @Test diff --git a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala index f1ba2c7ac5ed6..e32f039b9b534 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/LeaderEpochIntegrationTest.scala @@ -34,8 +34,10 @@ import org.apache.kafka.common.serialization.StringSerializer import org.apache.kafka.common.utils.{LogContext, Time} import org.apache.kafka.server.network.BrokerEndPoint import org.apache.kafka.test.{TestUtils => JTestUtils} -import org.junit.jupiter.api.{AfterEach, Test} +import org.junit.jupiter.api.AfterEach import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.params.ParameterizedTest +import org.junit.jupiter.params.provider.ValueSource import scala.collection.mutable.ListBuffer import scala.collection.{Map, Seq} @@ -62,8 +64,9 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { super.tearDown() } - @Test - def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldAddCurrentLeaderEpochToMessagesAsTheyAreWrittenToLeader(quorum: String): Unit = { brokers ++= (0 to 1).map { id => createBroker(fromProps(createBrokerConfig(id))) } // Given two topics with replication of a single partition @@ -94,8 +97,9 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { waitUntilTrue(() => messagesHaveLeaderEpoch(brokers(0), expectedLeaderEpoch, 4), "Leader epoch should be 1") } - @Test - def shouldSendLeaderEpochRequestAndGetAResponse(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldSendLeaderEpochRequestAndGetAResponse(quorum: String): Unit = { //3 brokers, put partition on 100/101 and then pretend to be 102 brokers ++= (100 to 102).map { id => createBroker(fromProps(createBrokerConfig(id))) } @@ -141,8 +145,9 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { fetcher1.close() } - @Test - def shouldIncreaseLeaderEpochBetweenLeaderRestarts(): Unit = { + @ParameterizedTest + @ValueSource(strings = Array("kraft")) + def shouldIncreaseLeaderEpochBetweenLeaderRestarts(quorum: String): Unit = { //Setup: we are only interested in the single partition on broker 101 brokers += createBroker(fromProps(createBrokerConfig(100))) assertEquals(controllerServer.config.nodeId, waitUntilQuorumLeaderElected(controllerServer)) @@ -240,7 +245,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { private def waitForEpochChangeTo(topic: String, partition: Int, epoch: Int): Unit = { TestUtils.waitUntilTrue(() => { - brokers(0).metadataCache.getLeaderAndIsr(topic, partition).filter(_.leaderEpoch == epoch).isPresent() + brokers(0).metadataCache.getLeaderAndIsr(topic, partition).exists(_.leaderEpoch == epoch) }, "Epoch didn't change") } @@ -293,7 +298,7 @@ class LeaderEpochIntegrationTest extends QuorumTestHarness with Logging { } private def waitUntilQuorumLeaderElected(controllerServer: ControllerServer, timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Int = { - val (leaderAndEpoch, _) = computeUntilTrue(controllerServer.raftManager.client.leaderAndEpoch, waitTime = timeout)(_.leaderId().isPresent) + val (leaderAndEpoch, _) = computeUntilTrue(controllerServer.raftManager.leaderAndEpoch, waitTime = timeout)(_.leaderId().isPresent) leaderAndEpoch.leaderId().orElseThrow(() => new AssertionError(s"Quorum Controller leader not elected after $timeout ms")) } diff --git a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala index 3abea688468b1..79f4be41b8f62 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/OffsetsForLeaderEpochTest.scala @@ -17,10 +17,9 @@ package kafka.server.epoch import java.io.File -import kafka.log.LogManager +import kafka.log.{LogManager, UnifiedLog} import kafka.server.QuotaFactory.QuotaManagers import kafka.server._ -import kafka.server.metadata.KRaftMetadataCache import kafka.utils.TestUtils import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.message.OffsetForLeaderEpochRequestData.{OffsetForLeaderPartition, OffsetForLeaderTopic} @@ -31,12 +30,11 @@ import org.apache.kafka.common.record.RecordBatch import org.apache.kafka.common.requests.OffsetsForLeaderEpochResponse.{UNDEFINED_EPOCH, UNDEFINED_EPOCH_OFFSET} import org.apache.kafka.server.common.{KRaftVersion, OffsetAndEpoch} import org.apache.kafka.server.util.MockTime -import org.apache.kafka.storage.internals.log.{LogDirFailureChannel, UnifiedLog} +import org.apache.kafka.storage.internals.log.LogDirFailureChannel import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} import org.mockito.Mockito.{mock, when} -import java.util.Optional import scala.jdk.CollectionConverters._ class OffsetsForLeaderEpochTest { @@ -50,7 +48,7 @@ class OffsetsForLeaderEpochTest { @BeforeEach def setUp(): Unit = { - quotaManager = QuotaFactory.instantiate(config, metrics, time, "", "") + quotaManager = QuotaFactory.instantiate(config, metrics, time, "") } @Test @@ -63,7 +61,7 @@ class OffsetsForLeaderEpochTest { //Stubs val mockLog: UnifiedLog = mock(classOf[UnifiedLog]) val logManager: LogManager = mock(classOf[LogManager]) - when(mockLog.endOffsetForEpoch(epochRequested)).thenReturn(Optional.of(offsetAndEpoch)) + when(mockLog.endOffsetForEpoch(epochRequested)).thenReturn(Some(offsetAndEpoch)) when(logManager.liveLogDirs).thenReturn(Array.empty[File]) // create a replica manager with 1 partition that has 1 replica @@ -74,7 +72,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) val partition = replicaManager.createPartition(tp) @@ -86,7 +84,7 @@ class OffsetsForLeaderEpochTest { //Then assertEquals( - Seq(newOffsetForLeaderTopicResult(tp, Errors.NONE, offsetAndEpoch.epoch(), offsetAndEpoch.offset)), + Seq(newOffsetForLeaderTopicResult(tp, Errors.NONE, offsetAndEpoch.leaderEpoch, offsetAndEpoch.offset)), response) } @@ -103,7 +101,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) replicaManager.createPartition(tp) @@ -134,7 +132,7 @@ class OffsetsForLeaderEpochTest { scheduler = null, logManager = logManager, quotaManagers = quotaManager, - metadataCache = new KRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), + metadataCache = MetadataCache.kRaftMetadataCache(config.brokerId, () => KRaftVersion.KRAFT_VERSION_0), logDirFailureChannel = new LogDirFailureChannel(config.logDirs.size), alterPartitionManager = alterIsrManager) diff --git a/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala b/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala index 297348ed790b5..47b33e0dd7b82 100644 --- a/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala +++ b/core/src/test/scala/unit/kafka/server/epoch/util/MockBlockingSender.scala @@ -116,8 +116,7 @@ class MockBlockingSender(offsets: java.util.Map[TopicPartition, EpochEndOffset], topicIds = Map.empty FetchResponse.of(Errors.NONE, 0, if (partitionData.isEmpty) JFetchMetadata.INVALID_SESSION_ID else 1, - partitionData, List.empty.asJava - ) + partitionData) case ApiKeys.LIST_OFFSETS => listOffsetsCount += 1 diff --git a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala index 32727a4c3cc7c..a166368a5aa28 100644 --- a/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala +++ b/core/src/test/scala/unit/kafka/server/metadata/BrokerMetadataPublisherTest.scala @@ -20,30 +20,24 @@ package kafka.server.metadata import kafka.coordinator.transaction.TransactionCoordinator import java.util.Collections.{singleton, singletonList, singletonMap} -import java.util.{OptionalInt, Properties} +import java.util.Properties import java.util.concurrent.atomic.{AtomicInteger, AtomicReference} import kafka.log.LogManager -import kafka.server.share.SharePartitionManager import kafka.server.{BrokerServer, KafkaConfig, ReplicaManager} import kafka.utils.TestUtils import org.apache.kafka.clients.admin.AlterConfigOp.OpType.SET import org.apache.kafka.clients.admin.{Admin, AlterConfigOp, ConfigEntry, NewTopic} -import org.apache.kafka.common.Uuid import org.apache.kafka.common.config.ConfigResource import org.apache.kafka.common.config.ConfigResource.Type.BROKER -import org.apache.kafka.common.internals.Topic -import org.apache.kafka.common.metadata.{FeatureLevelRecord, PartitionRecord, RemoveTopicRecord, TopicRecord} import org.apache.kafka.common.test.{KafkaClusterTestKit, TestKitNodes} import org.apache.kafka.common.utils.Exit -import org.apache.kafka.coordinator.common.runtime.{KRaftCoordinatorMetadataDelta, KRaftCoordinatorMetadataImage} import org.apache.kafka.coordinator.group.GroupCoordinator import org.apache.kafka.coordinator.share.ShareCoordinator -import org.apache.kafka.image.{AclsImage, ClientQuotasImage, ClusterImageTest, ConfigurationsImage, DelegationTokenImage, FeaturesImage, MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance, ProducerIdsImage, ScramImage, TopicsImage} +import org.apache.kafka.image.{MetadataDelta, MetadataImage, MetadataImageTest, MetadataProvenance} import org.apache.kafka.image.loader.LogDeltaManifest -import org.apache.kafka.metadata.publisher.{AclPublisher, DelegationTokenPublisher, ScramPublisher} import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.raft.LeaderAndEpoch -import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion, ShareVersion} +import org.apache.kafka.server.common.{KRaftVersion, MetadataVersion} import org.apache.kafka.server.fault.FaultHandler import org.junit.jupiter.api.Assertions.{assertEquals, assertNotNull, assertTrue} import org.junit.jupiter.api.{AfterEach, BeforeEach, Test} @@ -53,7 +47,6 @@ import org.mockito.Mockito.{doThrow, mock, verify} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer -import java.util import java.util.concurrent.TimeUnit import scala.jdk.CollectionConverters._ @@ -186,70 +179,6 @@ class BrokerMetadataPublisherTest { } } - @Test - def testGroupCoordinatorTopicDeletion(): Unit = { - val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) - val metadataCache = new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1) - val logManager = mock(classOf[LogManager]) - val replicaManager = mock(classOf[ReplicaManager]) - val groupCoordinator = mock(classOf[GroupCoordinator]) - val faultHandler = mock(classOf[FaultHandler]) - - val metadataPublisher = new BrokerMetadataPublisher( - config, - metadataCache, - logManager, - replicaManager, - groupCoordinator, - mock(classOf[TransactionCoordinator]), - mock(classOf[ShareCoordinator]), - mock(classOf[SharePartitionManager]), - mock(classOf[DynamicConfigPublisher]), - mock(classOf[DynamicClientQuotaPublisher]), - mock(classOf[DynamicTopicClusterQuotaPublisher]), - mock(classOf[ScramPublisher]), - mock(classOf[DelegationTokenPublisher]), - mock(classOf[AclPublisher]), - faultHandler, - faultHandler - ) - - val topicId = Uuid.randomUuid() - var delta = new MetadataDelta(MetadataImage.EMPTY) - delta.replay(new TopicRecord() - .setName(Topic.GROUP_METADATA_TOPIC_NAME) - .setTopicId(topicId) - ) - delta.replay(new PartitionRecord() - .setTopicId(topicId) - .setPartitionId(0) - .setLeader(config.brokerId) - ) - delta.replay(new PartitionRecord() - .setTopicId(topicId) - .setPartitionId(1) - .setLeader(config.brokerId) - ) - val image = delta.apply(MetadataProvenance.EMPTY) - - delta = new MetadataDelta(image) - delta.replay(new RemoveTopicRecord() - .setTopicId(topicId) - ) - - metadataPublisher.onMetadataUpdate(delta, delta.apply(MetadataProvenance.EMPTY), - LogDeltaManifest.newBuilder() - .provenance(MetadataProvenance.EMPTY) - .leaderAndEpoch(LeaderAndEpoch.UNKNOWN) - .numBatches(1) - .elapsedNs(100) - .numBytes(42) - .build()) - - verify(groupCoordinator).onResignation(0, OptionalInt.empty()) - verify(groupCoordinator).onResignation(1, OptionalInt.empty()) - } - @Test def testNewImagePushedToGroupCoordinator(): Unit = { val config = KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)) @@ -266,8 +195,7 @@ class BrokerMetadataPublisherTest { replicaManager, groupCoordinator, mock(classOf[TransactionCoordinator]), - mock(classOf[ShareCoordinator]), - mock(classOf[SharePartitionManager]), + Some(mock(classOf[ShareCoordinator])), mock(classOf[DynamicConfigPublisher]), mock(classOf[DynamicClientQuotaPublisher]), mock(classOf[DynamicTopicClusterQuotaPublisher]), @@ -275,7 +203,7 @@ class BrokerMetadataPublisherTest { mock(classOf[DelegationTokenPublisher]), mock(classOf[AclPublisher]), faultHandler, - faultHandler, + faultHandler ) val image = MetadataImage.EMPTY @@ -292,70 +220,6 @@ class BrokerMetadataPublisherTest { .numBytes(42) .build()) - verify(groupCoordinator).onNewMetadataImage(new KRaftCoordinatorMetadataImage(image), new KRaftCoordinatorMetadataDelta(delta)) - } - - @Test - def testNewShareVersionPushedToSharePartitionManager(): Unit = { - val sharePartitionManager = mock(classOf[SharePartitionManager]) - val faultHandler = mock(classOf[FaultHandler]) - - val metadataPublisher = new BrokerMetadataPublisher( - KafkaConfig.fromProps(TestUtils.createBrokerConfig(0)), - new KRaftMetadataCache(0, () => KRaftVersion.KRAFT_VERSION_1), - mock(classOf[LogManager]), - mock(classOf[ReplicaManager]), - mock(classOf[GroupCoordinator]), - mock(classOf[TransactionCoordinator]), - mock(classOf[ShareCoordinator]), - sharePartitionManager, - mock(classOf[DynamicConfigPublisher]), - mock(classOf[DynamicClientQuotaPublisher]), - mock(classOf[DynamicTopicClusterQuotaPublisher]), - mock(classOf[ScramPublisher]), - mock(classOf[DelegationTokenPublisher]), - mock(classOf[AclPublisher]), - faultHandler, - faultHandler - ) - - val featuresImage = new FeaturesImage( - util.Map.of( - MetadataVersion.FEATURE_NAME, MetadataVersion.latestTesting().featureLevel(), - ShareVersion.FEATURE_NAME, ShareVersion.SV_1.featureLevel() - ), - MetadataVersion.latestTesting()) - - val image = new MetadataImage( - MetadataProvenance.EMPTY, - featuresImage, - ClusterImageTest.IMAGE1, - TopicsImage.EMPTY, - ConfigurationsImage.EMPTY, - ClientQuotasImage.EMPTY, - ProducerIdsImage.EMPTY, - AclsImage.EMPTY, - ScramImage.EMPTY, - DelegationTokenImage.EMPTY - ) - - // Share version 1 is getting passed to features delta. - val delta = new MetadataDelta(image) - delta.replay(new FeatureLevelRecord().setName(ShareVersion.FEATURE_NAME).setFeatureLevel(1)) - - metadataPublisher.onMetadataUpdate( - delta, - image, - LogDeltaManifest.newBuilder(). - provenance(MetadataProvenance.EMPTY). - leaderAndEpoch(new LeaderAndEpoch(OptionalInt.of(1), 1)). - numBatches(1). - elapsedNs(1L). - numBytes(1). - build() - ) - - // SharePartitionManager is receiving the latest changes. - verify(sharePartitionManager).onShareVersionToggle(any(), any()) + verify(groupCoordinator).onNewMetadataImage(image, delta) } } diff --git a/core/src/test/scala/unit/kafka/server/metadata/MockConfigRepositoryTest.scala b/core/src/test/scala/unit/kafka/server/metadata/MockConfigRepositoryTest.scala new file mode 100644 index 0000000000000..fbeccc1e646f5 --- /dev/null +++ b/core/src/test/scala/unit/kafka/server/metadata/MockConfigRepositoryTest.scala @@ -0,0 +1,55 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.server.metadata + +import java.util.Properties + +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Test + +class MockConfigRepositoryTest { + @Test + def testEmptyRepository(): Unit = { + val repository = new MockConfigRepository() + assertEquals(new Properties(), repository.brokerConfig(0)) + assertEquals(new Properties(), repository.topicConfig("foo")) + assertEquals(new Properties(), repository.groupConfig("group")) + } + + @Test + def testSetTopicConfig(): Unit = { + val repository = new MockConfigRepository() + val topic0 = "topic0" + repository.setTopicConfig(topic0, "foo", null) + + val topic1 = "topic1" + repository.setTopicConfig(topic1, "foo", "bar") + val topicProperties = new Properties() + topicProperties.put("foo", "bar") + assertEquals(topicProperties, repository.topicConfig(topic1)) + + val topicProperties2 = new Properties() + topicProperties2.put("foo", "bar") + topicProperties2.put("foo2", "baz") + repository.setTopicConfig(topic1, "foo2", "baz") // add another prop + assertEquals(topicProperties2, repository.topicConfig(topic1)) // should get both props + + repository.setTopicConfig(topic1, "foo2", null) + assertEquals(topicProperties, repository.topicConfig(topic1)) + } +} diff --git a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala index 336a8dd55c3bc..b86a5608c3db6 100644 --- a/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala +++ b/core/src/test/scala/unit/kafka/tools/DumpLogSegmentsTest.scala @@ -24,34 +24,36 @@ import java.util.Collections import java.util.Optional import java.util.Properties import java.util.stream.IntStream -import kafka.log.LogTestUtils -import kafka.raft.KafkaMetadataLog +import kafka.log.{LogTestUtils, UnifiedLog} +import kafka.raft.{KafkaMetadataLog, MetadataLogConfig} import kafka.server.KafkaRaftServer -import kafka.tools.DumpLogSegments.{OffsetsMessageParser, ShareGroupStateMessageParser, TimeIndexDumpErrors, TransactionLogMessageParser} +import kafka.tools.DumpLogSegments.{OffsetsMessageParser, ShareGroupStateMessageParser, TimeIndexDumpErrors} import kafka.utils.TestUtils import org.apache.kafka.clients.consumer.ConsumerPartitionAssignor.{Assignment, Subscription} import org.apache.kafka.clients.consumer.internals.ConsumerProtocol import org.apache.kafka.common.{TopicIdPartition, TopicPartition, Uuid} import org.apache.kafka.common.compress.Compression -import org.apache.kafka.common.config.{AbstractConfig, TopicConfig} -import org.apache.kafka.common.message.{KRaftVersionRecord, LeaderChangeMessage, SnapshotFooterRecord, SnapshotHeaderRecord, VotersRecord} +import org.apache.kafka.common.config.TopicConfig import org.apache.kafka.common.metadata.{PartitionChangeRecord, RegisterBrokerRecord, TopicRecord} -import org.apache.kafka.common.protocol.{ApiMessage, ByteBufferAccessor, MessageUtil, ObjectSerializationCache} -import org.apache.kafka.common.record.{ControlRecordType, ControlRecordUtils, EndTransactionMarker, MemoryRecords, Record, RecordVersion, SimpleRecord} +import org.apache.kafka.common.protocol.{ByteBufferAccessor, ObjectSerializationCache} +import org.apache.kafka.common.record.{ControlRecordType, EndTransactionMarker, MemoryRecords, Record, RecordBatch, RecordVersion, SimpleRecord} import org.apache.kafka.common.utils.{Exit, Utils} +import org.apache.kafka.coordinator.common.runtime.CoordinatorRecord +import org.apache.kafka.coordinator.group.GroupCoordinatorRecordSerde import org.apache.kafka.coordinator.group.generated.{ConsumerGroupMemberMetadataValue, ConsumerGroupMetadataKey, ConsumerGroupMetadataValue, GroupMetadataKey, GroupMetadataValue} import org.apache.kafka.coordinator.share.generated.{ShareSnapshotKey, ShareSnapshotValue, ShareUpdateKey, ShareUpdateValue} -import org.apache.kafka.coordinator.transaction.generated.{TransactionLogKey, TransactionLogValue} +import org.apache.kafka.coordinator.share.{ShareCoordinator, ShareCoordinatorRecordSerde} import org.apache.kafka.coordinator.transaction.TransactionLogConfig import org.apache.kafka.metadata.MetadataRecordSerde -import org.apache.kafka.raft.{MetadataLogConfig, VoterSetTest} -import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion, OffsetAndEpoch} +import org.apache.kafka.raft.{KafkaRaftClient, OffsetAndEpoch, VoterSetTest} +import org.apache.kafka.server.common.{ApiMessageAndVersion, KRaftVersion} +import org.apache.kafka.server.config.ServerLogConfigs import org.apache.kafka.server.log.remote.metadata.storage.serialization.RemoteLogMetadataSerde import org.apache.kafka.server.log.remote.storage.{RemoteLogSegmentId, RemoteLogSegmentMetadata, RemoteLogSegmentMetadataUpdate, RemoteLogSegmentState, RemotePartitionDeleteMetadata, RemotePartitionDeleteState} import org.apache.kafka.server.storage.log.FetchIsolation import org.apache.kafka.server.util.MockTime import org.apache.kafka.snapshot.RecordsSnapshotWriter -import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{AppendOrigin, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.{AfterEach, Test} @@ -87,20 +89,20 @@ class DumpLogSegmentsTest { props.setProperty(TopicConfig.INDEX_INTERVAL_BYTES_CONFIG, "128") // This test uses future timestamps beyond the default of 1 hour. props.setProperty(TopicConfig.MESSAGE_TIMESTAMP_AFTER_MAX_MS_CONFIG, Long.MaxValue.toString) - log = UnifiedLog.create( - logDir, - new LogConfig(props), - 0L, - 0L, - time.scheduler, - new BrokerTopicStats, - time, - 5 * 60 * 1000, - new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), - TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, - new LogDirFailureChannel(10), - true, - Optional.empty + log = UnifiedLog( + dir = logDir, + config = new LogConfig(props), + logStartOffset = 0L, + recoveryPoint = 0L, + scheduler = time.scheduler, + time = time, + brokerTopicStats = new BrokerTopicStats, + maxTransactionTimeoutMs = 5 * 60 * 1000, + producerStateManagerConfig = new ProducerStateManagerConfig(TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT, false), + producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT, + logDirFailureChannel = new LogDirFailureChannel(10), + topicId = None, + keepPartitionMetadataFile = true ) log } @@ -117,7 +119,8 @@ class DumpLogSegmentsTest { batches += BatchInfo(fourthBatchRecords, hasKeys = false, hasValues = false) batches.foreach { batchInfo => - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, 0, batchInfo.records: _*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, 0, batchInfo.records: _*), + leaderEpoch = 0) } // Flush, but don't close so that the indexes are not trimmed and contain some zero entries log.flush(false) @@ -130,31 +133,31 @@ class DumpLogSegmentsTest { log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, 0, new SimpleRecord("a".getBytes), new SimpleRecord("b".getBytes) - ), 0) + ), leaderEpoch = 0) log.appendAsLeader(MemoryRecords.withRecords(Compression.gzip().build(), 0, new SimpleRecord(time.milliseconds(), "c".getBytes, "1".getBytes), new SimpleRecord("d".getBytes) - ), 3) + ), leaderEpoch = 3) log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, 0, new SimpleRecord("e".getBytes, null), new SimpleRecord(null, "f".getBytes), new SimpleRecord("g".getBytes) - ), 3) + ), leaderEpoch = 3) log.appendAsLeader(MemoryRecords.withIdempotentRecords(Compression.NONE, 29342342L, 15.toShort, 234123, new SimpleRecord("h".getBytes) - ), 3) + ), leaderEpoch = 3) log.appendAsLeader(MemoryRecords.withTransactionalRecords(Compression.gzip().build(), 98323L, 99.toShort, 266, new SimpleRecord("i".getBytes), new SimpleRecord("j".getBytes) - ), 5) + ), leaderEpoch = 5) log.appendAsLeader(MemoryRecords.withEndTransactionMarker(98323L, 99.toShort, new EndTransactionMarker(ControlRecordType.COMMIT, 100) - ), 7, AppendOrigin.COORDINATOR) + ), origin = AppendOrigin.COORDINATOR, leaderEpoch = 7) assertDumpLogRecordMetadata(log) } @@ -295,7 +298,7 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 0) log.flush(false) val expectedDeletePayload = String.format("RemotePartitionDeleteMetadata{topicPartition=%s:%s-0, " + @@ -327,7 +330,7 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0) log.flush(false) val expectedUpdatePayload = String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" + @@ -365,8 +368,8 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 0) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 0) log.flush(false) val expectedUpdatePayload = String.format("RemoteLogSegmentMetadataUpdate{remoteLogSegmentId=" + @@ -397,9 +400,9 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0) val secondSegment = log.roll() - secondSegment.append(1L, MemoryRecords.withRecords(Compression.NONE, metadataRecords: _*)) + secondSegment.append(1L, RecordBatch.NO_TIMESTAMP, 1L, MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*)) secondSegment.flush() log.flush(true) @@ -419,7 +422,7 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0) log.flush(false) val output = runDumpLogSegments(Array("--remote-log-metadata-decoder", "--files", logFilePath)) @@ -443,7 +446,7 @@ class DumpLogSegmentsTest { val logConfig = LogTestUtils.createLogConfig(segmentBytes = 1024 * 1024) log = LogTestUtils.createLog(logDir, logConfig, new BrokerTopicStats, time.scheduler, time) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), 0) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, metadataRecords:_*), leaderEpoch = 0) log.flush(false) Files.setPosixFilePermissions(Paths.get(logFilePath), PosixFilePermissions.fromString("-w-------")) @@ -485,7 +488,7 @@ class DumpLogSegmentsTest { new TopicRecord().setName("test-topic").setTopicId(Uuid.randomUuid()), 0.toShort), new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(Uuid.randomUuid()).setLeader(1). - setPartitionId(0).setIsr(util.List.of(0, 1, 2)), 0.toShort) + setPartitionId(0).setIsr(util.Arrays.asList(0, 1, 2)), 0.toShort) ) val records: Array[SimpleRecord] = metadataRecords.map(message => { @@ -498,7 +501,7 @@ class DumpLogSegmentsTest { buf.flip() new SimpleRecord(null, buf.array) }).toArray - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 1) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 1) log.flush(false) var output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--files", logFilePath)) @@ -515,8 +518,8 @@ class DumpLogSegmentsTest { val writer = new ByteBufferAccessor(buf) writer.writeUnsignedVarint(10000) writer.writeUnsignedVarint(10000) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord(null, buf.array)), 2) - log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), 2) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, new SimpleRecord(null, buf.array)), leaderEpoch = 2) + log.appendAsLeader(MemoryRecords.withRecords(Compression.NONE, records:_*), leaderEpoch = 2) output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--skip-record-metadata", "--files", logFilePath)) assertTrue(output.contains("TOPIC_RECORD")) @@ -524,48 +527,9 @@ class DumpLogSegmentsTest { assertTrue(output.contains("skipping")) } - @Test - def testDumpControlRecord(): Unit = { - log = createTestLog - - log.appendAsLeader(MemoryRecords.withEndTransactionMarker(0L, 0.toShort, - new EndTransactionMarker(ControlRecordType.COMMIT, 100) - ), 0, AppendOrigin.COORDINATOR) - - log.appendAsLeader(MemoryRecords.withLeaderChangeMessage(0L, 0L, 0, ByteBuffer.allocate(4), - new LeaderChangeMessage() - ), 0, AppendOrigin.COORDINATOR) - - log.appendAsLeader(MemoryRecords.withSnapshotHeaderRecord(0L, 0L, 0, ByteBuffer.allocate(4), - new SnapshotHeaderRecord() - ), 0, AppendOrigin.COORDINATOR) - - log.appendAsLeader(MemoryRecords.withSnapshotFooterRecord(0L, 0L, 0, ByteBuffer.allocate(4), - new SnapshotFooterRecord() - .setVersion(ControlRecordUtils.SNAPSHOT_FOOTER_CURRENT_VERSION) - ), 0, AppendOrigin.COORDINATOR) - - log.appendAsLeader(MemoryRecords.withKRaftVersionRecord(0L, 0L, 0, ByteBuffer.allocate(4), - new KRaftVersionRecord() - ), 0, AppendOrigin.COORDINATOR) - - log.appendAsLeader(MemoryRecords.withVotersRecord(0L, 0L, 0, ByteBuffer.allocate(4), - new VotersRecord() - ), 0, AppendOrigin.COORDINATOR) - log.flush(false) - - val output = runDumpLogSegments(Array("--cluster-metadata-decoder", "--files", logFilePath)) - assertTrue(output.contains("endTxnMarker"), output) - assertTrue(output.contains("LeaderChange"), output) - assertTrue(output.contains("SnapshotHeader"), output) - assertTrue(output.contains("SnapshotFooter"), output) - assertTrue(output.contains("KRaftVersion"), output) - assertTrue(output.contains("KRaftVoters"), output) - } - @Test def testDumpMetadataSnapshot(): Unit = { - val metadataRecords = util.List.of( + val metadataRecords = Seq( new ApiMessageAndVersion( new RegisterBrokerRecord().setBrokerId(0).setBrokerEpoch(10), 0.toShort), new ApiMessageAndVersion( @@ -574,7 +538,7 @@ class DumpLogSegmentsTest { new TopicRecord().setName("test-topic").setTopicId(Uuid.randomUuid()), 0.toShort), new ApiMessageAndVersion( new PartitionChangeRecord().setTopicId(Uuid.randomUuid()).setLeader(1). - setPartitionId(0).setIsr(util.List.of(0, 1, 2)), 0.toShort) + setPartitionId(0).setIsr(util.Arrays.asList(0, 1, 2)), 0.toShort) ) val metadataLog = KafkaMetadataLog( @@ -583,13 +547,17 @@ class DumpLogSegmentsTest { logDir, time, time.scheduler, - createMetadataLogConfig( - 100 * 1024, - 10 * 1000, - 100 * 1024, - 60 * 1000 - ), - 1 + MetadataLogConfig( + logSegmentBytes = 100 * 1024, + logSegmentMinBytes = 100 * 1024, + logSegmentMillis = 10 * 1000, + retentionMaxBytes = 100 * 1024, + retentionMillis = 60 * 1000, + maxBatchSizeInBytes = KafkaRaftClient.MAX_BATCH_SIZE_BYTES, + maxFetchSizeInBytes = KafkaRaftClient.MAX_FETCH_SIZE_BYTES, + fileDeleteDelayMs = ServerLogConfigs.LOG_DELETE_DELAY_MS_DEFAULT, + nodeId = 1 + ) ) val lastContainedLogTimestamp = 10000 @@ -603,7 +571,7 @@ class DumpLogSegmentsTest { .setVoterSet(Optional.of(VoterSetTest.voterSet(VoterSetTest.voterMap(IntStream.of(1, 2, 3), true)))) .build(MetadataRecordSerde.INSTANCE) ) { snapshotWriter => - snapshotWriter.append(metadataRecords) + snapshotWriter.append(metadataRecords.asJava) snapshotWriter.freeze() } @@ -662,14 +630,14 @@ class DumpLogSegmentsTest { // Get all the batches val output = runDumpLogSegments(Array("--files", logFilePath)) - val lines = util.List.of(output.split("\n"): _*).listIterator() + val lines = util.Arrays.asList(output.split("\n"): _*).listIterator() // Get total bytes of the partial batches val partialBatchesBytes = readPartialBatchesBytes(lines, partialBatches) // Request only the partial batches by bytes val partialOutput = runDumpLogSegments(Array("--max-bytes", partialBatchesBytes.toString, "--files", logFilePath)) - val partialLines = util.List.of(partialOutput.split("\n"): _*).listIterator() + val partialLines = util.Arrays.asList(partialOutput.split("\n"): _*).listIterator() // Count the total of partial batches limited by bytes val partialBatchesCount = countBatches(partialLines) @@ -677,20 +645,22 @@ class DumpLogSegmentsTest { assertEquals(partialBatches, partialBatchesCount) } - private def serializedRecord(key: ApiMessage, value: ApiMessageAndVersion): Record = { - TestUtils.singletonRecords( - key = MessageUtil.toCoordinatorTypePrefixedBytes(key), - value = if (value == null) null else MessageUtil.toVersionPrefixedBytes(value.version, value.message) - ).records.iterator.next - } - @Test def testOffsetsMessageParser(): Unit = { + val serde = new GroupCoordinatorRecordSerde() val parser = new OffsetsMessageParser() + def serializedRecord(key: ApiMessageAndVersion, value: ApiMessageAndVersion): Record = { + val record = new CoordinatorRecord(key, value) + TestUtils.singletonRecords( + key = serde.serializeKey(record), + value = serde.serializeValue(record) + ).records.iterator.next + } + // The key is mandatory. assertEquals( - "Failed to decode message at offset 0 using the specified decoder (message had a missing key)", + "Failed to decode message at offset 0 using offset topic decoder (message had a missing key)", assertThrows( classOf[RuntimeException], () => parser.parse(TestUtils.singletonRecords(key = null, value = null).records.iterator.next) @@ -704,8 +674,11 @@ class DumpLogSegmentsTest { Some("{\"version\":\"0\",\"data\":{\"epoch\":10}}") ), parser.parse(serializedRecord( - new ConsumerGroupMetadataKey() - .setGroupId("group"), + new ApiMessageAndVersion( + new ConsumerGroupMetadataKey() + .setGroupId("group"), + 3.toShort + ), new ApiMessageAndVersion( new ConsumerGroupMetadataValue() .setEpoch(10), @@ -727,8 +700,11 @@ class DumpLogSegmentsTest { "[{\"topic\":\"foo\",\"partitions\":[0]}],\"userData\":null}}]}}") ), parser.parse(serializedRecord( - new GroupMetadataKey() - .setGroup("group"), + new ApiMessageAndVersion( + new GroupMetadataKey() + .setGroup("group"), + 2.toShort + ), new ApiMessageAndVersion( new GroupMetadataValue() .setProtocolType("consumer") @@ -769,8 +745,11 @@ class DumpLogSegmentsTest { "\"assignment\":\"QXNzaWdubWVudA==\"}]}}") ), parser.parse(serializedRecord( - new GroupMetadataKey() - .setGroup("group"), + new ApiMessageAndVersion( + new GroupMetadataKey() + .setGroup("group"), + 2.toShort + ), new ApiMessageAndVersion( new GroupMetadataValue() .setProtocolType("consumer") @@ -800,8 +779,11 @@ class DumpLogSegmentsTest { Some("") ), parser.parse(serializedRecord( - new ConsumerGroupMetadataKey() - .setGroupId("group"), + new ApiMessageAndVersion( + new ConsumerGroupMetadataKey() + .setGroupId("group"), + 3.toShort + ), null )) ) @@ -814,12 +796,18 @@ class DumpLogSegmentsTest { ), None ), - parser.parse( - TestUtils.singletonRecords( - key = ByteBuffer.allocate(2).putShort(Short.MaxValue).array(), - value = Array.empty - ).records.iterator.next - ) + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new ConsumerGroupMetadataKey() + .setGroupId("group"), + Short.MaxValue // Invalid record id. + ), + new ApiMessageAndVersion( + new ConsumerGroupMetadataValue() + .setEpoch(10), + 0.toShort + ) + )) ) // Any parsing error is swallowed and reported. @@ -832,111 +820,17 @@ class DumpLogSegmentsTest { None ), parser.parse(serializedRecord( - new ConsumerGroupMetadataKey() - .setGroupId("group"), new ApiMessageAndVersion( - new ConsumerGroupMemberMetadataValue(), // The value does correspond to the record id. - 0.toShort - ) - )) - ) - } - - @Test - def testTransactionLogMessageParser(): Unit = { - val parser = new TransactionLogMessageParser() - - // The key is mandatory. - assertEquals( - "Failed to decode message at offset 0 using the specified decoder (message had a missing key)", - assertThrows( - classOf[RuntimeException], - () => parser.parse(TestUtils.singletonRecords(key = null, value = null).records.iterator.next) - ).getMessage - ) - - // A valid key and value should work. - assertEquals( - ( - Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), - Some("{\"version\":\"0\",\"data\":{\"producerId\":123,\"producerEpoch\":0,\"transactionTimeoutMs\":0," + - "\"transactionStatus\":0,\"transactionPartitions\":[],\"transactionLastUpdateTimestampMs\":0," + - "\"transactionStartTimestampMs\":0}}") - ), - parser.parse(serializedRecord( - new TransactionLogKey() - .setTransactionalId("txnId"), + new ConsumerGroupMetadataKey() + .setGroupId("group"), + 3.toShort + ), new ApiMessageAndVersion( - new TransactionLogValue() - .setProducerId(123L), + new ConsumerGroupMemberMetadataValue(), // The value does correspond to the record id. 0.toShort ) )) ) - - // A valid key with a tombstone should work. - assertEquals( - ( - Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), - Some("") - ), - parser.parse(serializedRecord( - new TransactionLogKey() - .setTransactionalId("txnId"), - null - )) - ) - - // An unknown record type should be handled and reported as such. - assertEquals( - ( - Some("Unknown record type 32767 at offset 0, skipping."), - None - ), - parser.parse( - TestUtils.singletonRecords( - key = ByteBuffer.allocate(2).putShort(Short.MaxValue).array(), - value = Array.empty - ).records.iterator.next - ) - ) - - // A valid key and value with all fields set should work. - assertEquals( - ( - Some("{\"type\":\"0\",\"data\":{\"transactionalId\":\"txnId\"}}"), - Some("{\"version\":\"1\",\"data\":{\"producerId\":12,\"previousProducerId\":11,\"nextProducerId\":10," + - "\"producerEpoch\":2,\"transactionTimeoutMs\":14,\"transactionStatus\":0," + - "\"transactionPartitions\":[{\"topic\":\"topic1\",\"partitionIds\":[0,1,2]}," + - "{\"topic\":\"topic2\",\"partitionIds\":[3,4,5]}],\"transactionLastUpdateTimestampMs\":123," + - "\"transactionStartTimestampMs\":13}}") - ), - parser.parse(serializedRecord( - new TransactionLogKey() - .setTransactionalId("txnId"), - new ApiMessageAndVersion( - new TransactionLogValue() - .setClientTransactionVersion(0.toShort) - .setNextProducerId(10L) - .setPreviousProducerId(11L) - .setProducerEpoch(2.toShort) - .setProducerId(12L) - .setTransactionLastUpdateTimestampMs(123L) - .setTransactionPartitions(util.List.of( - new TransactionLogValue.PartitionsSchema() - .setTopic("topic1") - .setPartitionIds(util.List.of[Integer](0, 1, 2)), - new TransactionLogValue.PartitionsSchema() - .setTopic("topic2") - .setPartitionIds(util.List.of[Integer](3, 4, 5)) - )) - .setTransactionStartTimestampMs(13L) - .setTransactionStatus(0) - .setTransactionTimeoutMs(14), - 1.toShort - ) - )) - ) } private def readBatchMetadata(lines: util.ListIterator[String]): Option[String] = { @@ -1020,11 +914,15 @@ class DumpLogSegmentsTest { } private def assertDumpLogRecordMetadata(log: UnifiedLog): Unit = { - val logReadInfo = log.read(0, Int.MaxValue, FetchIsolation.LOG_END, true + val logReadInfo = log.read( + startOffset = 0, + maxLength = Int.MaxValue, + isolation = FetchIsolation.LOG_END, + minOneMessage = true ) val output = runDumpLogSegments(Array("--deep-iteration", "--files", logFilePath)) - val lines = util.List.of(output.split("\n"): _*).listIterator() + val lines = util.Arrays.asList(output.split("\n"): _*).listIterator() for (batch <- logReadInfo.records.batches.asScala) { val parsedBatchOpt = readBatchMetadata(lines) @@ -1071,12 +969,20 @@ class DumpLogSegmentsTest { @Test def testShareGroupStateMessageParser(): Unit = { + val serde = new ShareCoordinatorRecordSerde() val parser = new ShareGroupStateMessageParser() - val timestamp = System.currentTimeMillis + + def serializedRecord(key: ApiMessageAndVersion, value: ApiMessageAndVersion): Record = { + val record = new CoordinatorRecord(key, value) + TestUtils.singletonRecords( + key = serde.serializeKey(record), + value = serde.serializeValue(record) + ).records.iterator.next + } // The key is mandatory. assertEquals( - "Failed to decode message at offset 0 using the specified decoder (message had a missing key)", + "Failed to decode message at offset 0 using share group state topic decoder (message had a missing key)", assertThrows( classOf[RuntimeException], () => parser.parse(TestUtils.singletonRecords(key = null, value = null).records.iterator.next) @@ -1087,28 +993,27 @@ class DumpLogSegmentsTest { assertEquals( ( Some("{\"type\":\"0\",\"data\":{\"groupId\":\"gs1\",\"topicId\":\"Uj5wn_FqTXirEASvVZRY1w\",\"partition\":0}}"), - Some(s"{\"version\":\"0\",\"data\":{\"snapshotEpoch\":0,\"stateEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"createTimestamp\":$timestamp,\"writeTimestamp\":$timestamp,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") + Some("{\"type\":\"0\",\"data\":{\"snapshotEpoch\":0,\"stateEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") ), parser.parse(serializedRecord( - new ShareSnapshotKey() + new ApiMessageAndVersion(new ShareSnapshotKey() .setGroupId("gs1") .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) .setPartition(0), + ShareCoordinator.SHARE_SNAPSHOT_RECORD_KEY_VERSION), new ApiMessageAndVersion(new ShareSnapshotValue() .setSnapshotEpoch(0) .setStateEpoch(0) .setLeaderEpoch(0) .setStartOffset(0) - .setCreateTimestamp(timestamp) - .setWriteTimestamp(timestamp) - .setStateBatches(util.List.of[ShareSnapshotValue.StateBatch]( + .setStateBatches(List[ShareSnapshotValue.StateBatch]( new ShareSnapshotValue.StateBatch() .setFirstOffset(0) .setLastOffset(4) .setDeliveryState(2) .setDeliveryCount(1) - )), - 0.toShort) + ).asJava), + ShareCoordinator.SHARE_SNAPSHOT_RECORD_VALUE_VERSION) )) ) @@ -1116,24 +1021,25 @@ class DumpLogSegmentsTest { assertEquals( ( Some("{\"type\":\"1\",\"data\":{\"groupId\":\"gs1\",\"topicId\":\"Uj5wn_FqTXirEASvVZRY1w\",\"partition\":0}}"), - Some("{\"version\":\"0\",\"data\":{\"snapshotEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") + Some("{\"type\":\"0\",\"data\":{\"snapshotEpoch\":0,\"leaderEpoch\":0,\"startOffset\":0,\"stateBatches\":[{\"firstOffset\":0,\"lastOffset\":4,\"deliveryState\":2,\"deliveryCount\":1}]}}") ), parser.parse(serializedRecord( - new ShareUpdateKey() + new ApiMessageAndVersion(new ShareUpdateKey() .setGroupId("gs1") .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) .setPartition(0), + ShareCoordinator.SHARE_UPDATE_RECORD_KEY_VERSION), new ApiMessageAndVersion(new ShareUpdateValue() .setSnapshotEpoch(0) .setLeaderEpoch(0) .setStartOffset(0) - .setStateBatches(util.List.of[ShareUpdateValue.StateBatch]( + .setStateBatches(List[ShareUpdateValue.StateBatch]( new ShareUpdateValue.StateBatch() .setFirstOffset(0) .setLastOffset(4) .setDeliveryState(2) .setDeliveryCount(1) - )), + ).asJava), 0.toShort) )) ) @@ -1145,10 +1051,13 @@ class DumpLogSegmentsTest { Some("") ), parser.parse(serializedRecord( - new ShareSnapshotKey() - .setGroupId("gs1") - .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) - .setPartition(0), + new ApiMessageAndVersion( + new ShareSnapshotKey() + .setGroupId("gs1") + .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) + .setPartition(0), + 0.toShort + ), null )) ) @@ -1161,12 +1070,20 @@ class DumpLogSegmentsTest { ), None ), - parser.parse( - TestUtils.singletonRecords( - key = ByteBuffer.allocate(2).putShort(Short.MaxValue).array(), - value = Array.empty - ).records.iterator.next - ) + parser.parse(serializedRecord( + new ApiMessageAndVersion( + new ShareSnapshotKey() + .setGroupId("group") + .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) + .setPartition(0), + Short.MaxValue // Invalid record id. + ), + new ApiMessageAndVersion( + new ShareSnapshotValue() + .setSnapshotEpoch(0), + 0.toShort + ) + )) ) // Any parsing error is swallowed and reported. @@ -1179,10 +1096,13 @@ class DumpLogSegmentsTest { None ), parser.parse(serializedRecord( - new ShareUpdateKey() - .setGroupId("group") - .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) - .setPartition(0), + new ApiMessageAndVersion( + new ShareUpdateKey() + .setGroupId("group") + .setTopicId(Uuid.fromString("Uj5wn_FqTXirEASvVZRY1w")) + .setPartition(0), + 1.toShort + ), new ApiMessageAndVersion( new ShareSnapshotValue(), // incorrect class to deserialize the snapshot update value 0.toShort @@ -1190,19 +1110,4 @@ class DumpLogSegmentsTest { )) ) } - - private def createMetadataLogConfig( - internalLogSegmentBytes: Int, - logSegmentMillis: Long, - retentionMaxBytes: Long, - retentionMillis: Long - ): MetadataLogConfig = { - val config: util.Map[String, Any] = util.Map.of( - MetadataLogConfig.INTERNAL_METADATA_LOG_SEGMENT_BYTES_CONFIG, internalLogSegmentBytes, - MetadataLogConfig.METADATA_LOG_SEGMENT_MILLIS_CONFIG, logSegmentMillis, - MetadataLogConfig.METADATA_MAX_RETENTION_BYTES_CONFIG, retentionMaxBytes, - MetadataLogConfig.METADATA_MAX_RETENTION_MILLIS_CONFIG, retentionMillis, - ) - new MetadataLogConfig(new AbstractConfig(MetadataLogConfig.CONFIG_DEF, config, false)) - } } diff --git a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala index a36ad51572a3f..9fde243ec1997 100644 --- a/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala +++ b/core/src/test/scala/unit/kafka/tools/StorageToolTest.scala @@ -32,7 +32,7 @@ import org.apache.kafka.metadata.bootstrap.BootstrapDirectory import org.apache.kafka.metadata.properties.{MetaPropertiesEnsemble, PropertiesUtils} import org.apache.kafka.metadata.storage.FormatterException import org.apache.kafka.network.SocketServerConfigs -import org.apache.kafka.raft.{MetadataLogConfig, QuorumConfig} +import org.apache.kafka.raft.QuorumConfig import org.apache.kafka.server.config.{KRaftConfigs, ServerConfigs, ServerLogConfigs} import org.junit.jupiter.api.Assertions.{assertEquals, assertFalse, assertThrows, assertTrue} import org.junit.jupiter.api.{Test, Timeout} @@ -67,7 +67,7 @@ class StorageToolTest { @Test def testConfigToLogDirectoriesWithMetaLogDir(): Unit = { val properties = newSelfManagedProperties() - properties.setProperty(MetadataLogConfig.METADATA_LOG_DIR_CONFIG, "/tmp/baz") + properties.setProperty(KRaftConfigs.METADATA_LOG_DIR_CONFIG, "/tmp/baz") val config = new KafkaConfig(properties) assertEquals(Seq("/tmp/bar", "/tmp/baz", "/tmp/foo"), StorageTool.configToLogDirectories(config)) @@ -79,7 +79,7 @@ class StorageToolTest { val tempDir = TestUtils.tempDir() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -97,7 +97,7 @@ Found problem: tempDir.delete() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) assertEquals(s"""Found problem: ${tempDir.toString} does not exist @@ -111,7 +111,7 @@ Found problem: val tempFile = TestUtils.tempFile() try { assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempFile.toString))) + infoCommand(new PrintStream(stream), true, Seq(tempFile.toString))) assertEquals(s"""Found problem: ${tempFile.toString} is not a directory @@ -125,13 +125,13 @@ Found problem: val tempDir = TestUtils.tempDir() try { Files.write(tempDir.toPath.resolve(MetaPropertiesEnsemble.META_PROPERTIES_NAME), - String.join("\n", util.List.of( + String.join("\n", util.Arrays.asList( "version=1", "node.id=1", "cluster.id=XcZZOzUqS4yHOjhMQB6JLQ")). getBytes(StandardCharsets.UTF_8)) assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), kraftMode = false, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), false, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -150,13 +150,13 @@ Found problem: val tempDir = TestUtils.tempDir() try { Files.write(tempDir.toPath.resolve(MetaPropertiesEnsemble.META_PROPERTIES_NAME), - String.join("\n", util.List.of( + String.join("\n", util.Arrays.asList( "version=0", "broker.id=1", "cluster.id=26c36907-4158-4a35-919d-6534229f5241")). getBytes(StandardCharsets.UTF_8)) assertEquals(1, StorageTool. - infoCommand(new PrintStream(stream), kraftMode = true, Seq(tempDir.toString))) + infoCommand(new PrintStream(stream), true, Seq(tempDir.toString))) assertEquals(s"""Found log directory: ${tempDir.toString} @@ -193,7 +193,7 @@ Found problem: ): Int = { val tempDir = TestUtils.tempDir() try { - val configPathString = new File(tempDir.getAbsolutePath, "format.props").toString + val configPathString = new File(tempDir.getAbsolutePath(), "format.props").toString PropertiesUtils.writePropertiesFile(properties, configPathString, true) val arguments = ListBuffer[String]("format", "--cluster-id", "XcZZOzUqS4yHOjhMQB6JLQ") @@ -234,7 +234,7 @@ Found problem: val unavailableDir1 = TestUtils.tempFile() val properties = new Properties() properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", s"$availableDir1,$unavailableDir1") + properties.setProperty("log.dirs", s"${availableDir1},${unavailableDir1}") val stream = new ByteArrayOutputStream() assertEquals(0, runFormatCommand(stream, properties)) @@ -273,7 +273,7 @@ Found problem: assertEquals(0, runFormatCommand(stream, properties)) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream2 = new ByteArrayOutputStream() - assertEquals(0, runFormatCommand(stream2, properties, Seq(), ignoreFormatted = true)) + assertEquals(0, runFormatCommand(stream2, properties, Seq(), true)) } @Test @@ -282,7 +282,7 @@ Found problem: val unavailableDir2 = TestUtils.tempFile() val properties = new Properties() properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", s"$unavailableDir1,$unavailableDir2") + properties.setProperty("log.dirs", s"${unavailableDir1},${unavailableDir2}") val stream = new ByteArrayOutputStream() assertEquals("No available log directories to format.", assertThrows(classOf[FormatterException], () => runFormatCommand(stream, properties)).getMessage) @@ -306,20 +306,6 @@ Found problem: "Failed to find content in output: " + stream.toString()) } - @Test - def testFormatWithUnsupportedReleaseVersion(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - val failure = assertThrows(classOf[TerseFailure], () => - runFormatCommand(stream, properties, Seq("--release-version", "3.3-IV1"))).getMessage - assertTrue(failure.contains("Unknown metadata.version '3.3-IV1'")) - assertTrue(failure.contains(MetadataVersion.MINIMUM_VERSION.version)) - assertTrue(failure.contains(MetadataVersion.latestProduction().version)) - } - @Test def testFormatWithReleaseVersionAsFeature(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) @@ -339,7 +325,7 @@ Found problem: properties.putAll(defaultStaticQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) assertEquals("Unsupported feature: non.existent.feature. Supported features are: " + - "eligible.leader.replicas.version, group.version, kraft.version, share.version, streams.version, transaction.version", + "eligible.leader.replicas.version, group.version, kraft.version, transaction.version", assertThrows(classOf[FormatterException], () => runFormatCommand(new ByteArrayOutputStream(), properties, Seq("--feature", "non.existent.feature=20"))).getMessage) @@ -385,92 +371,11 @@ Found problem: "Failed to find content in output: " + stream.toString()) } - @Test - def testFormatWithReleaseVersionAndFeatureOverride(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - assertEquals(0, runFormatCommand(stream, properties, Seq( - "--release-version", "3.7-IV0", - "--feature", "share.version=1"))) - - // Verify that the feature override is applied by checking the bootstrap metadata - val bootstrapMetadata = new BootstrapDirectory(availableDirs.head.toString).read - - // Verify that the share.version feature is set to 1 as specified - assertEquals(1.toShort, bootstrapMetadata.featureLevel("share.version"), - "share.version should be set to 1") - - // Verify the command output contains the expected release version - assertTrue(stream.toString().contains("3.7-IV0"), - "Failed to find release version in output: " + stream.toString()) - - // Verify that the format command completed successfully with features - assertTrue(stream.toString().contains("Formatting metadata directory"), - "Failed to find formatting message in output: " + stream.toString()) - } - - @Test - def testFormatWithMultipleFeatures(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - assertEquals(0, runFormatCommand(stream, properties, Seq( - "--release-version", "3.8-IV0", - "--feature", "share.version=1", - "--feature", "transaction.version=2", - "--feature", "group.version=1"))) - - // Verify that all features are properly bootstrapped by checking the bootstrap metadata - val bootstrapMetadata = new BootstrapDirectory(availableDirs.head.toString).read - - // Verify that all specified features are set correctly - assertEquals(1.toShort, bootstrapMetadata.featureLevel("share.version"), - "share.version should be set to 1") - assertEquals(2.toShort, bootstrapMetadata.featureLevel("transaction.version"), - "transaction.version should be set to 2") - assertEquals(1.toShort, bootstrapMetadata.featureLevel("group.version"), - "group.version should be set to 1") - - // Verify the command output contains the expected release version - assertTrue(stream.toString().contains("3.8-IV0"), - "Failed to find release version in output: " + stream.toString()) - - // Verify that the format command completed successfully with multiple features - assertTrue(stream.toString().contains("Formatting metadata directory"), - "Failed to find formatting message in output: " + stream.toString()) - } - - @Test - def testFormatWithInvalidFeatureThrowsError(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultStaticQuorumProperties) - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - - // Test with an invalid feature that doesn't exist - val exception = assertThrows(classOf[FormatterException], () => { - runFormatCommand(stream, properties, Seq( - "--release-version", "3.7-IV0", - "--feature", "stream.version=1")) - }) - - assertTrue(exception.getMessage.contains("Unsupported feature: stream.version.")) - } - @Test def testFormatWithStandaloneFlagOnBrokerFails(): Unit = { val availableDirs = Seq(TestUtils.tempDir()) val properties = new Properties() - properties.setProperty("process.roles", "broker") - properties.setProperty("node.id", "0") - properties.setProperty("controller.listener.names", "CONTROLLER") - properties.setProperty("controller.quorum.bootstrap.servers", "localhost:9093") + properties.putAll(defaultStaticQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream = new ByteArrayOutputStream() val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--standalone") @@ -479,58 +384,6 @@ Found problem: () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage) } - @Test - def testFormatWithStandaloneFailsWithStaticVotersConfig(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultDynamicQuorumProperties) - properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--standalone") - assertEquals("You cannot specify controller.quorum.voters and " + - "format the node with --initial-controllers or --standalone. If you " + - "want to use dynamic quorum, please remove controller.quorum.voters and " + - "specify controller.quorum.bootstrap.servers instead.", - assertThrows(classOf[TerseFailure], - () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage - ) - } - - @Test - def testFormatWithInitialControllersFailsWithStaticVotersConfig(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultDynamicQuorumProperties) - properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - val arguments = ListBuffer[String]( - "--release-version", "3.9-IV0", - "--initial-controllers", - "0@localhost:8020:K90IZ-0DRNazJ49kCZ1EMQ," - ) - assertEquals("You cannot specify controller.quorum.voters and " + - "format the node with --initial-controllers or --standalone. If you " + - "want to use dynamic quorum, please remove controller.quorum.voters and " + - "specify controller.quorum.bootstrap.servers instead.", - assertThrows(classOf[TerseFailure], - () => runFormatCommand(stream, properties, arguments.toSeq)).getMessage - ) - } - - @Test - def testFormatWithNoInitialControllersPassesWithVotersConfig(): Unit = { - val availableDirs = Seq(TestUtils.tempDir()) - val properties = new Properties() - properties.putAll(defaultDynamicQuorumProperties) - properties.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "0@localhost:8020") - properties.setProperty("log.dirs", availableDirs.mkString(",")) - val stream = new ByteArrayOutputStream() - val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--no-initial-controllers") - assertEquals(0, runFormatCommand(stream, properties, arguments.toSeq)) - } - @ParameterizedTest @ValueSource(booleans = Array(false, true)) def testFormatWithStandaloneFlag(setKraftVersionFeature: Boolean): Unit = { @@ -605,14 +458,19 @@ Found problem: Seq("--release-version", "3.9-IV0"))).getMessage) } - @Test - def testFormatWithNoInitialControllersSucceedsOnController(): Unit = { + @ParameterizedTest + @ValueSource(booleans = Array(false, true)) + def testFormatWithNoInitialControllersSucceedsOnController(setKraftVersionFeature: Boolean): Unit = { val availableDirs = Seq(TestUtils.tempDir()) val properties = new Properties() properties.putAll(defaultDynamicQuorumProperties) properties.setProperty("log.dirs", availableDirs.mkString(",")) val stream = new ByteArrayOutputStream() val arguments = ListBuffer[String]("--release-version", "3.9-IV0", "--no-initial-controllers") + if (setKraftVersionFeature) { + arguments += "--feature" + arguments += "kraft.version=1" + } assertEquals(0, runFormatCommand(stream, properties, arguments.toSeq)) assertTrue(stream.toString(). contains("Formatting metadata directory %s".format(availableDirs.head)), @@ -730,18 +588,18 @@ Found problem: runVersionMappingCommand(stream, "2.9-IV2") }) - assertEquals("Unknown metadata.version '2.9-IV2'. Supported metadata.version are: " + - MetadataVersion.metadataVersionsToString(MetadataVersion.MINIMUM_VERSION, MetadataVersion.latestTesting()), - exception.getMessage + assertEquals("Unknown release version '2.9-IV2'." + + " Supported versions are: " + MetadataVersion.MINIMUM_VERSION.version + + " to " + MetadataVersion.LATEST_PRODUCTION.version, exception.getMessage ) val exception2 = assertThrows(classOf[TerseFailure], () => { runVersionMappingCommand(stream, "invalid") }) - assertEquals("Unknown metadata.version 'invalid'. Supported metadata.version are: " + - MetadataVersion.metadataVersionsToString(MetadataVersion.MINIMUM_VERSION, MetadataVersion.latestTesting()), - exception2.getMessage + assertEquals("Unknown release version 'invalid'." + + " Supported versions are: " + MetadataVersion.MINIMUM_VERSION.version + + " to " + MetadataVersion.LATEST_PRODUCTION.version, exception2.getMessage ) } diff --git a/core/src/test/scala/unit/kafka/utils/CoreUtilsTest.scala b/core/src/test/scala/unit/kafka/utils/CoreUtilsTest.scala index 73a2403870fe1..5f703c7be3375 100755 --- a/core/src/test/scala/unit/kafka/utils/CoreUtilsTest.scala +++ b/core/src/test/scala/unit/kafka/utils/CoreUtilsTest.scala @@ -17,12 +17,16 @@ package kafka.utils +import java.util +import java.util.{Base64, UUID} import java.util.concurrent.locks.ReentrantLock +import java.nio.ByteBuffer import java.util.regex.Pattern import org.junit.jupiter.api.Assertions._ import org.junit.jupiter.api.Test import kafka.utils.CoreUtils.inLock import org.apache.kafka.common.KafkaException +import org.apache.kafka.common.utils.Utils import org.slf4j.event.Level @@ -63,6 +67,23 @@ class CoreUtilsTest extends Logging { assertEquals(Some("test"+Level.ERROR),loggedMessage) } + @Test + def testReadBytes(): Unit = { + for (testCase <- List("", "a", "abcd")) { + val bytes = testCase.getBytes + assertTrue(util.Arrays.equals(bytes, Utils.readBytes(ByteBuffer.wrap(bytes)))) + } + } + + @Test + def testAbs(): Unit = { + assertEquals(0, Utils.abs(Integer.MIN_VALUE)) + assertEquals(1, Utils.abs(-1)) + assertEquals(0, Utils.abs(0)) + assertEquals(1, Utils.abs(1)) + assertEquals(Integer.MAX_VALUE, Utils.abs(Integer.MAX_VALUE)) + } + @Test def testInLock(): Unit = { val lock = new ReentrantLock() @@ -73,4 +94,29 @@ class CoreUtilsTest extends Logging { assertEquals(2, result) assertFalse(lock.isLocked, "Should be unlocked") } + + @Test + def testUrlSafeBase64EncodeUUID(): Unit = { + + // Test a UUID that has no + or / characters in base64 encoding [a149b4a3-06e1-4b49-a8cb-8a9c4a59fa46 ->(base64)-> oUm0owbhS0moy4qcSln6Rg==] + val clusterId1 = Base64.getUrlEncoder.withoutPadding.encodeToString(CoreUtils.getBytesFromUuid(UUID.fromString( + "a149b4a3-06e1-4b49-a8cb-8a9c4a59fa46"))) + assertEquals(clusterId1, "oUm0owbhS0moy4qcSln6Rg") + assertEquals(clusterId1.length, 22) + assertTrue(clusterIdPattern.matcher(clusterId1).matches()) + + // Test a UUID that has + or / characters in base64 encoding [d418ec02-277e-4853-81e6-afe30259daec ->(base64)-> 1BjsAid+SFOB5q/jAlna7A==] + val clusterId2 = Base64.getUrlEncoder.withoutPadding.encodeToString(CoreUtils.getBytesFromUuid(UUID.fromString( + "d418ec02-277e-4853-81e6-afe30259daec"))) + assertEquals(clusterId2, "1BjsAid-SFOB5q_jAlna7A") + assertEquals(clusterId2.length, 22) + assertTrue(clusterIdPattern.matcher(clusterId2).matches()) + } + + @Test + def testGenerateUuidAsBase64(): Unit = { + val clusterId = CoreUtils.generateUuidAsBase64() + assertEquals(clusterId.length, 22) + assertTrue(clusterIdPattern.matcher(clusterId).matches()) + } } diff --git a/core/src/test/scala/unit/kafka/utils/PoolTest.scala b/core/src/test/scala/unit/kafka/utils/PoolTest.scala new file mode 100644 index 0000000000000..4f883296ef7cf --- /dev/null +++ b/core/src/test/scala/unit/kafka/utils/PoolTest.scala @@ -0,0 +1,40 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package kafka.utils + +import org.junit.jupiter.api.Assertions.assertEquals +import org.junit.jupiter.api.Test + + +class PoolTest { + @Test + def testRemoveAll(): Unit = { + val pool = new Pool[Int, String] + pool.put(1, "1") + pool.put(2, "2") + pool.put(3, "3") + + assertEquals(3, pool.size) + + pool.removeAll(Seq(1, 2)) + assertEquals(1, pool.size) + assertEquals("3", pool.get(3)) + pool.removeAll(Seq(3)) + assertEquals(0, pool.size) + } +} diff --git a/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala new file mode 100644 index 0000000000000..7afa2178f7300 --- /dev/null +++ b/core/src/test/scala/unit/kafka/utils/SchedulerTest.scala @@ -0,0 +1,206 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package kafka.utils + +import java.util.Properties +import java.util.concurrent.atomic._ +import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, Executors, TimeUnit} +import kafka.log.UnifiedLog +import kafka.utils.TestUtils.retry +import org.apache.kafka.coordinator.transaction.TransactionLogConfig +import org.apache.kafka.server.util.{KafkaScheduler, MockTime} +import org.apache.kafka.storage.internals.log.{LocalLog, LogConfig, LogDirFailureChannel, LogLoader, LogSegments, ProducerStateManager, ProducerStateManagerConfig} +import org.apache.kafka.storage.log.metrics.BrokerTopicStats +import org.junit.jupiter.api.Assertions._ +import org.junit.jupiter.api.{AfterEach, BeforeEach, Test, Timeout} + + +class SchedulerTest { + + val scheduler = new KafkaScheduler(1) + val mockTime = new MockTime + val counter1 = new AtomicInteger(0) + val counter2 = new AtomicInteger(0) + + @BeforeEach + def setup(): Unit = { + scheduler.startup() + } + + @AfterEach + def teardown(): Unit = { + scheduler.shutdown() + } + + @Test + def testMockSchedulerNonPeriodicTask(): Unit = { + mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) + mockTime.scheduler.scheduleOnce("test2", () => counter2.getAndIncrement(), 100) + assertEquals(0, counter1.get, "Counter1 should not be incremented prior to task running.") + assertEquals(0, counter2.get, "Counter2 should not be incremented prior to task running.") + mockTime.sleep(1) + assertEquals(1, counter1.get, "Counter1 should be incremented") + assertEquals(0, counter2.get, "Counter2 should not be incremented") + mockTime.sleep(100000) + assertEquals(1, counter1.get, "More sleeping should not result in more incrementing on counter1.") + assertEquals(1, counter2.get, "Counter2 should now be incremented.") + } + + @Test + def testMockSchedulerPeriodicTask(): Unit = { + mockTime.scheduler.schedule("test1", () => counter1.getAndIncrement(), 1, 1) + mockTime.scheduler.schedule("test2", () => counter2.getAndIncrement(), 100, 100) + assertEquals(0, counter1.get, "Counter1 should not be incremented prior to task running.") + assertEquals(0, counter2.get, "Counter2 should not be incremented prior to task running.") + mockTime.sleep(1) + assertEquals(1, counter1.get, "Counter1 should be incremented") + assertEquals(0, counter2.get, "Counter2 should not be incremented") + mockTime.sleep(100) + assertEquals(101, counter1.get, "Counter1 should be incremented 101 times") + assertEquals(1, counter2.get, "Counter2 should not be incremented once") + } + + @Test + def testReentrantTaskInMockScheduler(): Unit = { + mockTime.scheduler.scheduleOnce("test1", () => mockTime.scheduler.scheduleOnce("test2", () => counter2.getAndIncrement(), 0), 1) + mockTime.sleep(1) + assertEquals(1, counter2.get) + } + + @Test + def testNonPeriodicTask(): Unit = { + scheduler.scheduleOnce("test", () => counter1.getAndIncrement()) + retry(30000) { + assertEquals(counter1.get, 1) + } + Thread.sleep(5) + assertEquals(1, counter1.get, "Should only run once") + } + + @Test + def testNonPeriodicTaskWhenPeriodIsZero(): Unit = { + scheduler.schedule("test", () => counter1.getAndIncrement(), 0, 0) + retry(30000) { + assertEquals(counter1.get, 1) + } + Thread.sleep(5) + assertEquals(1, counter1.get, "Should only run once") + } + + @Test + def testPeriodicTask(): Unit = { + scheduler.schedule("test", () => counter1.getAndIncrement(), 0, 5) + retry(30000) { + assertTrue(counter1.get >= 20, "Should count to 20") + } + } + + @Test + def testRestart(): Unit = { + // schedule a task to increment a counter + mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) + mockTime.sleep(1) + assertEquals(1, counter1.get()) + + // restart the scheduler + mockTime.scheduler.shutdown() + mockTime.scheduler.startup() + + // schedule another task to increment the counter + mockTime.scheduler.scheduleOnce("test1", () => counter1.getAndIncrement(), 1) + mockTime.sleep(1) + assertEquals(2, counter1.get()) + } + + @Test + def testUnscheduleProducerTask(): Unit = { + val tmpDir = TestUtils.tempDir() + val logDir = TestUtils.randomPartitionLogDir(tmpDir) + val logConfig = new LogConfig(new Properties()) + val brokerTopicStats = new BrokerTopicStats + val maxTransactionTimeoutMs = 5 * 60 * 1000 + val maxProducerIdExpirationMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_MS_DEFAULT + val producerIdExpirationCheckIntervalMs = TransactionLogConfig.PRODUCER_ID_EXPIRATION_CHECK_INTERVAL_MS_DEFAULT + val topicPartition = UnifiedLog.parseTopicPartitionName(logDir) + val logDirFailureChannel = new LogDirFailureChannel(10) + val segments = new LogSegments(topicPartition) + val leaderEpochCache = UnifiedLog.createLeaderEpochCache( + logDir, topicPartition, logDirFailureChannel, None, mockTime.scheduler) + val producerStateManager = new ProducerStateManager(topicPartition, logDir, + maxTransactionTimeoutMs, new ProducerStateManagerConfig(maxProducerIdExpirationMs, false), mockTime) + val offsets = new LogLoader( + logDir, + topicPartition, + logConfig, + scheduler, + mockTime, + logDirFailureChannel, + true, + segments, + 0L, + 0L, + leaderEpochCache, + producerStateManager, + new ConcurrentHashMap[String, Integer], + false + ).load() + val localLog = new LocalLog(logDir, logConfig, segments, offsets.recoveryPoint, + offsets.nextOffsetMetadata, scheduler, mockTime, topicPartition, logDirFailureChannel) + val log = new UnifiedLog(logStartOffset = offsets.logStartOffset, + localLog = localLog, + brokerTopicStats, producerIdExpirationCheckIntervalMs, + leaderEpochCache, producerStateManager, + _topicId = None, keepPartitionMetadataFile = true) + assertTrue(scheduler.taskRunning(log.producerExpireCheck)) + log.close() + assertFalse(scheduler.taskRunning(log.producerExpireCheck)) + } + + /** + * Verify that scheduler lock is not held when invoking task method, allowing new tasks to be scheduled + * when another is being executed. This is required to avoid deadlocks when: + * a) Thread1 executes a task which attempts to acquire LockA + * b) Thread2 holding LockA attempts to schedule a new task + */ + @Timeout(15) + @Test + def testMockSchedulerLocking(): Unit = { + val initLatch = new CountDownLatch(1) + val completionLatch = new CountDownLatch(2) + val taskLatches = List(new CountDownLatch(1), new CountDownLatch(1)) + def scheduledTask(taskLatch: CountDownLatch): Unit = { + initLatch.countDown() + assertTrue(taskLatch.await(30, TimeUnit.SECONDS), "Timed out waiting for latch") + completionLatch.countDown() + } + mockTime.scheduler.scheduleOnce("test1", () => scheduledTask(taskLatches.head), 1) + val tickExecutor = Executors.newSingleThreadScheduledExecutor() + try { + tickExecutor.scheduleWithFixedDelay(() => mockTime.sleep(1), 0, 1, TimeUnit.MILLISECONDS) + + // wait for first task to execute and then schedule the next task while the first one is running + assertTrue(initLatch.await(10, TimeUnit.SECONDS)) + mockTime.scheduler.scheduleOnce("test2", () => scheduledTask(taskLatches(1)), 1) + + taskLatches.foreach(_.countDown()) + assertTrue(completionLatch.await(10, TimeUnit.SECONDS), "Tasks did not complete") + + } finally { + tickExecutor.shutdownNow() + } + } +} diff --git a/core/src/test/scala/unit/kafka/utils/TestUtils.scala b/core/src/test/scala/unit/kafka/utils/TestUtils.scala index 8b0affae9eab9..d03834700ac8b 100755 --- a/core/src/test/scala/unit/kafka/utils/TestUtils.scala +++ b/core/src/test/scala/unit/kafka/utils/TestUtils.scala @@ -17,10 +17,11 @@ package kafka.utils import com.yammer.metrics.core.{Histogram, Meter} -import kafka.log.LogManager +import kafka.log._ import kafka.network.RequestChannel import kafka.security.JaasTestUtils import kafka.server._ +import kafka.server.metadata.{ConfigRepository, MockConfigRepository} import kafka.utils.Implicits._ import org.apache.kafka.clients.admin.AlterConfigOp.OpType import org.apache.kafka.clients.admin._ @@ -33,7 +34,7 @@ import org.apache.kafka.common.compress.Compression import org.apache.kafka.common.config.{ConfigException, ConfigResource} import org.apache.kafka.common.errors.{OperationNotAttemptedException, TopicExistsException, UnknownTopicOrPartitionException} import org.apache.kafka.common.header.Header -import org.apache.kafka.common.internals.{Plugin, Topic} +import org.apache.kafka.common.internals.Topic import org.apache.kafka.common.memory.MemoryPool import org.apache.kafka.common.metrics.Metrics import org.apache.kafka.common.network.{ClientInformation, ConnectionMode, ListenerName} @@ -46,7 +47,7 @@ import org.apache.kafka.common.serialization._ import org.apache.kafka.common.utils.Utils.formatAddress import org.apache.kafka.coordinator.group.GroupCoordinatorConfig import org.apache.kafka.coordinator.transaction.TransactionLogConfig -import org.apache.kafka.metadata.{ConfigRepository, LeaderAndIsr, MockConfigRepository} +import org.apache.kafka.metadata.LeaderAndIsr import org.apache.kafka.network.SocketServerConfigs import org.apache.kafka.network.metrics.RequestChannelMetrics import org.apache.kafka.raft.QuorumConfig @@ -56,7 +57,7 @@ import org.apache.kafka.server.config.{DelegationTokenManagerConfigs, KRaftConfi import org.apache.kafka.server.metrics.KafkaYammerMetrics import org.apache.kafka.server.util.MockTime import org.apache.kafka.storage.internals.checkpoint.OffsetCheckpointFile -import org.apache.kafka.storage.internals.log.{CleanerConfig, LogCleaner, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig, UnifiedLog} +import org.apache.kafka.storage.internals.log.{CleanerConfig, LogConfig, LogDirFailureChannel, ProducerStateManagerConfig} import org.apache.kafka.storage.log.metrics.BrokerTopicStats import org.apache.kafka.test.{TestUtils => JTestUtils} import org.junit.jupiter.api.Assertions._ @@ -72,7 +73,7 @@ import java.time.Duration import java.util import java.util.concurrent._ import java.util.concurrent.atomic.AtomicBoolean -import java.util.{Optional, Properties} +import java.util.{Collections, Optional, Properties} import scala.collection.mutable.ArrayBuffer import scala.collection.{Map, Seq, mutable} import scala.concurrent.duration.FiniteDuration @@ -181,7 +182,7 @@ object TestUtils extends Logging { listenerName: ListenerName ): String = { brokers.map { s => - val listener = s.config.effectiveAdvertisedBrokerListeners.find(_.listener == listenerName.value).getOrElse( + val listener = s.config.effectiveAdvertisedBrokerListeners.find(_.listenerName == listenerName).getOrElse( sys.error(s"Could not find listener with name ${listenerName.value}")) formatAddress(listener.host, s.boundPort(listenerName)) }.mkString(",") @@ -339,7 +340,7 @@ object TestUtils extends Logging { topicConfig.forEach((k, v) => configsMap.put(k.toString, v.toString)) val result = if (replicaAssignment.isEmpty) { - admin.createTopics(util.List.of(new NewTopic( + admin.createTopics(Collections.singletonList(new NewTopic( topic, numPartitions, replicationFactor.toShort).configs(configsMap))) } else { val assignment = new util.HashMap[Integer, util.List[Integer]]() @@ -348,7 +349,7 @@ object TestUtils extends Logging { v.foreach(r => replicas.add(r.asInstanceOf[Integer])) assignment.put(k.asInstanceOf[Integer], replicas) } - admin.createTopics(util.List.of(new NewTopic( + admin.createTopics(Collections.singletonList(new NewTopic( topic, assignment).configs(configsMap))) } @@ -410,7 +411,7 @@ object TestUtils extends Logging { topic: String ): TopicDescription = { val describedTopics = admin.describeTopics( - util.Set.of(topic) + Collections.singleton(topic) ).allTopicNames().get() describedTopics.get(topic) } @@ -466,7 +467,7 @@ object TestUtils extends Logging { controllers: Seq[ControllerServer] ): Unit = { try { - admin.deleteTopics(util.List.of(topic)).all().get() + admin.deleteTopics(Collections.singletonList(topic)).all().get() } catch { case e: ExecutionException if e.getCause != null && e.getCause.isInstanceOf[UnknownTopicOrPartitionException] => @@ -589,7 +590,7 @@ object TestUtils extends Logging { newLeaderOpt: Option[Int] = None ): Int = { def getPartitionLeader(topic: String, partition: Int): Option[Int] = { - admin.describeTopics(util.List.of(topic)).allTopicNames().get().get(topic).partitions().asScala. + admin.describeTopics(Collections.singletonList(topic)).allTopicNames().get().get(topic).partitions().asScala. find(_.partition() == partition). flatMap { p => if (p.leader().id() == Node.noNode().id()) { @@ -802,7 +803,7 @@ object TestUtils extends Logging { timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): Unit = { val expectedBrokerIds = brokers.map(_.config.brokerId).toSet waitUntilTrue(() => brokers.forall(server => - expectedBrokerIds.forall(server.dataPlaneRequestProcessor.metadataCache.hasAliveBroker(_)) + expectedBrokerIds == server.dataPlaneRequestProcessor.metadataCache.getAliveBrokers().map(_.id).toSet ), "Timed out waiting for broker metadata to propagate to all servers", timeout) } @@ -821,19 +822,17 @@ object TestUtils extends Logging { waitUntilTrue( () => brokers.forall { broker => if (expectedNumPartitions == 0) { - broker.metadataCache.numPartitions(topic).isEmpty + broker.metadataCache.numPartitions(topic) == None } else { - broker.metadataCache.numPartitions(topic).orElse(null) == expectedNumPartitions + broker.metadataCache.numPartitions(topic) == Some(expectedNumPartitions) } }, s"Topic [$topic] metadata not propagated after 60000 ms", waitTimeMs = 60000L) // since the metadata is propagated, we should get the same metadata from each server (0 until expectedNumPartitions).map { i => - new TopicPartition(topic, i) -> { - brokers.head.metadataCache.getLeaderAndIsr(topic, i).orElseThrow(() => - new IllegalStateException(s"Cannot get topic: $topic, partition: $i in server metadata cache")) - } + new TopicPartition(topic, i) -> brokers.head.metadataCache.getLeaderAndIsr(topic, i).getOrElse( + throw new IllegalStateException(s"Cannot get topic: $topic, partition: $i in server metadata cache")) }.toMap } @@ -852,7 +851,7 @@ object TestUtils extends Logging { timeout: Long = JTestUtils.DEFAULT_MAX_WAIT_MS): LeaderAndIsr = { waitUntilTrue( () => brokers.forall { broker => - OptionConverters.toScala(broker.metadataCache.getLeaderAndIsr(topic, partition)) match { + broker.metadataCache.getLeaderAndIsr(topic, partition) match { case Some(partitionState) => FetchRequest.isValidBrokerId(partitionState.leader) case _ => false } @@ -860,8 +859,8 @@ object TestUtils extends Logging { "Partition [%s,%d] metadata not propagated after %d ms".format(topic, partition, timeout), waitTimeMs = timeout) - brokers.head.metadataCache.getLeaderAndIsr(topic, partition).orElseThrow(() => - new IllegalStateException(s"Cannot get topic: $topic, partition: $partition in server metadata cache")) + brokers.head.metadataCache.getLeaderAndIsr(topic, partition).getOrElse( + throw new IllegalStateException(s"Cannot get topic: $topic, partition: $partition in server metadata cache")) } /** @@ -898,8 +897,9 @@ object TestUtils extends Logging { }.map(_.config.brokerId) } else if (oldLeaderOpt.isDefined) { - debug(s"Checking leader that has changed from $oldLeaderOpt") + debug(s"Checking leader that has changed from ${oldLeaderOpt}") brokers.find { broker => + broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) broker.config.brokerId != oldLeaderOpt.get && broker.replicaManager.onlinePartition(tp).exists(_.leaderLogIfLocal.isDefined) }.map(_.config.brokerId) @@ -935,13 +935,23 @@ object TestUtils extends Logging { } def appendNonsenseToFile(file: File, size: Int): Unit = { - val outputStream = Files.newOutputStream(file.toPath, StandardOpenOption.APPEND) + val outputStream = Files.newOutputStream(file.toPath(), StandardOpenOption.APPEND) try { for (_ <- 0 until size) outputStream.write(random.nextInt(255)) } finally outputStream.close() } + // Note: Call this method in the test itself, rather than the @AfterEach method. + // Because of the assert, if assertNoNonDaemonThreads fails, nothing after would be executed. + def assertNoNonDaemonThreads(threadNamePrefix: String): Unit = { + val nonDaemonThreads = Thread.getAllStackTraces.keySet.asScala.filter { t => + !t.isDaemon && t.isAlive && t.getName.startsWith(threadNamePrefix) + } + val threadCount = nonDaemonThreads.size + assertEquals(0, threadCount, s"Found unexpected $threadCount NonDaemon threads=${nonDaemonThreads.map(t => t.getName).mkString(", ")}") + } + /** * Create new LogManager instance with default configuration for testing */ @@ -952,7 +962,7 @@ object TestUtils extends Logging { time: MockTime = new MockTime(), recoveryThreadsPerDataDir: Int = 4, transactionVerificationEnabled: Boolean = false, - logFn: Option[(TopicPartition, Option[Uuid]) => UnifiedLog] = None, + log: Option[UnifiedLog] = None, remoteStorageSystemEnable: Boolean = false, initialTaskDelayMs: Long = ServerLogConfigs.LOG_INITIAL_TASK_DELAY_MS_DEFAULT): LogManager = { val logManager = new LogManager(logDirs = logDirs.map(_.getAbsoluteFile), @@ -972,18 +982,13 @@ object TestUtils extends Logging { time = time, brokerTopicStats = new BrokerTopicStats, logDirFailureChannel = new LogDirFailureChannel(logDirs.size), + keepPartitionMetadataFile = true, remoteStorageSystemEnable = remoteStorageSystemEnable, - initialTaskDelayMs = initialTaskDelayMs, - cleanerFactory = (cleanerConfig, files, map, logDirFailureChannel, time) => Mockito.spy(new LogCleaner(cleanerConfig, files, map, logDirFailureChannel, time)) - ) + initialTaskDelayMs = initialTaskDelayMs) - if (logFn.isDefined) { + if (log.isDefined) { val spyLogManager = Mockito.spy(logManager) - Mockito.doAnswer(answer => { - val topicPartition = answer.getArgument(0, classOf[TopicPartition]) - val topicId = answer.getArgument(3, classOf[Optional[Uuid]]) - logFn.get(topicPartition, OptionConverters.toScala(topicId)) - }).when(spyLogManager).getOrCreateLog(any(classOf[TopicPartition]), anyBoolean(), anyBoolean(), any(classOf[Optional[Uuid]]), any(classOf[Option[Uuid]])) + Mockito.doReturn(log.get, Nil: _*).when(spyLogManager).getOrCreateLog(any(classOf[TopicPartition]), anyBoolean(), anyBoolean(), any(classOf[Option[Uuid]]), any(classOf[Option[Uuid]])) spyLogManager } else logManager @@ -1094,19 +1099,19 @@ object TestUtils extends Logging { checkpoints.forall(checkpointsPerLogDir => !checkpointsPerLogDir.containsKey(tp)) }), "Cleaner offset for deleted partition should have been removed") waitUntilTrue(() => brokers.forall(broker => - broker.config.logDirs.stream().allMatch { logDir => + broker.config.logDirs.forall { logDir => topicPartitions.forall { tp => !new File(logDir, tp.topic + "-" + tp.partition).exists() } } ), "Failed to soft-delete the data to a delete directory") waitUntilTrue(() => brokers.forall(broker => - broker.config.logDirs.stream().allMatch { logDir => + broker.config.logDirs.forall { logDir => topicPartitions.forall { tp => - !util.List.of(new File(logDir).list()).asScala.exists { partitionDirectoryNames => + !util.Arrays.asList(new File(logDir).list()).asScala.exists { partitionDirectoryNames => partitionDirectoryNames.exists { directoryName => directoryName.startsWith(tp.topic + "-" + tp.partition) && - directoryName.endsWith(UnifiedLog.DELETE_DIR_SUFFIX) + directoryName.endsWith(UnifiedLog.DeleteDirSuffix) } } } @@ -1126,23 +1131,17 @@ object TestUtils extends Logging { new String(bytes, encoding) } - def waitAndVerifyAcls(expected: Set[AccessControlEntry], - authorizerPlugin: Plugin[JAuthorizer], - resource: ResourcePattern, - accessControlEntryFilter: AccessControlEntryFilter = AccessControlEntryFilter.ANY): Unit = { - waitAndVerifyAcls(expected, authorizerPlugin.get, resource, accessControlEntryFilter) - } - def waitAndVerifyAcls(expected: Set[AccessControlEntry], authorizer: JAuthorizer, resource: ResourcePattern, - accessControlEntryFilter: AccessControlEntryFilter): Unit = { + accessControlEntryFilter: AccessControlEntryFilter = AccessControlEntryFilter.ANY): Unit = { val newLine = scala.util.Properties.lineSeparator + val filter = new AclBindingFilter(resource.toFilter, accessControlEntryFilter) waitUntilTrue(() => authorizer.acls(filter).asScala.map(_.entry).toSet == expected, s"expected acls:${expected.mkString(newLine + "\t", newLine + "\t", newLine)}" + s"but got:${authorizer.acls(filter).asScala.map(_.entry).mkString(newLine + "\t", newLine + "\t", newLine)}", - 45000) + 45000) } def consumeTopicRecords[K, V, B <: KafkaBroker]( @@ -1160,7 +1159,7 @@ object TestUtils extends Logging { securityProtocol = securityProtocol, trustStoreFile = trustStoreFile) try { - consumer.subscribe(util.Set.of(topic)) + consumer.subscribe(Collections.singleton(topic)) consumeRecords(consumer, numMessages, waitTime) } finally consumer.close() } @@ -1258,7 +1257,7 @@ object TestUtils extends Logging { else abortedValue } - new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, key, value, util.Set.of(header)) + new ProducerRecord[Array[Byte], Array[Byte]](topic, partition, key, value, Collections.singleton(header)) } def producerRecordWithExpectedTransactionStatus(topic: String, partition: Integer, key: String, value: String, willBeCommitted: Boolean): ProducerRecord[Array[Byte], Array[Byte]] = { @@ -1281,7 +1280,7 @@ object TestUtils extends Logging { if (committed.contains(topicPartition)) consumer.seek(topicPartition, committed(topicPartition)) else - consumer.seekToBeginning(util.List.of(topicPartition)) + consumer.seekToBeginning(Collections.singletonList(topicPartition)) } } @@ -1298,7 +1297,7 @@ object TestUtils extends Logging { (resource, configEntries) }.toMap.asJava } else { - util.Map.of(new ConfigResource(ConfigResource.Type.BROKER, ""), configEntries) + Map(new ConfigResource(ConfigResource.Type.BROKER, "") -> configEntries).asJava } adminClient.incrementalAlterConfigs(configs) } @@ -1327,7 +1326,7 @@ object TestUtils extends Logging { val partitionId = topicPartition.partition def currentLeader: Try[Option[Int]] = Try { - val topicDescription = client.describeTopics(util.List.of(topic)).allTopicNames.get.get(topic) + val topicDescription = client.describeTopics(List(topic).asJava).allTopicNames.get.get(topic) topicDescription.partitions.asScala .find(_.partition == partitionId) .flatMap(partitionState => Option(partitionState.leader)) @@ -1361,7 +1360,7 @@ object TestUtils extends Logging { } def currentIsr(admin: Admin, partition: TopicPartition): Set[Int] = { - val description = admin.describeTopics(util.Set.of(partition.topic)) + val description = admin.describeTopics(Set(partition.topic).asJava) .allTopicNames .get .asScala @@ -1442,12 +1441,12 @@ object TestUtils extends Logging { controllers: Seq[ControllerServer], ): JAuthorizer = { if (controllers.isEmpty) { - brokers.head.authorizerPlugin.get.get + brokers.head.authorizer.get } else { var result: JAuthorizer = null TestUtils.retry(120000) { val active = controllers.filter(_.controller.isActive).head - result = active.authorizerPlugin.get.get + result = active.authorizer.get } result } diff --git a/docker/README.md b/docker/README.md index c4b9d49d0eaf1..5c46ef954edcf 100644 --- a/docker/README.md +++ b/docker/README.md @@ -27,11 +27,11 @@ Building image and running tests using github actions - This is the recommended way to build, test and get a CVE report for the docker image. - Just choose the image type and provide kafka url to `Docker Build Test` workflow. It will generate a test report and CVE report that can be shared with the community. -- kafka-url - This is the url to download kafka tarball from. For example kafka tarball url from [Kafka archive](https://archive.apache.org/dist/kafka). For building RC image this will be an RC tarball url. +- kafka-url - This is the url to download kafka tarball from. For example kafka tarball url from (https://archive.apache.org/dist/kafka). For building RC image this will be an RC tarball url. - image-type - This is the type of image that we intend to build. This will be dropdown menu type selection in the workflow. - - `jvm` image type is for official docker image (to be hosted on apache/kafka) as described in [KIP-975](https://cwiki.apache.org/confluence/x/z5izDw) - - `native` image type is for graalvm based `native` Kafka docker image (to be hosted on apache/kafka-native) as described in [KIP-974](https://cwiki.apache.org/confluence/x/KZizDw). Or you can see [native/README.md](native/README.md) for more information. + - `jvm` image type is for official docker image (to be hosted on apache/kafka) as described in [KIP-975](https://cwiki.apache.org/confluence/display/KAFKA/KIP-975%3A+Docker+Image+for+Apache+Kafka) + - `native` image type is for graalvm based `native` kafka docker image (to be hosted on apache/kafka-native) as described in [KIP-974](https://cwiki.apache.org/confluence/display/KAFKA/KIP-974%3A+Docker+Image+for+GraalVM+based+Native+Kafka+Broker#KIP974:DockerImageforGraalVMbasedNativeKafkaBroker-ImageNaming) - Example(jvm):- To build and test a jvm image type ensuring kafka to be containerised should be https://archive.apache.org/dist/kafka/3.6.0/kafka_2.13-3.6.0.tgz (it is recommended to use scala 2.13 binary tarball), following inputs in github actions workflow are recommended. @@ -130,10 +130,6 @@ python docker_build_test.py kafka/test --image-tag=3.6.0 --image-type=jvm --kafk ``` python docker_build_test.py kafka/test --image-tag=3.8.0 --image-type=native --kafka-url=https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz ``` -- Example(local build archive with jvm or native image type) :- To build and test an image named test with local build archive -``` -python docker_build_test.py kafka/test --image-tag=local-build --image-type= --kafka-archive= -``` Creating a Release Candidate ---------------------------- @@ -145,13 +141,13 @@ Creating a Release Candidate ``` # kafka/test is an example repo. Please replace with the docker hub repo you have push access to. -python docker_release.py kafka/test:3.6.0 --image-type=jvm --kafka-url=https://archive.apache.org/dist/kafka/3.6.0/kafka_2.13-3.6.0.tgz +python docker_release.py kafka/test:3.6.0 --kafka-url --image-type=jvm https://archive.apache.org/dist/kafka/3.6.0/kafka_2.13-3.6.0.tgz ``` - Example(native):- To push an image named test under kafka-native dockerhub namespace with 3.8.0 tag and native image type ensuring kafka to be containerised should be https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz (it is recommended to use scala 2.13 binary tarball), following command can be used. (Make sure you have push access to the docker repo) ``` # kafka-native/test is an example repo. Please replace with the docker hub repo you have push access to. -python docker_release.py kafka-native/test:3.8.0 --image-type=native --kafka-url=https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz +python docker_release.py kafka-native/test:3.8.0 --kafka-url --image-type=native https://archive.apache.org/dist/kafka/3.8.0/kafka_2.13-3.8.0.tgz ``` - Please note that we use docker buildx for preparing the multi-architecture image and pushing it to docker registry. It's possible to encounter build failures because of buildx. Please retry the command in case some buildx related error occurs. @@ -214,6 +210,6 @@ python generate_kafka_pr_template.py --image-type=jvm ``` - kafka-version - This is the version to create the Docker official images static Dockerfile and assets for, as well as the version to build and test the Docker official image for. -- image-type - This is the type of image that we intend to build. This will be dropdown menu type selection in the workflow. `jvm` image type is for official docker image (to be hosted on apache/kafka) as described in [KIP-975](https://cwiki.apache.org/confluence/x/z5izDw). - - **NOTE:** As of now [KIP-1028](https://cwiki.apache.org/confluence/x/0AmpEQ) only aims to release JVM based Docker Official Images and not GraalVM based native Apache Kafka docker image. +- image-type - This is the type of image that we intend to build. This will be dropdown menu type selection in the workflow. `jvm` image type is for official docker image (to be hosted on apache/kafka) as described in [KIP-975](https://cwiki.apache.org/confluence/display/KAFKA/KIP-975%3A+Docker+Image+for+Apache+Kafka). + - **NOTE:** As of now [KIP-1028](https://cwiki.apache.org/confluence/display/KAFKA/KIP-1028%3A+Docker+Official+Image+for+Apache+Kafka) only aims to release JVM based Docker Official Images and not GraalVM based native Apache Kafka docker image. diff --git a/docker/common.py b/docker/common.py index 5099a789da103..9c0f901823fa5 100644 --- a/docker/common.py +++ b/docker/common.py @@ -18,6 +18,9 @@ import subprocess import tempfile import os +from distutils.file_util import copy_file + +from distutils.dir_util import copy_tree import shutil def execute(command): @@ -30,14 +33,12 @@ def get_input(message): raise ValueError("This field cannot be empty") return value -def build_docker_image_runner(command, image_type, kafka_archive=None): +def build_docker_image_runner(command, image_type): temp_dir_path = tempfile.mkdtemp() current_dir = os.path.dirname(os.path.realpath(__file__)) - shutil.copytree(f"{current_dir}/{image_type}", f"{temp_dir_path}/{image_type}", dirs_exist_ok=True) - shutil.copytree(f"{current_dir}/resources", f"{temp_dir_path}/{image_type}/resources", dirs_exist_ok=True) - shutil.copy(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") - if kafka_archive: - shutil.copy(kafka_archive, f"{temp_dir_path}/{image_type}/kafka.tgz") + copy_tree(f"{current_dir}/{image_type}", f"{temp_dir_path}/{image_type}") + copy_tree(f"{current_dir}/resources", f"{temp_dir_path}/{image_type}/resources") + copy_file(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") command = command.replace("$DOCKER_FILE", f"{temp_dir_path}/{image_type}/Dockerfile") command = command.replace("$DOCKER_DIR", f"{temp_dir_path}/{image_type}") try: diff --git a/docker/docker_build_test.py b/docker/docker_build_test.py index 9a986875fe320..793148573f395 100755 --- a/docker/docker_build_test.py +++ b/docker/docker_build_test.py @@ -25,28 +25,31 @@ Example command:- docker_build_test.py --image-tag --image-type --kafka-url - docker_build_test.py --image-tag --image-type --kafka-archive This command will build an image with as image name, as image_tag (it will be latest by default), as image type (jvm by default), for the kafka inside the image and run tests on the image. - can be passed as an alternative to to use a local kafka archive. The path of kafka_archive should be absolute. -b can be passed as additional argument if you just want to build the image. -t can be passed if you just want to run tests on the image. """ from datetime import date import argparse +from distutils.dir_util import copy_tree import shutil from test.docker_sanity_test import run_tests from common import execute, build_docker_image_runner import tempfile import os +def build_docker_image(image, tag, kafka_url, image_type): + image = f'{image}:{tag}' + build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {image} --build-arg kafka_url={kafka_url} --build-arg build_date={date.today()} $DOCKER_DIR", image_type) + def run_docker_tests(image, tag, kafka_url, image_type): temp_dir_path = tempfile.mkdtemp() try: current_dir = os.path.dirname(os.path.realpath(__file__)) - shutil.copytree(f"{current_dir}/test/fixtures", f"{temp_dir_path}/fixtures", dirs_exist_ok=True) + copy_tree(f"{current_dir}/test/fixtures", f"{temp_dir_path}/fixtures") execute(["wget", "-nv", "-O", f"{temp_dir_path}/kafka.tgz", kafka_url]) execute(["mkdir", f"{temp_dir_path}/fixtures/kafka"]) execute(["tar", "xfz", f"{temp_dir_path}/kafka.tgz", "-C", f"{temp_dir_path}/fixtures/kafka", "--strip-components", "1"]) @@ -66,20 +69,16 @@ def run_docker_tests(image, tag, kafka_url, image_type): parser.add_argument("image", help="Image name that you want to keep for the Docker image") parser.add_argument("--image-tag", "-tag", default="latest", dest="tag", help="Image tag that you want to add to the image") parser.add_argument("--image-type", "-type", choices=["jvm", "native"], default="jvm", dest="image_type", help="Image type you want to build") + parser.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image") parser.add_argument("--build", "-b", action="store_true", dest="build_only", default=False, help="Only build the image, don't run tests") parser.add_argument("--test", "-t", action="store_true", dest="test_only", default=False, help="Only run the tests, don't build the image") - - archive_group = parser.add_mutually_exclusive_group(required=True) - archive_group.add_argument("--kafka-url", "-u", dest="kafka_url", help="Kafka url to be used to download kafka binary tarball in the docker image") - archive_group.add_argument("--kafka-archive", "-a", dest="kafka_archive", help="Kafka archive to be used to extract kafka binary tarball in the docker image") - args = parser.parse_args() if args.build_only or not (args.build_only or args.test_only): if args.kafka_url: - build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg kafka_url={args.kafka_url} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type) - elif args.kafka_archive: - build_docker_image_runner(f"docker build -f $DOCKER_FILE -t {args.image}:{args.tag} --build-arg build_date={date.today()} --no-cache --progress=plain $DOCKER_DIR", args.image_type, args.kafka_archive) + build_docker_image(args.image, args.tag, args.kafka_url, args.image_type) + else: + raise ValueError("--kafka-url is a required argument for docker image") if args.test_only or not (args.build_only or args.test_only): run_docker_tests(args.image, args.tag, args.kafka_url, args.image_type) diff --git a/docker/docker_official_image_build_test.py b/docker/docker_official_image_build_test.py index 32869a1f4b209..3da68854c2318 100644 --- a/docker/docker_official_image_build_test.py +++ b/docker/docker_official_image_build_test.py @@ -34,6 +34,7 @@ """ import argparse +from distutils.dir_util import copy_tree import shutil from common import execute from docker_build_test import run_docker_tests @@ -45,11 +46,10 @@ def build_docker_official_image(image, tag, kafka_version, image_type): image = f'{image}:{tag}' current_dir = os.path.dirname(os.path.realpath(__file__)) temp_dir_path = tempfile.mkdtemp() - shutil.copytree(f"{current_dir}/docker_official_images/{kafka_version}/{image_type}", - f"{temp_dir_path}/{image_type}", dirs_exist_ok=True) - shutil.copytree(f"{current_dir}/docker_official_images/{kafka_version}/jvm/resources", - f"{temp_dir_path}/{image_type}/resources", dirs_exist_ok=True) - shutil.copy(f"{current_dir}/server.properties", f"{temp_dir_path}/{image_type}") + copy_tree(f"{current_dir}/docker_official_images/{kafka_version}/{image_type}", + f"{temp_dir_path}/{image_type}") + copy_tree(f"{current_dir}/docker_official_images/{kafka_version}/jvm/resources", + f"{temp_dir_path}/{image_type}/resources") command = f"docker build -f $DOCKER_FILE -t {image} $DOCKER_DIR" command = command.replace("$DOCKER_FILE", f"{temp_dir_path}/{image_type}/Dockerfile") command = command.replace("$DOCKER_DIR", f"{temp_dir_path}/{image_type}") diff --git a/docker/examples/README.md b/docker/examples/README.md index 162e27c711aca..bd6fcefd8282e 100644 --- a/docker/examples/README.md +++ b/docker/examples/README.md @@ -13,25 +13,6 @@ Kafka server can be started using following ways: - File input - Environment variables -Installation Preparation ------------- - -Note that the `Docker` version **must be >= 20.10.4**. - -The prior Docker versions may cause permission errors when running the Kafka container, as they do not correctly set directory permissions when creating container paths like `/opt/kafka/config`. - -If you are using the prior version, you may encounter the following error during container startup: -```text -===> User -uid=1000(appuser) gid=1000(appuser) groups=1000(appuser) -===> Setting default values of environment variables if not already set. -===> Configuring … -Running in KRaft mode… -/opt/kafka/config/ file not writable -``` - -To avoid this, **please upgrade Docker to 20.10.4 or later**. - Running on default configs -------------------------- @@ -147,7 +128,7 @@ Single Node - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - File Input: - Here ssl configs are provided via file input. @@ -167,7 +148,7 @@ Single Node - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:9093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` Multi Node Cluster @@ -219,7 +200,7 @@ Multi Node Cluster - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - Isolated: - Examples are present in `docker-compose-files/cluster/isolated` directory. @@ -258,7 +239,7 @@ Multi Node Cluster - To produce messages using client scripts (Ensure that java version >= 17): ``` # Run from root of the repo - $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --command-config ./docker/examples/fixtures/client-secrets/client-ssl.properties + $ bin/kafka-console-producer.sh --topic test --bootstrap-server localhost:29093 --producer.config ./docker/examples/fixtures/client-secrets/client-ssl.properties ``` - Note that the examples are meant to be tried one at a time, make sure you close an example server before trying out the other to avoid conflicts. diff --git a/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml index 6ca7081bb760e..6541f212d2381 100644 --- a/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/combined/plaintext/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: kafka-1: image: ${IMAGE} @@ -35,8 +36,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' kafka-2: @@ -59,8 +58,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' kafka-3: @@ -83,6 +80,4 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' diff --git a/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml b/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml index 0b80670a2ec34..da97eb815a1bb 100644 --- a/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/combined/ssl/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: kafka-1: image: ${IMAGE} @@ -38,8 +39,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' @@ -72,8 +71,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' @@ -106,8 +103,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' diff --git a/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml index e4994ce9318e5..54ecc00531a05 100644 --- a/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/isolated/plaintext/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: controller-1: image: ${IMAGE} @@ -28,8 +29,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' controller-2: @@ -45,8 +44,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' controller-3: @@ -62,8 +59,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' kafka-1: @@ -86,8 +81,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' depends_on: - controller-1 @@ -114,8 +107,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' depends_on: - controller-1 @@ -142,8 +133,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' depends_on: - controller-1 diff --git a/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml b/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml index aefb84b024cc1..f4b1ddb382c59 100644 --- a/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/cluster/isolated/ssl/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: controller-1: image: ${IMAGE} @@ -28,8 +29,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' controller-2: @@ -45,8 +44,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' controller-3: @@ -62,8 +59,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' kafka-1: @@ -89,8 +84,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' @@ -127,8 +120,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' @@ -165,8 +156,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' KAFKA_SSL_KEYSTORE_FILENAME: 'kafka01.keystore.jks' KAFKA_SSL_KEYSTORE_CREDENTIALS: 'kafka_keystore_creds' diff --git a/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml b/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml index 8a4fd85b9badc..34a7264d3e27e 100644 --- a/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/file-input/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: broker: image: ${IMAGE} diff --git a/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml b/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml index c33c947f073cc..fc6527521ecd4 100644 --- a/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/plaintext/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: broker: image: ${IMAGE} @@ -35,6 +36,4 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_LOG_DIRS: '/tmp/kraft-combined-logs' diff --git a/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml b/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml index cc98efcefcd0d..7539d440183fa 100644 --- a/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml +++ b/docker/examples/docker-compose-files/single-node/ssl/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: broker: image: ${IMAGE} @@ -31,8 +32,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker:29093' KAFKA_LISTENERS: 'SSL://:9093,CONTROLLER://:29093,SSL-INTERNAL://:19093' diff --git a/docker/examples/fixtures/file-input/server.properties b/docker/examples/fixtures/file-input/server.properties index beffc92cad421..2c529289146dd 100644 --- a/docker/examples/fixtures/file-input/server.properties +++ b/docker/examples/fixtures/file-input/server.properties @@ -29,5 +29,3 @@ ssl.truststore.location=/etc/kafka/secrets/kafka.truststore.jks ssl.truststore.password=abcdefgh transaction.state.log.min.isr=1 transaction.state.log.replication.factor=1 -share.coordinator.state.topic.replication.factor=1 -share.coordinator.state.topic.min.isr=1 diff --git a/docker/jvm/Dockerfile b/docker/jvm/Dockerfile index 1c7c95d48698f..f98f50a2e0390 100644 --- a/docker/jvm/Dockerfile +++ b/docker/jvm/Dockerfile @@ -23,27 +23,20 @@ USER root # Get kafka from https://archive.apache.org/dist/kafka and pass the url through build arguments ARG kafka_url -ENV KAFKA_URL=$kafka_url - COPY jsa_launch /etc/kafka/docker/jsa_launch COPY server.properties /etc/kafka/docker/server.properties -COPY *kafka.tgz kafka.tgz - RUN set -eux ; \ apk update ; \ apk upgrade ; \ - apk add --no-cache bash; \ - if [ -n "$KAFKA_URL" ]; then \ - apk add --no-cache wget gcompat gpg gpg-agent procps; \ - wget -nv -O kafka.tgz "$KAFKA_URL"; \ - wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz; \ - fi; \ + apk add --no-cache wget gcompat gpg gpg-agent procps bash; \ mkdir opt/kafka; \ - tar xfz kafka.tgz -C opt/kafka --strip-components 1; + wget -nv -O kafka.tgz "$kafka_url"; \ + wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \ + tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz # Generate jsa files using dynamic CDS for kafka server start command and kafka storage format command RUN /etc/kafka/docker/jsa_launch @@ -60,35 +53,24 @@ USER root ARG kafka_url ARG build_date -ENV KAFKA_URL=$kafka_url - -COPY *kafka.tgz kafka.tgz LABEL org.label-schema.name="kafka" \ org.label-schema.description="Apache Kafka" \ org.label-schema.build-date="${build_date}" \ org.label-schema.vcs-url="https://github.com/apache/kafka" \ - org.opencontainers.image.authors="Apache Kafka" + maintainer="Apache Kafka" -RUN mkdir opt/kafka; \ - set -eux ; \ +RUN set -eux ; \ apk update ; \ apk upgrade ; \ - apk add --no-cache bash; \ - if [ -n "$KAFKA_URL" ]; then \ - apk add --no-cache wget gcompat gpg gpg-agent procps; \ - wget -nv -O kafka.tgz "$KAFKA_URL"; \ - wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ - tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz; \ - rm kafka.tgz kafka.tgz.asc KEYS; \ - apk del wget gpg gpg-agent; \ - else \ - tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ - rm kafka.tgz; \ - fi; \ + apk add --no-cache wget gcompat gpg gpg-agent procps bash; \ + mkdir opt/kafka; \ + wget -nv -O kafka.tgz "$kafka_url"; \ + wget -nv -O kafka.tgz.asc "$kafka_url.asc"; \ + tar xfz kafka.tgz -C /opt/kafka --strip-components 1; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz; \ mkdir -p /var/lib/kafka/data /etc/kafka/secrets; \ mkdir -p /etc/kafka/docker /usr/logs /mnt/shared/config; \ adduser -h /home/appuser -D --shell /bin/bash appuser; \ @@ -97,6 +79,8 @@ RUN mkdir opt/kafka; \ chmod -R ug+w /etc/kafka /var/lib/kafka /etc/kafka/secrets; \ cp /opt/kafka/config/log4j2.yaml /etc/kafka/docker/log4j2.yaml; \ cp /opt/kafka/config/tools-log4j2.yaml /etc/kafka/docker/tools-log4j2.yaml; \ + rm kafka.tgz kafka.tgz.asc KEYS; \ + apk del wget gpg gpg-agent; \ apk cache clean; COPY server.properties /etc/kafka/docker/server.properties diff --git a/docker/native/Dockerfile b/docker/native/Dockerfile index d458dab72ce64..ca85f35562df1 100644 --- a/docker/native/Dockerfile +++ b/docker/native/Dockerfile @@ -29,18 +29,15 @@ ENV TARGET_PATH="$KAFKA_DIR/kafka.Kafka" COPY native-image-configs $NATIVE_CONFIGS_DIR COPY native_command.sh native_command.sh -COPY *kafka.tgz /app - -RUN if [ -n "$KAFKA_URL" ]; then \ - microdnf install wget; \ - wget -nv -O kafka.tgz "$KAFKA_URL"; \ - wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ - wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ - gpg --import KEYS; \ - gpg --batch --verify kafka.tgz.asc kafka.tgz; \ - fi; \ - mkdir $KAFKA_DIR; \ +RUN mkdir $KAFKA_DIR; \ + microdnf install wget; \ + wget -nv -O kafka.tgz "$KAFKA_URL"; \ + wget -nv -O kafka.tgz.asc "$KAFKA_URL.asc"; \ tar xfz kafka.tgz -C $KAFKA_DIR --strip-components 1; \ + wget -nv -O KEYS https://downloads.apache.org/kafka/KEYS; \ + gpg --import KEYS; \ + gpg --batch --verify kafka.tgz.asc kafka.tgz; \ + rm kafka.tgz ; \ # Build the native-binary of the apache kafka using graalVM native-image. /app/native_command.sh $NATIVE_IMAGE_PATH $NATIVE_CONFIGS_DIR $KAFKA_LIBS_DIR $TARGET_PATH @@ -55,7 +52,7 @@ LABEL org.label-schema.name="kafka" \ org.label-schema.description="Apache Kafka" \ org.label-schema.build-date="${build_date}" \ org.label-schema.vcs-url="https://github.com/apache/kafka" \ - org.opencontainers.image.authors="Apache Kafka" + maintainer="Apache Kafka" RUN apk update ; \ apk add --no-cache gcompat ; \ diff --git a/docker/native/README.md b/docker/native/README.md index 4bb56261e5148..42f38bf4e922a 100644 --- a/docker/native/README.md +++ b/docker/native/README.md @@ -4,7 +4,7 @@ - The Native Apache Kafka Docker Image can launch brokers with sub-second startup time and minimal memory footprint by leveraging native Kafka executable. - The native Kafka executable is built by compiling Apache Kafka code ahead-of-time using the [GraalVM native-image tool](https://www.graalvm.org/jdk21/reference-manual/native-image/). - This image is experimental and intended for local development and testing purposes only; it is not recommended for production use. -- This is introduced with [KIP-974](https://cwiki.apache.org/confluence/x/KZizDw). +- This is introduced with [KIP-974](https://cwiki.apache.org/confluence/display/KAFKA/KIP-974%3A+Docker+Image+for+GraalVM+based+Native+Kafka+Broker). ## Native-Image reachability metadata The native-image tool performs static analysis while building a native binary to determine the dynamic features(the dynamic language features of the JVM, including reflection and resource handling, compute the dynamically-accessed program elements such as invoked methods or resource URLs at runtime), but it cannot always exhaustively predict all uses. diff --git a/docker/native/native-image-configs/reflect-config.json b/docker/native/native-image-configs/reflect-config.json index c953ea9fe1d8e..f4263a08898f2 100644 --- a/docker/native/native-image-configs/reflect-config.json +++ b/docker/native/native-image-configs/reflect-config.json @@ -1023,12 +1023,6 @@ "name":"org.apache.kafka.common.security.kerberos.KerberosLogin", "methods":[{"name":"","parameterTypes":[] }] }, -{ - "name":"org.apache.kafka.common.security.oauthbearer.DefaultJwtRetriever" -}, -{ - "name":"org.apache.kafka.common.security.oauthbearer.DefaultJwtValidator" -}, { "name":"org.apache.kafka.common.security.plain.PlainLoginModule", "methods":[{"name":"","parameterTypes":[] }] @@ -1073,18 +1067,6 @@ "name":"org.apache.kafka.metadata.authorizer.StandardAuthorizer", "methods":[{"name":"","parameterTypes":[] }] }, -{ - "name":"org.apache.kafka.server.logger.LoggingController", - "queryAllPublicConstructors":true -}, -{ - "name":"org.apache.kafka.server.logger.LoggingControllerMBean", - "queryAllPublicMethods":true -}, -{ - "name":"org.apache.kafka.server.share.persister.DefaultStatePersister", - "methods":[{"name":"","parameterTypes":["org.apache.kafka.server.share.persister.PersisterStateManager"] }] -}, { "name":"org.apache.kafka.storage.internals.checkpoint.CleanShutdownFileHandler$Content", "allDeclaredFields":true, diff --git a/docker/native/native-image-configs/resource-config.json b/docker/native/native-image-configs/resource-config.json index f7832b3809216..ae5bdb17391c2 100644 --- a/docker/native/native-image-configs/resource-config.json +++ b/docker/native/native-image-configs/resource-config.json @@ -71,9 +71,9 @@ }, { "pattern":"\\Qkafka/kafka-version.properties\\E" }, { - "pattern":"\\Qlinux/aarch64/libzstd-jni-1.5.6-10.so\\E" + "pattern":"\\Qlinux/aarch64/libzstd-jni-1.5.6-6.so\\E" }, { - "pattern":"\\Qlinux/amd64/libzstd-jni-1.5.6-10.so\\E" + "pattern":"\\Qlinux/amd64/libzstd-jni-1.5.6-6.so\\E" }, { "pattern":"\\Qlog4j2.StatusLogger.properties\\E" }, { diff --git a/docker/prepare_docker_official_image_source.py b/docker/prepare_docker_official_image_source.py index bbc539b5c4c54..25d57c53e0fd8 100644 --- a/docker/prepare_docker_official_image_source.py +++ b/docker/prepare_docker_official_image_source.py @@ -33,6 +33,7 @@ from datetime import date import argparse +from distutils.dir_util import copy_tree import os import shutil import re @@ -60,10 +61,12 @@ def remove_args_and_hardcode_values(file_path, kafka_version, kafka_url): args = parser.parse_args() kafka_url = f"https://archive.apache.org/dist/kafka/{args.kafka_version}/kafka_2.13-{args.kafka_version}.tgz" current_dir = os.path.dirname(os.path.realpath(__file__)) - new_dir = os.path.join(current_dir, 'docker_official_images', args.kafka_version) + new_dir = os.path.join( + current_dir, f'docker_official_images', args.kafka_version) if os.path.exists(new_dir): shutil.rmtree(new_dir) os.makedirs(new_dir) - shutil.copytree(os.path.join(current_dir, args.image_type), os.path.join(new_dir, args.image_type), dirs_exist_ok=True) - shutil.copytree(os.path.join(current_dir, 'resources'), os.path.join(new_dir, args.image_type, 'resources'), dirs_exist_ok=True) - remove_args_and_hardcode_values(os.path.join(new_dir, args.image_type, 'Dockerfile'), args.kafka_version, kafka_url) + copy_tree(os.path.join(current_dir, args.image_type), os.path.join(new_dir, args.kafka_version, args.image_type)) + copy_tree(os.path.join(current_dir, 'resources'), os.path.join(new_dir, args.kafka_version, args.image_type, 'resources')) + remove_args_and_hardcode_values( + os.path.join(new_dir, args.kafka_version, args.image_type, 'Dockerfile'), args.kafka_version, kafka_url) diff --git a/docker/server.properties b/docker/server.properties index 8ed486f3736c4..eb0b445c344db 100644 --- a/docker/server.properties +++ b/docker/server.properties @@ -87,8 +87,8 @@ log.dirs=/tmp/kraft-combined-logs num.partitions=1 # The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. -# This value is recommended to be increased based on the installation resources. -num.recovery.threads.per.data.dir=2 +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 ############################# Internal Topic Settings ############################# # The replication factor for the group metadata internal topics "__consumer_offsets", "__share_group_state" and "__transaction_state" diff --git a/docker/test/docker_sanity_test.py b/docker/test/docker_sanity_test.py index 0d21bf47fee0d..d2135fb029507 100644 --- a/docker/test/docker_sanity_test.py +++ b/docker/test/docker_sanity_test.py @@ -65,7 +65,7 @@ def produce_message(self, topic, producer_config, key, value): subprocess.run(["bash", "-c", " ".join(command)]) def consume_message(self, topic, consumer_config): - command = [f"{self.FIXTURES_DIR}/{constants.KAFKA_CONSOLE_CONSUMER}", "--topic", topic, "--formatter-property", "'print.key=true'", "--formatter-property", "'key.separator=:'", "--from-beginning", "--max-messages", "1", "--timeout-ms", f"{constants.CLIENT_TIMEOUT}"] + command = [f"{self.FIXTURES_DIR}/{constants.KAFKA_CONSOLE_CONSUMER}", "--topic", topic, "--property", "'print.key=true'", "--property", "'key.separator=:'", "--from-beginning", "--max-messages", "1", "--timeout-ms", f"{constants.CLIENT_TIMEOUT}"] command.extend(consumer_config) message = subprocess.check_output(["bash", "-c", " ".join(command)]) return message.decode("utf-8").strip() @@ -93,9 +93,9 @@ def broker_metrics_flow(self): errors.append(constants.BROKER_METRICS_ERROR_PREFIX + str(e)) return errors - producer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "client.id=host"] + producer_config = ["--bootstrap-server", "localhost:9092", "--property", "client.id=host"] self.produce_message(constants.BROKER_METRICS_TEST_TOPIC, producer_config, "key", "message") - consumer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "auto.offset.reset=earliest"] + consumer_config = ["--bootstrap-server", "localhost:9092", "--property", "auto.offset.reset=earliest"] message = self.consume_message(constants.BROKER_METRICS_TEST_TOPIC, consumer_config) try: self.assertEqual(message, "key:message") @@ -129,13 +129,13 @@ def ssl_flow(self, ssl_broker_port, test_name, test_error_prefix, topic): return errors producer_config = ["--bootstrap-server", ssl_broker_port, - "--command-config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}"] + "--producer.config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}"] self.produce_message(topic, producer_config, "key", "message") consumer_config = [ "--bootstrap-server", ssl_broker_port, - "--command-property", "auto.offset.reset=earliest", - "--command-config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}", + "--property", "auto.offset.reset=earliest", + "--consumer.config", f"{self.FIXTURES_DIR}/{constants.SSL_CLIENT_CONFIG}", ] message = self.consume_message(topic, consumer_config) try: @@ -155,7 +155,7 @@ def broker_restart_flow(self): errors.append(constants.BROKER_RESTART_ERROR_PREFIX + str(e)) return errors - producer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "client.id=host"] + producer_config = ["--bootstrap-server", "localhost:9092", "--property", "client.id=host"] self.produce_message(constants.BROKER_RESTART_TEST_TOPIC, producer_config, "key", "message") print("Stopping Container") @@ -163,7 +163,7 @@ def broker_restart_flow(self): print("Resuming Container") self.resume_container() - consumer_config = ["--bootstrap-server", "localhost:9092", "--command-property", "auto.offset.reset=earliest"] + consumer_config = ["--bootstrap-server", "localhost:9092", "--property", "auto.offset.reset=earliest"] message = self.consume_message(constants.BROKER_RESTART_TEST_TOPIC, consumer_config) try: self.assertEqual(message, "key:message") diff --git a/docker/test/fixtures/file-input/server.properties b/docker/test/fixtures/file-input/server.properties index eda59db02f114..3beea51711235 100644 --- a/docker/test/fixtures/file-input/server.properties +++ b/docker/test/fixtures/file-input/server.properties @@ -29,5 +29,3 @@ ssl.truststore.location=/etc/kafka/secrets/kafka.truststore.jks ssl.truststore.password=abcdefgh transaction.state.log.min.isr=1 transaction.state.log.replication.factor=1 -share.coordinator.state.topic.replication.factor=1 -share.coordinator.state.topic.min.isr=1 diff --git a/docker/test/fixtures/mode/combined/docker-compose.yml b/docker/test/fixtures/mode/combined/docker-compose.yml index 8691019d02447..da199ea93a85b 100644 --- a/docker/test/fixtures/mode/combined/docker-compose.yml +++ b/docker/test/fixtures/mode/combined/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: broker1: image: {$IMAGE} @@ -33,8 +34,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker1:19092,2@broker2:19092,3@broker3:19092' KAFKA_LISTENERS: 'CONTROLLER://:19092,PLAINTEXT_HOST://:9092,SSL://:9093,PLAINTEXT://:29092' @@ -68,8 +67,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'broker,controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@broker1:19092,2@broker2:19092,3@broker3:19092' KAFKA_LISTENERS: 'CONTROLLER://:19092,PLAINTEXT_HOST://:9092,SSL://:9093,PLAINTEXT://:29092' diff --git a/docker/test/fixtures/mode/isolated/docker-compose.yml b/docker/test/fixtures/mode/isolated/docker-compose.yml index 2c77fc123f137..0dcf8d2f876f0 100644 --- a/docker/test/fixtures/mode/isolated/docker-compose.yml +++ b/docker/test/fixtures/mode/isolated/docker-compose.yml @@ -14,6 +14,7 @@ # limitations under the License. --- +version: '2' services: controller1: image: {$IMAGE} @@ -26,8 +27,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@controller1:19092,2@controller2:19092,3@controller3:19092' KAFKA_LISTENERS: 'CONTROLLER://:19092' @@ -46,8 +45,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@controller1:19092,2@controller2:19092,3@controller3:19092' KAFKA_LISTENERS: 'CONTROLLER://:19092' @@ -66,8 +63,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'controller' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@controller1:19092,2@controller2:19092,3@controller3:19092' KAFKA_LISTENERS: 'CONTROLLER://:19092' @@ -93,8 +88,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'broker' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@controller1:19092,2@controller2:19092,3@controller3:19092' KAFKA_LISTENERS: 'PLAINTEXT_HOST://:9092,SSL://:9093,PLAINTEXT://:29092' @@ -132,8 +125,6 @@ services: KAFKA_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1 KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_REPLICATION_FACTOR: 1 - KAFKA_SHARE_COORDINATOR_STATE_TOPIC_MIN_ISR: 1 KAFKA_PROCESS_ROLES: 'broker' KAFKA_CONTROLLER_QUORUM_VOTERS: '1@controller1:19092,2@controller2:19092,3@controller3:19092' KAFKA_LISTENERS: 'PLAINTEXT_HOST://:9092,SSL://:9093,PLAINTEXT://:29092' diff --git a/docs/api.html b/docs/api.html index e35d79ca097f8..8f76bbc7036a3 100644 --- a/docs/api.html +++ b/docs/api.html @@ -24,16 +24,16 @@
        • The Admin API allows managing and inspecting topics, brokers, and other Kafka objects. - Kafka exposes all its functionality over a language independent protocol which has clients available in many programming languages. However only the Java clients are maintained as part of the main Kafka project, the others are available as independent open source projects. A list of non-Java clients is available here. + Kafka exposes all its functionality over a language independent protocol which has clients available in many programming languages. However only the Java clients are maintained as part of the main Kafka project, the others are available as independent open source projects. A list of non-Java clients is available here.

          2.1 Producer API

          The Producer API allows applications to send streams of data to topics in the Kafka cluster.

          - Examples of using the producer are shown in the + Examples showing how to use the producer are given in the javadocs.

          - To use the producer, add the following Maven dependency to your project: + To use the producer, you can use the following maven dependency:

          <dependency>
           	<groupId>org.apache.kafka</groupId>
          @@ -45,40 +45,26 @@ 

          javadocs.

          - To use the consumer, add the following Maven dependency to your project: + To use the consumer, you can use the following maven dependency:

          <dependency>
           	<groupId>org.apache.kafka</groupId>
           	<artifactId>kafka-clients</artifactId>
           	<version>{{fullDotVersion}}</version>
           </dependency>
          -

          2.3 Share Consumer API (Preview)

          - - The Share Consumer API (Preview) enables applications within a share group to cooperatively consume and process data from Kafka topics. -

          - Examples of using the share consumer are shown in the - javadocs. -

          - To use the share consumer, add the following Maven dependency to your project: -

          <dependency>
          -	<groupId>org.apache.kafka</groupId>
          -	<artifactId>kafka-clients</artifactId>
          -	<version>{{fullDotVersion}}</version>
          -</dependency>
          - -

          2.4 Streams API

          +

          2.3 Streams API

          The Streams API allows transforming streams of data from input topics to output topics.

          - Examples of using this library are shown in the + Examples showing how to use this library are given in the javadocs.

          Additional documentation on using the Streams API is available here.

          - To use Kafka Streams, add the following Maven dependency to your project: + To use Kafka Streams you can use the following maven dependency:

          <dependency>
           	<groupId>org.apache.kafka</groupId>
          @@ -89,7 +75,7 @@ 

          in the developer guide.

          - To use Kafka Streams DSL for Scala {{scalaVersion}}, add the following Maven dependency to your project: + To use Kafka Streams DSL for Scala {{scalaVersion}} you can use the following maven dependency:

          <dependency>
           	<groupId>org.apache.kafka</groupId>
          @@ -97,7 +83,7 @@ 

          2.5 Connect API

          +

          2.4 Connect API

          The Connect API allows implementing connectors that continually pull from some source data system into Kafka or push from Kafka into some sink data system.

          @@ -106,11 +92,11 @@

          javadoc.

          -

          2.6 Admin API

          +

          2.5 Admin API

          The Admin API supports managing and inspecting topics, brokers, acls, and other Kafka objects.

          - To use the Admin API, add the following Maven dependency to your project: + To use the Admin API, add the following Maven dependency:

          <dependency>
           	<groupId>org.apache.kafka</groupId>
           	<artifactId>kafka-clients</artifactId>
          diff --git a/docs/configuration.html b/docs/configuration.html
          index 0c114f79ef4c9..d38dfce2aab9c 100644
          --- a/docs/configuration.html
          +++ b/docs/configuration.html
          @@ -26,10 +26,9 @@ 

          log.dirs
        • process.roles
        • controller.quorum.bootstrap.servers -
        • controller.listener.names - Topic configurations and defaults are discussed in more detail below. + Topic-level configurations and defaults are discussed in more detail below. @@ -157,7 +156,6 @@
          Updating Thread Configs
        • remote.log.reader.threads
        • remote.log.manager.copier.thread.pool.size
        • remote.log.manager.expiration.thread.pool.size
        • -
        • remote.log.manager.follower.thread.pool.size
        • Updating ConnectionQuota Configs
          @@ -187,7 +185,7 @@
          Adding and Removing Listeners
          Inter-broker listener must be configured using the static broker configuration inter.broker.listener.name or security.inter.broker.protocol. -

          3.2 Topic Configs

          +

          3.2 Topic-Level Configs

          Configurations pertinent to topics have both a server default as well an optional per-topic override. If no per-topic configuration is given the server default is used. The override can be set at topic creation time by giving one or more --config options. This example creates a topic named my-topic with a custom max message size and flush rate:
          $ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --topic my-topic --partitions 1 \
          @@ -203,78 +201,63 @@ 

          $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --entity-type topics --entity-name my-topic --alter --delete-config max.message.bytes

          - Below is the topic configuration. The server's default configuration for this property is given under the Server Default Property heading. A given server default config value only applies to a topic if it does not have an explicit topic config override. + The following are the topic-level configurations. The server's default configuration for this property is given under the Server Default Property heading. A given server default config value only applies to a topic if it does not have an explicit topic config override. -

          3.3 Group Configs

          +

          3.3 Producer Configs

          - Below is the group configuration: - - -

          3.4 Producer Configs

          - - Below is the producer configuration: + Below is the configuration of the producer: -

          3.5 Consumer Configs

          +

          3.4 Consumer Configs

          - Below is the consumer and share consumer configuration: + Below is the configuration for the consumer: -

          3.6 Kafka Connect Configs

          - Below is the Kafka Connect framework configuration. +

          3.5 Kafka Connect Configs

          + Below is the configuration of the Kafka Connect framework. -

          3.6.1 Source Connector Configs

          - Below is the source connector configuration. +

          3.5.1 Source Connector Configs

          + Below is the configuration of a source connector. -

          3.6.2 Sink Connector Configs

          - Below is the sink connector configuration. +

          3.5.2 Sink Connector Configs

          + Below is the configuration of a sink connector. -

          3.7 Kafka Streams Configs

          - Below is the Kafka Streams client library configuration. +

          3.6 Kafka Streams Configs

          + Below is the configuration of the Kafka Streams client library. -

          3.8 Admin Configs

          - Below is the Kafka Admin client library configuration. +

          3.7 Admin Configs

          + Below is the configuration of the Kafka Admin client library. -

          3.9 MirrorMaker Configs

          +

          3.8 MirrorMaker Configs

          Below is the configuration of the connectors that make up MirrorMaker 2. -

          3.9.1 MirrorMaker Common Configs

          - Below is the common configuration that applies to all three connectors. +

          3.8.1 MirrorMaker Common Configs

          + Below are the common configuration properties that apply to all three connectors. -

          3.9.2 MirrorMaker Source Configs

          +

          3.8.2 MirrorMaker Source Configs

          Below is the configuration of MirrorMaker 2 source connector for replicating topics. -

          3.9.3 MirrorMaker Checkpoint Configs

          +

          3.8.3 MirrorMaker Checkpoint Configs

          Below is the configuration of MirrorMaker 2 checkpoint connector for emitting consumer offset checkpoints. -

          3.9.4 MirrorMaker HeartBeat Configs

          +

          3.8.4 MirrorMaker HeartBeat Configs

          Below is the configuration of MirrorMaker 2 heartbeat connector for checking connectivity between connectors and clusters. -

          3.10 System Properties

          +

          3.9 System Properties

          Kafka supports some configuration that can be enabled through Java system properties. System properties are usually set by passing the -D flag to the Java virtual machine in which Kafka components are running. Below are the supported system properties.
            -
          • -

            org.apache.kafka.sasl.oauthbearer.allowed.files

            -

            This system property is used to determine which files, if any, are allowed to be read by the SASL OAUTHBEARER plugin. This property accepts comma-separated list of files. By default the value is an empty list. -

            If users want to enable some files, users need to explicitly set the system property like below. -

            -Dorg.apache.kafka.sasl.oauthbearer.allowed.files=/tmp/token,/tmp/private_key.pem
            - - - -
            Since:4.1.0
            Default Value:
            -
          • org.apache.kafka.sasl.oauthbearer.allowed.urls

            This system property is used to set the allowed URLs as SASL OAUTHBEARER token or jwks endpoints. This property accepts comma-separated list of URLs. By default the value is an empty list. @@ -287,26 +270,14 @@

            org.apache.kafka.disallowed.login.modules

            -

            This system property is used to disable the problematic login modules usage in SASL JAAS configuration. This property accepts comma-separated list of loginModule names. By default com.sun.security.auth.module.JndiLoginModule and com.sun.security.auth.module.LdapLoginModule loginModule is disabled. -

            If users want to enable JndiLoginModule or LdapLoginModule, users need to explicitly reset the system property like below. We advise the users to validate configurations and only allow trusted JNDI configurations. For more details CVE-2023-25194. +

            This system property is used to disable the problematic login modules usage in SASL JAAS configuration. This property accepts comma-separated list of loginModule names. By default com.sun.security.auth.module.JndiLoginModule loginModule is disabled. +

            If users want to enable JndiLoginModule, users need to explicitly reset the system property like below. We advise the users to validate configurations and only allow trusted JNDI configurations. For more details CVE-2023-25194.

            -Dorg.apache.kafka.disallowed.login.modules=

            To disable more loginModules, update the system property with comma-separated loginModule names. Make sure to explicitly add JndiLoginModule module name to the comma-separated list like below.

            -Dorg.apache.kafka.disallowed.login.modules=com.sun.security.auth.module.JndiLoginModule,com.ibm.security.auth.module.LdapLoginModule,com.ibm.security.auth.module.Krb5LoginModule
            -

            The configuration is deprecated and will be removed in a future release. Please use org.apache.kafka.allowed.login.modules instead. - - -
            Since:3.4.0
            Deprecated:4.2.0
            Default Value:com.sun.security.auth.module.JndiLoginModule,com.sun.security.auth.module.LdapLoginModule
            -

          • -
          • -

            org.apache.kafka.allowed.login.modules

            -

            This system property is used to explicitly allow specific login modules in SASL JAAS configuration. It accepts a comma-separated list of login module class names. This property provides a stricter, allowed-list-based alternative to the deprecated org.apache.kafka.disallowed.login.modules property. - It is recommended to use this property to improve the security of JAAS configurations. -

            If both properties are set, org.apache.kafka.allowed.login.modules takes precedence.

            - - - +
            Since:4.2.0
            Default Value:
            Default Value:com.sun.security.auth.module.JndiLoginModule
          • @@ -327,29 +298,14 @@

            3.11 Tiered Storage Configs

            - Below is the Tiered Storage configuration. +

            3.10 Tiered Storage Configs

            + Below are the configuration properties for Tiered Storage. - -

            3.11.1 RLMM Configs

            -

            Below is the configuration for TopicBasedRemoteLogMetadataManager, which is the default implementation of RemoteLogMetadataManager.

            -

            All configurations here should start with the prefix defined by remote.log.metadata.manager.impl.prefix, for example, rlmm.config.remote.log.metadata.consume.wait.ms.

            -

            The implementation of TopicBasedRemoteLogMetadataManager needs to create admin, producer, and consumer clients for the internal topic __remote_log_metadata.

            -

            Additional configurations can be provided for different types of clients using the following configuration properties:

            -
            # Configs for admin, producer, and consumer clients
            -<rlmm.prefix>.remote.log.metadata.common.client.<kafka.property> = <value>
            -
            -# Configs only for producer client
            -<rlmm.prefix>.remote.log.metadata.producer.<kafka.property> = <value>
            -
            -# Configs only for consumer client
            -<rlmm.prefix>.remote.log.metadata.consumer.<kafka.property> = <value>
            -

            - 3.12 Configuration Providers + 3.11 Configuration Providers

            diff --git a/docs/connect.html b/docs/connect.html index 85571bf115cb3..16cbf0f39ea54 100644 --- a/docs/connect.html +++ b/docs/connect.html @@ -47,7 +47,7 @@

            Running Kafka Connectbootstrap.servers - List of Kafka servers used to bootstrap connections to Kafka

          • key.converter - Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the keys in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.
          • value.converter - Converter class used to convert between Kafka Connect format and the serialized form that is written to Kafka. This controls the format of the values in messages written to or read from Kafka, and since this is independent of connectors it allows any connector to work with any serialization format. Examples of common formats include JSON and Avro.
          • -
          • plugin.path (default null) - a list of paths that contain Connect plugins (connectors, converters, transformations). Before running quick starts, users must add the absolute path that contains the example FileStreamSourceConnector and FileStreamSinkConnector packaged in connect-file-{{fullDotVersion}}.jar, because these connectors are not included by default to the CLASSPATH or the plugin.path of the Connect worker (see plugin.path property for examples).
          • +
          • plugin.path (default empty) - a list of paths that contain Connect plugins (connectors, converters, transformations). Before running quick starts, users must add the absolute path that contains the example FileStreamSourceConnector and FileStreamSinkConnector packaged in connect-file-"version".jar, because these connectors are not included by default to the CLASSPATH or the plugin.path of the Connect worker (see plugin.path property for examples).

          The important configuration options specific to standalone mode are:

          @@ -57,7 +57,7 @@

          Running Kafka ConnectThe parameters that are configured here are intended for producers and consumers used by Kafka Connect to access the configuration, offset and status topics. For configuration of the producers used by Kafka source tasks and the consumers used by Kafka sink tasks, the same parameters can be used but need to be prefixed with producer. and consumer. respectively. The only Kafka client parameter that is inherited without a prefix from the worker configuration is bootstrap.servers, which in most cases will be sufficient, since the same cluster is often used for all purposes. A notable exception is a secured cluster, which requires extra parameters to allow connections. These parameters will need to be set up to three times in the worker configuration, once for management access, once for Kafka sources and once for Kafka sinks.

          -

          Client configuration overrides can be configured individually per connector by using the prefixes producer.override. and consumer.override. for Kafka sources or Kafka sinks respectively. These overrides are included with the rest of the connector's configuration properties.

          +

          Starting with 2.3.0, client configuration overrides can be configured individually per connector by using the prefixes producer.override. and consumer.override. for Kafka sources or Kafka sinks respectively. These overrides are included with the rest of the connector's configuration properties.

          The remaining parameters are connector configuration files. Each file may either be a Java Properties file or a JSON file containing an object with the same structure as the request body of either the POST /connectors endpoint or the PUT /connectors/{name}/config endpoint (see the OpenAPI documentation). You may include as many as you want, but all will execute within the same process (on different threads). You can also choose not to specify any connector configuration files on the command line, and instead use the REST API to create connectors at runtime after your standalone worker starts.

          @@ -69,10 +69,10 @@

          Running Kafka ConnectIn particular, the following configuration parameters, in addition to the common settings mentioned above, are critical to set before starting your cluster:

            -
          • group.id - Unique name for the cluster, used in forming the Connect cluster group; note that this must not conflict with consumer group IDs
          • -
          • config.storage.topic - Name for the topic to use for storing connector and task configurations; this topic should have a single partition, be replicated, and be configured for compaction
          • -
          • offset.storage.topic - Name for the topic to use for storing offsets; this topic should have many partitions, be replicated, and be configured for compaction
          • -
          • status.storage.topic - Name for the topic to use for storing statuses; this topic can have multiple partitions, be replicated, and be configured for compaction
          • +
          • group.id (default connect-cluster) - unique name for the cluster, used in forming the Connect cluster group; note that this must not conflict with consumer group IDs
          • +
          • config.storage.topic (default connect-configs) - topic to use for storing connector and task configurations; note that this should be a single partition, highly replicated, compacted topic. You may need to manually create the topic to ensure the correct configuration as auto created topics may have multiple partitions or be automatically configured for deletion rather than compaction
          • +
          • offset.storage.topic (default connect-offsets) - topic to use for storing offsets; this topic should have many partitions, be replicated, and be configured for compaction
          • +
          • status.storage.topic (default connect-status) - topic to use for storing statuses; this topic can have multiple partitions, and should be replicated and configured for compaction

          Note that in distributed mode the connector configurations are not passed on the command line. Instead, use the REST API described below to create, modify, and destroy connectors.

          @@ -308,7 +308,7 @@

          REST API

        • DELETE /connectors/{name} - delete a connector, halting all tasks and deleting its configuration
        • GET /connectors/{name}/topics - get the set of topics that a specific connector is using since the connector was created or since a request to reset its set of active topics was issued
        • PUT /connectors/{name}/topics/reset - send a request to empty the set of active topics of a connector
        • -
        • Offsets management endpoints (see KIP-875 for more details): +
        • Offsets management endpoints (see KIP-875 for more details):
          • GET /connectors/{name}/offsets - get the current offsets for a connector
          • DELETE /connectors/{name}/offsets - reset the offsets for a connector. The connector must exist and must be in the stopped state (see PUT /connectors/{name}/stop)
          • @@ -381,7 +381,7 @@

            REST API

          • PUT /admin/loggers/{name} - set the log level for the specified logger
          -

          See KIP-495 for more details about the admin logger REST APIs.

          +

          See KIP-495 for more details about the admin logger REST APIs.

          For the complete specification of the Kafka Connect REST API, see the OpenAPI documentation

          @@ -762,11 +762,11 @@
          Sink Tasks

          The flush() method is used during the offset commit process, which allows tasks to recover from failures and resume from a safe point such that no events will be missed. The method should push any outstanding data to the destination system and then block until the write has been acknowledged. The offsets parameter can often be ignored, but is useful in some cases where implementations want to store offset information in the destination store to provide exactly-once delivery. For example, an HDFS connector could do this and use atomic move operations to make sure the flush() operation atomically commits the data and offsets to a final location in HDFS.

          -
          Errant Record Reporter
          +
          Errant Record Reporter

          When error reporting is enabled for a connector, the connector can use an ErrantRecordReporter to report problems with individual records sent to a sink connector. The following example shows how a connector's SinkTask subclass might obtain and use the ErrantRecordReporter, safely handling a null reporter when the DLQ is not enabled or when the connector is installed in an older Connect runtime that doesn't have this reporter feature:

          -
          private ErrantRecordReporter reporter;
          +    <
          private ErrantRecordReporter reporter;
           
           @Override
           public void start(Map<String, String> props) {
          @@ -817,7 +817,7 @@ 
          KIP-618, Kafka Connect supports exactly-once source connectors as of version 3.3.0. In order for a source connector to take advantage of this support, it must be able to provide meaningful source offsets for each record that it emits, and resume consumption from the external system at the exact position corresponding to any of those offsets without dropping or duplicating messages.

          +

          With the passing of KIP-618, Kafka Connect supports exactly-once source connectors as of version 3.3.0. In order for a source connector to take advantage of this support, it must be able to provide meaningful source offsets for each record that it emits, and resume consumption from the external system at the exact position corresponding to any of those offsets without dropping or duplicating messages.

          Defining transaction boundaries
          @@ -985,7 +985,7 @@

          8.4 Administra

          Starting with 2.3.0, Kafka Connect is using by default a protocol that performs - incremental cooperative rebalancing + incremental cooperative rebalancing that incrementally balances the connectors and tasks across the Connect workers, affecting only tasks that are new, to be removed, or need to move from one worker to another. Other tasks are not stopped and restarted during the rebalance, as they would have been with the old protocol.

          diff --git a/docs/design.html b/docs/design.html index c58ead07c9ae0..a59a83b5bfbc7 100644 --- a/docs/design.html +++ b/docs/design.html @@ -140,7 +140,7 @@

          - Kafka supports GZIP, Snappy, LZ4 and ZStandard compression protocols. More details on compression can be found here. + Kafka supports GZIP, Snappy, LZ4 and ZStandard compression protocols. More details on compression can be found here.

          4.4 The Producer

          @@ -240,7 +240,7 @@

          org.apache.kafka.common.errors.FencedInstanceIdException. For more details, see - KIP-345 + KIP-345

          4.6 Message Delivery Semantics

          @@ -263,9 +263,8 @@

          - Kafka's semantics are straightforward. When publishing a message we have a notion of the message being "committed" to the log. A message is considered committed only when all replicas in the in-sync replicas (ISR) for that - partition have applied it to their log. Once a published message is committed, it will not be lost as long as one broker that replicates the partition to which this message was written remains "alive". - The definition of committed message and alive partition as well as a description of which types of failures we attempt to handle will be + Kafka's semantics are straightforward. When publishing a message we have a notion of the message being "committed" to the log. Once a published message is committed, it will not be lost as long as one broker that + replicates the partition to which this message was written remains "alive". The definition of committed message and alive partition as well as a description of which types of failures we attempt to handle will be described in more detail in the next section. For now let's assume a perfect, lossless broker and try to understand the guarantees to the producer and consumer. If a producer attempts to publish a message and experiences a network error, it cannot be sure if this error happened before or after the message was committed. This is similar to the semantics of inserting into a database table with an autogenerated key.

          @@ -340,58 +339,11 @@

          - The error handling for transactional producer has been standardized which ensures consistent behavior and clearer error handling patterns. The exception categories are now more precisely defined: -
            -
          1. RetriableException: Temporary exceptions that are retried automatically by the client. These are handled internally and don't bubble up to the application.
          2. -
          3. RefreshRetriableException: Exceptions requiring metadata refresh before retry. These are handled internally by the client after refreshing metadata and don't bubble up to the application.
          4. -
          5. AbortableException: Exceptions that require transaction abort and reprocessing. These bubble up to the application, which must handle them by aborting the transaction and resetting the consumer position.
          6. -
          7. ApplicationRecoverableException: Exceptions that bubble up to the application and require application handling. The application must implement its own recovery strategy, which must include restarting the producer.
          8. -
          9. InvalidConfigurationException: Configuration-related exceptions that bubble up to the application and require application handling. The producer doesn't need to restart, but the application may choose to restart it.
          10. -
          11. KafkaException: General Kafka exceptions that don't fit into the above categories. These bubble up to the application for handling.
          12. -
          -

          - Example template code for handling transaction exceptions link : Transaction Client Demo -

          A simple policy for handling exceptions and aborted transactions is to discard and recreate the Kafka producer and consumer objects and start afresh. As part of recreating the consumer, the consumer group will rebalance and fetch the last committed offset, which has the effect of rewinding back to the state before the transaction aborted. Alternatively, a more sophisticated application (such as the transactional message copier) can choose not to use KafkaConsumer.committed to retrieve the committed offset from Kafka, and then KafkaConsumer.seek to rewind the current position. -

          4.8 Share groups

          -

          - Share groups are available as a preview in Apache Kafka 4.1. -

          - Share groups are a new type of group, existing alongside traditional consumer groups. Share groups enable Kafka consumers to cooperatively consume and process records from topics. - They offer an alternative to traditional consumer groups, particularly when applications require finer-grained sharing of partitions and records. -

          - The fundamental differences between a share group and a consumer group are: -

            -
          • The consumers within a share group cooperatively consume records, and partitions may be assigned to multiple consumers.
          • -
          • The number of consumers in a share group can exceed the number of partitions in a topic.
          • -
          • Records are acknowledged individually, though the system is optimized for batch processing to improve efficiency.
          • -
          • Delivery attempts to consumers in a share group are counted, which enables automated handling of unprocessable records.
          • -
          -

          - All consumers in the same share group subscribed to the same topic will cooperatively consume the records of that topic. If a topic is accessed by consumers in multiple share groups, each share group - consumes from that topic independently of the others. -

          - Each consumer can dynamically set its list of subscribed topics. In practice, all consumers within a share group typically subscribe to the same topic or topics. -

          - When a consumer in a share-group fetches records, it receives available records from any of the topic-partitions matching its subscriptions. Records are acquired for delivery to this consumer with a time-limited - acquisition lock. While a record is acquired, it is unavailable to other consumers. -

          By default, the lock duration is 30 seconds, but you can control it using the group configuration parameter share.record.lock.duration.ms. The lock is released automatically once its - duration elapses, making the record available to another consumer. A consumer holding the lock can handle the record in the following ways: -

            -
          • Acknowledge successful processing of the record.
          • -
          • Release the record, making it available for another delivery attempt.
          • -
          • Reject the record, indicating it's unprocessable and preventing further delivery attempts for that record.
          • -
          • Do nothing, in which case the lock is automatically released when its duration expires.
          • -
          -

          - The Kafka cluster limits the number of records acquired for consumers for each topic-partition within a share group. Once this limit is reached, fetching operations will temporarily yield no further records - until the number of acquired records decreases (as locks naturally time out). This limit is controlled by the broker configuration property group.share.partition.max.record.locks. By limiting - the duration of the acquisition lock and automatically releasing the locks, the broker ensures delivery progresses even in the presence of consumer failures. - -

          4.9 Replication

          +

          4.8 Replication

          Kafka replicates the log for each topic's partitions across a configurable number of servers (you can set this replication factor on a topic-by-topic basis). This allows automatic failover to these replicas when a server in the cluster fails so messages remain available in the presence of failures. @@ -428,6 +380,7 @@

          + We can now more precisely define that a message is considered committed when all replicas in the ISR for that partition have applied it to their log. Only committed messages are ever given out to the consumer. This means that the consumer need not worry about potentially seeing a message that could be lost if the leader fails. Producers, on the other hand, have the option of either waiting for the message to be committed or not, depending on their preference for tradeoff between latency and durability. This preference is controlled by the acks setting that the producer uses. @@ -522,7 +475,7 @@

          < The result is that we are able to batch together many of the required leadership change notifications which makes the election process far cheaper and faster for a large number of partitions. If the controller itself fails, then another controller will be elected. -

          4.10 Log Compaction

          +

          4.9 Log Compaction

          Log compaction ensures that Kafka will always retain at least the last known value for each message key within the log of data for a single topic partition. It addresses use cases and scenarios such as restoring state after application crashes or system failure, or reloading caches after application restarts during operational maintenance. Let's dive into these use cases in more detail and then describe how compaction works. @@ -674,7 +627,7 @@

          <

          Further cleaner configurations are described here. -

          4.11 Quotas

          +

          4.10 Quotas

          Kafka cluster has the ability to enforce quotas on requests to control the broker resources used by clients. Two types of client quotas can be enforced by Kafka brokers for each group of clients sharing a quota: diff --git a/docs/ecosystem.html b/docs/ecosystem.html index a198e2efd713f..5fbcec59ba2ee 100644 --- a/docs/ecosystem.html +++ b/docs/ecosystem.html @@ -15,4 +15,4 @@ limitations under the License. --> -There are a plethora of tools that integrate with Kafka outside the main distribution. The ecosystem page lists many of these, including stream processing systems, Hadoop integration, monitoring, and deployment tools. +There are a plethora of tools that integrate with Kafka outside the main distribution. The ecosystem page lists many of these, including stream processing systems, Hadoop integration, monitoring, and deployment tools. diff --git a/docs/implementation.html b/docs/implementation.html index 3be539e0ba82e..a25a9b98d2275 100644 --- a/docs/implementation.html +++ b/docs/implementation.html @@ -59,9 +59,6 @@

          -

          The key of a record header is guaranteed to be non-null, while the value of a record header may be null. The order of headers in a record is preserved when producing and consuming.

          We use the same varint encoding as Protobuf. More information on the latter can be found here. The count of headers in a record is also encoded as a varint.

          diff --git a/docs/introduction.html b/docs/introduction.html index 5e1597f725aad..49de2faf030dd 100644 --- a/docs/introduction.html +++ b/docs/introduction.html @@ -119,7 +119,7 @@

          Clients: They allow you to write distributed applications and microservices that read, write, and process streams of events in parallel, at scale, and in a fault-tolerant manner even in the case of network problems or machine failures. Kafka ships with some such clients included, which are augmented by - dozens of clients provided by the Kafka + dozens of clients provided by the Kafka community: clients are available for Java and Scala including the higher-level Kafka Streams library, for Go, Python, C/C++, and many other programming languages as well as REST APIs. diff --git a/docs/js/templateData.js b/docs/js/templateData.js index 5ab87d9f7c50b..34c8a96b7c263 100644 --- a/docs/js/templateData.js +++ b/docs/js/templateData.js @@ -17,8 +17,8 @@ limitations under the License. // Define variables for doc templates var context={ - "version": "42", - "dotVersion": "4.2", - "fullDotVersion": "4.2.0", + "version": "40", + "dotVersion": "4.0", + "fullDotVersion": "4.0.0", "scalaVersion": "2.13" }; diff --git a/docs/ops.html b/docs/ops.html index 2d050ec76dadb..5a60a4cde89c6 100644 --- a/docs/ops.html +++ b/docs/ops.html @@ -82,7 +82,7 @@

          $ bin/kafka-leader-election.sh --bootstrap-server localhost:9092 --election-type preferred --all-topic-partitions

          -

          Balancing replicas across racks

          +

          Balancing Replicas Across Racks

          The rack awareness feature spreads replicas of the same partition across different racks. This extends the guarantees Kafka provides for broker-failure to cover rack-failure, limiting the risk of data loss should all the brokers on a rack fail at once. The feature can also be applied to other broker groupings such as availability zones in EC2.

          You can specify that a broker belongs to a particular rack by adding a property to the broker config: @@ -107,18 +107,7 @@

        • -

          Managing groups

          - - With the GroupCommand tool, we can list groups of all types, including consumer groups, share groups and streams groups. Each type of group has its own tool for administering groups of that type. - - For example, to list all groups in the cluster: - -
          $ bin/kafka-groups.sh --bootstrap-server localhost:9092 --list
          -GROUP                    TYPE                     PROTOCOL
          -my-consumer-group        Consumer                 consumer
          -my-share-group           Share                    share
          - -

          Managing consumer groups

          +

          Managing Consumer Groups

          With the ConsumerGroupCommand tool, we can list, describe, or delete the consumer groups. The consumer group can be deleted manually, or automatically when the last committed offset for that group expires. Manual deletion works only if the group does not have any active members. @@ -173,7 +162,7 @@

          To reset offsets of a consumer group, "--reset-offsets" option can be used. This option supports one consumer group at the time. It requires defining following scopes: --all-topics or --topic. One scope must be selected, unless you use '--from-file' scenario. Also, first make sure that the consumer instances are inactive. - See KIP-122 for more details. + See KIP-122 for more details.

          It has 3 execution options: @@ -193,7 +182,7 @@

          --reset-offsets also has the following scenarios to choose from (at least one scenario must be selected):

          - Describe default quota for user: -
          $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users --entity-default
          -Quota configs for the default user-principal are consumer_byte_rate=2048.0, request_percentage=200.0, producer_byte_rate=1024.0
          - Describe default quota for client-id: -
          $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type clients --entity-default
          -Quota configs for the default client-id are consumer_byte_rate=2048.0, request_percentage=200.0, producer_byte_rate=1024.0
          - If entity name is not specified, all entities of the specified type are described. For example, describe all users:
          $ bin/kafka-configs.sh --bootstrap-server localhost:9092 --describe --entity-type users
           Configs for user-principal 'user1' are producer_byte_rate=1024,consumer_byte_rate=2048,request_percentage=200
          @@ -655,7 +590,7 @@ 

          Configuring Geo-Replication

          - The following sections describe how to configure and run a dedicated MirrorMaker cluster. If you want to run MirrorMaker within an existing Kafka Connect cluster or other supported deployment setups, please refer to KIP-382: MirrorMaker 2.0 and be aware that the names of configuration settings may vary between deployment modes. + The following sections describe how to configure and run a dedicated MirrorMaker cluster. If you want to run MirrorMaker within an existing Kafka Connect cluster or other supported deployment setups, please refer to KIP-382: MirrorMaker 2.0 and be aware that the names of configuration settings may vary between deployment modes.

          @@ -665,7 +600,7 @@

          MirrorMakerConfig, MirrorConnectorConfig

        • DefaultTopicFilter for topics, DefaultGroupFilter for consumer groups
        • -
        • Example configuration settings in connect-mirror-maker.properties, KIP-382: MirrorMaker 2.0
        • +
        • Example configuration settings in connect-mirror-maker.properties, KIP-382: MirrorMaker 2.0
        • Configuration File Syntax
          @@ -760,7 +695,7 @@
          KIP-710. To do this, the dedicated.mode.enable.internal.rest property must be set to true. In addition, many of the REST-related configuration properties available for Kafka Connect can be specified the MirrorMaker config. For example, to enable intra-cluster communication in MirrorMaker cluster with each node listening on port 8080 of their local machine, the following should be added to the MirrorMaker config file: + In either case, it is also necessary to enable intra-cluster communication between the MirrorMaker nodes, as described in KIP-710. To do this, the dedicated.mode.enable.internal.rest property must be set to true. In addition, many of the REST-related configuration properties available for Kafka Connect can be specified the MirrorMaker config. For example, to enable intra-cluster communication in MirrorMaker cluster with each node listening on port 8080 of their local machine, the following should be added to the MirrorMaker config file:

          dedicated.mode.enable.internal.rest = true
          @@ -1102,7 +1037,6 @@ 

          # MBean: kafka.connect.mirror:type=MirrorSourceConnector,target=([-.w]+),topic=([-.w]+),partition=([0-9]+) record-count # number of records replicated source -> target -record-rate # average number of records/sec in replicated records record-age-ms # age of records when they are replicated record-age-ms-min record-age-ms-max @@ -1112,9 +1046,8 @@

          target -# MBean: kafka.connect.mirror:type=MirrorCheckpointConnector,source=([-.w]+),target=([-.w]+),group=([-.w]+),topic=([-.w]+),partition=([0-9]+) +# MBean: kafka.connect.mirror:type=MirrorCheckpointConnector,source=([-.w]+),target=([-.w]+) checkpoint-latency-ms # time it takes to replicate consumer offsets checkpoint-latency-ms-min @@ -1194,8 +1127,8 @@

            -
          • Use prefix ACLs (cf. KIP-290) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with payments.teamA..
          • -
          • Define a custom CreateTopicPolicy (cf. KIP-108 and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs.
          • +
          • Use prefix ACLs (cf. KIP-290) to enforce a common prefix for topic names. For example, team A may only be permitted to create topics whose names start with payments.teamA..
          • +
          • Define a custom CreateTopicPolicy (cf. KIP-108 and the setting create.topic.policy.class.name) to enforce strict naming patterns. These policies provide the most flexibility and can cover complex patterns and rules to match an organization's needs.
          • Disable topic creation for normal users by denying it with an ACL, and then rely on an external process to create topics on behalf of users (e.g., scripting or your favorite automation toolkit).
          • It may also be useful to disable the Kafka feature to auto-create topics on demand by setting auto.create.topics.enable=false in the broker configuration. Note that you should not rely solely on this option.
          @@ -1220,7 +1153,7 @@

        • Encryption of data transferred between Kafka brokers and Kafka clients, between brokers, and between brokers and other optional tools.
        • Authentication of connections from Kafka clients and applications to Kafka brokers, as well as connections between Kafka brokers.
        • -
        • Authorization of client operations such as creating, deleting, and altering the configuration of topics; writing events to or reading events from a topic; creating and deleting ACLs. Administrators can also define custom policies to put in place additional restrictions, such as a CreateTopicPolicy and AlterConfigPolicy (see KIP-108 and the settings create.topic.policy.class.name, alter.config.policy.class.name).
        • +
        • Authorization of client operations such as creating, deleting, and altering the configuration of topics; writing events to or reading events from a topic; creating and deleting ACLs. Administrators can also define custom policies to put in place additional restrictions, such as a CreateTopicPolicy and AlterConfigPolicy (see KIP-108 and the settings create.topic.policy.class.name, alter.config.policy.class.name).
        • @@ -1249,7 +1182,7 @@

          - Client quotas: Kafka supports different types of (per-user principal) client quotas. Because a client's quotas apply irrespective of which topics the client is writing to or reading from, they are a convenient and effective tool to allocate resources in a multi-tenant cluster. Request rate quotas, for example, help to limit a user's impact on broker CPU usage by limiting the time a broker spends on the request handling path for that user, after which throttling kicks in. In many situations, isolating users with request rate quotas has a bigger impact in multi-tenant clusters than setting incoming/outgoing network bandwidth quotas, because excessive broker CPU usage for processing requests reduces the effective bandwidth the broker can serve. Furthermore, administrators can also define quotas on topic operations—such as create, delete, and alter—to prevent Kafka clusters from being overwhelmed by highly concurrent topic operations (see KIP-599 and the quota type controller_mutation_rate). + Client quotas: Kafka supports different types of (per-user principal) client quotas. Because a client's quotas apply irrespective of which topics the client is writing to or reading from, they are a convenient and effective tool to allocate resources in a multi-tenant cluster. Request rate quotas, for example, help to limit a user's impact on broker CPU usage by limiting the time a broker spends on the request handling path for that user, after which throttling kicks in. In many situations, isolating users with request rate quotas has a bigger impact in multi-tenant clusters than setting incoming/outgoing network bandwidth quotas, because excessive broker CPU usage for processing requests reduces the effective bandwidth the broker can serve. Furthermore, administrators can also define quotas on topic operations—such as create, delete, and alter—to prevent Kafka clusters from being overwhelmed by highly concurrent topic operations (see KIP-599 and the quota type controller_mutation_rate).

          @@ -1586,11 +1519,6 @@

          The max time in millis remote copies was throttled by a broker kafka.server:type=RemoteLogManager, name=remote-copy-throttle-time-max - - RemoteLogReader Fetch Rate And Time - The time to read data from remote storage by a broker - kafka.log.remote:type=RemoteLogManager,name=RemoteLogReaderFetchRateAndTimeMs - - - Delayed Remote List Offsets Expires Per Sec - The number of expired remote list offsets per second. Omitting 'topic=(...), partition=(...)' will yield the all-topic rate - kafka.server:type=DelayedRemoteListOffsetsMetrics,name=ExpiresPerSec,topic=([-.\w]+),partition=([0-9]+) - @@ -3079,11 +2997,6 @@
          Upgrade
          - -

          Apache Kafka 4.1 added support for upgrading a cluster from a static controller configuration to a dynamic controller configuration. Dynamic controller configuration allows users to add controller to and remove controller from the cluster. See the Controller membership changes section for more details.

          - -

          This feature upgrade is done by upgrading the KRaft feature version and updating the nodes' configuration.

          - -

          Describe KRaft Version
          - -

          Dynamic controller cluster was added in kraft.version=1 or release-version 4.1. To determine which kraft feature version the cluster is using you can execute the following CLI command:

          - -
          $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe
          -...
          -Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 0        Epoch: 7
          -Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 4.0-IV3    FinalizedVersionLevel: 4.0-IV3  Epoch: 7
          - -

          If the FinalizedVersionLevel for Feature: kraft.version is 0, the version needs to be upgraded to at least 1 to support a dynamic controller cluster.

          - -

          Upgrade KRaft Version
          - -

          The KRaft feature version can be upgraded to support dynamic controller clusters by using the kafka-feature CLI command. To upgrade all of the feature versions to the latest version:

          - -
          $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --release-version 4.1
          - -

          To upgrade just the KRaft feature version:

          - -
          $ bin/kafka-features.sh --bootstrap-server localhost:9092 upgrade --feature kraft.version=1
          - -
          Update KRaft Config
          - -

          KRaft version 1 deprecated the controller.quorum.voters property and added the controller.quorum.bootstrap.servers property. After checking that the KRaft version has been successfully upgraded to at least version 1, remove the controller.quorum.voters property and add the controller.quorum.bootstrap.servers to all of the nodes (controllers and brokers) in the cluster.

          - -
          process.roles=...
          -node.id=...
          -controller.quorum.bootstrap.servers=controller1.example.com:9093,controller2.example.com:9093,controller3.example.com:9093
          -controller.listener.names=CONTROLLER
          - -

          Provisioning Nodes

          +

          Provisioning Nodes

          The bin/kafka-storage.sh random-uuid command can be used to generate a cluster ID for your new cluster. This cluster ID must be used when formatting each server in the cluster with the bin/kafka-storage.sh format command.

          This is different from how Kafka has operated in the past. Previously, Kafka would format blank storage directories automatically, and also generate a new cluster ID automatically. One reason for the change is that auto-formatting can sometimes obscure an error condition. This is particularly important for the metadata log maintained by the controller and broker servers. If a majority of the controllers were able to start with an empty log directory, a leader might be able to be elected with missing committed data.

          -
          Bootstrap a Standalone Controller
          +
          Bootstrap a Standalone Controller
          The recommended method for creating a new KRaft controller cluster is to bootstrap it with one voter and dynamically add the rest of the controllers. Bootstrapping the first controller can be done with the following CLI command:
          $ bin/kafka-storage.sh format --cluster-id <CLUSTER_ID> --standalone --config config/controller.properties
          This command will 1) create a meta.properties file in metadata.log.dir with a randomly generated directory.id, 2) create a snapshot at 00000000000000000000-0000000000.checkpoint with the necessary control records (KRaftVersionRecord and VotersRecord) to make this Kafka node the only voter for the quorum. -
          Bootstrap with Multiple Controllers
          +
          Bootstrap with Multiple Controllers
          The KRaft cluster metadata partition can also be bootstrapped with more than one voter. This can be done by using the --initial-controllers flag:
          CLUSTER_ID="$(bin/kafka-storage.sh random-uuid)"
          @@ -4059,7 +3846,7 @@ 
          Formatting Brokers and New Controllers
          +
          Formatting Brokers and New Controllers
          When provisioning new broker and controller nodes that we want to add to an existing Kafka cluster, use the kafka-storage.sh format command with the --no-initial-controllers flag.
          $ bin/kafka-storage.sh format --cluster-id <CLUSTER_ID> --config config/server.properties --no-initial-controllers
          @@ -4067,45 +3854,63 @@
          Controller membership changes
          Static versus Dynamic KRaft Quorums
          - There are two ways to run KRaft: using KIP-853 dynamic controller quorums, or the old way - using static controller quorums.

          + There are two ways to run KRaft: the old way using static controller quorums, and the new way + using KIP-853 dynamic controller quorums.

          - When using a dynamic quorum, controller.quorum.voters must not be set - and controller.quorum.bootstrap.servers is set instead. This configuration key need not + When using a static quorum, the configuration file for each broker and controller must specify + the IDs, hostnames, and ports of all controllers in controller.quorum.voters.

          + + In contrast, when using a dynamic quorum, you should set + controller.quorum.bootstrap.servers instead. This configuration key need not contain all the controllers, but it should contain as many as possible so that all the servers can locate the quorum. In other words, its function is much like the bootstrap.servers configuration used by Kafka clients.

          - When using a static quorum, the configuration file for each broker and controller must specify - the IDs, hostnames, and ports of all controllers in controller.quorum.voters.

          - If you are not sure whether you are using static or dynamic quorums, you can determine this by running something like the following:

          -

          $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe
          -

          - If the kraft.version field is level 0 or absent, you are using a static quorum. If - it is 1 or above, you are using a dynamic quorum. For example, here is an example of a static - quorum:

          -

          Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 0 Epoch: 5
          -Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5
          -

          - Here is another example of a static quorum:

          -

          Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.8-IV0 FinalizedVersionLevel: 3.8-IV0  Epoch: 5
          -

          - Here is an example of a dynamic quorum:

          -

          Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 1 Epoch: 5
          -Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5
          -

          - The static versus dynamic nature of the quorum is determined at the time of formatting. - Specifically, the quorum will be formatted as dynamic if controller.quorum.voters is - not present, and one of --standalone, --initial-controllers, or --no-initial-controllers is set. - If you have followed the instructions earlier in this document, you will get a dynamic quorum. -

          - Note: To migrate from static voter set to dynamic voter set, please refer to the Upgrade section. +

          
          +  $ bin/kafka-features.sh --bootstrap-controller localhost:9093 describe
          +

          + + If the kraft.version field is level 0 or absent, you are using a static quorum. If + it is 1 or above, you are using a dynamic quorum. For example, here is an example of a static + quorum:

          +

          
          +Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 0 Epoch: 5
          +Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5
          +

          + + Here is another example of a static quorum:

          +

          
          +Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.8-IV0 FinalizedVersionLevel: 3.8-IV0  Epoch: 5
          +

          + + Here is an example of a dynamic quorum:

          +

          
          +Feature: kraft.version  SupportedMinVersion: 0  SupportedMaxVersion: 1  FinalizedVersionLevel: 1 Epoch: 5
          +Feature: metadata.version       SupportedMinVersion: 3.3-IV3    SupportedMaxVersion: 3.9-IV0 FinalizedVersionLevel: 3.9-IV0  Epoch: 5
          +

          + + The static versus dynamic nature of the quorum is determined at the time of formatting. + Specifically, the quorum will be formatted as dynamic if controller.quorum.voters is + not present, and if the software version is Apache Kafka 3.9 or newer. If you have + followed the instructions earlier in this document, you will get a dynamic quorum.

          + + If you would like the formatting process to fail if a dynamic quorum cannot be achieved, format your + controllers using the --feature kraft.version=1. (Note that you should not supply + this flag when formatting brokers -- only when formatting controllers.)

          + +

          
          +  $ bin/kafka-storage.sh format -t KAFKA_CLUSTER_ID --feature kraft.version=1 -c controller_static.properties
          +  Cannot set kraft.version to 1 unless KIP-853 configuration is present. Try removing the --feature flag for kraft.version.
          +

          + + Note: Currently it is not possible to convert clusters using a static controller quorum to + use a dynamic controller quorum. This function will be supported in the future release.

          Add New Controller
          - If a dynamic controller cluster already exists, it can be expanded by first provisioning a new controller using the kafka-storage.sh tool and starting the controller. + If a dynamic controller cluster already exists, it can be expanded by first provisioning a new controller using the kafka-storage.sh tool and starting the controller. After starting the controller, the replication to the new controller can be monitored using the bin/kafka-metadata-quorum.sh describe --replication command. Once the new controller has caught up to the active controller, it can be added to the cluster using the bin/kafka-metadata-quorum.sh add-controller command. @@ -4115,9 +3920,6 @@
          $ bin/kafka-metadata-quorum.sh --command-config config/controller.properties --bootstrap-controller localhost:9093 add-controller
          -

          Note that if there are any configs needed to be passed to the Admin Client, like the authentication configuration, - please also include in the "controller.properties".

          -
          Remove Controller
          If the dynamic controller cluster already exists, it can be shrunk using the bin/kafka-metadata-quorum.sh remove-controller command. Until KIP-996: Pre-vote has been implemented and released, it is recommended to shutdown the controller that will be removed before running the remove-controller command. @@ -4153,7 +3955,7 @@
          $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000000.log

          -

          This command decodes and prints the records in a cluster metadata snapshot:

          +

          This command decodes and prints the records in the a cluster metadata snapshot:

          $ bin/kafka-dump-log.sh --cluster-metadata-decoder --files metadata_log_dir/__cluster_metadata-0/00000000000000000100-0000000001.checkpoint
          @@ -4161,7 +3963,7 @@
          The kafka-metadata-shell.sh tool can be used to interactively inspect the state of the cluster metadata partition:

          -
          $ bin/kafka-metadata-shell.sh --snapshot metadata_log_dir/__cluster_metadata-0/00000000000000007228-0000000001.checkpoint
          +  
          $ bin/kafka-metadata-shell.sh --snapshot metadata_log_dir/__cluster_metadata-0/00000000000000000000.checkpoint
           >> ls /
           brokers  local  metadataQuorum  topicIds  topics
           >> ls /topics
          @@ -4179,11 +3981,11 @@ 
          - Note: 00000000000000000000-0000000000.checkpoint does not contain cluster metadata. Use a valid snapshot file when examining metadata with the kafka-metadata-shell.sh tool. +

          Deploying Considerations

            -
          • Kafka server's process.roles should be set to either broker or controller but not both. Combined mode can be used in development environments, but it should be avoided in critical deployment environments.
          • +
          • Kafka server's process.role should be set to either broker or controller but not both. Combined mode can be used in development environments, but it should be avoided in critical deployment environments.
          • For redundancy, a Kafka cluster should use 3 or more controllers, depending on factors like cost and the number of concurrent failures your system should withstand without availability impact. For the KRaft controller cluster to withstand N concurrent failures the controller cluster must include 2N + 1 controllers.
          • The Kafka controllers store all the metadata for the cluster in memory and on disk. We believe that for a typical Kafka cluster 5GB of main memory and 5GB of disk space on the metadata log director is sufficient.
          @@ -4193,7 +3995,7 @@

          In order to migrate from ZooKeeper to KRaft you need to use a bridge release. The last bridge release is Kafka 3.9. See the ZooKeeper to KRaft Migration steps in the 3.9 documentation.

          -

          6.9 Tiered Storage

          +

          6.9 Tiered Storage

          Tiered Storage Overview

          @@ -4203,19 +4005,19 @@

          <

          In the tiered storage approach, Kafka cluster is configured with two tiers of storage - local and remote. The local tier is the same as the current Kafka that uses the local disks on the Kafka brokers to store the log segments. The new remote tier uses external storage systems, such as HDFS or S3, to store the completed log segments. - Please check KIP-405 for more information. + Please check KIP-405 for more information.

          Configuration

          Broker Configurations
          -

          By default, the Kafka server will not enable the tiered storage feature. remote.log.storage.system.enable +

          By default, Kafka server will not enable tiered storage feature. remote.log.storage.system.enable is the property to control whether to enable tiered storage functionality in a broker or not. Setting it to "true" enables this feature.

          RemoteStorageManager is an interface to provide the lifecycle of remote log segments and indexes. Kafka server - doesn't provide out-of-the-box implementation of RemoteStorageManager. Users must configure remote.log.storage.manager.class.name + doesn't provide out-of-the-box implementation of RemoteStorageManager. Configuring remote.log.storage.manager.class.name and remote.log.storage.manager.class.path to specify the implementation of RemoteStorageManager.

          @@ -4251,7 +4053,7 @@

          implemented for integration test can be used, which will create a temporary directory in local storage to simulate the remote storage.

          -

          To adopt the LocalTieredStorage, the test library needs to be built locally

          +

          To adopt the `LocalTieredStorage`, the test library needs to be built locally

          # please checkout to the specific version tag you're using before building it
           # ex: `git checkout {{fullDotVersion}}`
           $ ./gradlew clean :storage:testJar
          @@ -4300,7 +4102,7 @@

          Try to send messages to the `tieredTopic` topic to roll the log segment:

          -
          $ bin/kafka-producer-perf-test.sh --bootstrap-server localhost:9092 --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1
          +
          $ bin/kafka-producer-perf-test.sh --topic tieredTopic --num-records 1200 --record-size 1024 --throughput -1 --producer-props bootstrap.servers=localhost:9092

          Then, after the active segment is rolled, the old segment should be moved to the remote storage and get deleted. This can be verified by checking the remote log directory configured above. For example: @@ -4350,19 +4152,19 @@

          While the Tiered Storage works for most use cases, it is still important to be aware of the following limitations:
          • No support for compacted topics
          • -
          • Disabling tiered storage on all topics where it is enabled is required before disabling tiered storage at the broker level
          • +
          • Deleting tiered storage enabled topics is required before disabling tiered storage at the broker level
          • Admin actions related to tiered storage feature are only supported on clients from version 3.0 onwards
          • No support for log segments missing producer snapshot file. It can happen when topic is created before v2.8.0.
          -

          For more information, please check Kafka Tiered Storage GA Release Notes. +

          For more information, please check Kafka Tiered Storage GA Release Notes.

          6.10 Consumer Rebalance Protocol

          Overview

          -

          Starting from Apache Kafka 4.0, the Next Generation of the Consumer Rebalance Protocol (KIP-848) +

          Starting from Apache Kafka 4.0, the Next Generation of the Consumer Rebalance Protocol (KIP-848) is Generally Available (GA). It improves the scalability of consumer groups while simplifying consumers. It also decreases rebalance times, thanks to its fully incremental design, which no longer relies on a global synchronization barrier.

          @@ -4398,7 +4200,7 @@

          New Consumer Metrics.

          + See New Consumer Metrics.

          When the new rebalance protocol is enabled, the following configurations and APIs are no longer usable:

            @@ -4437,7 +4239,7 @@

            Overview

            -

            Starting from Apache Kafka 4.0, Transactions Server Side Defense (KIP-890) +

            Starting from Apache Kafka 4.0, Transactions Server Side Defense (KIP-890) brings a strengthened transactional protocol. When enabled and using 4.0 producer clients, the producer epoch is bumped on every transaction to ensure every transaction includes the intended messages and duplicates are not written as part of the next transaction.

            @@ -4475,11 +4277,11 @@

            Overview

            -

            Starting from Apache Kafka 4.0, Eligible Leader Replicas (KIP-966 Part 1) - is available for the users to an improvement to Kafka replication (ELR is enabled by default on new clusters starting 4.1). As the "strict min ISR" rule has been generally applied, - which means the high watermark for the data partition can't advance if the size of the ISR is smaller than the min ISR(min.insync.replicas), it makes some replicas - that are not in the ISR safe to become the leader. The KRaft controller stores such replicas in the PartitionRecord field called Eligible Leader Replicas. During the - leader election, the controller will select the leaders with the following order:

            +

            Starting from Apache Kafka 4.0, Eligible Leader Replicas (KIP-966 Part 1) + is available for the users to an improvement to Kafka replication. As the "strict min ISR" rule has been generally applied, which means the high watermark for the data partition can't advance + if the size of the ISR is smaller than the min ISR(min.insync.replicas), it makes some replicas that are not in the ISR safe to become the leader. The KRaft controller + stores such replicas in the PartitionRecord field called Eligible Leader Replicas. During the leader election, the controller will select the leaders + with the following order:

            • If ISR is not empty, select one of them.
            • If ELR is not empty, select one that is not fenced.
            • @@ -4493,16 +4295,9 @@

              Tool

              -

              The ELR fields can be checked through the API DescribeTopicPartitions. The admin client can fetch the ELR info by describing the topics.

              -

              Note that when the ELR feature is enabled:

              -
                -
              • The cluster-level min.insync.replicas config will be added if there is not any. The value is the same as the static config in the active controller.
              • -
              • The removal of min.insync.replicas config at the cluster-level is not allowed.
              • -
              • If the cluster-level min.insync.replicas is updated, even if the value is unchanged, all the ELR state will be cleaned.
              • -
              • The previously set min.insync.replicas value at the broker-level config will be removed. Please set at the cluster-level if necessary.
              • -
              • The alteration of min.insync.replicas config at the broker-level is not allowed.
              • -
              • If min.insync.replicas is updated for a topic, the ELR state will be cleaned.
              • -
              +

              The ELR fields can be checked through the API DescribeTopicPartitions. The admin client can fetch the ELR info by describing the topics. + Also note that, if min.insync.replicas is updated for a topic, the ELR field will be cleaned. If cluster default min ISR is updated, + all the ELR fields will be cleaned.

              diff --git a/docs/protocol.html b/docs/protocol.html index 963ce5a5a5f74..84f4aec4b0d4f 100644 --- a/docs/protocol.html +++ b/docs/protocol.html @@ -122,11 +122,11 @@
              The server will reject requests with a version it does not support, and will always respond to the client with exactly the protocol format it expects based on the version it included in its request. The intended upgrade path is that new features would first be rolled out on the server (with the older clients not making use of them) and then as newer clients are deployed these new features would gradually be taken advantage of. Note there is an exceptional case while retrieving supported API versions where the server can respond with a different version.

              -

              Note that KIP-482 tagged fields can be added to a request without incrementing the version number. This offers an additional way of evolving the message schema without breaking compatibility. Tagged fields do not take up any space when the field is not set. Therefore, if a field is rarely used, it is more efficient to make it a tagged field than to put it in the mandatory schema. However, tagged fields are ignored by recipients that don't know about them, which could pose a challenge if this is not the behavior that the sender wants. In such cases, a version bump may be more appropriate. +

              Note that KIP-482 tagged fields can be added to a request without incrementing the version number. This offers an additional way of evolving the message schema without breaking compatibility. Tagged fields do not take up any space when the field is not set. Therefore, if a field is rarely used, it is more efficient to make it a tagged field than to put it in the mandatory schema. However, tagged fields are ignored by recipients that don't know about them, which could pose a challenge if this is not the behavior that the sender wants. In such cases, a version bump may be more appropriate.

              Retrieving Supported API versions

              In order to work against multiple broker versions, clients need to know what versions of various APIs a - broker supports. The broker exposes this information since 0.10.0.0 as described in KIP-35. + broker supports. The broker exposes this information since 0.10.0.0 as described in KIP-35. Clients should use the supported API versions information to choose the highest API version supported by both client and broker. If no such version exists, an error should be reported to the user.

              The following sequence may be used by a client to obtain supported API versions from a broker.

              @@ -144,7 +144,7 @@
              error code set to UNSUPPORTED_VERSION and the api_versions field populated with the supported version of the ApiVersionsRequest. It is then up to the client to retry, making another ApiVersionsRequest using the highest version supported by the client and broker. - See KIP-511: Collect and Expose Client's Name and Version in the Brokers + See KIP-511: Collect and Expose Client's Name and Version in the Brokers
            • If multiple versions of an API are supported by broker and client, clients are recommended to use the latest version supported by the broker and itself.
            • Deprecation of a protocol version is done by marking an API version as deprecated in the protocol documentation.
            • diff --git a/docs/security.html b/docs/security.html index 9364a05e40af7..819b993969b4f 100644 --- a/docs/security.html +++ b/docs/security.html @@ -510,8 +510,8 @@
              SSL key and certificates in PEM format

              Examples using console-producer and console-consumer: -
              $ bin/kafka-console-producer.sh --bootstrap-server localhost:9093 --topic test --command-config client-ssl.properties
              -$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9093 --topic test --command-config client-ssl.properties
              +
              $ bin/kafka-console-producer.sh --bootstrap-server localhost:9093 --topic test --producer.config client-ssl.properties
              +$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9093 --topic test --consumer.config client-ssl.properties

              7.4 Authentication using SASL

              @@ -556,8 +556,8 @@

              See GSSAPI (Kerberos), PLAIN, - SCRAM, or - non-production/production OAUTHBEARER for example broker configurations.

              + SCRAM or + OAUTHBEARER for example broker configurations.

            • JAAS configuration for Kafka clients
              @@ -579,8 +579,8 @@

              See GSSAPI (Kerberos), PLAIN, - SCRAM, or - non-production/production OAUTHBEARER for example client configurations.

            • + SCRAM or + OAUTHBEARER for example configurations.

            • JAAS configuration using static config file
              To configure SASL authentication on the clients using static JAAS config file: @@ -589,8 +589,8 @@

              KafkaClient for the selected mechanism as described in the examples for setting up GSSAPI (Kerberos), PLAIN, - SCRAM, or - non-production/production OAUTHBEARER. + SCRAM or + OAUTHBEARER. For example, GSSAPI credentials may be configured as:
              KafkaClient {
              @@ -905,13 +905,10 @@ 

              RFC 7628. The default OAUTHBEARER implementation in Kafka creates and validates Unsecured JSON Web Tokens and is only suitable for use in non-production Kafka installations. Refer to Security Considerations - for more details. Recent versions of Apache Kafka have added production-ready OAUTHBEARER implementations that support interaction with an OAuth 2.0-standards - compliant identity provider. Both modes are described in the following, noted where applicable.

              + for more details.

              Under the default implementation of principal.builder.class, the principalName of OAuthBearerToken is used as the authenticated Principal for configuration of ACLs etc.
                -
              1. Configuring Non-production Kafka Brokers
                -

                The default implementation of SASL/OAUTHBEARER in Kafka creates and validates Unsecured JSON Web Tokens. - While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment.

                +
              2. Configuring Kafka Brokers
                1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example:
                  KafkaServer {
                  @@ -920,40 +917,7 @@ 

                  The property unsecuredLoginStringClaim_sub in the KafkaServer section is used by the broker when it initiates connections to other brokers. In this example, admin will appear in the - subject (sub) claim and will be the user for inter-broker communication. - -

                  Here are the various supported JAAS module options on the broker side for Unsecured JSON Web Token validation: - - - - - - - - - - - - - - - - - - - - - -
                  JAAS Module Option for Unsecured Token ValidationDocumentation
                  unsecuredValidatorPrincipalClaimName="value"Set to a non-empty value if you wish a particular String claim - holding a principal name to be checked for existence; the default is to check - for the existence of the 'sub' claim.
                  unsecuredValidatorScopeClaimName="value"Set to a custom claim name if you wish the name of the String or - String List claim holding any token scope to be something other than - 'scope'.
                  unsecuredValidatorRequiredScope="value"Set to a space-delimited list of scope values if you wish the - String/String List claim holding the token scope to be checked to - make sure it contains certain values.
                  unsecuredValidatorAllowableClockSkewMs="value"Set to a positive integer value if you wish to allow up to some number of - positive milliseconds of clock skew (the default is 0).
                  -

                  -
                2. + subject (sub) claim and will be the user for inter-broker communication.
                3. Pass the JAAS config file location as JVM parameter to each Kafka broker:
                  -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
                4. Configure SASL port and SASL mechanisms in server.properties as described here. For example: @@ -963,40 +927,8 @@

            • -
            • Configuring Production Kafka Brokers
              -
                -
              1. Add a suitably modified JAAS file similar to the one below to each Kafka broker's config directory, let's call it kafka_server_jaas.conf for this example: -
                KafkaServer {
                -    org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;
                -};
              2. -
              3. Pass the JAAS config file location as JVM parameter to each Kafka broker: -
                -Djava.security.auth.login.config=/etc/kafka/kafka_server_jaas.conf
              4. -
              5. Configure SASL port and SASL mechanisms in server.properties as described here. For example: -
                listeners=SASL_SSL://host.name:port
                -security.inter.broker.protocol=SASL_SSL
                -sasl.mechanism.inter.broker.protocol=OAUTHBEARER
                -sasl.enabled.mechanisms=OAUTHBEARER
                -listener.name.<listener name>.oauthbearer.sasl.server.callback.handler.class=org.apache.kafka.common.security.oauthbearer.OAuthBearerValidatorCallbackHandler
                -listener.name.<listener name>.oauthbearer.sasl.oauthbearer.jwks.endpoint.url=https://example.com/oauth2/v1/keys
                - - The OAUTHBEARER broker configuration includes: - -
                  -
                • sasl.oauthbearer.clock.skew.seconds
                • -
                • sasl.oauthbearer.expected.audience
                • -
                • sasl.oauthbearer.expected.issuer
                • -
                • sasl.oauthbearer.jwks.endpoint.refresh.ms
                • -
                • sasl.oauthbearer.jwks.endpoint.retry.backoff.max.ms
                • -
                • sasl.oauthbearer.jwks.endpoint.retry.backoff.ms
                • -
                • sasl.oauthbearer.jwks.endpoint.url
                • -
                • sasl.oauthbearer.scope.claim.name
                • -
                • sasl.oauthbearer.sub.claim.name
                • -
                -
              6. -
              -
            • -
            • Configuring Non-production Kafka Clients
              +
            • Configuring Kafka Clients
              To configure SASL authentication on the clients:
              1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. @@ -1011,61 +943,6 @@

                sub) claims in sasl.jaas.config.

                -

                The default implementation of SASL/OAUTHBEARER in Kafka creates and validates Unsecured JSON Web Tokens. - While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment.

                -

                Here are the various supported JAAS module options on the client side (and on the broker side if OAUTHBEARER is the inter-broker protocol): - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
                JAAS Module Option for Unsecured Token CreationDocumentation
                unsecuredLoginStringClaim_<claimname>="value"Creates a String claim with the given name and value. Any valid - claim name can be specified except 'iat' and 'exp' (these are - automatically generated).
                unsecuredLoginNumberClaim_<claimname>="value"Creates a Number claim with the given name and value. Any valid - claim name can be specified except 'iat' and 'exp' (these are - automatically generated).
                unsecuredLoginListClaim_<claimname>="value"Creates a String List claim with the given name and values parsed - from the given value where the first character is taken as the delimiter. For - example: unsecuredLoginListClaim_fubar="|value1|value2". Any valid - claim name can be specified except 'iat' and 'exp' (these are - automatically generated).
                unsecuredLoginExtension_<extensionname>="value"Creates a String extension with the given name and value. - For example: unsecuredLoginExtension_traceId="123". A valid extension name - is any sequence of lowercase or uppercase alphabet characters. In addition, the "auth" extension name is reserved. - A valid extension value is any combination of characters with ASCII codes 1-127. -
                unsecuredLoginPrincipalClaimNameSet to a custom claim name if you wish the name of the String - claim holding the principal name to be something other than 'sub'.
                unsecuredLoginLifetimeSecondsSet to an integer value if the token expiration is to be set to something - other than the default value of 3600 seconds (which is 1 hour). The - 'exp' claim will be set to reflect the expiration time.
                unsecuredLoginScopeClaimNameSet to a custom claim name if you wish the name of the String or - String List claim holding any token scope to be something other than - 'scope'.
                -

                -

                JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers as described here. Clients use the login section named KafkaClient. This option allows only one user for all client connections from a JVM.

              2. @@ -1076,64 +953,101 @@

            • -
            • Configuring Production Kafka Clients
              - To configure SASL authentication on the clients: -
                -
              1. Configure the JAAS configuration property for each client in producer.properties or consumer.properties. - The login module describes how the clients like producer and consumer can connect to the Kafka Broker. - The following is an example configuration for a client for the OAUTHBEARER mechanisms: -
                sasl.jaas.config=org.apache.kafka.common.security.oauthbearer.OAuthBearerLoginModule required ;
                - -

                JAAS configuration for clients may alternatively be specified as a JVM parameter similar to brokers - as described here. Clients use the login section named - KafkaClient. This option allows only one user for all client connections from a JVM.

              2. -
              3. Configure the following properties in producer.properties or consumer.properties. For example, if using the OAuth client_credentials grant type - to communicate with the OAuth identity provider, the configuration might look like this: -
                security.protocol=SASL_SSL
                -sasl.mechanism=OAUTHBEARER
                -sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.ClientCredentialsJwtRetriever
                -sasl.oauthbearer.client.credentials.client.id=jdoe
                -sasl.oauthbearer.client.credentials.client.secret=$3cr3+
                -sasl.oauthbearer.scope=my-application-scope
                -sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token
                - - Or, if using the OAuth urn:ietf:params:oauth:grant-type:jwt-bearer grant type - to communicate with the OAuth identity provider, the configuration might look like this: -
                security.protocol=SASL_SSL
                -sasl.mechanism=OAUTHBEARER
                -sasl.oauthbearer.jwt.retriever.class=org.apache.kafka.common.security.oauthbearer.JwtBearerJwtRetriever
                -sasl.oauthbearer.assertion.private.key.file=/path/to/private.key
                -sasl.oauthbearer.assertion.algorithm=RS256
                -sasl.oauthbearer.assertion.claim.exp.seconds=600
                -sasl.oauthbearer.assertion.template.file=/path/to/template.json
                -sasl.oauthbearer.scope=my-application-scope
                -sasl.oauthbearer.token.endpoint.url=https://example.com/oauth2/v1/token
                - The OAUTHBEARER client configuration includes: - -
                  -
                • sasl.oauthbearer.assertion.algorithm
                • -
                • sasl.oauthbearer.assertion.claim.aud
                • -
                • sasl.oauthbearer.assertion.claim.exp.seconds
                • -
                • sasl.oauthbearer.assertion.claim.iss
                • -
                • sasl.oauthbearer.assertion.claim.jti.include
                • -
                • sasl.oauthbearer.assertion.claim.nbf.seconds
                • -
                • sasl.oauthbearer.assertion.claim.sub
                • -
                • sasl.oauthbearer.assertion.file
                • -
                • sasl.oauthbearer.assertion.private.key.file
                • -
                • sasl.oauthbearer.assertion.private.key.passphrase
                • -
                • sasl.oauthbearer.assertion.template.file
                • -
                • sasl.oauthbearer.client.credentials.client.id
                • -
                • sasl.oauthbearer.client.credentials.client.secret
                • -
                • sasl.oauthbearer.header.urlencode
                • -
                • sasl.oauthbearer.jwt.retriever.class
                • -
                • sasl.oauthbearer.jwt.validator.class
                • -
                • sasl.oauthbearer.scope
                • -
                • sasl.oauthbearer.token.endpoint.url
                • -
                +
              4. Unsecured Token Creation Options for SASL/OAUTHBEARER
                +
                  +
                • The default implementation of SASL/OAUTHBEARER in Kafka creates and validates Unsecured JSON Web Tokens. + While suitable only for non-production use, it does provide the flexibility to create arbitrary tokens in a DEV or TEST environment.
                • +
                • Here are the various supported JAAS module options on the client side (and on the broker side if OAUTHBEARER is the inter-broker protocol): + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
                  JAAS Module Option for Unsecured Token CreationDocumentation
                  unsecuredLoginStringClaim_<claimname>="value"Creates a String claim with the given name and value. Any valid + claim name can be specified except 'iat' and 'exp' (these are + automatically generated).
                  unsecuredLoginNumberClaim_<claimname>="value"Creates a Number claim with the given name and value. Any valid + claim name can be specified except 'iat' and 'exp' (these are + automatically generated).
                  unsecuredLoginListClaim_<claimname>="value"Creates a String List claim with the given name and values parsed + from the given value where the first character is taken as the delimiter. For + example: unsecuredLoginListClaim_fubar="|value1|value2". Any valid + claim name can be specified except 'iat' and 'exp' (these are + automatically generated).
                  unsecuredLoginExtension_<extensionname>="value"Creates a String extension with the given name and value. + For example: unsecuredLoginExtension_traceId="123". A valid extension name + is any sequence of lowercase or uppercase alphabet characters. In addition, the "auth" extension name is reserved. + A valid extension value is any combination of characters with ASCII codes 1-127. +
                  unsecuredLoginPrincipalClaimNameSet to a custom claim name if you wish the name of the String + claim holding the principal name to be something other than 'sub'.
                  unsecuredLoginLifetimeSecondsSet to an integer value if the token expiration is to be set to something + other than the default value of 3600 seconds (which is 1 hour). The + 'exp' claim will be set to reflect the expiration time.
                  unsecuredLoginScopeClaimNameSet to a custom claim name if you wish the name of the String or + String List claim holding any token scope to be something other than + 'scope'.
                • -
                • The default implementation of SASL/OAUTHBEARER depends on the jackson-databind library. - Since it's an optional dependency, users have to configure it as a dependency via their build tool.
                • -
              +
            + +
          • Unsecured Token Validation Options for SASL/OAUTHBEARER
            +
              +
            • Here are the various supported JAAS module options on the broker side for Unsecured JSON Web Token validation: + + + + + + + + + + + + + + + + + + + + + +
              JAAS Module Option for Unsecured Token ValidationDocumentation
              unsecuredValidatorPrincipalClaimName="value"Set to a non-empty value if you wish a particular String claim + holding a principal name to be checked for existence; the default is to check + for the existence of the 'sub' claim.
              unsecuredValidatorScopeClaimName="value"Set to a custom claim name if you wish the name of the String or + String List claim holding any token scope to be something other than + 'scope'.
              unsecuredValidatorRequiredScope="value"Set to a space-delimited list of scope values if you wish the + String/String List claim holding the token scope to be checked to + make sure it contains certain values.
              unsecuredValidatorAllowableClockSkewMs="value"Set to a positive integer value if you wish to allow up to some number of + positive milliseconds of clock skew (the default is 0).
              +
            • +
            • The default unsecured SASL/OAUTHBEARER implementation may be overridden (and must be overridden in production environments) + using custom login and SASL Server callback handlers.
            • +
            • For more details on security considerations, refer to RFC 6749, Section 10.
            • +
          • Token Refresh for SASL/OAUTHBEARER
            Kafka periodically refreshes any token before it expires so that the client can continue to make @@ -1211,7 +1125,7 @@

        • Follow the mechanism-specific steps in GSSAPI (Kerberos), PLAIN, - SCRAM, and non-production/production OAUTHBEARER + SCRAM and OAUTHBEARER to configure SASL for the enabled mechanisms.
        • @@ -1232,7 +1146,7 @@

          Delegation token based authentication is a lightweight authentication mechanism to complement existing SASL/SSL methods. Delegation tokens are shared secrets between kafka brokers and clients. Delegation tokens will help processing frameworks to distribute the workload to available workers in a secure environment without the added cost of distributing - Kerberos TGT/keytabs or keystores when 2-way SSL is used. See KIP-48 + Kerberos TGT/keytabs or keystores when 2-way SSL is used. See KIP-48 for more details.

          Under the default implementation of principal.builder.class, the owner of delegation token is used as the authenticated Principal for configuration of ACLs etc. @@ -1332,21 +1246,13 @@

          authorizer.class.name=org.apache.kafka.metadata.authorizer.StandardAuthorizer Kafka ACLs are defined in the general format of "Principal {P} is [Allowed|Denied] Operation {O} From Host {H} on any Resource {R} matching ResourcePattern {RP}". - You can read more about the ACL structure in KIP-11 and - resource patterns in KIP-290. - In order to add, remove, or list ACLs, you can use the Kafka ACL CLI kafka-acls.sh. -

          Behavior Without ACLs:
          -

          If a resource (R) does not have any ACLs defined, meaning that no ACL matches the resource, Kafka will restrict - access to that resource. In this situation, only super users are allowed to access it.

          -
          Changing the Default Behavior:
          -

          If you prefer that resources without any ACLs be accessible by all users (instead of just super users), you can - change the default behavior. To do this, add the following line to your server.properties file:

          + You can read more about the ACL structure in KIP-11 and + resource patterns in KIP-290. + In order to add, remove, or list ACLs, you can use the Kafka ACL CLI kafka-acls.sh. By default, if no ResourcePatterns match a specific Resource R, + then R has no associated ACLs, and therefore no one other than super users is allowed to access R. + If you want to change that behavior, you can include the following in server.properties.
          allow.everyone.if.no.acl.found=true
          -

          With this setting enabled, if a resource does not have any ACLs defined, Kafka will allow access to everyone. If a - resource has one or more ACLs defined, those ACL rules will be enforced as usual, regardless of the setting. - One can also add super users in server.properties like the following (note that the delimiter is semicolon since SSL - user names may contain comma). Default PrincipalType string "User" is case sensitive. -

          + One can also add super users in server.properties like the following (note that the delimiter is semicolon since SSL user names may contain comma). Default PrincipalType string "User" is case sensitive.
          super.users=User:Bob;User:Alice
          KRaft Principal Forwarding
          @@ -1659,10 +1565,10 @@
          DelegationToken: this represents the delegation tokens in the cluster. Actions, such as describing delegation tokens could be protected by a privilege on the DelegationToken resource. Since these objects have a little special behavior in Kafka it is recommended to read - KIP-48 + KIP-48 and the related upstream documentation at Authentication using Delegation Tokens.
        • User: CreateToken and DescribeToken operations can be granted to User resources to allow creating and describing - tokens for other users. More info can be found in KIP-373.
        • + tokens for other users. More info can be found in KIP-373.
          Operations and Resources on Protocols

          In the below table we'll list the valid operations on resources that are executed by the Kafka API protocols.

          @@ -1924,7 +1830,7 @@
          - diff --git a/docs/streams/architecture.html b/docs/streams/architecture.html index ca77069700e80..1094f16595745 100644 --- a/docs/streams/architecture.html +++ b/docs/streams/architecture.html @@ -102,7 +102,7 @@

          Kafka's coordination functionality. + The assignment of Kafka topic partitions amongst the various stream threads is transparently handled by Kafka Streams leveraging Kafka's coordination functionality.

          diff --git a/docs/streams/core-concepts.html b/docs/streams/core-concepts.html index a2d1b7209b551..d9a2851e2713f 100644 --- a/docs/streams/core-concepts.html +++ b/docs/streams/core-concepts.html @@ -279,7 +279,7 @@

          Lambda Architecture. + to the stream processing pipeline, known as the Lambda Architecture. Prior to 0.11.0.0, Kafka only provides at-least-once delivery guarantees and hence any stream processing systems that leverage it as the backend storage could not guarantee end-to-end exactly-once semantics. In fact, even for those stream processing systems that claim to support exactly-once processing, as long as they are reading from / writing to Kafka as the source / sink, their applications cannot actually guarantee that no duplicates will be generated throughout the pipeline.
          @@ -289,7 +289,7 @@

          KIP-129.
          + For more information on how this is done inside Kafka Streams, see KIP-129.
          As of the 2.6.0 release, Kafka Streams supports an improved implementation of exactly-once processing, named "exactly-once v2", which requires broker version 2.5.0 or newer. @@ -298,7 +298,7 @@

          KIP-447.
          + KIP-447.
          To enable exactly-once semantics when running Kafka Streams applications, set the processing.guarantee config value (default value is at_least_once) diff --git a/docs/streams/developer-guide/app-reset-tool.html b/docs/streams/developer-guide/app-reset-tool.html index 7f299ac994160..9b75db64e5e41 100644 --- a/docs/streams/developer-guide/app-reset-tool.html +++ b/docs/streams/developer-guide/app-reset-tool.html @@ -67,7 +67,7 @@

          Step 1: Run the application reset tool

          Invoke the application reset tool from the command line

          Warning! This tool makes irreversible changes to your application. It is strongly recommended that you run this once with --dry-run to preview your changes before making them.

          -
          $ bin/kafka-streams-application-reset.sh
          +
          $ bin/kafka-streams-application-reset

          The tool accepts the following parameters:

          Option (* = required)                 Description
           ---------------------                 -----------
          @@ -80,12 +80,9 @@ 

          Step 1: Run the application reset toolOptional configuration parameters
        • Kafka consumers and producer configuration parameters @@ -297,12 +296,12 @@

          num.standby.replicascommit.interval.ms Low The frequency in milliseconds with which to save the position (offsets in source topics) of tasks. - 30000 (30 seconds) (at-least-once) / 100 (exactly-once) + 30000 (30 seconds) default.deserialization.exception.handler (Deprecated. Use deserialization.exception.handler instead.) Medium Exception handling class that implements the DeserializationExceptionHandler interface. - LogAndFailExceptionHandler + LogAndContinueExceptionHandler default.key.serde Medium @@ -327,10 +326,11 @@

          num.standby.replicasnull - default.dsl.store (Deprecated. Use dsl.store.suppliers.class instead.) + default.dsl.store Low - The default state store type used by DSL operators. + [DEPRECATED] The default state store type used by DSL operators. Deprecated in + favor of dsl.store.suppliers.class "ROCKS_DB" @@ -348,26 +348,17 @@

          num.standby.replicasensure.explicit.internal.resource.naming - High - - Whether to enforce explicit naming for all internal resources of the topology, including internal - topics (e.g., changelog and repartition topics) and their associated state stores. - When enabled, the application will refuse to start if any internal resource has an auto-generated name. - - false - - log.summary.interval.ms + log.summary.interval.ms Low The output interval in milliseconds for logging summary information (disabled if negative). 120000 (2 minutes) - enable.metrics.push + enable.metrics.push Low Whether to enable pushing of client metrics to the cluster, if the cluster has a client metrics subscription which matches this client. true - max.task.idle.ms + max.task.idle.ms Medium

          @@ -386,76 +377,76 @@

          num.standby.replicas0 - max.warmup.replicas + max.warmup.replicas Medium The maximum number of warmup replicas (extra standbys beyond the configured num.standbys) that can be assigned at once. 2 - metric.reporters + metric.reporters Low A list of classes to use as metrics reporters. the empty list - metrics.num.samples + metrics.num.samples Low The number of samples maintained to compute metrics. 2 - metrics.recording.level + metrics.recording.level Low The highest recording level for metrics. INFO - metrics.sample.window.ms + metrics.sample.window.ms Low The window of time in milliseconds a metrics sample is computed over. 30000 (30 seconds) - num.standby.replicas + num.standby.replicas High The number of standby replicas for each task. 0 - num.stream.threads + num.stream.threads Medium The number of threads to execute stream processing. 1 - probing.rebalance.interval.ms + probing.rebalance.interval.ms Low The maximum time in milliseconds to wait before triggering a rebalance to probe for warmup replicas that have sufficiently caught up. 600000 (10 minutes) - processing.exception.handler + processing.exception.handler Medium Exception handling class that implements the ProcessingExceptionHandler interface. LogAndFailProcessingExceptionHandler - processing.guarantee + processing.guarantee Medium The processing mode. Can be either "at_least_once" or "exactly_once_v2" (for EOS version 2, requires broker version 2.5+). See Processing Guarantee.. "at_least_once" - processor.wrapper.class + processor.wrapper.class Medium A class or class name implementing the ProcessorWrapper interface. Must be passed in when creating the topology, and will not be applied unless passed in to the appropriate constructor as a TopologyConfig. You should use the StreamsBuilder#new(TopologyConfig) constructor for DSL applications, and the Topology#new(TopologyConfig) constructor for PAPI applications. - production.exception.handler + production.exception.handler Medium Exception handling class that implements the ProductionExceptionHandler interface. DefaultProductionExceptionHandler - poll.ms + poll.ms Low The amount of time in milliseconds to block waiting for input. 100 - rack.aware.assignment.strategy + rack.aware.assignment.strategy Low The strategy used for rack aware assignment. Acceptable value are "none" (default), @@ -464,7 +455,7 @@

          num.standby.replicasRack Aware Assignment Strategy. "none" - rack.aware.assignment.tags + List of tag keys used to distribute standby replicas across Kafka Streams clients. When configured, Kafka Streams will make a best-effort to distribute the standby tasks over @@ -472,29 +463,24 @@

          num.standby.replicasRack Aware Assignment Tags. the empty list - rack.aware.assignment.non_overlap_cost + rack.aware.assignment.non_overlap_cost Low Cost associated with moving tasks from existing assignment. See Rack Aware Assignment Non-Overlap-Cost. null - rack.aware.assignment.non_overlap_cost + rack.aware.assignment.non_overlap_cost Low Cost associated with cross rack traffic. See Rack Aware Assignment Traffic-Cost. null - replication.factor + replication.factor Medium The replication factor for changelog topics and repartition topics created by the application. The default of -1 (meaning: use broker default replication factor) requires broker version 2.4 or newer. -1 - repartition.purge.interval.ms - Low - The frequency in milliseconds with which to delete fully consumed records from repartition topics. Purging will occur after at least this value since the last purge, but may be delayed until later. - 30000 (30 seconds) - retry.backoff.ms Low The amount of time in milliseconds, before a request is retried. @@ -542,16 +528,11 @@

          num.standby.replicasAdded to a windows maintainMs to ensure data is not deleted from the log prematurely. Allows for clock drift. 86400000 (1 day) - window.size.ms (Deprecated. See Window Serdes for alternatives.) + window.size.ms Low Sets window size for the deserializer in order to calculate window end times. null - windowed.inner.class.serde (Deprecated. See Window Serdes for alternatives.) - Low - Serde for the inner class of a windowed record. Must implement the Serde interface. - null -
          @@ -670,7 +651,7 @@

          acceptable.recovery.lagFailOnInvalidTimestamp. This extractor retrieves built-in timestamps that are automatically embedded into Kafka messages by the Kafka producer client since - Kafka version 0.10. + Kafka version 0.10. Depending on the setting of Kafka’s server-side log.message.timestamp.type broker and message.timestamp.type topic parameters, this extractor provides you with:

          -
          -

          ensure.explicit.internal.resource.naming

          -
          -
          -

          - Whether to enforce explicit naming for all internal resources of the topology, including internal - topics (e.g., changelog and repartition topics) and their associated state stores. - When enabled, the application will refuse to start if any internal resource has an auto-generated name. -

          -
          -
          -
          -
          -

          group.protocol

          -
          -
          -

          - The group protocol used by the Kafka Streams client used for coordination. - It determines how the client will communicate with the Kafka brokers and other clients in the same group. - The default value is "classic", which is the classic consumer group protocol. - Can be set to "streams" (requires broker-side enablement) to enable the new Kafka Streams group protocol. - Note that the "streams" rebalance protocol is an Early Access feature and should not be used in production. -

          -
          -
          -

          rack.aware.assignment.non_overlap_cost

          @@ -1228,6 +1183,18 @@

          topology.optimization +

          windowed.inner.class.serde

          +
          +
          +

          + Serde for the inner class of a windowed record. Must implement the org.apache.kafka.common.serialization.Serde interface. +

          +

          + Note that this config is only used by plain consumer/producer clients that set a windowed de/serializer type via configs. For Kafka Streams applications that deal with windowed types, you must pass in the inner serde type when you instantiate the windowed serde object for your topology. +

          +
          +

          upgrade.from

          diff --git a/docs/streams/developer-guide/datatypes.html b/docs/streams/developer-guide/datatypes.html index 2bc2d7d5d0ef3..f2968591ccd20 100644 --- a/docs/streams/developer-guide/datatypes.html +++ b/docs/streams/developer-guide/datatypes.html @@ -48,10 +48,9 @@ -
        • Kafka Streams DSL for Scala Implicit Serdes
        • +
        • Kafka Streams DSL for Scala Implicit Serdes
        • This artifact provides the following serde implementations under the package org.apache.kafka.common.serialization, which you can leverage when e.g., defining default serializers in your Streams configuration.

          @@ -157,83 +156,11 @@

          Primitive and basic types

          JSON

          -

          The Kafka Streams code examples also include a basic serde implementation for JSON:

          - -

          As shown in the example, you can use JSONSerdes inner classes Serdes.serdeFrom(<serializerInstance>, <deserializerInstance>) to construct JSON compatible serializers and deserializers. +

          You can use JsonSerializer and JsonDeserializer from Kafka Connect to construct JSON compatible serializers and deserializers + using Serdes.serdeFrom(<serializerInstance>, <deserializerInstance>). + Note, that Kafka Connect's Json (de)serializer requires Java 17.

          -
          -

          Window Serdes

          -

          Apache Kafka Streams includes serde implementations for windowed types in - its kafka-streams Maven artifact:

          -
          <dependency>
          -    <groupId>org.apache.kafka</groupId>
          -    <artifactId>kafka-streams</artifactId>
          -    <version>{{fullDotVersion}}</version>
          -</dependency>
          -

          This artifact provides the following windowed serde implementations under the package org.apache.kafka.streams.kstream:

          - -

          Serdes:

          -
            -
          • WindowedSerdes.TimeWindowedSerde<T>
          • -
          • WindowedSerdes.SessionWindowedSerde<T>
          • -
          - -

          Serializers:

          -
            -
          • TimeWindowedSerializer<T>
          • -
          • SessionWindowedSerializer<T>
          • -
          - -

          Deserializers:

          -
            -
          • TimeWindowedDeserializer<T>
          • -
          • SessionWindowedDeserializer<T>
          • -
          -

          Usage in Code

          -

          When using windowed serdes in your application code, you typically create instances via constructors or factory methods:

          -
          // Time windowed serde - using factory method
          -Serde<Windowed<String>> timeWindowedSerde = 
          -    WindowedSerdes.timeWindowedSerdeFrom(String.class, 500L);
          -
          -// Time windowed serde - using constructor
          -Serde<Windowed<String>> timeWindowedSerde2 = 
          -    new WindowedSerdes.TimeWindowedSerde<>(Serdes.String(), 500L);
          -
          -// Session windowed serde - using factory method
          -Serde<Windowed<String>> sessionWindowedSerde = 
          -    WindowedSerdes.sessionWindowedSerdeFrom(String.class);
          -
          -// Session windowed serde - using constructor  
          -Serde<Windowed<String>> sessionWindowedSerde2 = 
          -    new WindowedSerdes.SessionWindowedSerde<>(Serdes.String());
          -
          -// Using individual serializers/deserializers
          -TimeWindowedSerializer<String> serializer = new TimeWindowedSerializer<>(Serdes.String().serializer());
          -TimeWindowedDeserializer<String> deserializer = new TimeWindowedDeserializer<>(Serdes.String().deserializer(), 500L);
          - -

          Usage in Command Line

          -

          When using command-line tools (like bin/kafka-console-consumer.sh), you can configure windowed deserializers by passing the inner class and window size via configuration properties. The property names use a prefix pattern:

          -
          # Time windowed deserializer configuration
          ---property print.key=true \
          ---property key.deserializer=org.apache.kafka.streams.kstream.TimeWindowedDeserializer \
          ---property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer \
          ---property key.deserializer.window.size.ms=500
          -
          -# Session windowed deserializer configuration  
          ---property print.key=true \
          ---property key.deserializer=org.apache.kafka.streams.kstream.SessionWindowedDeserializer \
          ---property key.deserializer.windowed.inner.deserializer.class=org.apache.kafka.common.serialization.StringDeserializer
          - -

          Deprecated Configs

          -

          The following StreamsConfig parameters are deprecated in favor of passing parameters directly to serializer/deserializer constructors:

          -
            -
          • StreamsConfig.WINDOWED_INNER_CLASS_SERDE is deprecated in favor of TimeWindowedSerializer.WINDOWED_INNER_SERIALIZER_CLASS and TimeWindowedDeserializer.WINDOWED_INNER_DESERIALIZER_CLASS
          • -
          • StreamsConfig.WINDOW_SIZE_MS_CONFIG is deprecated in favor of TimeWindowedDeserializer.WINDOW_SIZE_MS_CONFIG
          • -
          -

          Implementing custom Serdes

          If you need to implement custom Serdes, your best starting point is to take a look at the source code references of diff --git a/docs/streams/developer-guide/dsl-api.html b/docs/streams/developer-guide/dsl-api.html index 4de5389ac75f4..b59ac764f32d2 100644 --- a/docs/streams/developer-guide/dsl-api.html +++ b/docs/streams/developer-guide/dsl-api.html @@ -764,10 +764,10 @@

          Manually trigger repartitioning of the stream with desired number of partitions. (details)

          - Kafka Streams will manage the topic for repartition(). + repartition() is similar to through() however Kafka Streams will manage the topic for you. Generated topic is treated as internal topic, as a result data will be purged automatically as any other internal repartition topic. In addition, you can specify the desired number of partitions, which allows to easily scale in/out downstream sub-topologies. - repartition() operation always triggers repartitioning of the stream, as a result it can be used with embedded Processor API methods (like process() et al.) that do not trigger auto repartitioning when key changing operation is performed beforehand. + repartition() operation always triggers repartitioning of the stream, as a result it can be used with embedded Processor API methods (like transform() et al.) that do not trigger auto repartitioning when key changing operation is performed beforehand.
          KStream<byte[], String> stream = ... ;
           KStream<byte[], String> repartitionedStream = stream.repartition(Repartitioned.numberOfPartitions(10));
          @@ -3130,20 +3130,15 @@

          Operations and concepts

          Processor (provided by a given ProcessorSupplier);
        • KStream#processValues: Process all records in a stream, one record at a time, by applying a - FixedKeyProcessor (provided by a given FixedKeyProcessorSupplier) - [CAUTION: If you are deploying a new Kafka Streams application, and you are using the - "merge repartition topics" optimization, you should enable the fix for - KAFKA-19668 to avoid compatibility - issues for future upgrades to newer versions of Kafka Streams; - For more details, see the migration guide below]; + FixedKeyProcessor (provided by a given FixedKeyProcessorSupplier);
        • Processor: A processor of key-value pair records;
        • ContextualProcessor: An abstract implementation of Processor that manages the - ProcessorContext instance; + ProcessorContext instance.
        • FixedKeyProcessor: A processor of key-value pair records where keys are immutable;
        • ContextualFixedKeyProcessor: An abstract implementation of FixedKeyProcessor that - manages the FixedKeyProcessorContext instance; + manages the FixedKeyProcessorContext instance.
        • ProcessorSupplier: A processor supplier that can create one or more Processor instances; and
        • @@ -3461,25 +3456,6 @@

          The Processor API now serves as a unified replacement for all these methods. It simplifies the API surface while maintaining support for both stateless and stateful operations.

          - -

          CAUTION: If you are using KStream.transformValues() and you have the "merge repartition topics" - optimization enabled, rewriting your program to KStream.processValues() might not be safe due to - KAFKA-19668. For this case, you should not upgrade - to Kafka Streams 4.0.0 or 4.1.0, but use Kafka Streams 4.0.1 instead, which contains a fix. - Note, that the fix is not enabled by default for backward compatibility reasons, and you would need to - enable the fix by setting config __enable.process.processValue.fix__ = true and pass it - into StreamsBuilder() constructor.

          -
          final Properties properties = new Properties();
          -properties.put(StreamsConfig.APPLICATION_ID_CONFIG, ...);
          -properties.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, ...);
          -properties.put(TopologyConfig.InternalConfig.ENABLE_PROCESS_PROCESSVALUE_FIX, true);
          -
          -final StreamsBuilder builder = new StreamsBuilder(new TopologyConfig(new StreamsConfig(properties)));
          - -

          It is recommended, that you compare the output of Topology.describe() for the old and new topology, - to verify if the rewrite to processValues() is correct, and that it does not introduce any incompatibilities. - You should also test the upgrade in a non-production environment.

          -

          Migration Examples

          To migrate from the deprecated transform, transformValues, flatTransform, and flatTransformValues methods to the Processor API (PAPI) in Kafka Streams, let's resume the diff --git a/docs/streams/developer-guide/dsl-topology-naming.html b/docs/streams/developer-guide/dsl-topology-naming.html index 832806050a593..ec3bc857c10ef 100644 --- a/docs/streams/developer-guide/dsl-topology-naming.html +++ b/docs/streams/developer-guide/dsl-topology-naming.html @@ -300,19 +300,6 @@

          Conclusion

          Stream/Table non-stateful operationsNamed
          - - To further enforce best practices, Kafka Streams provides a configuration option, - ensure.explicit.internal.resource.naming: -
          /
          -            Properties props = new Properties();
          -            props.put(StreamsConfig.ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG, true);
          -		  
          - This parameter ensures that all internal topics, state stores, and changelog topics have explicitly defined names. When this configuration - is enabled, a Kafka Streams application will not start if any of these components rely on auto-generated names. This guarantees - stability across topology updates, as manually defined names remain unchanged even when new processors or transformations are added. - Enforcing explicit naming is particularly important in production environments, where consistency and backward compatibility are essential - for maintaining reliable stream processing applications. -

          diff --git a/docs/streams/developer-guide/running-app.html b/docs/streams/developer-guide/running-app.html index a6c603f2a3c21..7ef2b98a580b8 100644 --- a/docs/streams/developer-guide/running-app.html +++ b/docs/streams/developer-guide/running-app.html @@ -67,7 +67,7 @@ dynamically during application runtime without any downtime or data loss. This makes your applications resilient in the face of failures and for allows you to perform maintenance as needed (e.g. rolling upgrades).

          For more information about this elasticity, see the Parallelism Model section. Kafka Streams - leverages the Kafka group management functionality, which is built right into the Kafka wire protocol. It is the foundation that enables the + leverages the Kafka group management functionality, which is built right into the Kafka wire protocol. It is the foundation that enables the elasticity of Kafka Streams applications: members of a group coordinate and collaborate jointly on the consumption and processing of data in Kafka. Additionally, Kafka Streams provides stateful processing and allows for fault-tolerant state in environments where application instances may come and go at any time.

          diff --git a/docs/streams/developer-guide/security.html b/docs/streams/developer-guide/security.html index bae4d90b8b24a..e3622bf80e82c 100644 --- a/docs/streams/developer-guide/security.html +++ b/docs/streams/developer-guide/security.html @@ -70,65 +70,7 @@ the ACL set so that the application has the permissions to create, read and write internal topics.

          -
          -

          If the streams rebalance protocol is enabled by setting group.protocol=streams, the following ACLs are required on the topic and group resources:

          -
          - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
          API PROTOCOLOPERATIONResourceNotes
          STREAMS_GROUP_HEARTBEATReadGroupRequired for the application's streams group
          STREAMS_GROUP_HEARTBEATCreateCluster or Topic - Required only if auto-creating internal topics.
          - • Create on Cluster resource
          - • or Create on all topics in StateChangelogTopics and RepartitionSourceTopics
          - Not required if internal topics are pre-created -
          STREAMS_GROUP_HEARTBEATDescribeTopicRequired for all topics used in the application's topology, when first joining.
          STREAMS_GROUP_DESCRIBEDescribeGroupRequired for the application's streams group
          STREAMS_GROUP_DESCRIBEDescribeTopicRequired for all topics used in the group's topology
          - -

          As mentioned earlier, Kafka Streams applications need appropriate ACLs to create internal topics when running against a secured Kafka cluster. - To avoid providing this permission to your application, you can create the required internal topics manually. +

          To avoid providing this permission to your application, you can create the required internal topics manually. If the internal topics exist, Kafka Streams will not try to recreate them. Note, that the internal repartition and changelog topics must be created with the correct number of partitions—otherwise, Kafka Streams will fail on startup. The topics must be created with the same number of partitions as your input topic, or if there are multiple topics, the maximum number of partitions across all input topics. @@ -143,11 +85,10 @@ it is recommended to use ACLs on prefixed resource pattern to configure control lists to allow client to manage all topics and consumer groups started with this prefix as --resource-pattern-type prefixed --topic your.application.id --operation All - (see KIP-277 - and KIP-290 for details). + (see KIP-277 + and KIP-290 for details).

          -

          Security example

          The purpose is to configure a Kafka Streams application to enable client authentication and encrypt data-in-transit when diff --git a/docs/streams/upgrade-guide.html b/docs/streams/upgrade-guide.html index 99c221cbbb5e4..1c6de66ab8036 100644 --- a/docs/streams/upgrade-guide.html +++ b/docs/streams/upgrade-guide.html @@ -136,123 +136,9 @@

          < can choose whether or not to reuse the source topic based on the StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG: if you are upgrading from the old KStreamBuilder class and hence you need to change your code to use the new StreamsBuilder, you should set this config value to StreamsConfig#OPTIMIZE to continue reusing the source topic; if you are upgrading from 1.0 or 1.1 where you are already using StreamsBuilder and hence have already created a separate changelog topic, you should set this config value to StreamsConfig#NO_OPTIMIZATION when upgrading to {{fullDotVersion}} in order to use that changelog topic for restoring the state store. - More details about the new config StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG can be found in KIP-295. + More details about the new config StreamsConfig#TOPOLOGY_OPTIMIZATION_CONFIG can be found in KIP-295.

          -

          Streams API changes in 4.1.0

          - -

          Early Access of the Streams Rebalance Protocol

          - -

          - The Streams Rebalance Protocol is a broker-driven rebalancing system designed specifically for Kafka - Streams applications. Following the pattern of KIP-848, which moved rebalance coordination of plain consumers - from clients to brokers, KIP-1071 extends this model to Kafka Streams workloads. Instead of clients - computing new assignments on the client during rebalance events involving all members of the group, assignments are - computed continuously on the broker. Instead of using a consumer group, the streams application registers as a - streams group with the broker, which manages and exposes all metadata required for coordination of the - streams application instances. -

          - -

          - This Early Access release covers a subset of the functionality detailed in - KIP-1071. - Do not use the new protocol in production. The API is subject to change in future - releases. -

          - -

          What's Included in Early Access

          - -
            -
          • Core Streams Group Rebalance Protocol: The group.protocol=streams configuration - enables the dedicated streams rebalance protocol. This separates streams groups from consumer groups and - provides a streams-specific group membership lifecycle and metadata management on the broker.
          • -
          • Sticky Task Assignor: A basic task assignment strategy that minimizes task movement - during rebalances is included.
          • -
          • Interactive Query Support: IQ operations are compatible with the new streams protocol.
          • -
          • New Admin RPC: The StreamsGroupDescribe RPC provides streams-specific metadata - separate from consumer group information, with corresponding access via the Admin client.
          • -
          • CLI Integration: You can list, describe, and delete streams groups via the kafka-streams-groups.sh script.
          • -
          - -

          What's Not Included in Early Access

          - -
            -
          • Static Membership: Setting a client `instance.id` will be rejected.
          • -
          • Topology Updates: If a topology is changed significantly (e.g., by adding new source topics - or changing the number of sub-topologies), a new streams group must be created.
          • -
          • High Availability Assignor: Only the sticky assignor is supported.
          • -
          • Regular Expressions: Pattern-based topic subscription is not supported.
          • -
          • Reset Operations: CLI offset reset operations are not supported.
          • -
          • Protocol Migration: Group migration is not available between the classic and new streams protocols.
          • -
          - -

          Why Use the Streams Rebalance Protocol?

          - -
            -
          • - Broker-Driven Coordination: - Centralizes task assignment logic on brokers instead of the client. This provides consistent, - authoritative task assignment decisions from a single coordination point and reduces the potential for - split-brain scenarios. -
          • -
          • - Faster, More Stable Rebalances: - Reduces rebalance duration and impact by removing the global synchronization point. This minimizes - application downtime during membership changes or failures. -
          • -
          • - Better Observability: - Provides dedicated metrics and admin interfaces that separate streams from consumer groups, leading to - clearer troubleshooting with broker-side observability. -
          • -
          - -

          - Enabling the protocol requires the brokers and clients are running Apache Kafka 4.1. It should be enabled - only on new clusters for testing purposes. - Set unstable.feature.versions.enable=true for controllers and brokers, and - set unstable.api.versions.enable=true on the brokers as well. In your Kafka Streams application - configuration, set group.protocol=streams. - After the new feature is configured, check - kafka-features.sh --bootstrap-server localhost:9092 describe - and `streams.version` should now have FinalizedVersionLevel 1. -

          - -

          - Migration between the classic consumer group protocol and the Streams Rebalance Protocol is not supported in - either direction. An application using this protocol must use a new application.id that has not - been used by any application on the classic protocol. Furthermore, this ID must not be in use as a - group.id by any consumer ("classic" or "consumer") nor share-group application. - It is also possible to delete a previous consumer group using kafka-consumer-groups.sh before - starting the application with the new protocol, which will however also delete all offsets for that group. -

          - -

          - To operate the new streams groups, explore the options of kafka-streams-groups.sh to list, - describe, and delete streams groups. In the new protocol, streams.session.timeout.ms, - streams.heartbeat.interval.ms and streams.num.standby.replicas are group-level configurations, - which are ignored when they are set on the client side. Use the kafka-configs.sh tool to set - these configurations, for example: - kafka-configs.sh --bootstrap-server localhost:9092 --alter --entity-type groups - --entity-name wordcount --add-config streams.num.standby.replicas=1. -

          - -

          - Please provide feedback on this feature via the - Kafka mailing lists or by filing - JIRA issues. -

          - -

          Other changes

          - -

          - The introduction of KIP-1111 - enables you to enforce explicit naming for all internal resources of the topology, including internal topics (e.g., changelog and repartition topics) and their associated state stores. - This ensures that every internal resource is named before the Kafka Streams application is deployed, which is essential for upgrading your topology. - You can enable this feature via StreamsConfig using the StreamsConfig#ENSURE_EXPLICIT_INTERNAL_RESOURCE_NAMING_CONFIG parameter. - When set to true, the application will refuse to start if any internal resource has an auto-generated name. -

          -

          Streams API changes in 4.0.0

          @@ -262,15 +148,7 @@

          Streams API
          • Old processor APIs
          • KStream#through() in both Java and Scala
          • -
          • - "transformer" methods and classes in both Java and Scala -
              -
            • migrating from KStreams#transformValues() to KStreams.processValues() might not be safe - due to KAFKA-19668. - Please refer to the migration guide for more details. -
            • -
            -
          • +
          • "transformer" methods and classes in both Java and Scala
          • kstream.KStream#branch in both Java and Scala
          • builder methods for Time/Session/Join/SlidingWindows
          • KafkaStreams#setUncaughtExceptionHandler()
          • @@ -279,66 +157,66 @@

            Streams API

            In this release the ClientInstanceIds instance stores the global consumerUuid for the - KIP-714 + KIP-714 id with a key of global stream-thread name appended with "-global-consumer" where before it was only the global stream-thread name.

            In this release two configs default.deserialization.exception.handler and default.production.exception.handler are deprecated, as they don't have any overwrites, which is described in - KIP-1056 + KIP-1056 You can refer to new configs via deserialization.exception.handler and production.exception.handler.

            In previous release, a new version of the Processor API was introduced and the old Processor API was incrementally replaced and deprecated. - KIP-1070 + KIP-1070 follow this path by deprecating MockProcessorContext, Transformer, TransformerSupplier, ValueTransformer, and ValueTransformerSupplier.

            Previously, the ProductionExceptionHandler was not invoked on a (retriable) TimeoutException. With Kafka Streams 4.0, the handler is called, and the default handler would return RETRY to not change existing behavior. - However, a custom handler can now decide to break the infinite retry loop by returning either CONTINUE or FAIL (KIP-1065). + However, a custom handler can now decide to break the infinite retry loop by returning either CONTINUE or FAIL (KIP-1065).

            In this release, Kafka Streams metrics can be collected broker side via the KIP-714 broker-plugin. - For more detailed information, refer to KIP-1076 document please. + For more detailed information, refer to KIP-1076 document please.

            - KIP-1077 + KIP-1077 deprecates the ForeachProcessor class. This change is aimed at improving the organization and clarity of the Kafka Streams API by ensuring that internal classes are not exposed in public packages.

            - KIP-1078 deprecates the leaking getter methods in the Joined helper class. + KIP-1078 deprecates the leaking getter methods in the Joined helper class. These methods are deprecated without a replacement for future removal, as they don't add any value to Kafka Streams users.

            To ensures better encapsulation and organization of configuration documentation within Kafka Streams, - KIP-1085 + KIP-1085 deprecate certain public doc description variables that are only used within the StreamsConfig or TopologyConfig classes. Additionally, the unused variable DUMMY_THREAD_INDEX will also be deprecated.

            Due to the removal of the already deprecated #through method in Kafka Streams, the intermediateTopicsOption of StreamsResetter tool in Apache Kafka is - not needed any more and therefore is deprecated (KIP-1087). + not needed any more and therefore is deprecated (KIP-1087).

            - Since string metrics cannot be collected on the broker side (KIP-714), KIP-1091 + Since string metrics cannot be collected on the broker side (KIP-714), KIP-1091 introduces numeric counterparts to allow proper broker-side metric collection for Kafka Streams applications. These metrics will be available at the INFO recording level, and a thread-level metric with a String value will be available for users leveraging Java Management Extensions (JMX).

            In order to reduce storage overhead and improve API usability, a new method in the Java and Scala APIs that accepts a BiFunction for foreign key extraction is introduced by - KIP-1104. + KIP-1104. KIP-1104 allows foreign key extraction from both the key and value in KTable joins in Apache Kafka. Previously, foreign key joins in KTables only allowed extraction from the value, which led to data duplication and potential inconsistencies. This enhancement introduces a new method in the Java and Scala APIs that accepts a BiFunction for foreign key extraction, enabling more intuitive and efficient joins. @@ -346,29 +224,29 @@

            Streams API

            - With introduction of KIP-1106, + With introduction of KIP-1106, the existing Topology.AutoOffsetReset is deprecated and replaced with a new class org.apache.kafka.streams.AutoOffsetReset to capture the reset strategies. New methods will be added to the org.apache.kafka.streams.Topology and org.apache.kafka.streams.kstream.Consumed classes to support the new reset strategy. These changes aim to provide more flexibility and efficiency in managing offsets, especially in scenarios involving long-term storage and infinite retention.

            - You can now configure your topology with a ProcessorWrapper, which allows you to access and optionally wrap/replace - any processor in the topology by injecting an alternative ProcessorSupplier in its place. This can be used to peek - records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to - aid in testing or debugging scenarios. You must implement the ProcessorWrapper interface and then pass the class - or class name into the configs via the new StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG config. NOTE: this config is - applied during the topology building phase, and therefore will not take effect unless the config is passed in when creating - the StreamsBuilder (DSL) or Topology(PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that - accepts a TopologyConfig parameter for the StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG to be picked up. - See KIP-1112 for more details. + You can now configure your topology with a ProcessorWrapper, which allows you to access and optionally wrap/replace + any processor in the topology by injecting an alternative ProcessorSupplier in its place. This can be used to peek + records and access the processor context even for DSL operators, for example to implement a logging or tracing framework, or to + aid in testing or debugging scenarios. You must implement the ProcessorWrapper interface and then pass the class + or class name into the configs via the new StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG config. NOTE: this config is + applied during the topology building phase, and therefore will not take effect unless the config is passed in when creating + the StreamsBuilder (DSL) or Topology(PAPI) objects. You MUST use the StreamsBuilder/Topology constructor overload that + accepts a TopologyConfig parameter for the StreamsConfig#PROCESSOR_WRAPPER_CLASS_CONFIG to be picked up. + See KIP-1112 for more details.

            Upgraded RocksDB dependency to version 9.7.3 (from 7.9.2). This upgrade incorporates various improvements and optimizations within RocksDB. However, it also introduces some API changes. The org.rocksdb.AccessHint class, along with its associated methods, has been removed. Several methods related to compressed block cache configuration in the BlockBasedTableConfig class have been removed, including blockCacheCompressedNumShardBits, blockCacheCompressedSize, and their corresponding setters. These functionalities are now consolidated under the cache option, and developers should configure their compressed block cache using the setCache method instead. - The NO_FILE_CLOSES field has been removed from the org.rocksdb.TickerTypeenum as a result the number-open-files metrics does not work as expected. Metric number-open-files returns constant -1 from now on until it will officially be removed. + The NO_FILE_CLOSES field has been removed from the org.rocksdb.TickerTypeenum as a result the number-open-files metrics does not work as expected. Metric number-open-files returns constant -1 from now on until it will officially be removed. The org.rocksdb.Options.setLogger() method now accepts a LoggerInterface as a parameter instead of the previous Logger. Some data types used in RocksDB's Java API have been modified. These changes, along with the removed class, field, and new methods, are primarily relevant to users implementing custom RocksDB configurations. These changes are expected to be largely transparent to most Kafka Streams users. However, those employing advanced RocksDB customizations within their Streams applications, particularly through the rocksdb.config.setter, are advised to consult the detailed RocksDB 9.7.3 changelog to ensure a smooth transition and adapt their configurations as needed. Specifically, users leveraging the removed AccessHint class, the removed methods from the BlockBasedTableConfig class, the NO_FILE_CLOSES field from TickerType, or relying on the previous signature of setLogger() will need to update their implementations. @@ -377,7 +255,7 @@

            Streams API

            Streams API changes in 3.9.0

            - The introduction of KIP-1033 + The introduction of KIP-1033 enables you to provide a processing exception handler to manage exceptions during the processing of a record rather than throwing the exception all the way out of your streams application. You can provide the configs via the StreamsConfig as StreamsConfig#PROCESSING_EXCEPTION_HANDLER_CLASS_CONFIG. The specified handler must implement the org.apache.kafka.streams.errors.ProcessingExceptionHandler interface. @@ -386,7 +264,7 @@

            Streams API

            Kafka Streams now allows to customize the logging interval of stream-thread runtime summary, via the newly added config log.summary.interval.ms. By default, the summary is logged every 2 minutes. More details can be found in - KIP-1049. + KIP-1049.

            Streams API changes in 3.8.0

            @@ -411,12 +289,12 @@

            Streams API that implements the new public TaskAssignor interface. For more details, see the public interface section of - KIP-924. + KIP-924.

            The Processor API now support so-called read-only state stores, added via - KIP-813. + KIP-813. These stores don't have a dedicated changelog topic, but use their source topic for fault-tolerance, similar to KTables with source-topic optimization enabled.

            @@ -426,41 +304,41 @@

            Streams API age of open iterators. The new metrics are num-open-iterators, iterator-duration-avg, iterator-duration-max and oldest-iterator-open-since-ms. These metrics are available for all state stores, including RocksDB, in-memory, and custom stores. More details can be found in - KIP-989. + KIP-989.

            Streams API changes in 3.7.0

            We added a new method to KafkaStreams, namely KafkaStreams#setStandbyUpdateListener() in - KIP-988, + KIP-988, in which users can provide their customized implementation of the newly added StandbyUpdateListener interface to continuously monitor changes to standby tasks.

            IQv2 supports RangeQuery that allows to specify unbounded, bounded, or half-open key-ranges, which return data in unordered (byte[]-lexicographical) order (per partition). - KIP-985 extends this functionality by adding .withDescendingKeys() and .withAscendingKeys()to allow user to receive data in descending or ascending order. + KIP-985 extends this functionality by adding .withDescendingKeys() and .withAscendingKeys()to allow user to receive data in descending or ascending order.

            - KIP-992 adds two new query types, + KIP-992 adds two new query types, namely TimestampedKeyQuery and TimestampedRangeQuery. Both should be used to query a timestamped key-value store, to retrieve a ValueAndTimestamp result. The existing KeyQuery and RangeQuery are changed to always return the value only for timestamped key-value stores.

            - IQv2 adds support for MultiVersionedKeyQuery (introduced in KIP-968) + IQv2 adds support for MultiVersionedKeyQuery (introduced in KIP-968) that allows retrieving a set of records from a versioned state store for a given key and a specified time range. Users have to use fromTime(Instant) and/or toTime(Instant) to specify a half or a complete time range.

            - IQv2 adds support for VersionedKeyQuery (introduced in KIP-960) + IQv2 adds support for VersionedKeyQuery (introduced in KIP-960) that allows retrieving a single record from a versioned state store based on its key and timestamp. Users have to use the asOf(Instant) method to define a query that returns the record's version for the specified timestamp. To be more precise, the key query returns the record with the greatest timestamp <= Instant.

            - The non-null key requirements for Kafka Streams join operators were relaxed as part of KIP-962. + The non-null key requirements for Kafka Streams join operators were relaxed as part of KIP-962. The behavior of the following operators changed.

            - See KIP-216 for more information. + See KIP-216 for more information.

            We deprecated the StreamsConfig processing.guarantee configuration value "exactly_once" (for EOS version 1) in favor of the improved EOS version 2, formerly configured via @@ -830,7 +703,7 @@

            Streams API when playing around with Kafka Streams for the first time. Note that using the new APIs for the JoinWindows class will also enable a fix for spurious left/outer join results, as described in the following paragraph. For more details on the grace period and new static constructors, see - KIP-633 + KIP-633

            Additionally, in older versions Kafka Streams emitted stream-stream left/outer join results eagerly. This behavior may lead to spurious left/outer join result records. @@ -863,22 +736,22 @@

            Streams API We removed the following deprecated APIs:

              -
            • --zookeeper flag of the application reset tool: deprecated in Kafka 1.0.0 (KIP-198).
            • -
            • --execute flag of the application reset tool: deprecated in Kafka 1.1.0 (KIP-171).
            • -
            • StreamsBuilder#addGlobalStore (one overload): deprecated in Kafka 1.1.0 (KIP-233).
            • -
            • ProcessorContext#forward (some overloads): deprecated in Kafka 2.0.0 (KIP-251).
            • -
            • WindowBytesStoreSupplier#segments: deprecated in Kafka 2.1.0 (KIP-319).
            • -
            • segments, until, maintainMs on TimeWindows, JoinWindows, and SessionWindows: deprecated in Kafka 2.1.0 (KIP-328).
            • -
            • Overloaded JoinWindows#of, before, after, SessionWindows#with, TimeWindows#of, advanceBy, UnlimitedWindows#startOn and KafkaStreams#close with long typed parameters: deprecated in Kafka 2.1.0 (KIP-358).
            • -
            • Overloaded KStream#groupBy, groupByKey and KTable#groupBy with Serialized parameter: deprecated in Kafka 2.1.0 (KIP-372).
            • -
            • Joined#named, name: deprecated in Kafka 2.3.0 (KIP-307).
            • -
            • TopologyTestDriver#pipeInput, readOutput, OutputVerifier and ConsumerRecordFactory classes (KIP-470).
            • -
            • KafkaClientSupplier#getAdminClient: deprecated in Kafka 2.4.0 (KIP-476).
            • -
            • Overloaded KStream#join, leftJoin, outerJoin with KStream and Joined parameters: deprecated in Kafka 2.4.0 (KIP-479).
            • -
            • WindowStore#put(K key, V value): deprecated in Kafka 2.4.0 (KIP-474).
            • -
            • UsePreviousTimeOnInvalidTimestamp: deprecated in Kafka 2.5.0 as renamed to UsePartitionTimeOnInvalidTimestamp (KIP-530).
            • -
            • Overloaded KafkaStreams#metadataForKey: deprecated in Kafka 2.5.0 (KIP-535).
            • -
            • Overloaded KafkaStreams#store: deprecated in Kafka 2.5.0 (KIP-562).
            • +
            • --zookeeper flag of the application reset tool: deprecated in Kafka 1.0.0 (KIP-198).
            • +
            • --execute flag of the application reset tool: deprecated in Kafka 1.1.0 (KIP-171).
            • +
            • StreamsBuilder#addGlobalStore (one overload): deprecated in Kafka 1.1.0 (KIP-233).
            • +
            • ProcessorContext#forward (some overloads): deprecated in Kafka 2.0.0 (KIP-251).
            • +
            • WindowBytesStoreSupplier#segments: deprecated in Kafka 2.1.0 (KIP-319).
            • +
            • segments, until, maintainMs on TimeWindows, JoinWindows, and SessionWindows: deprecated in Kafka 2.1.0 (KIP-328).
            • +
            • Overloaded JoinWindows#of, before, after, SessionWindows#with, TimeWindows#of, advanceBy, UnlimitedWindows#startOn and KafkaStreams#close with long typed parameters: deprecated in Kafka 2.1.0 (KIP-358).
            • +
            • Overloaded KStream#groupBy, groupByKey and KTable#groupBy with Serialized parameter: deprecated in Kafka 2.1.0 (KIP-372).
            • +
            • Joined#named, name: deprecated in Kafka 2.3.0 (KIP-307).
            • +
            • TopologyTestDriver#pipeInput, readOutput, OutputVerifier and ConsumerRecordFactory classes (KIP-470).
            • +
            • KafkaClientSupplier#getAdminClient: deprecated in Kafka 2.4.0 (KIP-476).
            • +
            • Overloaded KStream#join, leftJoin, outerJoin with KStream and Joined parameters: deprecated in Kafka 2.4.0 (KIP-479).
            • +
            • WindowStore#put(K key, V value): deprecated in Kafka 2.4.0 (KIP-474).
            • +
            • UsePreviousTimeOnInvalidTimestamp: deprecated in Kafka 2.5.0 as renamed to UsePartitionTimeOnInvalidTimestamp (KIP-530).
            • +
            • Overloaded KafkaStreams#metadataForKey: deprecated in Kafka 2.5.0 (KIP-535).
            • +
            • Overloaded KafkaStreams#store: deprecated in Kafka 2.5.0 (KIP-562).

            The following dependencies were removed from Kafka Streams: @@ -901,7 +774,7 @@

            Streams API

            Streams API changes in 2.8.0

            We extended StreamJoined to include the options withLoggingEnabled() and withLoggingDisabled() in - KIP-689. + KIP-689.

            We added two new methods to KafkaStreams, namely KafkaStreams#addStreamThread() and KafkaStreams#removeStreamThread() in @@ -930,19 +803,19 @@

            Streams API The TimeWindowedDeserializer constructor TimeWindowedDeserializer(final Deserializer inner) was deprecated to encourage users to properly set their window size through TimeWindowedDeserializer(final Deserializer inner, Long windowSize). An additional streams config, window.size.ms, was added for users that cannot set the window size through - the constructor, such as when using the console consumer. KIP-659 + the constructor, such as when using the console consumer. KIP-659 has more details.

            To simplify testing, two new constructors that don't require a Properties parameter have been added to the TopologyTestDriver class. If Properties are passed into the constructor, it is no longer required to set mandatory configuration parameters - (cf. KIP-680). + (cf. KIP-680).

            We added the prefixScan() method to interface ReadOnlyKeyValueStore. The new prefixScan() allows fetching all values whose keys start with a given prefix. - See KIP-614 for more details. + See KIP-614 for more details.

            Kafka Streams is now handling TimeoutException thrown by the consumer, producer, and admin client. @@ -951,12 +824,12 @@

            Streams API To bound how long Kafka Streams retries a task, you can set task.timeout.ms (default is 5 minutes). If a task does not make progress within the specified task timeout, which is tracked on a per-task basis, Kafka Streams throws a TimeoutException - (cf. KIP-572). + (cf. KIP-572).

            We changed the default value of default.key.serde and default.value.serde to be null instead of ByteArraySerde. Users will now see a ConfigException if their serdes are not correctly configured through those configs or passed in explicitly. - See KIP-741 for more details. + See KIP-741 for more details.

            Streams API changes in 2.7.0

            @@ -966,13 +839,13 @@

            Streams API KeyQueryMetadata was introduced in Kafka Streams 2.5 release with getter methods having prefix get. The intend of this change is to bring the method names to Kafka custom to not use the get prefix for getter methods. The old methods are deprecated and is not effected. - (Cf. KIP-648.) + (Cf. KIP-648.)

            The StreamsConfig variable for configuration parameter "topology.optimization" is renamed from TOPOLOGY_OPTIMIZATION to TOPOLOGY_OPTIMIZATION_CONFIG. The old variable is deprecated. Note, that the parameter name itself is not affected. - (Cf. KIP-626.) + (Cf. KIP-629.)

            The configuration parameter retries is deprecated in favor of the new parameter task.timeout.ms. @@ -981,7 +854,7 @@

            Streams API

            We added SlidingWindows as an option for windowedBy() windowed aggregations as described in - KIP-450. + KIP-450. Sliding windows are fixed-time and data-aligned windows that allow for flexible and efficient windowed aggregations.

            @@ -994,7 +867,7 @@

            Streams API

            Streams API changes in 2.6.0

            We added a new processing mode, EOS version 2, that improves application scalability using exactly-once guarantees - (via KIP-447). + (via KIP-447). You can enable this new feature by setting the configuration parameter processing.guarantee to the new value "exactly_once_beta". Note that you need brokers with version 2.5 or newer to use this feature. @@ -1013,9 +886,9 @@

            Streams API

            As of 2.6.0 Kafka Streams deprecates KStream.through() in favor of the new KStream.repartition() operator - (as per KIP-221). + (as per KIP-221). KStream.repartition() is similar to KStream.through(), however Kafka Streams will manage the topic for you. - If you need to write into and read back from a topic that you manage, you can fall back to use KStream.to() in combination with StreamsBuilder#stream(). + If you need to write into and read back from a topic that you mange, you can fall back to use KStream.to() in combination with StreamsBuilder#stream(). Please refer to the developer guide for more details about KStream.repartition().

            @@ -1027,50 +900,50 @@

            Streams API

            We added a --force option in StreamsResetter to force remove left-over members on broker side when long session time out was configured - as per KIP-571. + as per KIP-571.

            We added Suppressed.withLoggingDisabled() and Suppressed.withLoggingEnabled(config) methods to allow disabling or configuring of the changelog topic and allows for configuration of the changelog topic - as per KIP-446. + as per KIP-446.

            Streams API changes in 2.5.0

            - We add a new cogroup() operator (via KIP-150) + We add a new cogroup() operator (via KIP-150) that allows to aggregate multiple streams in a single operation. Cogrouped streams can also be windowed before they are aggregated. Please refer to the developer guide for more details.

            We added a new KStream.toTable() API to translate an input event stream into a changelog stream as per - KIP-523. + KIP-523.

            - We added a new Serde type Void in KIP-527 to represent + We added a new Serde type Void in KIP-527 to represent null keys or null values from input topic.

            Deprecated UsePreviousTimeOnInvalidTimestamp and replaced it with UsePartitionTimeOnInvalidTimeStamp as per - KIP-530. + KIP-530.

            Deprecated KafkaStreams.store(String, QueryableStoreType) and replaced it with KafkaStreams.store(StoreQueryParameters) to allow querying for a store with variety of parameters, including querying a specific task and stale stores, as per - KIP-562 and - KIP-535 respectively. + KIP-562 and + KIP-535 respectively.

            Streams API changes in 2.4.0

            - As of 2.4.0 Kafka Streams offers a KTable-KTable foreign-key join (as per KIP-213). + As of 2.4.0 Kafka Streams offers a KTable-KTable foreign-key join (as per KIP-213). This joiner allows for records to be joined between two KTables with different keys. Both INNER and LEFT foreign-key joins are supported.

            In the 2.4 release, you now can name all operators in a Kafka Streams DSL topology via - KIP-307. + KIP-307. Giving your operators meaningful names makes it easier to understand the topology description (Topology#describe()#toString()) and understand the full context of what your Kafka Streams application is doing. @@ -1098,19 +971,19 @@

            < of StreamJoined, stream-stream join operations using Joined have been deprecated. Please switch over to stream-stream join methods using the new overloaded methods. You can get more details from - KIP-479. + KIP-479.

            With the introduction of incremental cooperative rebalancing, Streams no longer requires all tasks be revoked at the beginning of a rebalance. Instead, at the completion of the rebalance only those tasks which are to be migrated to another consumer for overall load balance will need to be closed and revoked. This changes the semantics of the StateListener a bit, as it will not necessarily transition to REBALANCING at the beginning of a rebalance anymore. Note that this means IQ will now be available at all times except during state restoration, including while a rebalance is in progress. If restoration is occurring when a rebalance begins, we will continue to actively restore the state stores and/or process standby tasks during a cooperative rebalance. Note that with this new rebalancing protocol, you may sometimes see a rebalance be followed by a second short rebalance that ensures all tasks are safely distributed. For details on please see - KIP-429. + KIP-429.

            The 2.4.0 release contains newly added and reworked metrics. - KIP-444 + KIP-444 adds new client level (i.e., KafkaStreams instance level) metrics to the existing thread-level, task-level, and processor-/state-store-level metrics. For a full list of available client level metrics, see the @@ -1118,7 +991,7 @@

            < section in the operations guide.
            Furthermore, RocksDB metrics are exposed via -
            KIP-471. + KIP-471. For a full list of available RocksDB metrics, see the RocksDB monitoring section in the operations guide. @@ -1126,7 +999,7 @@

            <

            Kafka Streams test-utils got improved via - KIP-470 + KIP-470 to simplify the process of using TopologyTestDriver to test your application code. We deprecated ConsumerRecordFactory, TopologyTestDriver#pipeInput(), OutputVerifier, as well as TopologyTestDriver#readOutput() and replace them with @@ -1145,7 +1018,7 @@

            <

            Furthermore, the PartitionGrouper interface and its corresponding configuration parameter partition.grouper were deprecated - (KIP-528) + (KIP-528) and will be removed in the next major release (KAFKA-7785. Hence, this feature won't be supported in the future any longer and you need to updated your code accordingly. If you use a custom PartitionGrouper and stop to use it, the created tasks might change. @@ -1157,8 +1030,8 @@

            <

            Version 2.3.0 adds the Suppress operator to the kafka-streams-scala Ktable API.

            - As of 2.3.0 Streams now offers an in-memory version of the window (KIP-428) - and the session (KIP-445) store, in addition to the persistent ones based on RocksDB. + As of 2.3.0 Streams now offers an in-memory version of the window (KIP-428) + and the session (KIP-445) store, in addition to the persistent ones based on RocksDB. The new public interfaces inMemoryWindowStore() and inMemorySessionStore() are added to Stores and provide the built-in in-memory window or session store.

            @@ -1173,7 +1046,7 @@

            <

            In 2.3.0 we have added default implementation to close() and configure() for Serializer, Deserializer and Serde so that they can be implemented by lambda expression. - For more details please read KIP-331. + For more details please read KIP-331.

            @@ -1187,18 +1060,18 @@

            < java.lang.ClassCastException: class org.apache.kafka.streams.state.ValueAndTimestamp cannot be cast to class YOUR-VALUE-TYPE upon getting a value from the store. Additionally, TopologyTestDriver#getStateStore() only returns non-built-in stores and throws an exception if a built-in store is accessed. - For more details please read KIP-258. + For more details please read KIP-258.

            To improve type safety, a new operator KStream#flatTransformValues is added. - For more details please read KIP-313. + For more details please read KIP-313.

            Kafka Streams used to set the configuration parameter max.poll.interval.ms to Integer.MAX_VALUE. This default value is removed and Kafka Streams uses the consumer default value now. - For more details please read KIP-442. + For more details please read KIP-442.

            @@ -1206,13 +1079,13 @@

            < The segment size for index files (segment.index.bytes) is no longer 50MB, but uses the cluster default. Similarly, the configuration segment.ms in no longer 10 minutes, but uses the cluster default configuration. Lastly, the retention period (retention.ms) is changed from Long.MAX_VALUE to -1 (infinite). - For more details please read KIP-443. + For more details please read KIP-443.

            To avoid memory leaks, RocksDBConfigSetter has a new close() method that is called on shutdown. Users should implement this method to release any memory used by RocksDB config objects, by closing those objects. - For more details please read KIP-453. + For more details please read KIP-453.

            @@ -1230,12 +1103,12 @@

            <

            In WindowedSerdes, we've added a new static constructor to return a TimeWindowSerde with configurable window size. This is to help users to construct time window serdes to read directly from a time-windowed store's changelog. - More details can be found in KIP-393. + More details can be found in KIP-393.

            In 2.2.0 we have extended a few public interfaces including KafkaStreams to extend AutoCloseable so that they can be - used in a try-with-resource statement. For a full list of public interfaces that get impacted please read KIP-376. + used in a try-with-resource statement. For a full list of public interfaces that get impacted please read KIP-376.

            Streams API changes in 2.1.0

            @@ -1244,7 +1117,7 @@

            < Users are encouraged to use #topicSet() and #topicPattern() accordingly on TopologyDescription.Source nodes, instead of using #topics(), which has since been deprecated. Similarly, use #topic() and #topicNameExtractor() to get descriptions of TopologyDescription.Sink nodes. For more details, see - KIP-321. + KIP-321.

            @@ -1257,7 +1130,7 @@

            < Additionally, we've updated the Joined class with a new method Joined#withName enabling users to name any repartition topics required for performing Stream/Stream or Stream/Table join. For more details repartition - topic naming, see KIP-372. + topic naming, see KIP-372. As a result we've updated the Kafka Streams Scala API and removed the Serialized class in favor of adding Grouped. If you just rely on the implicit Serialized, you just need to recompile; if you pass in Serialized explicitly, sorry you'll have to make code changes. @@ -1278,7 +1151,7 @@

            <

            We added a new serde for UUIDs (Serdes.UUIDSerde) that you can use via Serdes.UUID() - (cf. KIP-206). + (cf. KIP-206).

            @@ -1292,19 +1165,19 @@

            < The Window class has new methods #startTime() and #endTime() that return window start/end timestamp as Instant. For interactive queries, there are new #fetch(...) overloads taking Instant arguments. Additionally, punctuations are now registered via ProcessorContext#schedule(Duration interval, ...). - For more details, see KIP-358. + For more details, see KIP-358.

            We deprecated KafkaStreams#close(...) and replaced it with KafkaStreams#close(Duration) that accepts a single timeout argument Note: the new #close(Duration) method has improved (but slightly different) semantics. - For more details, see KIP-358. + For more details, see KIP-358.

            The newly exposed AdminClient metrics are now available when calling the KafkaStream#metrics() method. For more details on exposing AdminClients metrics - see KIP-324 + see KIP-324

            @@ -1314,9 +1187,9 @@

            < Similarly, WindowBytesStoreSupplier#segments() was deprecated and replaced with WindowBytesStoreSupplier#segmentInterval(). If you implement custom window store, you need to update your code accordingly. Finally, Stores#persistentWindowStore(...) were deprecated and replaced with a new overload that does not allow to specify the number of segments any longer. - For more details, see KIP-319 - (note: KIP-328 and - KIP-358 'overlap' with KIP-319). + For more details, see KIP-319 + (note: KIP-328 and + KIP-358 'overlap' with KIP-319).

            @@ -1328,7 +1201,7 @@

            < reusing the source topic as the changelog topic, the topology may be optimized to merge redundant repartition topics into one repartition topic. The original no parameter version of StreamsBuilder#build is still available for those who wish to not optimize their topology. Note that enabling optimization of the topology may require you to do an application reset when redeploying the application. For more - details, see KIP-312 + details, see KIP-312

            @@ -1366,7 +1239,7 @@

            < skipped-records-rate and skipped-records-total. When a record is skipped, the event is now logged at WARN level. If these warnings become burdensome, we recommend explicitly filtering out unprocessable records instead of depending on record skipping semantics. For more details, see - KIP-274. + KIP-274. As of right now, the potential causes of skipped records are:

            -

            Relaxed type constraints of many DSL interfaces, classes, and methods (cf. KIP-100).

            +

            Relaxed type constraints of many DSL interfaces, classes, and methods (cf. KIP-100).

            Streams API changes in 0.10.1.0

            @@ -1864,8 +1737,8 @@

            Kafka Streams API (rows) - 2.1.x and
            2.2.x and
            2.3.x and
            2.4.x and
            2.5.x and
            2.6.x and
            2.7.x and
            2.8.x and
            3.0.x and
            3.1.x and
            3.2.x and
            3.3.x and
            3.4.x and
            3.5.x and
            3.6.x and
            3.7.x and
            3.8.x and
            3.9.x and
            4.0.x - 4.1.x + 2.1.x and
            2.2.x and
            2.3.x and
            2.4.x and
            2.5.x and
            2.6.x and
            2.7.x and
            2.8.x and
            3.0.x and
            3.1.x and
            3.2.x and
            3.3.x and
            3.4.x and
            3.5.x and
            3.6.x and
            3.7.x and
            3.8.x and
            3.9.x + 4.0.x 2.4.x and
            2.5.x @@ -1873,7 +1746,7 @@

            compatible - 2.6.x and
            2.7.x and
            2.8.x and
            3.0.x and
            3.1.x and
            3.2.x and
            3.3.x and
            3.4.x and
            3.5.x and
            3.6.x and
            3.7.x and
            3.8.x and
            3.9.x and
            4.0.x and
            4.1.x + 2.6.x and
            2.7.x and
            2.8.x and
            3.0.x and
            3.1.x and
            3.2.x and
            3.3.x and
            3.4.x and
            3.5.x and
            3.6.x and
            3.7.x and
            3.8.x and
            3.9.x and
            4.0.x compatible; enabling exactly-once v2 requires broker version 2.5.x or higher compatible diff --git a/docs/toc.html b/docs/toc.html index 304bd1c8a53c1..c42961cf7fbcd 100644 --- a/docs/toc.html +++ b/docs/toc.html @@ -36,31 +36,29 @@
          • 3. Configuration @@ -153,7 +148,6 @@
          • 6.7 Monitoring
            • Security Considerations for Remote Monitoring using JMX -
            • Group Coordinator Monitoring
            • Tiered Storage Monitoring
            • KRaft Monitoring
            • Selector Monitoring @@ -168,9 +162,7 @@
            • 6.8 KRaft
              • Configuration -
              • Upgrade -
              • Provisioning Nodes -
              • Controller membership changes +
              • Storage Tool
              • Debugging
              • Deploying Considerations
              • ZooKeeper to KRaft Migration diff --git a/docs/upgrade.html b/docs/upgrade.html index 66e05d90a5db2..eba558f39cef4 100644 --- a/docs/upgrade.html +++ b/docs/upgrade.html @@ -19,222 +19,9 @@